fix: load model fail overlays thread message error (#1901)

Louis 2024-02-02 10:26:31 +07:00 committed by GitHub
parent 36ad16ff4e
commit bef8dcd6d5
4 changed files with 7 additions and 56 deletions
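In short: when a model failed to load, `loadModelErrorAtom` was set, which rendered a screen-level `LoadModelErrorMessage` overlay and simultaneously suppressed the per-message `<ErrorMessage />` row in the chat body, so the overlay masked the thread message's own error. This commit deletes the overlay component, removes the `!loadModelError` gate so errored or stopped messages always show their error row, and clears the error atom whenever the active thread changes. File paths on the headers below are inferred from the import specifiers visible in the hunks.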

hooks/useSetActiveThread.ts

@@ -8,6 +8,8 @@ import {
 import { useAtomValue, useSetAtom } from 'jotai'
+
+import { loadModelErrorAtom } from './useActiveModel'
 import { extensionManager } from '@/extension'
 import { setConvoMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
 import {
@@ -24,6 +26,7 @@ export default function useSetActiveThread() {
   const setThreadMessage = useSetAtom(setConvoMessagesAtom)
   const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
   const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
+  const setLoadModelError = useSetAtom(loadModelErrorAtom)

   const setActiveThread = async (thread: Thread) => {
     if (activeThreadId === thread.id) {
@@ -32,6 +35,7 @@ export default function useSetActiveThread() {
     }

     setIsGeneratingResponse(false)
+    setLoadModelError(undefined)
     events.emit(InferenceEvent.OnInferenceStopped, thread.id)

     // load the corresponding messages
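For reference, `loadModelErrorAtom` is pulled from the sibling `useActiveModel` hook; its declaration is not part of this diff. A minimal sketch consistent with how it is used here (hypothetical, the real value type may differ):

    // Sketch only -- the actual atom lives in hooks/useActiveModel.ts
    // and is not shown in this diff.
    import { atom } from 'jotai'

    // Holds the last model-load error; undefined means no pending error.
    export const loadModelErrorAtom = atom<string | undefined>(undefined)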

screens/Chat/ChatBody

@@ -25,7 +25,6 @@ const ChatBody: React.FC = () => {
   const messages = useAtomValue(getCurrentChatMessagesAtom)
   const { downloadedModels } = useGetDownloadedModels()
   const { setMainViewState } = useMainViewState()
-  const loadModelError = useAtomValue(loadModelErrorAtom)

   if (downloadedModels.length === 0)
     return (
@@ -86,9 +85,8 @@ const ChatBody: React.FC = () => {
             message.content.length > 0) && (
             <ChatItem {...message} key={message.id} />
           )}
-          {!loadModelError &&
-            (message.status === MessageStatus.Error ||
-              message.status === MessageStatus.Stopped) &&
+          {(message.status === MessageStatus.Error ||
+            message.status === MessageStatus.Stopped) &&
             index === messages.length - 1 && (
               <ErrorMessage message={message} />
             )}
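With the `!loadModelError` gate removed, the error row for the last message depends only on the message's own status, so a model-load failure can no longer hide a thread message's error. The resulting JSX, reconstructed from the new side of the hunk:

    {(message.status === MessageStatus.Error ||
      message.status === MessageStatus.Stopped) &&
      index === messages.length - 1 && (
        <ErrorMessage message={message} />
      )}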

screens/Chat/LoadModelErrorMessage (deleted file)

@@ -1,48 +0,0 @@
-import { MessageStatus, ThreadMessage } from '@janhq/core'
-import { useAtomValue } from 'jotai'
-
-import { useActiveModel } from '@/hooks/useActiveModel'
-
-import { totalRamAtom } from '@/helpers/atoms/SystemBar.atom'
-
-const LoadModelErrorMessage = () => {
-  const { activeModel } = useActiveModel()
-  const availableRam = useAtomValue(totalRamAtom)
-  return (
-    <>
-      <div className="mt-10 flex flex-col items-center">
-        <span className="mb-3 text-center text-sm font-medium text-gray-500">
-          {Number(activeModel?.metadata.size) > availableRam ? (
-            <>
-              Oops! Model size exceeds available RAM. Consider selecting a
-              smaller model or upgrading your RAM for smoother performance.
-            </>
-          ) : (
-            <>
-              <p>Apologies, something&apos;s amiss!</p>
-              Jan&apos;s in beta. Find troubleshooting guides{' '}
-              <a
-                href="https://jan.ai/guides/troubleshooting"
-                target="_blank"
-                className="text-blue-600 hover:underline dark:text-blue-300"
-              >
-                here
-              </a>{' '}
-              or reach out to us on{' '}
-              <a
-                href="https://discord.gg/AsJ8krTT3N"
-                target="_blank"
-                className="text-blue-600 hover:underline dark:text-blue-300"
-              >
-                Discord
-              </a>{' '}
-              for assistance.
-            </>
-          )}
-        </span>
-      </div>
-    </>
-  )
-}
-
-export default LoadModelErrorMessage
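Nothing replaces this overlay one-for-one: the RAM-size hint and the troubleshooting and Discord links rendered only here. After this commit, failures surface through the per-message `<ErrorMessage />` row in ChatBody rather than a screen-level overlay that could cover it.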

screens/Chat (ChatScreen component)

@@ -20,7 +20,7 @@ import { snackbar } from '@/containers/Toast'
 import { FeatureToggleContext } from '@/context/FeatureToggle'

-import { activeModelAtom, loadModelErrorAtom } from '@/hooks/useActiveModel'
+import { activeModelAtom } from '@/hooks/useActiveModel'
 import { queuedMessageAtom, reloadModelAtom } from '@/hooks/useSendChatMessage'

 import ChatBody from '@/screens/Chat/ChatBody'
@@ -28,7 +28,6 @@ import ChatBody from '@/screens/Chat/ChatBody'
 import ThreadList from '@/screens/Chat/ThreadList'

 import ChatInput from './ChatInput'
-import LoadModelErrorMessage from './LoadModelErrorMessage'
 import RequestDownloadModel from './RequestDownloadModel'
 import Sidebar from './Sidebar'
@@ -70,7 +69,6 @@ const ChatScreen: React.FC = () => {
   const activeModel = useAtomValue(activeModelAtom)
   const isGeneratingResponse = useAtomValue(isGeneratingResponseAtom)
-  const loadModelError = useAtomValue(loadModelErrorAtom)

   const { getRootProps, isDragReject } = useDropzone({
     noClick: true,
@@ -213,7 +211,6 @@ const ChatScreen: React.FC = () => {
           )}
           {activeModel && isGeneratingResponse && <GenerateResponse />}
-          {loadModelError && <LoadModelErrorMessage />}
           <ChatInput />
         </div>
       </div>
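Taken together, switching threads now clears any stale load-model error before the new thread's messages load. A hypothetical call site (the hook's return shape is assumed, not shown in this diff):

    // Assumes useSetActiveThread() returns { setActiveThread }.
    const { setActiveThread } = useSetActiveThread()
    // Resets loadModelErrorAtom to undefined and stops any running inference.
    await setActiveThread(nextThread)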