* fix: update new api from cortex to support 0.5.0 Signed-off-by: James <namnh0122@gmail.com> * fix stop button for streaming Signed-off-by: James <namnh0122@gmail.com> * fix stop inference for nonstreaming Signed-off-by: James <namnh0122@gmail.com> * chore: remove umami prevent tracking call to vercel Signed-off-by: James <namnh0122@gmail.com> * add warning modal when running more than 2 model concurrently Signed-off-by: James <namnh0122@gmail.com> * fix: skip summarize if abort Signed-off-by: James <namnh0122@gmail.com> * 0.5.0-3 * add inference error popup Signed-off-by: James <namnh0122@gmail.com> * add back import local model Signed-off-by: James <namnh0122@gmail.com> * fix: max token issue (#3225) Signed-off-by: James <namnh0122@gmail.com> * format status Signed-off-by: James <namnh0122@gmail.com> * fix migration missing instructions Signed-off-by: James <namnh0122@gmail.com> * fix: wait for cortex process overlay should be on top (#3224) * fix: wait for cortex process overlay should be on top * chore: update cortex.js * Cortex 0.5.0-5 * add import model to my model screen Signed-off-by: James <namnh0122@gmail.com> * fix: should migrate symlink models (#3226) * fix import on windows (#3229) Signed-off-by: James <namnh0122@gmail.com> * fix yarn lint Signed-off-by: James <namnh0122@gmail.com> * fix: clean up port before start jan (#3232) Signed-off-by: James <namnh0122@gmail.com> --------- Signed-off-by: James <namnh0122@gmail.com> Co-authored-by: Van Pham <64197333+Van-QA@users.noreply.github.com> Co-authored-by: Louis <louis@jan.ai>
52 lines
1.5 KiB
TypeScript
52 lines
1.5 KiB
TypeScript
import { useAtomValue } from 'jotai'
|
|
|
|
import CenterPanelContainer from '@/containers/CenterPanelContainer'
|
|
import GenerateResponse from '@/containers/Loader/GenerateResponse'
|
|
import ModelStart from '@/containers/Loader/ModelStart'
|
|
|
|
import useSendMessage from '@/hooks/useSendMessage'
|
|
|
|
import ChatBody from '@/screens/Thread/ThreadCenterPanel/ChatBody'
|
|
|
|
import ChatInput from './ChatInput'
|
|
|
|
import {
|
|
isGeneratingResponseAtom,
|
|
activeThreadAtom,
|
|
isLoadingModelAtom,
|
|
} from '@/helpers/atoms/Thread.atom'
|
|
|
|
/**
 * Center panel of the thread screen.
 *
 * Renders the chat history for the currently selected thread, transient
 * loader overlays while a response is streaming or a model is starting,
 * and the message input box. When no thread is active, only the loaders
 * (if any) are shown.
 */
const ThreadCenterPanel: React.FC = () => {
  const { sendMessage, stopInference, resendMessage } = useSendMessage()

  // Shared thread state read from the jotai store.
  const activeThread = useAtomValue(activeThreadAtom)
  const isLoadingModel = useAtomValue(isLoadingModelAtom)
  const isGeneratingResponse = useAtomValue(isGeneratingResponseAtom)

  // Chat body and input are only rendered once a thread is selected.
  const hasActiveThread = Boolean(activeThread)

  return (
    <CenterPanelContainer>
      <div className="relative flex h-full w-full flex-col outline-none">
        <div className="flex h-full w-full flex-col justify-between">
          {hasActiveThread && (
            <div className="flex h-full w-full overflow-x-hidden">
              <ChatBody onResendMessage={resendMessage} />
            </div>
          )}

          {/* Transient status indicators. */}
          {isGeneratingResponse && <GenerateResponse />}
          {isLoadingModel && <ModelStart />}

          {hasActiveThread && (
            <ChatInput sendMessage={sendMessage} stopInference={stopInference} />
          )}
        </div>
      </div>
    </CenterPanelContainer>
  )
}

export default ThreadCenterPanel
|