perf: remove unnecessary rerenders while the user is typing input (#1818)

Co-authored-by: Faisal Amir <urmauur@gmail.com>
NamH · 2024-01-29 13:53:18 +07:00 · committed by GitHub
parent edaf6bb5f7
commit bb47d6869d
8 changed files with 74 additions and 66 deletions
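The change attacks a classic jotai perf trap: `useSendChatMessage` read `currentPromptAtom` with `useAtom`, so every keystroke in the chat input rerendered every component that called the hook. Three moves fix it. The hook now only writes the prompt and takes the message text as an argument to `sendChatMessage`; the `queuedMessage`/`reloadModel` `useState` pairs become exported atoms so display-only consumers can subscribe to a single boolean; and the global `generateResponseAtom` flag is replaced by a value derived from per-thread `waitingForResponse` state. A minimal sketch of the read/write split (atom and function names follow the diff; the module around them is illustrative):

```ts
// useAtom subscribes AND writes, so its component rerenders on every change;
// useSetAtom only writes and never triggers a rerender in the caller.
import { atom, useSetAtom } from 'jotai'

// Illustrative stand-in for the app's currentPromptAtom.
const currentPromptAtom = atom('')

export function useSendSketch() {
  // Before: const [currentPrompt, setCurrentPrompt] = useAtom(currentPromptAtom)
  // subscribed the hook (and every component using it) to each keystroke.
  const setCurrentPrompt = useSetAtom(currentPromptAtom)

  const sendChatMessage = async (message: string) => {
    if (!message || message.trim().length === 0) return
    setCurrentPrompt('') // write-only: clearing the prompt costs no rerender here
    // ...build the message request and emit it, as the real hook does
  }

  return { sendChatMessage }
}
```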


@@ -22,7 +22,6 @@ import { extensionManager } from '@/extension'
 import {
   addNewMessageAtom,
   updateMessageAtom,
-  generateResponseAtom,
 } from '@/helpers/atoms/ChatMessage.atom'
 import {
   updateThreadWaitingForResponseAtom,
@@ -35,7 +34,6 @@ export default function EventHandler({ children }: { children: ReactNode }) {
   const { downloadedModels } = useGetDownloadedModels()
   const setActiveModel = useSetAtom(activeModelAtom)
   const setStateModel = useSetAtom(stateModelAtom)
-  const setGenerateResponse = useSetAtom(generateResponseAtom)
   const updateThreadWaiting = useSetAtom(updateThreadWaitingForResponseAtom)
   const threads = useAtomValue(threadsAtom)
@@ -52,7 +50,6 @@ export default function EventHandler({ children }: { children: ReactNode }) {
   const onNewMessageResponse = useCallback(
     (message: ThreadMessage) => {
-      setGenerateResponse(false)
       addNewMessage(message)
     },
     [addNewMessage]
@@ -96,7 +93,6 @@ export default function EventHandler({ children }: { children: ReactNode }) {
   const onMessageResponseUpdate = useCallback(
     (message: ThreadMessage) => {
-      setGenerateResponse(false)
       updateMessage(
         message.id,
         message.thread_id,


@@ -14,8 +14,6 @@ import {
 /**
  * Stores all chat messages for all threads
  */
-export const generateResponseAtom = atom<boolean>(false)
-
 export const chatMessages = atom<Record<string, ThreadMessage[]>>({})

 /**

web/hooks/useInference.ts (new file, +15 lines)

@@ -0,0 +1,15 @@
+import { useAtomValue } from 'jotai'
+
+import { threadStatesAtom } from '@/helpers/atoms/Thread.atom'
+
+export default function useInference() {
+  const threadStates = useAtomValue(threadStatesAtom)
+
+  const isGeneratingResponse = Object.values(threadStates).some(
+    (threadState) => threadState.waitingForResponse
+  )
+
+  return {
+    isGeneratingResponse,
+  }
+}
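The new hook derives the flag instead of storing it: `generateResponseAtom` had to be flipped back to `false` inside `EventHandler` on every message event, whereas `isGeneratingResponse` is recomputed from `threadStatesAtom` and can never drift out of sync with the threads' `waitingForResponse` flags. A usage sketch (the consumer component here is illustrative; `ChatBody` below is the real call site):

```tsx
import React from 'react'

import useInference from '@/hooks/useInference'

// Subscribes to thread states via the hook, so it rerenders when a thread's
// waiting flag changes, not on every keystroke typed into the chat input.
const GeneratingIndicator: React.FC = () => {
  const { isGeneratingResponse } = useInference()
  return isGeneratingResponse ? <p>Generating response...</p> : null
}

export default GeneratingIndicator
```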


@@ -1,5 +1,5 @@
 /* eslint-disable @typescript-eslint/no-explicit-any */
-import { useEffect, useRef, useState } from 'react'
+import { useEffect, useRef } from 'react'
 import {
   ChatCompletionMessage,
@@ -18,7 +18,7 @@
   ChatCompletionMessageContentType,
   AssistantTool,
 } from '@janhq/core'
-import { useAtom, useAtomValue, useSetAtom } from 'jotai'
+import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
 import { ulid } from 'ulid'
@@ -35,7 +34,6 @@ import { useActiveModel } from './useActiveModel'
 import { extensionManager } from '@/extension/ExtensionManager'
 import {
   addNewMessageAtom,
-  generateResponseAtom,
   getCurrentChatMessagesAtom,
 } from '@/helpers/atoms/ChatMessage.atom'
 import {
@@ -48,29 +47,30 @@
   updateThreadWaitingForResponseAtom,
 } from '@/helpers/atoms/Thread.atom'

+export const queuedMessageAtom = atom(false)
+export const reloadModelAtom = atom(false)

 export default function useSendChatMessage() {
   const activeThread = useAtomValue(activeThreadAtom)
   const addNewMessage = useSetAtom(addNewMessageAtom)
   const updateThread = useSetAtom(updateThreadAtom)
   const updateThreadWaiting = useSetAtom(updateThreadWaitingForResponseAtom)
-  const [currentPrompt, setCurrentPrompt] = useAtom(currentPromptAtom)
-  const setGenerateResponse = useSetAtom(generateResponseAtom)
+  const setCurrentPrompt = useSetAtom(currentPromptAtom)
   const currentMessages = useAtomValue(getCurrentChatMessagesAtom)
   const { activeModel } = useActiveModel()
   const selectedModel = useAtomValue(selectedModelAtom)
   const { startModel } = useActiveModel()
-  const [queuedMessage, setQueuedMessage] = useState(false)
+  const setQueuedMessage = useSetAtom(queuedMessageAtom)
   const modelRef = useRef<Model | undefined>()
   const threadStates = useAtomValue(threadStatesAtom)
   const updateThreadInitSuccess = useSetAtom(updateThreadInitSuccessAtom)
   const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
   const engineParamsUpdate = useAtomValue(engineParamsUpdateAtom)
   const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
-  const [reloadModel, setReloadModel] = useState(false)
+  const setReloadModel = useSetAtom(reloadModelAtom)
   const [fileUpload, setFileUpload] = useAtom(fileUploadAtom)

   useEffect(() => {
@@ -82,9 +82,7 @@
       console.error('No active thread')
       return
     }
-    updateThreadWaiting(activeThread.id, true)
-
     const messages: ChatCompletionMessage[] = [
       activeThread.assistants[0]?.instructions,
     ]
@@ -121,19 +119,19 @@
     if (activeModel?.id !== modelId) {
       setQueuedMessage(true)
       startModel(modelId)
-      await WaitForModelStarting(modelId)
+      await waitForModelStarting(modelId)
       setQueuedMessage(false)
     }
     events.emit(MessageEvent.OnMessageSent, messageRequest)
   }

   // TODO: Refactor @louis
-  const WaitForModelStarting = async (modelId: string) => {
+  const waitForModelStarting = async (modelId: string) => {
     return new Promise<void>((resolve) => {
       setTimeout(async () => {
         if (modelRef.current?.id !== modelId) {
           console.debug('waiting for model to start')
-          await WaitForModelStarting(modelId)
+          await waitForModelStarting(modelId)
           resolve()
         } else {
           resolve()
@@ -142,10 +140,8 @@
     })
   }

-  const sendChatMessage = async () => {
-    setGenerateResponse(true)
-    if (!currentPrompt || currentPrompt.trim().length === 0) return
+  const sendChatMessage = async (message: string) => {
+    if (!message || message.trim().length === 0) return

     if (!activeThread) {
       console.error('No active thread')
@@ -199,7 +195,7 @@
     updateThreadWaiting(activeThread.id, true)
-    const prompt = currentPrompt.trim()
+    const prompt = message.trim()
     setCurrentPrompt('')

     const base64Blob = fileUpload[0]
@@ -335,7 +331,7 @@
     if (activeModel?.id !== modelId) {
       setQueuedMessage(true)
       startModel(modelId)
-      await WaitForModelStarting(modelId)
+      await waitForModelStarting(modelId)
       setQueuedMessage(false)
     }
@@ -346,9 +342,7 @@
   }

   return {
-    reloadModel,
     sendChatMessage,
     resendChatMessage,
-    queuedMessage,
   }
 }
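Exporting `queuedMessageAtom` and `reloadModelAtom` means `useSendChatMessage` no longer returns UI state at all, so a component that only displays one of these flags does not have to mount the whole hook and inherit its many subscriptions (messages, thread and model state). The two consumers further down do exactly this; the pattern in isolation (component name illustrative):

```tsx
import React from 'react'
import { useAtomValue } from 'jotai'

import { queuedMessageAtom } from '@/hooks/useSendChatMessage'

// A read-only subscription to a single boolean: prompt keystrokes and
// streamed tokens never touch this component.
const QueuedBadge: React.FC = () => {
  const queuedMessage = useAtomValue(queuedMessageAtom)
  return queuedMessage ? <span>Message queued</span> : null
}
```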


@@ -15,23 +15,21 @@ import { MainViewState } from '@/constants/screens'
 import { activeModelAtom } from '@/hooks/useActiveModel'
 import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
+import useInference from '@/hooks/useInference'
 import { useMainViewState } from '@/hooks/useMainViewState'

 import ChatItem from '../ChatItem'
 import ErrorMessage from '../ErrorMessage'

-import {
-  generateResponseAtom,
-  getCurrentChatMessagesAtom,
-} from '@/helpers/atoms/ChatMessage.atom'
+import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'

 const ChatBody: React.FC = () => {
   const messages = useAtomValue(getCurrentChatMessagesAtom)
   const activeModel = useAtomValue(activeModelAtom)
   const { downloadedModels } = useGetDownloadedModels()
   const { setMainViewState } = useMainViewState()
-  const generateResponse = useAtomValue(generateResponseAtom)
+  const { isGeneratingResponse } = useInference()

   if (downloadedModels.length === 0)
     return (
@@ -101,7 +99,7 @@
         ))}
       {activeModel &&
-        (generateResponse ||
+        (isGeneratingResponse ||
           (messages.length &&
             messages[messages.length - 1].status ===
               MessageStatus.Pending &&


@@ -64,13 +64,18 @@ const ChatInput: React.FC = () => {
   useEffect(() => {
     if (isWaitingToSend && activeThreadId) {
       setIsWaitingToSend(false)
-      sendChatMessage()
+      sendChatMessage(currentPrompt)
     }
     if (textareaRef.current) {
       textareaRef.current.focus()
     }
-    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [waitingToSendMessage, activeThreadId])
+  }, [
+    activeThreadId,
+    isWaitingToSend,
+    currentPrompt,
+    setIsWaitingToSend,
+    sendChatMessage,
+  ])

   useEffect(() => {
     if (textareaRef.current) {
@@ -81,13 +86,11 @@
   }, [currentPrompt])

   const onKeyDown = async (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
-    if (e.key === 'Enter') {
-      if (!e.shiftKey) {
-        e.preventDefault()
-        if (messages[messages.length - 1]?.status !== MessageStatus.Pending)
-          sendChatMessage()
-        else onStopInferenceClick()
-      }
+    if (e.key === 'Enter' && !e.shiftKey) {
+      e.preventDefault()
+      if (messages[messages.length - 1]?.status !== MessageStatus.Pending)
+        sendChatMessage(currentPrompt)
+      else onStopInferenceClick()
     }
   }
@@ -237,7 +240,7 @@
           }
           themes="primary"
           className="min-w-[100px]"
-          onClick={sendChatMessage}
+          onClick={() => sendChatMessage(currentPrompt)}
         >
           Send
         </Button>
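`ChatInput` already rerenders on every keystroke to display the text, so it is the natural owner of the `currentPrompt` subscription; the value is simply passed into `sendChatMessage(currentPrompt)` at all three call sites. The rewritten effect also gains an exhaustive dependency list, dropping the `eslint-disable` and the stale `waitingToSendMessage` reference. One caveat, as an observation rather than something the diff states: `sendChatMessage` gets a fresh identity on each render, so listing it lets the effect rerun on unrelated renders; the `isWaitingToSend` guard with the immediate flag reset keeps the send one-shot. The same effect, with explanatory comments added:

```tsx
useEffect(() => {
  if (isWaitingToSend && activeThreadId) {
    // Clear the flag before sending so reruns of this effect are no-ops.
    setIsWaitingToSend(false)
    sendChatMessage(currentPrompt)
  }
  // Refocus the input on every run, e.g. after switching threads.
  if (textareaRef.current) {
    textareaRef.current.focus()
  }
}, [
  activeThreadId,
  isWaitingToSend,
  currentPrompt,
  setIsWaitingToSend,
  sendChatMessage,
])
```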


@@ -1,7 +1,9 @@
-import useSendChatMessage from '@/hooks/useSendChatMessage'
+import { useAtomValue } from 'jotai'
+
+import { queuedMessageAtom } from '@/hooks/useSendChatMessage'

 const MessageQueuedBanner: React.FC = () => {
-  const { queuedMessage } = useSendChatMessage()
+  const queuedMessage = useAtomValue(queuedMessageAtom)

   return (
     <div>

@@ -15,7 +15,7 @@ import ModelStart from '@/containers/Loader/ModelStart'
 import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
 import { showLeftSideBarAtom } from '@/containers/Providers/KeyListener'
-import useSendChatMessage from '@/hooks/useSendChatMessage'
+import { queuedMessageAtom, reloadModelAtom } from '@/hooks/useSendChatMessage'

 import ChatBody from '@/screens/Chat/ChatBody'
@@ -30,20 +30,37 @@ import {
   engineParamsUpdateAtom,
 } from '@/helpers/atoms/Thread.atom'

+const renderError = (code: string) => {
+  switch (code) {
+    case 'multiple-upload':
+      return 'Currently, we only support 1 attachment at the same time'
+    case 'retrieval-off':
+      return 'Turn on Retrieval in Assistant Settings to use this feature'
+    case 'file-invalid-type':
+      return 'We do not support this file type'
+    default:
+      return 'Oops, something error, please try again.'
+  }
+}
+
 const ChatScreen: React.FC = () => {
   const setCurrentPrompt = useSetAtom(currentPromptAtom)
   const activeThread = useAtomValue(activeThreadAtom)
   const showLeftSideBar = useAtomValue(showLeftSideBarAtom)
   const engineParamsUpdate = useAtomValue(engineParamsUpdateAtom)
-  const { queuedMessage, reloadModel } = useSendChatMessage()
   const [dragOver, setDragOver] = useState(false)
+  const queuedMessage = useAtomValue(queuedMessageAtom)
+  const reloadModel = useAtomValue(reloadModelAtom)
   const [dragRejected, setDragRejected] = useState({ code: '' })
   const setFileUpload = useSetAtom(fileUploadAtom)

   const { getRootProps, isDragReject } = useDropzone({
     noClick: true,
     multiple: false,
     accept: {
       // 'image/*': ['.png', '.jpg', '.jpeg'],
       'application/pdf': ['.pdf'],
     },
@@ -104,22 +121,6 @@ const ChatScreen: React.FC = () => {
     }, 2000)
   }, [dragRejected.code])

-  const renderError = (code: string) => {
-    switch (code) {
-      case 'multiple-upload':
-        return 'Currently, we only support 1 attachment at the same time'
-      case 'retrieval-off':
-        return 'Turn on Retrieval in Assistant Settings to use this feature'
-      case 'file-invalid-type':
-        return 'We do not support this file type'
-      default:
-        return 'Oops, something error, please try again.'
-    }
-  }
-
   return (
     <div className="flex h-full w-full">
       {/* Left side bar */}
@@ -216,6 +217,7 @@
           <ChatInput />
         </div>
       </div>
+      {/* Right side bar */}
       {activeThread && <Sidebar />}
     </div>
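Hoisting `renderError` to module scope is the same idea in miniature: the helper reads no props or state, so defining it inside `ChatScreen` rebuilt it on every render for nothing, while at module scope it is created once with a stable identity. The rule, with illustrative names:

```tsx
import React from 'react'

// Created once per module load, not once per render.
const formatLabel = (code: string) => code.split('-').join(' ')

const Badge: React.FC<{ code: string }> = ({ code }) => (
  <span>{formatLabel(code)}</span>
)
```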