fix: app stuck generating assistant response (#2001)

Louis 2024-02-12 13:38:08 +07:00 committed by GitHub
parent 7ae6e35746
commit 9e69946b73
4 changed files with 18 additions and 56 deletions
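
Before this change, each regenerate call site (the ErrorMessage banner and the MessageToolbar) deleted the previous assistant reply on its own before calling resendChatMessage. This commit centralizes that cleanup inside useSendChatMessage: callers now just pass the message to resend, and the hook removes the stale assistant reply from the chat-message atoms and from the persisted thread before emitting the request. A minimal sketch of the rule the hook now applies (a hypothetical standalone helper, not code from this commit; the role check and id filter mirror the new block in useSendChatMessage):

    import { ChatCompletionRole, ThreadMessage } from '@janhq/core'

    // Hypothetical helper, illustration only: given the thread history and the
    // message being resent, drop the reply being regenerated from the history
    // that gets written back to the thread. User messages are left untouched.
    export const messagesForRegenerate = (
      messages: ThreadMessage[],
      currentMessage: ThreadMessage
    ): ThreadMessage[] =>
      currentMessage.role !== ChatCompletionRole.User
        ? messages.filter((msg) => msg.id !== currentMessage.id)
        : messages

    // Usage sketch: persist the cleaned history, then resend the message.
    // writeMessages(activeThread.id, messagesForRegenerate(currentMessages, currentMessage))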


@@ -38,6 +38,7 @@ import { loadModelErrorAtom, useActiveModel } from './useActiveModel'
 import { extensionManager } from '@/extension/ExtensionManager'
 import {
   addNewMessageAtom,
+  deleteMessageAtom,
   getCurrentChatMessagesAtom,
 } from '@/helpers/atoms/ChatMessage.atom'
 import {
@@ -58,6 +59,7 @@ export default function useSendChatMessage() {
   const updateThread = useSetAtom(updateThreadAtom)
   const updateThreadWaiting = useSetAtom(updateThreadWaitingForResponseAtom)
   const setCurrentPrompt = useSetAtom(currentPromptAtom)
+  const deleteMessage = useSetAtom(deleteMessageAtom)
   const setEditPrompt = useSetAtom(editPromptAtom)
   const currentMessages = useAtomValue(getCurrentChatMessagesAtom)
@@ -132,6 +134,19 @@ export default function useSendChatMessage() {
       await waitForModelStarting(modelId)
       setQueuedMessage(false)
     }
+    if (currentMessage.role !== ChatCompletionRole.User) {
+      // Delete last response before regenerating
+      deleteMessage(currentMessage.id ?? '')
+      if (activeThread) {
+        await extensionManager
+          .get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
+          ?.writeMessages(
+            activeThread.id,
+            currentMessages.filter((msg) => msg.id !== currentMessage.id)
+          )
+      }
+    }
     events.emit(MessageEvent.OnMessageSent, messageRequest)
   }


@@ -1,10 +1,4 @@
-import {
-  ChatCompletionRole,
-  ConversationalExtension,
-  ExtensionTypeEnum,
-  MessageStatus,
-  ThreadMessage,
-} from '@janhq/core'
+import { MessageStatus, ThreadMessage } from '@janhq/core'
 import { Button } from '@janhq/uikit'
 import { useAtomValue, useSetAtom } from 'jotai'
 import { RefreshCcw } from 'lucide-react'
@@ -15,39 +9,17 @@ import ModalTroubleShooting, {
 import useSendChatMessage from '@/hooks/useSendChatMessage'
 
-import { extensionManager } from '@/extension'
-import {
-  deleteMessageAtom,
-  getCurrentChatMessagesAtom,
-} from '@/helpers/atoms/ChatMessage.atom'
-import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
+import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
 
 const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
   const messages = useAtomValue(getCurrentChatMessagesAtom)
-  const thread = useAtomValue(activeThreadAtom)
-  const deleteMessage = useSetAtom(deleteMessageAtom)
   const { resendChatMessage } = useSendChatMessage()
   const setModalTroubleShooting = useSetAtom(modalTroubleShootingAtom)
 
   const regenerateMessage = async () => {
     const lastMessageIndex = messages.length - 1
     const message = messages[lastMessageIndex]
-    if (message.role !== ChatCompletionRole.User) {
-      // Delete last response before regenerating
-      deleteMessage(message.id ?? '')
-      if (thread) {
-        await extensionManager
-          .get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
-          ?.writeMessages(
-            thread.id,
-            messages.filter((msg) => msg.id !== message.id)
-          )
-      }
-      const targetMessage = messages[lastMessageIndex - 1]
-      if (targetMessage) resendChatMessage(targetMessage)
-    } else {
-      resendChatMessage(message)
-    }
+    resendChatMessage(message)
   }
 
   return (


@@ -1,21 +0,0 @@
-import { useAtomValue } from 'jotai'
-
-import { queuedMessageAtom } from '@/hooks/useSendChatMessage'
-
-const MessageQueuedBanner: React.FC = () => {
-  const queuedMessage = useAtomValue(queuedMessageAtom)
-
-  return (
-    <div>
-      {queuedMessage && (
-        <div className="my-2 py-2 text-center">
-          <span className="rounded-lg border border-border px-4 py-2 shadow-lg">
-            Message queued. It can be sent once the model has started
-          </span>
-        </div>
-      )}
-    </div>
-  )
-}
-
-export default MessageQueuedBanner


@@ -57,10 +57,6 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
   }
 
   const onRegenerateClick = async () => {
-    if (message.role !== ChatCompletionRole.User) {
-      // Delete last response before regenerating
-      await onDeleteClick()
-    }
     resendChatMessage(message)
   }