Merge pull request #4338 from janhq/main

Merge bug fixes from 0.5.12 into dev
Authored by Louis on 2024-12-26 13:31:11 +07:00; committed by GitHub
commit da4336cc49
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
26 changed files with 355 additions and 192 deletions

View File

@ -40,7 +40,7 @@ export default class CortexConversationalExtension extends ConversationalExtension
async listThreads(): Promise<Thread[]> {
return this.queue.add(() =>
ky
.get(`${API_URL}/v1/threads`)
.get(`${API_URL}/v1/threads?limit=-1`)
.json<ThreadList>()
.then((e) => e.data)
) as Promise<Thread[]>
@ -133,7 +133,7 @@ export default class CortexConversationalExtension extends ConversationalExtension
async listMessages(threadId: string): Promise<ThreadMessage[]> {
return this.queue.add(() =>
ky
.get(`${API_URL}/v1/threads/${threadId}/messages?order=asc`)
.get(`${API_URL}/v1/threads/${threadId}/messages?order=asc&limit=-1`)
.json<MessageList>()
.then((e) => e.data)
) as Promise<ThreadMessage[]>
@ -147,7 +147,9 @@ export default class CortexConversationalExtension extends ConversationalExtension
*/
async getThreadAssistant(threadId: string): Promise<ThreadAssistantInfo> {
return this.queue.add(() =>
ky.get(`${API_URL}/v1/assistants/${threadId}`).json<ThreadAssistantInfo>()
ky
.get(`${API_URL}/v1/assistants/${threadId}?limit=-1`)
.json<ThreadAssistantInfo>()
) as Promise<ThreadAssistantInfo>
}
/**
@ -188,7 +190,7 @@ export default class CortexConversationalExtension extends ConversationalExtension
* Do health check on cortex.cpp
* @returns
*/
healthz(): Promise<void> {
async healthz(): Promise<void> {
return ky
.get(`${API_URL}/healthz`, {
retry: { limit: 20, delay: () => 500, methods: ['get'] },
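
The recurring change across this extension is appending `limit=-1` to Cortex's list endpoints so the server returns the full collection instead of its default page size. A minimal sketch of the pattern, assuming `ky` and an `API_URL` constant as in the diff (the `listAll` helper is illustrative, not part of the codebase):

import ky from 'ky'

declare const API_URL: string // provided by the extension, as in the diff

// Fetch an entire collection in one request by disabling pagination.
async function listAll<T>(path: string): Promise<T[]> {
  const body = await ky.get(`${API_URL}${path}?limit=-1`).json<{ data: T[] }>()
  return body.data
}

// e.g. const threads = await listAll<Thread>('/v1/threads')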

View File

@ -1 +1 @@
1.0.5-rc2
1.0.6

View File

@ -85,6 +85,63 @@
},
"engine": "openai"
},
{
"sources": [
{
"url": "https://openai.com"
}
],
"id": "gpt-4o-mini",
"object": "model",
"name": "OpenAI GPT 4o-mini",
"version": "1.1",
"description": "GPT-4o mini (“o” for “omni”) is a fast, affordable small model for focused tasks.",
"format": "api",
"settings": {
"vision_model": true
},
"parameters": {
"max_tokens": 16384,
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"stop": [],
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "OpenAI",
"tags": ["General"]
},
"engine": "openai"
},
{
"sources": [
{
"url": "https://openai.com"
}
],
"id": "o1",
"object": "model",
"name": "OpenAI o1",
"version": "1.0",
"description": "OpenAI o1 is a new model with complex reasoning",
"format": "api",
"settings": {},
"parameters": {
"max_tokens": 100000,
"temperature": 1,
"top_p": 1,
"stream": true,
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "OpenAI",
"tags": ["General"]
},
"engine": "openai"
},
{
"sources": [
{
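
The two new entries (gpt-4o-mini and o1) follow the same remote-model schema as the existing OpenAI entries in this file. A reduced TypeScript view of the fields used here, for reference (the type name is illustrative):

interface RemoteModelEntry {
  sources: { url: string }[]
  id: string
  object: 'model'
  name: string
  version: string
  description: string
  format: 'api'
  settings: { vision_model?: boolean }
  parameters: {
    max_tokens: number
    temperature: number
    top_p: number
    stream: boolean
    stop?: string[]
    frequency_penalty: number
    presence_penalty: number
  }
  metadata: { author: string; tags: string[] }
  engine: 'openai'
}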

View File

@ -53,7 +53,7 @@ export class CortexAPI implements ICortexAPI {
*/
getModels(): Promise<Model[]> {
return this.queue
.add(() => ky.get(`${API_URL}/v1/models`).json<ModelList>())
.add(() => ky.get(`${API_URL}/v1/models?limit=-1`).json<ModelList>())
.then((e) =>
typeof e === 'object' ? e.data.map((e) => this.transformModel(e)) : []
)

View File

@ -34,7 +34,11 @@ const TextArea = forwardRef<HTMLTextAreaElement, TextAreaProps>(
return (
<div className="textarea__wrapper">
<textarea
className={twMerge('textarea', className)}
className={twMerge(
'textarea',
className,
autoResize && 'resize-none'
)}
ref={autoResize ? textareaRef : ref}
{...props}
/>
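
For context, `autoResize` swaps in a locally managed ref whose height tracks the content, so adding `resize-none` simply hides the manual drag handle when the height is already managed. A minimal sketch of that pattern (the hook below is illustrative, not the component's actual implementation):

import { useLayoutEffect, useRef } from 'react'

// Grow a textarea to fit its content whenever the value changes.
function useAutoResize(value: string) {
  const ref = useRef<HTMLTextAreaElement>(null)
  useLayoutEffect(() => {
    const el = ref.current
    if (!el) return
    el.style.height = 'auto'
    el.style.height = `${el.scrollHeight}px`
  }, [value])
  return ref
}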

View File

@ -183,10 +183,7 @@ const ModelDropdown = ({
if (!activeThread) return
const modelId = activeAssistant?.model?.id
let model = downloadedModels.find((model) => model.id === modelId)
if (!model) {
model = undefined
}
const model = downloadedModels.find((model) => model.id === modelId)
setSelectedModel(model)
}, [
recommendedModel,
@ -378,14 +375,14 @@ const ModelDropdown = ({
!selectedModel && 'text-[hsla(var(--text-tertiary))]'
)}
>
{selectedModel?.name || 'Select Model'}
{selectedModel?.name || 'Select a model'}
</span>
</Badge>
) : (
<Input
value={selectedModel?.name || ''}
className="cursor-pointer"
placeholder="Select Model"
placeholder="Select a model"
disabled={disabled}
readOnly
suffixIcon={

View File

@ -18,7 +18,7 @@ import {
extractInferenceParams,
ModelExtension,
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
import { useAtom, useAtomValue, useSetAtom } from 'jotai'
import { ulid } from 'ulidx'
import { activeModelAtom, stateModelAtom } from '@/hooks/useActiveModel'
@ -32,6 +32,7 @@ import {
updateMessageAtom,
tokenSpeedAtom,
deleteMessageAtom,
subscribedGeneratingMessageAtom,
} from '@/helpers/atoms/ChatMessage.atom'
import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
import {
@ -40,6 +41,7 @@ import {
isGeneratingResponseAtom,
updateThreadAtom,
getActiveThreadModelParamsAtom,
activeThreadAtom,
} from '@/helpers/atoms/Thread.atom'
const maxWordForThreadTitle = 10
@ -54,6 +56,10 @@ export default function ModelHandler() {
const activeModel = useAtomValue(activeModelAtom)
const setActiveModel = useSetAtom(activeModelAtom)
const setStateModel = useSetAtom(stateModelAtom)
const [subscribedGeneratingMessage, setSubscribedGeneratingMessage] = useAtom(
subscribedGeneratingMessageAtom
)
const activeThread = useAtomValue(activeThreadAtom)
const updateThreadWaiting = useSetAtom(updateThreadWaitingForResponseAtom)
const threads = useAtomValue(threadsAtom)
@ -62,11 +68,17 @@ export default function ModelHandler() {
const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
const updateThread = useSetAtom(updateThreadAtom)
const messagesRef = useRef(messages)
const messageGenerationSubscriber = useRef(subscribedGeneratingMessage)
const activeModelRef = useRef(activeModel)
const activeThreadRef = useRef(activeThread)
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
const activeModelParamsRef = useRef(activeModelParams)
const setTokenSpeed = useSetAtom(tokenSpeedAtom)
useEffect(() => {
activeThreadRef.current = activeThread
}, [activeThread])
useEffect(() => {
threadsRef.current = threads
}, [threads])
@ -87,6 +99,10 @@ export default function ModelHandler() {
activeModelParamsRef.current = activeModelParams
}, [activeModelParams])
useEffect(() => {
messageGenerationSubscriber.current = subscribedGeneratingMessage
}, [subscribedGeneratingMessage])
const onNewMessageResponse = useCallback(
async (message: ThreadMessage) => {
if (message.type === MessageRequestType.Thread) {
@ -179,12 +195,19 @@ export default function ModelHandler() {
const updateThreadMessage = useCallback(
(message: ThreadMessage) => {
if (
messageGenerationSubscriber.current &&
message.thread_id === activeThreadRef.current?.id &&
!messageGenerationSubscriber.current!.thread_id
) {
updateMessage(
message.id,
message.thread_id,
message.content,
message.status
)
}
if (message.status === MessageStatus.Pending) {
if (message.content.length) {
setIsGeneratingResponse(false)
@ -244,6 +267,7 @@ export default function ModelHandler() {
const metadata = {
...thread.metadata,
...(messageContent && { lastMessage: messageContent }),
updated_at: Date.now(),
}
updateThread({
@ -302,15 +326,10 @@ export default function ModelHandler() {
const generateThreadTitle = (message: ThreadMessage, thread: Thread) => {
// If this is the first ever prompt in the thread
if (
(thread.title ?? thread.metadata?.title)?.trim() !== defaultThreadTitle
) {
if ((thread.title ?? thread.metadata?.title)?.trim() !== defaultThreadTitle)
return
}
if (!activeModelRef.current) {
return
}
if (!activeModelRef.current) return
// Check model engine; we don't want to generate a title when it's not a local engine. Remote models use the first prompt.
if (!isLocalEngine(activeModelRef.current?.engine as InferenceEngine)) {
@ -332,6 +351,7 @@ export default function ModelHandler() {
...updatedThread,
})
})
.catch(console.error)
}
// This is the first time message comes in on a new thread
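
The handler mirrors `subscribedGeneratingMessageAtom` and `activeThreadAtom` into refs so that event callbacks registered once can still read the latest values, and streamed updates are only applied to the thread the user is actually watching. A generic sketch of that mirror pattern, assuming Jotai as used in the diff (`useAtomRef` is an illustrative helper, not something this commit adds):

import { useEffect, useRef } from 'react'
import { useAtomValue, type Atom } from 'jotai'

// Keep a ref in sync with an atom so long-lived event handlers
// (subscribed once on mount) always see the current value.
function useAtomRef<T>(anAtom: Atom<T>) {
  const value = useAtomValue(anAtom)
  const ref = useRef(value)
  useEffect(() => {
    ref.current = value
  }, [value])
  return ref
}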

View File

@ -35,6 +35,13 @@ export const chatMessages = atom(
}
)
/**
* Store subscribed generating message thread
*/
export const subscribedGeneratingMessageAtom = atom<{
thread_id?: string
}>({})
/**
* Stores the status of the messages load for each thread
*/
@ -175,6 +182,17 @@ export const updateMessageAtom = atom(
// Update thread last message
if (text.length)
set(updateThreadStateLastMessageAtom, conversationId, text)
} else {
set(addNewMessageAtom, {
id,
thread_id: conversationId,
content: text,
status,
role: ChatCompletionRole.Assistant,
created_at: Date.now() / 1000,
completed_at: Date.now() / 1000,
object: 'thread.message',
})
}
}
)
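
With the new else branch, `updateMessageAtom` behaves as an upsert: a streamed message that is not yet in the thread's list is appended as a fresh assistant message rather than silently dropped. A reduced sketch of that shape (names other than Jotai's `atom` are illustrative):

import { atom } from 'jotai'

type Message = { id: string; thread_id: string; text: string }

const messagesAtom = atom<Message[]>([])

// Write-only atom: update a message in place, or append it if unseen.
const upsertMessageAtom = atom(null, (get, set, incoming: Message) => {
  const messages = get(messagesAtom)
  const exists = messages.some((m) => m.id === incoming.id)
  set(
    messagesAtom,
    exists
      ? messages.map((m) => (m.id === incoming.id ? incoming : m))
      : [...messages, incoming]
  )
})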

View File

@ -20,12 +20,7 @@ export const CHAT_WIDTH = 'chatWidth'
export const themesOptionsAtom = atomWithStorage<
{ name: string; value: string }[]
>(THEME_OPTIONS, [], undefined, { getOnInit: true })
export const janThemesPathAtom = atomWithStorage<string | undefined>(
THEME_PATH,
undefined,
undefined,
{ getOnInit: true }
)
export const selectedThemeIdAtom = atomWithStorage<string>(
THEME,
'',

View File

@ -125,6 +125,26 @@ export const waitingToSendMessage = atom<boolean | undefined>(undefined)
*/
export const isGeneratingResponseAtom = atom<boolean | undefined>(undefined)
/**
* Create a new thread and add it to the thread list
*/
export const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
// create thread state for this new thread
const currentState = { ...get(threadStatesAtom) }
const threadState: ThreadState = {
hasMore: false,
waitingForResponse: false,
lastMessage: undefined,
}
currentState[newThread.id] = threadState
set(threadStatesAtom, currentState)
// add the new thread on top of the thread list to the state
const threads = get(threadsAtom)
set(threadsAtom, [newThread, ...threads])
})
/**
* Remove a thread state from the atom
*/
@ -180,12 +200,12 @@ export const updateThreadAtom = atom(
)
// sort new threads based on updated at
threads.sort((thread1, thread2) => {
const aDate = new Date(thread1.updated ?? 0)
const bDate = new Date(thread2.updated ?? 0)
return bDate.getTime() - aDate.getTime()
threads.sort((a, b) => {
return ((a.metadata?.updated_at as number) ?? 0) >
((b.metadata?.updated_at as number) ?? 0)
? -1
: 1
})
set(threadsAtom, threads)
}
)
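
Threads are now ordered by the `updated_at` timestamp written into `metadata` on every update, newest first. The diff's `> ? -1 : 1` comparator produces that ordering; a subtraction-based equivalent is the more conventional shape (the helper below is illustrative):

type ThreadLike = { metadata?: Record<string, unknown> }

// Newest-first ordering by the millisecond timestamp stored in metadata.
const byUpdatedAtDesc = (a: ThreadLike, b: ThreadLike) =>
  ((b.metadata?.updated_at as number) ?? 0) -
  ((a.metadata?.updated_at as number) ?? 0)

// usage: threads.sort(byUpdatedAtDesc)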

View File

@ -33,29 +33,12 @@ import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
import { selectedModelAtom } from '@/helpers/atoms/Model.atom'
import {
threadsAtom,
threadStatesAtom,
updateThreadAtom,
setThreadModelParamsAtom,
isGeneratingResponseAtom,
createNewThreadAtom,
} from '@/helpers/atoms/Thread.atom'
const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
// create thread state for this new thread
const currentState = { ...get(threadStatesAtom) }
const threadState: ThreadState = {
hasMore: false,
waitingForResponse: false,
lastMessage: undefined,
}
currentState[newThread.id] = threadState
set(threadStatesAtom, currentState)
// add the new thread on top of the thread list to the state
const threads = get(threadsAtom)
set(threadsAtom, [newThread, ...threads])
})
export const useCreateNewThread = () => {
const createNewThread = useSetAtom(createNewThreadAtom)
const { setActiveThread } = useSetActiveThread()
@ -153,6 +136,7 @@ export const useCreateNewThread = () => {
updated: createdAt,
metadata: {
title: 'New Thread',
updated_at: Date.now(),
},
}
@ -207,9 +191,11 @@ export const useCreateNewThread = () => {
async (thread: Thread) => {
updateThread(thread)
setActiveAssistant(thread.assistants[0])
updateThreadCallback(thread)
if (thread.assistants && thread.assistants?.length > 0) {
setActiveAssistant(thread.assistants[0])
updateAssistantCallback(thread.id, thread.assistants[0])
}
},
[
updateThread,

View File

@ -55,17 +55,21 @@ describe('useDeleteThread', () => {
const mockCleanMessages = jest.fn()
;(useSetAtom as jest.Mock).mockReturnValue(() => mockCleanMessages)
;(useAtomValue as jest.Mock).mockReturnValue(['thread 1'])
const mockCreateNewThread = jest.fn()
;(useCreateNewThread as jest.Mock).mockReturnValue({
requestCreateNewThread: mockCreateNewThread,
})
const mockSaveThread = jest.fn()
const mockDeleteThread = jest.fn().mockResolvedValue({})
const mockDeleteMessage = jest.fn().mockResolvedValue({})
const mockModifyThread = jest.fn().mockResolvedValue({})
extensionManager.get = jest.fn().mockReturnValue({
saveThread: mockSaveThread,
getThreadAssistant: jest.fn().mockResolvedValue({}),
deleteThread: mockDeleteThread,
listMessages: jest.fn().mockResolvedValue([
{
id: 'message1',
text: 'Message 1',
},
]),
deleteMessage: mockDeleteMessage,
modifyThread: mockModifyThread,
})
const { result } = renderHook(() => useDeleteThread())
@ -74,8 +78,8 @@ describe('useDeleteThread', () => {
await result.current.cleanThread('thread1')
})
expect(mockDeleteThread).toHaveBeenCalled()
expect(mockCreateNewThread).toHaveBeenCalled()
expect(mockDeleteMessage).toHaveBeenCalled()
expect(mockModifyThread).toHaveBeenCalled()
})
it('should handle errors when deleting a thread', async () => {

View File

@ -2,69 +2,68 @@ import { useCallback } from 'react'
import { ExtensionTypeEnum, ConversationalExtension } from '@janhq/core'
import { useAtom, useAtomValue, useSetAtom } from 'jotai'
import { useAtom, useSetAtom } from 'jotai'
import { currentPromptAtom } from '@/containers/Providers/Jotai'
import { toaster } from '@/containers/Toast'
import { useCreateNewThread } from './useCreateNewThread'
import useSetActiveThread from './useSetActiveThread'
import { extensionManager } from '@/extension/ExtensionManager'
import { assistantsAtom } from '@/helpers/atoms/Assistant.atom'
import { deleteChatMessageAtom as deleteChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
import {
threadsAtom,
setActiveThreadIdAtom,
deleteThreadStateAtom,
updateThreadAtom,
} from '@/helpers/atoms/Thread.atom'
export default function useDeleteThread() {
const [threads, setThreads] = useAtom(threadsAtom)
const { requestCreateNewThread } = useCreateNewThread()
const assistants = useAtomValue(assistantsAtom)
const models = useAtomValue(downloadedModelsAtom)
const updateThread = useSetAtom(updateThreadAtom)
const setCurrentPrompt = useSetAtom(currentPromptAtom)
const setActiveThreadId = useSetAtom(setActiveThreadIdAtom)
const deleteMessages = useSetAtom(deleteChatMessagesAtom)
const deleteThreadState = useSetAtom(deleteThreadStateAtom)
const { setActiveThread } = useSetActiveThread()
const cleanThread = useCallback(
async (threadId: string) => {
const thread = threads.find((c) => c.id === threadId)
if (!thread) return
const assistantInfo = await extensionManager
const messages = await extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.getThreadAssistant(thread.id)
if (!assistantInfo) return
const model = models.find((c) => c.id === assistantInfo?.model?.id)
requestCreateNewThread(
{
...assistantInfo,
id: assistants[0].id,
name: assistants[0].name,
},
model
? {
...model,
parameters: assistantInfo?.model?.parameters ?? {},
settings: assistantInfo?.model?.settings ?? {},
}
: undefined
)
// Delete this thread
await extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.deleteThread(threadId)
?.listMessages(threadId)
.catch(console.error)
if (messages) {
messages.forEach((message) => {
extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.deleteMessage(threadId, message.id)
.catch(console.error)
})
const thread = threads.find((e) => e.id === threadId)
if (thread) {
const updatedThread = {
...thread,
metadata: {
...thread.metadata,
title: 'New Thread',
lastMessage: '',
},
[assistants, models, requestCreateNewThread, threads]
}
extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.modifyThread(updatedThread)
.catch(console.error)
updateThread(updatedThread)
}
}
deleteMessages(threadId)
},
[deleteMessages, threads, updateThread]
)
const deleteThread = async (threadId: string) => {
@ -90,7 +89,7 @@ export default function useDeleteThread() {
type: 'success',
})
if (availableThreads.length > 0) {
setActiveThreadId(availableThreads[0].id)
setActiveThread(availableThreads[0])
} else {
setActiveThreadId(undefined)
}
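
`cleanThread` no longer deletes the thread and recreates it; it now removes each of the thread's messages through the conversational extension and resets the thread's title and last message in place. A condensed sketch of that flow, using the extension methods shown in the diff (`listMessages`, `deleteMessage`, `modifyThread`); the function name is illustrative:

import { ConversationalExtension, ExtensionTypeEnum, Thread } from '@janhq/core'
import { extensionManager } from '@/extension/ExtensionManager'

async function resetThread(thread: Thread): Promise<Thread> {
  const extension = extensionManager.get<ConversationalExtension>(
    ExtensionTypeEnum.Conversational
  )
  // Remove every message belonging to the thread.
  const messages = (await extension?.listMessages(thread.id)) ?? []
  await Promise.all(
    messages.map((m) => extension?.deleteMessage(thread.id, m.id))
  )
  // Reset the thread's metadata instead of recreating the thread.
  const updatedThread = {
    ...thread,
    metadata: { ...thread.metadata, title: 'New Thread', lastMessage: '' },
  }
  await extension?.modifyThread(updatedThread)
  return updatedThread
}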

View File

@ -10,7 +10,6 @@ import cssVars from '@/utils/jsonToCssVariables'
import { janDataFolderPathAtom } from '@/helpers/atoms/AppConfig.atom'
import {
janThemesPathAtom,
selectedThemeIdAtom,
themeDataAtom,
themesOptionsAtom,
@ -21,7 +20,6 @@ type NativeThemeProps = 'light' | 'dark'
export const useLoadTheme = () => {
const janDataFolderPath = useAtomValue(janDataFolderPathAtom)
const [themeOptions, setThemeOptions] = useAtom(themesOptionsAtom)
const [themePath, setThemePath] = useAtom(janThemesPathAtom)
const [themeData, setThemeData] = useAtom(themeDataAtom)
const [selectedIdTheme, setSelectedIdTheme] = useAtom(selectedThemeIdAtom)
const { setTheme } = useTheme()
@ -41,6 +39,14 @@ export const useLoadTheme = () => {
[setTheme]
)
const applyTheme = (theme: Theme) => {
const variables = cssVars(theme.variables)
const headTag = document.getElementsByTagName('head')[0]
const styleTag = document.createElement('style')
styleTag.innerHTML = `:root {${variables}}`
headTag.appendChild(styleTag)
}
const getThemes = useCallback(async () => {
if (!janDataFolderPath.length) return
const folderPath = await joinPath([janDataFolderPath, 'themes'])
@ -59,7 +65,6 @@ export const useLoadTheme = () => {
if (janDataFolderPath.length > 0) {
if (!selectedIdTheme.length) return setSelectedIdTheme('joi-light')
setThemePath(folderPath)
const filePath = await joinPath([
`${folderPath}/${selectedIdTheme}`,
`theme.json`,
@ -68,11 +73,7 @@ export const useLoadTheme = () => {
setThemeData(theme)
setNativeTheme(theme.nativeTheme)
const variables = cssVars(theme.variables)
const headTag = document.getElementsByTagName('head')[0]
const styleTag = document.createElement('style')
styleTag.innerHTML = `:root {${variables}}`
headTag.appendChild(styleTag)
applyTheme(theme)
}
}, [
janDataFolderPath,
@ -81,26 +82,21 @@ export const useLoadTheme = () => {
setSelectedIdTheme,
setThemeData,
setThemeOptions,
setThemePath,
])
const applyTheme = useCallback(async () => {
if (!themeData || !themeOptions || !themePath) {
const configureTheme = useCallback(async () => {
if (!themeData || !themeOptions) {
await getThemes()
} else {
const variables = cssVars(themeData.variables)
const headTag = document.getElementsByTagName('head')[0]
const styleTag = document.createElement('style')
styleTag.innerHTML = `:root {${variables}}`
headTag.appendChild(styleTag)
applyTheme(themeData)
}
setNativeTheme(themeData?.nativeTheme as NativeThemeProps)
}, [themeData, themeOptions, themePath, getThemes])
}, [themeData, themeOptions, getThemes, setNativeTheme])
useEffect(() => {
applyTheme()
configureTheme()
}, [
applyTheme,
configureTheme,
selectedIdTheme,
setNativeTheme,
setSelectedIdTheme,

View File

@ -1,11 +1,15 @@
import { ExtensionTypeEnum, Thread, ConversationalExtension } from '@janhq/core'
import { useSetAtom } from 'jotai'
import { useAtom, useAtomValue, useSetAtom } from 'jotai'
import { extensionManager } from '@/extension'
import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
import { setConvoMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
import {
setConvoMessagesAtom,
subscribedGeneratingMessageAtom,
} from '@/helpers/atoms/ChatMessage.atom'
import {
getActiveThreadIdAtom,
setActiveThreadIdAtom,
setThreadModelParamsAtom,
} from '@/helpers/atoms/Thread.atom'
@ -13,14 +17,18 @@ import { ModelParams } from '@/types/model'
export default function useSetActiveThread() {
const setActiveThreadId = useSetAtom(setActiveThreadIdAtom)
const setThreadMessage = useSetAtom(setConvoMessagesAtom)
const activeThreadId = useAtomValue(getActiveThreadIdAtom)
const setThreadMessages = useSetAtom(setConvoMessagesAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
const setActiveAssistant = useSetAtom(activeAssistantAtom)
const [messageSubscriber, setMessageSubscriber] = useAtom(
subscribedGeneratingMessageAtom
)
const setActiveThread = async (thread: Thread) => {
if (!thread?.id) return
if (!thread?.id || activeThreadId === thread.id) return
setActiveThreadId(thread?.id)
setActiveThreadId(thread.id)
try {
const assistantInfo = await getThreadAssistant(thread.id)
@ -32,7 +40,8 @@ export default function useSetActiveThread() {
...assistantInfo?.model?.settings,
}
setThreadModelParams(thread?.id, modelParams)
setThreadMessage(thread.id, messages)
setThreadMessages(thread.id, messages)
if (messageSubscriber.thread_id !== thread.id) setMessageSubscriber({})
} catch (e) {
console.error(e)
}

View File

@ -26,7 +26,12 @@ const useThreads = () => {
useEffect(() => {
const getThreads = async () => {
const localThreads = await getLocalThreads()
const localThreads = (await getLocalThreads()).sort((a, b) => {
return ((a.metadata?.updated_at as number) ?? 0) >
((b.metadata?.updated_at as number) ?? 0)
? -1
: 1
})
const localThreadStates: Record<string, ThreadState> = {}
const threadModelParams: Record<string, ModelParams> = {}

View File

@ -1,6 +1,6 @@
{
"name": "@janhq/web",
"version": "0.5.11",
"version": "0.5.12",
"private": true,
"homepage": "./",
"scripts": {

View File

@ -176,7 +176,7 @@ const LocalServerLeftPanel = () => {
/>
</div>
<div className="relative z-50 mt-2 block">
<div className="relative mt-2 block">
<Input
className={twMerge(
errorRangePort && 'border-[hsla(var(--destructive-bg))]'

View File

@ -8,9 +8,10 @@ import { useAtom, useAtomValue } from 'jotai'
import { twMerge } from 'tailwind-merge'
import { janDataFolderPathAtom } from '@/helpers/atoms/AppConfig.atom'
import {
chatWidthAtom,
janThemesPathAtom,
reduceTransparentAtom,
selectedThemeIdAtom,
spellCheckAtom,
@ -21,8 +22,8 @@ import {
export default function AppearanceOptions() {
const [selectedIdTheme, setSelectedIdTheme] = useAtom(selectedThemeIdAtom)
const themeOptions = useAtomValue(themesOptionsAtom)
const janDataFolderPath = useAtomValue(janDataFolderPathAtom)
const { setTheme, theme } = useTheme()
const janThemesPath = useAtomValue(janThemesPathAtom)
const [themeData, setThemeData] = useAtom(themeDataAtom)
const [reduceTransparent, setReduceTransparent] = useAtom(
reduceTransparentAtom
@ -48,6 +49,7 @@ export default function AppearanceOptions() {
const handleClickTheme = useCallback(
async (e: string) => {
setSelectedIdTheme(e)
const janThemesPath = await joinPath([janDataFolderPath, 'themes'])
const filePath = await joinPath([`${janThemesPath}/${e}`, `theme.json`])
const theme: Theme = JSON.parse(await fs.readFileSync(filePath, 'utf-8'))
setThemeData(theme)
@ -59,7 +61,7 @@ export default function AppearanceOptions() {
}
},
[
janThemesPath,
janDataFolderPath,
reduceTransparent,
setReduceTransparent,
setSelectedIdTheme,
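
With `janThemesPathAtom` removed, the themes directory is resolved on demand from the Jan data folder when a theme is selected. A sketch of that lookup, assuming `fs` and `joinPath` from `@janhq/core` as used in the diff (the helper name is illustrative):

import { fs, joinPath } from '@janhq/core'

// Resolve and parse a theme definition relative to the Jan data folder.
async function readTheme(janDataFolderPath: string, themeId: string) {
  const themesDir = await joinPath([janDataFolderPath, 'themes'])
  const filePath = await joinPath([`${themesDir}/${themeId}`, 'theme.json'])
  return JSON.parse(await fs.readFileSync(filePath, 'utf-8'))
}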

View File

@ -23,9 +23,7 @@ const EmptyThread = () => {
<LogoMark className="mx-auto mb-2 animate-wave" width={32} height={32} />
{showOnboardingStep ? (
<>
<p className="mt-1 font-medium">
{`You don't have a local model yet.`}
</p>
<p className="mt-1 font-medium">{`You don't have any model`}</p>
<Button
onClick={() => setMainViewState(MainViewState.Hub)}
variant="soft"

View File

@ -14,7 +14,11 @@ import LoadModelError from '../LoadModelError'
import EmptyThread from './EmptyThread'
import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
import {
activeThreadAtom,
isGeneratingResponseAtom,
threadStatesAtom,
} from '@/helpers/atoms/Thread.atom'
const ChatConfigurator = memo(() => {
const messages = useAtomValue(getCurrentChatMessagesAtom)
@ -61,6 +65,12 @@ const ChatBody = memo(
const prevScrollTop = useRef(0)
const isUserManuallyScrollingUp = useRef(false)
const currentThread = useAtomValue(activeThreadAtom)
const threadStates = useAtomValue(threadStatesAtom)
const isGeneratingResponse = useAtomValue(isGeneratingResponseAtom)
const isStreamingResponse = Object.values(threadStates).some(
(threadState) => threadState.waitingForResponse
)
const count = useMemo(
() => (messages?.length ?? 0) + (loadModelError ? 1 : 0),
@ -76,14 +86,31 @@ const ChatBody = memo(
})
useEffect(() => {
// Delay the scroll until the DOM is updated
if (parentRef.current) {
requestAnimationFrame(() => {
if (parentRef.current) {
parentRef.current.scrollTo({ top: parentRef.current.scrollHeight })
virtualizer.scrollToIndex(count - 1)
}
})
}, [count, virtualizer])
useEffect(() => {
if (parentRef.current && isGeneratingResponse) {
parentRef.current.scrollTo({ top: parentRef.current.scrollHeight })
virtualizer.scrollToIndex(count - 1)
}
}, [count, virtualizer, isGeneratingResponse])
useEffect(() => {
if (parentRef.current && isGeneratingResponse) {
parentRef.current.scrollTo({ top: parentRef.current.scrollHeight })
virtualizer.scrollToIndex(count - 1)
}
}, [count, virtualizer, isGeneratingResponse, currentThread?.id])
useEffect(() => {
isUserManuallyScrollingUp.current = false
if (parentRef.current) {
parentRef.current.scrollTo({ top: parentRef.current.scrollHeight })
virtualizer.scrollToIndex(count - 1)
}
}, [count, currentThread?.id, virtualizer])
@ -94,17 +121,19 @@ const ChatBody = memo(
_,
instance
) => {
if (isUserManuallyScrollingUp.current === true) return false
if (isUserManuallyScrollingUp.current === true && isStreamingResponse)
return false
return (
// item.start < (instance.scrollOffset ?? 0) &&
instance.scrollDirection !== 'backward'
)
}
const handleScroll = useCallback((event: React.UIEvent<HTMLElement>) => {
const handleScroll = useCallback(
(event: React.UIEvent<HTMLElement>) => {
const currentScrollTop = event.currentTarget.scrollTop
if (prevScrollTop.current > currentScrollTop) {
if (prevScrollTop.current > currentScrollTop && isStreamingResponse) {
isUserManuallyScrollingUp.current = true
} else {
const currentScrollTop = event.currentTarget.scrollTop
@ -121,7 +150,9 @@ const ChatBody = memo(
event.stopPropagation()
}
prevScrollTop.current = currentScrollTop
}, [])
},
[isStreamingResponse]
)
return (
<div className="flex h-full w-full flex-col overflow-x-hidden">

View File

@ -231,12 +231,10 @@ const ChatInput = () => {
)}
>
<ul>
<Tooltip
trigger={
<li
className={twMerge(
'text-[hsla(var(--text-secondary)] hover:bg-secondary flex w-full items-center space-x-2 px-4 py-2 hover:bg-[hsla(var(--dropdown-menu-hover-bg))]',
activeAssistant?.model.settings?.vision_model ||
activeAssistant?.model.settings?.vision_model &&
isModelSupportRagAndTools
? 'cursor-pointer'
: 'cursor-not-allowed opacity-50'
@ -251,10 +249,6 @@ const ChatInput = () => {
<ImageIcon size={16} />
<span className="font-medium">Image</span>
</li>
}
content="This feature only supports multimodal models."
disabled={activeAssistant?.model.settings?.vision_model}
/>
<Tooltip
side="bottom"
trigger={

View File

@ -1,4 +1,4 @@
import React, { forwardRef, useEffect, useState } from 'react'
import React, { forwardRef, useEffect, useRef, useState } from 'react'
import {
events,
@ -8,10 +8,14 @@ import {
ThreadMessage,
} from '@janhq/core'
import { useAtom } from 'jotai'
import ErrorMessage from '@/containers/ErrorMessage'
import MessageContainer from '../TextMessage'
import { subscribedGeneratingMessageAtom } from '@/helpers/atoms/ChatMessage.atom'
type Ref = HTMLDivElement
type Props = {
@ -22,9 +26,13 @@ type Props = {
const ChatItem = forwardRef<Ref, Props>((message, ref) => {
const [content, setContent] = useState<ThreadContent[]>(message.content)
const [status, setStatus] = useState<MessageStatus>(message.status)
const [subscribedGeneratingMessage, setSubscribedGeneratingMessage] = useAtom(
subscribedGeneratingMessageAtom
)
const [errorMessage, setErrorMessage] = useState<ThreadMessage | undefined>(
message.isCurrentMessage && !!message?.metadata?.error ? message : undefined
)
const subscribedGeneratingMessageRef = useRef(subscribedGeneratingMessage)
function onMessageUpdate(data: ThreadMessage) {
if (data.id === message.id) {
@ -32,9 +40,21 @@ const ChatItem = forwardRef<Ref, Props>((message, ref) => {
if (data.status !== status) setStatus(data.status)
if (data.status === MessageStatus.Error && message.isCurrentMessage)
setErrorMessage(data)
// Update subscriber if the message is generating
if (
subscribedGeneratingMessageRef.current?.thread_id !== message.thread_id
)
setSubscribedGeneratingMessage({
thread_id: message.thread_id,
})
}
}
useEffect(() => {
subscribedGeneratingMessageRef.current = subscribedGeneratingMessage
}, [subscribedGeneratingMessage])
useEffect(() => {
if (!message.isCurrentMessage && errorMessage) setErrorMessage(undefined)
}, [message, errorMessage])

View File

@ -30,6 +30,7 @@ import RequestDownloadModel from './RequestDownloadModel'
import { showSystemMonitorPanelAtom } from '@/helpers/atoms/App.atom'
import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
import { chatWidthAtom } from '@/helpers/atoms/Setting.atom'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
import {
@ -59,6 +60,7 @@ const ThreadCenterPanel = () => {
const experimentalFeature = useAtomValue(experimentalFeatureEnabledAtom)
const activeThread = useAtomValue(activeThreadAtom)
const activeAssistant = useAtomValue(activeAssistantAtom)
const chatWidth = useAtomValue(chatWidthAtom)
const upload = uploader()
const acceptedFormat: Accept = activeAssistant?.model.settings?.vision_model
? {
@ -235,9 +237,16 @@ const ThreadCenterPanel = () => {
{reloadModel && <ModelReload />}
{activeModel && isGeneratingResponse && <GenerateResponse />}
<div
className={twMerge(
'mx-auto w-full',
chatWidth === 'compact' && 'max-w-[700px]'
)}
>
<ChatInput />
</div>
</div>
</div>
</CenterPanelContainer>
)
}

View File

@ -32,12 +32,9 @@ const Tools = () => {
useEffect(() => {
if (!activeThread) return
let model = downloadedModels.find(
const model = downloadedModels.find(
(model) => model.id === activeAssistant?.model.id
)
if (!model) {
model = recommendedModel
}
setSelectedModel(model)
}, [
recommendedModel,

View File

@ -257,7 +257,7 @@ const ThreadRightPanel = () => {
id="assistant-instructions"
placeholder="Eg. You are a helpful assistant."
value={activeAssistant?.instructions ?? ''}
autoResize
// autoResize
onChange={onAssistantInstructionChanged}
/>
</div>