diff --git a/README.md b/README.md
index c02517fd5..a614442a5 100644
--- a/README.md
+++ b/README.md
@@ -76,31 +76,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
         Experimental (Nightly Build)
-
+
         jan.exe
-
+
         Intel
-
+
         M1/M2
-
+
         jan.deb
-
+
         jan.AppImage
diff --git a/core/src/node/api/routes/download.ts b/core/src/node/api/routes/download.ts
index 7fb05daee..cc95fe1d4 100644
--- a/core/src/node/api/routes/download.ts
+++ b/core/src/node/api/routes/download.ts
@@ -1,5 +1,5 @@
 import { DownloadRoute } from '../../../api'
-import { join } from 'path'
+import { join, sep } from 'path'
 import { DownloadManager } from '../../download'
 import { HttpServer } from '../HttpServer'
 import { createWriteStream } from 'fs'
@@ -38,7 +38,7 @@ export const downloadRouter = async (app: HttpServer) => {
       })
       const localPath = normalizedArgs[1]
-      const array = localPath.split('/')
+      const array = localPath.split(sep)
       const fileName = array.pop() ?? ''
       const modelId = array.pop() ?? ''
       console.debug('downloadFile', normalizedArgs, fileName, modelId)
@@ -99,7 +99,7 @@ export const downloadRouter = async (app: HttpServer) => {
       })
       const localPath = normalizedArgs[0]
-      const fileName = localPath.split('/').pop() ?? ''
+      const fileName = localPath.split(sep).pop() ?? ''
       const rq = DownloadManager.instance.networkRequests[fileName]
       DownloadManager.instance.networkRequests[fileName] = undefined
       rq?.abort()
diff --git a/core/src/types/message/index.ts b/core/src/types/message/index.ts
index e8d78deda..ebb4c363d 100644
--- a/core/src/types/message/index.ts
+++ b/core/src/types/message/index.ts
@@ -1,3 +1,4 @@
 export * from './messageEntity'
 export * from './messageInterface'
 export * from './messageEvent'
+export * from './messageRequestType'
diff --git a/core/src/types/message/messageEntity.ts b/core/src/types/message/messageEntity.ts
index 87e4b1997..e9211d550 100644
--- a/core/src/types/message/messageEntity.ts
+++ b/core/src/types/message/messageEntity.ts
@@ -27,6 +27,8 @@ export type ThreadMessage = {
   updated: number
   /** The additional metadata of this message. **/
   metadata?: Record
+
+  type?: string
 }
 /**
@@ -56,6 +58,8 @@ export type MessageRequest = {
   /** The thread of this message is belong to. **/
   // TODO: deprecate threadId field
   thread?: Thread
+
+  type?: string
 }
 /**
diff --git a/core/src/types/message/messageRequestType.ts b/core/src/types/message/messageRequestType.ts
new file mode 100644
index 000000000..51be51996
--- /dev/null
+++ b/core/src/types/message/messageRequestType.ts
@@ -0,0 +1,5 @@
+export enum MessageRequestType {
+  Thread = 'Thread',
+  Assistant = 'Assistant',
+  Summary = 'Summary',
+}
\ No newline at end of file
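A quick sketch of how the new `MessageRequestType` is meant to be used when building a `MessageRequest`, so downstream handlers can tell a visible thread message apart from a hidden summarization call. The field values are illustrative; the `@janhq/core` import path is the one used elsewhere in this patch.

```ts
import { MessageRequest, MessageRequestType } from '@janhq/core'
import { ulid } from 'ulid'

// Hypothetical request: engines skip emitting OnMessageResponse for Summary requests.
const request: MessageRequest = {
  id: ulid(),
  threadId: 'some-thread-id', // placeholder id
  type: MessageRequestType.Summary,
  messages: [],
}
```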
diff --git a/electron/handlers/download.ts b/electron/handlers/download.ts
index 5f1d8371e..85261847b 100644
--- a/electron/handlers/download.ts
+++ b/electron/handlers/download.ts
@@ -1,5 +1,5 @@
 import { ipcMain } from 'electron'
-import { resolve } from 'path'
+import { resolve, sep } from 'path'
 import { WindowManager } from './../managers/window'
 import request from 'request'
 import { createWriteStream, renameSync } from 'fs'
@@ -46,7 +46,7 @@ export function handleDownloaderIPCs() {
         DownloadEvent.onFileDownloadError,
         {
           fileName,
-          err: { message: 'aborted' },
+          error: 'aborted',
         }
       )
     }
@@ -68,7 +68,7 @@ export function handleDownloaderIPCs() {
     if (typeof localPath === 'string') {
      localPath = normalizeFilePath(localPath)
     }
-    const array = localPath.split('/')
+    const array = localPath.split(sep)
     const fileName = array.pop() ?? ''
     const modelId = array.pop() ?? ''
@@ -92,13 +92,13 @@ export function handleDownloaderIPCs() {
           }
         )
       })
-      .on('error', function (err: Error) {
+      .on('error', function (error: Error) {
         WindowManager?.instance.currentWindow?.webContents.send(
           DownloadEvent.onFileDownloadError,
           {
             fileName,
-            err,
             modelId,
+            error,
           }
         )
       })
@@ -121,7 +121,7 @@ export function handleDownloaderIPCs() {
         {
           fileName,
           modelId,
-          err: { message: 'aborted' },
+          error: 'aborted',
         }
       )
     }
diff --git a/electron/main.ts b/electron/main.ts
index 5d7e59c0f..6cbac9a06 100644
--- a/electron/main.ts
+++ b/electron/main.ts
@@ -25,25 +25,11 @@ import { migrateExtensions } from './utils/migration'
 import { cleanUpAndQuit } from './utils/clean'
 import { setupExtensions } from './utils/extension'
 import { setupCore } from './utils/setup'
+import { setupReactDevTool } from './utils/dev'

 app
   .whenReady()
-  .then(async () => {
-    if (!app.isPackaged) {
-      // Which means you're running from source code
-      const { default: installExtension, REACT_DEVELOPER_TOOLS } = await import(
-        'electron-devtools-installer'
-      ) // Don't use import on top level, since the installer package is dev-only
-      try {
-        const name = installExtension(REACT_DEVELOPER_TOOLS)
-        console.log(`Added Extension: ${name}`)
-      } catch (err) {
-        console.log('An error occurred while installing devtools:')
-        console.error(err)
-        // Only log the error and don't throw it because it's not critical
-      }
-    }
-  })
+  .then(setupReactDevTool)
   .then(setupCore)
   .then(createUserSpace)
   .then(migrateExtensions)
diff --git a/electron/package.json b/electron/package.json
index 48f3a0811..deff3826a 100644
--- a/electron/package.json
+++ b/electron/package.json
@@ -63,11 +63,11 @@
     "build:test:darwin": "tsc -p . && electron-builder -p never -m --dir",
     "build:test:win32": "tsc -p . && electron-builder -p never -w --dir",
     "build:test:linux": "tsc -p . && electron-builder -p never -l --dir",
-    "build:darwin": "tsc -p . && electron-builder -p never -m --x64 --arm64",
+    "build:darwin": "tsc -p . && electron-builder -p never -m",
     "build:win32": "tsc -p . && electron-builder -p never -w",
     "build:linux": "tsc -p . && electron-builder -p never -l deb -l AppImage",
     "build:publish": "yarn copy:assets && run-script-os",
-    "build:publish:darwin": "tsc -p . && electron-builder -p always -m --x64 --arm64",
+    "build:publish:darwin": "tsc -p . && electron-builder -p always -m",
     "build:publish:win32": "tsc -p . && electron-builder -p always -w",
     "build:publish:linux": "tsc -p . && electron-builder -p always -l deb -l AppImage"
   },
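The `split(sep)` changes above assume `normalizeFilePath` yields a platform-native path, so the separator has to match `path.sep` rather than a hard-coded `/`. A minimal sketch of the behavior the handlers rely on (the path segments are made up):

```ts
import { sep } from 'path'

// 'models/llama-2-7b/model.gguf' on Linux/macOS,
// 'models\\llama-2-7b\\model.gguf' on Windows.
const localPath = ['models', 'llama-2-7b', 'model.gguf'].join(sep)

const parts = localPath.split(sep)
const fileName = parts.pop() ?? '' // 'model.gguf'
const modelId = parts.pop() ?? ''  // 'llama-2-7b'
```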
diff --git a/electron/utils/dev.ts b/electron/utils/dev.ts
new file mode 100644
index 000000000..fdec3b1d4
--- /dev/null
+++ b/electron/utils/dev.ts
@@ -0,0 +1,18 @@
+import { app } from 'electron'
+
+export const setupReactDevTool = async () => {
+  if (!app.isPackaged) {
+    // Which means you're running from source code
+    const { default: installExtension, REACT_DEVELOPER_TOOLS } = await import(
+      'electron-devtools-installer'
+    ) // Don't use import on top level, since the installer package is dev-only
+    try {
+      const name = installExtension(REACT_DEVELOPER_TOOLS)
+      console.log(`Added Extension: ${name}`)
+    } catch (err) {
+      console.log('An error occurred while installing devtools:')
+      console.error(err)
+      // Only log the error and don't throw it because it's not critical
+    }
+  }
+}
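A possible follow-up for the extracted helper, not part of this patch: `electron-devtools-installer`'s `installExtension` returns a promise, so awaiting it inside `setupReactDevTool` would log the real extension name and let the `try/catch` actually observe installation failures. A sketch under that assumption:

```ts
const { default: installExtension, REACT_DEVELOPER_TOOLS } = await import(
  'electron-devtools-installer'
)
try {
  // Assumes installExtension resolves to the installed extension's name.
  const name = await installExtension(REACT_DEVELOPER_TOOLS)
  console.log(`Added Extension: ${name}`)
} catch (err) {
  console.log('An error occurred while installing devtools:')
  console.error(err)
}
```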
diff --git a/extensions/assistant-extension/package.json b/extensions/assistant-extension/package.json
index 2d0d8f5c7..baa858655 100644
--- a/extensions/assistant-extension/package.json
+++ b/extensions/assistant-extension/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/assistant-extension",
-  "version": "1.0.0",
+  "version": "1.0.1",
   "description": "This extension enables assistants, including Jan, a default assistant that can call all downloaded models",
   "main": "dist/index.js",
   "node": "dist/node/index.js",
diff --git a/extensions/assistant-extension/src/index.ts b/extensions/assistant-extension/src/index.ts
index 8bc8cafdc..785b3768e 100644
--- a/extensions/assistant-extension/src/index.ts
+++ b/extensions/assistant-extension/src/index.ts
@@ -14,6 +14,7 @@ import {
 export default class JanAssistantExtension extends AssistantExtension {
   private static readonly _homeDir = "file://assistants";
+  private static readonly _threadDir = "file://threads";
   controller = new AbortController();
   isCancelled = false;
@@ -64,6 +65,8 @@ export default class JanAssistantExtension extends AssistantExtension {
     if (
       data.model?.engine !== InferenceEngine.tool_retrieval_enabled ||
       !data.messages ||
+      // TODO: Since the engine is defined, it's unsafe to assume that assistant tools are defined.
+      // That could lead to an issue where the thread gets stuck generating a response.
       !data.thread?.assistants[0]?.tools
     ) {
       return;
     }
@@ -71,11 +74,12 @@
     const latestMessage = data.messages[data.messages.length - 1];
-    // Ingest the document if needed
+    // 1. Ingest the document if needed
     if (
       latestMessage &&
       latestMessage.content &&
-      typeof latestMessage.content !== "string"
+      typeof latestMessage.content !== "string" &&
+      latestMessage.content.length > 1
     ) {
       const docFile = latestMessage.content[1]?.doc_url?.url;
       if (docFile) {
@@ -86,9 +90,29 @@
           data.model?.proxyEngine
         );
       }
+    } else if (
+      // Check whether we need to ingest the document or not,
+      // otherwise the wrong context will be sent
+      !(await fs.existsSync(
+        await joinPath([
+          JanAssistantExtension._threadDir,
+          data.threadId,
+          "memory",
+        ])
+      ))
+    ) {
+      // No document ingested, reroute the result to the inference engine
+      const output = {
+        ...data,
+        model: {
+          ...data.model,
+          engine: data.model.proxyEngine,
+        },
+      };
+      events.emit(MessageEvent.OnMessageSent, output);
+      return;
     }
-
-    // Load agent on thread changed
+    // 2. Load agent on thread changed
     if (instance.retrievalThreadId !== data.threadId) {
       await executeOnMain(NODE, "toolRetrievalLoadThreadMemory", data.threadId);
@@ -103,22 +127,22 @@
       );
     }
+    // 3. Apply the retrieval template to the result and query
     if (latestMessage.content) {
       const prompt =
         typeof latestMessage.content === "string"
           ? latestMessage.content
           : latestMessage.content[0].text;
       // Retrieve the result
-      console.debug("toolRetrievalQuery", latestMessage.content);
       const retrievalResult = await executeOnMain(
         NODE,
         "toolRetrievalQueryResult",
         prompt
       );
+      console.debug("toolRetrievalQueryResult", retrievalResult);
-      // Update the message content
-      // Using the retrieval template with the result and query
-      if (data.thread?.assistants[0].tools)
+      // Update message content
+      if (data.thread?.assistants[0]?.tools && retrievalResult)
         data.messages[data.messages.length - 1].content =
           data.thread.assistants[0].tools[0].settings?.retrieval_template
             ?.replace("{CONTEXT}", retrievalResult)
@@ -140,7 +164,7 @@
       return message;
     });
-    // Reroute the result to inference engine
+    // 4. Reroute the result to the inference engine
     const output = {
       ...data,
       model: {
@@ -248,12 +272,12 @@
             chunk_size: 1024,
             chunk_overlap: 64,
             retrieval_template: `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
-          ----------------
-          CONTEXT: {CONTEXT}
-          ----------------
-          QUESTION: {QUESTION}
-          ----------------
-          Helpful Answer:`,
+----------------
+CONTEXT: {CONTEXT}
+----------------
+QUESTION: {QUESTION}
+----------------
+Helpful Answer:`,
           },
         },
       ],
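A small sketch of how the reformatted `retrieval_template` placeholders end up being filled in step 3 before the request is rerouted. The context and question strings are made up; the substitution mirrors the `replace` chain in the extension.

```ts
const retrievalTemplate = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
CONTEXT: {CONTEXT}
----------------
QUESTION: {QUESTION}
----------------
Helpful Answer:`

// Same substitution the extension performs on the latest message content.
const prompt = retrievalTemplate
  .replace('{CONTEXT}', 'Jan can ingest one document per thread into a memory folder.') // retrieved chunks
  .replace('{QUESTION}', 'What does the ingested document say about memory?') // original user prompt
```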
diff --git a/extensions/assistant-extension/src/node/index.ts b/extensions/assistant-extension/src/node/index.ts
index 95a7243a4..c308a2d57 100644
--- a/extensions/assistant-extension/src/node/index.ts
+++ b/extensions/assistant-extension/src/node/index.ts
@@ -1,39 +1,39 @@
 import { getJanDataFolderPath, normalizeFilePath } from "@janhq/core/node";
-import { Retrieval } from "./tools/retrieval";
+import { retrieval } from "./tools/retrieval";
 import path from "path";

-const retrieval = new Retrieval();
-
-export async function toolRetrievalUpdateTextSplitter(
+export function toolRetrievalUpdateTextSplitter(
   chunkSize: number,
-  chunkOverlap: number,
+  chunkOverlap: number
 ) {
   retrieval.updateTextSplitter(chunkSize, chunkOverlap);
-  return Promise.resolve();
 }

 export async function toolRetrievalIngestNewDocument(
   file: string,
-  engine: string,
+  engine: string
 ) {
   const filePath = path.join(getJanDataFolderPath(), normalizeFilePath(file));
   const threadPath = path.dirname(filePath.replace("files", ""));
   retrieval.updateEmbeddingEngine(engine);
-  await retrieval.ingestAgentKnowledge(filePath, `${threadPath}/memory`);
-  return Promise.resolve();
+  return retrieval
+    .ingestAgentKnowledge(filePath, `${threadPath}/memory`)
+    .catch((err) => {
+      console.error(err);
+    });
 }

 export async function toolRetrievalLoadThreadMemory(threadId: string) {
-  try {
-    await retrieval.loadRetrievalAgent(
-      path.join(getJanDataFolderPath(), "threads", threadId, "memory"),
-    );
-    return Promise.resolve();
-  } catch (err) {
-    console.debug(err);
-  }
+  return retrieval
+    .loadRetrievalAgent(
+      path.join(getJanDataFolderPath(), "threads", threadId, "memory")
+    )
+    .catch((err) => {
+      console.error(err);
+    });
 }

 export async function toolRetrievalQueryResult(query: string) {
-  const res = await retrieval.generateResult(query);
-  return Promise.resolve(res);
+  return retrieval.generateResult(query).catch((err) => {
+    console.error(err);
+  });
 }
diff --git a/extensions/assistant-extension/src/node/tools/retrieval/index.ts b/extensions/assistant-extension/src/node/tools/retrieval/index.ts
index 8c7a6aa2b..b30291579 100644
--- a/extensions/assistant-extension/src/node/tools/retrieval/index.ts
+++ b/extensions/assistant-extension/src/node/tools/retrieval/index.ts
@@ -35,6 +35,7 @@
     if (engine === "nitro") {
       this.embeddingModel = new OpenAIEmbeddings(
         { openAIApiKey: "nitro-embedding" },
+        // TODO: Raw settings
         { basePath: "http://127.0.0.1:3928/v1" },
       );
     } else {
@@ -75,3 +76,5 @@
     return Promise.resolve(serializedDoc);
   };
 }
+
+export const retrieval = new Retrieval();
\ No newline at end of file
diff --git a/extensions/inference-nitro-extension/src/index.ts b/extensions/inference-nitro-extension/src/index.ts
index 9e96ad93f..7374b6977 100644
--- a/extensions/inference-nitro-extension/src/index.ts
+++ b/extensions/inference-nitro-extension/src/index.ts
@@ -10,6 +10,7 @@ import {
   ChatCompletionRole,
   ContentType,
   MessageRequest,
+  MessageRequestType,
   MessageStatus,
   ThreadContent,
   ThreadMessage,
@@ -250,6 +251,7 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
     const message: ThreadMessage = {
       id: ulid(),
       thread_id: data.threadId,
+      type: data.type,
       assistant_id: data.assistantId,
       role: ChatCompletionRole.Assistant,
       content: [],
@@ -258,7 +260,10 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
       updated: timestamp,
       object: "thread.message",
     };
-    events.emit(MessageEvent.OnMessageResponse, message);
+
+    if (data.type !== MessageRequestType.Summary) {
+      events.emit(MessageEvent.OnMessageResponse, message);
+    }
     this.isCancelled = false;
     this.controller = new AbortController();
diff --git a/extensions/inference-openai-extension/src/index.ts b/extensions/inference-openai-extension/src/index.ts
index fd1230bc7..23fd8983e 100644
--- a/extensions/inference-openai-extension/src/index.ts
+++ b/extensions/inference-openai-extension/src/index.ts
@@ -18,6 +18,7 @@ import {
   InferenceEngine,
   BaseExtension,
   MessageEvent,
+  MessageRequestType,
   ModelEvent,
   InferenceEvent,
   AppConfigurationEventName,
@@ -157,6 +158,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
     const message: ThreadMessage = {
       id: ulid(),
       thread_id: data.threadId,
+      type: data.type,
       assistant_id: data.assistantId,
       role: ChatCompletionRole.Assistant,
       content: [],
@@ -165,7 +167,10 @@
       updated: timestamp,
       object: "thread.message",
     };
-    events.emit(MessageEvent.OnMessageResponse, message);
+
+    if (data.type !== MessageRequestType.Summary) {
+      events.emit(MessageEvent.OnMessageResponse, message);
+    }
     instance.isCancelled = false;
     instance.controller = new AbortController();
diff --git a/uikit/src/select/styles.scss b/uikit/src/select/styles.scss
index 6f6cd5800..90485723a 100644
--- a/uikit/src/select/styles.scss
+++ b/uikit/src/select/styles.scss
@@ -21,6 +21,7 @@
   &-item {
     @apply hover:bg-secondary relative my-1 block w-full cursor-pointer select-none items-center rounded-sm px-4 py-2 text-sm data-[disabled]:pointer-events-none data-[disabled]:opacity-50;
+    @apply focus:outline-none focus-visible:outline-0;
   }

   &-trigger-viewport {
diff --git a/web/containers/DropdownListSidebar/index.tsx b/web/containers/DropdownListSidebar/index.tsx
index 140a1aba1..2679d6869 100644
--- a/web/containers/DropdownListSidebar/index.tsx
+++ b/web/containers/DropdownListSidebar/index.tsx
@@ -14,7 +14,14 @@ import {
 import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'

-import { MonitorIcon } from 'lucide-react'
+import {
+  MonitorIcon,
+  LayoutGridIcon,
+  FoldersIcon,
+  GlobeIcon,
+  CheckIcon,
+  CopyIcon,
+} from 'lucide-react'

 import { twMerge } from 'tailwind-merge'
@@ -22,6 +29,7 @@ import { MainViewState } from '@/constants/screens'
 import { useActiveModel } from '@/hooks/useActiveModel'
+import { useClipboard } from '@/hooks/useClipboard'
 import { useMainViewState } from '@/hooks/useMainViewState'
 import useRecommendedModel from '@/hooks/useRecommendedModel'
@@ -42,6 +50,8 @@ import {
 export const selectedModelAtom = atom(undefined)

+const engineOptions = ['Local', 'Remote']
+
 // TODO: Move all of the unscoped logics outside of the component
 const DropdownListSidebar = ({
   strictedThread = true,
@@ -51,13 +61,24 @@ const DropdownListSidebar = ({
   const activeThread = useAtomValue(activeThreadAtom)
   const [selectedModel, setSelectedModel] = useAtom(selectedModelAtom)
   const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
-
+  const [isTabActive, setIsTabActive] = useState(0)
   const { stateModel } = useActiveModel()
   const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
   const { setMainViewState } = useMainViewState()
   const [loader, setLoader] = useState(0)
   const { recommendedModel, downloadedModels } = useRecommendedModel()
   const { updateModelParameter } = useUpdateModelParameters()
+  const clipboard = useClipboard({ timeout: 1000 })
+  const [copyId, setCopyId] = useState('')
+
+  const localModel = downloadedModels.filter(
+    (model) => model.engine === InferenceEngine.nitro
+  )
+  const remoteModel = downloadedModels.filter(
+    (model) => model.engine === InferenceEngine.openai
+  )
+
+  const modelOptions = isTabActive === 0 ? localModel : remoteModel

   useEffect(() => {
     if (!activeThread) return
@@ -171,48 +192,145 @@
-
- - Local +
+
    + {engineOptions.map((name, i) => { + return ( +
  • setIsTabActive(i)} + > + {i === 0 ? ( + + ) : ( + + )} + + {name} + +
  • + ) + })} +
+
{downloadedModels.length === 0 ? (

{`Oops, you don't have a model yet.`}

) : ( - - {downloadedModels.map((x, i) => ( - -
- {x.name} -
- - {toGibibytes(x.metadata.size)} - - {x.engine == InferenceEngine.nitro && ( - + + <> + {modelOptions.map((x, i) => ( +
+ +
+ {x.engine === InferenceEngine.openai && ( + + + + )} +
+ + {x.name} + +
+ + {toGibibytes(x.metadata.size)} + + {x.engine == InferenceEngine.nitro && ( + + )} +
+
+
+
+
+ {x.id} + {clipboard.copied && copyId === x.id ? ( + + ) : ( + { + clipboard.copy(x.id) + setCopyId(x.id) + }} + /> )}
- - ))} + ))} +
)}
-
+
+
diff --git a/web/containers/Providers/EventHandler.tsx b/web/containers/Providers/EventHandler.tsx
index 170ec5e64..7f8bd261c 100644
--- a/web/containers/Providers/EventHandler.tsx
+++ b/web/containers/Providers/EventHandler.tsx
@@ -2,16 +2,21 @@
 import { ReactNode, useCallback, useEffect, useRef } from 'react'

 import {
+  ChatCompletionMessage,
+  ChatCompletionRole,
   events,
   ThreadMessage,
   ExtensionTypeEnum,
   MessageStatus,
+  MessageRequest,
   Model,
   ConversationalExtension,
   MessageEvent,
+  MessageRequestType,
   ModelEvent,
 } from '@janhq/core'

 import { useAtomValue, useSetAtom } from 'jotai'
+import { ulid } from 'ulid'

 import {
   activeModelAtom,
@@ -25,6 +30,7 @@ import { toaster } from '../Toast'
 import { extensionManager } from '@/extension'
 import {
+  getCurrentChatMessagesAtom,
   addNewMessageAtom,
   updateMessageAtom,
 } from '@/helpers/atoms/ChatMessage.atom'
@@ -37,9 +43,11 @@ import {
 } from '@/helpers/atoms/Thread.atom'

 export default function EventHandler({ children }: { children: ReactNode }) {
+  const messages = useAtomValue(getCurrentChatMessagesAtom)
   const addNewMessage = useSetAtom(addNewMessageAtom)
   const updateMessage = useSetAtom(updateMessageAtom)
   const downloadedModels = useAtomValue(downloadedModelsAtom)
+  const activeModel = useAtomValue(activeModelAtom)
   const setActiveModel = useSetAtom(activeModelAtom)
   const setStateModel = useSetAtom(stateModelAtom)
   const setQueuedMessage = useSetAtom(queuedMessageAtom)
@@ -51,6 +59,8 @@ export default function EventHandler({ children }: { children: ReactNode }) {
   const threadsRef = useRef(threads)
   const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
   const updateThread = useSetAtom(updateThreadAtom)
+  const messagesRef = useRef(messages)
+  const activeModelRef = useRef(activeModel)

   useEffect(() => {
     threadsRef.current = threads
@@ -60,9 +70,51 @@ export default function EventHandler({ children }: { children: ReactNode }) {
     modelsRef.current = downloadedModels
   }, [downloadedModels])

+  useEffect(() => {
+    messagesRef.current = messages
+  }, [messages])
+
+  useEffect(() => {
+    activeModelRef.current = activeModel
+  }, [activeModel])
+
   const onNewMessageResponse = useCallback(
     (message: ThreadMessage) => {
-      addNewMessage(message)
+      const thread = threadsRef.current?.find((e) => e.id == message.thread_id)
+      // If this is the first ever prompt in the thread
+      if (thread && thread.title.trim() == 'New Thread') {
+        // This is the first message that comes in on a new thread.
+        // Summarize the first message and make that the title of the Thread.
+        // 1. Get the summary of the first prompt using whatever engine the user is currently using
+        const firstPrompt = messagesRef?.current[0].content[0].text.value.trim()
+        const summarizeFirstPrompt =
+          'Summarize "' + firstPrompt + '" in 5 words as a title'
+
+        // Prompt: Given this query from the user {query}, return the summary in 5 words as the title
+        const msgId = ulid()
+        const messages: ChatCompletionMessage[] = [
+          {
+            role: ChatCompletionRole.User,
+            content: summarizeFirstPrompt,
+          } as ChatCompletionMessage,
+        ]
+
+        const firstPromptRequest: MessageRequest = {
+          id: msgId,
+          threadId: message.thread_id,
+          type: MessageRequestType.Summary,
+          messages,
+          model: activeModelRef?.current,
+        }
+
+        // 2. Update the title with the result of the inference
+        // the title will be updated as part of the `EventName.OnFirstPromptUpdate`
+        events.emit(MessageEvent.OnMessageSent, firstPromptRequest)
+      }
+
+      if (message.type !== MessageRequestType.Summary) {
+        addNewMessage(message)
+      }
     },
     [addNewMessage]
   )
@@ -134,6 +186,11 @@
         ...(messageContent && { lastMessage: messageContent }),
       }

+      // Update the Thread title with the response of the inference on the 1st prompt
+      if (message.type === MessageRequestType.Summary) {
+        thread.title = messageContent
+      }
+
       updateThread({
         ...thread,
         metadata,
@@ -146,9 +203,12 @@
         metadata,
       })

-      extensionManager
-        .get(ExtensionTypeEnum.Conversational)
-        ?.addNewMessage(message)
+      // If this is not the summary of the Thread, there is no need to add it to the Thread
+      if (message.type !== MessageRequestType.Summary) {
+        extensionManager
+          .get(ExtensionTypeEnum.Conversational)
+          ?.addNewMessage(message)
+      }
     }
   },
   [updateMessage, updateThreadWaiting, setIsGeneratingResponse, updateThread]
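The `messagesRef`/`activeModelRef` mirrors added above follow the usual pattern for reading fresh state inside event callbacks that are registered once. A minimal sketch of the idea with generic, non-Jan names:

```tsx
import { useEffect, useRef } from 'react'

function useLatest<T>(value: T) {
  // Keep a ref in sync with the latest value so long-lived event
  // handlers can read it without being re-registered on every render.
  const ref = useRef(value)
  useEffect(() => {
    ref.current = value
  }, [value])
  return ref
}
```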
diff --git a/web/hooks/useDownloadState.ts b/web/hooks/useDownloadState.ts
index 207cca69f..863c612ed 100644
--- a/web/hooks/useDownloadState.ts
+++ b/web/hooks/useDownloadState.ts
@@ -44,7 +44,10 @@ export const setDownloadStateAtom = atom(
       })
     } else {
       let error = state.error
-      if (state.error?.includes('certificate')) {
+      if (
+        typeof error?.includes === 'function' &&
+        state.error?.includes('certificate')
+      ) {
         error +=
           '. To fix enable "Ignore SSL Certificates" in Advanced settings.'
       }
diff --git a/web/hooks/useSendChatMessage.ts b/web/hooks/useSendChatMessage.ts
index 7d89764db..d7c2d10fd 100644
--- a/web/hooks/useSendChatMessage.ts
+++ b/web/hooks/useSendChatMessage.ts
@@ -6,6 +6,7 @@ import {
   ChatCompletionRole,
   ContentType,
   MessageRequest,
+  MessageRequestType,
   MessageStatus,
   ExtensionTypeEnum,
   Thread,
@@ -112,6 +113,7 @@ export default function useSendChatMessage() {
     const messageRequest: MessageRequest = {
       id: ulid(),
+      type: MessageRequestType.Thread,
       messages: messages,
       threadId: activeThread.id,
       model: activeThread.assistants[0].model ?? selectedModel,
@@ -209,6 +211,7 @@
     }
     const messageRequest: MessageRequest = {
       id: msgId,
+      type: MessageRequestType.Thread,
       threadId: activeThread.id,
       messages,
       model: {
@@ -218,8 +221,8 @@
       },
       thread: activeThread,
     }
-    const timestamp = Date.now()
+
+    const timestamp = Date.now()
     const content: any = []
     if (base64Blob && fileUpload[0]?.type === 'image') {
diff --git a/web/screens/LocalServer/index.tsx b/web/screens/LocalServer/index.tsx
index 65b6c8563..88a9d86ba 100644
--- a/web/screens/LocalServer/index.tsx
+++ b/web/screens/LocalServer/index.tsx
@@ -373,6 +373,28 @@ const LocalServerScreen = () => {
           )}
         >
+
+ + + + +

+ You can concurrently send requests to one active local model and + multiple remote models. +

+
{loadModelError && (