Merge pull request #4486 from janhq/main

chore: sync from main into dev
This commit is contained in:
Louis 2025-01-20 13:50:44 +07:00 committed by GitHub
commit 105a1c3078
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
36 changed files with 217 additions and 220 deletions

View File

@@ -91,8 +91,12 @@ export function requestInference(
           const toParse = cachedLines + line
           if (!line.includes('data: [DONE]')) {
             const data = JSON.parse(toParse.replace('data: ', ''))
-            if ('error' in data) {
-              subscriber.error(data.error)
+            if (
+              'error' in data ||
+              'message' in data ||
+              'detail' in data
+            ) {
+              subscriber.error(data.error ?? data)
               subscriber.complete()
               return
             }
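Note: the widened guard above now treats `error`, `message`, and `detail` keys in a streamed chunk as failure signals. A minimal standalone sketch of the same check; the payload shapes and variable names are illustrative assumptions, not taken from the extension:

type StreamPayload = { error?: unknown; message?: string; detail?: string }

// Hypothetical payloads an OpenAI-compatible server could stream back.
const payloads: StreamPayload[] = [
  { error: { message: 'model not found' } }, // OpenAI-style error envelope
  { message: 'Invalid API key' }, // bare message field
  { detail: 'Not authenticated' }, // FastAPI-style detail field
]

for (const data of payloads) {
  if ('error' in data || 'message' in data || 'detail' in data) {
    // Same fallback as the diff: prefer the nested error, otherwise pass the whole payload.
    console.log(data.error ?? data)
  }
}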

View File

@@ -8,7 +8,7 @@
     "inference_params": {
       "max_tokens": 4096,
       "temperature": 0.7,
-      "stream": false
+      "stream": true
     },
     "engine": "anthropic"
   },
@@ -21,7 +21,7 @@
     "inference_params": {
       "max_tokens": 8192,
       "temperature": 0.7,
-      "stream": false
+      "stream": true
     },
     "engine": "anthropic"
   },

View File

@@ -5,7 +5,7 @@
     "name": "Martian Model Router",
     "version": "1.0",
     "description": "Martian Model Router dynamically routes requests to the best LLM in real-time",
-    "parameters": {
+    "inference_params": {
       "max_tokens": 4096,
       "temperature": 0.7,
       "top_p": 0.95,

View File

@@ -5,7 +5,7 @@
     "name": "Mistral Small",
     "version": "1.1",
     "description": "Mistral Small is the ideal choice for simple tasks (Classification, Customer Support, or Text Generation) at an affordable price.",
-    "parameters": {
+    "inference_params": {
       "max_tokens": 32000,
       "temperature": 0.7,
       "top_p": 0.95,
@@ -19,7 +19,7 @@
     "name": "Mistral Large",
     "version": "1.1",
     "description": "Mistral Large is ideal for complex tasks (Synthetic Text Generation, Code Generation, RAG, or Agents).",
-    "parameters": {
+    "inference_params": {
       "max_tokens": 32000,
       "temperature": 0.7,
       "top_p": 0.95,
@@ -33,7 +33,7 @@
     "name": "Mixtral 8x22B",
     "version": "1.1",
     "description": "Mixtral 8x22B is a high-performance, cost-effective model designed for complex tasks.",
-    "parameters": {
+    "inference_params": {
       "max_tokens": 32000,
       "temperature": 0.7,
       "top_p": 0.95,

View File

@@ -5,7 +5,7 @@
     "name": "Mistral 7B",
     "version": "1.1",
     "description": "Mistral 7B with NVIDIA",
-    "parameters": {
+    "inference_params": {
       "max_tokens": 1024,
       "temperature": 0.3,
       "top_p": 1,

View File

@@ -5,7 +5,7 @@
     "name": "OpenRouter",
     "version": "1.0",
     "description": " OpenRouter scouts for the lowest prices and best latencies/throughputs across dozens of providers, and lets you choose how to prioritize them.",
-    "parameters": {
+    "inference_params": {
       "max_tokens": 128000,
       "temperature": 0.7,
       "top_p": 0.95,

View File

@@ -15,7 +15,7 @@
   },
   "transform_resp": {
     "chat_completions": {
-      "template": "{% if input_request.stream %} {\"object\": \"chat.completion.chunk\", \"model\": \"{{ input_request.model }}\", \"choices\": [{\"index\": 0, \"delta\": { {% if input_request.type == \"message_start\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"ping\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_delta\" %} \"role\": \"assistant\", \"content\": \"{{ input_request.delta.text }}\" {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% endif %} }, {% if input_request.type == \"content_block_stop\" %} \"finish_reason\": \"stop\" {% else %} \"finish_reason\": null {% endif %} }]} {% else %} {\"id\": \"{{ input_request.id }}\", \"created\": null, \"object\": \"chat.completion\", \"model\": \"{{ input_request.model }}\", \"choices\": [{ \"index\": 0, \"message\": { \"role\": \"{{ input_request.role }}\", \"content\": \"{% if input_request.content and input_request.content.0.type == \"text\" %} \"{{input_request.content.0.text}}\" {% endif %}\", \"refusal\": null }, \"logprobs\": null, \"finish_reason\": \"{{ input_request.stop_reason }}\" } ], \"usage\": { \"prompt_tokens\": {{ input_request.usage.input_tokens }}, \"completion_tokens\": {{ input_request.usage.output_tokens }}, \"total_tokens\": {{ input_request.usage.input_tokens + input_request.usage.output_tokens }}, \"prompt_tokens_details\": { \"cached_tokens\": 0 }, \"completion_tokens_details\": { \"reasoning_tokens\": 0, \"accepted_prediction_tokens\": 0, \"rejected_prediction_tokens\": 0 } }, \"system_fingerprint\": \"fp_6b68a8204b\"} {% endif %}"
+      "template": "{% if input_request.stream %} {\"object\": \"chat.completion.chunk\", \"model\": \"{{ input_request.model }}\", \"choices\": [{\"index\": 0, \"delta\": { {% if input_request.type == \"message_start\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"ping\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_delta\" %} \"role\": \"assistant\", \"content\": \"{{ input_request.delta.text }}\" {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% endif %} }, {% if input_request.type == \"content_block_stop\" %} \"finish_reason\": \"stop\" {% else %} \"finish_reason\": null {% endif %} }]} {% else %} {\"id\": \"{{ input_request.id }}\", \"created\": null, \"object\": \"chat.completion\", \"model\": \"{{ input_request.model }}\", \"choices\": [{ \"index\": 0, \"message\": { \"role\": \"{{ input_request.role }}\", \"content\": {% if input_request.content and input_request.content.0.type == \"text\" %} \"{{input_request.content.0.text}}\" {% else %} null {% endif %}, \"refusal\": null }, \"logprobs\": null, \"finish_reason\": \"{{ input_request.stop_reason }}\" } ], \"usage\": { \"prompt_tokens\": {{ input_request.usage.input_tokens }}, \"completion_tokens\": {{ input_request.usage.output_tokens }}, \"total_tokens\": {{ input_request.usage.input_tokens + input_request.usage.output_tokens }}, \"prompt_tokens_details\": { \"cached_tokens\": 0 }, \"completion_tokens_details\": { \"reasoning_tokens\": 0, \"accepted_prediction_tokens\": 0, \"rejected_prediction_tokens\": 0 } }, \"system_fingerprint\": \"fp_6b68a8204b\"} {% endif %}"
     }
   }
 }

View File

@@ -15,7 +15,7 @@
   },
   "transform_resp": {
     "chat_completions": {
-      "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"choices\" or key == \"created\" or key == \"model\" or key == \"service_tier\" or key == \"system_fingerprint\" or key == \"stream\" or key == \"object\" or key == \"usage\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }"
+      "template": "{{tojson(input_request)}}"
     }
   }
 }

View File

@@ -1 +1 @@
-1.0.9-rc3
+1.0.9-rc4

View File

@@ -1,19 +1,20 @@
 import { Tooltip, Button, Badge } from '@janhq/joi'
-import { useAtom, useAtomValue } from 'jotai'
+import { useAtom } from 'jotai'
 import { useActiveModel } from '@/hooks/useActiveModel'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import { toGibibytes } from '@/utils/converter'
 import { isLocalEngine } from '@/utils/modelEngine'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
 import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
 const TableActiveModel = () => {
   const { activeModel, stateModel, stopModel } = useActiveModel()
-  const engines = useAtomValue(installedEnginesAtom)
+  const { engines } = useGetEngines()
   const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
@@ -21,9 +22,7 @@ const TableActiveModel = () => {
     <div className="w-1/2">
       <div className="overflow-hidden border-b border-[hsla(var(--app-border))]">
         <table className="w-full px-8">
-          {activeModel &&
-          engines &&
-          isLocalEngine(engines, activeModel.engine) ? (
+          {activeModel && isLocalEngine(engines, activeModel.engine) ? (
             <tbody>
               <tr>
                 <td

View File

@@ -31,6 +31,7 @@ import SetupRemoteModel from '@/containers/SetupRemoteModel'
 import { useCreateNewThread } from '@/hooks/useCreateNewThread'
 import useDownloadModel from '@/hooks/useDownloadModel'
 import { modelDownloadStateAtom } from '@/hooks/useDownloadState'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import useRecommendedModel from '@/hooks/useRecommendedModel'
@@ -42,7 +43,6 @@ import { manualRecommendationModel } from '@/utils/model'
 import { getLogoEngine } from '@/utils/modelEngine'
 import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
 import {
   configuredModelsAtom,
   getDownloadingModelAtom,
@@ -86,7 +86,7 @@ const ModelDropdown = ({
     null
   )
-  const engines = useAtomValue(installedEnginesAtom)
+  const { engines } = useGetEngines()
   const downloadStates = useAtomValue(modelDownloadStateAtom)
   const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
@@ -194,13 +194,22 @@ const ModelDropdown = ({
     const modelId = activeAssistant?.model?.id
     const model = downloadedModels.find((model) => model.id === modelId)
-    setSelectedModel(model)
+    if (model) {
+      if (
+        engines?.[model.engine]?.[0]?.type === 'local' ||
+        (engines?.[model.engine]?.[0]?.api_key?.length ?? 0) > 0
+      )
+        setSelectedModel(model)
+    } else {
+      setSelectedModel(undefined)
+    }
   }, [
     recommendedModel,
     activeThread,
     downloadedModels,
     setSelectedModel,
     activeAssistant?.model?.id,
+    engines,
   ])
   const isLocalEngine = useCallback(

View File

@@ -2,11 +2,18 @@
 import { Fragment, useEffect } from 'react'
-import { AppConfiguration, getUserHomePath } from '@janhq/core'
+import {
+  AppConfiguration,
+  EngineEvent,
+  events,
+  getUserHomePath,
+} from '@janhq/core'
 import { useSetAtom } from 'jotai'
+import { useDebouncedCallback } from 'use-debounce'
 import useAssistants from '@/hooks/useAssistants'
-import useEngines from '@/hooks/useEngines'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import useGetSystemResources from '@/hooks/useGetSystemResources'
 import useModels from '@/hooks/useModels'
 import useThreads from '@/hooks/useThreads'
@@ -26,7 +33,7 @@ const DataLoader: React.FC = () => {
   const setJanDefaultDataFolder = useSetAtom(defaultJanDataFolderAtom)
   const setJanSettingScreen = useSetAtom(janSettingScreenAtom)
   const { getData: loadModels } = useModels()
-  const { getData: loadEngines } = useEngines()
+  const { mutate } = useGetEngines()
   useThreads()
   useAssistants()
@@ -35,9 +42,19 @@ const DataLoader: React.FC = () => {
   useEffect(() => {
     // Load data once
     loadModels()
-    loadEngines()
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [])
+  const reloadData = useDebouncedCallback(() => {
+    mutate()
+  }, 300)
+  useEffect(() => {
+    events.on(EngineEvent.OnEngineUpdate, reloadData)
+    return () => {
+      // Remove listener on unmount
+      events.off(EngineEvent.OnEngineUpdate, reloadData)
+    }
+  }, [reloadData])
   useEffect(() => {
     window.core?.api
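The listener above ties engine CRUD events to SWR revalidation. A small hedged sketch of the producing side; the empty event payload mirrors the RemoteEngineSettings change further down, while the wrapper function name is purely illustrative:

import { EngineEvent, events } from '@janhq/core'

// After persisting an engine change elsewhere in the app, emitting the event
// is enough: DataLoader's useDebouncedCallback collapses bursts of updates
// into at most one mutate() per 300 ms.
const notifyEngineUpdated = () => {
  events.emit(EngineEvent.OnEngineUpdate, {})
}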

View File

@@ -23,6 +23,8 @@ import { ulid } from 'ulidx'
 import { activeModelAtom, stateModelAtom } from '@/hooks/useActiveModel'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import { isLocalEngine } from '@/utils/modelEngine'
 import { extensionManager } from '@/extension'
@@ -34,7 +36,6 @@ import {
   deleteMessageAtom,
   subscribedGeneratingMessageAtom,
 } from '@/helpers/atoms/ChatMessage.atom'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
 import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
 import {
   updateThreadWaitingForResponseAtom,
@@ -75,7 +76,7 @@ export default function ModelHandler() {
   const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
   const activeModelParamsRef = useRef(activeModelParams)
   const setTokenSpeed = useSetAtom(tokenSpeedAtom)
-  const engines = useAtomValue(installedEnginesAtom)
+  const { engines } = useGetEngines()
   useEffect(() => {
     activeThreadRef.current = activeThread
@@ -336,7 +337,8 @@ export default function ModelHandler() {
     // Check model engine; we don't want to generate a title when it's not a local engine. remote model using first promp
     if (
-      !isLocalEngine(engines, activeModelRef.current?.engine as InferenceEngine)
+      activeModelRef.current?.engine !== InferenceEngine.cortex &&
+      activeModelRef.current?.engine !== InferenceEngine.cortex_llamacpp
     ) {
       const updatedThread: Thread = {
         ...thread,
@@ -396,9 +398,7 @@ export default function ModelHandler() {
     // 2. Update the title with the result of the inference
     setTimeout(() => {
-      const engine = EngineManager.instance().get(
-        messageRequest.model?.engine ?? activeModelRef.current?.engine ?? ''
-      )
+      const engine = EngineManager.instance().get(InferenceEngine.cortex)
       engine?.inference(messageRequest)
     }, 1000)
   }

View File

@@ -0,0 +1,30 @@
+'use client'
+import * as React from 'react'
+import { SWRConfig } from 'swr'
+function SWRConfigProvider({ children }: { children: React.ReactNode }) {
+  // https://swr.vercel.app/docs/advanced/cache#localstorage-based-persistent-cache
+  // When initializing, we restore the data from `localStorage` into a map.
+  const map = React.useMemo(() => new Map<string, object>(), [])
+  React.useEffect(() => {
+    const savedCache = JSON.parse(
+      window.localStorage.getItem('app-cache') || '[]'
+    )
+    savedCache.forEach(([key, value]: [string, object]) => {
+      map.set(key, value)
+    })
+    // Before unloading the app, we write back all the data into `localStorage`.
+    window.addEventListener('beforeunload', () => {
+      const appCache = JSON.stringify(Array.from(map.entries()))
+      window.localStorage.setItem('app-cache', appCache)
+    })
+  }, [map])
+  return <SWRConfig value={{ provider: () => map }}>{children}</SWRConfig>
+}
+export default SWRConfigProvider
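For reference, a minimal sketch of an SWR hook that would benefit from the persistent cache above. The hook name and fetcher wiring are assumptions for illustration, not the actual useEngineManagement implementation; the extension call itself mirrors the deleted useEngines hook shown later in this commit:

import useSWR from 'swr'
import {
  EngineManagementExtension,
  Engines,
  ExtensionTypeEnum,
} from '@janhq/core'

import { extensionManager } from '@/extension'

// Hypothetical hook: reads go through the Map provider above, so the last
// fetched engine list survives reloads via the 'app-cache' localStorage entry.
export function useGetEnginesSketch() {
  return useSWR<Engines>('getEngines', async () =>
    (await extensionManager
      .get<EngineManagementExtension>(ExtensionTypeEnum.Engine)
      ?.getEngines()) ?? ({} as Engines)
  )
}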

View File

@@ -4,6 +4,8 @@ import { PropsWithChildren } from 'react'
 import { Toaster } from 'react-hot-toast'
+import { SWRConfig } from 'swr'
 import EventListener from '@/containers/Providers/EventListener'
 import JotaiWrapper from '@/containers/Providers/Jotai'
@@ -18,27 +20,30 @@ import DeepLinkListener from './DeepLinkListener'
 import KeyListener from './KeyListener'
 import Responsive from './Responsive'
+import SWRConfigProvider from './SWRConfigProvider'
 import SettingsHandler from './SettingsHandler'
 const Providers = ({ children }: PropsWithChildren) => {
   return (
-    <ThemeWrapper>
-      <JotaiWrapper>
-        <Umami />
-        <CoreConfigurator>
-          <>
-            <Responsive />
-            <KeyListener />
-            <EventListener />
-            <DataLoader />
-            <SettingsHandler />
-            <DeepLinkListener />
-            <Toaster />
-            {children}
-          </>
-        </CoreConfigurator>
-      </JotaiWrapper>
-    </ThemeWrapper>
+    <SWRConfigProvider>
+      <ThemeWrapper>
+        <JotaiWrapper>
+          <Umami />
+          <CoreConfigurator>
+            <>
+              <Responsive />
+              <KeyListener />
+              <EventListener />
+              <DataLoader />
+              <SettingsHandler />
+              <DeepLinkListener />
+              <Toaster />
+              {children}
+            </>
+          </CoreConfigurator>
+        </JotaiWrapper>
+      </ThemeWrapper>
+    </SWRConfigProvider>
   )
 }

View File

@@ -1,7 +0,0 @@
-import { Engines } from '@janhq/core'
-import { atom } from 'jotai'
-/**
- * Store all of the installed engines including local and remote engines
- */
-export const installedEnginesAtom = atom<Engines>()

View File

@@ -159,7 +159,7 @@ export function useActiveModel() {
     }
     if (!activeModel) return
-    const engine = EngineManager.instance().get(activeModel.engine)
+    const engine = EngineManager.instance().get(InferenceEngine.cortex)
     engine?.stopInference()
   }, [activeModel, stateModel, stopModel])

View File

@@ -33,6 +33,7 @@ export const useConfigurations = () => {
   useEffect(() => {
     configurePullOptions()
+    // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [])
   return {

View File

@@ -1,53 +0,0 @@
-import { useCallback, useEffect } from 'react'
-import {
-  ExtensionTypeEnum,
-  events,
-  EngineEvent,
-  EngineManagementExtension,
-  Engines,
-} from '@janhq/core'
-import { useSetAtom } from 'jotai'
-import { useDebouncedCallback } from 'use-debounce'
-import { extensionManager } from '@/extension'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
-/**
- * useModels hook - Handles the state of models
- * It fetches the downloaded models, configured models and default model from Model Extension
- * and updates the atoms accordingly.
- */
-const useEngines = () => {
-  const setInstalledEngines = useSetAtom(installedEnginesAtom)
-  const getData = useCallback(() => {
-    getEngines().then(setInstalledEngines)
-  }, [setInstalledEngines])
-  const reloadData = useDebouncedCallback(() => getData(), 300)
-  const getEngines = async (): Promise<Engines> =>
-    extensionManager
-      .get<EngineManagementExtension>(ExtensionTypeEnum.Engine)
-      ?.getEngines()
-      .catch(() => ({}) as Engines) ?? ({} as Engines)
-  useEffect(() => {
-    // Listen for engine updates
-    events.on(EngineEvent.OnEngineUpdate, reloadData)
-    return () => {
-      // Remove listener on unmount
-      events.off(EngineEvent.OnEngineUpdate, reloadData)
-    }
-  }, [reloadData])
-  return {
-    getData,
-  }
-}
-export default useEngines

View File

@@ -6,6 +6,8 @@ import { atom, useAtomValue } from 'jotai'
 import { activeModelAtom } from './useActiveModel'
+import { useGetEngines } from './useEngineManagement'
 import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
 import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
 import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
@@ -30,6 +32,7 @@ export default function useRecommendedModel() {
   const activeThread = useAtomValue(activeThreadAtom)
   const downloadedModels = useAtomValue(downloadedModelsAtom)
   const activeAssistant = useAtomValue(activeAssistantAtom)
+  const { engines } = useGetEngines()
   const getAndSortDownloadedModels = useCallback(async (): Promise<Model[]> => {
     const models = downloadedModels.sort((a, b) =>
@@ -45,7 +48,12 @@ export default function useRecommendedModel() {
   const getRecommendedModel = useCallback(async (): Promise<
     Model | undefined
   > => {
-    const models = await getAndSortDownloadedModels()
+    const models = (await getAndSortDownloadedModels()).filter((e: Model) =>
+      engines?.[e.engine]?.[0].type === 'local' ||
+      (engines?.[e.engine]?.[0].api_key?.length ?? 0) > 0
+        ? true
+        : false
+    )
     if (!activeThread || !activeAssistant) return
     const modelId = activeAssistant.model.id
@@ -63,10 +71,8 @@ export default function useRecommendedModel() {
     }
     // sort the model, for display purpose
     if (models.length === 0) {
       // if we have no downloaded models, then can't recommend anything
-      console.debug("No downloaded models, can't recommend anything")
       setRecommendedModel(undefined)
       return
     }
@@ -94,7 +100,7 @@ export default function useRecommendedModel() {
     setRecommendedModel(lastUsedModel)
     // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [getAndSortDownloadedModels, activeThread])
+  }, [getAndSortDownloadedModels, activeThread, engines])
   useEffect(() => {
     getRecommendedModel()

View File

@@ -1,3 +1,4 @@
+/* eslint-disable @typescript-eslint/no-unused-vars */
 import { useMemo } from 'react'
 import { InferenceEngine, EngineConfig } from '@janhq/core'
@@ -5,7 +6,8 @@ import { useAtomValue } from 'jotai'
 import { isLocalEngine } from '@/utils/modelEngine'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
+import { useGetEngines } from './useEngineManagement'
 import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
 import { threadsAtom } from '@/helpers/atoms/Thread.atom'
@@ -13,7 +15,7 @@ export function useStarterScreen() {
   const downloadedModels = useAtomValue(downloadedModelsAtom)
   const threads = useAtomValue(threadsAtom)
-  const engines = useAtomValue(installedEnginesAtom)
+  const { engines } = useGetEngines()
   const remoteEngines =
     engines &&
View File

@@ -4,6 +4,8 @@ import { Model } from '@janhq/core'
 import { useAtomValue } from 'jotai'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import ModelItem from '@/screens/Hub/ModelList/ModelItem'
 import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
@@ -14,6 +16,7 @@ type Props = {
 const ModelList = ({ models }: Props) => {
   const downloadedModels = useAtomValue(downloadedModelsAtom)
+  const { engines } = useGetEngines()
   const sortedModels: Model[] = useMemo(() => {
     const featuredModels: Model[] = []
     const remoteModels: Model[] = []
@@ -22,7 +25,7 @@ const ModelList = ({ models }: Props) => {
     models.forEach((m) => {
       if (m.metadata?.tags?.includes('Featured')) {
         featuredModels.push(m)
-      } else if (m.format === 'api') {
+      } else if (engines?.[m.engine]?.[0]?.type === 'remote') {
         remoteModels.push(m)
       } else if (downloadedModels.map((m) => m.id).includes(m.id)) {
         localModels.push(m)
@@ -40,7 +43,7 @@ const ModelList = ({ models }: Props) => {
       ...remainingModels,
       ...remoteModels,
     ]
-  }, [models, downloadedModels])
+  }, [models, downloadedModels, engines])
   return (
     <div className="relative h-full w-full flex-shrink-0">

View File

@@ -1,6 +1,11 @@
 import { Fragment, useCallback, useState } from 'react'
-import { EngineManager, Model, ModelSettingParams } from '@janhq/core'
+import {
+  EngineManager,
+  InferenceEngine,
+  Model,
+  ModelSettingParams,
+} from '@janhq/core'
 import { Button, Tooltip, Select, Input, Checkbox } from '@janhq/joi'
 import { useAtom, useAtomValue, useSetAtom } from 'jotai'
@@ -94,7 +99,7 @@ const LocalServerLeftPanel = () => {
       localStorage.setItem(FIRST_TIME_VISIT_API_SERVER, 'false')
       setFirstTimeVisitAPIServer(false)
     }
-    const engine = EngineManager.instance().get((model as Model).engine)
+    const engine = EngineManager.instance().get(InferenceEngine.cortex)
     engine?.loadModel(model as Model)
     // startModel(selectedModel.id, false).catch((e) => console.error(e))
     setIsLoading(false)

View File

@@ -1,11 +1,14 @@
 /* eslint-disable @typescript-eslint/no-explicit-any */
 /* eslint-disable react/no-unescaped-entities */
+/* eslint-disable @typescript-eslint/no-unused-vars */
 import React, { useCallback, useRef, useState, useEffect } from 'react'
 import {
   EngineConfig as OriginalEngineConfig,
   InferenceEngine,
+  events,
+  EngineEvent,
 } from '@janhq/core'
 interface EngineConfig extends OriginalEngineConfig {
@@ -64,6 +67,7 @@ const RemoteEngineSettings = ({
         set(updatedEngine, field, value)
         await updateEngine(name, updatedEngine)
         mutate()
+        events.emit(EngineEvent.OnEngineUpdate, {})
       }, 300)
     },
     [engine, name, mutate]
@@ -115,6 +119,8 @@ const RemoteEngineSettings = ({
     }
   }, [engine])
+  if (!engine) return null
   return (
     <ScrollArea className="h-full w-full">
       <div className="block w-full px-4">

View File

@@ -4,16 +4,16 @@ import { InferenceEngine } from '@janhq/core'
 import { ScrollArea } from '@janhq/joi'
 import { useAtomValue } from 'jotai'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import { isLocalEngine } from '@/utils/modelEngine'
 import LocalEngineItems from './LocalEngineItem'
 import ModalAddRemoteEngine from './ModalAddRemoteEngine'
 import RemoteEngineItems from './RemoteEngineItem'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
 const Engines = () => {
-  const engines = useAtomValue(installedEnginesAtom)
+  const { engines } = useGetEngines()
   return (
     <ScrollArea className="h-full w-full">

View File

@@ -2,7 +2,7 @@ import { memo, useState } from 'react'
 import { Model } from '@janhq/core'
 import { Badge, Button, Tooltip, useClickOutside } from '@janhq/joi'
-import { useAtom, useAtomValue } from 'jotai'
+import { useAtom } from 'jotai'
 import {
   MoreVerticalIcon,
   PlayIcon,
@@ -14,11 +14,12 @@ import { twMerge } from 'tailwind-merge'
 import { useActiveModel } from '@/hooks/useActiveModel'
 import useDeleteModel from '@/hooks/useDeleteModel'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import { toGibibytes } from '@/utils/converter'
 import { isLocalEngine } from '@/utils/modelEngine'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
 import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
 type Props = {
@@ -32,7 +33,7 @@ const MyModelList = ({ model }: Props) => {
   const { deleteModel } = useDeleteModel()
   const [more, setMore] = useState(false)
   const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
-  const engines = useAtomValue(installedEnginesAtom)
+  const { engines } = useGetEngines()
   const [menu, setMenu] = useState<HTMLDivElement | null>(null)
   const [toggle, setToggle] = useState<HTMLDivElement | null>(null)

View File

@@ -36,8 +36,6 @@ import {
 import MyModelList from './MyModelList'
-import { extensionManager } from '@/extension'
 import {
   downloadedModelsAtom,
   showEngineListModelAtom,
@@ -52,9 +50,6 @@ const MyModels = () => {
     showEngineListModelAtom
   )
-  const [extensionHasSettings, setExtensionHasSettings] = useState<
-    { name?: string; setting: string; apiKey: string; provider: string }[]
-  >([])
   const { engines } = useGetEngines()
   const isLocalEngine = useCallback(
@@ -97,45 +92,6 @@ const MyModels = () => {
     setSearchText(input)
   }, [])
-  useEffect(() => {
-    const getAllSettings = async () => {
-      const extensionsMenu: {
-        name?: string
-        setting: string
-        apiKey: string
-        provider: string
-      }[] = []
-      const extensions = extensionManager.getAll()
-      for (const extension of extensions) {
-        if (typeof extension.getSettings === 'function') {
-          const settings = await extension.getSettings()
-          if (
-            (settings && settings.length > 0) ||
-            (await extension.installationState()) !== 'NotRequired'
-          ) {
-            extensionsMenu.push({
-              name: extension.productName,
-              setting: extension.name,
-              apiKey:
-                'apiKey' in extension && typeof extension.apiKey === 'string'
-                  ? extension.apiKey
-                  : '',
-              provider:
-                'provider' in extension &&
-                typeof extension.provider === 'string'
-                  ? extension.provider
-                  : '',
-            })
-          }
-        }
-      }
-      setExtensionHasSettings(extensionsMenu)
-    }
-    getAllSettings()
-  }, [])
   const findByEngine = filteredDownloadedModels.map((x) => {
     // Legacy engine support - they will be grouped under Cortex LlamaCPP
     if (x.engine === InferenceEngine.nitro)
@@ -158,9 +114,11 @@ const MyModels = () => {
     }
   })
-  const getEngineStatusReady: InferenceEngine[] = extensionHasSettings
-    ?.filter((e) => e.apiKey.length > 0)
-    .map((x) => x.provider as InferenceEngine)
+  const getEngineStatusReady: InferenceEngine[] = Object.entries(engines ?? {})
+    // eslint-disable-next-line @typescript-eslint/no-unused-vars
+    ?.filter(([_, value]) => (value?.[0]?.api_key?.length ?? 0) > 0)
+    // eslint-disable-next-line @typescript-eslint/no-unused-vars
+    .map(([key, _]) => key as InferenceEngine)
   useEffect(() => {
     setShowEngineListModel((prev) => [
@@ -168,7 +126,7 @@ const MyModels = () => {
       ...(getEngineStatusReady as InferenceEngine[]),
     ])
     // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [setShowEngineListModel, extensionHasSettings])
+  }, [setShowEngineListModel, engines])
   return (
     <div {...getRootProps()} className="h-full w-full">

View File

@@ -1,6 +1,8 @@
 import { InferenceEngine } from '@janhq/core'
 import { useAtomValue } from 'jotai'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import Advanced from '@/screens/Settings/Advanced'
 import AppearanceOptions from '@/screens/Settings/Appearance'
 import ExtensionCatalog from '@/screens/Settings/CoreExtensions'
@@ -14,12 +16,11 @@ import Privacy from '@/screens/Settings/Privacy'
 import { isLocalEngine } from '@/utils/modelEngine'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
 import { selectedSettingAtom } from '@/helpers/atoms/Setting.atom'
 const SettingDetail = () => {
   const selectedSetting = useAtomValue(selectedSettingAtom)
-  const engines = useAtomValue(installedEnginesAtom)
+  const { engines } = useGetEngines()
   switch (selectedSetting) {
     case 'Engines':

View File

@@ -6,12 +6,13 @@ import { useAtomValue } from 'jotai'
 import LeftPanelContainer from '@/containers/LeftPanelContainer'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import { getTitleByEngine, isLocalEngine } from '@/utils/modelEngine'
 import SettingItem from './SettingItem'
 import { extensionManager } from '@/extension'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
 import {
   showSettingActiveLocalEngineAtom,
@@ -20,7 +21,7 @@ import {
 import { janSettingScreenAtom } from '@/helpers/atoms/Setting.atom'
 const SettingLeftPanel = () => {
-  const engines = useAtomValue(installedEnginesAtom)
+  const { engines } = useGetEngines()
   const settingScreens = useAtomValue(janSettingScreenAtom)
   const showSettingActiveLocalEngine = useAtomValue(

View File

@@ -88,6 +88,7 @@ const AssistantSetting: React.FC<Props> = ({ componentData }) => {
       setEngineParamsUpdate,
       stopModel,
      updateThreadMetadata,
+      resetGenerating,
     ]
   )

View File

@@ -7,16 +7,17 @@ import LogoMark from '@/containers/Brand/Logo/Mark'
 import { MainViewState } from '@/constants/screens'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import { isLocalEngine } from '@/utils/modelEngine'
 import { mainViewStateAtom } from '@/helpers/atoms/App.atom'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
 import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
 const EmptyThread = () => {
   const downloadedModels = useAtomValue(downloadedModelsAtom)
   const setMainViewState = useSetAtom(mainViewStateAtom)
-  const engines = useAtomValue(installedEnginesAtom)
+  const { engines } = useGetEngines()
   const showOnboardingStep = useMemo(
     () =>
       !downloadedModels.some(

View File

@@ -24,6 +24,8 @@ import useDownloadModel from '@/hooks/useDownloadModel'
 import { modelDownloadStateAtom } from '@/hooks/useDownloadState'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import { formatDownloadPercentage, toGibibytes } from '@/utils/converter'
 import { manualRecommendationModel } from '@/utils/model'
 import {
@@ -33,7 +35,6 @@ import {
 } from '@/utils/modelEngine'
 import { mainViewStateAtom } from '@/helpers/atoms/App.atom'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
 import {
   configuredModelsAtom,
   getDownloadingModelAtom,
@@ -51,7 +52,7 @@ const OnDeviceStarterScreen = ({ isShowStarterScreen }: Props) => {
   const { downloadModel } = useDownloadModel()
   const downloadStates = useAtomValue(modelDownloadStateAtom)
   const setSelectedSetting = useSetAtom(selectedSettingAtom)
-  const engines = useAtomValue(installedEnginesAtom)
+  const { engines } = useGetEngines()
   const configuredModels = useAtomValue(configuredModelsAtom)
   const setMainViewState = useSetAtom(mainViewStateAtom)
@@ -298,40 +299,46 @@ const OnDeviceStarterScreen = ({ isShowStarterScreen }: Props) => {
                 key={rowIndex}
                 className="my-2 flex items-center gap-4 md:gap-10"
               >
-                {row.map((remoteEngine) => {
-                  const engineLogo = getLogoEngine(
-                    remoteEngine as InferenceEngine
-                  )
-                  return (
-                    <div
-                      className="flex cursor-pointer flex-col items-center justify-center gap-4"
-                      key={remoteEngine}
-                      onClick={() => {
-                        setMainViewState(MainViewState.Settings)
-                        setSelectedSetting(
-                          remoteEngine as InferenceEngine
-                        )
-                      }}
-                    >
-                      {engineLogo && (
-                        <Image
-                          width={48}
-                          height={48}
-                          src={engineLogo}
-                          alt="Engine logo"
-                          className="h-10 w-10 flex-shrink-0"
-                        />
-                      )}
-                      <p className="font-medium">
-                        {getTitleByEngine(
-                          remoteEngine as InferenceEngine
-                        )}
-                      </p>
-                    </div>
-                  )
-                })}
+                {row
+                  .filter(
+                    (e) =>
+                      engines?.[e as InferenceEngine]?.[0]?.type ===
+                      'remote'
+                  )
+                  .map((remoteEngine) => {
+                    const engineLogo = getLogoEngine(
+                      remoteEngine as InferenceEngine
+                    )
+                    return (
+                      <div
+                        className="flex cursor-pointer flex-col items-center justify-center gap-4"
+                        key={remoteEngine}
+                        onClick={() => {
+                          setMainViewState(MainViewState.Settings)
+                          setSelectedSetting(
+                            remoteEngine as InferenceEngine
+                          )
+                        }}
+                      >
+                        {engineLogo && (
+                          <Image
+                            width={48}
+                            height={48}
+                            src={engineLogo}
+                            alt="Engine logo"
+                            className="h-10 w-10 flex-shrink-0"
+                          />
+                        )}
+                        <p className="font-medium">
+                          {getTitleByEngine(
+                            remoteEngine as InferenceEngine
+                          )}
+                        </p>
+                      </div>
+                    )
+                  })}
               </div>
             )
           })}

View File

@@ -22,6 +22,7 @@ import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
 import { useActiveModel } from '@/hooks/useActiveModel'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import useSendChatMessage from '@/hooks/useSendChatMessage'
 import { uploader } from '@/utils/file'
@@ -35,7 +36,6 @@ import RichTextEditor from './RichTextEditor'
 import { showRightPanelAtom } from '@/helpers/atoms/App.atom'
 import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
 import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
 import { selectedModelAtom } from '@/helpers/atoms/Model.atom'
 import { spellCheckAtom } from '@/helpers/atoms/Setting.atom'
 import {
@@ -64,7 +64,7 @@ const ChatInput = () => {
   const textareaRef = useRef<HTMLTextAreaElement>(null)
   const fileInputRef = useRef<HTMLInputElement>(null)
   const imageInputRef = useRef<HTMLInputElement>(null)
-  const engines = useAtomValue(installedEnginesAtom)
+  const { engines } = useGetEngines()
   const experimentalFeature = useAtomValue(experimentalFeatureEnabledAtom)
   const isBlockingSend = useAtomValue(isBlockingSendAtom)
   const activeAssistant = useAtomValue(activeAssistantAtom)

View File

@@ -1,4 +1,4 @@
-import { EngineManager } from '@janhq/core'
+import { EngineManager, InferenceEngine } from '@janhq/core'
 import { useAtomValue, useSetAtom } from 'jotai'
 import ModalTroubleShooting, {
@@ -35,7 +35,7 @@ const LoadModelError = () => {
       setMainState(MainViewState.Settings)
       if (activeAssistant?.model.engine) {
         const engine = EngineManager.instance().get(
-          activeAssistant.model.engine
+          InferenceEngine.cortex
         )
         engine?.name && setSelectedSettingScreen(engine.name)
       }

View File

@@ -29,6 +29,7 @@ import RightPanelContainer from '@/containers/RightPanelContainer'
 import { useActiveModel } from '@/hooks/useActiveModel'
 import { useCreateNewThread } from '@/hooks/useCreateNewThread'
+import { useGetEngines } from '@/hooks/useEngineManagement'
 import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
 import { getConfigurationsData } from '@/utils/componentSettings'
@@ -39,7 +40,7 @@ import Tools from './Tools'
 import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
 import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
-import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
 import { selectedModelAtom } from '@/helpers/atoms/Model.atom'
 import {
   activeThreadAtom,
@@ -61,7 +62,7 @@ const ThreadRightPanel = () => {
   const [activeTabThreadRightPanel, setActiveTabThreadRightPanel] = useAtom(
     activeTabThreadRightPanelAtom
   )
-  const engines = useAtomValue(installedEnginesAtom)
+  const { engines } = useGetEngines()
   const { updateThreadMetadata } = useCreateNewThread()
   const experimentalFeature = useAtomValue(experimentalFeatureEnabledAtom)

View File

@@ -167,7 +167,6 @@ export class MessageRequestBuilder {
       messages: this.normalizeMessages(this.messages),
       model: this.model,
       thread: this.thread,
-      engine: this.model.engine,
     }
   }
} }