Merge pull request #4486 from janhq/main

chore: sync from main into dev
This commit is contained in:
Louis 2025-01-20 13:50:44 +07:00 committed by GitHub
commit 105a1c3078
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
36 changed files with 217 additions and 220 deletions

View File

@ -91,8 +91,12 @@ export function requestInference(
const toParse = cachedLines + line
if (!line.includes('data: [DONE]')) {
const data = JSON.parse(toParse.replace('data: ', ''))
if ('error' in data) {
subscriber.error(data.error)
if (
'error' in data ||
'message' in data ||
'detail' in data
) {
subscriber.error(data.error ?? data)
subscriber.complete()
return
}

View File

@ -8,7 +8,7 @@
"inference_params": {
"max_tokens": 4096,
"temperature": 0.7,
"stream": false
"stream": true
},
"engine": "anthropic"
},
@ -21,7 +21,7 @@
"inference_params": {
"max_tokens": 8192,
"temperature": 0.7,
"stream": false
"stream": true
},
"engine": "anthropic"
},

View File

@ -5,7 +5,7 @@
"name": "Martian Model Router",
"version": "1.0",
"description": "Martian Model Router dynamically routes requests to the best LLM in real-time",
"parameters": {
"inference_params": {
"max_tokens": 4096,
"temperature": 0.7,
"top_p": 0.95,

View File

@ -5,7 +5,7 @@
"name": "Mistral Small",
"version": "1.1",
"description": "Mistral Small is the ideal choice for simple tasks (Classification, Customer Support, or Text Generation) at an affordable price.",
"parameters": {
"inference_params": {
"max_tokens": 32000,
"temperature": 0.7,
"top_p": 0.95,
@ -19,7 +19,7 @@
"name": "Mistral Large",
"version": "1.1",
"description": "Mistral Large is ideal for complex tasks (Synthetic Text Generation, Code Generation, RAG, or Agents).",
"parameters": {
"inference_params": {
"max_tokens": 32000,
"temperature": 0.7,
"top_p": 0.95,
@ -33,7 +33,7 @@
"name": "Mixtral 8x22B",
"version": "1.1",
"description": "Mixtral 8x22B is a high-performance, cost-effective model designed for complex tasks.",
"parameters": {
"inference_params": {
"max_tokens": 32000,
"temperature": 0.7,
"top_p": 0.95,

View File

@ -5,7 +5,7 @@
"name": "Mistral 7B",
"version": "1.1",
"description": "Mistral 7B with NVIDIA",
"parameters": {
"inference_params": {
"max_tokens": 1024,
"temperature": 0.3,
"top_p": 1,

View File

@ -5,7 +5,7 @@
"name": "OpenRouter",
"version": "1.0",
"description": " OpenRouter scouts for the lowest prices and best latencies/throughputs across dozens of providers, and lets you choose how to prioritize them.",
"parameters": {
"inference_params": {
"max_tokens": 128000,
"temperature": 0.7,
"top_p": 0.95,

View File

@ -15,7 +15,7 @@
},
"transform_resp": {
"chat_completions": {
"template": "{% if input_request.stream %} {\"object\": \"chat.completion.chunk\", \"model\": \"{{ input_request.model }}\", \"choices\": [{\"index\": 0, \"delta\": { {% if input_request.type == \"message_start\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"ping\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_delta\" %} \"role\": \"assistant\", \"content\": \"{{ input_request.delta.text }}\" {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% endif %} }, {% if input_request.type == \"content_block_stop\" %} \"finish_reason\": \"stop\" {% else %} \"finish_reason\": null {% endif %} }]} {% else %} {\"id\": \"{{ input_request.id }}\", \"created\": null, \"object\": \"chat.completion\", \"model\": \"{{ input_request.model }}\", \"choices\": [{ \"index\": 0, \"message\": { \"role\": \"{{ input_request.role }}\", \"content\": \"{% if input_request.content and input_request.content.0.type == \"text\" %} \"{{input_request.content.0.text}}\" {% endif %}\", \"refusal\": null }, \"logprobs\": null, \"finish_reason\": \"{{ input_request.stop_reason }}\" } ], \"usage\": { \"prompt_tokens\": {{ input_request.usage.input_tokens }}, \"completion_tokens\": {{ input_request.usage.output_tokens }}, \"total_tokens\": {{ input_request.usage.input_tokens + input_request.usage.output_tokens }}, \"prompt_tokens_details\": { \"cached_tokens\": 0 }, \"completion_tokens_details\": { \"reasoning_tokens\": 0, \"accepted_prediction_tokens\": 0, \"rejected_prediction_tokens\": 0 } }, \"system_fingerprint\": \"fp_6b68a8204b\"} {% endif %}"
"template": "{% if input_request.stream %} {\"object\": \"chat.completion.chunk\", \"model\": \"{{ input_request.model }}\", \"choices\": [{\"index\": 0, \"delta\": { {% if input_request.type == \"message_start\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"ping\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_delta\" %} \"role\": \"assistant\", \"content\": \"{{ input_request.delta.text }}\" {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% endif %} }, {% if input_request.type == \"content_block_stop\" %} \"finish_reason\": \"stop\" {% else %} \"finish_reason\": null {% endif %} }]} {% else %} {\"id\": \"{{ input_request.id }}\", \"created\": null, \"object\": \"chat.completion\", \"model\": \"{{ input_request.model }}\", \"choices\": [{ \"index\": 0, \"message\": { \"role\": \"{{ input_request.role }}\", \"content\": {% if input_request.content and input_request.content.0.type == \"text\" %} \"{{input_request.content.0.text}}\" {% else %} null {% endif %}, \"refusal\": null }, \"logprobs\": null, \"finish_reason\": \"{{ input_request.stop_reason }}\" } ], \"usage\": { \"prompt_tokens\": {{ input_request.usage.input_tokens }}, \"completion_tokens\": {{ input_request.usage.output_tokens }}, \"total_tokens\": {{ input_request.usage.input_tokens + input_request.usage.output_tokens }}, \"prompt_tokens_details\": { \"cached_tokens\": 0 }, \"completion_tokens_details\": { \"reasoning_tokens\": 0, \"accepted_prediction_tokens\": 0, \"rejected_prediction_tokens\": 0 } }, \"system_fingerprint\": \"fp_6b68a8204b\"} {% endif %}"
}
}
}

View File

@ -15,7 +15,7 @@
},
"transform_resp": {
"chat_completions": {
"template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"choices\" or key == \"created\" or key == \"model\" or key == \"service_tier\" or key == \"system_fingerprint\" or key == \"stream\" or key == \"object\" or key == \"usage\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }"
"template": "{{tojson(input_request)}}"
}
}
}

View File

@ -1 +1 @@
1.0.9-rc3
1.0.9-rc4

View File

@ -1,19 +1,20 @@
import { Tooltip, Button, Badge } from '@janhq/joi'
import { useAtom, useAtomValue } from 'jotai'
import { useAtom } from 'jotai'
import { useActiveModel } from '@/hooks/useActiveModel'
import { useGetEngines } from '@/hooks/useEngineManagement'
import { toGibibytes } from '@/utils/converter'
import { isLocalEngine } from '@/utils/modelEngine'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
const TableActiveModel = () => {
const { activeModel, stateModel, stopModel } = useActiveModel()
const engines = useAtomValue(installedEnginesAtom)
const { engines } = useGetEngines()
const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
@ -21,9 +22,7 @@ const TableActiveModel = () => {
<div className="w-1/2">
<div className="overflow-hidden border-b border-[hsla(var(--app-border))]">
<table className="w-full px-8">
{activeModel &&
engines &&
isLocalEngine(engines, activeModel.engine) ? (
{activeModel && isLocalEngine(engines, activeModel.engine) ? (
<tbody>
<tr>
<td

View File

@ -31,6 +31,7 @@ import SetupRemoteModel from '@/containers/SetupRemoteModel'
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
import useDownloadModel from '@/hooks/useDownloadModel'
import { modelDownloadStateAtom } from '@/hooks/useDownloadState'
import { useGetEngines } from '@/hooks/useEngineManagement'
import useRecommendedModel from '@/hooks/useRecommendedModel'
@ -42,7 +43,6 @@ import { manualRecommendationModel } from '@/utils/model'
import { getLogoEngine } from '@/utils/modelEngine'
import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
import {
configuredModelsAtom,
getDownloadingModelAtom,
@ -86,7 +86,7 @@ const ModelDropdown = ({
null
)
const engines = useAtomValue(installedEnginesAtom)
const { engines } = useGetEngines()
const downloadStates = useAtomValue(modelDownloadStateAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
@ -194,13 +194,22 @@ const ModelDropdown = ({
const modelId = activeAssistant?.model?.id
const model = downloadedModels.find((model) => model.id === modelId)
setSelectedModel(model)
if (model) {
if (
engines?.[model.engine]?.[0]?.type === 'local' ||
(engines?.[model.engine]?.[0]?.api_key?.length ?? 0) > 0
)
setSelectedModel(model)
} else {
setSelectedModel(undefined)
}
}, [
recommendedModel,
activeThread,
downloadedModels,
setSelectedModel,
activeAssistant?.model?.id,
engines,
])
const isLocalEngine = useCallback(

View File

@ -2,11 +2,18 @@
import { Fragment, useEffect } from 'react'
import { AppConfiguration, getUserHomePath } from '@janhq/core'
import {
AppConfiguration,
EngineEvent,
events,
getUserHomePath,
} from '@janhq/core'
import { useSetAtom } from 'jotai'
import { useDebouncedCallback } from 'use-debounce'
import useAssistants from '@/hooks/useAssistants'
import useEngines from '@/hooks/useEngines'
import { useGetEngines } from '@/hooks/useEngineManagement'
import useGetSystemResources from '@/hooks/useGetSystemResources'
import useModels from '@/hooks/useModels'
import useThreads from '@/hooks/useThreads'
@ -26,7 +33,7 @@ const DataLoader: React.FC = () => {
const setJanDefaultDataFolder = useSetAtom(defaultJanDataFolderAtom)
const setJanSettingScreen = useSetAtom(janSettingScreenAtom)
const { getData: loadModels } = useModels()
const { getData: loadEngines } = useEngines()
const { mutate } = useGetEngines()
useThreads()
useAssistants()
@ -35,9 +42,19 @@ const DataLoader: React.FC = () => {
useEffect(() => {
// Load data once
loadModels()
loadEngines()
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
const reloadData = useDebouncedCallback(() => {
mutate()
}, 300)
useEffect(() => {
events.on(EngineEvent.OnEngineUpdate, reloadData)
return () => {
// Remove listener on unmount
events.off(EngineEvent.OnEngineUpdate, reloadData)
}
}, [reloadData])
useEffect(() => {
window.core?.api

View File

@ -23,6 +23,8 @@ import { ulid } from 'ulidx'
import { activeModelAtom, stateModelAtom } from '@/hooks/useActiveModel'
import { useGetEngines } from '@/hooks/useEngineManagement'
import { isLocalEngine } from '@/utils/modelEngine'
import { extensionManager } from '@/extension'
@ -34,7 +36,6 @@ import {
deleteMessageAtom,
subscribedGeneratingMessageAtom,
} from '@/helpers/atoms/ChatMessage.atom'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
import {
updateThreadWaitingForResponseAtom,
@ -75,7 +76,7 @@ export default function ModelHandler() {
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
const activeModelParamsRef = useRef(activeModelParams)
const setTokenSpeed = useSetAtom(tokenSpeedAtom)
const engines = useAtomValue(installedEnginesAtom)
const { engines } = useGetEngines()
useEffect(() => {
activeThreadRef.current = activeThread
@ -336,7 +337,8 @@ export default function ModelHandler() {
// Check model engine; we don't want to generate a title when it's not a local engine. Remote models use the first prompt
if (
!isLocalEngine(engines, activeModelRef.current?.engine as InferenceEngine)
activeModelRef.current?.engine !== InferenceEngine.cortex &&
activeModelRef.current?.engine !== InferenceEngine.cortex_llamacpp
) {
const updatedThread: Thread = {
...thread,
@ -396,9 +398,7 @@ export default function ModelHandler() {
// 2. Update the title with the result of the inference
setTimeout(() => {
const engine = EngineManager.instance().get(
messageRequest.model?.engine ?? activeModelRef.current?.engine ?? ''
)
const engine = EngineManager.instance().get(InferenceEngine.cortex)
engine?.inference(messageRequest)
}, 1000)
}

View File

@ -0,0 +1,30 @@
'use client'
import * as React from 'react'
import { SWRConfig } from 'swr'
function SWRConfigProvider({ children }: { children: React.ReactNode }) {
// https://swr.vercel.app/docs/advanced/cache#localstorage-based-persistent-cache
// When initializing, we restore the data from `localStorage` into a map.
const map = React.useMemo(() => new Map<string, object>(), [])
React.useEffect(() => {
const savedCache = JSON.parse(
window.localStorage.getItem('app-cache') || '[]'
)
savedCache.forEach(([key, value]: [string, object]) => {
map.set(key, value)
})
// Before unloading the app, we write back all the data into `localStorage`.
window.addEventListener('beforeunload', () => {
const appCache = JSON.stringify(Array.from(map.entries()))
window.localStorage.setItem('app-cache', appCache)
})
}, [map])
return <SWRConfig value={{ provider: () => map }}>{children}</SWRConfig>
}
export default SWRConfigProvider

View File

@ -4,6 +4,8 @@ import { PropsWithChildren } from 'react'
import { Toaster } from 'react-hot-toast'
import { SWRConfig } from 'swr'
import EventListener from '@/containers/Providers/EventListener'
import JotaiWrapper from '@/containers/Providers/Jotai'
@ -18,27 +20,30 @@ import DeepLinkListener from './DeepLinkListener'
import KeyListener from './KeyListener'
import Responsive from './Responsive'
import SWRConfigProvider from './SWRConfigProvider'
import SettingsHandler from './SettingsHandler'
const Providers = ({ children }: PropsWithChildren) => {
return (
<ThemeWrapper>
<JotaiWrapper>
<Umami />
<CoreConfigurator>
<>
<Responsive />
<KeyListener />
<EventListener />
<DataLoader />
<SettingsHandler />
<DeepLinkListener />
<Toaster />
{children}
</>
</CoreConfigurator>
</JotaiWrapper>
</ThemeWrapper>
<SWRConfigProvider>
<ThemeWrapper>
<JotaiWrapper>
<Umami />
<CoreConfigurator>
<>
<Responsive />
<KeyListener />
<EventListener />
<DataLoader />
<SettingsHandler />
<DeepLinkListener />
<Toaster />
{children}
</>
</CoreConfigurator>
</JotaiWrapper>
</ThemeWrapper>
</SWRConfigProvider>
)
}

View File

@ -1,7 +0,0 @@
import { Engines } from '@janhq/core'
import { atom } from 'jotai'
/**
* Store all of the installed engines including local and remote engines
*/
export const installedEnginesAtom = atom<Engines>()

View File

@ -159,7 +159,7 @@ export function useActiveModel() {
}
if (!activeModel) return
const engine = EngineManager.instance().get(activeModel.engine)
const engine = EngineManager.instance().get(InferenceEngine.cortex)
engine?.stopInference()
}, [activeModel, stateModel, stopModel])

View File

@ -33,6 +33,7 @@ export const useConfigurations = () => {
useEffect(() => {
configurePullOptions()
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
return {

View File

@ -1,53 +0,0 @@
import { useCallback, useEffect } from 'react'
import {
ExtensionTypeEnum,
events,
EngineEvent,
EngineManagementExtension,
Engines,
} from '@janhq/core'
import { useSetAtom } from 'jotai'
import { useDebouncedCallback } from 'use-debounce'
import { extensionManager } from '@/extension'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
/**
* useModels hook - Handles the state of models
* It fetches the downloaded models, configured models and default model from Model Extension
* and updates the atoms accordingly.
*/
const useEngines = () => {
const setInstalledEngines = useSetAtom(installedEnginesAtom)
const getData = useCallback(() => {
getEngines().then(setInstalledEngines)
}, [setInstalledEngines])
const reloadData = useDebouncedCallback(() => getData(), 300)
const getEngines = async (): Promise<Engines> =>
extensionManager
.get<EngineManagementExtension>(ExtensionTypeEnum.Engine)
?.getEngines()
.catch(() => ({}) as Engines) ?? ({} as Engines)
useEffect(() => {
// Listen for engine updates
events.on(EngineEvent.OnEngineUpdate, reloadData)
return () => {
// Remove listener on unmount
events.off(EngineEvent.OnEngineUpdate, reloadData)
}
}, [reloadData])
return {
getData,
}
}
export default useEngines

View File

@ -6,6 +6,8 @@ import { atom, useAtomValue } from 'jotai'
import { activeModelAtom } from './useActiveModel'
import { useGetEngines } from './useEngineManagement'
import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
@ -30,6 +32,7 @@ export default function useRecommendedModel() {
const activeThread = useAtomValue(activeThreadAtom)
const downloadedModels = useAtomValue(downloadedModelsAtom)
const activeAssistant = useAtomValue(activeAssistantAtom)
const { engines } = useGetEngines()
const getAndSortDownloadedModels = useCallback(async (): Promise<Model[]> => {
const models = downloadedModels.sort((a, b) =>
@ -45,7 +48,12 @@ export default function useRecommendedModel() {
const getRecommendedModel = useCallback(async (): Promise<
Model | undefined
> => {
const models = await getAndSortDownloadedModels()
const models = (await getAndSortDownloadedModels()).filter((e: Model) =>
engines?.[e.engine]?.[0].type === 'local' ||
(engines?.[e.engine]?.[0].api_key?.length ?? 0) > 0
? true
: false
)
if (!activeThread || !activeAssistant) return
const modelId = activeAssistant.model.id
@ -63,10 +71,8 @@ export default function useRecommendedModel() {
}
// sort the model, for display purpose
if (models.length === 0) {
// if we have no downloaded models, then can't recommend anything
console.debug("No downloaded models, can't recommend anything")
setRecommendedModel(undefined)
return
}
@ -94,7 +100,7 @@ export default function useRecommendedModel() {
setRecommendedModel(lastUsedModel)
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [getAndSortDownloadedModels, activeThread])
}, [getAndSortDownloadedModels, activeThread, engines])
useEffect(() => {
getRecommendedModel()

View File

@ -1,3 +1,4 @@
/* eslint-disable @typescript-eslint/no-unused-vars */
import { useMemo } from 'react'
import { InferenceEngine, EngineConfig } from '@janhq/core'
@ -5,7 +6,8 @@ import { useAtomValue } from 'jotai'
import { isLocalEngine } from '@/utils/modelEngine'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
import { useGetEngines } from './useEngineManagement'
import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
import { threadsAtom } from '@/helpers/atoms/Thread.atom'
@ -13,7 +15,7 @@ export function useStarterScreen() {
const downloadedModels = useAtomValue(downloadedModelsAtom)
const threads = useAtomValue(threadsAtom)
const engines = useAtomValue(installedEnginesAtom)
const { engines } = useGetEngines()
const remoteEngines =
engines &&

View File

@ -4,6 +4,8 @@ import { Model } from '@janhq/core'
import { useAtomValue } from 'jotai'
import { useGetEngines } from '@/hooks/useEngineManagement'
import ModelItem from '@/screens/Hub/ModelList/ModelItem'
import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
@ -14,6 +16,7 @@ type Props = {
const ModelList = ({ models }: Props) => {
const downloadedModels = useAtomValue(downloadedModelsAtom)
const { engines } = useGetEngines()
const sortedModels: Model[] = useMemo(() => {
const featuredModels: Model[] = []
const remoteModels: Model[] = []
@ -22,7 +25,7 @@ const ModelList = ({ models }: Props) => {
models.forEach((m) => {
if (m.metadata?.tags?.includes('Featured')) {
featuredModels.push(m)
} else if (m.format === 'api') {
} else if (engines?.[m.engine]?.[0]?.type === 'remote') {
remoteModels.push(m)
} else if (downloadedModels.map((m) => m.id).includes(m.id)) {
localModels.push(m)
@ -40,7 +43,7 @@ const ModelList = ({ models }: Props) => {
...remainingModels,
...remoteModels,
]
}, [models, downloadedModels])
}, [models, downloadedModels, engines])
return (
<div className="relative h-full w-full flex-shrink-0">

View File

@ -1,6 +1,11 @@
import { Fragment, useCallback, useState } from 'react'
import { EngineManager, Model, ModelSettingParams } from '@janhq/core'
import {
EngineManager,
InferenceEngine,
Model,
ModelSettingParams,
} from '@janhq/core'
import { Button, Tooltip, Select, Input, Checkbox } from '@janhq/joi'
import { useAtom, useAtomValue, useSetAtom } from 'jotai'
@ -94,7 +99,7 @@ const LocalServerLeftPanel = () => {
localStorage.setItem(FIRST_TIME_VISIT_API_SERVER, 'false')
setFirstTimeVisitAPIServer(false)
}
const engine = EngineManager.instance().get((model as Model).engine)
const engine = EngineManager.instance().get(InferenceEngine.cortex)
engine?.loadModel(model as Model)
// startModel(selectedModel.id, false).catch((e) => console.error(e))
setIsLoading(false)

View File

@ -1,11 +1,14 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
/* eslint-disable react/no-unescaped-entities */
/* eslint-disable @typescript-eslint/no-unused-vars */
import React, { useCallback, useRef, useState, useEffect } from 'react'
import {
EngineConfig as OriginalEngineConfig,
InferenceEngine,
events,
EngineEvent,
} from '@janhq/core'
interface EngineConfig extends OriginalEngineConfig {
@ -64,6 +67,7 @@ const RemoteEngineSettings = ({
set(updatedEngine, field, value)
await updateEngine(name, updatedEngine)
mutate()
events.emit(EngineEvent.OnEngineUpdate, {})
}, 300)
},
[engine, name, mutate]
@ -115,6 +119,8 @@ const RemoteEngineSettings = ({
}
}, [engine])
if (!engine) return null
return (
<ScrollArea className="h-full w-full">
<div className="block w-full px-4">

View File

@ -4,16 +4,16 @@ import { InferenceEngine } from '@janhq/core'
import { ScrollArea } from '@janhq/joi'
import { useAtomValue } from 'jotai'
import { useGetEngines } from '@/hooks/useEngineManagement'
import { isLocalEngine } from '@/utils/modelEngine'
import LocalEngineItems from './LocalEngineItem'
import ModalAddRemoteEngine from './ModalAddRemoteEngine'
import RemoteEngineItems from './RemoteEngineItem'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
const Engines = () => {
const engines = useAtomValue(installedEnginesAtom)
const { engines } = useGetEngines()
return (
<ScrollArea className="h-full w-full">

View File

@ -2,7 +2,7 @@ import { memo, useState } from 'react'
import { Model } from '@janhq/core'
import { Badge, Button, Tooltip, useClickOutside } from '@janhq/joi'
import { useAtom, useAtomValue } from 'jotai'
import { useAtom } from 'jotai'
import {
MoreVerticalIcon,
PlayIcon,
@ -14,11 +14,12 @@ import { twMerge } from 'tailwind-merge'
import { useActiveModel } from '@/hooks/useActiveModel'
import useDeleteModel from '@/hooks/useDeleteModel'
import { useGetEngines } from '@/hooks/useEngineManagement'
import { toGibibytes } from '@/utils/converter'
import { isLocalEngine } from '@/utils/modelEngine'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
type Props = {
@ -32,7 +33,7 @@ const MyModelList = ({ model }: Props) => {
const { deleteModel } = useDeleteModel()
const [more, setMore] = useState(false)
const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
const engines = useAtomValue(installedEnginesAtom)
const { engines } = useGetEngines()
const [menu, setMenu] = useState<HTMLDivElement | null>(null)
const [toggle, setToggle] = useState<HTMLDivElement | null>(null)

View File

@ -36,8 +36,6 @@ import {
import MyModelList from './MyModelList'
import { extensionManager } from '@/extension'
import {
downloadedModelsAtom,
showEngineListModelAtom,
@ -52,9 +50,6 @@ const MyModels = () => {
showEngineListModelAtom
)
const [extensionHasSettings, setExtensionHasSettings] = useState<
{ name?: string; setting: string; apiKey: string; provider: string }[]
>([])
const { engines } = useGetEngines()
const isLocalEngine = useCallback(
@ -97,45 +92,6 @@ const MyModels = () => {
setSearchText(input)
}, [])
useEffect(() => {
const getAllSettings = async () => {
const extensionsMenu: {
name?: string
setting: string
apiKey: string
provider: string
}[] = []
const extensions = extensionManager.getAll()
for (const extension of extensions) {
if (typeof extension.getSettings === 'function') {
const settings = await extension.getSettings()
if (
(settings && settings.length > 0) ||
(await extension.installationState()) !== 'NotRequired'
) {
extensionsMenu.push({
name: extension.productName,
setting: extension.name,
apiKey:
'apiKey' in extension && typeof extension.apiKey === 'string'
? extension.apiKey
: '',
provider:
'provider' in extension &&
typeof extension.provider === 'string'
? extension.provider
: '',
})
}
}
}
setExtensionHasSettings(extensionsMenu)
}
getAllSettings()
}, [])
const findByEngine = filteredDownloadedModels.map((x) => {
// Legacy engine support - they will be grouped under Cortex LlamaCPP
if (x.engine === InferenceEngine.nitro)
@ -158,9 +114,11 @@ const MyModels = () => {
}
})
const getEngineStatusReady: InferenceEngine[] = extensionHasSettings
?.filter((e) => e.apiKey.length > 0)
.map((x) => x.provider as InferenceEngine)
const getEngineStatusReady: InferenceEngine[] = Object.entries(engines ?? {})
// eslint-disable-next-line @typescript-eslint/no-unused-vars
?.filter(([_, value]) => (value?.[0]?.api_key?.length ?? 0) > 0)
// eslint-disable-next-line @typescript-eslint/no-unused-vars
.map(([key, _]) => key as InferenceEngine)
useEffect(() => {
setShowEngineListModel((prev) => [
@ -168,7 +126,7 @@ const MyModels = () => {
...(getEngineStatusReady as InferenceEngine[]),
])
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [setShowEngineListModel, extensionHasSettings])
}, [setShowEngineListModel, engines])
return (
<div {...getRootProps()} className="h-full w-full">

View File

@ -1,6 +1,8 @@
import { InferenceEngine } from '@janhq/core'
import { useAtomValue } from 'jotai'
import { useGetEngines } from '@/hooks/useEngineManagement'
import Advanced from '@/screens/Settings/Advanced'
import AppearanceOptions from '@/screens/Settings/Appearance'
import ExtensionCatalog from '@/screens/Settings/CoreExtensions'
@ -14,12 +16,11 @@ import Privacy from '@/screens/Settings/Privacy'
import { isLocalEngine } from '@/utils/modelEngine'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
import { selectedSettingAtom } from '@/helpers/atoms/Setting.atom'
const SettingDetail = () => {
const selectedSetting = useAtomValue(selectedSettingAtom)
const engines = useAtomValue(installedEnginesAtom)
const { engines } = useGetEngines()
switch (selectedSetting) {
case 'Engines':

View File

@ -6,12 +6,13 @@ import { useAtomValue } from 'jotai'
import LeftPanelContainer from '@/containers/LeftPanelContainer'
import { useGetEngines } from '@/hooks/useEngineManagement'
import { getTitleByEngine, isLocalEngine } from '@/utils/modelEngine'
import SettingItem from './SettingItem'
import { extensionManager } from '@/extension'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
import {
showSettingActiveLocalEngineAtom,
@ -20,7 +21,7 @@ import {
import { janSettingScreenAtom } from '@/helpers/atoms/Setting.atom'
const SettingLeftPanel = () => {
const engines = useAtomValue(installedEnginesAtom)
const { engines } = useGetEngines()
const settingScreens = useAtomValue(janSettingScreenAtom)
const showSettingActiveLocalEngine = useAtomValue(

View File

@ -88,6 +88,7 @@ const AssistantSetting: React.FC<Props> = ({ componentData }) => {
setEngineParamsUpdate,
stopModel,
updateThreadMetadata,
resetGenerating,
]
)

View File

@ -7,16 +7,17 @@ import LogoMark from '@/containers/Brand/Logo/Mark'
import { MainViewState } from '@/constants/screens'
import { useGetEngines } from '@/hooks/useEngineManagement'
import { isLocalEngine } from '@/utils/modelEngine'
import { mainViewStateAtom } from '@/helpers/atoms/App.atom'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
const EmptyThread = () => {
const downloadedModels = useAtomValue(downloadedModelsAtom)
const setMainViewState = useSetAtom(mainViewStateAtom)
const engines = useAtomValue(installedEnginesAtom)
const { engines } = useGetEngines()
const showOnboardingStep = useMemo(
() =>
!downloadedModels.some(

View File

@ -24,6 +24,8 @@ import useDownloadModel from '@/hooks/useDownloadModel'
import { modelDownloadStateAtom } from '@/hooks/useDownloadState'
import { useGetEngines } from '@/hooks/useEngineManagement'
import { formatDownloadPercentage, toGibibytes } from '@/utils/converter'
import { manualRecommendationModel } from '@/utils/model'
import {
@ -33,7 +35,6 @@ import {
} from '@/utils/modelEngine'
import { mainViewStateAtom } from '@/helpers/atoms/App.atom'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
import {
configuredModelsAtom,
getDownloadingModelAtom,
@ -51,7 +52,7 @@ const OnDeviceStarterScreen = ({ isShowStarterScreen }: Props) => {
const { downloadModel } = useDownloadModel()
const downloadStates = useAtomValue(modelDownloadStateAtom)
const setSelectedSetting = useSetAtom(selectedSettingAtom)
const engines = useAtomValue(installedEnginesAtom)
const { engines } = useGetEngines()
const configuredModels = useAtomValue(configuredModelsAtom)
const setMainViewState = useSetAtom(mainViewStateAtom)
@ -298,40 +299,46 @@ const OnDeviceStarterScreen = ({ isShowStarterScreen }: Props) => {
key={rowIndex}
className="my-2 flex items-center gap-4 md:gap-10"
>
{row.map((remoteEngine) => {
const engineLogo = getLogoEngine(
remoteEngine as InferenceEngine
{row
.filter(
(e) =>
engines?.[e as InferenceEngine]?.[0]?.type ===
'remote'
)
.map((remoteEngine) => {
const engineLogo = getLogoEngine(
remoteEngine as InferenceEngine
)
return (
<div
className="flex cursor-pointer flex-col items-center justify-center gap-4"
key={remoteEngine}
onClick={() => {
setMainViewState(MainViewState.Settings)
setSelectedSetting(
remoteEngine as InferenceEngine
)
}}
>
{engineLogo && (
<Image
width={48}
height={48}
src={engineLogo}
alt="Engine logo"
className="h-10 w-10 flex-shrink-0"
/>
)}
<p className="font-medium">
{getTitleByEngine(
remoteEngine as InferenceEngine
return (
<div
className="flex cursor-pointer flex-col items-center justify-center gap-4"
key={remoteEngine}
onClick={() => {
setMainViewState(MainViewState.Settings)
setSelectedSetting(
remoteEngine as InferenceEngine
)
}}
>
{engineLogo && (
<Image
width={48}
height={48}
src={engineLogo}
alt="Engine logo"
className="h-10 w-10 flex-shrink-0"
/>
)}
</p>
</div>
)
})}
<p className="font-medium">
{getTitleByEngine(
remoteEngine as InferenceEngine
)}
</p>
</div>
)
})}
</div>
)
})}

View File

@ -22,6 +22,7 @@ import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
import { useActiveModel } from '@/hooks/useActiveModel'
import { useGetEngines } from '@/hooks/useEngineManagement'
import useSendChatMessage from '@/hooks/useSendChatMessage'
import { uploader } from '@/utils/file'
@ -35,7 +36,6 @@ import RichTextEditor from './RichTextEditor'
import { showRightPanelAtom } from '@/helpers/atoms/App.atom'
import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
import { selectedModelAtom } from '@/helpers/atoms/Model.atom'
import { spellCheckAtom } from '@/helpers/atoms/Setting.atom'
import {
@ -64,7 +64,7 @@ const ChatInput = () => {
const textareaRef = useRef<HTMLTextAreaElement>(null)
const fileInputRef = useRef<HTMLInputElement>(null)
const imageInputRef = useRef<HTMLInputElement>(null)
const engines = useAtomValue(installedEnginesAtom)
const { engines } = useGetEngines()
const experimentalFeature = useAtomValue(experimentalFeatureEnabledAtom)
const isBlockingSend = useAtomValue(isBlockingSendAtom)
const activeAssistant = useAtomValue(activeAssistantAtom)

View File

@ -1,4 +1,4 @@
import { EngineManager } from '@janhq/core'
import { EngineManager, InferenceEngine } from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
import ModalTroubleShooting, {
@ -35,7 +35,7 @@ const LoadModelError = () => {
setMainState(MainViewState.Settings)
if (activeAssistant?.model.engine) {
const engine = EngineManager.instance().get(
activeAssistant.model.engine
InferenceEngine.cortex
)
engine?.name && setSelectedSettingScreen(engine.name)
}

View File

@ -29,6 +29,7 @@ import RightPanelContainer from '@/containers/RightPanelContainer'
import { useActiveModel } from '@/hooks/useActiveModel'
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
import { useGetEngines } from '@/hooks/useEngineManagement'
import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
import { getConfigurationsData } from '@/utils/componentSettings'
@ -39,7 +40,7 @@ import Tools from './Tools'
import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
import { installedEnginesAtom } from '@/helpers/atoms/Engines.atom'
import { selectedModelAtom } from '@/helpers/atoms/Model.atom'
import {
activeThreadAtom,
@ -61,7 +62,7 @@ const ThreadRightPanel = () => {
const [activeTabThreadRightPanel, setActiveTabThreadRightPanel] = useAtom(
activeTabThreadRightPanelAtom
)
const engines = useAtomValue(installedEnginesAtom)
const { engines } = useGetEngines()
const { updateThreadMetadata } = useCreateNewThread()
const experimentalFeature = useAtomValue(experimentalFeatureEnabledAtom)

View File

@ -167,7 +167,6 @@ export class MessageRequestBuilder {
messages: this.normalizeMessages(this.messages),
model: this.model,
thread: this.thread,
engine: this.model.engine,
}
}
}