Merge pull request #4480 from janhq/chore/correct-mistral-ai-request-transformation-template

chore: remote provider error handling and bug fix
This commit is contained in:
Louis 2025-01-17 18:18:34 +07:00 committed by GitHub
commit 0547cac0bf
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 14 additions and 52 deletions

View File

@@ -91,8 +91,12 @@ export function requestInference(
const toParse = cachedLines + line const toParse = cachedLines + line
if (!line.includes('data: [DONE]')) { if (!line.includes('data: [DONE]')) {
const data = JSON.parse(toParse.replace('data: ', '')) const data = JSON.parse(toParse.replace('data: ', ''))
if ('error' in data) { if (
subscriber.error(data.error) 'error' in data ||
'message' in data ||
'detail' in data
) {
subscriber.error(data.error ?? data)
subscriber.complete() subscriber.complete()
return return
} }

View File

@@ -10,7 +10,7 @@
"transform_req": { "transform_req": {
"chat_completions": { "chat_completions": {
"url": "https://api.mistral.ai/v1/chat/completions", "url": "https://api.mistral.ai/v1/chat/completions",
"template": "{{tojson(input_request)}}" "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"messages\" or key == \"model\" or key == \"temperature\" or key == \"store\" or key == \"max_tokens\" or key == \"stream\" or key == \"presence_penalty\" or key == \"metadata\" or key == \"frequency_penalty\" or key == \"tools\" or key == \"tool_choice\" or key == \"logprobs\" or key == \"top_logprobs\" or key == \"logit_bias\" or key == \"n\" or key == \"modalities\" or key == \"prediction\" or key == \"response_format\" or key == \"service_tier\" or key == \"seed\" or key == \"stop\" or key == \"stream_options\" or key == \"top_p\" or key == \"parallel_tool_calls\" or key == \"user\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }"
} }
}, },
"transform_resp": { "transform_resp": {

View File

@@ -1 +1 @@
1.0.9-rc3 1.0.9-rc4

View File

@@ -36,8 +36,6 @@ import {
import MyModelList from './MyModelList' import MyModelList from './MyModelList'
import { extensionManager } from '@/extension'
import { import {
downloadedModelsAtom, downloadedModelsAtom,
showEngineListModelAtom, showEngineListModelAtom,
@@ -52,9 +50,6 @@ const MyModels = () => {
showEngineListModelAtom showEngineListModelAtom
) )
const [extensionHasSettings, setExtensionHasSettings] = useState<
{ name?: string; setting: string; apiKey: string; provider: string }[]
>([])
const { engines } = useGetEngines() const { engines } = useGetEngines()
const isLocalEngine = useCallback( const isLocalEngine = useCallback(
@@ -97,45 +92,6 @@ const MyModels = () => {
setSearchText(input) setSearchText(input)
}, []) }, [])
useEffect(() => {
const getAllSettings = async () => {
const extensionsMenu: {
name?: string
setting: string
apiKey: string
provider: string
}[] = []
const extensions = extensionManager.getAll()
for (const extension of extensions) {
if (typeof extension.getSettings === 'function') {
const settings = await extension.getSettings()
if (
(settings && settings.length > 0) ||
(await extension.installationState()) !== 'NotRequired'
) {
extensionsMenu.push({
name: extension.productName,
setting: extension.name,
apiKey:
'apiKey' in extension && typeof extension.apiKey === 'string'
? extension.apiKey
: '',
provider:
'provider' in extension &&
typeof extension.provider === 'string'
? extension.provider
: '',
})
}
}
}
setExtensionHasSettings(extensionsMenu)
}
getAllSettings()
}, [])
const findByEngine = filteredDownloadedModels.map((x) => { const findByEngine = filteredDownloadedModels.map((x) => {
// Legacy engine support - they will be grouped under Cortex LlamaCPP // Legacy engine support - they will be grouped under Cortex LlamaCPP
if (x.engine === InferenceEngine.nitro) if (x.engine === InferenceEngine.nitro)
@@ -158,9 +114,11 @@ const MyModels = () => {
} }
}) })
const getEngineStatusReady: InferenceEngine[] = extensionHasSettings const getEngineStatusReady: InferenceEngine[] = Object.entries(engines ?? {})
?.filter((e) => e.apiKey.length > 0) // eslint-disable-next-line @typescript-eslint/no-unused-vars
.map((x) => x.provider as InferenceEngine) ?.filter(([_, value]) => (value?.[0]?.api_key?.length ?? 0) > 0)
// eslint-disable-next-line @typescript-eslint/no-unused-vars
.map(([key, _]) => key as InferenceEngine)
useEffect(() => { useEffect(() => {
setShowEngineListModel((prev) => [ setShowEngineListModel((prev) => [
@@ -168,7 +126,7 @@ const MyModels = () => {
...(getEngineStatusReady as InferenceEngine[]), ...(getEngineStatusReady as InferenceEngine[]),
]) ])
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
}, [setShowEngineListModel, extensionHasSettings]) }, [setShowEngineListModel, engines])
return ( return (
<div {...getRootProps()} className="h-full w-full"> <div {...getRootProps()} className="h-full w-full">