jan/web/utils/modelEngine.ts
import { Engines, InferenceEngine } from '@janhq/core'

/**
 * Get the provider logo asset for an inference engine.
 * @param engine the inference engine to look up
 * @returns the relative logo asset path, or undefined when none is registered
 */
export const getLogoEngine = (engine: InferenceEngine) => {
  switch (engine) {
    case InferenceEngine.nitro:
    case InferenceEngine.cortex_llamacpp:
    case InferenceEngine.cortex_onnx:
    case InferenceEngine.cortex_tensorrtllm:
      return 'images/ModelProvider/cortex.svg'
    case InferenceEngine.anthropic:
      return 'images/ModelProvider/anthropic.svg'
    case InferenceEngine.nitro_tensorrt_llm:
      return 'images/ModelProvider/nitro.svg'
    case InferenceEngine.mistral:
      return 'images/ModelProvider/mistral.svg'
    case InferenceEngine.martian:
      return 'images/ModelProvider/martian.svg'
    case InferenceEngine.openrouter:
      return 'images/ModelProvider/openRouter.svg'
    case InferenceEngine.openai:
      return 'images/ModelProvider/openai.svg'
    case InferenceEngine.groq:
      return 'images/ModelProvider/groq.svg'
    case InferenceEngine.triton_trtllm:
      return 'images/ModelProvider/triton_trtllm.svg'
    case InferenceEngine.cohere:
      return 'images/ModelProvider/cohere.svg'
    case InferenceEngine.nvidia:
      return 'images/ModelProvider/nvidia.svg'
    default:
      return undefined
  }
}
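
// Illustrative usage (not part of the original module): callers can fall back
// to a generic asset when an engine has no registered logo. The fallback path
// below is a hypothetical placeholder, not a real bundled asset.
//
//   const logoPath =
//     getLogoEngine(engine) ?? 'images/ModelProvider/default.svg'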
/**
 * Check whether the engine conforms to LocalOAIEngine, i.e. is registered
 * with type 'local' rather than as a remote (API-key based) engine.
 * @param engines the engines map, keyed by engine name
 * @param engine the engine name to check
 * @returns true when the engine's first registered variant has type 'local'
 */
export const isLocalEngine = (engines?: Engines, engine?: string) => {
  if (!engines || !engine) return false
  return engines[engine as InferenceEngine]?.[0]?.type === 'local'
}
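
// Illustrative usage (assumes `engines` was fetched from the engine management
// extension and that the hypothetical `model.engine` names a registered engine):
//
//   const needsApiKey = !isLocalEngine(engines, model.engine)
//
// Remote engines (OpenAI, Anthropic, Cohere, ...) return false here, so the UI
// can branch between local model management and API-key configuration.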
/**
 * Get the human-readable display title for an inference engine.
 * Unlisted engines fall back to the capitalized raw engine name.
 */
export const getTitleByEngine = (engine: InferenceEngine) => {
  switch (engine) {
    case InferenceEngine.nitro:
    case InferenceEngine.cortex_llamacpp:
      return 'Llama.cpp'
    case InferenceEngine.nitro_tensorrt_llm:
      return 'TensorRT-LLM (Nitro)'
    case InferenceEngine.cortex_onnx:
      return 'ONNX'
    case InferenceEngine.cortex_tensorrtllm:
      return 'TensorRT-LLM'
    case InferenceEngine.openai:
      return 'OpenAI'
    case InferenceEngine.openrouter:
      return 'OpenRouter'
    default:
      return engine.charAt(0).toUpperCase() + engine.slice(1)
  }
}
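
// Illustrative behavior of the default branch (assuming the enum values are
// the lowercase engine names):
//
//   getTitleByEngine(InferenceEngine.cortex_llamacpp) // 'Llama.cpp'
//   getTitleByEngine(InferenceEngine.anthropic)       // 'Anthropic'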
/** Local engines listed in display-priority order. */
export const priorityEngine = [
  InferenceEngine.cortex_llamacpp,
  InferenceEngine.cortex_onnx,
  InferenceEngine.cortex_tensorrtllm,
  InferenceEngine.nitro,
]
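
// Illustrative sort helper (an assumption about how a priority list like this
// is consumed; `Infinity` pushes engines not in the list to the end):
//
//   const byPriority = (a: InferenceEngine, b: InferenceEngine) => {
//     const rank = (e: InferenceEngine) => {
//       const i = priorityEngine.indexOf(e)
//       return i === -1 ? Infinity : i
//     }
//     return rank(a) - rank(b)
//   }
//
//   engineList.sort(byPriority)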