* feat: remote engine management * chore: fix linter issue * chore: remove unused imports * fix: populate engines, models and legacy settings (#4403) * fix: populate engines, models and legacy settings * chore: legacy logics update configured remote engine * fix: check exist path before reading * fix: engines and models persist - race condition * chore: update issue state * test: update test cases * chore: bring back Cortex extension settings * chore: setup button gear / plus based apikey * chore: fix remote engine from welcome screen * chore: resolve linter issue * chore: support request headers template * chore: update engines using header_template instead of api_key_template * chore: update models on changes * fix: anthropic response template * chore: fix welcome screen and debounce update value input * chore: update engines list on changes * chore: update engines list on change * chore: update desc form add modal remote engines * chore: bump cortex version to latest RC * chore: fix linter * fix: transform payload of Anthropic and OpenAI * fix: typo * fix: openrouter model id for auto routing * chore: remove remote engine URL setting * chore: add cohere engine and model support * fix: should not clean on app launch - models list display issue * fix: local engine check logic * chore: bump app version to latest release 0.5.13 * test: fix failed tests --------- Co-authored-by: Louis <louis@jan.ai>
175 lines
5.7 KiB
TypeScript
import { EngineManager, InferenceEngine, LocalOAIEngine } from '@janhq/core'
|
|
import {
|
|
getTitleByEngine,
|
|
isLocalEngine,
|
|
priorityEngine,
|
|
getLogoEngine,
|
|
} from './modelEngine'
|
|
|
|
jest.mock('@janhq/core', () => ({
|
|
...jest.requireActual('@janhq/core'),
|
|
EngineManager: {
|
|
instance: jest.fn().mockReturnValue({
|
|
get: jest.fn(),
|
|
}),
|
|
},
|
|
}))
|
|
|
|
describe('isLocalEngine', () => {
|
|
const mockEngineManagerInstance = EngineManager.instance()
|
|
const mockGet = mockEngineManagerInstance.get as jest.Mock
|
|
|
|
beforeEach(() => {
|
|
jest.clearAllMocks()
|
|
})
|
|
|
|
it('should return false if engine is not found', () => {
|
|
mockGet.mockReturnValue(null)
|
|
const result = isLocalEngine(
|
|
{
|
|
'llama-cpp': [],
|
|
} as any,
|
|
'nonexistentEngine'
|
|
)
|
|
expect(result).toBe(false)
|
|
})
|
|
it('should return true if this is a local engine', () => {
|
|
mockGet.mockReturnValue(null)
|
|
const result = isLocalEngine(
|
|
{
|
|
'llama-cpp': [
|
|
{
|
|
type: 'local',
|
|
},
|
|
],
|
|
} as any,
|
|
'llama-cpp'
|
|
)
|
|
expect(result).toBe(true)
|
|
})
|
|
|
|
jest.mock('@janhq/core', () => ({
|
|
...jest.requireActual('@janhq/core'),
|
|
EngineManager: {
|
|
instance: jest.fn().mockReturnValue({
|
|
get: jest.fn(),
|
|
}),
|
|
},
|
|
}))
|
|
|
|
describe('getTitleByEngine', () => {
|
|
it('should return correct title for InferenceEngine.nitro', () => {
|
|
const result = getTitleByEngine(InferenceEngine.nitro)
|
|
expect(result).toBe('Llama.cpp')
|
|
})
|
|
|
|
it('should return correct title for InferenceEngine.nitro_tensorrt_llm', () => {
|
|
const result = getTitleByEngine(InferenceEngine.nitro_tensorrt_llm)
|
|
expect(result).toBe('TensorRT-LLM (Nitro)')
|
|
})
|
|
|
|
it('should return correct title for InferenceEngine.cortex_llamacpp', () => {
|
|
const result = getTitleByEngine(InferenceEngine.cortex_llamacpp)
|
|
expect(result).toBe('Llama.cpp')
|
|
})
|
|
|
|
it('should return correct title for InferenceEngine.cortex_onnx', () => {
|
|
const result = getTitleByEngine(InferenceEngine.cortex_onnx)
|
|
expect(result).toBe('Onnx')
|
|
})
|
|
|
|
it('should return correct title for InferenceEngine.cortex_tensorrtllm', () => {
|
|
const result = getTitleByEngine(InferenceEngine.cortex_tensorrtllm)
|
|
expect(result).toBe('TensorRT-LLM')
|
|
})
|
|
|
|
it('should return correct title for InferenceEngine.openai', () => {
|
|
const result = getTitleByEngine(InferenceEngine.openai)
|
|
expect(result).toBe('OpenAI')
|
|
})
|
|
|
|
it('should return correct title for InferenceEngine.openrouter', () => {
|
|
const result = getTitleByEngine(InferenceEngine.openrouter)
|
|
expect(result).toBe('OpenRouter')
|
|
})
|
|
|
|
it('should return capitalized engine name for unknown engine', () => {
|
|
const result = getTitleByEngine('unknownEngine' as InferenceEngine)
|
|
expect(result).toBe('UnknownEngine')
|
|
})
|
|
})
|
|
|
|
describe('priorityEngine', () => {
|
|
it('should contain the correct engines in the correct order', () => {
|
|
expect(priorityEngine).toEqual([
|
|
InferenceEngine.cortex_llamacpp,
|
|
InferenceEngine.cortex_onnx,
|
|
InferenceEngine.cortex_tensorrtllm,
|
|
InferenceEngine.nitro,
|
|
])
|
|
})
|
|
})
|
|
|
|
describe('getLogoEngine', () => {
|
|
it('should return correct logo path for InferenceEngine.anthropic', () => {
|
|
const result = getLogoEngine(InferenceEngine.anthropic)
|
|
expect(result).toBe('images/ModelProvider/anthropic.svg')
|
|
})
|
|
|
|
it('should return correct logo path for InferenceEngine.nitro_tensorrt_llm', () => {
|
|
const result = getLogoEngine(InferenceEngine.nitro_tensorrt_llm)
|
|
expect(result).toBe('images/ModelProvider/nitro.svg')
|
|
})
|
|
|
|
it('should return correct logo path for InferenceEngine.cortex_llamacpp', () => {
|
|
const result = getLogoEngine(InferenceEngine.cortex_llamacpp)
|
|
expect(result).toBe('images/ModelProvider/cortex.svg')
|
|
})
|
|
|
|
it('should return correct logo path for InferenceEngine.mistral', () => {
|
|
const result = getLogoEngine(InferenceEngine.mistral)
|
|
expect(result).toBe('images/ModelProvider/mistral.svg')
|
|
})
|
|
|
|
it('should return correct logo path for InferenceEngine.martian', () => {
|
|
const result = getLogoEngine(InferenceEngine.martian)
|
|
expect(result).toBe('images/ModelProvider/martian.svg')
|
|
})
|
|
|
|
it('should return correct logo path for InferenceEngine.openrouter', () => {
|
|
const result = getLogoEngine(InferenceEngine.openrouter)
|
|
expect(result).toBe('images/ModelProvider/openRouter.svg')
|
|
})
|
|
|
|
it('should return correct logo path for InferenceEngine.openai', () => {
|
|
const result = getLogoEngine(InferenceEngine.openai)
|
|
expect(result).toBe('images/ModelProvider/openai.svg')
|
|
})
|
|
|
|
it('should return correct logo path for InferenceEngine.groq', () => {
|
|
const result = getLogoEngine(InferenceEngine.groq)
|
|
expect(result).toBe('images/ModelProvider/groq.svg')
|
|
})
|
|
|
|
it('should return correct logo path for InferenceEngine.triton_trtllm', () => {
|
|
const result = getLogoEngine(InferenceEngine.triton_trtllm)
|
|
expect(result).toBe('images/ModelProvider/triton_trtllm.svg')
|
|
})
|
|
|
|
it('should return correct logo path for InferenceEngine.cohere', () => {
|
|
const result = getLogoEngine(InferenceEngine.cohere)
|
|
expect(result).toBe('images/ModelProvider/cohere.svg')
|
|
})
|
|
|
|
it('should return correct logo path for InferenceEngine.nvidia', () => {
|
|
const result = getLogoEngine(InferenceEngine.nvidia)
|
|
expect(result).toBe('images/ModelProvider/nvidia.svg')
|
|
})
|
|
|
|
it('should return undefined for unknown engine', () => {
|
|
const result = getLogoEngine('unknownEngine' as InferenceEngine)
|
|
expect(result).toBeUndefined()
|
|
})
|
|
})
|
|
})
|