diff --git a/web-app/src/mock/data.ts b/web-app/src/mock/data.ts
index 9ee3fdb2d..128ec38da 100644
--- a/web-app/src/mock/data.ts
+++ b/web-app/src/mock/data.ts
@@ -24,133 +24,7 @@ export const openAIProviderSettings = [
     },
   },
 ]
-export const mockModelProvider = [
-  // {
-  //   active: true,
-  //   provider: 'llama.cpp',
-  //   settings: [
-  //     {
-  //       key: 'cont_batching',
-  //       title: 'Continuous Batching',
-  //       description:
-  //         'Allows processing prompts in parallel with text generation, which usually improves performance.',
-  //       controller_type: 'checkbox',
-  //       controller_props: {
-  //         value: true,
-  //       },
-  //     },
-  //     {
-  //       key: 'n_parallel',
-  //       title: 'Parallel Operations',
-  //       description:
-  //         'Number of prompts that can be processed simultaneously by the model.',
-  //       controller_type: 'input',
-  //       controller_props: {
-  //         value: '4',
-  //         placeholder: '4',
-  //         type: 'number',
-  //       },
-  //     },
-  //     {
-  //       key: 'cpu_threads',
-  //       title: 'CPU Threads',
-  //       description:
-  //         'Number of CPU cores used for model processing when running without GPU.',
-  //       controller_type: 'input',
-  //       controller_props: {
-  //         value: '1',
-  //         placeholder: '1',
-  //         type: 'number',
-  //       },
-  //     },
-  //     {
-  //       key: 'flash_attn',
-  //       title: 'Flash Attention',
-  //       description:
-  //         'Optimizes memory usage and speeds up model inference using an efficient attention implementation.',
-  //       controller_type: 'checkbox',
-  //       controller_props: {
-  //         value: true,
-  //       },
-  //     },
-
-  //     {
-  //       key: 'caching_enabled',
-  //       title: 'Caching',
-  //       description:
-  //         'Stores recent prompts and responses to improve speed when similar questions are asked.',
-  //       controller_type: 'checkbox',
-  //       controller_props: {
-  //         value: true,
-  //       },
-  //     },
-  //     {
-  //       key: 'cache_type',
-  //       title: 'KV Cache Type',
-  //       description: 'Controls memory usage and precision trade-off.',
-  //       controller_type: 'dropdown',
-  //       controller_props: {
-  //         value: 'f16',
-  //         options: [
-  //           {
-  //             value: 'q4_0',
-  //             name: 'q4_0',
-  //           },
-  //           {
-  //             value: 'q8_0',
-  //             name: 'q8_0',
-  //           },
-  //           {
-  //             value: 'f16',
-  //             name: 'f16',
-  //           },
-  //         ],
-  //       },
-  //     },
-  //     {
-  //       key: 'use_mmap',
-  //       title: 'mmap',
-  //       description:
-  //         'Loads model files more efficiently by mapping them to memory, reducing RAM usage.',
-  //       controller_type: 'checkbox',
-  //       controller_props: {
-  //         value: true,
-  //       },
-  //     },
-  //   ],
-  //   models: [
-  //     {
-  //       id: 'llama3.2:3b',
-  //       model: 'llama3.2:3b',
-  //       name: 'llama3.2:3b',
-  //       capabilities: ['completion', 'tools'],
-  //       version: 2,
-  //       settings: {
-  //         prompt_template:
-  //           '<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n',
-  //         ctx_len: 4096,
-  //         n_parallel: 1,
-  //         cpu_threads: 1,
-  //         ngl: 29,
-  //       },
-  //     },
-  //     {
-  //       id: 'deepseek-r1.2:3b',
-  //       model: 'deepseek-r1.2:3b',
-  //       name: 'deepseek-r1.2:3b',
-  //       capabilities: ['completion', 'tools'],
-  //       version: 2,
-  //       settings: {
-  //         prompt_template:
-  //           '<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n',
-  //         ctx_len: 4096,
-  //         n_parallel: 1,
-  //         cpu_threads: 1,
-  //         ngl: 29,
-  //       },
-  //     },
-  //   ],
-  // },
+export const predefinedProviders = [
   {
     active: true,
     api_key: '',
@@ -407,55 +281,4 @@ export const mockModelProvider = [
     ],
     models: [],
   },
-  // {
-  //   active: true,
-  //   api_key: '',
-  //   base_url: 'https://api.deepseek.com',
-  //   explore_models_url: 'https://api-docs.deepseek.com/quick_start/pricing',
-  //   provider: 'deepseek',
-  //   settings: [
-  //     {
-  //       key: 'api-key',
-  //       title: 'API Key',
-  //       description:
-  //         "The DeepSeek API uses API keys for authentication. Visit your [API Keys](https://platform.deepseek.com/api_keys) page to retrieve the API key you'll use in your requests.",
-  //       controller_type: 'input',
-  //       controller_props: {
-  //         placeholder: 'Insert API Key',
-  //         value: '',
-  //         type: 'password',
-  //         input_actions: ['unobscure', 'copy'],
-  //       },
-  //     },
-  //     {
-  //       key: 'base-url',
-  //       title: 'Base URL',
-  //       description:
-  //         'The base endpoint to use. See the [DeepSeek documentation](https://api-docs.deepseek.com/) for more information.',
-  //       controller_type: 'input',
-  //       controller_props: {
-  //         placeholder: 'https://api.deepseek.com',
-  //         value: 'https://api.deepseek.com',
-  //       },
-  //     },
-  //   ],
-  //   models: [
-  //     {
-  //       id: 'deepseek-chat',
-  //       name: 'DeepSeek-V3',
-  //       version: '1.0',
-  //       description:
-  //         'The deepseek-chat model has been upgraded to DeepSeek-V3. deepseek-reasoner points to the new model DeepSeek-R1',
-  //       capabilities: ['completion'],
-  //     },
-  //     {
-  //       id: 'deepseek-reasoner',
-  //       name: 'DeepSeek-R1',
-  //       version: '1.0',
-  //       description:
-  //         'CoT (Chain of Thought) is the reasoning content deepseek-reasoner gives before output the final answer. For details, please refer to Reasoning Model.',
-  //       capabilities: ['completion'],
-  //     },
-  //   ],
-  // },
 ]
diff --git a/web-app/src/routes/settings/providers/$providerName.tsx b/web-app/src/routes/settings/providers/$providerName.tsx
index d4dfaa9d5..5b1250e23 100644
--- a/web-app/src/routes/settings/providers/$providerName.tsx
+++ b/web-app/src/routes/settings/providers/$providerName.tsx
@@ -37,6 +37,7 @@ import { getProviders } from '@/services/providers'
 import { toast } from 'sonner'
 import { ActiveModel } from '@/types/models'
 import { useEffect, useState } from 'react'
+import { predefinedProviders } from '@/mock/data'

 // as route.threadsDetail
 export const Route = createFileRoute('/settings/providers/$providerName')({
@@ -62,7 +63,7 @@ const steps = [
     title: 'Get Your API Key',
     disableBeacon: true,
     content:
-      'Log into the provider’s dashboard to find or generate your API key.',
+      "Log into the provider's dashboard to find or generate your API key.",
   },
   {
     target: '.third-step-setup-remote-provider',
@@ -357,32 +358,36 @@ function ProviderDetail() {
           {provider && provider.provider !== 'llama.cpp' && (
             <>
-
+              {!predefinedProviders.some(
+                (p) => p.provider === provider.provider
+              ) && (
+
+              )}
             </>
           )}
diff --git a/web-app/src/services/providers.ts b/web-app/src/services/providers.ts
index 1ac7754da..3b877d39f 100644
--- a/web-app/src/services/providers.ts
+++ b/web-app/src/services/providers.ts
@@ -1,5 +1,5 @@
 import { models as providerModels } from 'token.js'
-import { mockModelProvider } from '@/mock/data'
+import { predefinedProviders } from '@/mock/data'
 import {
   EngineManagementExtension,
   EngineManager,
@@ -17,7 +17,7 @@ export const getProviders = async (): Promise => {
       .get(ExtensionTypeEnum.Engine)
       ?.getEngines()
     : {}
-  const builtinProviders = mockModelProvider.map((provider) => {
+  const builtinProviders = predefinedProviders.map((provider) => {
     let models = provider.models as Model[]
     if (Object.keys(providerModels).includes(provider.provider)) {
       const builtInModels = providerModels[