fix: conditional button refresh model (#5221)

This commit is contained in:
Faisal Amir 2025-06-09 17:43:48 +07:00 committed by GitHub
parent 1bbac32d88
commit 1d69101f97
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 35 additions and 207 deletions

View File

@@ -24,133 +24,7 @@ export const openAIProviderSettings = [
},
},
]
export const mockModelProvider = [
// {
// active: true,
// provider: 'llama.cpp',
// settings: [
// {
// key: 'cont_batching',
// title: 'Continuous Batching',
// description:
// 'Allows processing prompts in parallel with text generation, which usually improves performance.',
// controller_type: 'checkbox',
// controller_props: {
// value: true,
// },
// },
// {
// key: 'n_parallel',
// title: 'Parallel Operations',
// description:
// 'Number of prompts that can be processed simultaneously by the model.',
// controller_type: 'input',
// controller_props: {
// value: '4',
// placeholder: '4',
// type: 'number',
// },
// },
// {
// key: 'cpu_threads',
// title: 'CPU Threads',
// description:
// 'Number of CPU cores used for model processing when running without GPU.',
// controller_type: 'input',
// controller_props: {
// value: '1',
// placeholder: '1',
// type: 'number',
// },
// },
// {
// key: 'flash_attn',
// title: 'Flash Attention',
// description:
// 'Optimizes memory usage and speeds up model inference using an efficient attention implementation.',
// controller_type: 'checkbox',
// controller_props: {
// value: true,
// },
// },
// {
// key: 'caching_enabled',
// title: 'Caching',
// description:
// 'Stores recent prompts and responses to improve speed when similar questions are asked.',
// controller_type: 'checkbox',
// controller_props: {
// value: true,
// },
// },
// {
// key: 'cache_type',
// title: 'KV Cache Type',
// description: 'Controls memory usage and precision trade-off.',
// controller_type: 'dropdown',
// controller_props: {
// value: 'f16',
// options: [
// {
// value: 'q4_0',
// name: 'q4_0',
// },
// {
// value: 'q8_0',
// name: 'q8_0',
// },
// {
// value: 'f16',
// name: 'f16',
// },
// ],
// },
// },
// {
// key: 'use_mmap',
// title: 'mmap',
// description:
// 'Loads model files more efficiently by mapping them to memory, reducing RAM usage.',
// controller_type: 'checkbox',
// controller_props: {
// value: true,
// },
// },
// ],
// models: [
// {
// id: 'llama3.2:3b',
// model: 'llama3.2:3b',
// name: 'llama3.2:3b',
// capabilities: ['completion', 'tools'],
// version: 2,
// settings: {
// prompt_template:
// '<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n',
// ctx_len: 4096,
// n_parallel: 1,
// cpu_threads: 1,
// ngl: 29,
// },
// },
// {
// id: 'deepseek-r1.2:3b',
// model: 'deepseek-r1.2:3b',
// name: 'deepseek-r1.2:3b',
// capabilities: ['completion', 'tools'],
// version: 2,
// settings: {
// prompt_template:
// '<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n',
// ctx_len: 4096,
// n_parallel: 1,
// cpu_threads: 1,
// ngl: 29,
// },
// },
// ],
// },
export const predefinedProviders = [
{
active: true,
api_key: '',
@@ -407,55 +281,4 @@ export const mockModelProvider = [
],
models: [],
},
// {
// active: true,
// api_key: '',
// base_url: 'https://api.deepseek.com',
// explore_models_url: 'https://api-docs.deepseek.com/quick_start/pricing',
// provider: 'deepseek',
// settings: [
// {
// key: 'api-key',
// title: 'API Key',
// description:
// "The DeepSeek API uses API keys for authentication. Visit your [API Keys](https://platform.deepseek.com/api_keys) page to retrieve the API key you'll use in your requests.",
// controller_type: 'input',
// controller_props: {
// placeholder: 'Insert API Key',
// value: '',
// type: 'password',
// input_actions: ['unobscure', 'copy'],
// },
// },
// {
// key: 'base-url',
// title: 'Base URL',
// description:
// 'The base endpoint to use. See the [DeepSeek documentation](https://api-docs.deepseek.com/) for more information.',
// controller_type: 'input',
// controller_props: {
// placeholder: 'https://api.deepseek.com',
// value: 'https://api.deepseek.com',
// },
// },
// ],
// models: [
// {
// id: 'deepseek-chat',
// name: 'DeepSeek-V3',
// version: '1.0',
// description:
// 'The deepseek-chat model has been upgraded to DeepSeek-V3. deepseek-reasoner points to the new model DeepSeek-R1',
// capabilities: ['completion'],
// },
// {
// id: 'deepseek-reasoner',
// name: 'DeepSeek-R1',
// version: '1.0',
// description:
// 'CoT (Chain of Thought) is the reasoning content deepseek-reasoner gives before output the final answer. For details, please refer to Reasoning Model.',
// capabilities: ['completion'],
// },
// ],
// },
]

View File

@@ -37,6 +37,7 @@ import { getProviders } from '@/services/providers'
import { toast } from 'sonner'
import { ActiveModel } from '@/types/models'
import { useEffect, useState } from 'react'
import { predefinedProviders } from '@/mock/data'
// as route.threadsDetail
export const Route = createFileRoute('/settings/providers/$providerName')({
@@ -62,7 +63,7 @@ const steps = [
title: 'Get Your API Key',
disableBeacon: true,
content:
'Log into the providers dashboard to find or generate your API key.',
"Log into the provider's dashboard to find or generate your API key.",
},
{
target: '.third-step-setup-remote-provider',
@@ -357,32 +358,36 @@ function ProviderDetail() {
<div className="flex items-center gap-2">
{provider && provider.provider !== 'llama.cpp' && (
<>
<Button
variant="link"
size="sm"
className="hover:no-underline"
onClick={handleRefreshModels}
disabled={refreshingModels}
>
<div className="cursor-pointer flex items-center justify-center rounded hover:bg-main-view-fg/15 bg-main-view-fg/10 transition-all duration-200 ease-in-out px-1.5 py-1 gap-1">
{refreshingModels ? (
<IconLoader
size={18}
className="text-main-view-fg/50 animate-spin"
/>
) : (
<IconRefresh
size={18}
className="text-main-view-fg/50"
/>
)}
<span className="text-main-view-fg/70">
{refreshingModels
? 'Refreshing...'
: 'Refresh'}
</span>
</div>
</Button>
{!predefinedProviders.some(
(p) => p.provider === provider.provider
) && (
<Button
variant="link"
size="sm"
className="hover:no-underline"
onClick={handleRefreshModels}
disabled={refreshingModels}
>
<div className="cursor-pointer flex items-center justify-center rounded hover:bg-main-view-fg/15 bg-main-view-fg/10 transition-all duration-200 ease-in-out px-1.5 py-1 gap-1">
{refreshingModels ? (
<IconLoader
size={18}
className="text-main-view-fg/50 animate-spin"
/>
) : (
<IconRefresh
size={18}
className="text-main-view-fg/50"
/>
)}
<span className="text-main-view-fg/70">
{refreshingModels
? 'Refreshing...'
: 'Refresh'}
</span>
</div>
</Button>
)}
<DialogAddModel provider={provider} />
</>
)}

View File

@@ -1,5 +1,5 @@
import { models as providerModels } from 'token.js'
import { mockModelProvider } from '@/mock/data'
import { predefinedProviders } from '@/mock/data'
import {
EngineManagementExtension,
EngineManager,
@@ -17,7 +17,7 @@ export const getProviders = async (): Promise<ModelProvider[]> => {
.get<EngineManagementExtension>(ExtensionTypeEnum.Engine)
?.getEngines()
: {}
const builtinProviders = mockModelProvider.map((provider) => {
const builtinProviders = predefinedProviders.map((provider) => {
let models = provider.models as Model[]
if (Object.keys(providerModels).includes(provider.provider)) {
const builtInModels = providerModels[