chore: provider llamacpp will always check runtime capabilities

Faisal Amir 2025-09-03 21:39:16 +07:00
parent a8ca0d6418
commit 371a0dd6ec
2 changed files with 6 additions and 2 deletions


@@ -290,7 +290,7 @@ export const useChat = () => {
        ...modelSettings,
        ...currentAssistant.parameters,
        ...(selectedModel?.reasoning?.reasoning_budget &&
-         selectedModel?.reasoning?.reasoning_budget !== 'auto' && {
+         selectedModel?.reasoning?.reasoning_effort !== 'auto' && {
            reasoning_effort: selectedModel?.reasoning?.reasoning_effort,
          }),
      } as unknown as Record<string, object>
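
Note on the useChat hunk above: the old condition compared reasoning_budget against 'auto' while the value actually forwarded was reasoning_effort, so an 'auto' effort could still be sent; the fix compares the same field it forwards. Below is a minimal TypeScript sketch of that conditional-spread pattern, using illustrative names (Reasoning, buildParams) rather than the app's real types:

type Reasoning = { reasoning_budget?: number; reasoning_effort?: string }

function buildParams(reasoning?: Reasoning): Record<string, unknown> {
  return {
    // Forward reasoning_effort only when a budget is set and the effort is
    // an explicit value rather than 'auto'; the comparison now uses the
    // same field that is forwarded.
    ...(reasoning?.reasoning_budget !== undefined &&
      reasoning?.reasoning_effort !== 'auto' && {
        reasoning_effort: reasoning?.reasoning_effort,
      }),
  }
}

// buildParams({ reasoning_budget: 1024, reasoning_effort: 'high' })
//   -> { reasoning_effort: 'high' }
// buildParams({ reasoning_budget: 1024, reasoning_effort: 'auto' })
//   -> {}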


@@ -77,6 +77,7 @@ export const useModelProvider = create<ModelProviderState>()(
              ('id' in e || 'model' in e) &&
              typeof (e.id ?? e.model) === 'string'
          )
+         console.log(models, 'models')
          const mergedModels = [
            ...(provider?.models ?? []).filter(
              (e) =>
@@ -99,7 +100,10 @@ export const useModelProvider = create<ModelProviderState>()(
            return {
              ...model,
              settings: settings,
-             capabilities: existingModel?.capabilities || model.capabilities,
+             capabilities:
+               provider.provider === 'llamacpp'
+                 ? model.capabilities
+                 : existingModel?.capabilities || model.capabilities,
            }
          })
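
Note on the useModelProvider hunk above: when merging freshly fetched models into the store, the llamacpp provider now always takes the capabilities reported by the current runtime, while other providers keep the previously stored capabilities and only fall back to the incoming ones. A minimal TypeScript sketch of that merge rule, with illustrative types (the real code maps over the provider's model list inside the store):

type Model = { id: string; capabilities?: string[] }

function mergeCapabilities(
  providerName: string,
  incoming: Model, // model as reported by the current runtime scan
  existing?: Model // model previously persisted in the store
): string[] | undefined {
  // For llamacpp, always use the runtime-reported capabilities; for other
  // providers, prefer stored capabilities and fall back to the incoming ones.
  return providerName === 'llamacpp'
    ? incoming.capabilities
    : existing?.capabilities || incoming.capabilities
}

// mergeCapabilities('llamacpp',
//   { id: 'qwen3', capabilities: ['tools'] },
//   { id: 'qwen3', capabilities: ['completion'] })  -> ['tools']
// mergeCapabilities('openai',
//   { id: 'gpt-4o', capabilities: ['tools'] },
//   { id: 'gpt-4o', capabilities: ['vision'] })     -> ['vision']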