diff --git a/core/src/browser/extensions/engines/OAIEngine.ts b/core/src/browser/extensions/engines/OAIEngine.ts index 308e363f6..4db2282b5 100644 --- a/core/src/browser/extensions/engines/OAIEngine.ts +++ b/core/src/browser/extensions/engines/OAIEngine.ts @@ -102,7 +102,7 @@ export abstract class OAIEngine extends AIEngine { events.emit(MessageEvent.OnMessageUpdate, message) }, error: async (err: any) => { - console.error(`Inference error:`, err) + console.error(`Inference error:`, JSON.stringify(err)) if (this.isCancelled || message.content.length) { message.status = MessageStatus.Stopped events.emit(MessageEvent.OnMessageUpdate, message) diff --git a/extensions/inference-groq-extension/resources/settings.json b/extensions/inference-groq-extension/resources/settings.json index 60064f687..493b602cd 100644 --- a/extensions/inference-groq-extension/resources/settings.json +++ b/extensions/inference-groq-extension/resources/settings.json @@ -5,7 +5,7 @@ "description": "The endpoint to use for chat completions. 
See the [Groq documentation](https://console.groq.com/docs/openai) for more information.", "controllerType": "input", "controllerProps": { - "placeholder": "Chat Completions Endpoint", + "placeholder": "https://api.groq.com/openai/v1/chat/completions", "value": "https://api.groq.com/openai/v1/chat/completions" } }, diff --git a/extensions/inference-groq-extension/src/index.ts b/extensions/inference-groq-extension/src/index.ts index d0c88c7a2..c7f589b22 100644 --- a/extensions/inference-groq-extension/src/index.ts +++ b/extensions/inference-groq-extension/src/index.ts @@ -6,7 +6,7 @@ * @module inference-groq-extension/src/index */ -import { RemoteOAIEngine } from '@janhq/core' +import { RemoteOAIEngine, SettingComponentProps } from '@janhq/core' declare const SETTINGS: Array declare const MODELS: Array @@ -43,7 +43,17 @@ export default class JanInferenceGroqExtension extends RemoteOAIEngine { if (key === Settings.apiKey) { this.apiKey = value as string } else if (key === Settings.chatCompletionsEndPoint) { - this.inferenceUrl = value as string + if (typeof value !== 'string') return + + if (value.trim().length === 0) { + SETTINGS.forEach((setting) => { + if (setting.key === Settings.chatCompletionsEndPoint) { + this.inferenceUrl = setting.controllerProps.value as string + } + }) + } else { + this.inferenceUrl = value + } } } } diff --git a/extensions/inference-mistral-extension/resources/settings.json b/extensions/inference-mistral-extension/resources/settings.json index 4e1cbc267..2ca8ec7e5 100644 --- a/extensions/inference-mistral-extension/resources/settings.json +++ b/extensions/inference-mistral-extension/resources/settings.json @@ -5,7 +5,7 @@ "description": "The endpoint to use for chat completions. 
See the [Mistral API documentation](https://docs.mistral.ai/api/#operation/createChatCompletion) for more information.", "controllerType": "input", "controllerProps": { - "placeholder": "Chat Completions Endpoint", + "placeholder": "https://api.mistral.ai/v1/chat/completions", "value": "https://api.mistral.ai/v1/chat/completions" } }, diff --git a/extensions/inference-mistral-extension/src/index.ts b/extensions/inference-mistral-extension/src/index.ts index 1654c1718..b58d7bb22 100644 --- a/extensions/inference-mistral-extension/src/index.ts +++ b/extensions/inference-mistral-extension/src/index.ts @@ -42,7 +42,17 @@ export default class JanInferenceMistralExtension extends RemoteOAIEngine { if (key === Settings.apiKey) { this.apiKey = value as string } else if (key === Settings.chatCompletionsEndPoint) { - this.inferenceUrl = value as string + if (typeof value !== 'string') return + + if (value.trim().length === 0) { + SETTINGS.forEach((setting) => { + if (setting.key === Settings.chatCompletionsEndPoint) { + this.inferenceUrl = setting.controllerProps.value as string + } + }) + } else { + this.inferenceUrl = value + } } } } diff --git a/extensions/inference-openai-extension/resources/settings.json b/extensions/inference-openai-extension/resources/settings.json index c779aac90..ccd7dd545 100644 --- a/extensions/inference-openai-extension/resources/settings.json +++ b/extensions/inference-openai-extension/resources/settings.json @@ -5,7 +5,7 @@ "description": "The endpoint to use for chat completions. 
See the [OpenAI API documentation](https://platform.openai.com/docs/api-reference/chat/create) for more information.", "controllerType": "input", "controllerProps": { - "placeholder": "Chat Completions Endpoint", + "placeholder": "https://api.openai.com/v1/chat/completions", "value": "https://api.openai.com/v1/chat/completions" } }, diff --git a/extensions/inference-openai-extension/src/index.ts b/extensions/inference-openai-extension/src/index.ts index 27b144d12..61686a0a5 100644 --- a/extensions/inference-openai-extension/src/index.ts +++ b/extensions/inference-openai-extension/src/index.ts @@ -6,7 +6,7 @@ * @module inference-openai-extension/src/index */ -import { RemoteOAIEngine } from '@janhq/core' +import { RemoteOAIEngine, SettingComponentProps } from '@janhq/core' declare const SETTINGS: Array declare const MODELS: Array @@ -43,7 +43,17 @@ export default class JanInferenceOpenAIExtension extends RemoteOAIEngine { if (key === Settings.apiKey) { this.apiKey = value as string } else if (key === Settings.chatCompletionsEndPoint) { - this.inferenceUrl = value as string + if (typeof value !== 'string') return + + if (value.trim().length === 0) { + SETTINGS.forEach((setting) => { + if (setting.key === Settings.chatCompletionsEndPoint) { + this.inferenceUrl = setting.controllerProps.value as string + } + }) + } else { + this.inferenceUrl = value + } } } } diff --git a/extensions/inference-triton-trtllm-extension/resources/settings.json b/extensions/inference-triton-trtllm-extension/resources/settings.json index 6c90e917d..9c220eed7 100644 --- a/extensions/inference-triton-trtllm-extension/resources/settings.json +++ b/extensions/inference-triton-trtllm-extension/resources/settings.json @@ -5,7 +5,7 @@ "description": "The endpoint to use for chat completions.", "controllerType": "input", "controllerProps": { - "placeholder": "Chat Completions Endpoint", + "placeholder": "http://localhost:8000/v2/models/tensorrt_llm_bls/generate", "value": 
"http://localhost:8000/v2/models/tensorrt_llm_bls/generate" } }, diff --git a/extensions/inference-triton-trtllm-extension/src/index.ts b/extensions/inference-triton-trtllm-extension/src/index.ts index a3032f01d..9322935fc 100644 --- a/extensions/inference-triton-trtllm-extension/src/index.ts +++ b/extensions/inference-triton-trtllm-extension/src/index.ts @@ -6,7 +6,7 @@ * @module inference-nvidia-triton-trt-llm-extension/src/index */ -import { RemoteOAIEngine } from '@janhq/core' +import { RemoteOAIEngine, SettingComponentProps } from '@janhq/core' declare const SETTINGS: Array enum Settings { @@ -43,7 +43,17 @@ export default class JanInferenceTritonTrtLLMExtension extends RemoteOAIEngine { if (key === Settings.apiKey) { this.apiKey = value as string } else if (key === Settings.chatCompletionsEndPoint) { - this.inferenceUrl = value as string + if (typeof value !== 'string') return + + if (value.trim().length === 0) { + SETTINGS.forEach((setting) => { + if (setting.key === Settings.chatCompletionsEndPoint) { + this.inferenceUrl = setting.controllerProps.value as string + } + }) + } else { + this.inferenceUrl = value + } } } }