From 4846befd4a17709ef09fbf61640369d28dd740c7 Mon Sep 17 00:00:00 2001
From: Louis
Date: Wed, 5 Feb 2025 15:28:35 +0700
Subject: [PATCH] chore: engine model adding ux refinement (#4580)

* fix: enhance remote engine models adding UX
* fix: OpenAI o3 support template
* chore: fix models populating error
* chore: missing dependency
---
 core/src/types/engine/index.ts                  |  1 +
 .../resources/anthropic.json                    |  3 ++-
 .../resources/cohere.json                       |  3 ++-
 .../resources/groq.json                         |  3 ++-
 .../resources/martian.json                      |  3 ++-
 .../resources/mistral.json                      |  3 ++-
 .../resources/nvidia.json                       |  3 ++-
 .../resources/openai.json                       |  5 +++--
 .../resources/openrouter.json                   |  3 ++-
 .../engine-management-extension/src/index.ts    |  4 +++-
 web/containers/ModelDropdown/index.tsx          |  1 +
 web/package.json                                |  2 +-
 web/screens/Settings/Engines/ModalAddModel.tsx  | 17 +++++++++++------
 13 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/core/src/types/engine/index.ts b/core/src/types/engine/index.ts
index 7c848a279..9a6beeeff 100644
--- a/core/src/types/engine/index.ts
+++ b/core/src/types/engine/index.ts
@@ -18,6 +18,7 @@ export type EngineMetadata = {
       template?: string
     }
   }
+  explore_models_url?: string
 }
 
 export type EngineVariant = {
diff --git a/extensions/engine-management-extension/resources/anthropic.json b/extensions/engine-management-extension/resources/anthropic.json
index 12a3f08b8..02315147d 100644
--- a/extensions/engine-management-extension/resources/anthropic.json
+++ b/extensions/engine-management-extension/resources/anthropic.json
@@ -17,6 +17,7 @@
       "chat_completions": {
         "template": "{% if input_request.stream %} {\"object\": \"chat.completion.chunk\", \"model\": \"{{ input_request.model }}\", \"choices\": [{\"index\": 0, \"delta\": { {% if input_request.type == \"message_start\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"ping\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_delta\" %} \"role\": \"assistant\", \"content\": \"{{ input_request.delta.text }}\" {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% endif %} }, {% if input_request.type == \"content_block_stop\" %} \"finish_reason\": \"stop\" {% else %} \"finish_reason\": null {% endif %} }]} {% else %} {\"id\": \"{{ input_request.id }}\", \"created\": null, \"object\": \"chat.completion\", \"model\": \"{{ input_request.model }}\", \"choices\": [{ \"index\": 0, \"message\": { \"role\": \"{{ input_request.role }}\", \"content\": {% if input_request.content and input_request.content.0.type == \"text\" %} \"{{input_request.content.0.text}}\" {% else %} null {% endif %}, \"refusal\": null }, \"logprobs\": null, \"finish_reason\": \"{{ input_request.stop_reason }}\" } ], \"usage\": { \"prompt_tokens\": {{ input_request.usage.input_tokens }}, \"completion_tokens\": {{ input_request.usage.output_tokens }}, \"total_tokens\": {{ input_request.usage.input_tokens + input_request.usage.output_tokens }}, \"prompt_tokens_details\": { \"cached_tokens\": 0 }, \"completion_tokens_details\": { \"reasoning_tokens\": 0, \"accepted_prediction_tokens\": 0, \"rejected_prediction_tokens\": 0 } }, \"system_fingerprint\": \"fp_6b68a8204b\"} {% endif %}"
       }
-    }
+    },
+    "explore_models_url": "https://docs.anthropic.com/en/docs/about-claude/models"
   }
 }
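The new optional `explore_models_url` field is additive, so engine configs that predate it keep type-checking and loading. A minimal sketch of how UI code might read it, assuming `EngineMetadata` is re-exported from `@janhq/core` (the helper name is hypothetical, not part of this patch):

```ts
// Hypothetical helper, not code from this patch: reads the optional
// explore_models_url added to EngineMetadata and returns undefined for
// engine configs that predate the field.
import type { EngineMetadata } from '@janhq/core'

export function getExploreModelsUrl(
  metadata?: EngineMetadata
): string | undefined {
  return metadata?.explore_models_url
}
```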
diff --git a/extensions/engine-management-extension/resources/cohere.json b/extensions/engine-management-extension/resources/cohere.json
index b10e00e5b..7f18c9558 100644
--- a/extensions/engine-management-extension/resources/cohere.json
+++ b/extensions/engine-management-extension/resources/cohere.json
@@ -17,6 +17,7 @@
       "chat_completions": {
         "template": "{% if input_request.stream %} {\"object\": \"chat.completion.chunk\", \"model\": \"{{ input_request.model }}\", \"choices\": [{\"index\": 0, \"delta\": { {% if input_request.event_type == \"text-generation\" %} \"role\": \"assistant\", \"content\": \"{{ input_request.text }}\" {% else %} \"role\": \"assistant\", \"content\": null {% endif %} }, {% if input_request.event_type == \"stream-end\" %} \"finish_reason\": \"{{ input_request.finish_reason }}\" {% else %} \"finish_reason\": null {% endif %} }]} {% else %} {\"id\": \"{{ input_request.generation_id }}\", \"created\": null, \"object\": \"chat.completion\", \"model\": {% if input_request.model %} \"{{ input_request.model }}\" {% else %} \"command-r-plus-08-2024\" {% endif %}, \"choices\": [{ \"index\": 0, \"message\": { \"role\": \"assistant\", \"content\": {% if not input_request.text %} null {% else %} \"{{ input_request.text }}\" {% endif %}, \"refusal\": null }, \"logprobs\": null, \"finish_reason\": \"{{ input_request.finish_reason }}\" } ], \"usage\": { \"prompt_tokens\": {{ input_request.meta.tokens.input_tokens }}, \"completion_tokens\": {{ input_request.meta.tokens.output_tokens }},\"total_tokens\": {{ input_request.meta.tokens.input_tokens + input_request.meta.tokens.output_tokens }}, \"prompt_tokens_details\": { \"cached_tokens\": 0 },\"completion_tokens_details\": { \"reasoning_tokens\": 0, \"accepted_prediction_tokens\": 0, \"rejected_prediction_tokens\": 0 } }, \"system_fingerprint\": \"fp_6b68a8204b\"} {% endif %}"
       }
-    }
+    },
+    "explore_models_url": "https://docs.cohere.com/v2/docs/models"
   }
 }
diff --git a/extensions/engine-management-extension/resources/groq.json b/extensions/engine-management-extension/resources/groq.json
index 60d553a92..67d6e0932 100644
--- a/extensions/engine-management-extension/resources/groq.json
+++ b/extensions/engine-management-extension/resources/groq.json
@@ -17,6 +17,7 @@
       "chat_completions": {
         "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"choices\" or key == \"created\" or key == \"model\" or key == \"service_tier\" or key == \"system_fingerprint\" or key == \"stream\" or key == \"object\" or key == \"usage\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }"
       }
-    }
+    },
+    "explore_models_url": "https://console.groq.com/docs/models"
   }
 }
diff --git a/extensions/engine-management-extension/resources/martian.json b/extensions/engine-management-extension/resources/martian.json
index 3a65f3981..6fb1cecf7 100644
--- a/extensions/engine-management-extension/resources/martian.json
+++ b/extensions/engine-management-extension/resources/martian.json
@@ -17,6 +17,7 @@
       "chat_completions": {
         "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"choices\" or key == \"created\" or key == \"model\" or key == \"service_tier\" or key == \"system_fingerprint\" or key == \"stream\" or key == \"object\" or key == \"usage\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }"
       }
-    }
+    },
+    "explore_models_url": "https://withmartian.github.io/llm-adapters/"
   }
 }
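The `transform_resp` templates for Groq and Martian above (and for NVIDIA and OpenRouter below) all express the same idea: copy only a whitelist of OpenAI-compatible keys from the upstream response. A rough TypeScript equivalent of that filter, for illustration only — the shipped transform is the Jinja-style template string shown in the diffs:

```ts
// Illustration of the whitelist pass-through implemented by the
// transform_resp templates; not part of this patch.
const ALLOWED_RESPONSE_KEYS = [
  'choices',
  'created',
  'model',
  'service_tier',
  'system_fingerprint',
  'stream',
  'object',
  'usage',
]

function filterUpstreamResponse(
  input: Record<string, unknown>
): Record<string, unknown> {
  // Keep only the keys an OpenAI-compatible client expects to see.
  return Object.fromEntries(
    Object.entries(input).filter(([key]) => ALLOWED_RESPONSE_KEYS.includes(key))
  )
}
```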
diff --git a/extensions/engine-management-extension/resources/mistral.json b/extensions/engine-management-extension/resources/mistral.json
index 3f447dc4c..3013d5f46 100644
--- a/extensions/engine-management-extension/resources/mistral.json
+++ b/extensions/engine-management-extension/resources/mistral.json
@@ -17,6 +17,7 @@
       "chat_completions": {
         "template": "{{tojson(input_request)}}"
       }
-    }
+    },
+    "explore_models_url": "https://docs.mistral.ai/getting-started/models/models_overview/"
   }
 }
diff --git a/extensions/engine-management-extension/resources/nvidia.json b/extensions/engine-management-extension/resources/nvidia.json
index 240130090..f703f8f18 100644
--- a/extensions/engine-management-extension/resources/nvidia.json
+++ b/extensions/engine-management-extension/resources/nvidia.json
@@ -17,6 +17,7 @@
       "chat_completions": {
         "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"choices\" or key == \"created\" or key == \"model\" or key == \"service_tier\" or key == \"system_fingerprint\" or key == \"stream\" or key == \"object\" or key == \"usage\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }"
       }
-    }
+    },
+    "explore_models_url": "https://build.nvidia.com/models"
   }
 }
or key == \"tools\" or key == \"tool_choice\" or key == \"logprobs\" or key == \"top_logprobs\" or key == \"logit_bias\" or key == \"n\" or key == \"modalities\" or key == \"prediction\" or key == \"response_format\" or key == \"service_tier\" or key == \"seed\" or key == \"stream_options\" or key == \"top_p\" or key == \"parallel_tool_calls\" or key == \"user\" or key == \"max_tokens\" or ((input_request.model == \"o1\" or input_request.model == \"o1-preview\" or input_request.model == \"o1-mini\" or input_request.model == \"o3\" or input_request.model == \"o3-mini\") and (key == \"stop\")) %} {% if not first %} , {% endif %} {% if key == \"messages\" and (input_request.model == \"o1\" or input_request.model == \"o1-preview\" or input_request.model == \"o1-mini\") and input_request.messages.0.role == \"system\" %} \"messages\": [{% for message in input_request.messages %} {% if not loop.is_first %} { \"role\": \"{{ message.role }}\", \"content\": \"{{ message.content }}\" } {% if not loop.is_last %} , {% endif %} {% endif %} {% endfor %}] {% else if key == \"max_tokens\" and (input_request.model == \"o1\" or input_request.model == \"o1-preview\" or input_request.model == \"o1-mini\" or input_request.model == \"o3\" or input_request.model == \"o3-mini\") %} \"max_completion_tokens\": {{ tojson(value) }} {% else %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endif %} {% endfor %} }" } }, "transform_resp": { "chat_completions": { "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"choices\" or key == \"created\" or key == \"model\" or key == \"service_tier\" or key == \"stream\" or key == \"object\" or key == \"usage\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }" } - } + }, + "explore_models_url": "https://platform.openai.com/docs/models" } } diff --git a/extensions/engine-management-extension/resources/openrouter.json b/extensions/engine-management-extension/resources/openrouter.json index 45dc48414..39872838d 100644 --- a/extensions/engine-management-extension/resources/openrouter.json +++ b/extensions/engine-management-extension/resources/openrouter.json @@ -17,6 +17,7 @@ "chat_completions": { "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"choices\" or key == \"created\" or key == \"model\" or key == \"service_tier\" or key == \"system_fingerprint\" or key == \"stream\" or key == \"object\" or key == \"usage\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }" } - } + }, + "explore_models_url": "https://openrouter.ai/models" } } diff --git a/extensions/engine-management-extension/src/index.ts b/extensions/engine-management-extension/src/index.ts index d9fbbdd97..a668aa2b2 100644 --- a/extensions/engine-management-extension/src/index.ts +++ b/extensions/engine-management-extension/src/index.ts @@ -167,7 +167,9 @@ export default class JSONEngineManagementExtension extends EngineManagementExten ky.post(`${API_URL}/v1/engines`, { json: engineConfig }).then((e) => { if (persistModels && engineConfig.metadata?.get_models_url) { // Pull /models from remote models endpoint - return this.populateRemoteModels(engineConfig).then(() => e) + return this.populateRemoteModels(engineConfig) + .then(() => e) + .catch(() => e) } return e }) diff --git a/web/containers/ModelDropdown/index.tsx b/web/containers/ModelDropdown/index.tsx index 
diff --git a/extensions/engine-management-extension/src/index.ts b/extensions/engine-management-extension/src/index.ts
index d9fbbdd97..a668aa2b2 100644
--- a/extensions/engine-management-extension/src/index.ts
+++ b/extensions/engine-management-extension/src/index.ts
@@ -167,7 +167,9 @@ export default class JSONEngineManagementExtension extends EngineManagementExtension
       ky.post(`${API_URL}/v1/engines`, { json: engineConfig }).then((e) => {
         if (persistModels && engineConfig.metadata?.get_models_url) {
           // Pull /models from remote models endpoint
-          return this.populateRemoteModels(engineConfig).then(() => e)
+          return this.populateRemoteModels(engineConfig)
+            .then(() => e)
+            .catch(() => e)
         }
         return e
       })
diff --git a/web/containers/ModelDropdown/index.tsx b/web/containers/ModelDropdown/index.tsx
index de4959bc3..e5fab361d 100644
--- a/web/containers/ModelDropdown/index.tsx
+++ b/web/containers/ModelDropdown/index.tsx
@@ -293,6 +293,7 @@ const ModelDropdown = ({
       updateThreadMetadata,
       setThreadModelParams,
       updateModelParameter,
+      stopModel,
     ]
   )
 
diff --git a/web/package.json b/web/package.json
index da1c47c78..41386c587 100644
--- a/web/package.json
+++ b/web/package.json
@@ -30,7 +30,7 @@
     "jotai": "^2.6.0",
     "katex": "^0.16.10",
     "lodash": "^4.17.21",
-    "lucide-react": "^0.291.0",
+    "lucide-react": "^0.311.0",
     "marked": "^9.1.2",
     "next": "14.2.3",
     "next-themes": "^0.2.1",
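The `src/index.ts` change above is what the "fix models populating error" commit refers to: with `.catch(() => e)`, a failing `/models` fetch no longer rejects the whole `ky.post` chain, so the engine is still registered and its response is still returned. The same pattern in isolation, as a sketch with hypothetical names:

```ts
// Sketch of the error-handling pattern from the diff: populate models as a
// best-effort side effect, but always resolve with the add-engine response.
async function addEngineAndPopulate<T>(
  addEngine: () => Promise<T>,
  populateModels: () => Promise<void>
): Promise<T> {
  const response = await addEngine()
  // Swallow population errors so a bad /models endpoint cannot undo an
  // otherwise successful engine registration.
  await populateModels().catch(() => undefined)
  return response
}
```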
diff --git a/web/screens/Settings/Engines/ModalAddModel.tsx b/web/screens/Settings/Engines/ModalAddModel.tsx
index 40c986e92..1fbdabb6a 100644
--- a/web/screens/Settings/Engines/ModalAddModel.tsx
+++ b/web/screens/Settings/Engines/ModalAddModel.tsx
@@ -10,7 +10,7 @@
 import { InferenceEngine, Model } from '@janhq/core'
 import { Button, Input, Modal } from '@janhq/joi'
 import { useAtomValue } from 'jotai'
-import { PlusIcon } from 'lucide-react'
+import { PlusIcon, ArrowUpRightFromSquare } from 'lucide-react'
 
 import { z } from 'zod'
 
@@ -71,7 +71,7 @@ const ModelAddModel = ({ engine }: { engine: string }) => {
           {prefix} {label}
-
+
           {desc} {isRequired && *}
 
@@ -97,7 +97,7 @@ const ModelAddModel = ({ engine }: { engine: string }) => {
           className="w-[500px]"
           content={
-
+
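The JSX in the two `ModalAddModel.tsx` hunks above did not survive extraction, but the new `ArrowUpRightFromSquare` import together with the `explore_models_url` metadata suggests the modal now links out to the provider's model catalog. A hypothetical sketch of such a link — the component shape, props, and markup are assumptions, not the patch's actual code:

```tsx
// Hypothetical sketch; the real JSX from this patch was not recoverable.
import { ArrowUpRightFromSquare } from 'lucide-react'

type ExploreModelsLinkProps = { url?: string }

const ExploreModelsLink = ({ url }: ExploreModelsLinkProps) => {
  // Engines whose config has no explore_models_url render nothing.
  if (!url) return null
  return (
    <a href={url} target="_blank" rel="noopener noreferrer">
      Explore models
      <ArrowUpRightFromSquare size={14} />
    </a>
  )
}

export default ExploreModelsLink
```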