diff --git a/src-tauri/src/core/setup.rs b/src-tauri/src/core/setup.rs
index 3fe85a9a2..ef372ec43 100644
--- a/src-tauri/src/core/setup.rs
+++ b/src-tauri/src/core/setup.rs
@@ -11,7 +11,7 @@ use tauri_plugin_shell::process::{CommandChild, CommandEvent};
 use tauri_plugin_shell::ShellExt;
 use tauri_plugin_store::StoreExt;
 use tokio::time::{sleep, Duration};
-use tokio::{process::Command, sync::Mutex}; // Using tokio::sync::Mutex
+use tokio::{sync::Mutex}; // Using tokio::sync::Mutex
 // MCP
 use super::{
     cmd::{get_jan_data_folder_path, get_jan_extensions_path},
diff --git a/web-app/src/containers/SetupScreen.tsx b/web-app/src/containers/SetupScreen.tsx
index e9622ed7c..6f3f07873 100644
--- a/web-app/src/containers/SetupScreen.tsx
+++ b/web-app/src/containers/SetupScreen.tsx
@@ -26,7 +26,12 @@ function SetupScreen() {
[SetupScreen hunk garbled in extraction: the JSX tags were stripped, leaving
only the added "Set up local model" label. The hunk adds five lines of markup
around that label; a hedged sketch of the likely wiring follows.]
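[Hedged sketch — the stripped SetupScreen markup is not recoverable verbatim.
Since hub.tsx below reads `step === 'setup_local_provider'` from its search
params, the added "Set up local model" control presumably navigates to the hub
with that flag; `route.hub` is the constant hub.tsx itself navigates to, and
the Button markup is an assumption:]

  const navigate = useNavigate() // TanStack Router hook, as used in hub.tsx
  // ...
  <Button
    onClick={() =>
      navigate({ to: route.hub, search: { step: 'setup_local_provider' } })
    }
  >
    Set up local model
  </Button>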

diff --git a/web-app/src/routes/hub.tsx b/web-app/src/routes/hub.tsx
index b0dfc3422..47e518fc2 100644
--- a/web-app/src/routes/hub.tsx
+++ b/web-app/src/routes/hub.tsx
@@ -23,6 +23,8 @@ import { RenderMarkdown } from '@/containers/RenderMarkdown'
 import { extractModelName, extractDescription } from '@/lib/models'
 import { IconDownload, IconFileCode, IconSearch } from '@tabler/icons-react'
 import { Switch } from '@/components/ui/switch'
+import Joyride, { CallBackProps, STATUS } from 'react-joyride'
+import { CustomTooltipJoyRide } from '@/containers/CustomeTooltipJoyRide'
 import {
   DropdownMenu,
   DropdownMenuContent,
@@ -38,6 +40,7 @@ import { Loader } from 'lucide-react'
 type ModelProps = {
   model: {
     id: string
+    metadata?: any
     models: {
       id: string
     }[]
@@ -69,6 +72,8 @@ function Hub() {
   )
   const [isSearching, setIsSearching] = useState(false)
   const [showOnlyDownloaded, setShowOnlyDownloaded] = useState(false)
+  const [joyrideReady, setJoyrideReady] = useState(false)
+  const [currentStepIndex, setCurrentStepIndex] = useState(0)
   const addModelSourceTimeoutRef = useRef<NodeJS.Timeout | null>(
     null
   )
@@ -190,6 +195,10 @@ function Hub() {
   const navigate = useNavigate()
 
+  const isRecommendedModel = useCallback((modelId: string) => {
+    return (extractModelName(modelId) === 'Jan-nano') as boolean
+  }, [])
+
   const handleUseModel = useCallback(
     (modelId: string) => {
       navigate({
@@ -215,280 +224,394 @@ function Hub() {
       const isDownloaded = llamaProvider?.models.some(
         (m: { id: string }) => m.id === modelId
       )
+      const isRecommended = isRecommendedModel(model.metadata?.id)
       return (
[getActionButton hunk garbled in extraction: the JSX tags were stripped. The
surviving fragments show the old markup was a ternary chain — a progress
readout ({Math.round(downloadProgress * 100)}%) while isDownloading, a "Use"
button when isDownloaded, otherwise a "Download" button — and the new markup
moves the progress readout and the isDownloaded ? ... : ... buttons into one
shared container; a hedged sketch of the rebuilt markup follows.]
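[Hedged sketch of the rebuilt getActionButton markup — a reconstruction, not
the PR's verbatim JSX. `Progress`, `Button`, and `cn` are assumed shadcn-style
helpers, the `hub-download-button-step` class is inferred from the Joyride
steps below, and the !isDownloading guard is a guess:]

  return (
    <div
      className={cn(
        'flex items-center gap-2',
        isRecommended && 'hub-download-button-step' // assumed tour target
      )}
    >
      {isDownloading && (
        <>
          <Progress value={downloadProgress * 100} />
          <span>{Math.round(downloadProgress * 100)}%</span>
        </>
      )}
      {!isDownloading &&
        (isDownloaded ? (
          <Button onClick={() => handleUseModel(modelId)}>Use</Button>
        ) : (
          <Button onClick={() => downloadModel(modelId)}>Download</Button>
        ))}
    </div>
  )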
       )
     }
-  }, [downloadProcesses, llamaProvider?.models, handleUseModel])
+  }, [
+    downloadProcesses,
+    llamaProvider?.models,
+    handleUseModel,
+    isRecommendedModel,
+  ])
+
+  const { step } = useSearch({ from: Route.id })
+  const isSetup = step === 'setup_local_provider'
+
+  // Wait for DOM to be ready before starting Joyride
+  useEffect(() => {
+    if (!loading && filteredModels.length > 0 && isSetup) {
+      const timer = setTimeout(() => {
+        setJoyrideReady(true)
+      }, 100)
+      return () => clearTimeout(timer)
+    } else {
+      setJoyrideReady(false)
+    }
+  }, [loading, filteredModels.length, isSetup])
+
+  const handleJoyrideCallback = (data: CallBackProps) => {
+    const { status, index } = data
+
+    if (status === STATUS.FINISHED && !isDownloading && isLastStep) {
+      const recommendedModel = filteredModels.find((model) =>
+        isRecommendedModel(model.metadata?.id)
+      )
+      if (recommendedModel && recommendedModel.models[0]?.id) {
+        downloadModel(recommendedModel.models[0].id)
+
+        return
+      }
+    }
+
+    if (status === STATUS.FINISHED) {
+      navigate({
+        to: route.hub,
+      })
+    }
+
+    // Track current step index
+    setCurrentStepIndex(index)
+  }
+
+  // Check if any model is currently downloading
+  const isDownloading = downloadProcesses.length > 0
+
+  const steps = [
+    {
+      target: '.hub-model-card-step',
+      title: 'Recommended Model',
+      disableBeacon: true,
+      content:
+        'These are models available for download from various providers. The featured model from Menlo AI is specifically optimized for tool calling and function execution, making it ideal for building AI agents and interactive applications. Each card shows the model name, size, and download options.',
+    },
+    {
+      target: '.hub-download-button-step',
+      title: isDownloading ? 'Download Progress' : 'Download Model',
+      disableBeacon: true,
+      content: isDownloading
+        ? 'Your model is now downloading. You can track the progress here. Once the download is complete, the model will be available in your local collection and ready to use for AI conversations and tool calling.'
+        : 'Click the Download button to get this recommended model from Menlo AI. This model is optimized for tool calling and function execution, making it perfect for building AI agents.',
+    },
+  ]
+
+  // Check if we're on the last step
+  const isLastStep = currentStepIndex === steps.length - 1
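[For reference — react-joyride reports terminal states through `status`; a
common guard (not used in this PR) treats a skipped tour like a finished one.
A minimal sketch, with setCurrentStepIndex standing in for the component state
above:]

  import { CallBackProps, STATUS, Status } from 'react-joyride'

  const handleCallback = ({ status, index }: CallBackProps) => {
    const done: Status[] = [STATUS.FINISHED, STATUS.SKIPPED]
    if (done.includes(status)) {
      // tear the tour down / navigate away here
    }
    setCurrentStepIndex(index) // otherwise just record progress
  }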
 
   return (
[render-tree hunk garbled in extraction: the JSX tags were stripped, so the
restructured Hub markup cannot be reproduced verbatim. The surviving text
shows the hunk re-indents the existing page inside a new wrapper — the search
input with its {isSearching ? ... : ...} spinner, the sortOptions dropdown,
the "Downloaded" filter switch, the "Loading models..." and "No models found"
states, and the model cards (name via extractModelName, size via toGigabytes,
"By {model?.author}", download and variant counts, the "Show variants"
toggle, and the expanded variant rows with per-variant progress/Use/Download
controls, where the removed side shows handleUseModel(variant.id) and the
added side downloadModel(variant.id)) — presumably so the Joyride tour can
target the recommended card and its download button. The Joyride mount is
sketched below.]
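[Hedged sketch — the <Joyride> element itself was stripped from the render
above. Given the imports, the joyrideReady state, the steps array, and
handleJoyrideCallback, it is presumably mounted once near the top of the
returned tree, roughly like this; run, steps, continuous, callback, and
tooltipComponent are real react-joyride props, but their exact combination
here is an assumption:]

  {isSetup && (
    <Joyride
      run={joyrideReady} // gated by the DOM-ready effect above
      steps={steps} // the two hub tour steps
      continuous // advance with the primary button instead of closing
      callback={handleJoyrideCallback} // drives the download / navigation flow
      tooltipComponent={CustomTooltipJoyRide} // custom tooltip renderer
    />
  )}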
   )
 }
diff --git a/web-app/src/services/models.ts b/web-app/src/services/models.ts
index 16adaaaa7..071f032b8 100644
--- a/web-app/src/services/models.ts
+++ b/web-app/src/services/models.ts
@@ -1,5 +1,7 @@
+/* eslint-disable @typescript-eslint/no-explicit-any */
 import { ExtensionManager } from '@/lib/extension'
 import { normalizeProvider } from '@/lib/models'
+import { hardcodedModel } from '@/utils/models'
 import { EngineManager, ExtensionTypeEnum, ModelExtension } from '@janhq/core'
 import { Model as CoreModel } from '@janhq/core'
 
@@ -17,22 +19,25 @@
  * Fetches the sources of the models.
  * @returns A promise that resolves to the model sources.
  */
-export const fetchModelSources = async () => {
+export const fetchModelSources = async (): Promise<any[]> => {
   const extension = ExtensionManager.getInstance().get<ModelExtension>(
     ExtensionTypeEnum.Model
   )
-  if (!extension) return []
+  if (!extension) return [hardcodedModel]
 
   try {
     const sources = await extension.getSources()
-    return sources.map((m) => ({
+    const mappedSources = sources.map((m) => ({
       ...m,
       models: m.models.sort((a, b) => a.size - b.size),
     }))
+
+    // Prepend the hardcoded model to the sources
+    return [hardcodedModel, ...mappedSources]
   } catch (error) {
     console.error('Failed to fetch model sources:', error)
-    return []
+    return [hardcodedModel]
   }
 }
 
@@ -40,10 +45,13 @@
  * Fetches the model hub.
  * @returns A promise that resolves to the model hub.
  */
-export const fetchModelHub = async () => {
-  return ExtensionManager.getInstance()
+export const fetchModelHub = async (): Promise<any[]> => {
+  const hubData = await ExtensionManager.getInstance()
     .get<ModelExtension>(ExtensionTypeEnum.Model)
     ?.fetchModelsHub()
+
+  // Prepend the hardcoded model to the hub data
+  return hubData ? [hardcodedModel, ...hubData] : [hardcodedModel]
 }
 
 /**
diff --git a/web-app/src/services/providers.ts b/web-app/src/services/providers.ts
index 96d340cb4..ed3fccf5a 100644
--- a/web-app/src/services/providers.ts
+++ b/web-app/src/services/providers.ts
@@ -6,7 +6,10 @@ import {
   ExtensionTypeEnum,
   SettingComponentProps,
 } from '@janhq/core'
-import { ModelCapabilities } from '@/types/models'
+import {
+  DefaultToolUseSupportedModels,
+  ModelCapabilities,
+} from '@/types/models'
 import { modelSettings } from '@/lib/predefined'
 import { fetchModels } from './models'
 import { ExtensionManager } from '@/lib/extension'
@@ -115,7 +118,14 @@
         capabilities:
           'capabilities' in model
             ? (model.capabilities as string[])
-            : [ModelCapabilities.COMPLETION],
+            : [
+                ModelCapabilities.COMPLETION,
+                ...(Object.values(DefaultToolUseSupportedModels).some((v) =>
+                  model.id.toLowerCase().includes(v.toLowerCase())
+                )
+                  ? [ModelCapabilities.TOOLS]
+                  : []),
+              ],
         provider: providerName,
         settings: Object.values(modelSettings).reduce(
           (acc, setting) => {
diff --git a/web-app/src/types/models.ts b/web-app/src/types/models.ts
index 1ea1e865b..ed93cdbae 100644
--- a/web-app/src/types/models.ts
+++ b/web-app/src/types/models.ts
@@ -15,6 +15,12 @@ export enum ModelCapabilities {
   AUDIO_TO_TEXT = 'audio_to_text',
 }
 
+// TODO: Remove this enum when we integrate llama.cpp extension
+export enum DefaultToolUseSupportedModels {
+  JanNano = 'jan-nano',
+  Qwen3 = 'qwen3',
+}
+
 export type ActiveModel = {
   engine: string
   id: string
diff --git a/web-app/src/utils/models.ts b/web-app/src/utils/models.ts
new file mode 100644
index 000000000..9b79fa4e0
--- /dev/null
+++ b/web-app/src/utils/models.ts
@@ -0,0 +1,79 @@
+export const hardcodedModel = {
+  author: 'Menlo',
+  id: 'https://huggingface.co/Menlo/Jan-nano',
+  metadata: {
+    '_id': '68492cd9cada68b1d11ca1bd',
+    'author': 'Menlo',
+    'cardData': {
+      license: 'apache-2.0',
+      pipeline_tag: 'text-generation',
+    },
+    'createdAt': '2025-06-11T07:14:33.000Z',
+    'description':
+      '---\nlicense: apache-2.0\npipeline_tag: text-generation\n---\n\n## Overview\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/657a81129ea9d52e5cbd67f7/YQci8jiHjAAFpXWYOadrU.png)\n\nJan Nano is a fine-tuned language model built on top of the Qwen3 architecture. Developed as part of the Jan ecosystem, it balances compact size and extended context length, making it ideal for efficient, high-quality text generation in local or embedded environments.\n\nWith 36 transformer blocks, 4B parameters, and an extended context window of 40,960 tokens, Jan Nano is optimized for dialogue, reasoning, and creative tasks. It is released in the Q4_K_M quantized format, enabling faster inference with reduced memory usage.\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)',
+    'disabled': false,
+    'downloads': 0,
+    'gated': false,
+    'gguf': {
+      architecture: 'qwen3',
+      bos_token: '<|endoftext|>',
+      chat_template:
+        "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if message.content is string %}\n {%- set content = message.content %}\n {%- else %}\n {%- set content = '' %}\n {%- endif %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n<think>\\n\\n</think>\\n\\n' }}\n{%- endif %}",
+      context_length: 40960,
+      eos_token: '<|im_end|>',
+      total: 4022468096,
+    },
+    'id': 'Menlo/Jan-nano',
+    'lastModified': '2025-06-11T10:00:23.000Z',
+    'likes': 1,
+    'model-index': null,
+    'modelId': 'Menlo/Jan-nano',
+    'pipeline_tag': 'text-generation',
+    'private': false,
+    'sha': '9966a3efaf6fe36ac4f2d8bd4343ae5791def2b0',
+    'siblings': [
+      {
+        rfilename: '.gitattributes',
+        size: 1569,
+      },
+      {
+        rfilename: 'Jan-nano.gguf',
+        size: 2497280288,
+      },
+      {
+        rfilename: 'README.md',
+        size: 817,
+      },
+    ],
+    'spaces': [],
+    'tags': [
+      'gguf',
+      'text-generation',
+      'license:apache-2.0',
+      'endpoints_compatible',
+      'region:us',
+      'conversational',
+    ],
+    'usedStorage': 7491840896,
+    'widgetData': [
+      {
+        text: 'Hi, what can you help me with?',
+      },
+      {
+        text: 'What is 84 * 3 / 2?',
+      },
+      {
+        text: 'Tell me an interesting fact about the universe!',
+      },
+      {
+        text: 'Explain quantum computing in simple terms.',
+      },
+    ],
+  },
+  models: [
+    {
+      id: 'Menlo:Jan-nano:Jan-nano.gguf',
+      size: 2497280288,
+    },
+  ],
+}
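[Worked example of the providers.ts capability rule above — a minimal,
self-contained TypeScript sketch. `detectCapabilities` is a hypothetical
helper, and the 'completion'/'tools' enum values are assumed from the naming
pattern in types/models.ts:]

  // Assumed string values, mirroring AUDIO_TO_TEXT = 'audio_to_text' above.
  enum ModelCapabilities {
    COMPLETION = 'completion',
    TOOLS = 'tools',
  }

  // Copied from the types/models.ts hunk above.
  enum DefaultToolUseSupportedModels {
    JanNano = 'jan-nano',
    Qwen3 = 'qwen3',
  }

  // Mirrors the ternary in the providers.ts hunk: every model gets COMPLETION,
  // and a model whose id contains a known tool-capable family name also gets
  // TOOLS via the spread of a conditional one-element array.
  function detectCapabilities(modelId: string): string[] {
    return [
      ModelCapabilities.COMPLETION,
      ...(Object.values(DefaultToolUseSupportedModels).some((v) =>
        modelId.toLowerCase().includes(v.toLowerCase())
      )
        ? [ModelCapabilities.TOOLS]
        : []),
    ]
  }

  detectCapabilities('Menlo:Jan-nano:Jan-nano.gguf') // ['completion', 'tools']
  detectCapabilities('qwen3-8b-q4_k_m')              // ['completion', 'tools']
  detectCapabilities('llama-3.2-1b')                 // ['completion']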