chore: onboarding local model (#5234)

* chore: simple onboarding for local models

* chore: update new model and improve the end-to-end onboarding flow for local models

* fix: default tool support models

---------

Co-authored-by: Louis <louis@jan.ai>
Faisal Amir 2025-06-11 18:38:07 +07:00 committed by GitHub
parent 50b83d7342
commit 7b59aa32f9
7 changed files with 488 additions and 257 deletions

View File

@@ -11,7 +11,7 @@ use tauri_plugin_shell::process::{CommandChild, CommandEvent};
 use tauri_plugin_shell::ShellExt;
 use tauri_plugin_store::StoreExt;
 use tokio::time::{sleep, Duration};
-use tokio::{process::Command, sync::Mutex}; // Using tokio::sync::Mutex
+use tokio::{sync::Mutex}; // Using tokio::sync::Mutex
 // MCP
 use super::{
     cmd::{get_jan_data_folder_path, get_jan_extensions_path},

View File

@@ -26,7 +26,12 @@ function SetupScreen() {
         <div className="flex gap-4 flex-col">
           <Card
             header={
-              <Link to={route.hub}>
+              <Link
+                to={route.hub}
+                search={{
+                  step: 'setup_local_provider',
+                }}
+              >
                 <div>
                   <h1 className="text-main-view-fg font-medium text-base">
                     Set up local model
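
For context, a minimal TypeScript sketch of how the step search param added to this Link is read on the Hub side via TanStack Router to decide whether the onboarding tour should run. The route definition below is hypothetical and simplified; the real Hub route and its search validation live elsewhere in the app.

import { createFileRoute, useSearch } from '@tanstack/react-router'

// Hypothetical route definition used only for this sketch.
export const Route = createFileRoute('/hub')({
  validateSearch: (search: Record<string, unknown>) => ({
    step: typeof search.step === 'string' ? search.step : undefined,
  }),
})

// Mirrors the isSetup flag checked by the Joyride onboarding in Hub.tsx.
export function useIsLocalProviderSetup(): boolean {
  const { step } = useSearch({ from: Route.id })
  return step === 'setup_local_provider'
}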

View File

@@ -23,6 +23,8 @@ import { RenderMarkdown } from '@/containers/RenderMarkdown'
 import { extractModelName, extractDescription } from '@/lib/models'
 import { IconDownload, IconFileCode, IconSearch } from '@tabler/icons-react'
 import { Switch } from '@/components/ui/switch'
+import Joyride, { CallBackProps, STATUS } from 'react-joyride'
+import { CustomTooltipJoyRide } from '@/containers/CustomeTooltipJoyRide'
 import {
   DropdownMenu,
   DropdownMenuContent,
@@ -38,6 +40,7 @@ import { Loader } from 'lucide-react'
 type ModelProps = {
   model: {
     id: string
+    metadata?: any
     models: {
       id: string
     }[]
@@ -69,6 +72,8 @@ function Hub() {
   )
   const [isSearching, setIsSearching] = useState(false)
   const [showOnlyDownloaded, setShowOnlyDownloaded] = useState(false)
+  const [joyrideReady, setJoyrideReady] = useState(false)
+  const [currentStepIndex, setCurrentStepIndex] = useState(0)
   const addModelSourceTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>(
     null
   )
@@ -190,6 +195,10 @@ function Hub() {
   const navigate = useNavigate()
 
+  const isRecommendedModel = useCallback((modelId: string) => {
+    return (extractModelName(modelId) === 'Jan-nano') as boolean
+  }, [])
+
   const handleUseModel = useCallback(
     (modelId: string) => {
       navigate({
@@ -215,280 +224,394 @@ function Hub() {
       const isDownloaded = llamaProvider?.models.some(
         (m: { id: string }) => m.id === modelId
       )
+      const isRecommended = isRecommendedModel(model.metadata?.id)
       return (
-        <>
-          {isDownloading ? (
-            <div className="flex items-center gap-2 w-20">
-              <Progress value={downloadProgress * 100} />
-              <span className="text-xs text-center text-main-view-fg/70">
-                {Math.round(downloadProgress * 100)}%
-              </span>
-            </div>
-          ) : isDownloaded ? (
+        <div
+          className={cn(
+            'flex items-center',
+            isRecommended && 'hub-download-button-step'
+          )}
+        >
+          <div
+            className={cn(
+              'flex items-center gap-2 w-20 ',
+              !isDownloading && 'opacity-0 visibility-hidden w-0'
+            )}
+          >
+            <Progress value={downloadProgress * 100} />
+            <span className="text-xs text-center text-main-view-fg/70">
+              {Math.round(downloadProgress * 100)}%
+            </span>
+          </div>
+          {isDownloaded ? (
             <Button size="sm" onClick={() => handleUseModel(modelId)}>
               Use
             </Button>
           ) : (
-            <Button size="sm" onClick={() => downloadModel(modelId)}>
+            <Button
+              size="sm"
+              onClick={() => downloadModel(modelId)}
+              className={cn(isDownloading && 'hidden')}
+            >
               Download
             </Button>
           )}
-        </>
+        </div>
       )
     }
-  }, [downloadProcesses, llamaProvider?.models, handleUseModel])
+  }, [
+    downloadProcesses,
+    llamaProvider?.models,
+    handleUseModel,
+    isRecommendedModel,
+  ])
+
+  const { step } = useSearch({ from: Route.id })
+  const isSetup = step === 'setup_local_provider'
+
+  // Wait for DOM to be ready before starting Joyride
+  useEffect(() => {
+    if (!loading && filteredModels.length > 0 && isSetup) {
+      const timer = setTimeout(() => {
+        setJoyrideReady(true)
+      }, 100)
+      return () => clearTimeout(timer)
+    } else {
+      setJoyrideReady(false)
+    }
+  }, [loading, filteredModels.length, isSetup])
+
+  const handleJoyrideCallback = (data: CallBackProps) => {
+    const { status, index } = data
+
+    if (status === STATUS.FINISHED && !isDownloading && isLastStep) {
+      const recommendedModel = filteredModels.find((model) =>
+        isRecommendedModel(model.metadata?.id)
+      )
+      if (recommendedModel && recommendedModel.models[0]?.id) {
+        downloadModel(recommendedModel.models[0].id)
+        return
+      }
+    }
+
+    if (status === STATUS.FINISHED) {
+      navigate({
+        to: route.hub,
+      })
+    }
+
+    // Track current step index
+    setCurrentStepIndex(index)
+  }
+
+  // Check if any model is currently downloading
+  const isDownloading = downloadProcesses.length > 0
+
+  const steps = [
+    {
+      target: '.hub-model-card-step',
+      title: 'Recommended Model',
+      disableBeacon: true,
+      content:
+        'These are models available for download from various providers. The featured model from Menlo AI is specifically optimized for tool calling and function execution, making it ideal for building AI agents and interactive applications. Each card shows the model name, size, and download options.',
+    },
+    {
+      target: '.hub-download-button-step',
+      title: isDownloading ? 'Download Progress' : 'Download Model',
+      disableBeacon: true,
+      content: isDownloading
+        ? 'Your model is now downloading. You can track the progress here. Once the download is complete, the model will be available in your local collection and ready to use for AI conversations and tool calling.'
+        : 'Click the Download button to get this recommended model from Menlo AI. This model is optimized for tool calling and function execution, making it perfect for building AI agents.',
+    },
+  ]
+
+  // Check if we're on the last step
+  const isLastStep = currentStepIndex === steps.length - 1
return ( return (
<div className="flex h-full w-full"> <>
<div className="flex flex-col h-full w-full"> <Joyride
<HeaderPage> run={joyrideReady}
<div className="pr-4 py-3 h-10 w-full flex items-center justify-between relative z-20 "> floaterProps={{
<div className="flex items-center gap-2 w-full"> hideArrow: true,
{isSearching ? ( }}
<Loader className="size-4 animate-spin text-main-view-fg/60" /> steps={steps}
) : ( tooltipComponent={CustomTooltipJoyRide}
<IconSearch className="text-main-view-fg/60" size={14} /> spotlightPadding={0}
)} continuous={true}
<input showSkipButton={!isLastStep}
placeholder="Search for models on Hugging Face..." hideCloseButton={true}
value={searchValue} spotlightClicks={true}
onChange={handleSearchChange} disableOverlayClose={true}
className="w-full focus:outline-none" callback={handleJoyrideCallback}
/> locale={{
</div> back: 'Back',
<div className="flex items-center gap-2 shrink-0"> close: 'Close',
<DropdownMenu> last: !isDownloading ? 'Download' : 'Finish',
<DropdownMenuTrigger> next: 'Next',
<span skip: 'Skip',
title="Edit Theme" }}
className="flex cursor-pointer items-center gap-1 px-2 py-1 rounded-sm bg-main-view-fg/15 text-sm outline-none text-main-view-fg font-medium" />
> <div className="flex h-full w-full">
{ <div className="flex flex-col h-full w-full ">
sortOptions.find( <HeaderPage>
(option) => option.value === sortSelected <div className="pr-4 py-3 h-10 w-full flex items-center justify-between relative z-20">
)?.name <div className="flex items-center gap-2 w-full">
} {isSearching ? (
</span> <Loader className="size-4 animate-spin text-main-view-fg/60" />
</DropdownMenuTrigger> ) : (
<DropdownMenuContent side="bottom" align="end"> <IconSearch className="text-main-view-fg/60" size={14} />
{sortOptions.map((option) => ( )}
<DropdownMenuItem <input
className={cn( placeholder="Search for models on Hugging Face..."
'cursor-pointer my-0.5', value={searchValue}
sortSelected === option.value && 'bg-main-view-fg/5' onChange={handleSearchChange}
)} className="w-full focus:outline-none"
key={option.value}
onClick={() => setSortSelected(option.value)}
>
{option.name}
</DropdownMenuItem>
))}
</DropdownMenuContent>
</DropdownMenu>
<div className="flex items-center gap-2">
<Switch
checked={showOnlyDownloaded}
onCheckedChange={setShowOnlyDownloaded}
/> />
<span className="text-xs text-main-view-fg/70 font-medium whitespace-nowrap"> </div>
Downloaded <div className="flex items-center gap-2 shrink-0">
</span> <DropdownMenu>
<DropdownMenuTrigger>
<span
title="Edit Theme"
className="flex cursor-pointer items-center gap-1 px-2 py-1 rounded-sm bg-main-view-fg/15 text-sm outline-none text-main-view-fg font-medium"
>
{
sortOptions.find(
(option) => option.value === sortSelected
)?.name
}
</span>
</DropdownMenuTrigger>
<DropdownMenuContent side="bottom" align="end">
{sortOptions.map((option) => (
<DropdownMenuItem
className={cn(
'cursor-pointer my-0.5',
sortSelected === option.value && 'bg-main-view-fg/5'
)}
key={option.value}
onClick={() => setSortSelected(option.value)}
>
{option.name}
</DropdownMenuItem>
))}
</DropdownMenuContent>
</DropdownMenu>
<div className="flex items-center gap-2">
<Switch
checked={showOnlyDownloaded}
onCheckedChange={setShowOnlyDownloaded}
/>
<span className="text-xs text-main-view-fg/70 font-medium whitespace-nowrap">
Downloaded
</span>
</div>
</div> </div>
</div> </div>
</div> </HeaderPage>
</HeaderPage> <div className="p-4 w-full h-[calc(100%-32px)] overflow-y-auto first-step-setup-local-provider">
<div className="p-4 w-full h-[calc(100%-32px)] overflow-y-auto"> <div className="flex flex-col h-full justify-between gap-4 gap-y-3 w-4/5 mx-auto">
<div className="flex flex-col h-full justify-between gap-4 gap-y-3 w-4/5 mx-auto"> {loading ? (
{loading ? ( <div className="flex items-center justify-center">
<div className="flex items-center justify-center"> <div className="text-center text-muted-foreground">
<div className="text-center text-muted-foreground"> Loading models...
Loading models... </div>
</div> </div>
</div> ) : filteredModels.length === 0 ? (
) : filteredModels.length === 0 ? ( <div className="flex items-center justify-center">
<div className="flex items-center justify-center"> <div className="text-center text-muted-foreground">
<div className="text-center text-muted-foreground"> No models found
No models found </div>
</div> </div>
</div> ) : (
) : ( <div className="flex flex-col pb-2 mb-2 gap-2 ">
<div className="flex flex-col pb-2 mb-2 gap-2"> {filteredModels.map((model) => (
{filteredModels.map((model) => ( <div key={model.id}>
<div key={model.id}> <Card
<Card header={
header={ <div className="flex items-center justify-between gap-x-2">
<div className="flex items-center justify-between gap-x-2"> <Link
<Link to={
to={ `https://huggingface.co/${model.metadata?.id}` as string
`https://huggingface.co/${model.metadata?.id}` as string }
} target="_blank"
target="_blank" >
> <h1
<h1 className="text-main-view-fg font-medium text-base capitalize truncate"> className={cn(
{extractModelName(model.metadata?.id) || ''} 'text-main-view-fg font-medium text-base capitalize truncate',
</h1> isRecommendedModel(model.metadata?.id)
</Link> ? 'hub-model-card-step'
<div className="shrink-0 space-x-3 flex items-center"> : ''
<span className="text-main-view-fg/70 font-medium text-xs"> )}
{toGigabytes(model.models?.[0]?.size)} >
</span> {extractModelName(model.metadata?.id) || ''}
<DownloadButtonPlaceholder model={model} /> </h1>
</div> </Link>
</div> <div className="shrink-0 space-x-3 flex items-center">
} <span className="text-main-view-fg/70 font-medium text-xs">
> {toGigabytes(model.models?.[0]?.size)}
<div className="line-clamp-2 mt-3 text-main-view-fg/60"> </span>
<RenderMarkdown <DownloadButtonPlaceholder model={model} />
enableRawHtml={true}
className="select-none"
components={{
a: ({ ...props }) => (
<a
{...props}
target="_blank"
rel="noopener noreferrer"
/>
),
}}
content={
extractDescription(model.metadata?.description) ||
''
}
/>
</div>
<div className="flex items-center gap-2 mt-2">
<span className="capitalize text-main-view-fg/80">
By {model?.author}
</span>
<div className="flex items-center gap-4 ml-2">
<div className="flex items-center gap-1">
<IconDownload
size={18}
className="text-main-view-fg/50"
title="Downloads"
/>
<span className="text-main-view-fg/80">
{model.metadata?.downloads || 0}
</span>
</div>
<div className="flex items-center gap-1">
<IconFileCode
size={20}
className="text-main-view-fg/50"
title="Variants"
/>
<span className="text-main-view-fg/80">
{model.models?.length || 0}
</span>
</div>
{model.models.length > 1 && (
<div className="flex items-center gap-2">
<Switch
checked={!!expandedModels[model.id]}
onCheckedChange={() =>
toggleModelExpansion(model.id)
}
/>
<p className="text-main-view-fg/70">
Show variants
</p>
</div> </div>
)} </div>
}
>
<div className="line-clamp-2 mt-3 text-main-view-fg/60">
<RenderMarkdown
enableRawHtml={true}
className="select-none"
components={{
a: ({ ...props }) => (
<a
{...props}
target="_blank"
rel="noopener noreferrer"
/>
),
}}
content={
extractDescription(model.metadata?.description) ||
''
}
/>
</div> </div>
</div> <div className="flex items-center gap-2 mt-2">
{expandedModels[model.id] && model.models.length > 0 && ( <span className="capitalize text-main-view-fg/80">
<div className="mt-5"> By {model?.author}
{model.models.map((variant) => ( </span>
<CardItem <div className="flex items-center gap-4 ml-2">
key={variant.id} <div className="flex items-center gap-1">
title={variant.id} <IconDownload
actions={ size={18}
<div className="flex items-center gap-2"> className="text-main-view-fg/50"
<p className="text-main-view-fg/70 font-medium text-xs"> title="Downloads"
{toGigabytes(variant.size)} />
</p> <span className="text-main-view-fg/80">
{(() => { {model.metadata?.downloads || 0}
const isDownloading = </span>
downloadProcesses.some( </div>
(e) => e.id === variant.id <div className="flex items-center gap-1">
) <IconFileCode
const downloadProgress = size={20}
downloadProcesses.find( className="text-main-view-fg/50"
(e) => e.id === variant.id title="Variants"
)?.progress || 0 />
const isDownloaded = <span className="text-main-view-fg/80">
llamaProvider?.models.some( {model.models?.length || 0}
(m: { id: string }) => </span>
m.id === variant.id </div>
) {model.models.length > 1 && (
<div className="flex items-center gap-2 hub-show-variants-step">
<Switch
checked={!!expandedModels[model.id]}
onCheckedChange={() =>
toggleModelExpansion(model.id)
}
/>
<p className="text-main-view-fg/70">
Show variants
</p>
</div>
)}
</div>
</div>
{expandedModels[model.id] &&
model.models.length > 0 && (
<div className="mt-5">
{model.models.map((variant) => (
<CardItem
key={variant.id}
title={variant.id}
actions={
<div className="flex items-center gap-2">
<p className="text-main-view-fg/70 font-medium text-xs">
{toGigabytes(variant.size)}
</p>
{(() => {
const isDownloading =
downloadProcesses.some(
(e) => e.id === variant.id
)
const downloadProgress =
downloadProcesses.find(
(e) => e.id === variant.id
)?.progress || 0
const isDownloaded =
llamaProvider?.models.some(
(m: { id: string }) =>
m.id === variant.id
)
if (isDownloading) { if (isDownloading) {
return ( return (
<> <>
<div className="flex items-center gap-2 w-20"> <div className="flex items-center gap-2 w-20">
<Progress <Progress
value={downloadProgress * 100} value={downloadProgress * 100}
/> />
<span className="text-xs text-center text-main-view-fg/70"> <span className="text-xs text-center text-main-view-fg/70">
{Math.round( {Math.round(
downloadProgress * 100 downloadProgress * 100
)} )}
% %
</span> </span>
</div> </div>
</> </>
) )
} }
if (isDownloaded) { if (isDownloaded) {
return ( return (
<div <div
className="flex items-center justify-center rounded bg-main-view-fg/10" className="flex items-center justify-center rounded bg-main-view-fg/10"
title="Use this model" title="Use this model"
> >
<Button <Button
variant="link" variant="link"
size="sm" size="sm"
onClick={() =>
handleUseModel(variant.id)
}
>
Use
</Button>
</div>
)
}
return (
<div
className="size-6 cursor-pointer flex items-center justify-center rounded hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out"
title="Download model"
onClick={() => onClick={() =>
handleUseModel(variant.id) downloadModel(variant.id)
} }
> >
Use <IconDownload
</Button> size={16}
</div> className="text-main-view-fg/80"
) />
} </div>
)
return ( })()}
<div </div>
className="size-6 cursor-pointer flex items-center justify-center rounded hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out" }
title="Download model" />
onClick={() => ))}
downloadModel(variant.id) </div>
} )}
> </Card>
<IconDownload </div>
size={16} ))}
className="text-main-view-fg/80" </div>
/> )}
</div> </div>
)
})()}
</div>
}
/>
))}
</div>
)}
</Card>
</div>
))}
</div>
)}
</div> </div>
</div> </div>
</div> </div>
</div> </>
) )
} }

View File

@@ -1,5 +1,7 @@
+/* eslint-disable @typescript-eslint/no-explicit-any */
 import { ExtensionManager } from '@/lib/extension'
 import { normalizeProvider } from '@/lib/models'
+import { hardcodedModel } from '@/utils/models'
 import { EngineManager, ExtensionTypeEnum, ModelExtension } from '@janhq/core'
 import { Model as CoreModel } from '@janhq/core'
@@ -17,22 +19,25 @@ export const fetchModels = async () => {
  * Fetches the sources of the models.
  * @returns A promise that resolves to the model sources.
  */
-export const fetchModelSources = async () => {
+export const fetchModelSources = async (): Promise<any[]> => {
   const extension = ExtensionManager.getInstance().get<ModelExtension>(
     ExtensionTypeEnum.Model
   )
-  if (!extension) return []
+  if (!extension) return [hardcodedModel]
   try {
     const sources = await extension.getSources()
-    return sources.map((m) => ({
+    const mappedSources = sources.map((m) => ({
       ...m,
       models: m.models.sort((a, b) => a.size - b.size),
     }))
+
+    // Prepend the hardcoded model to the sources
+    return [hardcodedModel, ...mappedSources]
   } catch (error) {
     console.error('Failed to fetch model sources:', error)
-    return []
+    return [hardcodedModel]
   }
 }
@@ -40,10 +45,13 @@ export const fetchModelSources = async () => {
  * Fetches the model hub.
  * @returns A promise that resolves to the model hub.
  */
-export const fetchModelHub = async () => {
-  return ExtensionManager.getInstance()
+export const fetchModelHub = async (): Promise<any[]> => {
+  const hubData = await ExtensionManager.getInstance()
     .get<ModelExtension>(ExtensionTypeEnum.Model)
     ?.fetchModelsHub()
+
+  // Prepend the hardcoded model to the hub data
+  return hubData ? [hardcodedModel, ...hubData] : [hardcodedModel]
 }
 
 /**
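
A minimal standalone sketch of the fallback pattern this hunk introduces, assuming simplified types and a hypothetical fetcher in place of the real ModelExtension: the hardcoded Jan-nano source is always prepended, and it is the only entry when the extension is missing or throws.

// Illustrative only: simplified shapes, not the app's real types.
type ModelSource = { id: string; models: { id: string; size: number }[] }

const hardcodedSource: ModelSource = {
  id: 'https://huggingface.co/Menlo/Jan-nano',
  models: [{ id: 'Menlo:Jan-nano:Jan-nano.gguf', size: 2497280288 }],
}

// Hypothetical fetcher standing in for extension.getSources().
async function loadSources(
  fetchFromExtension?: () => Promise<ModelSource[]>
): Promise<ModelSource[]> {
  if (!fetchFromExtension) return [hardcodedSource]
  try {
    const sources = await fetchFromExtension()
    const sorted = sources.map((s) => ({
      ...s,
      models: [...s.models].sort((a, b) => a.size - b.size),
    }))
    // The featured model always comes first so the Hub can highlight it.
    return [hardcodedSource, ...sorted]
  } catch {
    return [hardcodedSource]
  }
}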

View File

@@ -6,7 +6,10 @@ import {
   ExtensionTypeEnum,
   SettingComponentProps,
 } from '@janhq/core'
-import { ModelCapabilities } from '@/types/models'
+import {
+  DefaultToolUseSupportedModels,
+  ModelCapabilities,
+} from '@/types/models'
 import { modelSettings } from '@/lib/predefined'
 import { fetchModels } from './models'
 import { ExtensionManager } from '@/lib/extension'
@@ -115,7 +118,14 @@ export const getProviders = async (): Promise<ModelProvider[]> => {
             capabilities:
               'capabilities' in model
                 ? (model.capabilities as string[])
-                : [ModelCapabilities.COMPLETION],
+                : [
+                    ModelCapabilities.COMPLETION,
+                    ...(Object.values(DefaultToolUseSupportedModels).some((v) =>
+                      model.id.toLowerCase().includes(v.toLowerCase())
+                    )
+                      ? [ModelCapabilities.TOOLS]
+                      : []),
+                  ],
             provider: providerName,
             settings: Object.values(modelSettings).reduce(
               (acc, setting) => {
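
A minimal sketch of the capability fallback added above, pulled out into a standalone helper for clarity. The enum string values here are assumptions for illustration; the real enums live in @/types/models.

// Assumed values for illustration; see @/types/models for the real enums.
enum ModelCapabilities {
  COMPLETION = 'completion',
  TOOLS = 'tools',
}

enum DefaultToolUseSupportedModels {
  JanNano = 'jan-nano',
  Qwen3 = 'qwen3',
}

// Hypothetical helper: derives default capabilities from a model id when the
// model does not declare a capabilities field of its own.
function defaultCapabilities(modelId: string): string[] {
  const supportsTools = Object.values(DefaultToolUseSupportedModels).some((v) =>
    modelId.toLowerCase().includes(v.toLowerCase())
  )
  return supportsTools
    ? [ModelCapabilities.COMPLETION, ModelCapabilities.TOOLS]
    : [ModelCapabilities.COMPLETION]
}

// defaultCapabilities('Menlo:Jan-nano:Jan-nano.gguf') -> ['completion', 'tools']
// defaultCapabilities('llama-3.2-1b')                 -> ['completion']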

View File

@@ -15,6 +15,12 @@ export enum ModelCapabilities {
   AUDIO_TO_TEXT = 'audio_to_text',
 }
 
+// TODO: Remove this enum when we integrate llama.cpp extension
+export enum DefaultToolUseSupportedModels {
+  JanNano = 'jan-nano',
+  Qwen3 = 'qwen3',
+}
+
 export type ActiveModel = {
   engine: string
   id: string

View File

@@ -0,0 +1,79 @@
export const hardcodedModel = {
author: 'Menlo',
id: 'https://huggingface.co/Menlo/Jan-nano',
metadata: {
'_id': '68492cd9cada68b1d11ca1bd',
'author': 'Menlo',
'cardData': {
license: 'apache-2.0',
pipeline_tag: 'text-generation',
},
'createdAt': '2025-06-11T07:14:33.000Z',
'description':
'---\nlicense: apache-2.0\npipeline_tag: text-generation\n---\n\n## Overview\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/657a81129ea9d52e5cbd67f7/YQci8jiHjAAFpXWYOadrU.png)\n\nJan Nano is a fine-tuned language model built on top of the Qwen3 architecture. Developed as part of the Jan ecosystem, it balances compact size and extended context length, making it ideal for efficient, high-quality text generation in local or embedded environments.\n\nWith 36 transformer blocks, 4B parameters, and an extended context window of 40,960 tokens, Jan Nano is optimized for dialogue, reasoning, and creative tasks. It is released in the Q4_K_M quantized format, enabling faster inference with reduced memory usage.\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)',
'disabled': false,
'downloads': 0,
'gated': false,
'gguf': {
architecture: 'qwen3',
bos_token: '<|endoftext|>',
chat_template:
"{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if message.content is string %}\n {%- set content = message.content %}\n {%- else %}\n {%- set content = '' %}\n {%- endif %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n<think>\\n\\n</think>\\n\\n' 
}}\n{%- endif %}",
context_length: 40960,
eos_token: '<|im_end|>',
total: 4022468096,
},
'id': 'Menlo/Jan-nano',
'lastModified': '2025-06-11T10:00:23.000Z',
'likes': 1,
'model-index': null,
'modelId': 'Menlo/Jan-nano',
'pipeline_tag': 'text-generation',
'private': false,
'sha': '9966a3efaf6fe36ac4f2d8bd4343ae5791def2b0',
'siblings': [
{
rfilename: '.gitattributes',
size: 1569,
},
{
rfilename: 'Jan-nano.gguf',
size: 2497280288,
},
{
rfilename: 'README.md',
size: 817,
},
],
'spaces': [],
'tags': [
'gguf',
'text-generation',
'license:apache-2.0',
'endpoints_compatible',
'region:us',
'conversational',
],
'usedStorage': 7491840896,
'widgetData': [
{
text: 'Hi, what can you help me with?',
},
{
text: 'What is 84 * 3 / 2?',
},
{
text: 'Tell me an interesting fact about the universe!',
},
{
text: 'Explain quantum computing in simple terms.',
},
],
},
models: [
{
id: 'Menlo:Jan-nano:Jan-nano.gguf',
size: 2497280288,
},
],
}
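
Finally, a small sketch of how the Hub flags this hardcoded entry as the recommended model for the onboarding tour, with a simplified stand-in for extractModelName (the real helper comes from @/lib/models):

// Simplified stand-in for extractModelName from '@/lib/models'.
function extractModelName(modelId?: string): string | undefined {
  // 'Menlo/Jan-nano' -> 'Jan-nano'
  return modelId?.split('/').pop()
}

// Mirrors the isRecommendedModel callback added in Hub.tsx.
function isRecommendedModel(modelId?: string): boolean {
  return extractModelName(modelId) === 'Jan-nano'
}

// isRecommendedModel(hardcodedModel.metadata.id) -> true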