Merge remote-tracking branch 'origin/dev' into mobile/dev

# Conflicts:
#	src-tauri/src/core/setup.rs
#	src-tauri/src/lib.rs
#	web-app/src/hooks/useChat.ts
Vanalite 2025-10-01 09:52:01 +07:00
commit 262a1a9544
231 changed files with 1306 additions and 467 deletions

View File

@ -54,6 +54,8 @@ on:
value: ${{ jobs.build-windows-x64.outputs.WIN_SIG }}
FILE_NAME:
value: ${{ jobs.build-windows-x64.outputs.FILE_NAME }}
MSI_FILE_NAME:
value: ${{ jobs.build-windows-x64.outputs.MSI_FILE_NAME }}
jobs:
build-windows-x64:
@ -61,6 +63,7 @@ jobs:
outputs:
WIN_SIG: ${{ steps.metadata.outputs.WIN_SIG }}
FILE_NAME: ${{ steps.metadata.outputs.FILE_NAME }}
MSI_FILE_NAME: ${{ steps.metadata.outputs.MSI_FILE_NAME }}
permissions:
contents: write
steps:
@ -189,9 +192,15 @@ jobs:
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: jan-windows-${{ inputs.new_version }}
name: jan-windows-exe-${{ inputs.new_version }}
path: |
./src-tauri/target/release/bundle/nsis/*.exe
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: jan-windows-msi-${{ inputs.new_version }}
path: |
./src-tauri/target/release/bundle/msi/*.msi
## Set output filename for windows
- name: Set output filename for windows
@ -201,13 +210,18 @@ jobs:
if [ "${{ inputs.channel }}" != "stable" ]; then
FILE_NAME=Jan-${{ inputs.channel }}_${{ inputs.new_version }}_x64-setup.exe
WIN_SIG=$(cat Jan-${{ inputs.channel }}_${{ inputs.new_version }}_x64-setup.exe.sig)
MSI_FILE="Jan-${{ inputs.channel }}_${{ inputs.new_version }}_x64_en-US.msi"
else
FILE_NAME=Jan_${{ inputs.new_version }}_x64-setup.exe
WIN_SIG=$(cat Jan_${{ inputs.new_version }}_x64-setup.exe.sig)
MSI_FILE="Jan_${{ inputs.new_version }}_x64_en-US.msi"
fi
echo "::set-output name=WIN_SIG::$WIN_SIG"
echo "::set-output name=FILE_NAME::$FILE_NAME"
echo "::set-output name=MSI_FILE_NAME::$MSI_FILE"
id: metadata
## Upload to s3 for nightly and beta
@ -220,6 +234,8 @@ jobs:
# Upload for tauri updater
aws s3 cp ./${{ steps.metadata.outputs.FILE_NAME }} s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/${{ steps.metadata.outputs.FILE_NAME }}
aws s3 cp ./${{ steps.metadata.outputs.FILE_NAME }}.sig s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/${{ steps.metadata.outputs.FILE_NAME }}.sig
aws s3 cp ./src-tauri/target/release/bundle/msi/${{ steps.metadata.outputs.MSI_FILE_NAME }} s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/${{ steps.metadata.outputs.MSI_FILE_NAME }}
env:
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
@ -236,3 +252,13 @@ jobs:
asset_path: ./src-tauri/target/release/bundle/nsis/${{ steps.metadata.outputs.FILE_NAME }}
asset_name: ${{ steps.metadata.outputs.FILE_NAME }}
asset_content_type: application/octet-stream
- name: Upload release asset if public provider is github
if: inputs.public_provider == 'github'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
uses: actions/upload-release-asset@v1.0.1
with:
upload_url: ${{ inputs.upload_url }}
asset_path: ./src-tauri/target/release/bundle/msi/${{ steps.metadata.outputs.MSI_FILE_NAME }}
asset_name: ${{ steps.metadata.outputs.MSI_FILE_NAME }}
asset_content_type: application/octet-stream

View File

@ -27,11 +27,13 @@
"devDependencies": {
"@npmcli/arborist": "^7.1.0",
"@types/node": "^22.10.0",
"@types/react": "19.1.2",
"@vitest/coverage-v8": "^2.1.8",
"@vitest/ui": "^2.1.8",
"eslint": "8.57.0",
"happy-dom": "^15.11.6",
"pacote": "^21.0.0",
"react": "19.0.0",
"request": "^2.88.2",
"request-progress": "^3.0.0",
"rimraf": "^6.0.1",
@ -44,5 +46,8 @@
"rxjs": "^7.8.1",
"ulidx": "^2.3.0"
},
"peerDependencies": {
"react": "19.0.0"
},
"packageManager": "yarn@4.5.3"
}

View File

@ -10,7 +10,7 @@ export default defineConfig([
sourcemap: true,
},
platform: 'browser',
external: ['path'],
external: ['path', 'react', 'react-dom', 'react/jsx-runtime'],
define: {
NODE: JSON.stringify(`${pkgJson.name}/${pkgJson.node}`),
VERSION: JSON.stringify(pkgJson.version),

View File

@ -1,5 +1,6 @@
import { MCPInterface, MCPTool, MCPToolCallResult } from '../../types'
import { MCPInterface, MCPTool, MCPToolCallResult, MCPToolComponentProps } from '../../types'
import { BaseExtension, ExtensionTypeEnum } from '../extension'
import type { ComponentType } from 'react'
/**
* MCP (Model Context Protocol) extension for managing tools and server communication.
@ -18,4 +19,16 @@ export abstract class MCPExtension extends BaseExtension implements MCPInterface
abstract getConnectedServers(): Promise<string[]>
abstract refreshTools(): Promise<void>
abstract isHealthy(): Promise<boolean>
/**
* Optional method to provide a custom UI component for tools
* @returns A React component or null if no custom component is provided
*/
getToolComponent?(): ComponentType<MCPToolComponentProps> | null
/**
* Optional method to get the list of tool names that should be disabled by default
* @returns Array of tool names that should be disabled by default for new users
*/
getDefaultDisabledTools?(): Promise<string[]>
}
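
A hedged, consumer-side sketch (TypeScript; the `ExtensionManager` accessor and helper name below follow the web-app code later in this diff and are otherwise illustrative) of how these two optional hooks might be queried:

```ts
import { ExtensionTypeEnum, MCPExtension } from '@janhq/core'
// Path assumption: matches the web-app import used further down in this diff
import { ExtensionManager } from '@/lib/extension'

// Illustrative helper: read both optional hooks, tolerating extensions that omit them
async function loadMcpToolExtras() {
  const mcp = ExtensionManager.getInstance().get<MCPExtension>(ExtensionTypeEnum.MCP)
  // Custom tool UI component, or null when the extension provides none
  const ToolComponent = mcp?.getToolComponent?.() ?? null
  // Tool names disabled by default for new users; empty when unimplemented
  const defaultDisabled = (await mcp?.getDefaultDisabledTools?.()) ?? []
  return { ToolComponent, defaultDisabled }
}
```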

View File

@ -22,3 +22,17 @@ export interface MCPServerInfo {
connected: boolean
tools?: MCPTool[]
}
/**
* Props for MCP tool UI components
*/
export interface MCPToolComponentProps {
/** List of available MCP tools */
tools: MCPTool[]
/** Function to check if a specific tool is currently enabled */
isToolEnabled: (toolName: string) => boolean
/** Function to toggle a tool's enabled/disabled state */
onToolToggle: (toolName: string, enabled: boolean) => void
}
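
For orientation, a minimal hypothetical component consuming these props could look like the sketch below (the WebSearchButton added later in this diff is the real example):

```tsx
import type { MCPToolComponentProps } from '@janhq/core'

// Hypothetical per-tool toggle list; component name and markup are illustrative only
export const ToolToggleList = ({ tools, isToolEnabled, onToolToggle }: MCPToolComponentProps) => (
  <ul>
    {tools.map((tool) => (
      <li key={tool.name}>
        <label>
          <input
            type="checkbox"
            checked={isToolEnabled(tool.name)}
            onChange={(e) => onToolToggle(tool.name, e.target.checked)}
          />
          {tool.name}
        </label>
      </li>
    ))}
  </ul>
)
```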

View File

@ -115,6 +115,9 @@
/docs/built-in/tensorrt-llm /docs/desktop/llama-cpp 302
/docs/desktop/docs/desktop/linux /docs/desktop/install/linux 302
/windows /docs/desktop/install/windows 302
/docs/quickstart /docs/ 302
/docs/desktop/mac /docs/desktop/install/mac 302
/handbook/open-superintelligence /handbook/why/open-superintelligence 302
/guides/integrations/continue/ /docs/desktop/server-examples/continue-dev 302
/continue-dev /docs/desktop/server-examples/continue-dev 302

View File

@ -6,7 +6,7 @@ const camelCase = (str) => {
return str.replace(/[-_](\w)/g, (_, c) => c.toUpperCase())
}
const categories = ['building-jan', 'research']
const categories = ['building-jan', 'research', 'guides']
/**
* @param {import("plop").NodePlopAPI} plop

Binary file not shown (added, 325 KiB).

Binary file not shown (added, 355 KiB).

Binary file not shown (added, 320 KiB).

Binary file not shown (added, 395 KiB).

View File

@ -19,6 +19,10 @@ const Blog = () => {
name: 'Research',
id: 'research',
},
{
name: 'Guides',
id: 'guides',
},
]
return (

Binary file not shown (added, 320 KiB).

Binary file not shown (added, 1.4 MiB).

Binary file not shown (added, 395 KiB).

View File

@ -20,5 +20,10 @@
"title": "Research",
"display": "normal",
"href": "/blog?category=research"
},
"guides-cat": {
"title": "Guides",
"display": "normal",
"href": "/blog?category=guides"
}
}

View File

@ -0,0 +1,123 @@
---
title: "Private AI for legal professionals who need confidentiality"
description: "It's possible to use AI without risking client data. Jan helps lawyers save time while keeping clients safe."
tags: AI, ai for law, ai for lawyers, ChatGPT alternative, Jan, local AI, offline AI
categories: guides
date: 2025-09-30
ogImage: assets/images/general/jan-for-ai-law-assistant-chat.jpeg
twitter:
card: summary_large_image
site: "@jandotai"
title: "Private AI for legal professionals who need confidentiality"
description: "It's possible to use AI without risking client data. Jan helps lawyers save time while keeping clients safe."
image: assets/images/general/jan-assistants-ai-for-legal.jpeg
---
import { Callout } from 'nextra/components'
import CTABlog from '@/components/Blog/CTA'
import { OpenAIStatusChecker } from '@/components/OpenAIStatusChecker'
# Private AI for legal professionals who need confidentiality
![AI for Law](/assets/images/general/jan-for-ai-law-assistant-chat.jpeg)
Yes, it's possible to use AI in legal work without risking client data.
<Callout type="warning">
Client trust depends on privacy. Sending documents into public AI tools risks compliance and reputation.
</Callout>
Start by [downloading Jan](/download) and installing the **Jan v1 model**. Once installed, you can create assistants tailored to your practice and keep contracts, case notes, and client files under your control.
<Callout type="info">
**Why use Jan for legal tasks**
- Runs locally on your hardware, no cloud uploads
- Keeps chats and interactions private
- Works offline once installed
- Lets you build assistants for your own workflows
</Callout>
---
## Create your assistant
Once Jan is installed with the **Jan v1 model**, onboarding will guide you through downloading and setup.
Click **Create assistant** to start:
![Create your first AI assistant in Jan](./_assets/create-assistant-1.jpeg)
*Create your first assistant in Jan*
Add an assistant name and prompt:
![Jan assistant for contract review](./_assets/jan-assistant-for-law.png)
*Example of a Jan assistant for contract review*
You can create assistants using specific prompts. Below are examples for common legal workflows.
---
## Contract review assistant
AI can help lawyers move faster through long contracts by pointing out what matters most.
**Prompt for Jan:**
> You are a contract review assistant.
> When I paste a contract:
> - Highlight risky or unusual clauses
> - Flag ambiguous or missing terms
> - Summarize the agreement in plain English for a non-lawyer client
> Format your response with sections: **Risks**, **Ambiguities/Missing**, **Summary**.
> Do not provide legal advice.
---
## Drafting assistant
Use AI to create first drafts of NDAs, service agreements, or client letters. You still refine the output, but AI saves time on boilerplate.
**Prompt for Jan:**
> You are a drafting assistant.
> When asked to draft a legal agreement or client letter:
> - Produce a professional first version
> - Use clear, concise language
> - Leave placeholders like [Party Name], [Date], [Amount] for details
> - Structure output with headings, numbered clauses, and consistent formatting
> Do not provide legal advice.
---
## Case preparation assistant
Case prep often means reading hundreds of pages. AI can turn depositions, discovery files, or judgments into concise notes.
![Jan legal case preparation assistant](./_assets/jan-for-ai-law-assistant-chat.jpeg)
*Jan chat interface for case preparation — process documents and extract key information*
**Prompt for Jan:**
> You are a case preparation assistant.
> When I provide case materials:
> - Extract key facts, issues, and arguments
> - Present them as bullet points under headings: **Facts**, **Issues**, **Arguments**
> - Keep summaries concise (under 500 words unless I request more)
> Use plain English, no speculation or legal conclusions.
---
## Knowledge management assistant
Law firms accumulate memos, policies, and precedents. AI can help organize and retrieve them quickly.
**Prompt for Jan:**
> You are a knowledge management assistant.
> When I ask questions about internal documents:
> - Return concise summaries or direct excerpts
> - Always cite the source (e.g., “Policy Manual, Section 4”)
> - If not found in provided material, reply “Not found in documents.”
> Do not invent information.
---
## Final note
AI in legal practice is not about replacing lawyers. It's about handling repetitive tasks safely so you can focus on real decisions.
With private AI, you gain efficiency without compromising client confidentiality.
<CTABlog />

View File

@ -0,0 +1,134 @@
---
title: "AI for teachers who care about student privacy"
description: "Use AI in teaching without risking student data. Jan helps teachers plan lessons, grade faster, and communicate with parents."
tags: AI, ai for teachers, ChatGPT alternative, Jan, local AI, offline AI, education
categories: guides
date: 2025-10-01
ogImage: assets/images/general/ai-for-teacher.jpeg
twitter:
card: summary_large_image
site: "@jandotai"
title: "AI for teachers who care about student privacy"
description: "Use AI in teaching without risking student data. Jan helps teachers plan lessons, grade faster, and communicate with parents."
image: assets/images/general/ai-for-teacher.jpeg
---
import { Callout } from 'nextra/components'
import CTABlog from '@/components/Blog/CTA'
# AI for teachers who care about student privacy
![AI for teachers](/assets/images/general/ai-for-teacher.jpeg)
AI can help teachers handle the work that piles up outside class. It can draft a lesson outline, suggest feedback on essays, or turn notes into a polite parent email. These are the tasks that usually stretch into evenings and weekends.
<Callout>
Most AI tools like ChatGPT run in the cloud. Sharing lesson plans, student writing, or parent details there risks compliance and trust.
</Callout>
That's where Jan comes in:
- [Download Jan](/download)
- You get the same time-saving help
- Your data never leaves your device.
<video controls>
<source src="/assets/images/general/jan-ai-for-teacher.mp4" type="video/mp4" />
Your browser does not support the video tag.
</video>
*See how teachers use Jan for AI-powered lesson planning and grading*
<Callout type="info">
**Why use Jan for teaching**
- Runs locally, no cloud servers
- Keeps lesson plans and student data private
- Works offline once installed
- Lets you build assistants for your daily teaching tasks
</Callout>
---
## Create your assistant
Once Jan is installed, click **Create assistant** and add one of the prompts below. Each assistant is for a specific classroom task.
![Create your first AI assistant in Jan](/assets/images/general/assistants-ai-for-teachers.jpeg)
---
## Lesson planning assistant
AI can draft lesson outlines in minutes. You adapt and refine them for your students.
**Prompt for Jan:**
> You are a lesson planning assistant.
> When I give you a topic or subject:
> - Suggest a lesson outline with objectives, activities, and discussion questions
> - Adjust for different grade levels if I specify
> - Keep plans practical and realistic for a classroom setting
Example ask: For Grade 6 science on ecosystems. Objectives: define food chains, explain producer/consumer roles. Activity: group poster on an ecosystem. Questions: How would removing one species affect the whole system?
---
## Grading support assistant
AI won't replace your judgment, but it can make feedback faster and more consistent.
**Prompt for Jan:**
> You are a grading support assistant.
> When I paste student writing or answers:
> - Highlight strengths and areas for improvement
> - Suggest short, constructive feedback I can reuse
> - Keep tone supportive and professional
> Do not assign final grades.
Example: For a history essay. Strength: clear thesis. Improvement: weak evidence. Feedback: "Great thesis and structure. Next time, support your points with specific historical examples."
---
## Parent communication assistant
Writing parent emails is important but time-consuming.
**Prompt for Jan:**
> You are a parent communication assistant.
> When I give you key points about a student:
> - Draft a polite and empathetic email to parents
> - Use clear and professional language
> - Keep tone supportive, not overly formal
> Only include details I provide.
Example: Notes: “Student is falling behind on homework, otherwise engaged in class.” - Output: a short, encouraging message suggesting a check-in at home.
---
## Classroom resources assistant
Generate quizzes, worksheets, or practice activities at short notice.
**Prompt for Jan:**
> You are a classroom resource assistant.
> When I provide a topic or subject:
> - Generate sample quiz questions (multiple choice and short answer)
> - Suggest short practice activities
> - Provide answer keys separately
> Keep material age-appropriate for the level I specify.
Example: For Grade 4 fractions. 5 multiple-choice questions with answer key, plus a quick worksheet with 3 practice problems.
---
## Getting started
1. [Download Jan](/download).
2. Install the Jan model (guided in-app)
3. Create your first assistant using one of the prompts above
4. Test with non-sensitive examples first
5. Use it in real classroom tasks once you're comfortable
---
## Final note
AI isn't here to replace teachers. It's here to take repetitive tasks off your plate so you can focus on teaching. With Jan, you can use AI confidently without risking student privacy.
<CTABlog />

View File

@ -18,7 +18,7 @@ import { OpenAIStatusChecker } from '@/components/OpenAIStatusChecker'
# If ChatGPT is down, switch to AI that never goes down
If you're seeing ChatGPT is down, it could a good signal to switch to [Jan](https://www.jan.ai/), AI that never goes down.
If you're seeing ChatGPT is down, it could be a good signal to switch to [Jan](https://www.jan.ai/), AI that never goes down.
## 🔴 Realtime Status: Is ChatGPT down?
<Callout>
@ -108,17 +108,17 @@ When ChatGPT experiences issues, you might see these error messages:
## Quick answers about ChatGPT status
### Is ChatGPT down today?
Check the ChatGPT realtime status above. If ChatGPT is down, you'll see it here.
### Is ChatGPT down?
Check the ChatGPT realtime status above. [See if ChatGPT is down right now.](https://www.jan.ai/post/is-chatgpt-down-use-jan#-realtime-status-is-chatgpt-down)
### Why is ChatGPT down?
Usually server overload, maintenance, or outages at OpenAI.
### What does “ChatGPT is at capacity” mean?
### What does "ChatGPT is at capacity" mean?
Too many users are online at the same time. You'll need to wait or switch to Jan instead.
### Is ChatGPT shutting down?
No, ChatGPT isnt shutting down. Outages are temporary.
No, ChatGPT isn't shutting down. Outages are temporary.
### Can I use ChatGPT offline?
No. ChatGPT always requires internet. For [offline AI](https://www.jan.ai/post/offline-chatgpt-alternative), use [Jan](https://jan.ai).

View File

@ -22,6 +22,9 @@
},
"devDependencies": {
"@janhq/core": "workspace:*",
"@tabler/icons-react": "^3.34.0",
"@types/react": "19.1.2",
"react": "19.0.0",
"typescript": "5.9.2",
"vite": "5.4.20",
"vitest": "2.1.9",
@ -29,6 +32,8 @@
},
"peerDependencies": {
"@janhq/core": "*",
"@tabler/icons-react": "*",
"react": "19.0.0",
"zustand": "5.0.3"
},
"dependencies": {

View File

@ -0,0 +1,54 @@
import { useMemo, useCallback } from 'react'
import { IconWorld } from '@tabler/icons-react'
import { MCPToolComponentProps } from '@janhq/core'
// List of tool names considered as web search tools
const WEB_SEARCH_TOOL_NAMES = ['google_search', 'scrape'];
export const WebSearchButton = ({
tools,
isToolEnabled,
onToolToggle,
}: MCPToolComponentProps) => {
const webSearchTools = useMemo(
() => tools.filter((tool) => WEB_SEARCH_TOOL_NAMES.includes(tool.name)),
[tools]
)
// Check if all web search tools are enabled
const isEnabled = useMemo(
() => webSearchTools.every((tool) => isToolEnabled(tool.name)),
[webSearchTools, isToolEnabled]
)
const handleToggle = useCallback(() => {
// Toggle all web search tools at once
const newState = !isEnabled
webSearchTools.forEach((tool) => {
onToolToggle(tool.name, newState)
})
}, [isEnabled, webSearchTools, onToolToggle])
// Early return if no web search tools available (after the hooks, so they run unconditionally)
if (webSearchTools.length === 0) {
return null
}
return (
<button
onClick={handleToggle}
className={`h-7 px-2 py-1 flex items-center justify-center rounded-md transition-all duration-200 ease-in-out gap-1 cursor-pointer ml-0.5 border-0 ${
isEnabled
? 'bg-accent/20 text-accent'
: 'bg-transparent text-main-view-fg/70 hover:bg-main-view-fg/5'
}`}
title={isEnabled ? 'Disable Web Search' : 'Enable Web Search'}
>
<IconWorld
size={16}
className={isEnabled ? 'text-accent' : 'text-main-view-fg/70'}
/>
<span className={`text-sm font-medium ${isEnabled ? 'text-accent' : ''}`}>Search</span>
</button>
)
}

View File

@ -0,0 +1 @@
export { WebSearchButton } from './WebSearchButton'

View File

@ -4,11 +4,13 @@
* Uses official MCP TypeScript SDK with proper session handling
*/
import { MCPExtension, MCPTool, MCPToolCallResult } from '@janhq/core'
import { MCPExtension, MCPTool, MCPToolCallResult, MCPToolComponentProps } from '@janhq/core'
import { getSharedAuthService, JanAuthService } from '../shared'
import { Client } from '@modelcontextprotocol/sdk/client/index.js'
import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'
import { JanMCPOAuthProvider } from './oauth-provider'
import { WebSearchButton } from './components'
import type { ComponentType } from 'react'
// JAN_API_BASE is defined in vite.config.ts (defaults to 'https://api-dev.jan.ai/jan/v1')
declare const JAN_API_BASE: string
@ -232,4 +234,27 @@ export default class MCPExtensionWeb extends MCPExtension {
throw error
}
}
/**
* Provides a custom UI component for web search tools
* @returns The WebSearchButton component
*/
getToolComponent(): ComponentType<MCPToolComponentProps> | null {
return WebSearchButton
}
/**
* Returns the list of tool names that should be disabled by default for new users
* All MCP web tools are disabled by default to prevent accidental API usage
* @returns Array of tool names to disable by default
*/
async getDefaultDisabledTools(): Promise<string[]> {
try {
const tools = await this.getTools()
return tools.map(tool => tool.name)
} catch (error) {
console.error('Failed to get default disabled tools:', error)
return []
}
}
}

View File

@ -3,6 +3,7 @@
"target": "ES2020",
"module": "ESNext",
"moduleResolution": "bundler",
"jsx": "react-jsx",
"allowSyntheticDefaultImports": true,
"esModuleInterop": true,
"strict": true,

View File

@ -9,7 +9,7 @@ export default defineConfig({
fileName: 'index'
},
rollupOptions: {
external: ['@janhq/core', 'zustand']
external: ['@janhq/core', 'zustand', 'react', 'react-dom', 'react/jsx-runtime', '@tabler/icons-react']
},
emptyOutDir: false // Don't clean the output directory
},

View File

@ -106,13 +106,11 @@ async function main() {
}
// Adjust these URLs based on latest releases
const bunVersion = '1.2.10' // Example Bun version
const bunUrl = `https://github.com/oven-sh/bun/releases/download/bun-v${bunVersion}/bun-${bunPlatform}.zip`
const bunUrl = `https://github.com/oven-sh/bun/releases/latest/download/bun-${bunPlatform}.zip`
const uvVersion = '0.6.17' // Example UV version
let uvUrl = `https://github.com/astral-sh/uv/releases/download/${uvVersion}/uv-${uvPlatform}.tar.gz`
let uvUrl = `https://github.com/astral-sh/uv/releases/latest/download/uv-${uvPlatform}.tar.gz`
if (platform === 'win32') {
uvUrl = `https://github.com/astral-sh/uv/releases/download/${uvVersion}/uv-${uvPlatform}.zip`
uvUrl = `https://github.com/astral-sh/uv/releases/latest/download/uv-${uvPlatform}.zip`
}
console.log(`Downloading Bun for ${bunPlatform}...`)

View File

@ -62,6 +62,7 @@ pub async fn estimate_kv_cache_internal(
ctx_size: Option<u64>,
) -> Result<KVCacheEstimate, KVCacheError> {
log::info!("Received ctx_size parameter: {:?}", ctx_size);
log::info!("Received model metadata:\n{:?}", &meta);
let arch = meta
.get("general.architecture")
.ok_or(KVCacheError::ArchitectureNotFound)?;
@ -94,15 +95,43 @@ pub async fn estimate_kv_cache_internal(
let key_len_key = format!("{}.attention.key_length", arch);
let val_len_key = format!("{}.attention.value_length", arch);
let key_len = meta
let mut key_len = meta
.get(&key_len_key)
.and_then(|s| s.parse::<u64>().ok())
.unwrap_or(0);
let val_len = meta
let mut val_len = meta
.get(&val_len_key)
.and_then(|s| s.parse::<u64>().ok())
.unwrap_or(0);
// Fallback: calculate from embedding_length if key/val lengths not found
if key_len == 0 || val_len == 0 {
let emb_len_key = format!("{}.embedding_length", arch);
let emb_len = meta
.get(&emb_len_key)
.and_then(|s| s.parse::<u64>().ok())
.unwrap_or(0);
if emb_len > 0 && n_head > 0 {
// For most transformers: head_dim = embedding_length / total_heads
let total_heads = meta
.get(&n_head_key)
.and_then(|s| s.parse::<u64>().ok())
.unwrap_or(n_head);
let head_dim = emb_len / total_heads;
key_len = head_dim;
val_len = head_dim;
log::info!(
"Calculated key_len and val_len from embedding_length: {} / {} heads = {} per head",
emb_len,
total_heads,
head_dim
);
}
}
if key_len == 0 || val_len == 0 {
return Err(KVCacheError::EmbeddingLengthInvalid);
}
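
As a worked illustration of the fallback above (numbers assumed, not taken from this diff): a model whose metadata reports an embedding length of 4096 and a head count of 32 gives head_dim = 4096 / 32 = 128, so key_len and val_len both fall back to 128; only if the embedding length or head count is also missing does the function return EmbeddingLengthInvalid.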

View File

@ -627,17 +627,28 @@ async fn schedule_mcp_start_task<R: Runtime>(
}
} else {
let mut cmd = Command::new(config_params.command.clone());
let bun_x_path = format!("{}/bun", bin_path.display());
if config_params.command.clone() == "npx" && can_override_npx(bun_x_path.clone()) {
let bun_x_path = if cfg!(windows) {
bin_path.join("bun.exe")
} else {
bin_path.join("bun")
};
if config_params.command.clone() == "npx"
&& can_override_npx(bun_x_path.display().to_string())
{
let mut cache_dir = app_path.clone();
cache_dir.push(".npx");
cmd = Command::new(bun_x_path);
cmd = Command::new(bun_x_path.display().to_string());
cmd.arg("x");
cmd.env("BUN_INSTALL", cache_dir.to_str().unwrap().to_string());
}
let uv_path = format!("{}/uv", bin_path.display());
if config_params.command.clone() == "uvx" && can_override_uvx(uv_path.clone()) {
let uv_path = if cfg!(windows) {
bin_path.join("uv.exe")
} else {
bin_path.join("uv")
};
if config_params.command.clone() == "uvx" && can_override_uvx(uv_path.display().to_string())
{
let mut cache_dir = app_path.clone();
cache_dir.push(".uvx");
cmd = Command::new(uv_path);
@ -935,3 +946,47 @@ pub async fn should_restart_server(
}
}
}
// Add a new server configuration to the MCP config file
pub fn add_server_config<R: Runtime>(
app_handle: tauri::AppHandle<R>,
server_key: String,
server_value: Value,
) -> Result<(), String> {
add_server_config_with_path(app_handle, server_key, server_value, None)
}
// Add a new server configuration to the MCP config file with custom path support
pub fn add_server_config_with_path<R: Runtime>(
app_handle: tauri::AppHandle<R>,
server_key: String,
server_value: Value,
config_filename: Option<&str>,
) -> Result<(), String> {
let config_filename = config_filename.unwrap_or("mcp_config.json");
let config_path = get_jan_data_folder_path(app_handle).join(config_filename);
let mut config: Value = serde_json::from_str(
&std::fs::read_to_string(&config_path)
.map_err(|e| format!("Failed to read config file: {e}"))?,
)
.map_err(|e| format!("Failed to parse config: {e}"))?;
config
.as_object_mut()
.ok_or("Config root is not an object")?
.entry("mcpServers")
.or_insert_with(|| Value::Object(serde_json::Map::new()))
.as_object_mut()
.ok_or("mcpServers is not an object")?
.insert(server_key, server_value);
std::fs::write(
&config_path,
serde_json::to_string_pretty(&config)
.map_err(|e| format!("Failed to serialize config: {e}"))?,
)
.map_err(|e| format!("Failed to write config file: {e}"))?;
Ok(())
}

View File

@ -1,9 +1,10 @@
use super::helpers::run_mcp_commands;
use super::helpers::{add_server_config, add_server_config_with_path, run_mcp_commands};
use crate::core::app::commands::get_jan_data_folder_path;
use crate::core::state::SharedMcpServers;
use std::collections::HashMap;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use std::sync::Arc;
use tauri::test::mock_app;
use tokio::sync::Mutex;
@ -27,8 +28,7 @@ async fn test_run_mcp_commands() {
.expect("Failed to write to config file");
// Call the run_mcp_commands function
let servers_state: SharedMcpServers =
Arc::new(Mutex::new(HashMap::new()));
let servers_state: SharedMcpServers = Arc::new(Mutex::new(HashMap::new()));
let result = run_mcp_commands(app.handle(), servers_state).await;
// Assert that the function returns Ok(())
@ -37,3 +37,188 @@ async fn test_run_mcp_commands() {
// Clean up the mock config file
std::fs::remove_file(&config_path).expect("Failed to remove config file");
}
#[test]
fn test_add_server_config_new_file() {
let app = mock_app();
let app_path = get_jan_data_folder_path(app.handle().clone());
let config_path = app_path.join("mcp_config_test_new.json");
// Ensure the directory exists
if let Some(parent) = config_path.parent() {
std::fs::create_dir_all(parent).expect("Failed to create parent directory");
}
// Create initial config file with empty mcpServers
let mut file = File::create(&config_path).expect("Failed to create config file");
file.write_all(b"{\"mcpServers\":{}}")
.expect("Failed to write to config file");
drop(file);
// Test adding a new server config
let server_value = serde_json::json!({
"command": "npx",
"args": ["-y", "test-server"],
"env": { "TEST_API_KEY": "test_key" },
"active": false
});
let result = add_server_config_with_path(
app.handle().clone(),
"test_server".to_string(),
server_value.clone(),
Some("mcp_config_test_new.json"),
);
assert!(result.is_ok(), "Failed to add server config: {:?}", result);
// Verify the config was added correctly
let config_content = std::fs::read_to_string(&config_path)
.expect("Failed to read config file");
let config: serde_json::Value = serde_json::from_str(&config_content)
.expect("Failed to parse config");
assert!(config["mcpServers"]["test_server"].is_object());
assert_eq!(config["mcpServers"]["test_server"]["command"], "npx");
assert_eq!(config["mcpServers"]["test_server"]["args"][0], "-y");
assert_eq!(config["mcpServers"]["test_server"]["args"][1], "test-server");
// Clean up
std::fs::remove_file(&config_path).expect("Failed to remove config file");
}
#[test]
fn test_add_server_config_existing_servers() {
let app = mock_app();
let app_path = get_jan_data_folder_path(app.handle().clone());
let config_path = app_path.join("mcp_config_test_existing.json");
// Ensure the directory exists
if let Some(parent) = config_path.parent() {
std::fs::create_dir_all(parent).expect("Failed to create parent directory");
}
// Create config file with existing server
let initial_config = serde_json::json!({
"mcpServers": {
"existing_server": {
"command": "existing_command",
"args": ["arg1"],
"active": true
}
}
});
let mut file = File::create(&config_path).expect("Failed to create config file");
file.write_all(serde_json::to_string_pretty(&initial_config).unwrap().as_bytes())
.expect("Failed to write to config file");
drop(file);
// Add new server
let new_server_value = serde_json::json!({
"command": "new_command",
"args": ["new_arg"],
"active": false
});
let result = add_server_config_with_path(
app.handle().clone(),
"new_server".to_string(),
new_server_value,
Some("mcp_config_test_existing.json"),
);
assert!(result.is_ok(), "Failed to add server config: {:?}", result);
// Verify both servers exist
let config_content = std::fs::read_to_string(&config_path)
.expect("Failed to read config file");
let config: serde_json::Value = serde_json::from_str(&config_content)
.expect("Failed to parse config");
// Check existing server is still there
assert!(config["mcpServers"]["existing_server"].is_object());
assert_eq!(config["mcpServers"]["existing_server"]["command"], "existing_command");
// Check new server was added
assert!(config["mcpServers"]["new_server"].is_object());
assert_eq!(config["mcpServers"]["new_server"]["command"], "new_command");
// Clean up
std::fs::remove_file(&config_path).expect("Failed to remove config file");
}
#[test]
fn test_add_server_config_missing_config_file() {
let app = mock_app();
let app_path = get_jan_data_folder_path(app.handle().clone());
// Ensure the directory exists
if let Some(parent) = app_path.parent() {
std::fs::create_dir_all(parent).ok();
}
std::fs::create_dir_all(&app_path).ok();
let config_path = app_path.join("mcp_config.json");
// Ensure the file doesn't exist
if config_path.exists() {
std::fs::remove_file(&config_path).ok();
}
let server_value = serde_json::json!({
"command": "test",
"args": [],
"active": false
});
let result = add_server_config(
app.handle().clone(),
"test".to_string(),
server_value,
);
assert!(result.is_err(), "Expected error when config file doesn't exist");
assert!(result.unwrap_err().contains("Failed to read config file"));
}
#[cfg(not(target_os = "windows"))]
#[test]
fn test_bin_path_construction_with_join() {
// Test that PathBuf::join properly constructs paths
let bin_path = PathBuf::from("/usr/local/bin");
let bun_path = bin_path.join("bun");
assert_eq!(bun_path.to_string_lossy(), "/usr/local/bin/bun");
// Test conversion to String via display()
let bun_path_str = bun_path.display().to_string();
assert_eq!(bun_path_str, "/usr/local/bin/bun");
}
#[cfg(not(target_os = "windows"))]
#[test]
fn test_uv_path_construction_with_join() {
// Test that PathBuf::join properly constructs paths for uv
let bin_path = PathBuf::from("/usr/local/bin");
let uv_path = bin_path.join("uv");
assert_eq!(uv_path.to_string_lossy(), "/usr/local/bin/uv");
// Test conversion to String via display()
let uv_path_str = uv_path.display().to_string();
assert_eq!(uv_path_str, "/usr/local/bin/uv");
}
#[cfg(target_os = "windows")]
#[test]
fn test_bin_path_construction_windows() {
// Test Windows-style paths
let bin_path = PathBuf::from(r"C:\Program Files\bin");
let bun_path = bin_path.join("bun.exe");
assert_eq!(bun_path.to_string_lossy(), r"C:\Program Files\bin\bun.exe");
let bun_path_str = bun_path.display().to_string();
assert_eq!(bun_path_str, r"C:\Program Files\bin\bun.exe");
}

View File

@ -3,10 +3,11 @@ use std::{
fs::{self, File},
io::Read,
path::PathBuf,
sync::Arc,
};
use tar::Archive;
use tauri::{
App, Emitter, Manager, Runtime,
App, Emitter, Manager, Runtime, Wry
};
#[cfg(desktop)]
@ -14,32 +15,15 @@ use tauri::{
menu::{Menu, MenuItem, PredefinedMenuItem},
tray::{MouseButton, MouseButtonState, TrayIcon, TrayIconBuilder, TrayIconEvent},
};
use tauri_plugin_store::StoreExt;
// use tokio::sync::Mutex;
// use tokio::time::{sleep, Duration}; // Using tokio::sync::Mutex
// // MCP
use tauri_plugin_store::Store;
use crate::core::mcp::helpers::add_server_config;
// MCP
use super::{
app::commands::get_jan_data_folder_path, extensions::commands::get_jan_extensions_path,
mcp::helpers::run_mcp_commands, state::AppState,
extensions::commands::get_jan_extensions_path, mcp::helpers::run_mcp_commands, state::AppState,
};
pub fn install_extensions<R: Runtime>(app: tauri::AppHandle<R>, force: bool) -> Result<(), String> {
let mut store_path = get_jan_data_folder_path(app.clone());
store_path.push("store.json");
let store = app.store(store_path).expect("Store not initialized");
let stored_version = store
.get("version")
.and_then(|v| v.as_str().map(String::from))
.unwrap_or_default();
let app_version = app
.config()
.version
.clone()
.unwrap_or_else(|| "".to_string());
let extensions_path = get_jan_extensions_path(app.clone());
let pre_install_path = app
.path()
@ -54,13 +38,8 @@ pub fn install_extensions<R: Runtime>(app: tauri::AppHandle<R>, force: bool) ->
if std::env::var("IS_CLEAN").is_ok() {
clean_up = true;
}
log::info!(
"Installing extensions. Clean up: {}, Stored version: {}, App version: {}",
clean_up,
stored_version,
app_version
);
if !clean_up && stored_version == app_version && extensions_path.exists() {
log::info!("Installing extensions. Clean up: {}", clean_up);
if !clean_up && extensions_path.exists() {
return Ok(());
}
@ -164,10 +143,36 @@ pub fn install_extensions<R: Runtime>(app: tauri::AppHandle<R>, force: bool) ->
)
.map_err(|e| e.to_string())?;
// Store the new app version
store.set("version", serde_json::json!(app_version));
store.save().expect("Failed to save store");
Ok(())
}
// Migrate MCP servers configuration
pub fn migrate_mcp_servers(
app_handle: tauri::AppHandle,
store: Arc<Store<Wry>>,
) -> Result<(), String> {
let mcp_version = store
.get("mcp_version")
.and_then(|v| v.as_i64())
.unwrap_or_else(|| 0);
if mcp_version < 1 {
log::info!("Migrating MCP schema version 1");
let result = add_server_config(
app_handle,
"exa".to_string(),
serde_json::json!({
"command": "npx",
"args": ["-y", "exa-mcp-server"],
"env": { "EXA_API_KEY": "YOUR_EXA_API_KEY_HERE" },
"active": false
}),
);
if let Err(e) = result {
log::error!("Failed to add server config: {}", e);
}
}
store.set("mcp_version", 1);
store.save().expect("Failed to save store");
Ok(())
}

View File

@ -150,6 +150,9 @@ pub async fn create_message<R: Runtime>(
let data = serde_json::to_string(&message).map_err(|e| e.to_string())?;
writeln!(file, "{}", data).map_err(|e| e.to_string())?;
// Explicitly flush to ensure data is written before returning
file.flush().map_err(|e| e.to_string())?;
}
Ok(message)

View File

@ -13,7 +13,6 @@
pub mod commands;
mod constants;
pub mod helpers;
pub mod models;
pub mod utils;
#[cfg(test)]

View File

@ -1,103 +0,0 @@
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Thread {
pub id: String,
pub object: String,
pub title: String,
pub assistants: Vec<ThreadAssistantInfo>,
pub created: i64,
pub updated: i64,
pub metadata: Option<serde_json::Value>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ThreadMessage {
pub id: String,
pub object: String,
pub thread_id: String,
pub assistant_id: Option<String>,
pub attachments: Option<Vec<Attachment>>,
pub role: String,
pub content: Vec<ThreadContent>,
pub status: String,
pub created_at: i64,
pub completed_at: i64,
pub metadata: Option<serde_json::Value>,
pub type_: Option<String>,
pub error_code: Option<String>,
pub tool_call_id: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Attachment {
pub file_id: Option<String>,
pub tools: Option<Vec<Tool>>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "type")]
pub enum Tool {
#[serde(rename = "file_search")]
FileSearch,
#[serde(rename = "code_interpreter")]
CodeInterpreter,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ThreadContent {
pub type_: String,
pub text: Option<ContentValue>,
pub image_url: Option<ImageContentValue>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ContentValue {
pub value: String,
pub annotations: Vec<String>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ImageContentValue {
pub detail: Option<String>,
pub url: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ThreadAssistantInfo {
pub id: String,
pub name: String,
pub model: ModelInfo,
pub instructions: Option<String>,
pub tools: Option<Vec<AssistantTool>>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ModelInfo {
pub id: String,
pub name: String,
pub settings: serde_json::Value,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "type")]
pub enum AssistantTool {
#[serde(rename = "code_interpreter")]
CodeInterpreter,
#[serde(rename = "retrieval")]
Retrieval,
#[serde(rename = "function")]
Function {
name: String,
description: Option<String>,
parameters: Option<serde_json::Value>,
},
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ThreadState {
pub has_more: bool,
pub waiting_for_response: bool,
pub error: Option<String>,
pub last_message: Option<String>,
}

View File

@ -88,7 +88,7 @@ async fn test_create_and_list_messages() {
let messages = list_messages(app.handle().clone(), thread_id.clone())
.await
.unwrap();
assert!(messages.len() > 0);
assert!(messages.len() > 0, "Expected at least one message, but got none. Thread ID: {}", thread_id);
assert_eq!(messages[0]["role"], "user");
// Clean up

View File

@ -10,6 +10,7 @@ use jan_utils::generate_app_token;
use std::{collections::HashMap, sync::Arc};
use tauri::{Emitter, Manager, RunEvent};
use tauri_plugin_llamacpp::cleanup_llama_processes;
use tauri_plugin_store::StoreExt;
use tokio::sync::Mutex;
#[cfg_attr(all(mobile, any(target_os = "android", target_os = "ios")), tauri::mobile_entry_point)]
@ -134,11 +135,46 @@ pub fn run() {
)?;
#[cfg(not(any(target_os = "ios", target_os = "android")))]
app.handle().plugin(tauri_plugin_updater::Builder::new().build())?;
// Install extensions
if let Err(e) = setup::install_extensions(app.handle().clone(), false) {
// Start migration
let mut store_path = get_jan_data_folder_path(app.handle().clone());
store_path.push("store.json");
let store = app
.handle()
.store(store_path)
.expect("Store not initialized");
let stored_version = store
.get("version")
.and_then(|v| v.as_str().map(String::from))
.unwrap_or_default();
let app_version = app
.config()
.version
.clone()
.unwrap_or_else(|| "".to_string());
// Migrate extensions
if let Err(e) =
setup::install_extensions(app.handle().clone(), stored_version != app_version)
{
log::error!("Failed to install extensions: {}", e);
}
// Migrate MCP servers
if let Err(e) = setup::migrate_mcp_servers(app.handle().clone(), store.clone()) {
log::error!("Failed to migrate MCP servers: {}", e);
}
// Store the new app version
store.set("version", serde_json::json!(app_version));
store.save().expect("Failed to save store");
// Migration completed
#[cfg(desktop)]
if option_env!("ENABLE_SYSTEM_TRAY_ICON").unwrap_or("false") == "true" {
log::info!("Enabling system tray icon");
let _ = setup::setup_tray(app);
}
#[cfg(all(feature = "deep-link", any(windows, target_os = "linux")))]
{
use tauri_plugin_deep_link::DeepLinkExt;

View File

@ -76,6 +76,7 @@
}
},
"bundle": {
"publisher": "Menlo Research Pte. Ltd.",
"active": true,
"createUpdaterArtifacts": false,
"icon": [

View File

@ -38,6 +38,9 @@ import { useTools } from '@/hooks/useTools'
import { TokenCounter } from '@/components/TokenCounter'
import { useMessages } from '@/hooks/useMessages'
import { useShallow } from 'zustand/react/shallow'
import { McpExtensionToolLoader } from './McpExtensionToolLoader'
import { ExtensionTypeEnum, MCPExtension } from '@janhq/core'
import { ExtensionManager } from '@/lib/extension'
type ChatInputProps = {
className?: string
@ -171,6 +174,12 @@ const ChatInput = ({
// Check if there are active MCP servers
const hasActiveMCPServers = connectedServers.length > 0 || tools.length > 0
// Get MCP extension and its custom component
const extensionManager = ExtensionManager.getInstance()
const mcpExtension = extensionManager.get<MCPExtension>(ExtensionTypeEnum.MCP)
const MCPToolComponent = mcpExtension?.getToolComponent?.()
const handleSendMesage = (prompt: string) => {
if (!selectedModel) {
setMessage('Please select a model to start chatting.')
@ -719,6 +728,17 @@ const ChatInput = ({
{selectedModel?.capabilities?.includes('tools') &&
hasActiveMCPServers && (
MCPToolComponent ? (
// Use custom MCP component
<McpExtensionToolLoader
tools={tools}
hasActiveMCPServers={hasActiveMCPServers}
selectedModelHasTools={selectedModel?.capabilities?.includes('tools') ?? false}
initialMessage={initialMessage}
MCPToolComponent={MCPToolComponent}
/>
) : (
// Use default tools dropdown
<TooltipProvider>
<Tooltip
open={tooltipToolsAvailable}
@ -773,6 +793,7 @@ const ChatInput = ({
</TooltipContent>
</Tooltip>
</TooltipProvider>
)
)}
{selectedModel?.capabilities?.includes('web_search') && (
<TooltipProvider>

View File

@ -1,4 +1,4 @@
import { Link, useRouterState } from '@tanstack/react-router'
import { Link, useRouterState, useNavigate } from '@tanstack/react-router'
import { useLeftPanel } from '@/hooks/useLeftPanel'
import { cn } from '@/lib/utils'
import {
@ -58,6 +58,9 @@ const mainMenus = [
route: route.project,
isEnabled: true,
},
]
const secondaryMenus = [
{
title: 'common:assistants',
icon: IconClipboardSmile,
@ -82,6 +85,7 @@ const LeftPanel = () => {
const open = useLeftPanel((state) => state.open)
const setLeftPanel = useLeftPanel((state) => state.setLeftPanel)
const { t } = useTranslation()
const navigate = useNavigate()
const [searchTerm, setSearchTerm] = useState('')
const { isAuthenticated } = useAuth()
@ -213,7 +217,12 @@ const LeftPanel = () => {
if (editingProjectKey) {
updateFolder(editingProjectKey, name)
} else {
addFolder(name)
const newProject = addFolder(name)
// Navigate to the newly created project
navigate({
to: '/project/$projectId',
params: { projectId: newProject.id },
})
}
setProjectDialogOpen(false)
setEditingProjectKey(null)
@ -488,7 +497,7 @@ const LeftPanel = () => {
)}
<div className="flex flex-col h-full overflow-y-scroll w-[calc(100%+6px)]">
<div className="flex flex-col w-full h-full overflow-y-auto overflow-x-hidden">
<div className="flex flex-col w-full h-full overflow-y-auto overflow-x-hidden mb-3">
<div className="h-full w-full overflow-y-auto">
{favoritedThreads.length > 0 && (
<>
@ -608,6 +617,44 @@ const LeftPanel = () => {
</div>
</div>
</div>
{secondaryMenus.map((menu) => {
if (!menu.isEnabled) {
return null
}
// Regular menu items must have route and icon
if (!menu.route || !menu.icon) return null
const isActive = (() => {
// Settings routes
if (menu.route.includes(route.settings.index)) {
return currentPath.includes(route.settings.index)
}
// Default exact match for other routes
return currentPath === menu.route
})()
return (
<Link
key={menu.title}
to={menu.route}
onClick={() => isSmallScreen && setLeftPanel(false)}
data-test-id={`menu-${menu.title}`}
activeOptions={{ exact: true }}
className={cn(
'flex items-center gap-1.5 cursor-pointer hover:bg-left-panel-fg/10 py-1 px-1 rounded',
isActive && 'bg-left-panel-fg/10'
)}
>
<menu.icon size={18} className="text-left-panel-fg/70" />
<span className="font-medium text-left-panel-fg/90">
{t(menu.title)}
</span>
</Link>
)
})}
{PlatformFeatures[PlatformFeature.AUTHENTICATION] && (
<div className="space-y-1 shrink-0 py-1">
<div>

View File

@ -0,0 +1,61 @@
import { ComponentType } from 'react'
import { MCPTool, MCPToolComponentProps } from '@janhq/core'
import { useToolAvailable } from '@/hooks/useToolAvailable'
import { useThreads } from '@/hooks/useThreads'
interface McpExtensionToolLoaderProps {
tools: MCPTool[]
hasActiveMCPServers: boolean
selectedModelHasTools: boolean
initialMessage?: boolean
MCPToolComponent?: ComponentType<MCPToolComponentProps> | null
}
export const McpExtensionToolLoader = ({
tools,
hasActiveMCPServers,
selectedModelHasTools,
initialMessage,
MCPToolComponent,
}: McpExtensionToolLoaderProps) => {
// Get tool management hooks
const { isToolDisabled, setToolDisabledForThread, setDefaultDisabledTools, getDefaultDisabledTools } = useToolAvailable()
const { getCurrentThread } = useThreads()
const currentThread = getCurrentThread()
// Handle tool toggle for custom component
const handleToolToggle = (toolName: string, enabled: boolean) => {
if (initialMessage) {
const currentDefaults = getDefaultDisabledTools()
if (enabled) {
setDefaultDisabledTools(currentDefaults.filter((name) => name !== toolName))
} else {
setDefaultDisabledTools([...currentDefaults, toolName])
}
} else if (currentThread?.id) {
setToolDisabledForThread(currentThread.id, toolName, enabled)
}
}
const isToolEnabled = (toolName: string): boolean => {
if (initialMessage) {
return !getDefaultDisabledTools().includes(toolName)
} else if (currentThread?.id) {
return !isToolDisabled(currentThread.id, toolName)
}
return false
}
// Only render if we have the custom MCP component and conditions are met
if (!selectedModelHasTools || !hasActiveMCPServers || !MCPToolComponent) {
return null
}
return (
<MCPToolComponent
tools={tools}
isToolEnabled={isToolEnabled}
onToolToggle={handleToolToggle}
/>
)
}

View File

@ -1,4 +1,3 @@
/* eslint-disable react-hooks/exhaustive-deps */
import ReactMarkdown, { Components } from 'react-markdown'
import remarkGfm from 'remark-gfm'
import remarkEmoji from 'remark-emoji'

View File

@ -3,6 +3,7 @@ import { create } from 'zustand'
import { RenderMarkdown } from './RenderMarkdown'
import { useAppState } from '@/hooks/useAppState'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { extractThinkingContent } from '@/lib/utils'
interface Props {
text: string
@ -43,19 +44,6 @@ const ThinkingBlock = ({ id, text }: Props) => {
setThinkingState(id, newExpandedState)
}
// Extract thinking content from either format
const extractThinkingContent = (text: string) => {
return text
.replace(/<\/?think>/g, '')
.replace(/<\|channel\|>analysis<\|message\|>/g, '')
.replace(/<\|start\|>assistant<\|channel\|>final<\|message\|>/g, '')
.replace(/assistant<\|channel\|>final<\|message\|>/g, '')
.replace(/<\|channel\|>/g, '') // remove any remaining channel markers
.replace(/<\|message\|>/g, '') // remove any remaining message markers
.replace(/<\|start\|>/g, '') // remove any remaining start markers
.trim()
}
const thinkingContent = extractThinkingContent(text)
if (!thinkingContent) return null

View File

@ -23,7 +23,7 @@ import { useThreads } from '@/hooks/useThreads'
import { useThreadManagement } from '@/hooks/useThreadManagement'
import { useLeftPanel } from '@/hooks/useLeftPanel'
import { useMessages } from '@/hooks/useMessages'
import { cn } from '@/lib/utils'
import { cn, extractThinkingContent } from '@/lib/utils'
import { useSmallScreen } from '@/hooks/useMediaQuery'
import {
@ -167,14 +167,10 @@ const SortableItem = memo(
)}
>
<span>{thread.title || t('common:newThread')}</span>
{variant === 'project' && (
<>
{variant === 'project' && getLastMessageInfo?.content && (
<div className="text-sm text-main-view-fg/60 mt-0.5 line-clamp-2">
{getLastMessageInfo.content}
</div>
)}
</>
<span className="block text-sm text-main-view-fg/60 mt-0.5 truncate">
{extractThinkingContent(getLastMessageInfo.content)}
</span>
)}
</div>
<div className="flex items-center">
@ -185,7 +181,10 @@ const SortableItem = memo(
<DropdownMenuTrigger asChild>
<IconDots
size={14}
className="text-left-panel-fg/60 shrink-0 cursor-pointer px-0.5 -mr-1 data-[state=open]:bg-left-panel-fg/10 rounded group-hover/thread-list:data-[state=closed]:size-5 size-5 data-[state=closed]:size-0"
className={cn(
'text-left-panel-fg/60 shrink-0 cursor-pointer px-0.5 -mr-1 data-[state=open]:bg-left-panel-fg/10 rounded group-hover/thread-list:data-[state=closed]:size-5 size-5 data-[state=closed]:size-0',
variant === 'project' && 'text-main-view-fg/60'
)}
onClick={(e) => {
e.preventDefault()
e.stopPropagation()

View File

@ -0,0 +1,19 @@
import { useThreadScrolling } from '@/hooks/useThreadScrolling'
export const ThreadPadding = ({
threadId,
scrollContainerRef,
}: {
threadId: string
scrollContainerRef: React.RefObject<HTMLDivElement | null>
}) => {
// Get padding height for ChatGPT-style message positioning
const { paddingHeight } = useThreadScrolling(threadId, scrollContainerRef)
return (
<div
style={{ height: paddingHeight }}
className="flex-shrink-0"
data-testid="chat-padding"
/>
)
}

View File

@ -17,6 +17,7 @@ import { getProviderTitle } from '@/lib/utils'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { ModelCapabilities } from '@/types/models'
import { models as providerModels } from 'token.js'
import { toast } from 'sonner'
type DialogAddModelProps = {
provider: ModelProvider
@ -37,8 +38,13 @@ export const DialogAddModel = ({ provider, trigger }: DialogAddModelProps) => {
// Handle form submission
const handleSubmit = () => {
if (!modelId.trim()) {
return // Don't submit if model ID is empty
if (!modelId.trim()) return // Don't submit if model ID is empty
if (provider.models.some((e) => e.id === modelId)) {
toast.error(t('providers:addModel.modelExists'), {
description: t('providers:addModel.modelExistsDesc'),
})
return // Don't submit if model ID already exists
}
// Create the new model

View File

@ -131,8 +131,7 @@ export const useChat = () => {
})
}
return currentThread
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [createThread, retrieveThread, router])
}, [createThread, retrieveThread, router, setMessages])
const restartModel = useCallback(
async (provider: ProviderObject, modelId: string) => {

View File

@ -13,7 +13,7 @@ type ThreadFolder = {
type ThreadManagementState = {
folders: ThreadFolder[]
setFolders: (folders: ThreadFolder[]) => void
addFolder: (name: string) => void
addFolder: (name: string) => ThreadFolder
updateFolder: (id: string, name: string) => void
deleteFolder: (id: string) => void
getFolderById: (id: string) => ThreadFolder | undefined
@ -37,6 +37,7 @@ export const useThreadManagement = create<ThreadManagementState>()(
set((state) => ({
folders: [...state.folders, newFolder],
}))
return newFolder
},
updateFolder: (id, name) => {

View File

@ -78,7 +78,7 @@ export const useThreadScrolling = (
return () =>
scrollContainer.removeEventListener('scroll', handleScroll)
}
}, [handleScroll])
}, [handleScroll, scrollContainerRef])
const checkScrollState = useCallback(() => {
const scrollContainer = scrollContainerRef.current
@ -90,7 +90,7 @@ export const useThreadScrolling = (
setIsAtBottom(isBottom)
setHasScrollbar(hasScroll)
}, [])
}, [scrollContainerRef])
useEffect(() => {
if (!scrollContainerRef.current) return
@ -101,7 +101,7 @@ export const useThreadScrolling = (
scrollToBottom(false)
checkScrollState()
}
}, [checkScrollState, scrollToBottom])
}, [checkScrollState, scrollToBottom, scrollContainerRef])
const prevCountRef = useRef(messageCount)
@ -146,7 +146,7 @@ export const useThreadScrolling = (
}
prevCountRef.current = messageCount
}, [messageCount, lastMessageRole])
}, [messageCount, lastMessageRole, getDOMElements, setPaddingHeight])
useEffect(() => {
const previouslyStreaming = wasStreamingRef.current
@ -197,7 +197,7 @@ export const useThreadScrolling = (
}
wasStreamingRef.current = currentlyStreaming
}, [streamingContent, threadId])
}, [streamingContent, threadId, getDOMElements, setPaddingHeight])
useEffect(() => {
userIntendedPositionRef.current = null
@ -207,7 +207,7 @@ export const useThreadScrolling = (
prevCountRef.current = messageCount
scrollToBottom(false)
checkScrollState()
}, [threadId])
}, [threadId, messageCount, scrollToBottom, checkScrollState, setPaddingHeight])
return useMemo(
() => ({

View File

@ -8,6 +8,8 @@ type ToolDisabledState = {
disabledTools: Record<string, string[]> // threadId -> toolNames[]
// Global default disabled tools (for new threads/index page)
defaultDisabledTools: string[]
// Flag to track if defaults have been initialized from extension
defaultsInitialized: boolean
// Actions
setToolDisabledForThread: (
@ -19,6 +21,8 @@ type ToolDisabledState = {
getDisabledToolsForThread: (threadId: string) => string[]
setDefaultDisabledTools: (toolNames: string[]) => void
getDefaultDisabledTools: () => string[]
isDefaultsInitialized: () => boolean
markDefaultsAsInitialized: () => void
// Initialize thread tools from default or existing thread settings
initializeThreadTools: (threadId: string, allTools: MCPTool[]) => void
}
@ -28,6 +32,7 @@ export const useToolAvailable = create<ToolDisabledState>()(
(set, get) => ({
disabledTools: {},
defaultDisabledTools: [],
defaultsInitialized: false,
setToolDisabledForThread: (
threadId: string,
@ -81,6 +86,14 @@ export const useToolAvailable = create<ToolDisabledState>()(
return get().defaultDisabledTools
},
isDefaultsInitialized: () => {
return get().defaultsInitialized
},
markDefaultsAsInitialized: () => {
set({ defaultsInitialized: true })
},
initializeThreadTools: (threadId: string, allTools: MCPTool[]) => {
const state = get()
// If thread already has settings, don't override
@ -109,6 +122,7 @@ export const useToolAvailable = create<ToolDisabledState>()(
partialize: (state) => ({
disabledTools: state.disabledTools,
defaultDisabledTools: state.defaultDisabledTools,
defaultsInitialized: state.defaultsInitialized,
}),
}
)

View File

@ -1,19 +1,38 @@
import { useEffect } from 'react'
import { getServiceHub } from '@/hooks/useServiceHub'
import { MCPTool } from '@/types/completion'
import { SystemEvent } from '@/types/events'
import { useAppState } from './useAppState'
import { useToolAvailable } from './useToolAvailable'
import { ExtensionManager } from '@/lib/extension'
import { ExtensionTypeEnum, MCPExtension } from '@janhq/core'
export const useTools = () => {
const updateTools = useAppState((state) => state.updateTools)
const { isDefaultsInitialized, setDefaultDisabledTools, markDefaultsAsInitialized } = useToolAvailable()
useEffect(() => {
function setTools() {
getServiceHub().mcp().getTools().then((data: MCPTool[]) => {
async function setTools() {
try {
// Get MCP extension first
const mcpExtension = ExtensionManager.getInstance().get<MCPExtension>(
ExtensionTypeEnum.MCP
)
// Fetch tools
const data = await getServiceHub().mcp().getTools()
updateTools(data)
}).catch((error) => {
// Initialize default disabled tools for new users (only once)
if (!isDefaultsInitialized() && data.length > 0 && mcpExtension?.getDefaultDisabledTools) {
const defaultDisabled = await mcpExtension.getDefaultDisabledTools()
if (defaultDisabled.length > 0) {
setDefaultDisabledTools(defaultDisabled)
markDefaultsAsInitialized()
}
}
} catch (error) {
console.error('Failed to fetch MCP tools:', error)
})
}
}
setTools()

View File

@ -3,10 +3,12 @@ import { twMerge } from 'tailwind-merge'
import { ExtensionManager } from './extension'
import path from "path"
export function cn(...inputs: ClassValue[]) {
return twMerge(clsx(inputs))
}
export function basenameNoExt(filePath: string): string {
const base = path.basename(filePath);
const VALID_EXTENSIONS = [".tar.gz", ".zip"];
@ -22,6 +24,7 @@ export function basenameNoExt(filePath: string): string {
return base.slice(0, -path.extname(base).length);
}
/**
* Get the display name for a model, falling back to the model ID if no display name is set
*/
@ -188,3 +191,15 @@ export function formatDuration(startTime: number, endTime?: number): string {
export function sanitizeModelId(modelId: string): string {
return modelId.replace(/[^a-zA-Z0-9/_\-.]/g, '').replace(/\./g, '_')
}
export const extractThinkingContent = (text: string) => {
return text
.replace(/<\/?think>/g, '')
.replace(/<\|channel\|>analysis<\|message\|>/g, '')
.replace(/<\|start\|>assistant<\|channel\|>final<\|message\|>/g, '')
.replace(/assistant<\|channel\|>final<\|message\|>/g, '')
.replace(/<\|channel\|>/g, '') // remove any remaining channel markers
.replace(/<\|message\|>/g, '') // remove any remaining message markers
.replace(/<\|start\|>/g, '') // remove any remaining start markers
.trim()
}
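
A quick usage sketch of the helper above (the import path is assumed from the surrounding file; note that, despite its name, the function strips the think tags and Harmony-style channel markers from the full text rather than returning only the thinking block):

// Illustrative usage only -- not part of this commit.
import { extractThinkingContent } from '@/lib/utils' // assumed path

const raw =
  '<think>planning step</think><|start|>assistant<|channel|>final<|message|>Hello there'

// Tags and markers are removed, the surrounding text is kept and trimmed:
// -> "planning stepHello there"
console.log(extractThinkingContent(raw))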

View File

@ -35,7 +35,9 @@
"modelId": "Modell ID",
"enterModelId": "Modell ID eingeben",
"exploreModels": "Sehe Modellliste von {{provider}}",
"addModel": "Modell hinzufügen"
"addModel": "Modell hinzufügen",
"modelExists": "Modell bereits vorhanden",
"modelExistsDesc": "Bitte wähle eine andere Modell-ID."
},
"deleteModel": {
"title": "Lösche Modell: {{modelId}}",

View File

@ -35,7 +35,9 @@
"modelId": "Model ID",
"enterModelId": "Enter model ID",
"exploreModels": "See model list from {{provider}}",
"addModel": "Add Model"
"addModel": "Add Model",
"modelExists": "Model already exists",
"modelExistsDesc": "Please choose a different model ID."
},
"deleteModel": {
"title": "Delete Model: {{modelId}}",

View File

@ -35,7 +35,9 @@
"modelId": "ID Model",
"enterModelId": "Masukkan ID model",
"exploreModels": "Lihat daftar model dari {{provider}}",
"addModel": "Tambah Model"
"addModel": "Tambah Model",
"modelExists": "Model sudah ada",
"modelExistsDesc": "Silakan pilih ID model yang berbeda."
},
"deleteModel": {
"title": "Hapus Model: {{modelId}}",

View File

@ -35,7 +35,9 @@
"modelId": "Identyfikator Modelu",
"enterModelId": "Wprowadź identyfikator modelu",
"exploreModels": "Zobacz listę modeli dostawcy {{provider}}",
"addModel": "Dodaj Model"
"addModel": "Dodaj Model",
"modelExists": "Model już istnieje",
"modelExistsDesc": "Wybierz inny identyfikator modelu."
},
"deleteModel": {
"title": "Usuń Model: {{modelId}}",

View File

@ -35,7 +35,9 @@
"modelId": "ID mô hình",
"enterModelId": "Nhập ID mô hình",
"exploreModels": "Xem danh sách mô hình từ {{provider}}",
"addModel": "Thêm mô hình"
"addModel": "Thêm mô hình",
"modelExists": "Mô hình đã tồn tại",
"modelExistsDesc": "Vui lòng chọn một ID mô hình khác."
},
"deleteModel": {
"title": "Xóa mô hình: {{modelId}}",

View File

@ -35,7 +35,9 @@
"modelId": "模型 ID",
"enterModelId": "输入模型 ID",
"exploreModels": "查看 {{provider}} 的模型列表",
"addModel": "添加模型"
"addModel": "添加模型",
"modelExists": "模型已存在",
"modelExistsDesc": "请选择不同的模型 ID。"
},
"deleteModel": {
"title": "删除模型:{{modelId}}",

View File

@ -35,7 +35,9 @@
"modelId": "模型 ID",
"enterModelId": "輸入模型 ID",
"exploreModels": "查看 {{provider}} 的模型清單",
"addModel": "新增模型"
"addModel": "新增模型",
"modelExists": "模型已存在",
"modelExistsDesc": "請選擇不同的模型 ID。"
},
"deleteModel": {
"title": "刪除模型:{{modelId}}",

View File

@ -1,4 +1,4 @@
import { createFileRoute } from '@tanstack/react-router'
import { createFileRoute, useNavigate } from '@tanstack/react-router'
import { useState, useMemo } from 'react'
import { useThreadManagement } from '@/hooks/useThreadManagement'
@ -31,6 +31,7 @@ function Project() {
function ProjectContent() {
const { t } = useTranslation()
const navigate = useNavigate()
const { folders, addFolder, updateFolder, deleteFolder, getFolderById } =
useThreadManagement()
const threads = useThreads((state) => state.threads)
@ -59,7 +60,12 @@ function ProjectContent() {
if (editingKey) {
updateFolder(editingKey, name)
} else {
addFolder(name)
const newProject = addFolder(name)
// Navigate to the newly created project
navigate({
to: '/project/$projectId',
params: { projectId: newProject.id },
})
}
setOpen(false)
setEditingKey(null)
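
The create-then-navigate flow added above relies on addFolder returning the new project synchronously. A self-contained sketch of the same flow with TanStack Router (the route path comes from the diff; the hook name and Project type are illustrative):

// Illustrative sketch only -- not part of this commit.
import { useNavigate } from '@tanstack/react-router'

type Project = { id: string; name: string }

export function useCreateAndOpenProject(addFolder: (name: string) => Project) {
  const navigate = useNavigate()
  return (name: string) => {
    const newProject = addFolder(name)
    // Jump straight to the freshly created project's page.
    navigate({ to: '/project/$projectId', params: { projectId: newProject.id } })
    return newProject
  }
}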

View File

@ -318,17 +318,7 @@ function ProviderDetail() {
.getActiveModels()
.then((models) => setActiveModels(models || []))
} catch (error) {
console.error('Error starting model:', error)
if (
error &&
typeof error === 'object' &&
'message' in error &&
typeof error.message === 'string'
) {
setModelLoadError({ message: error.message })
} else {
setModelLoadError(typeof error === 'string' ? error : `${error}`)
}
setModelLoadError(error as ErrorObject)
} finally {
// Remove model from loading state
setLoadingModels((prev) => prev.filter((id) => id !== modelId))

View File

@ -24,8 +24,8 @@ import { PlatformFeatures } from '@/lib/platform/const'
import { PlatformFeature } from '@/lib/platform/types'
import ScrollToBottom from '@/containers/ScrollToBottom'
import { PromptProgress } from '@/components/PromptProgress'
import { ThreadPadding } from '@/containers/ThreadPadding'
import { TEMPORARY_CHAT_ID, TEMPORARY_CHAT_QUERY_ID } from '@/constants/chat'
import { useThreadScrolling } from '@/hooks/useThreadScrolling'
import { IconInfoCircle } from '@tabler/icons-react'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
@ -102,9 +102,6 @@ function ThreadDetail() {
const scrollContainerRef = useRef<HTMLDivElement>(null)
// Get padding height for ChatGPT-style message positioning
const { paddingHeight } = useThreadScrolling(threadId, scrollContainerRef)
// Listen for conversation not found events
useEffect(() => {
const handleConversationNotFound = (event: CustomEvent) => {
@ -126,7 +123,7 @@ function ThreadDetail() {
return () => {
window.removeEventListener(CONVERSATION_NOT_FOUND_EVENT, handleConversationNotFound as EventListener)
}
}, [threadId, navigate])
}, [threadId, navigate, t])
useEffect(() => {
setCurrentThreadId(threadId)
@ -278,11 +275,7 @@ function ThreadDetail() {
data-test-id="thread-content-text"
/>
{/* Persistent padding element for ChatGPT-style message positioning */}
<div
style={{ height: paddingHeight }}
className="flex-shrink-0"
data-testid="chat-padding"
/>
<ThreadPadding threadId={threadId} scrollContainerRef={scrollContainerRef} />
</div>
</div>
<div

View File

@ -3466,12 +3466,17 @@ __metadata:
dependencies:
"@janhq/core": "workspace:*"
"@modelcontextprotocol/sdk": "npm:1.17.5"
"@tabler/icons-react": "npm:^3.34.0"
"@types/react": "npm:19.1.2"
react: "npm:19.0.0"
typescript: "npm:5.9.2"
vite: "npm:5.4.20"
vitest: "npm:2.1.9"
zustand: "npm:5.0.8"
peerDependencies:
"@janhq/core": "*"
"@tabler/icons-react": "*"
react: 19.0.0
zustand: 5.0.3
languageName: unknown
linkType: soft
@ -3482,11 +3487,13 @@ __metadata:
dependencies:
"@npmcli/arborist": "npm:^7.1.0"
"@types/node": "npm:^22.10.0"
"@types/react": "npm:19.1.2"
"@vitest/coverage-v8": "npm:^2.1.8"
"@vitest/ui": "npm:^2.1.8"
eslint: "npm:8.57.0"
happy-dom: "npm:^15.11.6"
pacote: "npm:^21.0.0"
react: "npm:19.0.0"
request: "npm:^2.88.2"
request-progress: "npm:^3.0.0"
rimraf: "npm:^6.0.1"
@ -3496,6 +3503,8 @@ __metadata:
typescript: "npm:^5.8.3"
ulidx: "npm:^2.3.0"
vitest: "npm:^2.1.8"
peerDependencies:
react: 19.0.0
languageName: unknown
linkType: soft
@ -6880,6 +6889,17 @@ __metadata:
languageName: node
linkType: hard
"@tabler/icons-react@npm:^3.34.0":
version: 3.35.0
resolution: "@tabler/icons-react@npm:3.35.0"
dependencies:
"@tabler/icons": "npm:3.35.0"
peerDependencies:
react: ">= 16"
checksum: 10c0/8d280fcdae00916b001142ba0800ea05d8fa2acdcbd82f88a299b4141fb941237be2e826b86b1af710e038b4f8bb6f76f452c3309c29fd62398b4d5789c2b3e0
languageName: node
linkType: hard
"@tabler/icons@npm:3.34.0":
version: 3.34.0
resolution: "@tabler/icons@npm:3.34.0"
@ -6887,6 +6907,13 @@ __metadata:
languageName: node
linkType: hard
"@tabler/icons@npm:3.35.0":
version: 3.35.0
resolution: "@tabler/icons@npm:3.35.0"
checksum: 10c0/93098828128ffed2cf412b39bd78992f93f25b22349a4e04523d2a018b7fe376ddeff105babcc3efedd707aa00b705425c7d9f598d6987552a563c62125795a2
languageName: node
linkType: hard
"@tailwindcss/node@npm:4.1.4":
version: 4.1.4
resolution: "@tailwindcss/node@npm:4.1.4"