Merge branch 'dev' into rp/v2-docs-improvements

Ramon Perez 2025-08-25 21:55:16 +10:00
commit 2a5cffd64c
103 changed files with 4668 additions and 546 deletions


@@ -7,6 +7,7 @@ on:
 jobs:
   assign_milestone:
     runs-on: ubuntu-latest
+    if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
     permissions:
       pull-requests: write
       issues: write

.github/workflows/autoqa-migration.yml (new file, 330 lines)

@@ -0,0 +1,330 @@
name: AutoQA Migration (Manual)
on:
workflow_dispatch:
inputs:
old_windows_installer:
description: 'Windows OLD installer URL or path (.exe)'
required: true
type: string
new_windows_installer:
description: 'Windows NEW installer URL or path (.exe)'
required: true
type: string
old_ubuntu_installer:
description: 'Ubuntu OLD installer URL or path (.deb)'
required: false
type: string
default: ''
new_ubuntu_installer:
description: 'Ubuntu NEW installer URL or path (.deb)'
required: false
type: string
default: ''
old_macos_installer:
description: 'macOS OLD installer URL or path (.dmg)'
required: false
type: string
default: ''
new_macos_installer:
description: 'macOS NEW installer URL or path (.dmg)'
required: false
type: string
default: ''
migration_test_case:
description: 'Specific migration test case key (leave empty to run all)'
required: false
type: string
default: ''
max_turns:
description: 'Maximum turns per test phase'
required: false
type: number
default: 65
jobs:
migration-windows:
runs-on: windows-11-nvidia-gpu
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python 3.13
uses: actions/setup-python@v4
with:
python-version: '3.13'
- name: Clean existing Jan installations
shell: powershell
run: |
.\autoqa\scripts\windows_cleanup.ps1 -IsNightly $false
- name: Download OLD and NEW installers
shell: powershell
run: |
# Download OLD installer using existing script
.\autoqa\scripts\windows_download.ps1 `
-WorkflowInputUrl "${{ inputs.old_windows_installer }}" `
-WorkflowInputIsNightly "false" `
-RepoVariableUrl "" `
-RepoVariableIsNightly "" `
-DefaultUrl "" `
-DefaultIsNightly ""
$oldSrc = Join-Path $env:TEMP 'jan-installer.exe'
$oldOut = Join-Path $env:TEMP 'jan-old.exe'
Copy-Item -Path $oldSrc -Destination $oldOut -Force
# Download NEW installer using existing script
.\autoqa\scripts\windows_download.ps1 `
-WorkflowInputUrl "${{ inputs.new_windows_installer }}" `
-WorkflowInputIsNightly "false" `
-RepoVariableUrl "" `
-RepoVariableIsNightly "" `
-DefaultUrl "" `
-DefaultIsNightly ""
$newSrc = Join-Path $env:TEMP 'jan-installer.exe'
$newOut = Join-Path $env:TEMP 'jan-new.exe'
Copy-Item -Path $newSrc -Destination $newOut -Force
Write-Host "OLD installer: $oldOut"
Write-Host "NEW installer: $newOut"
echo "OLD_VERSION=$oldOut" | Out-File -FilePath $env:GITHUB_ENV -Append
echo "NEW_VERSION=$newOut" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install Python dependencies
working-directory: autoqa
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run migration tests (Windows)
working-directory: autoqa
shell: powershell
env:
RP_TOKEN: ${{ secrets.RP_TOKEN }}
ENABLE_REPORTPORTAL: 'true'
RP_ENDPOINT: 'https://reportportal.menlo.ai'
RP_PROJECT: 'default_personal'
run: |
$case = "${{ inputs.migration_test_case }}"
$caseArg = ""
if ($case -and $case.Trim() -ne "") { $caseArg = "--migration-test-case `"$case`"" }
python main.py --enable-migration-test --old-version "$env:OLD_VERSION" --new-version "$env:NEW_VERSION" --max-turns ${{ inputs.max_turns }} $caseArg
- name: Upload screen recordings
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: migration-recordings-${{ github.run_number }}-windows
path: autoqa/recordings/
- name: Upload trajectories
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: migration-trajectories-${{ github.run_number }}-windows
path: autoqa/trajectories/
- name: Cleanup after tests
if: always()
shell: powershell
run: |
.\autoqa\scripts\windows_post_cleanup.ps1 -IsNightly $false
migration-ubuntu:
if: inputs.old_ubuntu_installer != '' && inputs.new_ubuntu_installer != ''
runs-on: ubuntu-22-04-nvidia-gpu
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python 3.13
uses: actions/setup-python@v4
with:
python-version: '3.13'
- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y \
x11-utils \
python3-tk \
python3-dev \
wmctrl \
xdotool \
libnss3-dev \
libgconf-2-4 \
libxss1 \
libasound2 \
libxtst6 \
libgtk-3-0 \
libgbm-dev \
libxshmfence1 \
libxrandr2 \
libpangocairo-1.0-0 \
libatk1.0-0 \
libcairo-gobject2 \
libgdk-pixbuf2.0-0 \
gnome-screenshot \
xvfb
- name: Setup script permissions
run: |
chmod +x autoqa/scripts/setup_permissions.sh || true
./autoqa/scripts/setup_permissions.sh || true
- name: Clean existing Jan installations
run: |
./autoqa/scripts/ubuntu_cleanup.sh
- name: Download OLD and NEW installers
run: |
set -e
# Download OLD installer using existing script
./autoqa/scripts/ubuntu_download.sh \
"${{ inputs.old_ubuntu_installer }}" \
"false" \
"" \
"" \
"" \
""
cp /tmp/jan-installer.deb /tmp/jan-old.deb
# Download NEW installer using existing script
./autoqa/scripts/ubuntu_download.sh \
"${{ inputs.new_ubuntu_installer }}" \
"false" \
"" \
"" \
"" \
""
cp /tmp/jan-installer.deb /tmp/jan-new.deb
echo "OLD_VERSION=/tmp/jan-old.deb" >> $GITHUB_ENV
echo "NEW_VERSION=/tmp/jan-new.deb" >> $GITHUB_ENV
- name: Install Python dependencies
working-directory: autoqa
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run migration tests (Ubuntu)
working-directory: autoqa
run: |
case="${{ inputs.migration_test_case }}"
caseArg=""
if [ -n "${case}" ]; then caseArg="--migration-test-case \"${case}\""; fi
xvfb-run -a python main.py --enable-migration-test --old-version "${OLD_VERSION}" --new-version "${NEW_VERSION}" --max-turns ${{ inputs.max_turns }} ${caseArg}
- name: Upload screen recordings
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: migration-recordings-${{ github.run_number }}-ubuntu
path: autoqa/recordings/
- name: Upload trajectories
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: migration-trajectories-${{ github.run_number }}-ubuntu
path: autoqa/trajectories/
- name: Cleanup after tests
if: always()
run: |
./autoqa/scripts/ubuntu_post_cleanup.sh "false"
migration-macos:
if: inputs.old_macos_installer != '' && inputs.new_macos_installer != ''
runs-on: macos-selfhosted-15-arm64-cua
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python 3.13
uses: actions/setup-python@v4
with:
python-version: '3.13'
- name: Setup script permissions
run: |
chmod +x autoqa/scripts/setup_permissions.sh || true
./autoqa/scripts/setup_permissions.sh || true
- name: Clean existing Jan installations
run: |
./autoqa/scripts/macos_cleanup.sh
- name: Download OLD and NEW installers
run: |
set -e
# Download OLD installer using existing script
./autoqa/scripts/macos_download.sh \
"${{ inputs.old_macos_installer }}" \
"false" \
"" \
"" \
"" \
""
cp /tmp/jan-installer.dmg /tmp/jan-old.dmg
# Download NEW installer using existing script
./autoqa/scripts/macos_download.sh \
"${{ inputs.new_macos_installer }}" \
"false" \
"" \
"" \
"" \
""
cp /tmp/jan-installer.dmg /tmp/jan-new.dmg
echo "OLD_VERSION=/tmp/jan-old.dmg" >> $GITHUB_ENV
echo "NEW_VERSION=/tmp/jan-new.dmg" >> $GITHUB_ENV
- name: Install Python dependencies
working-directory: autoqa
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run migration tests (macOS)
working-directory: autoqa
run: |
case="${{ inputs.migration_test_case }}"
caseArg=""
if [ -n "${case}" ]; then caseArg="--migration-test-case \"${case}\""; fi
python main.py --enable-migration-test --old-version "${OLD_VERSION}" --new-version "${NEW_VERSION}" --max-turns ${{ inputs.max_turns }} ${caseArg}
- name: Upload screen recordings
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: migration-recordings-${{ github.run_number }}-macos
path: autoqa/recordings/
- name: Upload trajectories
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: migration-trajectories-${{ github.run_number }}-macos
path: autoqa/trajectories/
- name: Cleanup after tests
if: always()
run: |
./autoqa/scripts/macos_post_cleanup.sh
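
The workflow can also be dispatched programmatically instead of through the Actions UI. A minimal TypeScript sketch using Octokit; the owner/repo values and token handling are assumptions for illustration, not part of this commit:

```ts
import { Octokit } from '@octokit/rest'

const octokit = new Octokit({ auth: process.env.GITHUB_TOKEN })

// Dispatch inputs are sent as strings; GitHub coerces typed inputs like max_turns.
await octokit.rest.actions.createWorkflowDispatch({
  owner: 'your-org', // placeholder
  repo: 'your-repo', // placeholder
  workflow_id: 'autoqa-migration.yml',
  ref: 'dev',
  inputs: {
    old_windows_installer: 'https://example.com/jan-old.exe',
    new_windows_installer: 'https://example.com/jan-new.exe',
    max_turns: '65',
  },
})
```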


@@ -47,6 +47,8 @@ test: lint
 	yarn copy:assets:tauri
 	yarn build:icon
 	cargo test --manifest-path src-tauri/Cargo.toml --no-default-features --features test-tauri -- --test-threads=1
+	cargo test --manifest-path src-tauri/plugins/tauri-plugin-hardware/Cargo.toml
+	cargo test --manifest-path src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml

 # Builds and publishes the app
 build-and-publish: install-and-build


@@ -194,6 +194,10 @@ export interface chatOptions {
 export interface ImportOptions {
   modelPath: string
   mmprojPath?: string
+  modelSha256?: string
+  modelSize?: number
+  mmprojSha256?: string
+  mmprojSize?: number
 }

 export interface importResult {
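
A usage sketch for the new fields (hypothetical values; assumes `ImportOptions` is exported from `@janhq/core`). When a checksum and size are supplied, the download pipeline can verify the fetched files against them:

```ts
import type { ImportOptions } from '@janhq/core'

// Hypothetical digest/size, for illustration only.
const opts: ImportOptions = {
  modelPath: 'https://example.com/model.gguf',
  modelSha256: 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
  modelSize: 4_890_000_000, // bytes, as published by the model host
}
```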


@@ -73,6 +73,9 @@ export enum DownloadEvent {
   onFileDownloadSuccess = 'onFileDownloadSuccess',
   onFileDownloadStopped = 'onFileDownloadStopped',
   onFileDownloadStarted = 'onFileDownloadStarted',
+  onModelValidationStarted = 'onModelValidationStarted',
+  onModelValidationFailed = 'onModelValidationFailed',
+  onFileDownloadAndVerificationSuccess = 'onFileDownloadAndVerificationSuccess',
 }

 export enum ExtensionRoute {
   baseExtensions = 'baseExtensions',
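
Frontend code can subscribe to the new lifecycle events; a minimal sketch, assuming the `events` emitter from `@janhq/core` exposes `on`, and that payload shapes match the emitting code shown later in this commit ({ modelId, downloadType, error?, reason? }):

```ts
import { events, DownloadEvent } from '@janhq/core'

events.on(DownloadEvent.onModelValidationStarted, (p: { modelId: string }) => {
  console.log(`Verifying checksum for ${p.modelId}...`)
})
events.on(DownloadEvent.onModelValidationFailed, (p: { modelId: string; error?: string }) => {
  console.error(`Validation failed for ${p.modelId}: ${p.error}`)
})
events.on(DownloadEvent.onFileDownloadAndVerificationSuccess, (p: { modelId: string }) => {
  console.log(`${p.modelId} downloaded and verified`)
})
```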

8 binary image files added (236 KiB, 5.8 MiB, 236 KiB, 50 KiB, 21 KiB, 17 KiB, 74 KiB, 32 KiB); content not shown.


@@ -0,0 +1,446 @@
---
title: "Jan v1 for Deep Research: System Prompts & Setup Guide"
description: "Explore Jan-V1 capabilities in report generation and research tasks with prompt comparisons, examples, and customization instructions."
keywords: ["Jan-V1", "AI research", "system prompts", "LLM optimization", "research AI", "Jan App", "model configuration"]
readingTime: "8 min read"
tags: Qwen, Jan-V1, Agentic
categories: research
ogImage: assets/images/general/og-jan-research.jpeg
date: 2025-08-22
---
# Jan v1 for Deep Research: System Prompts & Setup Guide
This cookbook will transform your Jan-V1 from a basic Q&A tool into a comprehensive research assistant. By the end of this guide, you'll have a custom-configured model that generates detailed reports with proper citations instead of surface-level answers.
![Jan-V1 research comparison](./_assets/deep_research_compare_jan.gif)
## Key Points
- **Jan-V1 includes a default chat template** that's automatically embedded in its Hugging Face configuration
- **Use the default prompt** for daily tasks requiring short, accurate answers
- **Use the research prompt** for report generation and comprehensive research tasks
- **Always specify dates** when asking time-sensitive questions (e.g., "What's the world population in 2023?")
---
## Introduction
[Jan-V1](https://huggingface.co/janhq/Jan-v1-4B) is the first release in the **Jan Family**, designed for agentic reasoning and problem-solving within the [Jan App](https://jan.ai/). Based on our [**Lucy**](https://huggingface.co/Menlo/Lucy) model, Jan-v1 achieves improved performance through model scaling.
Jan-v1 is built on the [Qwen3-4B-Thinking](https://huggingface.co/Qwen/Qwen3-4B-Thinking-2507) model, which provides enhanced reasoning capabilities and tool utilization. This architecture delivers better performance on complex agentic tasks.
This guide explores how to optimize Jan-V1 for research-intensive tasks by switching from the default prompt to a specialized research system prompt. Through our internal testing, we've found significant improvements in report generation quality, depth of analysis, and citation accuracy when using the research-optimized system prompt.
## Findings
Jan-V1 comes with a built-in search and scrape prompt in its default template. This prompt is optimized for SimpleQA-style questions and daily tasks that require short, accurate responses. However, our testing revealed that when tasked with generating comprehensive long-form reports or conducting in-depth research, the model's performance was inconsistent with the default prompt.
We experimented with Jan-V1 using a new system prompt optimized for research-intensive tasks and found significant improvements in report generation capability. The research prompt produces more comprehensive, well-structured reports with proper citations and deeper analysis compared to the default prompt.
---
## Output Comparison: Default System Prompt vs. Research Prompt on Report Generation Tasks
### Example 1: Long-Context Benchmark Comparison
**User Request:**
> Compare all published benchmarks for long-context performance across LLMs released in 2024-2025
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6 mb-8">
{/* Default Prompt Column */}
<div className="bg-gray-50 dark:bg-gray-900 p-6 rounded-lg border">
<h4 className="font-semibold mb-4 text-lg border-b border-gray-300 dark:border-gray-600 pb-2">Default Prompt Response</h4>
<div className="prose prose-xs max-w-none text-xs mt-4 h-[500px] overflow-y-auto [&_h1]:text-sm [&_h2]:text-xs [&_h3]:text-xs [&_h4]:text-xs [&_h5]:text-xs [&_h6]:text-xs [&_p]:text-xs [&_li]:text-xs [&_td]:text-xs [&_th]:text-xs">
I've analyzed multiple benchmarks for long-context LLM performance published in 2024-2025 through comprehensive scraping and search results. Here's a structured comparison of the major benchmarks:
#### 1. Loong (2024)
- **Purpose**: Evaluates long-context understanding in *realistic multi-document scenarios* where all documents are relevant to the answer (no document can be ignored)
- **Key Features**:
- Four task types: Spotlight Locating, Comparison, Clustering, Chain of Reasoning
- Context lengths: 10k-250k tokens across 4 sets
- Documents sourced from financial reports, legal cases, and academic papers (2024)
- **Notable Findings**:
- Top models (e.g., Gemini-1.5-pro) still struggle with ultra-long contexts (performance drops significantly at >100k tokens)
- RAG shows minimal improvement over native models for these tasks
- Human performance on hardest tasks: 53.7% accuracy with 15-minute time limit
#### 2. Ada-LEval (2024)
- **Purpose**: Length-adaptable benchmark for evaluating LLMs' long-context capabilities across varying context lengths
- **Key Features**:
- Two subsets: TSort (text sorting) and BestAnswer
- Supports context lengths up to 128k tokens
- Designed to test models in "ultra-long" settings (100k+ tokens)
- **Notable Findings**:
- Reveals significant gaps in models' capabilities at extreme lengths
- Shows that even top models (e.g., GPT-4o) have limitations in ultra-long contexts
#### 3. LongBench v2 (2025)
- **Purpose**: Assesses LLMs' ability to handle long-context problems requiring deep understanding across real-world multitasks
- **Key Features**:
- 503 multiple-choice questions across 6 categories
- Context lengths: 8k-2M words (majority under 128k)
- Human expert validation (53.7% accuracy on hardest questions)
- **Notable Findings**:
- Best-performing model (o1-preview): 57.7% accuracy (vs. human baseline of 53.7%)
- Clear performance degradation at longer contexts (e.g., 128k+ tokens)
- Leaderboard shows models like Gemini-2.5-Pro and DeepSeek-R1 outperform others
#### 4. Vellum AI LLM Leaderboard (2025)
- **Purpose**: Comparative benchmark of LLMs' capabilities, price, and context window sizes
- **Key Features**:
- Includes models from 2024-2025 (e.g., GPT-4o, Gemini-2.5-Pro, Qwen3-235B)
- Focuses on practical metrics like context window size and real-world performance
- **Notable Findings**:
- Highlights the gap between claimed context window sizes and actual performance
- Shows that most models (even those with "1M" context windows) show performance drops beyond 128k tokens
#### Cross-Benchmark Comparison Summary
| Benchmark | Release Year | Key Innovation | Context Length Range |
|-----------|---------------|----------------|----------------------|
| Loong | 2024 | No-document-ignored multi-document QA | 10k-250k tokens |
| Ada-LEval | 2024 | Length-adaptive task subsets | Up to 128k tokens |
| LongBench v2 | 2025 | Real-world multitask evaluation | 8k-2M words |
| Vellum Leaderboard | 2025 | Context window vs. real performance | Varies |
#### Critical Insights from All Benchmarks
1. **Performance Decline at Scale**: All benchmarks show significant performance degradation beyond 100-128k tokens, indicating that current models don't fully leverage their claimed context windows
2. **Realism Gap**: Benchmarks like Loong and LongBench v2 reveal that models struggle with *realistic* multi-document scenarios (where evidence is scattered across documents) better than synthetic benchmarks
3. **RAG Limitations**: RAG implementations show minimal benefit for these tasks (Loong results indicate RAG often fails to retrieve all relevant documents)
4. **Model Variability**: Larger models (e.g., 235B+ parameter) show better performance but still fall short in ultra-long contexts compared to human capabilities
This comparison demonstrates that while 2024-2025 benchmarks have advanced long-context evaluation, there remains a significant gap between theoretical context window capabilities and practical performance in real-world multi-document scenarios.
</div>
</div>
{/* Research Prompt Column */}
<div className="bg-blue-50 dark:bg-blue-950 p-6 rounded-lg border border-blue-200 dark:border-blue-800">
<h4 className="font-semibold mb-4 text-lg border-b border-gray-300 dark:border-gray-600 pb-2">Research Prompt Response</h4>
<div className="prose prose-xs max-w-none text-xs mt-4 h-[500px] overflow-y-auto [&_h1]:text-sm [&_h2]:text-xs [&_h3]:text-xs [&_h4]:text-xs [&_h5]:text-xs [&_h6]:text-xs [&_p]:text-xs [&_li]:text-xs [&_td]:text-xs [&_th]:text-xs">
# Comparative Analysis of Long-Context LLM Benchmarks (2024-2025)
## Overview
This report provides a comprehensive comparison of published long-context LLM benchmarks released during 2024-2025. Long-context capabilities have become a critical differentiator among large language models (LLMs), particularly for applications requiring processing of extensive documents, multi-step reasoning across lengthy inputs, and complex information integration. As model context windows have expanded from traditional 128K to 2M+ tokens, the need for rigorous, standardized evaluation frameworks has intensified.
## Key Benchmarks Identified
The following four benchmarks represent the most comprehensive and widely cited long-context evaluation frameworks from 2024-2025:
| Benchmark | Release Date | Context Length Range | Primary Focus |
|-----------|---------------|----------------------|----------------|
| LongBench v2 | May 2025 | 8k - 2M words | Real-world long-context understanding |
| MIR-Bench | Feb 2025 | Variable (up to 10k+ shots) | Many-shot in-context inductive reasoning |
| GSM-∞ | Feb 2025 | 0 - 16M+ tokens | Infinitely scalable reasoning complexity |
| Vellum AI Leaderboard 2025 | April 2025 | Up to 2M tokens | Cross-benchmark model comparison |
## Key Findings and Implications
Based on the comprehensive analysis of these benchmarks, several important findings emerge:
1. **Sigmoid performance pattern**: Across all benchmarks (MIR-Bench, GSM-∞), LLM performance shows a consistent sigmoid decline as reasoning complexity increases. This suggests fundamental limitations in scaling LLMs for highly complex long-context tasks.
2. **Context length ≠ performance**: While longer context windows correlate with better performance in some benchmarks (Vellum), the relationship is not linear. GSM-∞ shows that beyond a certain point, adding more context does not improve performance proportionally.
3. **CoT has diminishing returns**: MIR-Bench findings indicate that Chain-of-Thought techniques often decrease performance in many-shot inductive reasoning tasks, contrary to their benefits in simpler tasks.
4. **Noise robustness matters**: GSM-∞ demonstrates that models struggle to distinguish relevant information from noise in extremely long contexts, highlighting a critical challenge for real-world applications.
5. **Real-world relevance**: LongBench v2 shows that models perform significantly better on real-world tasks than on synthetic ones, indicating that current benchmarks may not fully capture practical long-context capabilities.
## Conclusion
The landscape of long-context LLM benchmarks in 2024-2025 reveals both progress and persistent challenges. While benchmarks like LongBench v2, MIR-Bench, GSM-∞, and the Vellum Leaderboard provide increasingly sophisticated ways to evaluate long-context capabilities, significant gaps remain in understanding how LLMs scale for highly complex, real-world tasks. Future work should focus on:
1. Developing benchmarks that better represent the full spectrum of real-world long-context challenges
2. Creating evaluation frameworks that measure both context length and information density
3. Establishing standardized methods to assess noise robustness and inductive reasoning capabilities
These benchmarks collectively provide the foundation for continued progress in long-context LLM development, with their findings guiding researchers and practitioners toward more effective model design and evaluation.
## References
[1] Bai, Y., Tu, S., Zhang, J., et al. (2025). LongBench v2: Towards Deeper Understanding and Reasoning on Realistic Long-context Multitasks. https://longbench2.github.io/
[2] Yan, K., Chen, Z., & Tian, Y. (2025). MIR-Bench: Benchmarking LLM's Long-Context Intelligence via Many-Shot In-Context Inductive Reasoning. arXiv preprint arXiv:2502.09933.
[3] Liu, H., Chen, Z., & Tian, Y. (2025). GSM-∞: How Do Your LLMs Behave over Infinitely Increasing Context Length and Reasoning Complexity? arXiv preprint arXiv:2502.05252.
[4] Vellum AI. (2025). LLM Leaderboard 2025. https://www.vellum.ai/llm-leaderboard
</div>
</div>
</div>
### Example 2: AI Model Comparison
**User Request:**
> Explain the difference between different AI models: 1) Jan-Nano 2) Lucy 3) Jan-V1
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6 mb-8">
{/* Default Prompt Column */}
<div className="bg-gray-50 dark:bg-gray-900 p-6 rounded-lg border">
<h4 className="font-semibold mb-4 text-lg border-b border-gray-300 dark:border-gray-600 pb-2">Default Prompt Response</h4>
<div className="prose prose-xs max-w-none text-xs mt-4 h-[500px] overflow-y-auto [&_h1]:text-sm [&_h2]:text-xs [&_h3]:text-xs [&_h4]:text-xs [&_h5]:text-xs [&_h6]:text-xs [&_p]:text-xs [&_li]:text-xs [&_td]:text-xs [&_th]:text-xs">
The differences between Jan-Nano, Lucy, and Jan-V1 can be summarized as follows based on available information:
1. **Jan-Nano**
- A **4-billion parameter model** developed by Menlo Research for deep research tasks.
- Achieves **83.2% accuracy** on the SimpleQA benchmark (per arXiv technical report).
- Optimized for local deployment with tools like MCP (Multi-Channel Protocol) for web search.
- Has variants like *Jan-Nano-128k*, which features a native 128k context window for improved research capabilities.
2. **Lucy**
- A **1.7-billion parameter model** created by Menlo Research as a lightweight, mobile-friendly variant.
- Designed for **agentic web search** and lightweight browsing (e.g., using tools like Serper and Crawl4AI).
- Benchmarked at **78.3% accuracy** on SimpleQA with MCP, slightly below Jan-Nano (80.7%), but optimized for mobile/CPU efficiency.
- Built on Qwen3-1.7B with RLVR (Reinforcement Learning with Value Functions) and "machine-generated task vectors" to enhance reasoning.
3. **Jan-V1**
- A **4-billion parameter model** from Menlo Research's *Jan Family*, explicitly built *based on Lucy*.
- Achieves **91.1% accuracy** on SimpleQA, outperforming Jan-Nano by ~8 percentage points.
- Focuses on **agentic reasoning and problem-solving** via enhanced tool integration (e.g., for complex tasks).
- Targets seamless integration with the Jan App for local, high-performance research workflows.
### Key Distinctions:
- **Size & Use Case**: Lucy is the smallest (1.7B) and mobile-optimized; Jan-Nano and Jan-V1 are both 4B but Jan-V1 is a scaled-up version of Lucy for higher performance.
- **Performance**: Jan-V1 > Jan-Nano > Lucy in SimpleQA benchmarks (91.1% → 83.2% → 78.3%).
- **Technical Approach**: Lucy uses RLVR with task vectors for efficiency; Jan-V1 inherits this but scales it for better accuracy and tool utilization.
- **Context Window**: Jan-Nano-128k offers a larger context window (128k) than base Jan-Nano, but Lucy is designed for mobile constraints.
This hierarchy reflects Menlo Research's progression from lightweight mobile models (Lucy) to scalable research tools (Jan-Nano and Jan-V1).
</div>
</div>
{/* Research Prompt Column */}
<div className="bg-blue-50 dark:bg-blue-950 p-6 rounded-lg border border-blue-200 dark:border-blue-800">
<h4 className="font-semibold mb-4 text-lg border-b border-gray-300 dark:border-gray-600 pb-2">Research Prompt Response</h4>
<div className="prose prose-xs max-w-none text-xs mt-4 h-[500px] overflow-y-auto [&_h1]:text-sm [&_h2]:text-xs [&_h3]:text-xs [&_h4]:text-xs [&_h5]:text-xs [&_h6]:text-xs [&_p]:text-xs [&_li]:text-xs [&_td]:text-xs [&_th]:text-xs">
# Differences Between Jan-Nano, Lucy, and Jan-V1 AI Models: A Comparative Analysis
## Overview
This report provides a detailed comparison of three AI language models: **Jan-Nano**, **Lucy**, and **Jan-V1**. All three are part of the broader Jan AI ecosystem, though they serve distinct purposes and have different technical specifications. The analysis synthesizes information from authoritative sources to clarify their architectures, capabilities, intended use cases, and relationships.
## Key Findings
### 1. **Jan-Nano**
- **Parameters**: 4 billion (4B)
- **Primary Purpose**: Deep research tasks
- **Notable Feature**: Native 128k context window in its enhanced version (Jan-Nano 128k)
### 2. **Lucy**
- **Parameters**: 1.7 billion (1.7B)
- **Primary Purpose**: Mobile-capable search and lightweight browsing tasks
- **Notable Feature**: Designed as a "toy model" for search and browsing with minimal resource requirements
### 3. **Jan-V1**
- **Parameters**: 4 billion (4B)
- **Primary Purpose**: Agentic reasoning and problem-solving via web search and tool invocation
- **Notable Feature**: Open-source alternative to Perplexity Pro with 91% SimpleQA accuracy
## Technical and Functional Differences
| **Model** | **Parameter Count** | **Specialized For** | **Key Differentiator** |
|-----------|---------------------|--------------------------------------|------------------------------------------------|
| Jan-Nano | 4B | Deep research tasks | 128k context window (enhanced version) |
| Lucy | 1.7B | Mobile search and lightweight browsing | Ultra-low resource footprint; "toy model" design |
| Jan-V1 | 4B | Agentic reasoning & web search | Built on Lucy; 91% SimpleQA accuracy; open-source |
### Performance and Contextual Capabilities
- **Jan-Nano**'s 128k context window allows it to process longer documents or complex queries than standard models.
- **Lucy**'s 1.7B size makes it significantly lighter than Jan-Nano or Jan-V1, enabling deployment on mobile devices.
- **Jan-V1**'s 91% SimpleQA accuracy (from benchmarks) demonstrates strong performance in structured knowledge retrieval tasks.
## References
1. Jan-Nano: [Menlo/Jan-nano on Hugging Face](https://huggingface.co/Menlo/Jan-nano)
2. Lucy: [Reddit discussion on Lucy as a 1.7B model](https://www.reddit.com/r/LocalLLaMA/comments/1m2tjjc/lucy_a_mobilecapable_17b_reasoning_model_that/)
3. Jan-V1: [Jan-v1 on Hugging Face](https://huggingface.co/janhq/Jan-v1-4B)
4. Jan-V1: [Jan's X announcement about Jan-v1](https://x.com/jandotai/status/1955176280535732415)
5. Jan-V1: [Reddit comparison of Jan-v1 with Qwen3 and Perplexity](https://www.reddit.com/r/LocalLLaMA/comments/1mpmeba/janv1_trial_results_followup_and_comparison_to/)
</div>
</div>
</div>
### Example 3: Software Comparison
**User Request:**
> Compare Signal, Telegram, WhatsApp for secure messaging
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6 mb-8">
{/* Default Prompt Column */}
<div className="bg-gray-50 dark:bg-gray-900 p-6 rounded-lg border">
<h4 className="font-semibold mb-4 text-lg border-b border-gray-300 dark:border-gray-600 pb-2">Default Prompt Response</h4>
<div className="prose prose-sm max-w-none text-sm mt-4">
**Security Overview:**
- Signal: E2EE by default, open-source
- Telegram: E2EE only in Secret Chats
- WhatsApp: E2EE but Meta data collection
- Basic privacy comparison
- 200 words, single citation
**Strengths:** Clear recommendations
**Weaknesses:** Limited analysis depth
</div>
</div>
{/* Research Prompt Column */}
<div className="bg-blue-50 dark:bg-blue-950 p-6 rounded-lg border border-blue-200 dark:border-blue-800">
<h4 className="font-semibold mb-4 text-lg border-b border-gray-300 dark:border-blue-800 pb-2">Research Prompt Response</h4>
<div className="prose prose-sm max-w-none text-sm mt-4">
**Comprehensive Security Analysis:**
- Full report with methodology and detailed comparison table
- Academic research citations (IACR 2023 study)
- Privacy practices analysis with breach history
- User experience and practical considerations
- Final recommendations table by use case
- 1000+ words, 5 authoritative references
**Strengths:** Evidence-based analysis, academic rigor
**Result:** Professional security assessment format
</div>
</div>
</div>
---
## How to use Research Prompt with Jan-V1
Jan-V1 ships with an [embedded chat template](https://huggingface.co/janhq/Jan-v1-4B/blob/main/chat_template.jinja) that automatically applies a default system prompt, so every chat is opted in to it by default. To optimize for research tasks, you need to override this default prompt in the Jan app settings.
### Step 1: Access Assistant Settings
Navigate to the Jan app and access the **assistant settings icon** ⚙️ on the top left of the screen.
![Jan app: how to open assistant settings](./_assets/jan_settings.png)
When you open the assistant settings, you'll notice the system prompt field appears empty. This is expected: Jan-V1's default system prompt is embedded directly in the [chat template](https://huggingface.co/janhq/Jan-v1-4B/blob/main/chat_template.jinja) rather than surfaced in the Jan app's UI. An empty field doesn't mean there's no system prompt; it's simply not visible in the interface.
![Jan app assistant settings interface showing empty system prompt field interface](./_assets/jan_default_prompt.png)
### Step 2: Understanding the Default System Prompt
Before switching to the research prompt, it's helpful to understand what the default Jan-V1 system prompt provides:
```md
In this environment you have access to a set of tools you can use to answer the user's question. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
Tool Use Rules
Here are the rules you should always follow to solve your task:
1. Always use the right arguments for the tools. Never use variable names as the action arguments, use the value instead.
2. Call a tool only when needed: do not call the search agent if you do not need information, try to solve the task yourself.
3. If no tool call is needed, just answer the question directly.
4. Never re-do a tool call that you previously did with the exact same parameters.
5. For tool use, MARK SURE use XML tag format as shown in the examples above. Do not use any other format.
Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000.
```
### Step 3: Implementing the Research Prompt
To switch to the research-optimized prompt, replace the default system prompt with the following research template:
````md
You are a **research agent** designed to conduct **in-depth, methodical investigations** into user questions. Your goal is to produce a **comprehensive, well-structured, and accurately cited report** using **authoritative sources**. You will use available tools to gather detailed information, analyze it, and synthesize a final response.

### **Tool Use Rules (Strictly Enforced)**

1. **Use correct arguments**: Always use actual values; never pass variable names (e.g., use "Paris" not {city}).
2. **Call tools only when necessary**: If you can answer from prior results, do so; **do not search unnecessarily**. However, all cited **URLs in the report must be visited**, and all **entities (people, organizations, locations, etc.) mentioned in the report must be searched/visited**.
3. **Terminate when full coverage is achieved**: Conclude tool usage and deliver a final response only when the investigation has achieved **comprehensive coverage** of the topic. This means not only gathering sufficient data to answer the question but also ensuring all critical aspects (context, subtopics, and nuances) are adequately addressed. Once the analysis is complete and no further tool use would add meaningful value, **immediately stop searching and provide a direct, fully formed response**.
4. **Visit all URLs**: All cited **URLs in the report must be visited**, and all **entities mentioned in the report must be browsed**.
5. **Avoid repetition**: Never repeat the same tool call with identical arguments. If you detect a cycle (e.g., repeating the same search), **stop and answer based on available data**.
6. **Track progress**: Treat each tool call as a step in a plan. After each result, ask: "Do I have full coverage?" and "What is the next step?"
7. **Limit tool usage**: If you've used a tool multiple times without progress, **reassess and attempt to conclude**; do not continue indefinitely.
8. **Use proper format**: Make sure you wrap tool calls in XML tags as shown in the example.

### Output Format Requirements

At the end, respond **only** with a **self-contained markdown report**. Do not include tool calls or internal reasoning in the final output.

Example structure:

```markdown
# [Clear Title]

## Overview
...

## Key Findings
- Finding 1 [1]
- Finding 2 [2]

## Detailed Analysis
...

## References
[1] https://example.com/source1
[2] https://example.com/study2
...
```

### Goal

Answer with depth, precision, and scholarly rigor. You will be rewarded for:
- Thoroughness in research
- Use of high-quality sources when available (.gov, .edu, peer-reviewed, reputable media)
- Clear, structured reporting
- Efficient path to completion without redundancy

Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000.
````
If set up correctly, you should see this in Jan:
![jan v1 deep_research_prompt](./_assets/jan_research_prompt.png)
Save the assistant settings and enjoy Jan-V1 with improved report-generation capabilities.
### (Optional) Time-Sensitive Query Optimization
Jan-V1 can sometimes assume an incorrect current date based on the year it was trained on. This is easily mitigated by appending the current date to your system prompt:
```md
You are a **research agent** designed to ...
....
Current Year: 2025
Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000.
```
Alternatively, you can simply add the date directly to your question:
**Example:**
- Instead of: "What's the world population?"
- Use: "What's the world population in 2025?"
## Advanced Usage: Customize Prompt Template
You can customize the chat template by opening the model settings icon ⚙️ in the center of Jan's model selection. Do not confuse this with the assistant settings ⚙️ on the top left of the screen.
![jan_open_prompt_template](./_assets/jan_open_prompt_template.png)
Scroll down and you will see the Jinja template that can be overridden. We recommend experimenting with this [raw chat template](https://huggingface.co/janhq/Jan-v1-4B/blob/main/chat_template_raw.jinja) to completely eliminate the effect of the default system prompt.
We observed fewer tool calls per query when overriding the default chat template with this raw template, so we recommend it for advanced usage only.
![jan_prompt_template_settings](./_assets/jan_prompt_template_settings.png)


@@ -75,7 +75,7 @@ export default class JanAssistantExtension extends AssistantExtension {
     'Jan is a helpful desktop assistant that can reason through complex tasks and use tools to complete them on the users behalf.',
   model: '*',
   instructions:
-    'You are a helpful AI assistant. Your primary goal is to assist users with their questions and tasks to the best of your abilities.\n\nWhen responding:\n- Answer directly from your knowledge when you can\n- Be concise, clear, and helpful\n- Admit when youre unsure rather than making things up\n\nIf tools are available to you:\n- Only use tools when they add real value to your response\n- Use tools when the user explicitly asks (e.g., "search for...", "calculate...", "run this code")\n- Use tools for information you dont know or that needs verification\n- Never use tools just because theyre available\n\nWhen using tools:\n- Use one tool at a time and wait for results\n- Use actual values as arguments, not variable names\n- Learn from each result before deciding next steps\n- Avoid repeating the same tool call with identical parameters\n\nRemember: Most questions can be answered without tools. Think first whether you need them.',
+    'You are a helpful AI assistant. Your primary goal is to assist users with their questions and tasks to the best of your abilities.\n\nWhen responding:\n- Answer directly from your knowledge when you can\n- Be concise, clear, and helpful\n- Admit when youre unsure rather than making things up\n\nIf tools are available to you:\n- Only use tools when they add real value to your response\n- Use tools when the user explicitly asks (e.g., "search for...", "calculate...", "run this code")\n- Use tools for information you dont know or that needs verification\n- Never use tools just because theyre available\n\nWhen using tools:\n- Use one tool at a time and wait for results\n- Use actual values as arguments, not variable names\n- Learn from each result before deciding next steps\n- Avoid repeating the same tool call with identical parameters\n\nRemember: Most questions can be answered without tools. Think first whether you need them.\n\nCurrent date: {{current_date}}',
   tools: [
     {
       type: 'retrieval',


@@ -10,6 +10,8 @@ interface DownloadItem {
   url: string
   save_path: string
   proxy?: Record<string, string | string[] | boolean>
+  sha256?: string
+  size?: number
 }

 type DownloadEvent = {


@@ -10,7 +10,18 @@
       "recommended": ""
     }
   },
+  {
+    "key": "llamacpp_env",
+    "title": "Environment variables",
+    "description": "Environment variables for llama.cpp (KEY=VALUE), separated by ';'",
+    "controllerType": "input",
+    "controllerProps": {
+      "value": "none",
+      "placeholder": "Eg. GGML_VK_VISIBLE_DEVICES=0,1",
+      "type": "text",
+      "textAlign": "right"
+    }
+  },
   {
     "key": "auto_update_engine",
     "title": "Auto update engine",


@@ -43,9 +43,9 @@ export async function listSupportedBackends(): Promise<
     if (features.vulkan) supportedBackends.push('win-vulkan-x64')
   }
   // not available yet, placeholder for future
-  else if (sysType == 'windows-aarch64') {
+  else if (sysType === 'windows-aarch64' || sysType === 'windows-arm64') {
     supportedBackends.push('win-arm64')
-  } else if (sysType == 'linux-x86_64') {
+  } else if (sysType === 'linux-x86_64' || sysType === 'linux-x86') {
     supportedBackends.push('linux-noavx-x64')
     if (features.avx) supportedBackends.push('linux-avx-x64')
     if (features.avx2) supportedBackends.push('linux-avx2-x64')
@@ -69,11 +69,11 @@
     if (features.vulkan) supportedBackends.push('linux-vulkan-x64')
   }
   // not available yet, placeholder for future
-  else if (sysType === 'linux-aarch64') {
+  else if (sysType === 'linux-aarch64' || sysType === 'linux-arm64') {
     supportedBackends.push('linux-arm64')
-  } else if (sysType === 'macos-x86_64') {
+  } else if (sysType === 'macos-x86_64' || sysType === 'macos-x86') {
     supportedBackends.push('macos-x64')
-  } else if (sysType === 'macos-aarch64') {
+  } else if (sysType === 'macos-aarch64' || sysType === 'macos-arm64') {
     supportedBackends.push('macos-arm64')
   }
@@ -262,10 +262,7 @@ async function _getSupportedFeatures() {
     features.cuda12 = true
   }
   // Vulkan support check - only discrete GPUs with 6GB+ VRAM
-  if (
-    gpuInfo.vulkan_info?.api_version &&
-    gpuInfo.total_memory >= 6 * 1024
-  ) {
+  if (gpuInfo.vulkan_info?.api_version && gpuInfo.total_memory >= 6 * 1024) {
     // 6GB (total_memory is in MB)
     features.vulkan = true
   }


@@ -20,9 +20,11 @@ import {
   chatCompletionRequest,
   events,
   AppEvent,
+  DownloadEvent,
 } from '@janhq/core'
 import { error, info, warn } from '@tauri-apps/plugin-log'
+import { listen } from '@tauri-apps/api/event'

 import {
   listSupportedBackends,
@@ -33,12 +35,17 @@ import {
 import { invoke } from '@tauri-apps/api/core'
 import { getProxyConfig } from './util'
 import { basename } from '@tauri-apps/api/path'
-import { readGgufMetadata } from '@janhq/tauri-plugin-llamacpp-api'
+import {
+  GgufMetadata,
+  readGgufMetadata,
+} from '@janhq/tauri-plugin-llamacpp-api'
+import { getSystemUsage } from '@janhq/tauri-plugin-hardware-api'

 type LlamacppConfig = {
   version_backend: string
   auto_update_engine: boolean
   auto_unload: boolean
+  llamacpp_env: string
   chat_template: string
   n_gpu_layers: number
   offload_mmproj: boolean
@@ -71,6 +78,8 @@ interface DownloadItem {
   url: string
   save_path: string
   proxy?: Record<string, string | string[] | boolean>
+  sha256?: string
+  size?: number
 }

 interface ModelConfig {
@@ -79,6 +88,9 @@ interface ModelConfig {
   name: string // user-friendly
   // some model info that we cache upon import
   size_bytes: number
+  sha256?: string
+  mmproj_sha256?: string
+  mmproj_size_bytes?: number
 }

 interface EmbeddingResponse {
@@ -146,6 +158,7 @@ const logger = {
 export default class llamacpp_extension extends AIEngine {
   provider: string = 'llamacpp'
   autoUnload: boolean = true
+  llamacpp_env: string = ''
   readonly providerId: string = 'llamacpp'

   private config: LlamacppConfig
@@ -154,6 +167,7 @@ export default class llamacpp_extension extends AIEngine {
   private pendingDownloads: Map<string, Promise<void>> = new Map()
   private isConfiguringBackends: boolean = false
   private loadingModels = new Map<string, Promise<SessionInfo>>() // Track loading promises
+  private unlistenValidationStarted?: () => void

   override async onLoad(): Promise<void> {
     super.onLoad() // Calls registerEngine() from AIEngine
@@ -175,12 +189,26 @@
     this.config = loadedConfig as LlamacppConfig
     this.autoUnload = this.config.auto_unload
+    this.llamacpp_env = this.config.llamacpp_env

     // This sets the base directory where model files for this provider are stored.
     this.providerPath = await joinPath([
       await getJanDataFolderPath(),
       this.providerId,
     ])

+    // Set up validation event listeners to bridge Tauri events to frontend
+    this.unlistenValidationStarted = await listen<{
+      modelId: string
+      downloadType: string
+    }>('onModelValidationStarted', (event) => {
+      console.debug(
+        'LlamaCPP: bridging onModelValidationStarted event',
+        event.payload
+      )
+      events.emit(DownloadEvent.onModelValidationStarted, event.payload)
+    })
+
     this.configureBackends()
   }
@@ -774,6 +802,11 @@
   override async onUnload(): Promise<void> {
     // Terminate all active sessions
+    // Clean up validation event listeners
+    if (this.unlistenValidationStarted) {
+      this.unlistenValidationStarted()
+    }
   }

   onSettingUpdate<T>(key: string, value: T): void {
@@ -801,6 +834,8 @@
       closure()
     } else if (key === 'auto_unload') {
       this.autoUnload = value as boolean
+    } else if (key === 'llamacpp_env') {
+      this.llamacpp_env = value as string
     }
   }
@@ -1006,6 +1041,9 @@
       url: path,
       save_path: localPath,
       proxy: getProxyConfig(),
+      sha256:
+        saveName === 'model.gguf' ? opts.modelSha256 : opts.mmprojSha256,
+      size: saveName === 'model.gguf' ? opts.modelSize : opts.mmprojSize,
     })
     return localPath
   }
@@ -1023,8 +1061,6 @@
       : undefined

     if (downloadItems.length > 0) {
-      let downloadCompleted = false
       try {
         // emit download update event on progress
         const onProgress = (transferred: number, total: number) => {
@@ -1034,7 +1070,6 @@
             size: { transferred, total },
             downloadType: 'Model',
           })
-          downloadCompleted = transferred === total
         }
         const downloadManager = window.core.extensionManager.getByName(
           '@janhq/download-extension'
@@ -1045,13 +1080,67 @@
           onProgress
         )
-        const eventName = downloadCompleted
-          ? 'onFileDownloadSuccess'
-          : 'onFileDownloadStopped'
-        events.emit(eventName, { modelId, downloadType: 'Model' })
+        // If we reach here, download completed successfully (including validation)
+        // The downloadFiles function only returns successfully if all files downloaded AND validated
+        events.emit(DownloadEvent.onFileDownloadAndVerificationSuccess, {
+          modelId,
+          downloadType: 'Model'
+        })
       } catch (error) {
         logger.error('Error downloading model:', modelId, opts, error)
-        events.emit('onFileDownloadError', { modelId, downloadType: 'Model' })
+        const errorMessage =
+          error instanceof Error ? error.message : String(error)
+
+        // Check if this is a cancellation
+        const isCancellationError =
+          errorMessage.includes('Download cancelled') ||
+          errorMessage.includes('Validation cancelled') ||
+          errorMessage.includes('Hash computation cancelled') ||
+          errorMessage.includes('cancelled') ||
+          errorMessage.includes('aborted')
+
+        // Check if this is a validation failure
+        const isValidationError =
+          errorMessage.includes('Hash verification failed') ||
+          errorMessage.includes('Size verification failed') ||
+          errorMessage.includes('Failed to verify file')
+
+        if (isCancellationError) {
+          logger.info('Download cancelled for model:', modelId)
+          // Emit download stopped event instead of error
+          events.emit(DownloadEvent.onFileDownloadStopped, {
+            modelId,
+            downloadType: 'Model',
+          })
+        } else if (isValidationError) {
+          logger.error(
+            'Validation failed for model:',
+            modelId,
+            'Error:',
+            errorMessage
+          )
+          // Cancel any other download tasks for this model
+          try {
+            this.abortImport(modelId)
+          } catch (cancelError) {
+            logger.warn('Failed to cancel download task:', cancelError)
+          }
+          // Emit validation failure event
+          events.emit(DownloadEvent.onModelValidationFailed, {
+            modelId,
+            downloadType: 'Model',
+            error: errorMessage,
+            reason: 'validation_failed',
+          })
+        } else {
+          // Regular download error
+          events.emit(DownloadEvent.onFileDownloadError, {
+            modelId,
+            downloadType: 'Model',
+            error: errorMessage,
+          })
+        }
         throw error
       }
     }
@@ -1078,7 +1167,9 @@
     } catch (error) {
       logger.error('GGUF validation failed:', error)
       throw new Error(
-        `Invalid GGUF file(s): ${error.message || 'File format validation failed'}`
+        `Invalid GGUF file(s): ${
+          error.message || 'File format validation failed'
+        }`
       )
     }
@@ -1097,6 +1188,10 @@
       mmproj_path: mmprojPath,
       name: modelId,
       size_bytes,
+      model_sha256: opts.modelSha256,
+      model_size_bytes: opts.modelSize,
+      mmproj_sha256: opts.mmprojSha256,
+      mmproj_size_bytes: opts.mmprojSize,
     } as ModelConfig
     await fs.mkdir(await joinPath([janDataFolderPath, modelDir]))
     await invoke<void>('write_yaml', {
@@ -1108,16 +1203,50 @@
       modelPath,
       mmprojPath,
       size_bytes,
+      model_sha256: opts.modelSha256,
+      model_size_bytes: opts.modelSize,
+      mmproj_sha256: opts.mmprojSha256,
+      mmproj_size_bytes: opts.mmprojSize,
     })
   }
/**
* Deletes the entire model folder for a given modelId
* @param modelId The model ID to delete
*/
private async deleteModelFolder(modelId: string): Promise<void> {
try {
const modelDir = await joinPath([
await this.getProviderPath(),
'models',
modelId,
])
if (await fs.existsSync(modelDir)) {
logger.info(`Cleaning up model directory: ${modelDir}`)
await fs.rm(modelDir)
}
} catch (deleteError) {
logger.warn('Failed to delete model directory:', deleteError)
}
}
   override async abortImport(modelId: string): Promise<void> {
-    // prepand provider name to avoid name collision
+    // Cancel any active download task
+    // prepend provider name to avoid name collision
     const taskId = this.createDownloadTaskId(modelId)
     const downloadManager = window.core.extensionManager.getByName(
       '@janhq/download-extension'
     )
-    await downloadManager.cancelDownload(taskId)
+    try {
+      await downloadManager.cancelDownload(taskId)
+    } catch (cancelError) {
+      logger.warn('Failed to cancel download task:', cancelError)
+    }
+
+    // Delete the entire model folder if it exists (for validation failures)
+    await this.deleteModelFolder(modelId)
   }

  /**
@@ -1133,6 +1262,27 @@
     }
   }
private parseEnvFromString(
target: Record<string, string>,
envString: string
): void {
envString
.split(';')
.filter((pair) => pair.trim())
.forEach((pair) => {
const [key, ...valueParts] = pair.split('=')
const cleanKey = key?.trim()
if (
cleanKey &&
valueParts.length > 0 &&
!cleanKey.startsWith('LLAMA')
) {
target[cleanKey] = valueParts.join('=').trim()
}
})
}
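
For illustration, the parser's behavior on a sample setting value (a sketch calling the helper as a free function; in the extension it is a private method). Note that keys starting with `LLAMA` are filtered out, so user settings cannot clobber internals such as `LLAMA_API_KEY`:

```ts
const envs: Record<string, string> = {}
parseEnvFromString(
  envs,
  'GGML_VK_VISIBLE_DEVICES=0,1;HTTP_PROXY=http://127.0.0.1:8080;LLAMA_ARG_HOST=x'
)
// envs => { GGML_VK_VISIBLE_DEVICES: '0,1', HTTP_PROXY: 'http://127.0.0.1:8080' }
// LLAMA_ARG_HOST is dropped by the LLAMA-prefix filter.
```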
   override async load(
     modelId: string,
     overrideSettings?: Partial<LlamacppConfig>,
@@ -1221,6 +1371,9 @@
     const api_key = await this.generateApiKey(modelId, String(port))
     envs['LLAMA_API_KEY'] = api_key

+    // set user envs
+    this.parseEnvFromString(envs, this.llamacpp_env)
+
     // model option is required
     // NOTE: model_path and mmproj_path can be either relative to Jan's data folder or absolute path
     const modelPath = await joinPath([
@@ -1593,9 +1746,12 @@
       const [version, backend] = cfg.version_backend.split('/')
       if (!version || !backend) {
         throw new Error(
-          `Invalid version/backend format: ${cfg.version_backend}. Expected format: <version>/<backend>`
+          'Backend setup was not successful. Please restart the app with a stable internet connection.'
         )
       }
+      // set envs
+      const envs: Record<string, string> = {}
+      this.parseEnvFromString(envs, this.llamacpp_env)

       // Ensure backend is downloaded and ready before proceeding
       await this.ensureBackendReady(backend, version)
@@ -1606,11 +1762,12 @@
       const dList = await invoke<DeviceList[]>('plugin:llamacpp|get_devices', {
         backendPath,
         libraryPath,
+        envs,
       })
       return dList
     } catch (error) {
       logger.error('Failed to query devices:\n', error)
-      throw new Error(`Failed to load llama-server: ${error}`)
+      throw new Error('Failed to load llamacpp backend')
     }
   }
@@ -1690,4 +1847,134 @@
       'tokenizer.chat_template'
     ]?.includes('tools')
   }
  /**
   * Estimate the KV cache size from a given model's metadata.
   */
private async estimateKVCache(
meta: Record<string, string>,
ctx_size?: number
): Promise<number> {
const arch = meta['general.architecture']
if (!arch) throw new Error('Invalid metadata: architecture not found')
const nLayer = Number(meta[`${arch}.block_count`])
if (!nLayer) throw new Error('Invalid metadata: block_count not found')
const nHead = Number(meta[`${arch}.attention.head_count`])
if (!nHead) throw new Error('Invalid metadata: head_count not found')
// Try to get key/value lengths first (more accurate)
const keyLen = Number(meta[`${arch}.attention.key_length`])
const valLen = Number(meta[`${arch}.attention.value_length`])
let headDim: number
if (keyLen && valLen) {
// Use explicit key/value lengths if available
logger.info(
`Using explicit key_length: ${keyLen}, value_length: ${valLen}`
)
headDim = (keyLen + valLen)
} else {
// Fall back to embedding_length estimation
const embeddingLen = Number(meta[`${arch}.embedding_length`])
if (!embeddingLen)
throw new Error('Invalid metadata: embedding_length not found')
// Standard transformer: head_dim = embedding_dim / num_heads
// For KV cache: we need both K and V, so 2 * head_dim per head
headDim = (embeddingLen / nHead) * 2
logger.info(
`Using embedding_length estimation: ${embeddingLen}, calculated head_dim: ${headDim}`
)
}
let ctxLen: number
if (!ctx_size) {
ctxLen = Number(meta[`${arch}.context_length`])
} else {
ctxLen = ctx_size
}
logger.info(`ctxLen: ${ctxLen}`)
logger.info(`nLayer: ${nLayer}`)
logger.info(`nHead: ${nHead}`)
logger.info(`headDim: ${headDim}`)
// Assume f16 KV cache by default.
// Could be extended by checking cache-type-v and cache-type-k,
// but we are checking overall compatibility with the default settings.
// f16 = 16 bits = 2 bytes per element
const bytesPerElement = 2
// KV cache size per token, per layer = nHead * headDim * bytesPerElement
const kvPerToken = nHead * headDim * bytesPerElement
return ctxLen * nLayer * kvPerToken
}
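A worked example may help; the numbers below describe a hypothetical 32-layer model and are not taken from any specific GGUF file.

// Hypothetical: 32 layers, 32 KV heads, key_length = value_length = 128
const headDim = 128 + 128 // 256, K and V combined per head
const kvPerToken = 32 * 256 * 2 // nHead * headDim * 2 bytes (f16) = 16384 bytes per layer
const kvCacheBytes = 8192 * 32 * kvPerToken // ctx 8192 * 32 layers = exactly 4 GiB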
private async getModelSize(path: string): Promise<number> {
if (path.startsWith('https://')) {
const res = await fetch(path, { method: 'HEAD' })
const len = res.headers.get('content-length')
return len ? parseInt(len, 10) : 0
} else {
return (await fs.fileStat(path)).size
}
}
/**
* Check the support status of a model by its path (local/remote).
*
* Returns:
* - "RED": weights don't fit
* - "YELLOW": weights fit, KV cache doesn't
* - "GREEN": both weights and KV cache fit
*/
async isModelSupported(
path: string,
ctx_size?: number
): Promise<'RED' | 'YELLOW' | 'GREEN'> {
try {
const modelSize = await this.getModelSize(path)
logger.info(`modelSize: ${modelSize}`)
const gguf: GgufMetadata = await readGgufMetadata(path)
// estimateKVCache falls back to the model's own context_length when ctx_size is undefined
const kvCacheSize = await this.estimateKVCache(gguf.metadata, ctx_size)
// total memory consumption = model weights + kvcache + a small buffer for outputs
// output buffer is small so not considering here
const totalRequired = modelSize + kvCacheSize
logger.info(
`isModelSupported: Total memory requirement: ${totalRequired} for ${path}`
)
let availableMemBytes: number
const devices = await this.getDevices()
if (devices.length > 0) {
// Sum free memory across all GPUs
availableMemBytes = devices
.map((d) => d.free * 1024 * 1024)
.reduce((a, b) => a + b, 0)
} else {
// CPU fallback
const sys = await getSystemUsage()
availableMemBytes = (sys.total_memory - sys.used_memory) * 1024 * 1024
}
// check model size wrt system memory
if (modelSize > availableMemBytes) {
return 'RED'
} else if (modelSize + kvCacheSize > availableMemBytes) {
return 'YELLOW'
} else {
return 'GREEN'
}
} catch (e) {
throw new Error(String(e))
}
}
}
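A possible call site, assuming the extension instance is in scope (the URL is a placeholder):

const status = await extension.isModelSupported(
'https://example.com/models/model-Q4_K_M.gguf',
8192
)
if (status === 'RED') {
// weights alone exceed available VRAM/RAM
} else if (status === 'YELLOW') {
// weights fit, but the KV cache at this context size does not
}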

11 src-tauri/Cargo.lock generated
View File

@ -2323,6 +2323,7 @@ dependencies = [
"serde_json", "serde_json",
"sha2", "sha2",
"tokio", "tokio",
"tokio-util",
"url", "url",
] ]
@ -4019,8 +4020,9 @@ dependencies = [
[[package]]
name = "rmcp"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb21cd3555f1059f27e4813827338dec44429a08ecd0011acc41d9907b160c00"
dependencies = [
"base64 0.22.1",
"chrono",
@ -4045,8 +4047,9 @@ dependencies = [
[[package]]
name = "rmcp-macros"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab5d16ae1ff3ce2c5fd86c37047b2869b75bec795d53a4b1d8257b15415a2354"
dependencies = [
"darling 0.21.2",
"proc-macro2",

View File

@ -44,7 +44,7 @@ jan-utils = { path = "./utils" }
libloading = "0.8.7"
log = "0.4"
reqwest = { version = "0.11", features = ["json", "blocking", "stream"] }
rmcp = { version = "0.6.0", features = [
"client",
"transport-sse-client",
"transport-streamable-http-client",

View File

@ -24,7 +24,7 @@ impl CpuStaticInfo {
CpuStaticInfo {
name,
core_count: System::physical_core_count().unwrap_or(0),
arch: System::cpu_arch(),
extensions: CpuStaticInfo::get_extensions(),
}
}

View File

@ -1,4 +1,5 @@
use crate::commands::*;
use crate::types::CpuStaticInfo;
use tauri::test::mock_app;
#[test]
@ -14,3 +15,125 @@ fn test_system_usage() {
let usage = get_system_usage(app.handle().clone());
println!("System Usage Info: {:?}", usage);
}
#[cfg(test)]
mod cpu_tests {
use super::*;
#[test]
fn test_cpu_static_info_new() {
let cpu_info = CpuStaticInfo::new();
// Test that all fields are populated
assert!(!cpu_info.name.is_empty());
assert_ne!(cpu_info.name, "unknown"); // Should have detected a CPU name
assert!(cpu_info.core_count > 0);
assert!(!cpu_info.arch.is_empty());
// Architecture should be one of the expected values
assert!(
cpu_info.arch == "aarch64" ||
cpu_info.arch == "arm64" ||
cpu_info.arch == "x86_64" ||
cpu_info.arch == std::env::consts::ARCH
);
// Extensions should be a valid list (can be empty on non-x86)
println!("CPU Info: {:?}", cpu_info);
}
#[test]
fn test_cpu_info_consistency() {
// Test that multiple calls return consistent information
let info1 = CpuStaticInfo::new();
let info2 = CpuStaticInfo::new();
assert_eq!(info1.name, info2.name);
assert_eq!(info1.core_count, info2.core_count);
assert_eq!(info1.arch, info2.arch);
assert_eq!(info1.extensions, info2.extensions);
}
#[test]
fn test_cpu_name_not_empty() {
let cpu_info = CpuStaticInfo::new();
assert!(!cpu_info.name.is_empty());
assert!(cpu_info.name.len() > 0);
}
#[test]
fn test_core_count_positive() {
let cpu_info = CpuStaticInfo::new();
assert!(cpu_info.core_count > 0);
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn test_x86_extensions() {
let cpu_info = CpuStaticInfo::new();
// On x86/x86_64, we should always have at least FPU
assert!(cpu_info.extensions.contains(&"fpu".to_string()));
// Check that all extensions are valid x86 feature names
let valid_extensions = [
"fpu", "mmx", "sse", "sse2", "sse3", "ssse3", "sse4_1", "sse4_2",
"pclmulqdq", "avx", "avx2", "avx512_f", "avx512_dq", "avx512_ifma",
"avx512_pf", "avx512_er", "avx512_cd", "avx512_bw", "avx512_vl",
"avx512_vbmi", "avx512_vbmi2", "avx512_vnni", "avx512_bitalg",
"avx512_vpopcntdq", "avx512_vp2intersect", "aes", "f16c"
];
for ext in &cpu_info.extensions {
assert!(
valid_extensions.contains(&ext.as_str()),
"Unknown extension: {}",
ext
);
}
}
#[test]
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
fn test_non_x86_extensions() {
let cpu_info = CpuStaticInfo::new();
// On non-x86 architectures, extensions should be empty
assert!(cpu_info.extensions.is_empty());
}
#[test]
fn test_arch_detection() {
let cpu_info = CpuStaticInfo::new();
// Architecture should be a valid string
assert!(!cpu_info.arch.is_empty());
// Should be one of the common architectures
let common_archs = ["x86_64", "aarch64", "arm", "arm64", "x86"];
let is_common_arch = common_archs.iter().any(|&arch| cpu_info.arch == arch);
let is_compile_time_arch = cpu_info.arch == std::env::consts::ARCH;
assert!(
is_common_arch || is_compile_time_arch,
"Unexpected architecture: {}",
cpu_info.arch
);
}
#[test]
fn test_cpu_info_serialization() {
let cpu_info = CpuStaticInfo::new();
// Test that the struct can be serialized (since it derives Serialize)
let serialized = serde_json::to_string(&cpu_info);
assert!(serialized.is_ok());
let json_str = serialized.unwrap();
assert!(json_str.contains("name"));
assert!(json_str.contains("core_count"));
assert!(json_str.contains("arch"));
assert!(json_str.contains("extensions"));
}
}

View File

@ -23,6 +23,7 @@ sysinfo = "0.34.2"
tauri = { version = "2.5.0", default-features = false, features = [] }
thiserror = "2.0.12"
tokio = { version = "1", features = ["full"] }
reqwest = { version = "0.11", features = ["json", "blocking", "stream"] }
# Windows-specific dependencies
[target.'cfg(windows)'.dependencies]

View File

@ -265,8 +265,9 @@ pub async fn unload_llama_model<R: Runtime>(
pub async fn get_devices(
backend_path: &str,
library_path: Option<&str>,
envs: HashMap<String, String>,
) -> ServerResult<Vec<DeviceInfo>> {
get_devices_from_backend(backend_path, library_path, envs).await
}
/// Generate API key using HMAC-SHA256

View File

@ -1,4 +1,5 @@
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::process::Stdio;
use std::time::Duration;
use tokio::process::Command;
@ -19,6 +20,7 @@ pub struct DeviceInfo {
pub async fn get_devices_from_backend(
backend_path: &str,
library_path: Option<&str>,
envs: HashMap<String, String>,
) -> ServerResult<Vec<DeviceInfo>> {
log::info!("Getting devices from server at path: {:?}", backend_path);
@ -27,6 +29,7 @@ pub async fn get_devices_from_backend(
// Configure the command to run the server with --list-devices
let mut command = Command::new(backend_path);
command.arg("--list-devices");
command.envs(envs);
// Set up library path
setup_library_path(library_path, &mut command);

View File

@ -1,8 +1,58 @@
use super::helpers;
use super::types::GgufMetadata;
use reqwest;
use std::fs::File;
use std::io::BufReader;
/// Read GGUF metadata from a model file
#[tauri::command]
pub async fn read_gguf_metadata(path: String) -> Result<GgufMetadata, String> {
if path.starts_with("http://") || path.starts_with("https://") {
// Remote: read in 2MB chunks until successful
let client = reqwest::Client::new();
let chunk_size = 2 * 1024 * 1024; // Fixed 2MB chunks
let max_total_size = 120 * 1024 * 1024; // Don't exceed 120MB total
let mut total_downloaded = 0;
let mut accumulated_data = Vec::new();
while total_downloaded < max_total_size {
let start = total_downloaded;
let end = std::cmp::min(start + chunk_size - 1, max_total_size - 1);
let resp = client
.get(&path)
.header("Range", format!("bytes={}-{}", start, end))
.send()
.await
.map_err(|e| format!("Failed to fetch chunk {}-{}: {}", start, end, e))?;
let chunk_data = resp
.bytes()
.await
.map_err(|e| format!("Failed to read chunk response: {}", e))?;
accumulated_data.extend_from_slice(&chunk_data);
total_downloaded += chunk_data.len();
// Try parsing after each chunk
let cursor = std::io::Cursor::new(&accumulated_data);
if let Ok(metadata) = helpers::read_gguf_metadata(cursor) {
return Ok(metadata);
}
// If we got less data than expected, we've reached EOF
if chunk_data.len() < chunk_size {
break;
}
}
Err("Could not parse GGUF metadata from downloaded data".to_string())
} else {
// Local: use streaming file reader
let file =
File::open(&path).map_err(|e| format!("Failed to open local file {}: {}", path, e))?;
let reader = BufReader::new(file);
helpers::read_gguf_metadata(reader)
.map_err(|e| format!("Failed to parse GGUF metadata: {}", e))
}
}
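On the TypeScript side this surfaces as the readGgufMetadata call used by isModelSupported above; a minimal sketch (the URL is a placeholder):

// Remote paths are fetched with 2 MB Range requests until the header parses;
// local paths are streamed from disk through the same parser.
const gguf = await readGgufMetadata('https://example.com/model.gguf')
console.log(gguf.metadata['general.architecture'])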

View File

@ -1,13 +1,11 @@
use byteorder::{LittleEndian, ReadBytesExt};
use std::convert::TryFrom;
use std::io::{self, BufReader, Read, Seek};
use super::types::{GgufMetadata, GgufValueType};
pub fn read_gguf_metadata<R: Read + Seek>(reader: R) -> io::Result<GgufMetadata> {
let mut file = BufReader::new(reader);
let mut magic = [0u8; 4];
file.read_exact(&mut magic)?;

View File

@ -1,9 +1,10 @@
use super::models::{DownloadEvent, DownloadItem, ProxyConfig, ProgressTracker};
use crate::core::app::commands::get_jan_data_folder_path;
use futures_util::StreamExt;
use jan_utils::normalize_path;
use reqwest::header::{HeaderMap, HeaderName, HeaderValue};
use std::collections::HashMap;
use std::path::Path;
use std::time::Duration;
use tauri::Emitter;
use tokio::fs::File;
@ -11,10 +12,131 @@ use tokio::io::AsyncWriteExt;
use tokio_util::sync::CancellationToken;
use url::Url;
// ===== UTILITY FUNCTIONS =====
pub fn err_to_string<E: std::fmt::Display>(e: E) -> String {
format!("Error: {}", e)
}
// ===== VALIDATION FUNCTIONS =====
/// Validates a downloaded file against expected hash and size
async fn validate_downloaded_file(
item: &DownloadItem,
save_path: &Path,
app: &tauri::AppHandle,
cancel_token: &CancellationToken,
) -> Result<(), String> {
// Skip validation if no verification data is provided
if item.sha256.is_none() && item.size.is_none() {
log::debug!(
"No validation data provided for {}, skipping validation",
item.url
);
return Ok(());
}
// Extract model ID from save path for validation events
// Path structure: llamacpp/models/{modelId}/model.gguf or llamacpp/models/{modelId}/mmproj.gguf
let model_id = save_path
.parent() // get parent directory (modelId folder)
.and_then(|p| p.file_name())
.and_then(|n| n.to_str())
.unwrap_or("unknown");
// Emit validation started event
app.emit(
"onModelValidationStarted",
serde_json::json!({
"modelId": model_id,
"downloadType": "Model",
}),
)
.unwrap();
log::info!("Starting validation for model: {}", model_id);
// Validate size if provided (fast check first)
if let Some(expected_size) = &item.size {
log::info!("Starting size verification for {}", item.url);
match tokio::fs::metadata(save_path).await {
Ok(metadata) => {
let actual_size = metadata.len();
if actual_size != *expected_size {
log::error!(
"Size verification failed for {}. Expected: {} bytes, Actual: {} bytes",
item.url,
expected_size,
actual_size
);
return Err(format!(
"Size verification failed. Expected {} bytes but got {} bytes.",
expected_size, actual_size
));
}
log::info!(
"Size verification successful for {} ({} bytes)",
item.url,
actual_size
);
}
Err(e) => {
log::error!(
"Failed to get file metadata for {}: {}",
save_path.display(),
e
);
return Err(format!("Failed to verify file size: {}", e));
}
}
}
// Check for cancellation before expensive hash computation
if cancel_token.is_cancelled() {
log::info!("Validation cancelled for {}", item.url);
return Err("Validation cancelled".to_string());
}
// Validate hash if provided (expensive check second)
if let Some(expected_sha256) = &item.sha256 {
log::info!("Starting Hash verification for {}", item.url);
match jan_utils::crypto::compute_file_sha256_with_cancellation(save_path, cancel_token).await {
Ok(computed_sha256) => {
if computed_sha256 != *expected_sha256 {
log::error!(
"Hash verification failed for {}. Expected: {}, Computed: {}",
item.url,
expected_sha256,
computed_sha256
);
return Err(format!(
"Hash verification failed. The downloaded file is corrupted or has been tampered with."
));
}
log::info!("Hash verification successful for {}", item.url);
}
Err(e) => {
log::error!(
"Failed to compute SHA256 for {}: {}",
save_path.display(),
e
);
return Err(format!("Failed to verify file integrity: {}", e));
}
}
}
log::info!("All validations passed for {}", item.url);
Ok(())
}
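On the frontend, a download request opts into this validation simply by carrying the new fields; a hypothetical TypeScript mirror of DownloadItem (field values are placeholders):

// sha256 and size are both optional; validation is skipped entirely
// when neither is provided.
const item = {
url: 'https://example.com/model.gguf',
save_path: 'llamacpp/models/my-model/model.gguf',
sha256: 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
size: 4831838208,
}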
pub fn validate_proxy_config(config: &ProxyConfig) -> Result<(), String> {
// Validate proxy URL format
if let Err(e) = Url::parse(&config.url) {
@ -172,6 +294,9 @@ pub async fn _get_file_size(
}
}
// ===== MAIN DOWNLOAD FUNCTIONS =====
/// Downloads multiple files in parallel with individual progress tracking
pub async fn _download_files_internal(
app: tauri::AppHandle,
items: &[DownloadItem],
@ -184,28 +309,31 @@ pub async fn _download_files_internal(
let header_map = _convert_headers(headers).map_err(err_to_string)?;
// Calculate sizes for each file
let mut file_sizes = HashMap::new();
for item in items.iter() {
let client = _get_client_for_item(item, &header_map).map_err(err_to_string)?;
let size = _get_file_size(&client, &item.url)
.await
.map_err(err_to_string)?;
file_sizes.insert(item.url.clone(), size);
}
let total_size: u64 = file_sizes.values().sum();
log::info!("Total download size: {}", total_size);
let evt_name = format!("download-{}", task_id);
// Create progress tracker
let progress_tracker = ProgressTracker::new(items, file_sizes.clone());
// save file under Jan data folder
let jan_data_folder = get_jan_data_folder_path(app.clone());
// Collect download tasks for parallel execution
let mut download_tasks = Vec::new();
for (index, item) in items.iter().enumerate() {
let save_path = jan_data_folder.join(&item.save_path);
let save_path = normalize_path(&save_path);
@ -217,120 +345,251 @@ pub async fn _download_files_internal(
));
}
// Spawn download task for each file
let item_clone = item.clone();
let app_clone = app.clone();
let header_map_clone = header_map.clone();
let cancel_token_clone = cancel_token.clone();
let evt_name_clone = evt_name.clone();
let progress_tracker_clone = progress_tracker.clone();
let file_id = format!("{}-{}", task_id, index);
let file_size = file_sizes.get(&item.url).copied().unwrap_or(0);
let task = tokio::spawn(async move {
download_single_file(
app_clone,
&item_clone,
&header_map_clone,
&save_path,
resume,
cancel_token_clone,
evt_name_clone,
progress_tracker_clone,
file_id,
file_size,
)
.await
});
download_tasks.push(task);
}
// Wait for all downloads to complete
let mut validation_tasks = Vec::new();
for (task, item) in download_tasks.into_iter().zip(items.iter()) {
let result = task.await.map_err(|e| format!("Task join error: {}", e))?;
match result {
Ok(downloaded_path) => {
// Spawn validation task in parallel
let item_clone = item.clone();
let app_clone = app.clone();
let path_clone = downloaded_path.clone();
let cancel_token_clone = cancel_token.clone();
let validation_task = tokio::spawn(async move {
validate_downloaded_file(&item_clone, &path_clone, &app_clone, &cancel_token_clone).await
});
validation_tasks.push((validation_task, downloaded_path, item.clone()));
}
Err(e) => return Err(e),
}
}
// Wait for all validations to complete
for (validation_task, save_path, _item) in validation_tasks {
let validation_result = validation_task
.await
.map_err(|e| format!("Validation task join error: {}", e))?;
if let Err(validation_error) = validation_result {
// Clean up the file if validation fails
let _ = tokio::fs::remove_file(&save_path).await;
// Try to clean up the parent directory if it's empty
if let Some(parent) = save_path.parent() {
let _ = tokio::fs::remove_dir(parent).await;
}
return Err(validation_error);
}
}
// Emit final progress
let (transferred, total) = progress_tracker.get_total_progress().await;
let final_evt = DownloadEvent { transferred, total };
app.emit(&evt_name, final_evt).unwrap();
Ok(())
}
/// Downloads a single file without blocking other downloads
async fn download_single_file(
app: tauri::AppHandle,
item: &DownloadItem,
header_map: &HeaderMap,
save_path: &std::path::Path,
resume: bool,
cancel_token: CancellationToken,
evt_name: String,
progress_tracker: ProgressTracker,
file_id: String,
_file_size: u64,
) -> Result<std::path::PathBuf, String> {
// Create parent directories if they don't exist
if let Some(parent) = save_path.parent() {
if !parent.exists() {
tokio::fs::create_dir_all(parent)
.await
.map_err(err_to_string)?;
}
}
let current_extension = save_path.extension().unwrap_or_default().to_string_lossy();
let append_extension = |ext: &str| {
if current_extension.is_empty() {
ext.to_string()
} else {
format!("{}.{}", current_extension, ext)
}
};
let tmp_save_path = save_path.with_extension(append_extension("tmp"));
let url_save_path = save_path.with_extension(append_extension("url"));
let mut should_resume = resume
&& tmp_save_path.exists()
&& tokio::fs::read_to_string(&url_save_path)
.await
.map(|url| url == item.url) // check if we resume the same URL
.unwrap_or(false);
tokio::fs::write(&url_save_path, item.url.clone())
.await
.map_err(err_to_string)?;
log::info!("Started downloading: {}", item.url);
let client = _get_client_for_item(item, header_map).map_err(err_to_string)?;
let mut download_delta = 0u64;
let mut initial_progress = 0u64;
let resp = if should_resume {
let downloaded_size = tmp_save_path.metadata().map_err(err_to_string)?.len();
match _get_maybe_resume(&client, &item.url, downloaded_size).await {
Ok(resp) => {
log::info!(
"Resume download: {}, already downloaded {} bytes",
item.url,
downloaded_size
);
initial_progress = downloaded_size;
// Initialize progress for resumed download
progress_tracker
.update_progress(&file_id, downloaded_size)
.await;
// Emit initial combined progress
let (combined_transferred, combined_total) =
progress_tracker.get_total_progress().await;
let evt = DownloadEvent {
transferred: combined_transferred,
total: combined_total,
};
app.emit(&evt_name, evt).unwrap();
resp
}
Err(e) => {
// fallback to normal download
log::warn!("Failed to resume download: {}", e);
should_resume = false;
_get_maybe_resume(&client, &item.url, 0).await?
}
}
} else {
_get_maybe_resume(&client, &item.url, 0).await?
};
let mut stream = resp.bytes_stream();
let file = if should_resume {
// resume download, append to existing file
tokio::fs::OpenOptions::new()
.write(true)
.append(true)
.open(&tmp_save_path)
.await
.map_err(err_to_string)?
} else {
// start new download, create a new file
File::create(&tmp_save_path).await.map_err(err_to_string)?
};
let mut writer = tokio::io::BufWriter::new(file);
let mut total_transferred = initial_progress;
// write chunk to file
while let Some(chunk) = stream.next().await {
if cancel_token.is_cancelled() {
if !should_resume {
tokio::fs::remove_dir_all(&save_path.parent().unwrap())
.await
.ok();
}
log::info!("Download cancelled: {}", item.url);
return Err("Download cancelled".to_string());
}
let chunk = chunk.map_err(err_to_string)?;
writer.write_all(&chunk).await.map_err(err_to_string)?;
download_delta += chunk.len() as u64;
total_transferred += chunk.len() as u64;
// Update progress every 10 MB
if download_delta >= 10 * 1024 * 1024 {
// Update individual file progress
progress_tracker
.update_progress(&file_id, total_transferred)
.await;
// Emit combined progress event
let (combined_transferred, combined_total) =
progress_tracker.get_total_progress().await;
let evt = DownloadEvent {
transferred: combined_transferred,
total: combined_total,
};
app.emit(&evt_name, evt).unwrap();
download_delta = 0u64;
}
}
writer.flush().await.map_err(err_to_string)?;
// Final progress update for this file
progress_tracker
.update_progress(&file_id, total_transferred)
.await;
// Emit final combined progress
let (combined_transferred, combined_total) = progress_tracker.get_total_progress().await;
let evt = DownloadEvent {
transferred: combined_transferred,
total: combined_total,
};
app.emit(&evt_name, evt).unwrap();
// rename tmp file to final file
tokio::fs::rename(&tmp_save_path, &save_path)
.await
.map_err(err_to_string)?;
tokio::fs::remove_file(&url_save_path)
.await
.map_err(err_to_string)?;
log::info!("Finished downloading: {}", item.url);
Ok(save_path.to_path_buf())
}
// ===== HTTP CLIENT HELPER FUNCTIONS =====
pub async fn _get_maybe_resume(
client: &reqwest::Client,
url: &str,

View File

@ -1,4 +1,6 @@
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;
use tokio_util::sync::CancellationToken;
#[derive(Default)]
@ -20,6 +22,8 @@ pub struct DownloadItem {
pub url: String,
pub save_path: String,
pub proxy: Option<ProxyConfig>,
pub sha256: Option<String>,
pub size: Option<u64>,
}
#[derive(serde::Serialize, Clone, Debug)]
@ -27,3 +31,31 @@ pub struct DownloadEvent {
pub transferred: u64,
pub total: u64,
}
/// Structure to track progress for each file in parallel downloads
#[derive(Clone)]
pub struct ProgressTracker {
file_progress: Arc<Mutex<HashMap<String, u64>>>,
total_size: u64,
}
impl ProgressTracker {
pub fn new(_items: &[DownloadItem], sizes: HashMap<String, u64>) -> Self {
let total_size = sizes.values().sum();
ProgressTracker {
file_progress: Arc::new(Mutex::new(HashMap::new())),
total_size,
}
}
pub async fn update_progress(&self, file_id: &str, transferred: u64) {
let mut progress = self.file_progress.lock().await;
progress.insert(file_id.to_string(), transferred);
}
pub async fn get_total_progress(&self) -> (u64, u64) {
let progress = self.file_progress.lock().await;
let total_transferred: u64 = progress.values().sum();
(total_transferred, self.total_size)
}
}
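Consumers see one aggregated event stream per task; a sketch of a Tauri v2 frontend listener follows (taskId is assumed to come from the download request, and unlisten() stops the subscription):

import { listen } from '@tauri-apps/api/event'

const unlisten = await listen<{ transferred: number; total: number }>(
`download-${taskId}`,
(event) => {
// transferred/total now reflect all files in the task combined
const pct = (100 * event.payload.transferred) / event.payload.total
console.log(`overall progress: ${pct.toFixed(1)}%`)
}
)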

View File

@ -194,6 +194,8 @@ fn test_download_item_with_ssl_proxy() {
url: "https://example.com/file.zip".to_string(), url: "https://example.com/file.zip".to_string(),
save_path: "downloads/file.zip".to_string(), save_path: "downloads/file.zip".to_string(),
proxy: Some(proxy_config), proxy: Some(proxy_config),
sha256: None,
size: None,
};
assert!(download_item.proxy.is_some());
@ -211,6 +213,8 @@ fn test_client_creation_with_ssl_settings() {
url: "https://example.com/file.zip".to_string(), url: "https://example.com/file.zip".to_string(),
save_path: "downloads/file.zip".to_string(), save_path: "downloads/file.zip".to_string(),
proxy: Some(proxy_config), proxy: Some(proxy_config),
sha256: None,
size: None,
};
let header_map = HeaderMap::new();
@ -256,6 +260,8 @@ fn test_download_item_creation() {
url: "https://example.com/file.tar.gz".to_string(), url: "https://example.com/file.tar.gz".to_string(),
save_path: "models/test.tar.gz".to_string(), save_path: "models/test.tar.gz".to_string(),
proxy: None, proxy: None,
sha256: None,
size: None,
};
assert_eq!(item.url, "https://example.com/file.tar.gz");

View File

@ -7,10 +7,11 @@ use rmcp::{
ServiceExt,
};
use serde_json::Value;
use std::{collections::HashMap, env, process::Stdio, sync::Arc, time::Duration};
use tauri::{AppHandle, Emitter, Manager, Runtime, State};
use tauri_plugin_http::reqwest;
use tokio::{
io::AsyncReadExt,
process::Command,
sync::Mutex,
time::{sleep, timeout},
@ -647,23 +648,8 @@ async fn schedule_mcp_start_task<R: Runtime>(
{
cmd.creation_flags(0x08000000); // CREATE_NO_WINDOW: prevents shell window on Windows
}
cmd.kill_on_drop(true);
config_params
.args
@ -678,26 +664,42 @@ async fn schedule_mcp_start_task<R: Runtime>(
}
});
let (process, stderr) = TokioChildProcess::builder(cmd)
.stderr(Stdio::piped())
.spawn()
.map_err(|e| {
log::error!("Failed to run command {name}: {e}");
format!("Failed to run command {name}: {e}")
})?;
let service = ()
.serve(process)
.await
.map_err(|e| format!("Failed to start MCP server {name}: {e}"));
match service {
Ok(server) => {
log::trace!("Connected to server: {:#?}", server.peer_info());
servers
.lock()
.await
.insert(name.clone(), RunningServiceEnum::NoInit(server));
log::info!("Server {name} started successfully.");
}
Err(_) => {
let mut buffer = String::new();
let error = match stderr
.expect("stderr must be piped")
.read_to_string(&mut buffer)
.await
{
Ok(_) => format!("Failed to start MCP server {name}: {buffer}"),
Err(_) => format!("Failed to read MCP server {name} stderr"),
};
log::error!("{error}");
return Err(error);
}
}
// Wait a short time to verify the server is stable before marking as connected
// This prevents race conditions where the server quits immediately
@ -754,7 +756,7 @@ pub fn extract_command_args(config: &Value) -> Option<McpServerConfig> {
command,
args,
envs,
headers,
})
}
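For reference, a hypothetical server entry whose fields line up with extract_command_args above (command, args, envs, headers; the values are illustrative):

const mcpServerConfig = {
command: 'npx',
args: ['-y', '@modelcontextprotocol/server-filesystem', '/tmp'],
envs: { DEBUG: '1' },
headers: {},
}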

View File

@ -13,6 +13,7 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0" serde_json = "1.0"
sha2 = "0.10" sha2 = "0.10"
tokio = { version = "1", features = ["process"] } tokio = { version = "1", features = ["process"] }
tokio-util = "0.7.14"
url = "2.5" url = "2.5"
[features] [features]

View File

@ -1,7 +1,11 @@
use base64::{engine::general_purpose, Engine as _};
use hmac::{Hmac, Mac};
use rand::{distributions::Alphanumeric, Rng};
use sha2::{Digest, Sha256};
use std::path::Path;
use tokio::fs::File;
use tokio::io::AsyncReadExt;
use tokio_util::sync::CancellationToken;
type HmacSha256 = Hmac<Sha256>;
@ -24,3 +28,59 @@ pub fn generate_api_key(model_id: String, api_secret: String) -> Result<String,
let hash = general_purpose::STANDARD.encode(code_bytes);
Ok(hash)
}
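The same derivation in Node.js terms, as a sketch (apiSecret and modelId are assumed inputs; the app uses the Rust function above):

import { createHmac } from 'node:crypto'

// HMAC-SHA256 over the model id, keyed by the app secret, base64-encoded
const apiKey = createHmac('sha256', apiSecret).update(modelId).digest('base64')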
/// Compute SHA256 hash of a file with cancellation support by chunking the file
pub async fn compute_file_sha256_with_cancellation(
file_path: &Path,
cancel_token: &CancellationToken,
) -> Result<String, String> {
// Check for cancellation before starting
if cancel_token.is_cancelled() {
return Err("Hash computation cancelled".to_string());
}
let mut file = File::open(file_path)
.await
.map_err(|e| format!("Failed to open file for hashing: {}", e))?;
let mut hasher = Sha256::new();
let mut buffer = vec![0u8; 64 * 1024]; // 64KB chunks
let mut total_read = 0u64;
loop {
// Check for cancellation every chunk (every 64KB)
if cancel_token.is_cancelled() {
return Err("Hash computation cancelled".to_string());
}
let bytes_read = file
.read(&mut buffer)
.await
.map_err(|e| format!("Failed to read file for hashing: {}", e))?;
if bytes_read == 0 {
break; // EOF
}
hasher.update(&buffer[..bytes_read]);
total_read += bytes_read as u64;
// Log progress for very large files (every 100MB)
if total_read % (100 * 1024 * 1024) == 0 {
#[cfg(feature = "logging")]
log::debug!("Hash progress: {} MB processed", total_read / (1024 * 1024));
}
}
// Final cancellation check
if cancel_token.is_cancelled() {
return Err("Hash computation cancelled".to_string());
}
let hash_bytes = hasher.finalize();
let hash_hex = format!("{:x}", hash_bytes);
#[cfg(feature = "logging")]
log::debug!("Hash computation completed for {} bytes", total_read);
Ok(hash_hex)
}
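A rough TypeScript/Node analogue of the same pattern, for readers more at home there (illustrative only; the app ships the Rust version and uses a CancellationToken rather than an AbortSignal):

import { createHash } from 'node:crypto'
import { open } from 'node:fs/promises'

async function sha256WithCancel(path: string, signal: AbortSignal): Promise<string> {
const hasher = createHash('sha256')
const file = await open(path, 'r')
const buf = Buffer.alloc(64 * 1024) // 64KB chunks, matching the Rust version
try {
for (;;) {
// check for cancellation between chunks
if (signal.aborted) throw new Error('Hash computation cancelled')
const { bytesRead } = await file.read(buf, 0, buf.length)
if (bytesRead === 0) break // EOF
hasher.update(buf.subarray(0, bytesRead))
}
} finally {
await file.close()
}
return hasher.digest('hex')
}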

View File

@ -17,7 +17,6 @@ import {
IconPhoto,
IconWorld,
IconAtom,
IconTool,
IconCodeCircle2,
IconPlayerStopFilled,
@ -57,7 +56,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
const { prompt, setPrompt } = usePrompt()
const { currentThreadId } = useThreads()
const { t } = useTranslation()
const { spellCheckChatInput } = useGeneralSetting()
const maxRows = 10
@ -537,7 +536,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
{/* File attachment - show only for models with mmproj */}
{hasMmproj && (
<div
className="h-6 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1"
onClick={handleAttachmentClick}
>
<IconPhoto size={18} className="text-main-view-fg/50" />
@ -554,20 +553,6 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
{/* <div className="h-6 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1">
<IconMicrophone size={18} className="text-main-view-fg/50" />
</div> */}
{selectedModel?.capabilities?.includes('embeddings') && (
<TooltipProvider>
<Tooltip>
@ -586,8 +571,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
</TooltipProvider>
)}
{selectedModel?.capabilities?.includes('tools') &&
hasActiveMCPServers && (
<TooltipProvider>
<Tooltip

View File

@ -168,9 +168,46 @@ export function DownloadManagement() {
[removeDownload, removeLocalDownloadingModel, t]
)
const onModelValidationStarted = useCallback(
(event: { modelId: string; downloadType: string }) => {
console.debug('onModelValidationStarted', event)
// Show validation in progress toast
toast.info(t('common:toast.modelValidationStarted.title'), {
id: `model-validation-started-${event.modelId}`,
description: t('common:toast.modelValidationStarted.description', {
modelId: event.modelId,
}),
duration: 10000,
})
},
[t]
)
const onModelValidationFailed = useCallback(
(event: { modelId: string; error: string; reason: string }) => {
console.debug('onModelValidationFailed', event)
// Dismiss the validation started toast
toast.dismiss(`model-validation-started-${event.modelId}`)
removeDownload(event.modelId)
removeLocalDownloadingModel(event.modelId)
// Show specific toast for validation failure
toast.error(t('common:toast.modelValidationFailed.title'), {
description: t('common:toast.modelValidationFailed.description', {
modelId: event.modelId,
}),
duration: 30000, // Long-lived toast for this security-critical message
})
},
[removeDownload, removeLocalDownloadingModel, t]
)
const onFileDownloadStopped = useCallback(
(state: DownloadState) => {
console.debug('onFileDownloadStopped', state)
removeDownload(state.modelId)
removeLocalDownloadingModel(state.modelId)
},
@ -180,6 +217,10 @@ export function DownloadManagement() {
const onFileDownloadSuccess = useCallback(
async (state: DownloadState) => {
console.debug('onFileDownloadSuccess', state)
// Dismiss any validation started toast when download completes successfully
toast.dismiss(`model-validation-started-${state.modelId}`)
removeDownload(state.modelId)
removeLocalDownloadingModel(state.modelId)
toast.success(t('common:toast.downloadComplete.title'), {
@ -192,12 +233,34 @@ export function DownloadManagement() {
[removeDownload, removeLocalDownloadingModel, t]
)
const onFileDownloadAndVerificationSuccess = useCallback(
async (state: DownloadState) => {
console.debug('onFileDownloadAndVerificationSuccess', state)
// Dismiss any validation started toast when download and verification complete successfully
toast.dismiss(`model-validation-started-${state.modelId}`)
removeDownload(state.modelId)
removeLocalDownloadingModel(state.modelId)
toast.success(t('common:toast.downloadAndVerificationComplete.title'), {
id: 'download-complete',
description: t('common:toast.downloadAndVerificationComplete.description', {
item: state.modelId,
}),
})
},
[removeDownload, removeLocalDownloadingModel, t]
)
useEffect(() => {
console.debug('DownloadListener: registering event listeners...')
events.on(DownloadEvent.onFileDownloadUpdate, onFileDownloadUpdate)
events.on(DownloadEvent.onFileDownloadError, onFileDownloadError)
events.on(DownloadEvent.onFileDownloadSuccess, onFileDownloadSuccess)
events.on(DownloadEvent.onFileDownloadStopped, onFileDownloadStopped)
events.on(DownloadEvent.onModelValidationStarted, onModelValidationStarted)
events.on(DownloadEvent.onModelValidationFailed, onModelValidationFailed)
events.on(DownloadEvent.onFileDownloadAndVerificationSuccess, onFileDownloadAndVerificationSuccess)
// Register app update event listeners
events.on(AppEvent.onAppUpdateDownloadUpdate, onAppUpdateDownloadUpdate)
@ -210,6 +273,12 @@ export function DownloadManagement() {
events.off(DownloadEvent.onFileDownloadError, onFileDownloadError)
events.off(DownloadEvent.onFileDownloadSuccess, onFileDownloadSuccess)
events.off(DownloadEvent.onFileDownloadStopped, onFileDownloadStopped)
events.off(
DownloadEvent.onModelValidationStarted,
onModelValidationStarted
)
events.off(DownloadEvent.onModelValidationFailed, onModelValidationFailed)
events.off(DownloadEvent.onFileDownloadAndVerificationSuccess, onFileDownloadAndVerificationSuccess)
// Unregister app update event listeners
events.off(AppEvent.onAppUpdateDownloadUpdate, onAppUpdateDownloadUpdate)
@ -224,6 +293,9 @@ export function DownloadManagement() {
onFileDownloadError,
onFileDownloadSuccess,
onFileDownloadStopped,
onModelValidationStarted,
onModelValidationFailed,
onFileDownloadAndVerificationSuccess,
onAppUpdateDownloadUpdate,
onAppUpdateDownloadSuccess,
onAppUpdateDownloadError,

View File

@ -14,12 +14,16 @@ import { route } from '@/constants/routes'
import { useThreads } from '@/hooks/useThreads'
import { ModelSetting } from '@/containers/ModelSetting'
import ProvidersAvatar from '@/containers/ProvidersAvatar'
import { ModelSupportStatus } from '@/containers/ModelSupportStatus'
import { Fzf } from 'fzf'
import { localStorageKey } from '@/constants/localStorage'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { useFavoriteModel } from '@/hooks/useFavoriteModel'
import { predefinedProviders } from '@/consts/providers'
import {
checkMmprojExistsAndUpdateOffloadMMprojSetting,
checkMmprojExists,
} from '@/services/models'
type DropdownModelProviderProps = {
model?: ThreadModel
@ -91,6 +95,50 @@ const DropdownModelProvider = ({
[providers]
)
// Helper function to get context size from model settings
const getContextSize = useCallback((): number => {
if (!selectedModel?.settings?.ctx_len?.controller_props?.value) {
return 8192 // Default context size
}
return selectedModel.settings.ctx_len.controller_props.value as number
}, [selectedModel?.settings?.ctx_len?.controller_props?.value])
// Function to check if a llamacpp model has vision capabilities and update model capabilities
const checkAndUpdateModelVisionCapability = useCallback(
async (modelId: string) => {
try {
const hasVision = await checkMmprojExists(modelId)
if (hasVision) {
// Update the model capabilities to include 'vision'
const provider = getProviderByName('llamacpp')
if (provider) {
const modelIndex = provider.models.findIndex(
(m) => m.id === modelId
)
if (modelIndex !== -1) {
const model = provider.models[modelIndex]
const capabilities = model.capabilities || []
// Add 'vision' capability if not already present
if (!capabilities.includes('vision')) {
const updatedModels = [...provider.models]
updatedModels[modelIndex] = {
...model,
capabilities: [...capabilities, 'vision'],
}
updateProvider('llamacpp', { models: updatedModels })
}
}
}
}
} catch (error) {
console.debug('Error checking mmproj for model:', modelId, error)
}
},
[getProviderByName, updateProvider]
)
// Initialize model provider only once
useEffect(() => {
const initializeModel = async () => {
@ -107,6 +155,8 @@ const DropdownModelProvider = ({
updateProvider,
getProviderByName
)
// Also check vision capability
await checkAndUpdateModelVisionCapability(model.id as string)
}
} else if (useLastUsedModel) {
// Try to use last used model only when explicitly requested (for new chat)
@ -119,6 +169,8 @@ const DropdownModelProvider = ({
updateProvider,
getProviderByName
)
// Also check vision capability
await checkAndUpdateModelVisionCapability(lastUsed.model)
}
} else {
selectModelProvider('', '')
@ -136,6 +188,7 @@ const DropdownModelProvider = ({
checkModelExists,
updateProvider,
getProviderByName,
checkAndUpdateModelVisionCapability,
])
// Update display model when selection changes
@ -147,6 +200,25 @@ const DropdownModelProvider = ({
}
}, [selectedProvider, selectedModel, t])
// Check vision capabilities for all llamacpp models
useEffect(() => {
const checkAllLlamacppModelsForVision = async () => {
const llamacppProvider = providers.find(
(p) => p.provider === 'llamacpp' && p.active
)
if (llamacppProvider) {
const checkPromises = llamacppProvider.models.map((model) =>
checkAndUpdateModelVisionCapability(model.id)
)
await Promise.allSettled(checkPromises)
}
}
if (open) {
checkAllLlamacppModelsForVision()
}
}, [open, providers, checkAndUpdateModelVisionCapability])
// Reset search value when dropdown closes
const onOpenChange = useCallback((open: boolean) => {
setOpen(open)
@ -287,6 +359,8 @@ const DropdownModelProvider = ({
updateProvider,
getProviderByName
)
// Also check vision capability
await checkAndUpdateModelVisionCapability(searchableModel.model.id)
}
// Store the selected model as last used
@ -305,6 +379,7 @@ const DropdownModelProvider = ({
useLastUsedModel,
updateProvider,
getProviderByName,
checkAndUpdateModelVisionCapability,
]
)
@ -318,7 +393,7 @@ const DropdownModelProvider = ({
return (
<Popover open={open} onOpenChange={onOpenChange}>
<div className="bg-main-view-fg/5 hover:bg-main-view-fg/8 px-2 py-1 flex items-center gap-1.5 rounded-sm max-h-[32px] mr-0.5">
<PopoverTrigger asChild>
<button
title={displayModel}
@ -346,6 +421,12 @@ const DropdownModelProvider = ({
smallIcon
/>
)}
<ModelSupportStatus
modelId={selectedModel?.id}
provider={selectedProvider}
contextSize={getContextSize()}
className="ml-0.5 flex-shrink-0"
/>
</div>
<PopoverContent

View File

@ -11,6 +11,7 @@ import { cn } from '@/lib/utils'
const LANGUAGES = [
{ value: 'en', label: 'English' },
{ value: 'id', label: 'Bahasa' },
{ value: 'pl', label: 'Polski' },
{ value: 'vn', label: 'Tiếng Việt' },
{ value: 'zh-CN', label: '简体中文' },
{ value: 'zh-TW', label: '繁體中文' },

View File

@ -0,0 +1,226 @@
import {
HoverCard,
HoverCardContent,
HoverCardTrigger,
} from '@/components/ui/hover-card'
import { IconInfoCircle } from '@tabler/icons-react'
import { CatalogModel, ModelQuant } from '@/services/models'
import { extractDescription } from '@/lib/models'
interface ModelInfoHoverCardProps {
model: CatalogModel
variant?: ModelQuant
defaultModelQuantizations: string[]
modelSupportStatus: Record<string, string>
onCheckModelSupport: (variant: ModelQuant) => void
children?: React.ReactNode
}
export const ModelInfoHoverCard = ({
model,
variant,
defaultModelQuantizations,
modelSupportStatus,
onCheckModelSupport,
children,
}: ModelInfoHoverCardProps) => {
const isVariantMode = !!variant
const displayVariant =
variant ||
model.quants.find((m) =>
defaultModelQuantizations.some((e) =>
m.model_id.toLowerCase().includes(e)
)
) ||
model.quants?.[0]
const handleMouseEnter = () => {
if (displayVariant) {
onCheckModelSupport(displayVariant)
}
}
const getCompatibilityStatus = () => {
const status = displayVariant
? modelSupportStatus[displayVariant.model_id]
: null
if (status === 'LOADING') {
return (
<div className="flex items-start gap-2">
<div className="size-2 shrink-0 border border-main-view-fg/50 border-t-transparent rounded-full animate-spin mt-1"></div>
<span className="text-main-view-fg/50">Checking...</span>
</div>
)
} else if (status === 'GREEN') {
return (
<div className="flex items-start gap-2">
<div className="size-2 shrink-0 bg-green-500 rounded-full mt-1"></div>
<span className="text-green-500 font-medium">
Recommended for your device
</span>
</div>
)
} else if (status === 'YELLOW') {
return (
<div className="flex items-start gap-2">
<div className="size-2 shrink-0 bg-yellow-500 rounded-full mt-1"></div>
<span className="text-yellow-500 font-medium">
May be slow on your device
</span>
</div>
)
} else if (status === 'RED') {
return (
<div className="flex items-start gap-2">
<div className="size-2 shrink-0 bg-red-500 rounded-full mt-1"></div>
<span className="text-red-500 font-medium">
May be incompatible with your device
</span>
</div>
)
} else {
return (
<div className="flex items-start gap-2">
<div className="size-2 shrink-0 bg-gray-400 rounded-full mt-1"></div>
<span className="text-gray-500">Unknown</span>
</div>
)
}
}
return (
<HoverCard>
<HoverCardTrigger asChild onMouseEnter={handleMouseEnter}>
{children || (
<div className="cursor-pointer">
<IconInfoCircle
size={14}
className="mt-0.5 text-main-view-fg/50 hover:text-main-view-fg/80 transition-colors"
/>
</div>
)}
</HoverCardTrigger>
<HoverCardContent className="w-96 p-4" side="left">
<div className="space-y-4">
{/* Header */}
<div className="border-b border-main-view-fg/10 pb-3">
<h4 className="text-sm font-semibold text-main-view-fg">
{isVariantMode ? variant.model_id : model.model_name}
</h4>
<p className="text-xs text-main-view-fg/60 mt-1">
{isVariantMode
? 'Model Variant Information'
: 'Model Information'}
</p>
</div>
{/* Main Info Grid */}
<div className="grid grid-cols-2 gap-3 text-xs">
<div className="space-y-2">
{isVariantMode ? (
<>
<div>
<span className="text-main-view-fg/50 block">
File Size
</span>
<span className="text-main-view-fg font-medium mt-1 inline-block">
{variant.file_size}
</span>
</div>
<div>
<span className="text-main-view-fg/50 block">
Quantization
</span>
<span className="text-main-view-fg font-medium mt-1 inline-block">
{variant.model_id.split('-').pop()?.toUpperCase() ||
'N/A'}
</span>
</div>
</>
) : (
<>
<div>
<span className="text-main-view-fg/50 block">
Downloads
</span>
<span className="text-main-view-fg font-medium mt-1 inline-block">
{model.downloads?.toLocaleString() || '0'}
</span>
</div>
<div>
<span className="text-main-view-fg/50 block">Variants</span>
<span className="text-main-view-fg font-medium mt-1 inline-block">
{model.quants?.length || 0}
</span>
</div>
</>
)}
</div>
<div className="space-y-2">
{!isVariantMode && (
<div>
<span className="text-main-view-fg/50 block">
Default Size
</span>
<span className="text-main-view-fg font-medium mt-1 inline-block">
{displayVariant?.file_size || 'N/A'}
</span>
</div>
)}
<div>
<span className="text-main-view-fg/50 block">
Compatibility
</span>
<div className="flex items-center gap-1.5 mt-1">
{getCompatibilityStatus()}
</div>
</div>
</div>
</div>
{/* Features Section */}
{(model.num_mmproj > 0 || model.tools) && (
<div className="border-t border-main-view-fg/10 pt-3">
<h5 className="text-xs font-medium text-main-view-fg/70 mb-2">
Features
</h5>
<div className="flex flex-wrap gap-2">
{model.num_mmproj > 0 && (
<div className="flex items-center gap-1.5 px-2 py-1 bg-main-view-fg/10 rounded-md">
<span className="text-xs text-main-view-fg font-medium">
Vision
</span>
</div>
)}
{model.tools && (
<div className="flex items-center gap-1.5 px-2 py-1 bg-main-view-fg/10 rounded-md">
<span className="text-xs text-main-view-fg font-medium">
Tools
</span>
</div>
)}
</div>
</div>
)}
{/* Content Section */}
<div className="border-t border-main-view-fg/10 pt-3">
<h5 className="text-xs font-medium text-main-view-fg/70 mb-1">
{isVariantMode ? 'Download URL' : 'Description'}
</h5>
<div className="text-xs text-main-view-fg/60 bg-main-view-fg/5 rounded p-2">
{isVariantMode ? (
<div className="font-mono break-all">{variant.path}</div>
) : (
extractDescription(model?.description) ||
'No description available'
)}
</div>
</div>
</div>
</HoverCardContent>
</HoverCard>
)
}
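
For reference, a minimal usage sketch of this hover card; the surrounding row and the names catalogModel, supportStatus, and checkSupport are hypothetical stand-ins for state the caller already holds:

// Hypothetical call site; props mirror ModelInfoHoverCardProps above.
<ModelInfoHoverCard
  model={catalogModel}                   // a CatalogModel from the models service
  defaultModelQuantizations={['q4_k_m']} // preferred quant substrings (assumed value)
  modelSupportStatus={supportStatus}     // e.g. { 'llama-q4_k_m': 'GREEN' }
  onCheckModelSupport={(variant) => checkSupport(variant)}
/>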

View File

@@ -0,0 +1,142 @@
import { useCallback, useEffect, useState } from 'react'
import { cn } from '@/lib/utils'
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from '@/components/ui/tooltip'
import { isModelSupported } from '@/services/models'
import { getJanDataFolderPath, joinPath } from '@janhq/core'
interface ModelSupportStatusProps {
modelId: string | undefined
provider: string | undefined
contextSize: number
className?: string
}
export const ModelSupportStatus = ({
modelId,
provider,
contextSize,
className,
}: ModelSupportStatusProps) => {
const [modelSupportStatus, setModelSupportStatus] = useState<
'RED' | 'YELLOW' | 'GREEN' | 'LOADING' | null
>(null)
// Helper function to check model support with proper path resolution
const checkModelSupportWithPath = useCallback(
async (
id: string,
ctxSize: number
): Promise<'RED' | 'YELLOW' | 'GREEN'> => {
try {
// Get Jan's data folder path and construct the full model file path
// Following the llamacpp extension structure: <Jan's data folder>/llamacpp/models/<modelId>/model.gguf
const janDataFolder = await getJanDataFolderPath()
const modelFilePath = await joinPath([
janDataFolder,
'llamacpp',
'models',
id,
'model.gguf',
])
return await isModelSupported(modelFilePath, ctxSize)
} catch (error) {
console.error(
'Error checking model support with constructed path:',
error
)
// If path construction or model support check fails, assume not supported
return 'RED'
}
},
[]
)
// Helper function to get icon color based on model support status
const getStatusColor = (): string => {
switch (modelSupportStatus) {
case 'GREEN':
return 'bg-green-500'
case 'YELLOW':
return 'bg-yellow-500'
case 'RED':
return 'bg-red-500'
case 'LOADING':
return 'bg-main-view-fg/50'
default:
return 'bg-main-view-fg/50'
}
}
// Helper function to get tooltip text based on model support status
const getStatusTooltip = (): string => {
switch (modelSupportStatus) {
case 'GREEN':
return `Works Well on your device (ctx: ${contextSize})`
case 'YELLOW':
return `Might work on your device (ctx: ${contextSize})`
case 'RED':
return `Doesn't work on your device (ctx: ${contextSize})`
case 'LOADING':
return 'Checking device compatibility...'
default:
return 'Unknown'
}
}
// Check model support when model changes
useEffect(() => {
const checkModelSupport = async () => {
if (modelId && provider === 'llamacpp') {
// Set loading state immediately
setModelSupportStatus('LOADING')
try {
const supportStatus = await checkModelSupportWithPath(
modelId,
contextSize
)
setModelSupportStatus(supportStatus)
} catch (error) {
console.error('Error checking model support:', error)
setModelSupportStatus('RED')
}
} else {
// Only show status for llamacpp models since isModelSupported is specific to llamacpp
setModelSupportStatus(null)
}
}
checkModelSupport()
}, [modelId, provider, contextSize, checkModelSupportWithPath])
// Don't render anything if no status or not llamacpp
if (!modelSupportStatus || provider !== 'llamacpp') {
return null
}
return (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<div
className={cn(
'size-2 flex items-center justify-center rounded-full',
modelSupportStatus === 'LOADING'
? 'size-2.5 border border-main-view-fg/50 border-t-transparent animate-spin'
: getStatusColor(),
className
)}
/>
</TooltipTrigger>
<TooltipContent>
<p>{getStatusTooltip()}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
)
}
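
A short usage sketch, assuming the caller already tracks the selected model and provider (the variable names below are hypothetical):

// Renders a colored status dot with a tooltip; returns null for any
// provider other than 'llamacpp'.
<ModelSupportStatus
  modelId={selectedModel?.id}
  provider={selectedProvider}
  contextSize={8192}
/>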

View File

@@ -11,7 +11,6 @@ import {
 import { useMatches, useNavigate } from '@tanstack/react-router'
 import { cn } from '@/lib/utils'
-import { useGeneralSetting } from '@/hooks/useGeneralSetting'
 import { useModelProvider } from '@/hooks/useModelProvider'
 import { getProviderTitle } from '@/lib/utils'
 import ProvidersAvatar from '@/containers/ProvidersAvatar'
@@ -23,7 +22,6 @@ const SettingsMenu = () => {
   const matches = useMatches()
   const navigate = useNavigate()
-  const { experimentalFeatures } = useGeneralSetting()
   const { providers } = useModelProvider()

   // Filter providers that have active API keys (or are llama.cpp which doesn't need one)
@@ -79,15 +77,10 @@ const SettingsMenu = () => {
       title: 'common:hardware',
       route: route.settings.hardware,
     },
-    // Only show MCP Servers when experimental features are enabled
-    ...(experimentalFeatures
-      ? [
-          {
-            title: 'common:mcp-servers',
-            route: route.settings.mcp_servers,
-          },
-        ]
-      : []),
+    {
+      title: 'common:mcp-servers',
+      route: route.settings.mcp_servers,
+    },
     {
       title: 'common:local_api_server',
       route: route.settings.local_api_server,

View File

@@ -291,15 +291,6 @@ describe('ChatInput', () => {
     expect(stopButton).toBeInTheDocument()
   })

-  it('shows capability icons when model supports them', () => {
-    act(() => {
-      renderWithRouter()
-    })
-
-    // Should show vision icon (rendered as SVG with tabler-icon-eye class)
-    const visionIcon = document.querySelector('.tabler-icon-eye')
-    expect(visionIcon).toBeInTheDocument()
-  })

   it('shows model selection dropdown', () => {
     act(() => {

View File

@@ -5,7 +5,6 @@ import SettingsMenu from '../SettingsMenu'
 import { useNavigate, useMatches } from '@tanstack/react-router'
 import { useGeneralSetting } from '@/hooks/useGeneralSetting'
 import { useModelProvider } from '@/hooks/useModelProvider'
-import { useAppState } from '@/hooks/useAppState'

 // Mock dependencies
 vi.mock('@tanstack/react-router', () => ({
@@ -25,9 +24,7 @@ vi.mock('@/i18n/react-i18next-compat', () => ({
 }))

 vi.mock('@/hooks/useGeneralSetting', () => ({
-  useGeneralSetting: vi.fn(() => ({
-    experimentalFeatures: false,
-  })),
+  useGeneralSetting: vi.fn(() => ({})),
 }))

 vi.mock('@/hooks/useModelProvider', () => ({
@@ -88,21 +85,6 @@ describe('SettingsMenu', () => {
     expect(screen.getByText('common:local_api_server')).toBeInTheDocument()
     expect(screen.getByText('common:https_proxy')).toBeInTheDocument()
     expect(screen.getByText('common:extensions')).toBeInTheDocument()
-  })
-
-  it('does not show MCP Servers when experimental features disabled', () => {
-    render(<SettingsMenu />)
-
-    expect(screen.queryByText('common:mcp-servers')).not.toBeInTheDocument()
-  })
-
-  it('shows MCP Servers when experimental features enabled', () => {
-    vi.mocked(useGeneralSetting).mockReturnValue({
-      experimentalFeatures: true,
-    })
-
-    render(<SettingsMenu />)
-
     expect(screen.getByText('common:mcp-servers')).toBeInTheDocument()
   })
@@ -110,7 +92,7 @@ describe('SettingsMenu', () => {
     render(<SettingsMenu />)

     const chevronButtons = screen.getAllByRole('button')
-    const chevron = chevronButtons.find(button =>
+    const chevron = chevronButtons.find((button) =>
       button.querySelector('svg.tabler-icon-chevron-right')
     )
     expect(chevron).toBeInTheDocument()
@@ -121,7 +103,7 @@ describe('SettingsMenu', () => {
     render(<SettingsMenu />)

     const chevronButtons = screen.getAllByRole('button')
-    const chevron = chevronButtons.find(button =>
+    const chevron = chevronButtons.find((button) =>
       button.querySelector('svg.tabler-icon-chevron-right')
     )
     if (!chevron) throw new Error('Chevron button not found')
@@ -159,12 +141,14 @@ describe('SettingsMenu', () => {
     // First expand the providers submenu
     const chevronButtons = screen.getAllByRole('button')
-    const chevron = chevronButtons.find(button =>
+    const chevron = chevronButtons.find((button) =>
       button.querySelector('svg.tabler-icon-chevron-right')
     )
     if (chevron) await user.click(chevron)

-    const openaiProvider = screen.getByTestId('provider-avatar-openai').closest('div')
+    const openaiProvider = screen
+      .getByTestId('provider-avatar-openai')
+      .closest('div')
     expect(openaiProvider).toBeInTheDocument()
   })
@@ -174,14 +158,16 @@ describe('SettingsMenu', () => {
     // First expand the providers
     const chevronButtons = screen.getAllByRole('button')
-    const chevron = chevronButtons.find(button =>
+    const chevron = chevronButtons.find((button) =>
       button.querySelector('svg.tabler-icon-chevron-right')
     )
     if (!chevron) throw new Error('Chevron button not found')
     await user.click(chevron)

     // Then click on a provider
-    const openaiProvider = screen.getByTestId('provider-avatar-openai').closest('div')
+    const openaiProvider = screen
+      .getByTestId('provider-avatar-openai')
+      .closest('div')
     await user.click(openaiProvider!)

     expect(mockNavigate).toHaveBeenCalledWith({
@@ -193,7 +179,9 @@ describe('SettingsMenu', () => {
   it('shows mobile menu toggle button', () => {
     render(<SettingsMenu />)

-    const menuToggle = screen.getByRole('button', { name: 'Toggle settings menu' })
+    const menuToggle = screen.getByRole('button', {
+      name: 'Toggle settings menu',
+    })
     expect(menuToggle).toBeInTheDocument()
   })
@@ -201,7 +189,9 @@ describe('SettingsMenu', () => {
     const user = userEvent.setup()
     render(<SettingsMenu />)

-    const menuToggle = screen.getByRole('button', { name: 'Toggle settings menu' })
+    const menuToggle = screen.getByRole('button', {
+      name: 'Toggle settings menu',
+    })
     await user.click(menuToggle)

     // Menu should now be visible
@@ -214,7 +204,9 @@ describe('SettingsMenu', () => {
     render(<SettingsMenu />)

     // Open menu first
-    const menuToggle = screen.getByRole('button', { name: 'Toggle settings menu' })
+    const menuToggle = screen.getByRole('button', {
+      name: 'Toggle settings menu',
+    })
     await user.click(menuToggle)

     // Then close it
@@ -239,7 +231,7 @@ describe('SettingsMenu', () => {
     // First expand the providers submenu
     const chevronButtons = screen.getAllByRole('button')
-    const chevron = chevronButtons.find(button =>
+    const chevron = chevronButtons.find((button) =>
       button.querySelector('svg.tabler-icon-chevron-right')
     )
     if (chevron) await user.click(chevron)
@@ -273,12 +265,14 @@ describe('SettingsMenu', () => {
     // Expand providers
     const chevronButtons = screen.getAllByRole('button')
-    const chevron = chevronButtons.find(button =>
+    const chevron = chevronButtons.find((button) =>
       button.querySelector('svg.tabler-icon-chevron-right')
     )
     if (chevron) await user.click(chevron)

     expect(screen.getByTestId('provider-avatar-openai')).toBeInTheDocument()
-    expect(screen.queryByTestId('provider-avatar-anthropic')).not.toBeInTheDocument()
+    expect(
+      screen.queryByTestId('provider-avatar-anthropic')
+    ).not.toBeInTheDocument()
   })
 })

View File

@@ -61,15 +61,18 @@ export default function AddEditAssistant({
   const [paramsTypes, setParamsTypes] = useState<string[]>(['string'])
   const [showEmojiPicker, setShowEmojiPicker] = useState(false)
   const emojiPickerRef = useRef<HTMLDivElement>(null)
+  const emojiPickerTriggerRef = useRef<HTMLDivElement>(null)
   const [nameError, setNameError] = useState<string | null>(null)
   const [toolSteps, setToolSteps] = useState(20)

-  // Handle click outside emoji picker
+  // Handle click outside emoji picker or trigger
   useEffect(() => {
     const handleClickOutside = (event: MouseEvent) => {
       if (
         emojiPickerRef.current &&
-        !emojiPickerRef.current.contains(event.target as Node)
+        emojiPickerTriggerRef.current &&
+        !emojiPickerRef.current.contains(event.target as Node) &&
+        !emojiPickerTriggerRef.current.contains(event.target as Node)
       ) {
         setShowEmojiPicker(false)
       }
@@ -91,7 +94,9 @@ export default function AddEditAssistant({
       setName(initialData.name)
       setDescription(initialData.description)
       setInstructions(initialData.instructions)
+      setShowEmojiPicker(false)
       setToolSteps(initialData.tool_steps ?? 20)

       // Convert parameters object to arrays of keys and values
       const keys = Object.keys(initialData.parameters || {})
       const values = Object.values(initialData.parameters || {})
@@ -122,6 +127,7 @@ export default function AddEditAssistant({
     setParamsValues([''])
     setParamsTypes(['string'])
     setNameError(null)
+    setShowEmojiPicker(false)
     setToolSteps(20)
   }
@@ -247,6 +253,7 @@ export default function AddEditAssistant({
           <div
             className="border rounded-sm p-1 w-9 h-9 flex items-center justify-center border-main-view-fg/10 cursor-pointer"
             onClick={() => setShowEmojiPicker(!showEmojiPicker)}
+            ref={emojiPickerTriggerRef}
           >
             {avatar ? (
               <AvatarEmoji
@@ -325,6 +332,9 @@ export default function AddEditAssistant({
               className="resize-none"
               rows={4}
             />
+            <div className="text-xs text-main-view-fg/60">
+              {t('assistants:instructionsDateHint')}
+            </div>
           </div>

           <div className="space-y-2 my-4 mt-6">

View File

@@ -0,0 +1,253 @@
import {
Dialog,
DialogContent,
DialogDescription,
DialogHeader,
DialogTitle,
DialogTrigger,
} from '@/components/ui/dialog'
import { Switch } from '@/components/ui/switch'
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from '@/components/ui/tooltip'
import { useModelProvider } from '@/hooks/useModelProvider'
import {
IconPencil,
IconEye,
IconTool,
// IconWorld,
// IconAtom,
IconCodeCircle2,
} from '@tabler/icons-react'
import { useState, useEffect } from 'react'
import { useTranslation } from '@/i18n/react-i18next-compat'
// No need to define our own interface, we'll use the existing Model type
type DialogEditModelProps = {
provider: ModelProvider
modelId?: string // Optional model ID to edit
}
export const DialogEditModel = ({
provider,
modelId,
}: DialogEditModelProps) => {
const { t } = useTranslation()
const { updateProvider } = useModelProvider()
const [selectedModelId, setSelectedModelId] = useState<string>('')
const [capabilities, setCapabilities] = useState<Record<string, boolean>>({
completion: false,
vision: false,
tools: false,
reasoning: false,
embeddings: false,
web_search: false,
})
// Initialize with the provided model ID or the first model if available
useEffect(() => {
if (modelId) {
setSelectedModelId(modelId)
} else if (provider.models && provider.models.length > 0) {
setSelectedModelId(provider.models[0].id)
}
}, [provider, modelId])
// Get the currently selected model
const selectedModel = provider.models.find(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(m: any) => m.id === selectedModelId
)
// Initialize capabilities from selected model
useEffect(() => {
if (selectedModel) {
const modelCapabilities = selectedModel.capabilities || []
setCapabilities({
completion: modelCapabilities.includes('completion'),
vision: modelCapabilities.includes('vision'),
tools: modelCapabilities.includes('tools'),
embeddings: modelCapabilities.includes('embeddings'),
web_search: modelCapabilities.includes('web_search'),
reasoning: modelCapabilities.includes('reasoning'),
})
}
}, [selectedModel])
// Track if capabilities were updated by user action
const [capabilitiesUpdated, setCapabilitiesUpdated] = useState(false)
// Update model capabilities - only update local state
const handleCapabilityChange = (capability: string, enabled: boolean) => {
setCapabilities((prev) => ({
...prev,
[capability]: enabled,
}))
// Mark that capabilities were updated by user action
setCapabilitiesUpdated(true)
}
// Use effect to update the provider when capabilities are explicitly changed by user
useEffect(() => {
// Only run if capabilities were updated by user action and we have a selected model
if (!capabilitiesUpdated || !selectedModel) return
// Reset the flag
setCapabilitiesUpdated(false)
// Create updated capabilities array from the state
const updatedCapabilities = Object.entries(capabilities)
.filter(([, isEnabled]) => isEnabled)
.map(([capName]) => capName)
// Find and update the model in the provider
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const updatedModels = provider.models.map((m: any) => {
if (m.id === selectedModelId) {
return {
...m,
capabilities: updatedCapabilities,
}
}
return m
})
// Update the provider with the updated models
updateProvider(provider.provider, {
...provider,
models: updatedModels,
})
}, [
capabilitiesUpdated,
capabilities,
provider,
selectedModel,
selectedModelId,
updateProvider,
])
if (!selectedModel) {
return null
}
return (
<Dialog>
<DialogTrigger asChild>
<div className="size-6 cursor-pointer flex items-center justify-center rounded hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out">
<IconPencil size={18} className="text-main-view-fg/50" />
</div>
</DialogTrigger>
<DialogContent>
<DialogHeader>
<DialogTitle className="line-clamp-1" title={selectedModel.id}>
{t('providers:editModel.title', { modelId: selectedModel.id })}
</DialogTitle>
<DialogDescription>
{t('providers:editModel.description')}
</DialogDescription>
</DialogHeader>
<div className="py-1">
<h3 className="text-sm font-medium mb-3">
{t('providers:editModel.capabilities')}
</h3>
<div className="space-y-4">
<div className="flex items-center justify-between">
<div className="flex items-center space-x-2">
<IconTool className="size-4 text-main-view-fg/70" />
<span className="text-sm">
{t('providers:editModel.tools')}
</span>
</div>
<Switch
id="tools-capability"
checked={capabilities.tools}
onCheckedChange={(checked) =>
handleCapabilityChange('tools', checked)
}
/>
</div>
<div className="flex items-center justify-between">
<div className="flex items-center space-x-2">
<IconEye className="size-4 text-main-view-fg/70" />
<span className="text-sm">
{t('providers:editModel.vision')}
</span>
</div>
<Tooltip>
<TooltipTrigger>
<Switch
id="vision-capability"
checked={capabilities.vision}
disabled={true}
onCheckedChange={(checked) =>
handleCapabilityChange('vision', checked)
}
/>
</TooltipTrigger>
<TooltipContent>
{t('providers:editModel.notAvailable')}
</TooltipContent>
</Tooltip>
</div>
<div className="flex items-center justify-between">
<div className="flex items-center space-x-2">
<IconCodeCircle2 className="size-4 text-main-view-fg/70" />
<span className="text-sm">
{t('providers:editModel.embeddings')}
</span>
</div>
<Tooltip>
<TooltipTrigger>
<Switch
id="embedding-capability"
disabled={true}
checked={capabilities.embeddings}
onCheckedChange={(checked) =>
handleCapabilityChange('embeddings', checked)
}
/>
</TooltipTrigger>
<TooltipContent>
{t('providers:editModel.notAvailable')}
</TooltipContent>
</Tooltip>
</div>
{/* <div className="flex items-center justify-between">
<div className="flex items-center space-x-2">
<IconWorld className="size-4 text-main-view-fg/70" />
<span className="text-sm">Web Search</span>
</div>
<Switch
id="web_search-capability"
checked={capabilities.web_search}
onCheckedChange={(checked) =>
handleCapabilityChange('web_search', checked)
}
/>
</div> */}
{/* <div className="flex items-center justify-between">
<div className="flex items-center space-x-2">
<IconAtom className="size-4 text-main-view-fg/70" />
<span className="text-sm">{t('reasoning')}</span>
</div>
<Switch
id="reasoning-capability"
checked={capabilities.reasoning}
onCheckedChange={(checked) =>
handleCapabilityChange('reasoning', checked)
}
/>
</div> */}
</div>
</div>
</DialogContent>
</Dialog>
)
}
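
A usage sketch: the dialog needs only the provider (plus an optional model id), since it resolves the model internally and writes changes back through updateProvider. The row variables here are hypothetical:

// Hypothetical row in the provider settings list; omitting modelId
// falls back to the provider's first model.
<DialogEditModel provider={provider} modelId={model.id} />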

View File

@@ -0,0 +1,123 @@
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
} from '@/components/ui/dialog'
import { Button } from '@/components/ui/button'
import { AlertTriangle, ChevronDown, ChevronRight } from 'lucide-react'
import { IconCopy, IconCopyCheck } from '@tabler/icons-react'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { toast } from 'sonner'
import { useState } from 'react'
import { useAppState } from '@/hooks/useAppState'
export default function ErrorDialog() {
const { t } = useTranslation()
const { errorMessage, setErrorMessage } = useAppState()
const [isCopying, setIsCopying] = useState(false)
const [isDetailExpanded, setIsDetailExpanded] = useState(true)
const handleCopy = async () => {
setIsCopying(true)
try {
await navigator.clipboard.writeText(errorMessage?.message ?? '')
toast.success('Copy successful', {
id: 'copy-model',
description: 'Model load error information copied to clipboard',
})
} catch {
toast.error('Failed to copy', {
id: 'copy-model-error',
description: 'Failed to copy error information to clipboard',
})
} finally {
setTimeout(() => setIsCopying(false), 2000)
}
}
const handleDialogOpen = (open: boolean) => {
setErrorMessage(open ? errorMessage : undefined)
}
return (
<Dialog open={!!errorMessage} onOpenChange={handleDialogOpen}>
<DialogContent showCloseButton={false}>
<DialogHeader>
<div className="flex items-start gap-3">
<div className="shrink-0">
<AlertTriangle className="size-4 text-destructive" />
</div>
<div>
<DialogTitle>{t('common:error')}</DialogTitle>
<DialogDescription className="mt-1 text-main-view-fg/70">
{errorMessage?.title ?? 'Something went wrong'}
</DialogDescription>
</div>
</div>
</DialogHeader>
<div className="bg-main-view-fg/2 p-2 border border-main-view-fg/5 rounded-lg space-y-2">
<div>
<button
onClick={() => setIsDetailExpanded(!isDetailExpanded)}
className="flex items-center gap-1 text-sm text-main-view-fg/60 hover:text-main-view-fg/80 transition-colors cursor-pointer"
>
{isDetailExpanded ? (
<ChevronDown className="size-3" />
) : (
<ChevronRight className="size-3" />
)}
Details
</button>
{isDetailExpanded && (
<div
className="mt-2 text-sm text-main-view-fg/70 leading-relaxed max-h-[150px] overflow-y-auto break-all bg-main-view-fg/10 p-2 rounded border border-main-view-fg/5"
ref={(el) => {
if (el) {
el.scrollTop = el.scrollHeight
}
}}
>
{errorMessage?.message}
</div>
)}
</div>
<span className="text-sm text-main-view-fg/60">{errorMessage?.subtitle}</span>
</div>
<DialogFooter className="flex flex-col gap-2 sm:flex-row sm:justify-right">
<Button
variant="link"
onClick={() => handleDialogOpen(false)}
className="flex-1 text-right sm:flex-none"
>
{t('common:cancel')}
</Button>
<Button
variant="link"
onClick={() => handleCopy()}
disabled={isCopying}
autoFocus
className="flex-1 text-right sm:flex-none border border-main-view-fg/20 !px-2"
>
{isCopying ? (
<>
<IconCopyCheck className="text-accent" />
{t('common:copied')}
</>
) : (
<>
<IconCopy />
{t('common:copy')}
</>
)}
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
)
}

View File

@@ -0,0 +1,138 @@
import { renderHook, act } from '@testing-library/react'
import { describe, it, expect, vi, beforeEach } from 'vitest'
import { useChat } from '../useChat'
// Use hoisted storage for our mock to avoid hoist errors
const hoisted = vi.hoisted(() => ({
builderMock: vi.fn(() => ({
addUserMessage: vi.fn(),
addAssistantMessage: vi.fn(),
getMessages: vi.fn(() => []),
})),
}))
vi.mock('@/lib/messages', () => ({
CompletionMessagesBuilder: hoisted.builderMock,
}))
// Mock dependencies similar to existing tests, but customize assistant
vi.mock('../../hooks/usePrompt', () => ({
usePrompt: vi.fn(() => ({ prompt: 'test prompt', setPrompt: vi.fn() })),
}))
vi.mock('../../hooks/useAppState', () => ({
useAppState: Object.assign(
vi.fn(() => ({
tools: [],
updateTokenSpeed: vi.fn(),
resetTokenSpeed: vi.fn(),
updateTools: vi.fn(),
updateStreamingContent: vi.fn(),
updateLoadingModel: vi.fn(),
setAbortController: vi.fn(),
})),
{ getState: vi.fn(() => ({ tokenSpeed: { tokensPerSecond: 10 } })) }
),
}))
vi.mock('../../hooks/useAssistant', () => ({
useAssistant: vi.fn(() => ({
assistants: [
{
id: 'test-assistant',
instructions: 'Today is {{current_date}}',
parameters: { stream: true },
},
],
currentAssistant: {
id: 'test-assistant',
instructions: 'Today is {{current_date}}',
parameters: { stream: true },
},
})),
}))
vi.mock('../../hooks/useModelProvider', () => ({
useModelProvider: vi.fn(() => ({
getProviderByName: vi.fn(() => ({ provider: 'openai', models: [] })),
selectedModel: { id: 'test-model', capabilities: ['tools'] },
selectedProvider: 'openai',
updateProvider: vi.fn(),
})),
}))
vi.mock('../../hooks/useThreads', () => ({
useThreads: vi.fn(() => ({
getCurrentThread: vi.fn(() => ({ id: 'test-thread', model: { id: 'test-model', provider: 'openai' } })),
createThread: vi.fn(() => Promise.resolve({ id: 'test-thread', model: { id: 'test-model', provider: 'openai' } })),
updateThreadTimestamp: vi.fn(),
})),
}))
vi.mock('../../hooks/useMessages', () => ({
useMessages: vi.fn(() => ({ getMessages: vi.fn(() => []), addMessage: vi.fn() })),
}))
vi.mock('../../hooks/useToolApproval', () => ({
useToolApproval: vi.fn(() => ({ approvedTools: [], showApprovalModal: vi.fn(), allowAllMCPPermissions: false })),
}))
vi.mock('../../hooks/useModelContextApproval', () => ({
useContextSizeApproval: vi.fn(() => ({ showApprovalModal: vi.fn() })),
}))
vi.mock('../../hooks/useModelLoad', () => ({
useModelLoad: vi.fn(() => ({ setModelLoadError: vi.fn() })),
}))
vi.mock('@tanstack/react-router', () => ({
useRouter: vi.fn(() => ({ navigate: vi.fn() })),
}))
vi.mock('@/lib/completion', () => ({
emptyThreadContent: { thread_id: 'test-thread', content: '' },
extractToolCall: vi.fn(),
newUserThreadContent: vi.fn(() => ({ thread_id: 'test-thread', content: 'user message' })),
newAssistantThreadContent: vi.fn(() => ({ thread_id: 'test-thread', content: 'assistant message' })),
sendCompletion: vi.fn(() => Promise.resolve({ choices: [{ message: { content: '' } }] })),
postMessageProcessing: vi.fn(),
isCompletionResponse: vi.fn(() => true),
}))
vi.mock('@/services/mcp', () => ({ getTools: vi.fn(() => Promise.resolve([])) }))
vi.mock('@/services/models', () => ({
startModel: vi.fn(() => Promise.resolve()),
stopModel: vi.fn(() => Promise.resolve()),
stopAllModels: vi.fn(() => Promise.resolve()),
}))
vi.mock('@/services/providers', () => ({ updateSettings: vi.fn(() => Promise.resolve()) }))
vi.mock('@tauri-apps/api/event', () => ({ listen: vi.fn(() => Promise.resolve(vi.fn())) }))
describe('useChat instruction rendering', () => {
beforeEach(() => {
vi.clearAllMocks()
})
it('renders assistant instructions by replacing {{current_date}} with today', async () => {
vi.useFakeTimers()
vi.setSystemTime(new Date('2025-08-16T00:00:00Z'))
const { result } = renderHook(() => useChat())
await act(async () => {
await result.current.sendMessage('Hello')
})
expect(hoisted.builderMock).toHaveBeenCalled()
const calls = (hoisted.builderMock as any).mock.calls as any[]
const call = calls[0]
expect(call[0]).toEqual([])
expect(call[1]).toMatch(/^Today is /)
expect(call[1]).not.toContain('{{current_date}}')
vi.useRealTimers()
})
})

View File

@@ -40,7 +40,6 @@ describe('useGeneralSetting', () => {
     useGeneralSetting.setState({
       currentLanguage: 'en',
       spellCheckChatInput: true,
-      experimentalFeatures: false,
       huggingfaceToken: undefined,
     })
@@ -60,11 +59,9 @@ describe('useGeneralSetting', () => {
     expect(result.current.currentLanguage).toBe('en')
     expect(result.current.spellCheckChatInput).toBe(true)
-    expect(result.current.experimentalFeatures).toBe(false)
     expect(result.current.huggingfaceToken).toBeUndefined()
     expect(typeof result.current.setCurrentLanguage).toBe('function')
     expect(typeof result.current.setSpellCheckChatInput).toBe('function')
-    expect(typeof result.current.setExperimentalFeatures).toBe('function')
     expect(typeof result.current.setHuggingfaceToken).toBe('function')
   })
@@ -155,42 +152,6 @@ describe('useGeneralSetting', () => {
     })
   })

-  describe('setExperimentalFeatures', () => {
-    it('should enable experimental features', () => {
-      const { result } = renderHook(() => useGeneralSetting())
-
-      act(() => {
-        result.current.setExperimentalFeatures(true)
-      })
-
-      expect(result.current.experimentalFeatures).toBe(true)
-    })
-
-    it('should disable experimental features', () => {
-      const { result } = renderHook(() => useGeneralSetting())
-
-      act(() => {
-        result.current.setExperimentalFeatures(false)
-      })
-
-      expect(result.current.experimentalFeatures).toBe(false)
-    })
-
-    it('should toggle experimental features multiple times', () => {
-      const { result } = renderHook(() => useGeneralSetting())
-
-      act(() => {
-        result.current.setExperimentalFeatures(true)
-      })
-      expect(result.current.experimentalFeatures).toBe(true)
-
-      act(() => {
-        result.current.setExperimentalFeatures(false)
-      })
-      expect(result.current.experimentalFeatures).toBe(false)
-    })
-  })
-
   describe('setHuggingfaceToken', () => {
     it('should set huggingface token', () => {
       const { result } = renderHook(() => useGeneralSetting())
@@ -254,7 +215,7 @@ describe('useGeneralSetting', () => {
     expect(mockGetByName).toHaveBeenCalledWith('@janhq/download-extension')

     // Wait for async operations
-    await new Promise(resolve => setTimeout(resolve, 0))
+    await new Promise((resolve) => setTimeout(resolve, 0))

     expect(mockGetSettings).toHaveBeenCalled()
     expect(mockUpdateSettings).toHaveBeenCalledWith([
@@ -272,13 +233,11 @@ describe('useGeneralSetting', () => {
     act(() => {
       result1.current.setCurrentLanguage('id')
       result1.current.setSpellCheckChatInput(false)
-      result1.current.setExperimentalFeatures(true)
       result1.current.setHuggingfaceToken('shared-token')
     })

     expect(result2.current.currentLanguage).toBe('id')
     expect(result2.current.spellCheckChatInput).toBe(false)
-    expect(result2.current.experimentalFeatures).toBe(true)
     expect(result2.current.huggingfaceToken).toBe('shared-token')
   })
 })
@@ -290,13 +249,11 @@ describe('useGeneralSetting', () => {
     act(() => {
       result.current.setCurrentLanguage('vn')
       result.current.setSpellCheckChatInput(false)
-      result.current.setExperimentalFeatures(true)
       result.current.setHuggingfaceToken('complex-token-123')
     })

     expect(result.current.currentLanguage).toBe('vn')
     expect(result.current.spellCheckChatInput).toBe(false)
-    expect(result.current.experimentalFeatures).toBe(true)
     expect(result.current.huggingfaceToken).toBe('complex-token-123')
   })
@@ -314,11 +271,9 @@ describe('useGeneralSetting', () => {
     // Second update
     act(() => {
-      result.current.setExperimentalFeatures(true)
       result.current.setHuggingfaceToken('sequential-token')
     })

-    expect(result.current.experimentalFeatures).toBe(true)
     expect(result.current.huggingfaceToken).toBe('sequential-token')

     // Third update

View File

@@ -24,7 +24,7 @@ describe('useLocalApiServer', () => {
     vi.clearAllMocks()
     // Reset store state to defaults
     const store = useLocalApiServer.getState()
-    store.setRunOnStartup(true)
+    store.setEnableOnStartup(true)
     store.setServerHost('127.0.0.1')
     store.setServerPort(1337)
     store.setApiPrefix('/v1')
@@ -37,7 +37,7 @@ describe('useLocalApiServer', () => {
   it('should initialize with default values', () => {
     const { result } = renderHook(() => useLocalApiServer())

-    expect(result.current.runOnStartup).toBe(true)
+    expect(result.current.enableOnStartup).toBe(true)
     expect(result.current.serverHost).toBe('127.0.0.1')
     expect(result.current.serverPort).toBe(1337)
     expect(result.current.apiPrefix).toBe('/v1')
@@ -47,21 +47,21 @@ describe('useLocalApiServer', () => {
     expect(result.current.apiKey).toBe('')
   })

-  describe('runOnStartup', () => {
+  describe('enableOnStartup', () => {
     it('should set run on startup', () => {
       const { result } = renderHook(() => useLocalApiServer())

       act(() => {
-        result.current.setRunOnStartup(false)
+        result.current.setEnableOnStartup(false)
       })

-      expect(result.current.runOnStartup).toBe(false)
+      expect(result.current.enableOnStartup).toBe(false)

       act(() => {
-        result.current.setRunOnStartup(true)
+        result.current.setEnableOnStartup(true)
       })

-      expect(result.current.runOnStartup).toBe(true)
+      expect(result.current.enableOnStartup).toBe(true)
     })
   })
@@ -323,7 +323,7 @@ describe('useLocalApiServer', () => {
     const { result: result2 } = renderHook(() => useLocalApiServer())

     act(() => {
-      result1.current.setRunOnStartup(false)
+      result1.current.setEnableOnStartup(false)
       result1.current.setServerHost('0.0.0.0')
       result1.current.setServerPort(8080)
       result1.current.setApiPrefix('/api')
@@ -333,7 +333,7 @@ describe('useLocalApiServer', () => {
       result1.current.addTrustedHost('example.com')
     })

-    expect(result2.current.runOnStartup).toBe(false)
+    expect(result2.current.enableOnStartup).toBe(false)
     expect(result2.current.serverHost).toBe('0.0.0.0')
     expect(result2.current.serverPort).toBe(8080)
     expect(result2.current.apiPrefix).toBe('/api')

View File

@@ -4,6 +4,12 @@ import { MCPTool } from '@/types/completion'
 import { useAssistant } from './useAssistant'
 import { ChatCompletionMessageToolCall } from 'openai/resources'

+type AppErrorMessage = {
+  message?: string
+  title?: string
+  subtitle: string
+}
+
 type AppState = {
   streamingContent?: ThreadMessage
   loadingModel?: boolean
@@ -13,6 +19,7 @@ type AppState = {
   tokenSpeed?: TokenSpeed
   currentToolCall?: ChatCompletionMessageToolCall
   showOutOfContextDialog?: boolean
+  errorMessage?: AppErrorMessage
   cancelToolCall?: () => void
   setServerStatus: (value: 'running' | 'stopped' | 'pending') => void
   updateStreamingContent: (content: ThreadMessage | undefined) => void
@@ -26,6 +33,7 @@ type AppState = {
   resetTokenSpeed: () => void
   setOutOfContextDialog: (show: boolean) => void
   setCancelToolCall: (cancel: (() => void) | undefined) => void
+  setErrorMessage: (error: AppErrorMessage | undefined) => void
 }

 export const useAppState = create<AppState>()((set) => ({
@@ -120,4 +128,9 @@ export const useAppState = create<AppState>()((set) => ({
       cancelToolCall: cancel,
     }))
   },
+  setErrorMessage: (error) => {
+    set(() => ({
+      errorMessage: error,
+    }))
+  },
 }))
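
This new errorMessage slice is what the ErrorDialog component added earlier in this commit reads. A minimal sketch of raising the dialog from elsewhere in the app (the call site and rawBackendLog are hypothetical):

// Inside a React component or hook; AppErrorMessage requires only `subtitle`.
const { setErrorMessage } = useAppState()
setErrorMessage({
  title: 'Failed to load model',           // shown under the dialog title
  subtitle: 'Try a smaller quantization',  // shown below the details box
  message: rawBackendLog,                  // expandable, copyable details
})
// Passing undefined closes the dialog again.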

View File

@@ -43,7 +43,7 @@ export const defaultAssistant: Assistant = {
   description:
     'Jan is a helpful desktop assistant that can reason through complex tasks and use tools to complete them on the user's behalf.',
   instructions:
-    'You are a helpful AI assistant. Your primary goal is to assist users with their questions and tasks to the best of your abilities.\n\nWhen responding:\n- Answer directly from your knowledge when you can\n- Be concise, clear, and helpful\n- Admit when you're unsure rather than making things up\n\nIf tools are available to you:\n- Only use tools when they add real value to your response\n- Use tools when the user explicitly asks (e.g., "search for...", "calculate...", "run this code")\n- Use tools for information you don't know or that needs verification\n- Never use tools just because they're available\n\nWhen using tools:\n- Use one tool at a time and wait for results\n- Use actual values as arguments, not variable names\n- Learn from each result before deciding next steps\n- Avoid repeating the same tool call with identical parameters\n\nRemember: Most questions can be answered without tools. Think first whether you need them.',
+    'You are a helpful AI assistant. Your primary goal is to assist users with their questions and tasks to the best of your abilities.\n\nWhen responding:\n- Answer directly from your knowledge when you can\n- Be concise, clear, and helpful\n- Admit when you're unsure rather than making things up\n\nIf tools are available to you:\n- Only use tools when they add real value to your response\n- Use tools when the user explicitly asks (e.g., "search for...", "calculate...", "run this code")\n- Use tools for information you don't know or that needs verification\n- Never use tools just because they're available\n\nWhen using tools:\n- Use one tool at a time and wait for results\n- Use actual values as arguments, not variable names\n- Learn from each result before deciding next steps\n- Avoid repeating the same tool call with identical parameters\n\nRemember: Most questions can be answered without tools. Think first whether you need them.\n\nCurrent date: {{current_date}}',
 }

View File

@@ -17,6 +17,7 @@ import {
   sendCompletion,
 } from '@/lib/completion'
 import { CompletionMessagesBuilder } from '@/lib/messages'
+import { renderInstructions } from '@/lib/instructionTemplate'
 import { ChatCompletionMessageToolCall } from 'openai/resources'
 import { useAssistant } from './useAssistant'
@@ -28,7 +29,6 @@ import { OUT_OF_CONTEXT_SIZE } from '@/utils/error'
 import { updateSettings } from '@/services/providers'
 import { useContextSizeApproval } from './useModelContextApproval'
 import { useModelLoad } from './useModelLoad'
-import { useGeneralSetting } from './useGeneralSetting'
 import {
   ReasoningProcessor,
   extractReasoningFromMessage,
@@ -36,7 +36,6 @@ import {
 export const useChat = () => {
   const { prompt, setPrompt } = usePrompt()
-  const { experimentalFeatures } = useGeneralSetting()
   const {
     tools,
     updateTokenSpeed,
@@ -239,20 +238,19 @@ export const useChat = () => {
       const builder = new CompletionMessagesBuilder(
         messages,
-        currentAssistant?.instructions
+        renderInstructions(currentAssistant?.instructions)
       )

       if (troubleshooting) builder.addUserMessage(message, attachments)

       let isCompleted = false
       // Filter tools based on model capabilities and available tools for this thread
-      let availableTools =
-        experimentalFeatures && selectedModel?.capabilities?.includes('tools')
-          ? tools.filter((tool) => {
-              const disabledTools = getDisabledToolsForThread(activeThread.id)
-              return !disabledTools.includes(tool.name)
-            })
-          : []
+      let availableTools = selectedModel?.capabilities?.includes('tools')
+        ? tools.filter((tool) => {
+            const disabledTools = getDisabledToolsForThread(activeThread.id)
+            return !disabledTools.includes(tool.name)
+          })
+        : []

       let assistantLoopSteps = 0
@@ -542,7 +540,6 @@ export const useChat = () => {
       setPrompt,
       selectedModel,
       currentAssistant,
-      experimentalFeatures,
       tools,
       updateLoadingModel,
       getDisabledToolsForThread,

View File

@@ -6,10 +6,8 @@ import { ExtensionManager } from '@/lib/extension'
 type LeftPanelStoreState = {
   currentLanguage: Language
   spellCheckChatInput: boolean
-  experimentalFeatures: boolean
   huggingfaceToken?: string
   setHuggingfaceToken: (token: string) => void
-  setExperimentalFeatures: (value: boolean) => void
   setSpellCheckChatInput: (value: boolean) => void
   setCurrentLanguage: (value: Language) => void
 }
@@ -19,9 +17,7 @@ export const useGeneralSetting = create<LeftPanelStoreState>()(
     (set) => ({
       currentLanguage: 'en',
       spellCheckChatInput: true,
-      experimentalFeatures: false,
       huggingfaceToken: undefined,
-      setExperimentalFeatures: (value) => set({ experimentalFeatures: value }),
       setSpellCheckChatInput: (value) => set({ spellCheckChatInput: value }),
       setCurrentLanguage: (value) => set({ currentLanguage: value }),
       setHuggingfaceToken: (token) => {

View File

@@ -4,8 +4,8 @@ import { localStorageKey } from '@/constants/localStorage'
 type LocalApiServerState = {
   // Run local API server once app opens
-  runOnStartup: boolean
-  setRunOnStartup: (value: boolean) => void
+  enableOnStartup: boolean
+  setEnableOnStartup: (value: boolean) => void
   // Server host option (127.0.0.1 or 0.0.0.0)
   serverHost: '127.0.0.1' | '0.0.0.0'
   setServerHost: (value: '127.0.0.1' | '0.0.0.0') => void
@@ -33,8 +33,8 @@ type LocalApiServerState = {
 export const useLocalApiServer = create<LocalApiServerState>()(
   persist(
     (set) => ({
-      runOnStartup: true,
-      setRunOnStartup: (value) => set({ runOnStartup: value }),
+      enableOnStartup: false,
+      setEnableOnStartup: (value) => set({ enableOnStartup: value }),
       serverHost: '127.0.0.1',
       setServerHost: (value) => set({ serverHost: value }),
       serverPort: 1337,
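
Note the behavior change folded into the rename: the persisted default flips from true to false, so fresh installs no longer start the server automatically. A sketch of an app-launch check (the startup hook and the service call are not part of this diff):

// Hypothetical startup check; the flag survives restarts via zustand persist.
const { enableOnStartup } = useLocalApiServer.getState()
if (enableOnStartup) {
  // start the local API server here (implementation not shown in this commit)
}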

View File

@@ -0,0 +1,27 @@
import { describe, it, expect, vi } from 'vitest'
import { renderInstructions } from '../instructionTemplate'
describe('renderInstructions', () => {
it('replaces {{current_date}} with today when no params provided', () => {
vi.useFakeTimers()
vi.setSystemTime(new Date('2025-08-16T00:00:00Z'))
const input = 'Today is {{current_date}}.'
const out = renderInstructions(input)
expect(out).not.toBe(input)
expect(out).toMatch(/^Today is /)
expect(out).not.toContain('{{current_date}}')
vi.useRealTimers()
})
it('replaces multiple occurrences of {{current_date}}', () => {
const input = 'A {{current_date}} B {{current_date}} C'
const out = renderInstructions(input)
expect(out).not.toContain('{{current_date}}')
expect(out.startsWith('A ')).toBe(true)
expect(out.includes(' B ')).toBe(true)
expect(out.endsWith(' C')).toBe(true)
})
})

View File

@@ -0,0 +1,23 @@
import { formatDate } from '@/utils/formatDate'
/**
* Render assistant instructions by replacing supported placeholders.
* Supported placeholders:
* - {{current_date}}: Inserts today's date (UTC, long month), e.g., August 16, 2025.
*/
export function renderInstructions(instructions: string): string
export function renderInstructions(
instructions?: string
): string | undefined
export function renderInstructions(
instructions?: string
): string | undefined {
if (!instructions) return instructions
const currentDateStr = formatDate(new Date(), { includeTime: false })
// Replace current_date (allow spaces inside braces).
let rendered = instructions
rendered = rendered.replace(/\{\{\s*current_date\s*\}\}/gi, currentDateStr)
return rendered
}
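
Taken together with the tests above, usage comes down to the following (the exact date text depends on formatDate):

// The placeholder match is case-insensitive and tolerates inner spaces.
renderInstructions('Today is {{current_date}}.') // → 'Today is August 16, 2025.'
renderInstructions('A {{ CURRENT_DATE }} B')     // → 'A August 16, 2025 B'
renderInstructions(undefined)                    // → undefined (the overload keeps the type)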

View File

@@ -30,5 +30,6 @@
   "createNew": "Neuen Assistenten anlegen",
   "personality": "Persönlichkeit",
   "capabilities": "Fähigkeiten",
+  "instructionsDateHint": "Tipp: Verwenden Sie {{current_date}}, um das heutige Datum einzufügen.",
   "maxToolSteps": "Maximale Werkzeugschritte"
 }

View File

@@ -256,6 +256,22 @@
     "downloadCancelled": {
       "title": "Download abgebrochen",
       "description": "Der Download-Prozess wurde abgebrochen"
+    },
+    "downloadFailed": {
+      "title": "Download fehlgeschlagen",
+      "description": "{{item}} Download fehlgeschlagen"
+    },
+    "modelValidationStarted": {
+      "title": "Modell wird validiert",
+      "description": "Modell \"{{modelId}}\" erfolgreich heruntergeladen. Integrität wird überprüft..."
+    },
+    "modelValidationFailed": {
+      "title": "Modellvalidierung fehlgeschlagen",
+      "description": "Das heruntergeladene Modell \"{{modelId}}\" ist bei der Integritätsprüfung fehlgeschlagen und wurde entfernt. Die Datei könnte beschädigt oder manipuliert worden sein."
+    },
+    "downloadAndVerificationComplete": {
+      "title": "Download abgeschlossen",
+      "description": "Modell \"{{item}}\" erfolgreich heruntergeladen und verifiziert"
     }
   }
 }

View File

@@ -12,6 +12,7 @@
   "showVariants": "Zeige Varianten",
   "useModel": "Nutze dieses Modell",
   "downloadModel": "Modell herunterladen",
+  "tools": "Werkzeuge",
   "searchPlaceholder": "Suche nach Modellen auf Hugging Face...",
   "editTheme": "Bearbeite Erscheinungsbild",
   "joyride": {

View File

@@ -30,5 +30,6 @@
   "createNew": "Create New Assistant",
   "personality": "Personality",
   "capabilities": "Capabilities",
+  "instructionsDateHint": "Tip: Use {{current_date}} to insert today's date.",
   "maxToolSteps": "Max tool steps"
 }

View File

@@ -261,6 +261,18 @@
     "downloadFailed": {
       "title": "Download Failed",
       "description": "{{item}} download failed"
+    },
+    "modelValidationStarted": {
+      "title": "Validating Model",
+      "description": "Downloaded model \"{{modelId}}\" successfully. Verifying integrity..."
+    },
+    "modelValidationFailed": {
+      "title": "Model Validation Failed",
+      "description": "The downloaded model \"{{modelId}}\" failed integrity verification and was removed. The file may be corrupted or tampered with."
+    },
+    "downloadAndVerificationComplete": {
+      "title": "Download Complete",
+      "description": "Model \"{{item}}\" downloaded and verified successfully"
     }
   }
 }

View File

@@ -12,6 +12,7 @@
   "showVariants": "Show variants",
   "useModel": "Use this model",
   "downloadModel": "Download model",
+  "tools": "Tools",
   "searchPlaceholder": "Search for models on Hugging Face...",
   "joyride": {
     "recommendedModelTitle": "Recommended Model",

View File

@@ -160,6 +160,9 @@
   "serverLogs": "Server Logs",
   "serverLogsDesc": "View detailed logs of the local API server.",
   "openLogs": "Open Logs",
+  "startupConfiguration": "Startup Configuration",
+  "runOnStartup": "Enable by default on startup",
+  "runOnStartupDesc": "Automatically start the Local API Server when the application launches.",
   "serverConfiguration": "Server Configuration",
   "serverHost": "Server Host",
   "serverHostDesc": "Network address for the server.",

View File

@@ -30,5 +30,6 @@
   "createNew": "Buat Asisten Baru",
   "personality": "Kepribadian",
   "capabilities": "Kemampuan",
+  "instructionsDateHint": "Tips: Gunakan {{current_date}} untuk menyisipkan tanggal hari ini.",
   "maxToolSteps": "Langkah alat maksimum"
 }

View File

@@ -249,6 +249,22 @@
     "downloadCancelled": {
       "title": "Unduhan Dibatalkan",
       "description": "Proses unduhan telah dibatalkan"
+    },
+    "downloadFailed": {
+      "title": "Unduhan Gagal",
+      "description": "Unduhan {{item}} gagal"
+    },
+    "modelValidationStarted": {
+      "title": "Memvalidasi Model",
+      "description": "Model \"{{modelId}}\" berhasil diunduh. Memverifikasi integritas..."
+    },
+    "modelValidationFailed": {
+      "title": "Validasi Model Gagal",
+      "description": "Model yang diunduh \"{{modelId}}\" gagal verifikasi integritas dan telah dihapus. File mungkin rusak atau telah dimanipulasi."
+    },
+    "downloadAndVerificationComplete": {
+      "title": "Unduhan Selesai",
+      "description": "Model \"{{item}}\" berhasil diunduh dan diverifikasi"
     }
   }
 }

View File

@@ -12,6 +12,7 @@
   "showVariants": "Tampilkan Varian",
   "useModel": "Gunakan model ini",
   "downloadModel": "Unduh model",
+  "tools": "Alat",
   "searchPlaceholder": "Cari model di Hugging Face...",
   "joyride": {
     "recommendedModelTitle": "Model yang Direkomendasikan",

View File

@ -0,0 +1,35 @@
{
"title": "Asystenci",
"editAssistant": "Edytuj Asystenta",
"deleteAssistant": "Usuń Asystenta",
"deleteConfirmation": "Usuń Asystenta",
"deleteConfirmationDesc": "Na pewno chcesz usunąć tego asystenta? Tej operacji nie można cofnąć.",
"cancel": "Anuluj",
"delete": "Usuń",
"addAssistant": "Dodaj Asystenta",
"emoji": "Emotikon",
"name": "Nazwa",
"enterName": "Wprowadź nazwę",
"nameRequired": "Nazwa jest wymagana",
"description": "Opis (opcjonalnie)",
"enterDescription": "Wprowadź opis",
"instructions": "Instrukcje",
"enterInstructions": "Wprowadź instrukcje",
"predefinedParameters": "Predefiniowane Parametry",
"parameters": "Parametry",
"key": "Klucz",
"value": "Wartość",
"stringValue": "Napis",
"numberValue": "Liczba",
"booleanValue": "Wartość Logiczna",
"jsonValue": "JSON",
"trueValue": "Prawda",
"falseValue": "Fałsz",
"jsonValuePlaceholder": "Wartość JSON",
"save": "Zapisz",
"createNew": "Utwórz Nowego Asystenta",
"personality": "Osobowość",
"capabilities": "Możliwości",
"instructionsDateHint": "Wskazówka: Użyj {{current_date}} do wstawienia dzisiejszej daty.",
"maxToolSteps": "Maks. liczba kroków narzędzia"
}

View File

@ -0,0 +1,10 @@
{
"welcome": "Cześć, jak się masz?",
"description": "Jak mogę Ci pomóc?",
"status": {
"empty": "Nie znaleziono czatów"
},
"sendMessage": "Wyślij Wiadomość",
"newConversation": "Nowa Rozmowa",
"clearHistory": "Wyczyść Historię"
}

View File

@ -0,0 +1,278 @@
{
"assistants": "Asystenci",
"hardware": "Sprzęt",
"mcp-servers": "Serwery MCP",
"local_api_server": "Lokalny Serwer API",
"https_proxy": "Pośrednik HTTPS",
"extensions": "Rozszerzenia",
"general": "Ogólne",
"settings": "Ustawienia",
"modelProviders": "Dostawcy Modeli",
"appearance": "Wygląd",
"privacy": "Prywatność",
"keyboardShortcuts": "Skróty",
"newChat": "Nowy Czat",
"favorites": "Ulubione",
"recents": "Ostatnie",
"hub": "Centrum Modeli",
"helpSupport": "Pomoc i Obsługa",
"helpUsImproveJan": "Pomóż nam ulepszyć Jan",
"unstarAll": "Usuń Wszystkim Gwiazdkę",
"unstar": "Usuń Gwiazdkę",
"deleteAll": "Usuń Wszystko",
"star": "Dodaj Gwiazdkę",
"rename": "Zmień Nazwę",
"delete": "Usuń",
"copied": "Skopiowano!",
"dataFolder": "Katalog Danych",
"others": "Inne",
"language": "Język",
"reset": "Przywróć",
"search": "Szukaj",
"name": "Nazwa",
"cancel": "Anuluj",
"create": "Utwórz",
"save": "Zapisz",
"edit": "Edytuj",
"copy": "Skopiuj",
"back": "Wstecz",
"close": "Zamknij",
"next": "Dalej",
"finish": "Zakończ",
"skip": "Pomiń",
"allow": "Pozwól",
"deny": "Odmów",
"start": "Uruchom",
"stop": "Zatrzymaj",
"preview": "Podgląd",
"compactWidth": "Zwarta Szerokość",
"fullWidth": "Pełna Szerokość",
"dark": "Ciemny",
"light": "Jasny",
"system": "Systemowy",
"auto": "Automatycznie",
"english": "Angielski",
"medium": "Średni",
"newThread": "Nowy Wątek",
"noResultsFound": "Brak wyników",
"noThreadsYet": "Brak wątków",
"noThreadsYetDesc": "Rozpocznij nową rozmowę aby zobaczyć tutaj historię wątków.",
"downloads": "Pobrane",
"downloading": "Pobieranie",
"cancelDownload": "Anuluj Pobieranie",
"downloadCancelled": "Pobieranie anulowane",
"downloadComplete": "Pobieranie zakończone",
"thinking": "Myślenie…",
"thought": "Myśl",
"callingTool": "Używanie narzędzia",
"completed": "Zakończono",
"image": "Obraz",
"vision": "Wizja",
"embeddings": "Osadzenia",
"tools": "Narzędzia",
"webSearch": "Szukanie w Sieci",
"reasoning": "Rozumowanie",
"selectAModel": "Wybierz Model",
"noToolsAvailable": "Brak narzędzi",
"noModelsFoundFor": "Brak modeli dla \"{{searchValue}}\"",
"customAvatar": "Własny awatar",
"editAssistant": "Edytuj Asystenta",
"jan": "Jan",
"metadata": "Metadane",
"regenerate": "Odtwórz",
"threadImage": "Obraz wątku",
"editMessage": "Edytuj Wiadomość",
"deleteMessage": "Usuń Wiadomość",
"deleteThread": "Usuń Wątek",
"renameThread": "Zmień Nazwę Wątku",
"threadTitle": "Tytuł Wątku",
"deleteAllThreads": "Usuń Wszystkie Wątki",
"allThreadsUnfavorited": "Usunięto wszystkie wątki z ulubionych",
"deleteAllThreadsConfirm": "Na pewno chcesz usunąć wszystkie wątki? Tej operacji nie można cofnąć.",
"addProvider": "Dodaj Dostawcę",
"addOpenAIProvider": "Dodaj Dostawcę OpenAI",
"enterNameForProvider": "Wprowadź nazwę dostawcy",
"providerAlreadyExists": "Dostawca o nazwie \"{{name}}\" już istnieje. Proszę podać inną nazwę.",
"adjustFontSize": "Dostosuj Rozmiar Fontu",
"changeLanguage": "Zmień Język",
"editTheme": "Edytuj schemat kolorystyczny",
"editCodeBlockStyle": "Edytuj styl bloków kodu",
"editServerHost": "Edytuj host serwera",
"pickColorWindowBackground": "Wybierz kolor tła okna",
"pickColorAppMainView": "Wybierz kolor głównego widoku aplikacji",
"pickColorAppPrimary": "Wybierz kolor dominujący aplikacji",
"pickColorAppAccent": "Wybierz kolor akcentowy",
"pickColorAppDestructive": "Wybierz kolor destrukcyjny",
"apiKeyRequired": "Wymagany jest klucz API",
"enterTrustedHosts": "Wprowadź zaufane hosty",
"placeholder": {
"chatInput": "Zapytaj mnie o cokolwiek…"
},
"confirm": "Potwierdź",
"loading": "Wczytywanie…",
"error": "Błąd",
"success": "Sukces",
"warning": "Ostrzeżenie",
"noResultsFoundDesc": "Nie znaleziono pasujących czatów. Spróbuj innego słowa kluczowego.",
"searchModels": "Szukaj modeli…",
"searchStyles": "Szukaj stylów…",
"createAssistant": "Utwórz Asystenta",
"enterApiKey": "Wprowadź klucz API",
"scrollToBottom": "Przewiń na sam dół",
"generateAiResponse": "Wygeneruj odpowiedź SI",
"addModel": {
"title": "Dodaj Model",
"modelId": "Identyfikator Modelu",
"enterModelId": "Wprowadź identyfikator modelu",
"addModel": "Dodaj Model",
"description": "Dodaj nowy model do dostawcy",
"exploreModels": "Zobacz listę modeli dostawcy"
},
"mcpServers": {
"editServer": "Edytuj Serwer",
"addServer": "Dodaj Serwer",
"serverName": "Nazwa Serwera",
"enterServerName": "Wprowadź nazwę serwera",
"command": "Polecenie",
"enterCommand": "Wprowadź polecenie",
"arguments": "Argumenty",
"argument": "{{index}}. argument",
"envVars": "Zmienne Środowiskowe",
"key": "Klucz",
"value": "Wartość",
"save": "Zapisz"
},
"deleteServer": {
"title": "Usuń Serwer",
"delete": "Usuń"
},
"editJson": {
"errorParse": "Błąd parsowania JSON",
"errorPaste": "Błąd wklejania JSON",
"errorFormat": "Niepoprawny format JSON",
"titleAll": "Edytuj Ustawienia Wszystkich Serwerów",
"placeholder": "Wprowadź konfigurację JSON…",
"save": "Zapisz"
},
"editModel": {
"title": "Edytuj Model: {{modelId}}",
"description": "Ustaw możliwości modelu za pomocą poniższych przełączników.",
"capabilities": "Możliwości",
"tools": "Narzędzia",
"vision": "Wizja",
"embeddings": "Osadzenia",
"notAvailable": "Jeszcze nie dostępne"
},
"outOfContextError": {
"truncateInput": "Przytnij Wejście",
"title": "Błąd kontekstu",
"description": "Temu czatowi zaczyna brakować pamięci SI. To trochę jak tablica, która jest już zapisana. Można udostępnić więcej pamięci (zwiększyć rozmiar kontekstu), ale to zwiększy zużycie pamięci komputera. Można też przyciąć wejście, co oznacza zapomnienie części historii czatu, celem zwolnienia miejsca dla nowych wiadomości.",
"increaseContextSizeDescription": "Czy chcesz zwiększyć rozmiar kontekstu?",
"increaseContextSize": "Zwiększ Rozmiar Kontekstu"
},
"toolApproval": {
"title": "Prośba o Zatwierdzenie Narzędzia",
"description": "Asystent chce użyć narzędzia <strong>{{toolName}}</strong>",
"securityNotice": "Zgadzaj się tylko na narzędzia, którym ufasz. Narzędzia mają dostęp do Twojego systemu i danych.",
"deny": "Odmów",
"allowOnce": "Pozwól Raz",
"alwaysAllow": "Zawsze Pozwalaj"
},
"deleteModel": {
"title": "Usuń Model: {{modelId}}",
"description": "Na pewno chcesz usunąć ten model? Tej operacji nie można cofnąć.",
"success": "Model {{modelId}} został usunięty.",
"cancel": "Anuluj",
"delete": "Usuń"
},
"deleteProvider": {
"title": "Usuń Dostawcę",
"description": "Usuń dostawcę i wszystkie jego modele. Tej operacji nie można cofnąć.",
"success": "Dostawca {{provider}} został usunięty.",
"confirmTitle": "Usuń Dostawcę: {{provider}}",
"confirmDescription": "Na pewno chcesz usunąć tego dostawcę? Tej operacji nie można cofnąć.",
"cancel": "Anuluj",
"delete": "Usuń"
},
"modelSettings": {
"title": "Ustawienia Modelu - {{modelId}}",
"description": "Zmień ustawienia modelu aby poprawić jego wydajność i zachowanie"
},
"dialogs": {
"changeDataFolder": {
"title": "Zmień Położenie Katalogu Danych",
"description": "Na pewno chcesz zmienić położenie katalogu danych? Przeniesie to dane do nowego katalogu i uruchomi ponownie aplikację.",
"currentLocation": "Obecne Położenie:",
"newLocation": "Nowe Położenie:",
"cancel": "Anuluj",
"changeLocation": "Zmień Położenie"
},
"deleteAllThreads": {
"title": "Usuń Wszystkie Wątki",
"description": "Wszystkie wątki zostaną usunięte. Tej operacji nie można cofnąć."
},
"deleteThread": {
"description": "Na pewno chcesz usunąć ten wątek? Tej operacji nie można cofnąć."
},
"editMessage": {
"title": "Edytuj Wiadomość"
},
"messageMetadata": {
"title": "Metadane Wiadomości"
}
},
"toast": {
"allThreadsUnfavorited": {
"title": "Wszystkie Wątki Usunięte z Ulubionych",
"description": "Wszystkie wątki zostały usunięte z ulubionych."
},
"deleteAllThreads": {
"title": "Usuń Wszystkie Wątki",
"description": "Wszystkie wątki zostały usunięte."
},
"renameThread": {
"title": "Zmień Tytuł Wątku",
"description": "Tytuł wątku został zmieniony na '{{title}}'."
},
"deleteThread": {
"title": "Usuń Wątek",
"description": "Wątek został usunięty."
},
"editMessage": {
"title": "Edytuj Wiadomość",
"description": "Pomyślnie edytowano wiadomość. Proszę poczekać na odpowiedź modelu."
},
"appUpdateDownloaded": {
"title": "Aktualizacja Aplikacji Pobrana",
"description": "Pomyślnie pobrano aktualizację aplikacji."
},
"appUpdateDownloadFailed": {
"title": "Błąd Pobierania Aktualizacji Aplikacji",
"description": "Nie udało się pobrać aktualizacji aplikacji. Proszę spróbować później."
},
"downloadComplete": {
"title": "Ukończono Pobieranie",
"description": "Model {{modelId}} został pobrany."
},
"downloadCancelled": {
"title": "Anulowano Pobieranie",
"description": "Pobieranie zostało anulowane."
},
"downloadFailed": {
"title": "Błąd Pobierania",
"description": "Błąd pobierania {{item}}"
},
"modelValidationStarted": {
"title": "Weryfikacja Modelu",
"description": "Pomyślnie pobrano model \"{{modelId}}\". Weryfikowanie integralności..."
},
"modelValidationFailed": {
"title": "Błąd Weryfikacji Modelu",
"description": "Pobrany model \"{{modelId}}\" nie przeszedł weryfikacji integralności i został usunięty. Plik może być uszkodzony lub zmodyfikowany."
},
"downloadAndVerificationComplete": {
"title": "Pobieranie Ukończone",
"description": "Model \"{{item}}\" został pobrany i pomyślnie zweryfikowany"
}
}
}

View File

@ -0,0 +1,31 @@
{
"sortNewest": "Najnowsze",
"sortMostDownloaded": "Najczęściej pobierane",
"use": "Użyj",
"download": "Pobierz",
"downloaded": "Pobrany",
"loadingModels": "Wczytywanie modeli…",
"noModels": "Nie znaleziono żadnych modeli",
"by": "Od",
"downloads": "Pobrane",
"variants": "Warianty",
"showVariants": "Pokaż warianty",
"useModel": "Użyj tego modelu",
"downloadModel": "Pobierz model",
"tools": "Narzędzia",
"searchPlaceholder": "Szukaj modeli na Hugging Face…",
"joyride": {
"recommendedModelTitle": "Polecany Model",
"recommendedModelContent": "Przeglądaj i pobieraj silne modele SI od różnych dostawców, wszystko w jednym miejscu. Warto zacząć od Jan-Nano - modelu zoptymalizowanego do wywoływania funkcji, integracji z narzędziami i możliwości badawczych. Jest on idealny do budowania interaktywnych agentów SI.",
"downloadInProgressTitle": "Trwa Pobieranie",
"downloadInProgressContent": "Twój model jest teraz pobierany. Tutaj możesz śledzić postęp. Po zakończeniu, model będzie gotowy do użytku.",
"downloadModelTitle": "Pobierz Model",
"downloadModelContent": "Kliknij na przycisk 'Pobierz' aby rozpocząć pobieranie modelu.",
"back": "Wstecz",
"close": "Zamknij",
"lastWithDownload": "Ostatni Pobrany",
"last": "Ostatni",
"next": "Dalej",
"skip": "Pomiń"
}
}

View File

@ -0,0 +1,3 @@
{
"noLogs": "Brak dzienników"
}

View File

@ -0,0 +1,43 @@
{
"editServer": "Edytuj Serwer MCP",
"addServer": "Dodaj Serwer MCP",
"serverName": "Nazwa Serwera",
"enterServerName": "Wprowadź nazwę serwera",
"command": "Polecenie",
"enterCommand": "Wprowadź polecenie (uvx lub npx)",
"arguments": "Argumenty",
"argument": "{{index}}. argument",
"envVars": "Zmienne Środowiskowe",
"key": "Klucz",
"value": "Wartość",
"save": "Zapisz",
"status": "Stan",
"connected": "Połączono",
"disconnected": "Rozłączono",
"deleteServer": {
"title": "Usuń Serwer MCP",
"description": "Na pewno chcesz usunąć serwer MCP {{serverName}}? Tej operacji nie można cofnąć.",
"delete": "Usuń"
},
"editJson": {
"title": "Edytuj JSON Serwera MCP: {{serverName}}",
"titleAll": "Edytuj JSON Wszystkich Serwerów MCP",
"placeholder": "Wprowadź konfigurację JSON",
"errorParse": "Błąd parsowania wstępnych danych",
"errorPaste": "Wprowadzono JSON o niepoprawnym formacie",
"errorFormat": "Niepoprawny format JSON",
"save": "Zapisz"
},
"checkParams": "Proszę sprawdzić parametry we wprowadzeniu.",
"title": "Serwery MCP",
"experimental": "Eksperymentalne",
"editAllJson": "Edytuj JSON Wszystkich Serwerów",
"findMore": "Znajdź więcej serwerów MCP na",
"allowPermissions": "Pozwalaj na Wszystkie Użycia Narzędzi MCP",
"allowPermissionsDesc": "Po włączeniu będzie automatycznie zezwalać na wszystkie użycia narzędzi MCP bez wyświetlania próśb o zgodę.",
"noServers": "Nie znaleziono serwerów MCP",
"args": "Argumenty",
"env": "Zmienne Środowiskowe",
"serverStatusActive": "Pomyślnie aktywowano serwer {{serverKey}}",
"serverStatusInactive": "Pomyślnie dezaktywowano serwer {{serverKey}}"
}

View File

@ -0,0 +1,7 @@
{
"title": "Błąd kontekstu",
"description": "Temu czatowi zaczyna brakować pamięci SI. To trochę jak tablica, która jest już zapisana. Można udostępnić więcej pamięci (zwiększyć rozmiar kontekstu), ale to zwiększy zużycie pamięci komputera. Można też przyciąć wejście, co oznacza zapomnienie części historii czatu, celem zwolnienia miejsca dla nowych wiadomości.",
"increaseContextSizeDescription": "Czy chcesz zwiększyć rozmiar kontekstu?",
"truncateInput": "Przytnij Wejście",
"increaseContextSize": "Zwiększ Rozmiar Kontekstu"
}

View File

@ -0,0 +1,5 @@
{
"addProvider": "Dodaj Dostawcę",
"addOpenAIProvider": "Dodaj Dostawcę OpenAI",
"enterNameForProvider": "Wprowadź nazwę dostawcy"
}

View File

@ -0,0 +1,68 @@
{
"joyride": {
"chooseProviderTitle": "Wybierz Dostawcę",
"chooseProviderContent": "Wybierz swojego dostawcę i upewnij się, że masz odpowiedni klucz API.",
"getApiKeyTitle": "Zdobądź Swój Klucz API",
"getApiKeyContent": "Zaloguj się do panelu sterowania swojego dostawcy i znajdź lub wygeneruj swój klucz API.",
"insertApiKeyTitle": "Wprowadź Swój Klucz API",
"insertApiKeyContent": "Wprowadź swój klucz API, następnie połącz się z dostawcą i go aktywuj.",
"back": "Wstecz",
"close": "Zamknij",
"last": "Ostatni",
"next": "Dalej",
"skip": "Pomiń"
},
"refreshModelsError": "Dostawca musi mieć ustawiony adres URL i klucz API aby pobrać modele.",
"refreshModelsSuccess": "Dodano {{count}} model-e/i od dostawcy {{provider}}.",
"noNewModels": "Nie znaleziono nowych modeli. Wszystkie modele są już dodane.",
"refreshModelsFailed": "Nie udało się pobrać modeli od dostawcy {{provider}}. Proszę sprawdzić poprawność klucza API i adresu URL.",
"models": "Modele",
"refreshing": "Odświeżanie…",
"refresh": "Odśwież",
"import": "Importuj",
"importModelSuccess": "Pomyślnie zaimportowano model dostawcy {{provider}}.",
"importModelError": "Nie udało się zaimportować modelu:",
"stop": "Zatrzymaj",
"start": "Uruchom",
"noModelFound": "Brak modeli",
"noModelFoundDesc": "Tutaj będą wymienione dostępne modele. Odwiedź Centrum Modeli żeby jakiś pobrać.",
"configuration": "Ustawienia",
"apiEndpoint": "Punkt Końcowy API",
"testConnection": "Sprawdź Połączenie",
"addModel": {
"title": "Dodaj Nowy Model",
"description": "Dodaj nowy model do dostawcy {{provider}}.",
"modelId": "Identyfikator Modelu",
"enterModelId": "Wprowadź identyfikator modelu",
"exploreModels": "Zobacz listę modeli dostawcy {{provider}}",
"addModel": "Dodaj Model"
},
"deleteModel": {
"title": "Usuń Model: {{modelId}}",
"description": "Na pewno chcesz usunąć ten model? Tej operacji nie można cofnąć.",
"success": "Model {{modelId}} został usunięty.",
"cancel": "Anuluj",
"delete": "Usuń"
},
"deleteProvider": {
"title": "Usuń Dostawcę",
"description": "Usuń dostawcę i wszystkie jego modele. Tej operacji nie można cofnąć.",
"success": "Dostawca {{provider}} został usunięty.",
"confirmTitle": "Usuń Dostawcę: {{provider}}",
"confirmDescription": "Na pewno chcesz usunąć tego dostawcę? Tej operacji nie można cofnąć.",
"cancel": "Anuluj",
"delete": "Usuń"
},
"editModel": {
"title": "Edytuj Model: {{modelId}}",
"description": "Ustaw możliwości modelu za pomocą poniższych przełączników.",
"capabilities": "Możliwości",
"tools": "Narzędzia",
"vision": "Wizja",
"embeddings": "Osadzenia",
"notAvailable": "Jeszcze nie dostępne"
},
"addProvider": "Dodaj Dostawcę",
"addOpenAIProvider": "Dodaj Dostawcę OpenAI",
"enterNameForProvider": "Wprowadź nazwę dostawcy"
}

View File

@ -0,0 +1,253 @@
{
"autoDownload": "Automatycznie pobieraj nowe aktualizacje",
"checkForUpdates": "Sprawdź Dostępność Aktualizacji",
"checkForUpdatesDesc": "Sprawdza czy dostępna jest nowa wersja Jan.",
"checkingForUpdates": "Sprawdzanie dostępności aktualizacji…",
"noUpdateAvailable": "Używasz najnowszej wersji",
"devVersion": "Wykryto wersję deweloperską",
"updateError": "Nie udało się sprawdzić dostępności aktualizacji",
"changeLocation": "Zmień Położenie",
"copied": "Skopiowano",
"copyPath": "Skopiuj Ścieżkę",
"openLogs": "Otwórz Dzienniki",
"revealLogs": "Wyświetl Dzienniki",
"showInFinder": "Pokaż w Finderze",
"showInFileExplorer": "Pokaż w Explorerze",
"openContainingFolder": "Otwórz Katalog Nadrzędny",
"failedToRelocateDataFolder": "Błąd zmiany katalogu danych",
"failedToRelocateDataFolderDesc": "Nie udało się przenieść katalogu danych. Proszę spróbować później.",
"factoryResetTitle": "Przywróć Ustawienia Fabryczne",
"factoryResetDesc": "Przywraca domyślne ustawienia aplikacji. Tej operacji nie można cofnąć. Zalecane przy uszkodzeniu danych aplikacji.",
"cancel": "Anuluj",
"reset": "Przywróć",
"resources": "Zasoby",
"documentation": "Dokumentacja",
"documentationDesc": "Dowiedz się jak używać aplikacji Jan i odkryj jej funkcje.",
"viewDocs": "Zobacz Dokumentację",
"releaseNotes": "Informacje o Wydaniu",
"releaseNotesDesc": "Zobacz co nowego w najnowszej wersji Jan.",
"viewReleases": "Zobacz Wydania",
"community": "Społeczność",
"github": "GitHub",
"githubDesc": "Pomóż w rozwijaniu Jan.",
"discord": "Discord",
"discordDesc": "Dołącz do naszej społeczności aby podyskutować lub uzyskać pomoc.",
"support": "Obsługa",
"reportAnIssue": "Zgłoś Problem",
"reportAnIssueDesc": "Znalazłeś/-aś błąd? Pomóż nam, zgłaszając go na GitHubie.",
"reportIssue": "Zgłoś Problem",
"credits": "Podziękowania",
"creditsDesc1": "Aplikacja Jan jest rozwijana z ❤️ przez Zespół Menlo.",
"creditsDesc2": "Szczególne podziękowania dla naszych otwartoźródłowych zależności - zwłaszcza llama.cpp i Tauri - oraz dla naszej wspaniałej społeczności SI.",
"appVersion": "Wersja Aplikacji",
"dataFolder": {
"appData": "Katalog Danych",
"appDataDesc": "Domyślne położenie wiadomości i innych danych użytkownika.",
"appLogs": "Dzienniki Aplikacji",
"appLogsDesc": "Zobacz szczegółowe dzienniki aplikacji."
},
"others": {
"spellCheck": "Sprawdzanie Pisowni",
"spellCheckDesc": "Włącz sprawdzanie pisowni w wątkach.",
"resetFactory": "Przywróć Ustawienia Fabryczne",
"resetFactoryDesc": "Przywróć aplikację do stanu początkowego, wymazując wszystkie modele i historię czatów. Tej operacji nie można cofnąć. Zalecane tylko przy uszkodzeniach danych aplikacji."
},
"shortcuts": {
"application": "Aplikacja",
"newChat": "Nowy Czat",
"newChatDesc": "Rozpocznij nowy czat.",
"toggleSidebar": "Przełącz Pasek Boczny",
"toggleSidebarDesc": "Pokaż lub ukryj pasek boczny.",
"zoomIn": "Przybliż",
"zoomInDesc": "Zwiększ poziom przybliżenia.",
"zoomOut": "Oddal",
"zoomOutDesc": "Zmniejsz poziom przybliżenia.",
"chat": "Czat",
"sendMessage": "Wyślij Wiadomość",
"sendMessageDesc": "Wyślij bieżącą wiadomość.",
"enter": "Enter",
"newLine": "Nowa Linia",
"newLineDesc": "Wprowadź nową linię.",
"shiftEnter": "Shift + Enter",
"navigation": "Nawigacja",
"goToSettings": "Przejdź do Ustawień",
"goToSettingsDesc": "Otwórz ustawienia."
},
"appearance": {
"title": "Wygląd",
"theme": "Schemat Kolorystyczny",
"themeDesc": "Dopasuj do schematu systemowego.",
"fontSize": "Rozmiar Fontu",
"fontSizeDesc": "Dostosuj rozmiar fontu w aplikacji.",
"windowBackground": "Tło Okna",
"windowBackgroundDesc": "Ustaw kolor tła okna aplikacji.",
"appMainView": "Główny Obszar Aplikacji",
"appMainViewDesc": "Ustaw kolor tła głównego obszaru aplikacji.",
"primary": "Główny",
"primaryDesc": "Ustaw kolor główny komponentów interfejsu użytkownika.",
"accent": "Akcentowy",
"accentDesc": "Ustaw kolor akcentowy wyróżnionych elementów interfejsu użytkownika.",
"destructive": "Destrukcyjny",
"destructiveDesc": "Ustaw kolor operacji destrukcyjnych.",
"resetToDefault": "Przywróć Domyślne",
"resetToDefaultDesc": "Przywróć domyślne ustawienia wyglądu.",
"resetAppearanceSuccess": "Pomyślnie Przywrócono Ustawienia Wyglądu",
"resetAppearanceSuccessDesc": "Wszystkie ustawienia wyglądu zostały przywrócone do wartości domyślnych.",
"chatWidth": "Szerokość Czatu",
"chatWidthDesc": "Ustaw szerokość komponentu czatu.",
"codeBlockTitle": "Blok Kodu",
"codeBlockDesc": "Wybierz styl kolorowania składni w blokach kodu.",
"showLineNumbers": "Pokazuj Numery Linii",
"showLineNumbersDesc": "Pokazuj numery linii w blokach kodu.",
"resetCodeBlockStyle": "Przywróć Domyślny Styl Bloków Kodu",
"resetCodeBlockStyleDesc": "Przywraca domyślny styl bloku kodu.",
"resetCodeBlockSuccess": "Pomyślnie Przywrócono Styl Bloku Kodu",
"resetCodeBlockSuccessDesc": "Styl bloku kodu został przywrócony do ustawień domyślnych."
},
"hardware": {
"os": "System Operacyjny",
"name": "Nazwa",
"version": "Wersja",
"cpu": "CPU",
"model": "Model",
"architecture": "Architektura",
"cores": "Rdzenie",
"instructions": "Instrukcje",
"usage": "Użycie",
"memory": "Pamięć",
"totalRam": "Całkowita RAM",
"availableRam": "Dostępna RAM",
"vulkan": "Vulkan",
"enableVulkan": "Włącz Vulkan",
"enableVulkanDesc": "Używaj API Vulkan do przyspieszenia GPU. Nie włączaj Vulkana jeśli masz GPU Nvidia, ponieważ może to spowodować problemy z kompatybilnością.",
"gpus": "GPU",
"noGpus": "Nie wykryto żadnego GPU",
"vram": "VRAM",
"freeOf": "wolnych z",
"driverVersion": "Wersja Sterownika",
"computeCapability": "Możliwości Obliczeniowe",
"systemMonitor": "Monitor Systemowy"
},
"httpsProxy": {
"proxy": "Pośrednik",
"proxyUrl": "URL Pośrednika",
"proxyUrlDesc": "URL i port serwera pośredniczącego.",
"proxyUrlPlaceholder": "http://proxy.example.com:8080",
"authentication": "Uwierzytelnianie",
"authenticationDesc": "Dane uwierzytelniające serwera pośredniczącego, jeśli wymagane.",
"username": "Nazwa Użytkownika",
"password": "Hasło",
"noProxy": "Bez Pośrednika",
"noProxyDesc": "Lista hostów pomijających serwer pośredniczący, oddzielonych przecinkami.",
"noProxyPlaceholder": "localhost,127.0.0.1,.local",
"sslVerification": "Weryfikacja SSL",
"ignoreSsl": "Ignoruj Certyfikaty SSL",
"ignoreSslDesc": "Pozwala używać samodzielnie podpisanych lub niezweryfikowanych certyfikatów. Może być wymagane przy niektórych serwerach pośredniczących, ale też obniża poziom bezpieczeństwa. Należy używać wyłącznie z zaufanymi serwerami pośredniczącymi.",
"proxySsl": "SSL Pośrednika",
"proxySslDesc": "Sprawdzaj certyfikaty SSL podczas łączenia z pośrednikiem.",
"proxyHostSsl": "SSL Hosta Pośrednika",
"proxyHostSslDesc": "Sprawdzaj certyfikaty SSL hosta pośrednika.",
"peerSsl": "SSL Połączeń Równorzędnych",
"peerSslDesc": "Sprawdzaj certyfikaty SSL połączeń równorzędnych.",
"hostSsl": "SSL Hosta",
"hostSslDesc": "Sprawdzaj certyfikaty SSL hostów docelowych."
},
"localApiServer": {
"title": "Lokalny Serwer API",
"description": "Używaj lokalnego serwera kompatybilnego z API OpenAI.",
"startServer": "Uruchom Serwer",
"stopServer": "Zatrzymaj Serwer",
"serverLogs": "Dzienniki Serwera",
"serverLogsDesc": "Wyświetl szczegółowe dzienniki lokalnego serwera API.",
"openLogs": "Otwórz Dzienniki",
"startupConfiguration": "Konfiguracja Startowa",
"runOnStartup": "Domyślnie włączaj przy starcie",
"runOnStartupDesc": "Automatycznie uruchamiaj lokalny serwer API podczas uruchamiania aplikacji.",
"serverConfiguration": "Konfiguracja Serwera",
"serverHost": "Host",
"serverHostDesc": "Adres sieciowy serwera.",
"serverPort": "Port",
"serverPortDesc": "Numer portu serwera API.",
"apiPrefix": "Prefiks API",
"apiPrefixDesc": "Prefiks ścieżki punktu końcowego API.",
"apiKey": "Klucz API",
"apiKeyDesc": "Uwierzytelniaj żądania sieciowe kluczem API.",
"trustedHosts": "Zaufane Hosty",
"trustedHostsDesc": "Hosty mające dostęp do serwera, oddzielone przecinkami.",
"advancedSettings": "Ustawienia Zaawansowane",
"cors": "Cross-Origin Resource Sharing (CORS)",
"corsDesc": "Pozwalaj na żądania cross-origin do serwera API.",
"verboseLogs": "Szczegółowe Wpisy Dzienników Serwera",
"verboseLogsDesc": "Włącz szczegółowe wpisy dzienników serwera na potrzeby rozwiązywania problemów."
},
"privacy": {
"analytics": "Dane Analityczne",
"helpUsImprove": "Pomóż nam poprawiać aplikację",
"helpUsImproveDesc": "Aby pomóc w rozwijaniu Jan, możesz włączyć wysyłanie anonimowych danych analitycznych, takich jak użycie funkcji i liczba użytkowników. Nigdy nie zbieramy Twoich czatów lub danych osobowych.",
"privacyPolicy": "Masz pełną kontrolę nad swoimi danymi. Dowiedz się więcej w naszej Polityce Prywatności.",
"analyticsDesc": "Aby usprawnić Jan, musimy dowiedzieć się jak jest używany - ale to tylko z Twoją pomocą. Możesz zmienić to ustawienie w dowolnym momencie.",
"privacyPromises": "Twój wybór tutaj nie będzie miał wpływu na nasze kluczowe obietnice prywatności:",
"promise1": "Twoje rozmowy zostają prywatne i na Twoim urządzeniu",
"promise2": "Nigdy nie zbieramy Twoich danych osobowych lub treści czatów",
"promise3": "Wszystkie dane są anonimowe i zagregowane",
"promise4": "W dowolnym momencie możesz zrezygnować z wysyłania danych analitycznych bez obniżenia funkcjonalności aplikacji",
"promise5": "Jesteśmy transparentni na temat tego, jakie dane zbieramy i dlaczego"
},
"general": {
"showInFinder": "Pokaż w Finderze",
"showInFileExplorer": "Pokaż w Explorerze",
"openContainingFolder": "Otwórz Katalog Nadrzędny",
"failedToRelocateDataFolder": "Błąd zmiany katalogu danych",
"failedToRelocateDataFolderDesc": "Nie udało się przenieść katalogu danych. Proszę spróbować później.",
"devVersion": "Wykryto wersję deweloperską",
"noUpdateAvailable": "Używasz najnowszej wersji",
"updateError": "Nie udało się sprawdzić dostępności aktualizacji",
"appVersion": "Wersja Aplikacji",
"checkForUpdates": "Sprawdź Dostępność Aktualizacji",
"checkForUpdatesDesc": "Sprawdza czy dostępna jest nowa wersja Jan.",
"checkingForUpdates": "Sprawdzanie dostępności aktualizacji…",
"copied": "Skopiowano",
"copyPath": "Skopiuj Ścieżkę",
"changeLocation": "Zmień Położenie",
"openLogs": "Otwórz Dzienniki",
"revealLogs": "Wyświetl Dzienniki",
"factoryResetTitle": "Przywróć Ustawienia Fabryczne",
"factoryResetDesc": "Przywraca domyślne ustawienia aplikacji. Tej operacji nie można cofnąć. Zalecane przy uszkodzeniu danych aplikacji.",
"cancel": "Anuluj",
"reset": "Przywróć",
"huggingfaceToken": "Token HuggingFace",
"huggingfaceTokenDesc": "Twój token do API modeli HuggingFace.",
"resources": "Zasoby",
"documentation": "Dokumentacja",
"documentationDesc": "Dowiedz się jak używać aplikacji Jan i odkryj jej funkcje.",
"viewDocs": "Zobacz Dokumentację",
"releaseNotes": "Informacje o Wydaniu",
"releaseNotesDesc": "Zobacz co nowego w najnowszej wersji Jan.",
"viewReleases": "Zobacz Wydania",
"community": "Społeczność",
"github": "GitHub",
"githubDesc": "Pomóż w rozwijaniu Jan.",
"discord": "Discord",
"discordDesc": "Dołącz do naszej społeczności aby podyskutować lub uzyskać pomoc.",
"support": "Obsługa",
"reportAnIssue": "Zgłoś Problem",
"reportAnIssueDesc": "Znalazłeś/-aś błąd? Pomóż nam, zgłaszając go na GitHubie.",
"reportIssue": "Zgłoś Problem",
"credits": "Podziękowania",
"creditsDesc1": "Aplikacja Jan jest rozwijana z ❤️ przez Zespół Menlo.",
"creditsDesc2": "Szczególne podziękowania dla naszych otwartoźródłowych zależności - zwłaszcza llama.cpp i Tauri - oraz dla naszej wspaniałej społeczności SI."
},
"extensions": {
"title": "Rozszerzenia"
},
"dialogs": {
"changeDataFolder": {
"title": "Zmień Położenie Katalogu Danych",
"description": "Na pewno chcesz zmienić położenie katalogu danych? Przeniesie to dane do nowego katalogu i uruchomi ponownie aplikację.",
"currentLocation": "Obecne Położenie:",
"newLocation": "Nowe Położenie:",
"cancel": "Anuluj",
"changeLocation": "Zmień Położenie"
}
}
}

View File

@ -0,0 +1,6 @@
{
"welcome": "Witaj w Jan",
"description": "Aby rozpocząć, musisz albo pobrać lokalny model SI, albo połączyć się z modelem w chmurze przy użyciu klucza API",
"localModel": "Ustaw lokalny model",
"remoteProvider": "Ustaw dostawcę modeli zdalnych"
}

View File

@ -0,0 +1,28 @@
{
"title": "Monitor Systemowy",
"cpuUsage": "Użycie CPU",
"model": "Model",
"cores": "Rdzenie",
"architecture": "Architektura",
"currentUsage": "Obecne Użycie",
"memoryUsage": "Użycie Pamięci",
"totalRam": "Całkowita RAM",
"availableRam": "Dostępna RAM",
"usedRam": "Używana RAM",
"runningModels": "Uruchomione Modele",
"noRunningModels": "Obecnie żaden model nie jest uruchomiony",
"provider": "Dostawca",
"uptime": "Czas Uruchomienia",
"actions": "Operacje",
"stop": "Zatrzymaj",
"activeGpus": "Aktywne GPU",
"noGpus": "Nie wykryto żadnej GPU",
"noActiveGpus": "Brak aktywnej GPU. Wszystkie GPU są obecnie wyłączone.",
"vramUsage": "Użycie VRAM",
"driverVersion": "Wersja Sterownika:",
"computeCapability": "Możliwości Obliczeniowe:",
"active": "Aktywne",
"performance": "Wydajność",
"resources": "Zasoby",
"refresh": "Odśwież"
}

View File

@ -0,0 +1,12 @@
{
"title": "Prośba o Użycie Narzędzia",
"description": "Asystent chce użyć narzędzia: <strong>{{toolName}}</strong>",
"securityNotice": "<strong>Ostrzeżenie Bezpieczeństwa:</strong> Złośliwe narzędzia lub treści rozmowy mają potencjał nakłonić agenta do szkodliwych działań. Dokładnie przejrzyj każdą prośbę o użycie narzędzia przed wyrażeniem zgody.",
"deny": "Odmów",
"allowOnce": "Pozwól Raz",
"alwaysAllow": "Zawsze Pozwalaj",
"permissions": "Pozwolenia",
"approve": "Zaakceptuj",
"reject": "Odrzuć",
"parameters": "Parametry Narzędzia"
}

View File

@ -0,0 +1,11 @@
{
"toolApproval": {
"title": "Wymagane Pozwolenie na Narzędzie",
"description": "Asystent chce użyć",
"securityNotice": "Złośliwe narzędzia lub treści rozmowy mają potencjał nakłonić agenta do szkodliwych działań. Dokładnie przejrzyj każdą prośbę o użycie narzędzia przed wyrażeniem zgody.",
"deny": "Odmów",
"allowOnce": "Pozwól Raz",
"alwaysAllow": "Zawsze Pozwalaj",
"parameters": "Parametry Narzędzia"
}
}

View File

@ -0,0 +1,10 @@
{
"newVersion": "Nowa Wersja: {{version}}",
"updateAvailable": "Dostępna Aktualizacja",
"nightlyBuild": "Kompilacja Nocna",
"showReleaseNotes": "Pokaż Informacje o Wydaniu",
"hideReleaseNotes": "Ukryj Informacje o Wydaniu",
"remindMeLater": "Przypomnij Mi Później",
"downloading": "Pobieranie…",
"updateNow": "Zaktualizuj Teraz"
}

View File

@ -30,5 +30,6 @@
"createNew": "Tạo Trợ lý Mới", "createNew": "Tạo Trợ lý Mới",
"personality": "Tính cách", "personality": "Tính cách",
"capabilities": "Khả năng", "capabilities": "Khả năng",
"instructionsDateHint": "Mẹo: Dùng {{current_date}} để chèn ngày hôm nay.",
"maxToolSteps": "Bước tối đa của công cụ" "maxToolSteps": "Bước tối đa của công cụ"
} }

View File

@ -249,6 +249,22 @@
"downloadCancelled": { "downloadCancelled": {
"title": "Đã hủy tải xuống", "title": "Đã hủy tải xuống",
"description": "Quá trình tải xuống đã bị hủy" "description": "Quá trình tải xuống đã bị hủy"
},
"downloadFailed": {
"title": "Tải xuống thất bại",
"description": "Tải xuống {{item}} thất bại"
},
"modelValidationStarted": {
"title": "Đang xác thực mô hình",
"description": "Đã tải xuống mô hình \"{{modelId}}\" thành công. Đang xác minh tính toàn vẹn..."
},
"modelValidationFailed": {
"title": "Xác thực mô hình thất bại",
"description": "Mô hình đã tải xuống \"{{modelId}}\" không vượt qua kiểm tra tính toàn vẹn và đã bị xóa. Tệp có thể bị hỏng hoặc bị giả mạo."
},
"downloadAndVerificationComplete": {
"title": "Tải xuống hoàn tất",
"description": "Mô hình \"{{item}}\" đã được tải xuống và xác minh thành công"
}
}
}

View File

@ -12,6 +12,7 @@
"showVariants": "Hiển thị biến thể", "showVariants": "Hiển thị biến thể",
"useModel": "Sử dụng mô hình này", "useModel": "Sử dụng mô hình này",
"downloadModel": "Tải xuống mô hình", "downloadModel": "Tải xuống mô hình",
"tools": "Công cụ",
"searchPlaceholder": "Tìm kiếm các mô hình trên Hugging Face...", "searchPlaceholder": "Tìm kiếm các mô hình trên Hugging Face...",
"joyride": { "joyride": {
"recommendedModelTitle": "Mô hình được đề xuất", "recommendedModelTitle": "Mô hình được đề xuất",

View File

@ -30,5 +30,6 @@
"createNew": "创建新助手", "createNew": "创建新助手",
"personality": "个性", "personality": "个性",
"capabilities": "能力", "capabilities": "能力",
"instructionsDateHint": "提示:使用 {{current_date}} 插入今天的日期。",
"maxToolSteps": "最大工具步骤" "maxToolSteps": "最大工具步骤"
} }

View File

@ -249,6 +249,22 @@
"downloadCancelled": { "downloadCancelled": {
"title": "下载已取消", "title": "下载已取消",
"description": "下载过程已取消" "description": "下载过程已取消"
},
"downloadFailed": {
"title": "下载失败",
"description": "{{item}} 下载失败"
},
"modelValidationStarted": {
"title": "正在验证模型",
"description": "模型 \"{{modelId}}\" 下载成功。正在验证完整性..."
},
"modelValidationFailed": {
"title": "模型验证失败",
"description": "已下载的模型 \"{{modelId}}\" 未通过完整性验证并已被删除。文件可能损坏或被篡改。"
},
"downloadAndVerificationComplete": {
"title": "下载完成",
"description": "模型 \"{{item}}\" 下载并验证成功"
}
}
}

View File

@ -12,6 +12,7 @@
"showVariants": "显示变体", "showVariants": "显示变体",
"useModel": "使用此模型", "useModel": "使用此模型",
"downloadModel": "下载模型", "downloadModel": "下载模型",
"tools": "工具",
"searchPlaceholder": "在 Hugging Face 上搜索模型...", "searchPlaceholder": "在 Hugging Face 上搜索模型...",
"joyride": { "joyride": {
"recommendedModelTitle": "推荐模型", "recommendedModelTitle": "推荐模型",

View File

@ -30,5 +30,6 @@
"createNew": "建立新助理", "createNew": "建立新助理",
"personality": "個性", "personality": "個性",
"capabilities": "能力", "capabilities": "能力",
"instructionsDateHint": "提示:使用 {{current_date}} 插入今天的日期。",
"maxToolSteps": "最大工具步驟" "maxToolSteps": "最大工具步驟"
} }

View File

@ -249,6 +249,22 @@
"downloadCancelled": { "downloadCancelled": {
"title": "下載已取消", "title": "下載已取消",
"description": "下載過程已取消" "description": "下載過程已取消"
},
"downloadFailed": {
"title": "下載失敗",
"description": "{{item}} 下載失敗"
},
"modelValidationStarted": {
"title": "正在驗證模型",
"description": "模型 \"{{modelId}}\" 下載成功。正在驗證完整性..."
},
"modelValidationFailed": {
"title": "模型驗證失敗",
"description": "已下載的模型 \"{{modelId}}\" 未通過完整性驗證並已被刪除。檔案可能損壞或被篡改。"
},
"downloadAndVerificationComplete": {
"title": "下載完成",
"description": "模型 \"{{item}}\" 下載並驗證成功"
}
}
}

View File

@ -12,6 +12,7 @@
"showVariants": "顯示變體", "showVariants": "顯示變體",
"useModel": "使用此模型", "useModel": "使用此模型",
"downloadModel": "下載模型", "downloadModel": "下載模型",
"tools": "工具",
"searchPlaceholder": "在 Hugging Face 上搜尋模型...", "searchPlaceholder": "在 Hugging Face 上搜尋模型...",
"joyride": { "joyride": {
"recommendedModelTitle": "推薦模型", "recommendedModelTitle": "推薦模型",

View File

@ -17,10 +17,15 @@ import {
import { useNavigate } from '@tanstack/react-router'
import { route } from '@/constants/routes'
import { useThreads } from '@/hooks/useThreads'
import { useLocalApiServer } from '@/hooks/useLocalApiServer'
import { useAppState } from '@/hooks/useAppState'
import { AppEvent, events } from '@janhq/core'
import { startModel } from '@/services/models'
import { localStorageKey } from '@/constants/localStorage'
export function DataProvider() {
const { setProviders, selectedModel, selectedProvider, getProviderByName } =
useModelProvider()
const { setMessages } = useMessages()
const { checkForUpdate } = useAppUpdater()
@ -29,6 +34,19 @@ export function DataProvider() {
const { setThreads } = useThreads()
const navigate = useNavigate()
// Local API Server hooks
const {
enableOnStartup,
serverHost,
serverPort,
apiPrefix,
apiKey,
trustedHosts,
corsEnabled,
verboseLogs,
} = useLocalApiServer()
const { setServerStatus } = useAppState()
useEffect(() => {
console.log('Initializing DataProvider...')
getProviders().then(setProviders)
@ -78,6 +96,102 @@ export function DataProvider() {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
const getLastUsedModel = (): { provider: string; model: string } | null => {
try {
const stored = localStorage.getItem(localStorageKey.lastUsedModel)
return stored ? JSON.parse(stored) : null
} catch (error) {
console.debug('Failed to get last used model from localStorage:', error)
return null
}
}
// Helper function to determine which model to start
const getModelToStart = () => {
// Use last used model if available
const lastUsedModel = getLastUsedModel()
if (lastUsedModel) {
const provider = getProviderByName(lastUsedModel.provider)
if (
provider &&
provider.models.some((m) => m.id === lastUsedModel.model)
) {
return { model: lastUsedModel.model, provider }
}
}
// Use selected model if available
if (selectedModel && selectedProvider) {
const provider = getProviderByName(selectedProvider)
if (provider) {
return { model: selectedModel.id, provider }
}
}
// Use first model from llamacpp provider
const llamacppProvider = getProviderByName('llamacpp')
if (
llamacppProvider &&
llamacppProvider.models &&
llamacppProvider.models.length > 0
) {
return {
model: llamacppProvider.models[0].id,
provider: llamacppProvider,
}
}
return null
}
// Auto-start Local API Server on app startup if enabled
useEffect(() => {
if (enableOnStartup) {
// Validate API key before starting
if (!apiKey || apiKey.toString().trim().length === 0) {
console.warn('Cannot start Local API Server: API key is required')
return
}
const modelToStart = getModelToStart()
// Only start server if we have a model to load
if (!modelToStart) {
console.warn(
'Cannot start Local API Server: No model available to load'
)
return
}
setServerStatus('pending')
// Start the model first
startModel(modelToStart.provider, modelToStart.model)
.then(() => {
console.log(`Model ${modelToStart.model} started successfully`)
// Then start the server
return window.core?.api?.startServer({
host: serverHost,
port: serverPort,
prefix: apiPrefix,
apiKey,
trustedHosts,
isCorsEnabled: corsEnabled,
isVerboseEnabled: verboseLogs,
})
})
.then(() => {
setServerStatus('running')
})
.catch((error: unknown) => {
console.error('Failed to start Local API Server on startup:', error)
setServerStatus('stopped')
})
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
const handleDeepLink = (urls: string[] | null) => {
if (!urls) return
console.log('Received deeplink:', urls)

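The startup path above reads `localStorageKey.lastUsedModel` and expects JSON of the shape `{ provider, model }`. A sketch of the write side, for context (an assumption — only the read side appears in this diff):

// Hypothetical counterpart to getLastUsedModel(); only the parsed shape
// ({ provider, model }) is confirmed by this diff.
function rememberLastUsedModel(provider: string, model: string): void {
  localStorage.setItem(
    localStorageKey.lastUsedModel,
    JSON.stringify({ provider, model })
  )
}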
View File

@ -29,6 +29,7 @@ import {
import { useCallback, useEffect } from 'react'
import GlobalError from '@/containers/GlobalError'
import { GlobalEventHandler } from '@/providers/GlobalEventHandler'
import ErrorDialog from '@/containers/dialogs/ErrorDialog'
export const Route = createRootRoute({
component: RootLayout,
@ -203,6 +204,7 @@ function RootLayout() {
{/* <TanStackRouterDevtools position="bottom-right" /> */}
<ToolApproval />
<LoadModelErrorDialog />
<ErrorDialog />
<OutOfContextPromiseModal />
</TranslationProvider>
</Fragment>

View File

@ -20,19 +20,24 @@ import { useModelProvider } from '@/hooks/useModelProvider'
import { useDownloadStore } from '@/hooks/useDownloadStore'
import {
CatalogModel,
ModelQuant,
convertHfRepoToCatalogModel,
fetchHuggingFaceRepo,
pullModelWithMetadata,
isModelSupported,
} from '@/services/models'
import { Progress } from '@/components/ui/progress'
import { Button } from '@/components/ui/button'
import { cn } from '@/lib/utils'
import { useGeneralSetting } from '@/hooks/useGeneralSetting'
import { ModelInfoHoverCard } from '@/containers/ModelInfoHoverCard'
type SearchParams = {
repo: string
}
const defaultModelQuantizations = ['iq4_xs', 'q4_k_m']
export const Route = createFileRoute('/hub/$modelId')({
component: HubModelDetail,
validateSearch: (search: Record<string, unknown>): SearchParams => ({
@ -57,6 +62,11 @@ function HubModelDetail() {
const [readmeContent, setReadmeContent] = useState<string>('')
const [isLoadingReadme, setIsLoadingReadme] = useState(false)
// State for model support status
const [modelSupportStatus, setModelSupportStatus] = useState<
Record<string, 'RED' | 'YELLOW' | 'GREEN' | 'LOADING'>
>({})
useEffect(() => {
fetchSources()
}, [fetchSources])
@ -131,6 +141,41 @@ function HubModelDetail() {
}
}
// Check model support function
const checkModelSupport = useCallback(
async (variant: ModelQuant) => {
const modelKey = variant.model_id
// Don't check again if already checking or checked
if (modelSupportStatus[modelKey]) {
return
}
// Set loading state
setModelSupportStatus((prev) => ({
...prev,
[modelKey]: 'LOADING',
}))
try {
// Use the HuggingFace path for the model
const modelPath = variant.path
const supported = await isModelSupported(modelPath, 8192)
setModelSupportStatus((prev) => ({
...prev,
[modelKey]: supported,
}))
} catch (error) {
console.error('Error checking model support:', error)
setModelSupportStatus((prev) => ({
...prev,
[modelKey]: 'RED',
}))
}
},
[modelSupportStatus]
)
// Extract tags from quants (model variants)
const tags = useMemo(() => {
if (!modelData?.quants) return []
@ -318,6 +363,7 @@ function HubModelDetail() {
<th className="text-left py-3 px-2 text-sm font-medium text-main-view-fg/70"> <th className="text-left py-3 px-2 text-sm font-medium text-main-view-fg/70">
Size Size
</th> </th>
<th></th>
<th className="text-right py-3 px-2 text-sm font-medium text-main-view-fg/70"> <th className="text-right py-3 px-2 text-sm font-medium text-main-view-fg/70">
Action Action
</th> </th>
@ -372,7 +418,18 @@ function HubModelDetail() {
{variant.file_size}
</span>
</td>
<td>
<ModelInfoHoverCard
model={modelData}
variant={variant}
defaultModelQuantizations={
defaultModelQuantizations
}
modelSupportStatus={modelSupportStatus}
onCheckModelSupport={checkModelSupport}
/>
</td>
<td className="py-3 px-2 text-right ml-auto">
{(() => {
if (isDownloading && !isDownloaded) {
return (
@ -408,9 +465,11 @@ function HubModelDetail() {
addLocalDownloadingModel(
variant.model_id
)
pullModelWithMetadata(
variant.model_id,
variant.path,
modelData.mmproj_models?.[0]?.path,
huggingfaceToken
)
}}
className={cn(isDownloading && 'hidden')}

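Both this detail route and the Hub list cache `isModelSupported` results per `model_id` as a `'RED' | 'YELLOW' | 'GREEN' | 'LOADING'` record, probing each variant with a 8192-token context. A sketch of how the traffic-light value might map to user-facing copy (the labels are illustrative, not from this diff):

type ModelSupportStatus = 'RED' | 'YELLOW' | 'GREEN' | 'LOADING'

// Hypothetical presentation helper; wording is an assumption.
function supportLabel(status: ModelSupportStatus): string {
  switch (status) {
    case 'GREEN':
      return 'Should run well on this device'
    case 'YELLOW':
      return 'May run slowly or need a reduced context size'
    case 'RED':
      return 'Likely unsupported on this hardware'
    case 'LOADING':
      return 'Checking compatibility…'
  }
}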
View File

@ -31,6 +31,7 @@ import {
TooltipProvider,
TooltipTrigger,
} from '@/components/ui/tooltip'
import { ModelInfoHoverCard } from '@/containers/ModelInfoHoverCard'
import Joyride, { CallBackProps, STATUS } from 'react-joyride'
import { CustomTooltipJoyRide } from '@/containers/CustomeTooltipJoyRide'
import {
@ -41,9 +42,10 @@ import {
} from '@/components/ui/dropdown-menu'
import {
CatalogModel,
pullModelWithMetadata,
fetchHuggingFaceRepo,
convertHfRepoToCatalogModel,
isModelSupported,
} from '@/services/models'
import { useDownloadStore } from '@/hooks/useDownloadStore'
import { Progress } from '@/components/ui/progress'
@ -97,6 +99,9 @@ function Hub() {
const [huggingFaceRepo, setHuggingFaceRepo] = useState<CatalogModel | null>(
null
)
const [modelSupportStatus, setModelSupportStatus] = useState<
Record<string, 'RED' | 'YELLOW' | 'GREEN' | 'LOADING'>
>({})
const [joyrideReady, setJoyrideReady] = useState(false)
const [currentStepIndex, setCurrentStepIndex] = useState(0)
const addModelSourceTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>(
@ -270,6 +275,41 @@ function Hub() {
[navigate]
)
const checkModelSupport = useCallback(
async (variant: any) => {
const modelKey = variant.model_id
// Don't check again if already checking or checked
if (modelSupportStatus[modelKey]) {
return
}
// Set loading state
setModelSupportStatus((prev) => ({
...prev,
[modelKey]: 'LOADING',
}))
try {
// Use the HuggingFace path for the model
const modelPath = variant.path
const supportStatus = await isModelSupported(modelPath, 8192)
setModelSupportStatus((prev) => ({
...prev,
[modelKey]: supportStatus,
}))
} catch (error) {
console.error('Error checking model support:', error)
setModelSupportStatus((prev) => ({
...prev,
[modelKey]: 'RED',
}))
}
},
[modelSupportStatus]
)
const DownloadButtonPlaceholder = useMemo(() => {
return ({ model }: ModelProps) => {
// Check if this is a HuggingFace repository (no quants)
@ -313,7 +353,12 @@ function Hub() {
// Immediately set local downloading state
addLocalDownloadingModel(modelId)
const mmprojPath = model.mmproj_models?.[0]?.path
pullModelWithMetadata(
modelId,
modelUrl,
mmprojPath,
huggingfaceToken
)
}
return (
@ -611,6 +656,14 @@ function Hub() {
)?.file_size
}
</span>
<ModelInfoHoverCard
model={filteredModels[virtualItem.index]}
defaultModelQuantizations={
defaultModelQuantizations
}
modelSupportStatus={modelSupportStatus}
onCheckModelSupport={checkModelSupport}
/>
<DownloadButtonPlaceholder
model={filteredModels[virtualItem.index]}
/>
@ -666,45 +719,47 @@ function Hub() {
?.length || 0}
</span>
</div>
<div className="flex gap-1.5 items-center">
{filteredModels[virtualItem.index].num_mmproj >
0 && (
<div className="flex items-center gap-1">
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<div>
<IconEye
size={17}
className="text-main-view-fg/50"
/>
</div>
</TooltipTrigger>
<TooltipContent>
<p>{t('vision')}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
</div>
)}
{filteredModels[virtualItem.index].tools && (
<div className="flex items-center gap-1">
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<div>
<IconTool
size={17}
className="text-main-view-fg/50"
/>
</div>
</TooltipTrigger>
<TooltipContent>
<p>{t('tools')}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
</div>
)}
</div>
{filteredModels[virtualItem.index].quants.length >
1 && (
<div className="flex items-center gap-2 hub-show-variants-step">
@ -739,12 +794,75 @@ function Hub() {
(variant) => (
<CardItem
key={variant.model_id}
title={
<>
<div className="flex items-center gap-1">
<span className="mr-2">
{variant.model_id}
</span>
{filteredModels[virtualItem.index]
.num_mmproj > 0 && (
<div className="flex items-center gap-1">
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<div>
<IconEye
size={17}
className="text-main-view-fg/50"
/>
</div>
</TooltipTrigger>
<TooltipContent>
<p>{t('vision')}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
</div>
)}
{filteredModels[virtualItem.index]
.tools && (
<div className="flex items-center gap-1">
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<div>
<IconTool
size={17}
className="text-main-view-fg/50"
/>
</div>
</TooltipTrigger>
<TooltipContent>
<p>{t('tools')}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
</div>
)}
</div>
</>
}
actions={
<div className="flex items-center gap-2">
<p className="text-main-view-fg/70 font-medium text-xs">
{variant.file_size}
</p>
<ModelInfoHoverCard
model={
filteredModels[virtualItem.index]
}
variant={variant}
defaultModelQuantizations={
defaultModelQuantizations
}
modelSupportStatus={
modelSupportStatus
}
onCheckModelSupport={
checkModelSupport
}
/>
{(() => {
const isDownloading =
localDownloadingModels.has(
@ -812,12 +930,13 @@ function Hub() {
addLocalDownloadingModel(
variant.model_id
)
pullModelWithMetadata(
variant.model_id,
variant.path,
filteredModels[
virtualItem.index
].mmproj_models?.[0]?.path,
huggingfaceToken
)
}}
>

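Across the call sites in this file and in hub/$modelId above, `pullModelWithMetadata` replaces `pullModel` and is always invoked with the model id, its download path, an optional mmproj path, and the Hugging Face token. An inferred signature, for orientation (deduced from usage only; the service definition is not shown in this diff):

// Inferred from call sites — parameter names and optionality are assumptions.
declare function pullModelWithMetadata(
  modelId: string,
  modelPath: string,
  mmprojPath?: string,
  huggingfaceToken?: string
): Promise<void>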
View File

@ -61,8 +61,6 @@ vi.mock('@/hooks/useGeneralSetting', () => ({
useGeneralSetting: () => ({
spellCheckChatInput: true,
setSpellCheckChatInput: vi.fn(),
experimentalFeatures: false,
setExperimentalFeatures: vi.fn(),
huggingfaceToken: 'test-token',
setHuggingfaceToken: vi.fn(),
}),
@ -188,10 +186,12 @@ vi.mock('@tauri-apps/plugin-opener', () => ({
}))
vi.mock('@tauri-apps/api/webviewWindow', () => {
const MockWebviewWindow = vi
.fn()
.mockImplementation((label: string, options: any) => ({
once: vi.fn(),
setFocus: vi.fn(),
}))
MockWebviewWindow.getByLabel = vi.fn().mockReturnValue(null)
return {
@ -299,16 +299,6 @@ describe('General Settings Route', () => {
// expect(screen.getByTestId('language-switcher')).toBeInTheDocument()
// })
it('should render switches for experimental features and spell check', async () => {
const Component = GeneralRoute.component as React.ComponentType
await act(async () => {
render(<Component />)
})
const switches = screen.getAllByTestId('switch')
expect(switches.length).toBeGreaterThanOrEqual(2)
})
it('should render huggingface token input', async () => {
const Component = GeneralRoute.component as React.ComponentType
await act(async () => {
@ -336,24 +326,6 @@ describe('General Settings Route', () => {
expect(switches[0]).toBeInTheDocument()
})
it('should handle experimental features toggle', async () => {
const Component = GeneralRoute.component as React.ComponentType
await act(async () => {
render(<Component />)
})
const switches = screen.getAllByTestId('switch')
expect(switches.length).toBeGreaterThan(0)
// Test that switches are interactive
if (switches.length > 1) {
await act(async () => {
fireEvent.click(switches[1])
})
expect(switches[1]).toBeInTheDocument()
}
})
it('should handle huggingface token change', async () => {
const Component = GeneralRoute.component as React.ComponentType
await act(async () => {

View File

@ -46,9 +46,7 @@ import { stopAllModels } from '@/services/models'
import { SystemEvent } from '@/types/events'
import { Input } from '@/components/ui/input'
import { useHardware } from '@/hooks/useHardware'
import LanguageSwitcher from '@/containers/LanguageSwitcher'
import { invoke } from '@tauri-apps/api/core'
import { useMCPServers } from '@/hooks/useMCPServers'
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export const Route = createFileRoute(route.settings.general as any)({
@ -60,8 +58,6 @@ function General() {
const {
spellCheckChatInput,
setSpellCheckChatInput,
experimentalFeatures,
setExperimentalFeatures,
huggingfaceToken,
setHuggingfaceToken,
} = useGeneralSetting()
@ -209,38 +205,6 @@ function General() {
}
}, [t, checkForUpdate])
const handleStopAllMCPServers = async () => {
try {
const connectedServers = await getConnectedServers()
// Stop each connected server
const stopPromises = connectedServers.map((serverName) =>
invoke('deactivate_mcp_server', { name: serverName }).catch((error) => {
console.error(`Error stopping MCP server ${serverName}:`, error)
return Promise.resolve() // Continue with other servers even if one fails
})
)
await Promise.all(stopPromises)
// Update server configs to set active: false for stopped servers
const { mcpServers, editServer } = useMCPServers.getState()
connectedServers.forEach((serverName) => {
const serverConfig = mcpServers[serverName]
if (serverConfig) {
editServer(serverName, { ...serverConfig, active: false })
}
})
if (connectedServers.length > 0) {
toast.success(`Stopped ${connectedServers.length} MCP server(s)`)
}
} catch (error) {
console.error('Error stopping MCP servers:', error)
toast.error('Failed to stop MCP servers')
}
}
return ( return (
<div className="flex flex-col h-full"> <div className="flex flex-col h-full">
<HeaderPage> <HeaderPage>
@@ -282,10 +246,10 @@ function General() {
             }
           />
         )}
-        {/* <CardItem
-          title={t('common:language')}
-          actions={<LanguageSwitcher />}
-        /> */}
+        <CardItem
+          title={t('common:language')}
+          actions={<LanguageSwitcher />}
+        />
       </Card>
 
       {/* Data folder */}
@@ -430,19 +394,6 @@ function General() {
       </Card>
 
       {/* Advanced */}
       <Card title="Advanced">
-        <CardItem
-          title="Experimental Features"
-          description="Enable experimental features. They may be unstable or change at any time."
-          actions={
-            <Switch
-              checked={experimentalFeatures}
-              onCheckedChange={async (e) => {
-                await handleStopAllMCPServers()
-                setExperimentalFeatures(e)
-              }}
-            />
-          }
-        />
         <CardItem
           title={t('settings:others.resetFactory', {
             ns: 'settings',
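Note: the removed handleStopAllMCPServers relied on a pattern worth keeping in mind even after its deletion: give each async stop call its own .catch so that one failing server neither rejects the surrounding Promise.all nor prevents the remaining servers from being stopped. A minimal sketch of the idea, with stopServer as a hypothetical stand-in for the invoke('deactivate_mcp_server', ...) call:

// Hypothetical stand-in for invoke('deactivate_mcp_server', { name })
declare function stopServer(name: string): Promise<void>

async function stopAll(names: string[]): Promise<void> {
  // Each promise swallows its own failure, so Promise.all always resolves
  // and every server still receives its stop request.
  await Promise.all(
    names.map((name) =>
      stopServer(name).catch((error) => {
        console.error(`Error stopping MCP server ${name}:`, error)
      })
    )
  )
}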

View File

@@ -13,6 +13,9 @@ import { TrustedHostsInput } from '@/containers/TrustedHostsInput'
 import { useLocalApiServer } from '@/hooks/useLocalApiServer'
 import { WebviewWindow } from '@tauri-apps/api/webviewWindow'
 import { useAppState } from '@/hooks/useAppState'
+import { useModelProvider } from '@/hooks/useModelProvider'
+import { startModel } from '@/services/models'
+import { localStorageKey } from '@/constants/localStorage'
 import { windowKey } from '@/constants/windows'
 import { IconLogs } from '@tabler/icons-react'
 import { cn } from '@/lib/utils'
@@ -32,6 +35,8 @@ function LocalAPIServer() {
     setCorsEnabled,
     verboseLogs,
     setVerboseLogs,
+    enableOnStartup,
+    setEnableOnStartup,
     serverHost,
     serverPort,
     apiPrefix,
@@ -40,6 +45,7 @@ function LocalAPIServer() {
   } = useLocalApiServer()
 
   const { serverStatus, setServerStatus } = useAppState()
+  const { selectedModel, selectedProvider, getProviderByName } = useModelProvider()
   const [showApiKeyError, setShowApiKeyError] = useState(false)
   const [isApiKeyEmpty, setIsApiKeyEmpty] = useState(
     !apiKey || apiKey.toString().trim().length === 0
@@ -60,6 +66,54 @@ function LocalAPIServer() {
     setIsApiKeyEmpty(!isValid)
   }
 
+  const getLastUsedModel = (): { provider: string; model: string } | null => {
+    try {
+      const stored = localStorage.getItem(localStorageKey.lastUsedModel)
+      return stored ? JSON.parse(stored) : null
+    } catch (error) {
+      console.debug('Failed to get last used model from localStorage:', error)
+      return null
+    }
+  }
+
+  // Helper function to determine which model to start
+  const getModelToStart = () => {
+    // Use last used model if available
+    const lastUsedModel = getLastUsedModel()
+    if (lastUsedModel) {
+      const provider = getProviderByName(lastUsedModel.provider)
+      if (
+        provider &&
+        provider.models.some((m) => m.id === lastUsedModel.model)
+      ) {
+        return { model: lastUsedModel.model, provider }
+      }
+    }
+
+    // Use selected model if available
+    if (selectedModel && selectedProvider) {
+      const provider = getProviderByName(selectedProvider)
+      if (provider) {
+        return { model: selectedModel.id, provider }
+      }
+    }
+
+    // Use first model from llamacpp provider
+    const llamacppProvider = getProviderByName('llamacpp')
+    if (
+      llamacppProvider &&
+      llamacppProvider.models &&
+      llamacppProvider.models.length > 0
+    ) {
+      return {
+        model: llamacppProvider.models[0].id,
+        provider: llamacppProvider,
+      }
+    }
+
+    return null
+  }
+
   const toggleAPIServer = async () => {
     // Validate API key before starting server
     if (serverStatus === 'stopped') {
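Note: getModelToStart encodes a three-step precedence: the last used model persisted in localStorage, then the currently selected model, then the first model of the llamacpp provider. The same precedence can be written as a generic first-match resolver; a sketch under the assumption that each step returns null when it cannot resolve:

type Provider = { models: { id: string }[] }
type Candidate = { model: string; provider: Provider } | null

// Walk the resolvers in priority order and return the first hit,
// mirroring the last-used → selected → first-llamacpp chain above.
function resolveModel(...resolvers: Array<() => Candidate>): Candidate {
  for (const resolve of resolvers) {
    const candidate = resolve()
    if (candidate) return candidate
  }
  return null
}

// getModelToStart() would then read as:
// resolveModel(fromLastUsed, fromSelection, fromLlamacpp)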
@@ -68,19 +122,33 @@ function LocalAPIServer() {
         return
       }
       setShowApiKeyError(false)
-    }
-    setServerStatus('pending')
-    if (serverStatus === 'stopped') {
-      window.core?.api
-        ?.startServer({
-          host: serverHost,
-          port: serverPort,
-          prefix: apiPrefix,
-          apiKey,
-          trustedHosts,
-          isCorsEnabled: corsEnabled,
-          isVerboseEnabled: verboseLogs,
+
+      const modelToStart = getModelToStart()
+      // Only start server if we have a model to load
+      if (!modelToStart) {
+        console.warn(
+          'Cannot start Local API Server: No model available to load'
+        )
+        return
+      }
+
+      setServerStatus('pending')
+
+      // Start the model first
+      startModel(modelToStart.provider, modelToStart.model)
+        .then(() => {
+          console.log(`Model ${modelToStart.model} started successfully`)
+          // Then start the server
+          return window.core?.api?.startServer({
+            host: serverHost,
+            port: serverPort,
+            prefix: apiPrefix,
+            apiKey,
+            trustedHosts,
+            isCorsEnabled: corsEnabled,
+            isVerboseEnabled: verboseLogs,
+          })
         })
         .then(() => {
           setServerStatus('running')
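Note: the rewritten start path sequences two async steps: start the model, and only on success start the HTTP server, with the shared catch further down rolling status back to 'stopped'. The same flow reads naturally with async/await; a sketch using declared stand-ins for the calls above (signatures simplified, host and port values are placeholders):

declare function startModel(provider: string, model: string): Promise<void>
declare function startServer(config: { host: string; port: number }): Promise<void>
declare function setServerStatus(s: 'pending' | 'running' | 'stopped'): void

async function startServerWithModel(provider: string, model: string) {
  setServerStatus('pending')
  try {
    // The server is only useful once a model is loaded, so load it first.
    await startModel(provider, model)
    await startServer({ host: '127.0.0.1', port: 1337 })
    setServerStatus('running')
  } catch (error) {
    console.error('Failed to start Local API Server:', error)
    setServerStatus('stopped') // roll back so the UI can retry
  }
}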
@@ -90,6 +158,7 @@ function LocalAPIServer() {
           setServerStatus('stopped')
         })
     } else {
+      setServerStatus('pending')
       window.core?.api
         ?.stopServer()
         .then(() => {
@@ -199,6 +268,26 @@ function LocalAPIServer() {
         />
       </Card>
 
+      {/* Startup Configuration */}
+      <Card title={t('settings:localApiServer.startupConfiguration')}>
+        <CardItem
+          title={t('settings:localApiServer.runOnStartup')}
+          description={t('settings:localApiServer.runOnStartupDesc')}
+          actions={
+            <Switch
+              checked={enableOnStartup}
+              onCheckedChange={(checked) => {
+                if (!apiKey || apiKey.toString().trim().length === 0) {
+                  setShowApiKeyError(true)
+                  return
+                }
+                setEnableOnStartup(checked)
+              }}
+            />
+          }
+        />
+      </Card>
+
       {/* Server Configuration */}
       <Card title={t('settings:localApiServer.serverConfiguration')}>
         <CardItem

View File

@@ -21,6 +21,7 @@ import { useToolApproval } from '@/hooks/useToolApproval'
 import { toast } from 'sonner'
 import { invoke } from '@tauri-apps/api/core'
 import { useTranslation } from '@/i18n/react-i18next-compat'
+import { useAppState } from '@/hooks/useAppState'
 
 // Function to mask sensitive values
 const maskSensitiveValue = (value: string) => {
@@ -120,6 +121,7 @@ function MCPServers() {
   const [loadingServers, setLoadingServers] = useState<{
     [key: string]: boolean
   }>({})
+  const { setErrorMessage } = useAppState()
 
   const handleOpenDialog = (serverKey?: string) => {
     if (serverKey) {
@@ -247,13 +249,13 @@ function MCPServers() {
         getConnectedServers().then(setConnectedServers)
       })
       .catch((error) => {
-        console.log(error, 'error.mcp')
         editServer(serverKey, {
           ...(config ?? (mcpServers[serverKey] as MCPServerConfig)),
           active: false,
         })
-        toast.error(error, {
-          description: t('mcp-servers:checkParams'),
+        setErrorMessage({
+          message: error,
+          subtitle: t('mcp-servers:checkParams'),
         })
       })
       .finally(() => {
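Note: the change above routes MCP start-up failures through shared app state via setErrorMessage instead of a one-off toast, so any surface can render or clear the error. A minimal sketch of such an error slot, assuming a Zustand-style store like the other hooks in this codebase (the shape here is illustrative, not the actual useAppState definition):

import { create } from 'zustand'

type ErrorMessage = { message: string; subtitle?: string }

type AppState = {
  errorMessage: ErrorMessage | null
  setErrorMessage: (error: ErrorMessage | null) => void
}

// A single shared slot: the latest error replaces the previous one,
// and consumers clear it by passing null.
export const useAppState = create<AppState>((set) => ({
  errorMessage: null,
  setErrorMessage: (errorMessage) => set({ errorMessage }),
}))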

View File

@@ -22,6 +22,7 @@ import { useTranslation } from '@/i18n/react-i18next-compat'
 import Capabilities from '@/containers/Capabilities'
 import { DynamicControllerSetting } from '@/containers/dynamicControllerSetting'
 import { RenderMarkdown } from '@/containers/RenderMarkdown'
+import { DialogEditModel } from '@/containers/dialogs/EditModel'
 import { DialogAddModel } from '@/containers/dialogs/AddModel'
 import { ModelSetting } from '@/containers/ModelSetting'
 import { DialogDeleteModel } from '@/containers/dialogs/DeleteModel'
@@ -583,6 +584,10 @@ function ProviderDetail() {
             }
             actions={
               <div className="flex items-center gap-0.5">
+                <DialogEditModel
+                  provider={provider}
+                  modelId={model.id}
+                />
                 {model.settings && (
                   <ModelSetting
                     provider={provider}

View File

@@ -13,6 +13,7 @@ import {
   stopModel,
   stopAllModels,
   startModel,
+  isModelSupported,
   HuggingFaceRepo,
   CatalogModel,
 } from '../models'
@@ -325,7 +326,7 @@ describe('models service', () => {
     expect(result).toEqual(mockRepoData)
     expect(fetch).toHaveBeenCalledWith(
-      'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true',
+      'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true&files_metadata=true',
       {
         headers: {},
       }
@@ -344,7 +345,7 @@ describe('models service', () => {
       'https://huggingface.co/microsoft/DialoGPT-medium'
     )
     expect(fetch).toHaveBeenCalledWith(
-      'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true',
+      'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true&files_metadata=true',
       {
         headers: {},
       }
@@ -353,7 +354,7 @@ describe('models service', () => {
     // Test with domain prefix
     await fetchHuggingFaceRepo('huggingface.co/microsoft/DialoGPT-medium')
     expect(fetch).toHaveBeenCalledWith(
-      'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true',
+      'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true&files_metadata=true',
       {
         headers: {},
       }
@@ -362,7 +363,7 @@ describe('models service', () => {
     // Test with trailing slash
     await fetchHuggingFaceRepo('microsoft/DialoGPT-medium/')
     expect(fetch).toHaveBeenCalledWith(
-      'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true',
+      'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true&files_metadata=true',
       {
         headers: {},
       }
@@ -391,7 +392,7 @@ describe('models service', () => {
     expect(result).toBeNull()
     expect(fetch).toHaveBeenCalledWith(
-      'https://huggingface.co/api/models/nonexistent/model?blobs=true',
+      'https://huggingface.co/api/models/nonexistent/model?blobs=true&files_metadata=true',
       {
         headers: {},
       }
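Note: every assertion above changes for the same reason: the repo lookup now asks the Hugging Face API for per-file metadata (files_metadata=true) alongside blob info (blobs=true). The URL construction under test behaves roughly like this sketch, with the normalization rules inferred from the test inputs:

// Accepts 'owner/repo', a full URL, a domain-prefixed id, or a trailing
// slash, and normalizes all of them to the same API endpoint.
function huggingFaceRepoUrl(input: string): string {
  const repoId = input
    .replace(/^https?:\/\//, '')
    .replace(/^huggingface\.co\//, '')
    .replace(/\/+$/, '')
  return `https://huggingface.co/api/models/${repoId}?blobs=true&files_metadata=true`
}

// huggingFaceRepoUrl('huggingface.co/microsoft/DialoGPT-medium/')
// → 'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true&files_metadata=true'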
@@ -845,4 +846,95 @@ describe('models service', () => {
       expect(result.quants[0].file_size).toBe('Unknown size')
     })
   })
+
+  describe('isModelSupported', () => {
+    beforeEach(() => {
+      vi.clearAllMocks()
+    })
+
+    it('should return GREEN when model is fully supported', async () => {
+      const mockEngineWithSupport = {
+        ...mockEngine,
+        isModelSupported: vi.fn().mockResolvedValue('GREEN'),
+      }
+      mockEngineManager.get.mockReturnValue(mockEngineWithSupport)
+
+      const result = await isModelSupported('/path/to/model.gguf', 4096)
+
+      expect(result).toBe('GREEN')
+      expect(mockEngineWithSupport.isModelSupported).toHaveBeenCalledWith(
+        '/path/to/model.gguf',
+        4096
+      )
+    })
+
+    it('should return YELLOW when model weights fit but KV cache does not', async () => {
+      const mockEngineWithSupport = {
+        ...mockEngine,
+        isModelSupported: vi.fn().mockResolvedValue('YELLOW'),
+      }
+      mockEngineManager.get.mockReturnValue(mockEngineWithSupport)
+
+      const result = await isModelSupported('/path/to/model.gguf', 8192)
+
+      expect(result).toBe('YELLOW')
+      expect(mockEngineWithSupport.isModelSupported).toHaveBeenCalledWith(
+        '/path/to/model.gguf',
+        8192
+      )
+    })
+
+    it('should return RED when model is not supported', async () => {
+      const mockEngineWithSupport = {
+        ...mockEngine,
+        isModelSupported: vi.fn().mockResolvedValue('RED'),
+      }
+      mockEngineManager.get.mockReturnValue(mockEngineWithSupport)
+
+      const result = await isModelSupported('/path/to/large-model.gguf')
+
+      expect(result).toBe('RED')
+      expect(mockEngineWithSupport.isModelSupported).toHaveBeenCalledWith(
+        '/path/to/large-model.gguf',
+        undefined
+      )
+    })
+
+    it('should return YELLOW as fallback when engine method is not available', async () => {
+      const mockEngineWithoutSupport = {
+        ...mockEngine,
+        // isModelSupported method not available
+      }
+      mockEngineManager.get.mockReturnValue(mockEngineWithoutSupport)
+
+      const result = await isModelSupported('/path/to/model.gguf')
+
+      expect(result).toBe('YELLOW')
+    })
+
+    it('should return YELLOW when no engine is available', async () => {
+      mockEngineManager.get.mockReturnValue(null)
+
+      const result = await isModelSupported('/path/to/model.gguf')
+
+      expect(result).toBe('YELLOW') // Should use fallback
+    })
+
+    it('should return RED when there is an error', async () => {
+      const mockEngineWithError = {
+        ...mockEngine,
+        isModelSupported: vi.fn().mockRejectedValue(new Error('Test error')),
+      }
+      mockEngineManager.get.mockReturnValue(mockEngineWithError)
+
+      const result = await isModelSupported('/path/to/model.gguf')
+
+      expect(result).toBe('RED')
+    })
+  })
 })
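Note: taken together, these tests pin down the isModelSupported contract: the engine reports GREEN (model and KV cache fit), YELLOW (weights fit, the KV cache may not), or RED (does not fit); the service answers YELLOW as an optimistic fallback when no engine or no capability check is available, and maps thrown errors to RED. A sketch of a wrapper honouring that contract, with the engine lookup simplified to a declared stand-in:

type ModelSupportStatus = 'GREEN' | 'YELLOW' | 'RED'

interface Engine {
  // Optional: older engines may not implement the capability check.
  isModelSupported?: (path: string, ctxSize?: number) => Promise<ModelSupportStatus>
}

// Hypothetical stand-in for the EngineManager lookup used by the service
declare function getEngine(name: string): Engine | null

export async function isModelSupported(
  modelPath: string,
  ctxSize?: number
): Promise<ModelSupportStatus> {
  try {
    const engine = getEngine('llamacpp')
    if (!engine?.isModelSupported) {
      // No engine or no capability check: assume it may work.
      return 'YELLOW'
    }
    return await engine.isModelSupported(modelPath, ctxSize)
  } catch (error) {
    console.error('Model support check failed:', error)
    return 'RED'
  }
}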

Some files were not shown because too many files have changed in this diff.