Merge remote-tracking branch 'origin/dev' into mobile/dev

# Conflicts:
#	extensions/yarn.lock
#	package.json
#	src-tauri/plugins/tauri-plugin-hardware/src/vendor/vulkan.rs
#	src-tauri/src/lib.rs
#	yarn.lock
This commit is contained in:
Vanalite 2025-09-29 22:22:00 +07:00
commit 5e57caee43
100 changed files with 3721 additions and 3149 deletions

View File

@ -15,7 +15,6 @@ on:
- 'pre-install/**'
- 'Makefile'
- 'package.json'
- 'mise.toml'
jobs:
get-update-version:

View File

@ -35,7 +35,6 @@ on:
- 'pre-install/**'
- 'Makefile'
- 'package.json'
- 'mise.toml'
jobs:

View File

@ -72,8 +72,7 @@ jobs:
jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = false' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
if [ "${{ inputs.channel }}" != "stable" ]; then
jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun",
"usr/lib/Jan-${{ inputs.channel }}/resources/lib/libvulkan.so": "resources/lib/libvulkan.so"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json
jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json
mv /tmp/tauri.linux.conf.json ./src-tauri/tauri.linux.conf.json
fi
jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json

View File

@ -93,8 +93,7 @@ jobs:
jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = true' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
if [ "${{ inputs.channel }}" != "stable" ]; then
jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun",
"usr/lib/Jan-${{ inputs.channel }}/resources/lib/libvulkan.so": "resources/lib/libvulkan.so"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json
jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json
mv /tmp/tauri.linux.conf.json ./src-tauri/tauri.linux.conf.json
fi
jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json
@ -184,4 +183,3 @@ jobs:
with:
name: jan-linux-amd64-flatpak-${{ inputs.new_version }}-AppImage
path: ./src-tauri/target/release/bundle/appimage/*.AppImage

View File

@ -110,8 +110,7 @@ jobs:
jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = true' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
if [ "${{ inputs.channel }}" != "stable" ]; then
jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun",
"usr/lib/Jan-${{ inputs.channel }}/resources/lib/libvulkan.so": "resources/lib/libvulkan.so"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json
jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json
mv /tmp/tauri.linux.conf.json ./src-tauri/tauri.linux.conf.json
fi
jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json

View File

@ -127,7 +127,6 @@ jan/
├── package.json # Root workspace configuration
├── Makefile # Build automation commands
├── mise.toml # Mise tool configuration
├── LICENSE # Apache 2.0 license
└── README.md # Project overview
```
@ -149,19 +148,6 @@ cd jan
make dev
```
**Option 2: The Easier Way (Mise)**
```bash
git clone https://github.com/menloresearch/jan
cd jan
# Install mise
curl https://mise.run | sh
# Let mise handle everything
mise install # installs Node.js, Rust, and other tools
mise dev # runs the full development setup
```
## How Can I Contribute?
### Reporting Bugs

View File

@ -60,7 +60,6 @@ install-ios-rust-targets:
dev: install-and-build
yarn download:bin
yarn download:lib
yarn dev
# Web application targets
@ -117,7 +116,6 @@ lint: install-and-build
# Testing
test: lint
yarn download:bin
yarn download:lib
ifeq ($(OS),Windows_NT)
yarn download:windows-installer
endif

View File

@ -1,6 +1,6 @@
# Jan - Local AI Assistant
# Jan - Open-source ChatGPT replacement
![Jan AI](docs/src/pages/docs/_assets/jan-app.png)
<img width="2048" height="280" alt="github jan banner" src="https://github.com/user-attachments/assets/f3f87889-c133-433b-b250-236218150d3f" />
<p align="center">
<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
@ -12,15 +12,13 @@
</p>
<p align="center">
<a href="https://jan.ai/docs/quickstart">Getting Started</a>
- <a href="https://jan.ai/docs">Docs</a>
<a href="https://www.jan.ai/docs/desktop">Getting Started</a>
- <a href="https://discord.gg/Exe46xPMbK">Community</a>
- <a href="https://jan.ai/changelog">Changelog</a>
- <a href="https://github.com/menloresearch/jan/issues">Bug reports</a>
- <a href="https://discord.gg/AsJ8krTT3N">Discord</a>
</p>
Jan is an AI assistant that can run 100% offline on your device. Download and run LLMs with
**full control** and **privacy**.
Jan is bringing the best of open-source AI in an easy-to-use product. Download and run LLMs with **full control** and **privacy**.
## Installation
@ -29,41 +27,36 @@ The easiest way to get started is by downloading one of the following versions f
<table>
<tr>
<td><b>Platform</b></td>
<td><b>Stable</b></td>
<td><b>Nightly</b></td>
<td><b>Download</b></td>
</tr>
<tr>
<td><b>Windows</b></td>
<td><a href='https://app.jan.ai/download/latest/win-x64'>jan.exe</a></td>
<td><a href='https://app.jan.ai/download/nightly/win-x64'>jan.exe</a></td>
</tr>
<tr>
<td><b>macOS</b></td>
<td><a href='https://app.jan.ai/download/latest/mac-universal'>jan.dmg</a></td>
<td><a href='https://app.jan.ai/download/nightly/mac-universal'>jan.dmg</a></td>
</tr>
<tr>
<td><b>Linux (deb)</b></td>
<td><a href='https://app.jan.ai/download/latest/linux-amd64-deb'>jan.deb</a></td>
<td><a href='https://app.jan.ai/download/nightly/linux-amd64-deb'>jan.deb</a></td>
</tr>
<tr>
<td><b>Linux (AppImage)</b></td>
<td><a href='https://app.jan.ai/download/latest/linux-amd64-appimage'>jan.AppImage</a></td>
<td><a href='https://app.jan.ai/download/nightly/linux-amd64-appimage'>jan.AppImage</a></td>
</tr>
</table>
Download from [jan.ai](https://jan.ai/) or [GitHub Releases](https://github.com/menloresearch/jan/releases).
Download from [jan.ai](https://jan.ai/) or [GitHub Releases](https://github.com/menloresearch/jan/releases).
## Features
- **Local AI Models**: Download and run LLMs (Llama, Gemma, Qwen, etc.) from HuggingFace
- **Cloud Integration**: Connect to OpenAI, Anthropic, Mistral, Groq, and others
- **Local AI Models**: Download and run LLMs (Llama, Gemma, Qwen, GPT-oss etc.) from HuggingFace
- **Cloud Integration**: Connect to GPT models via OpenAI, Claude models via Anthropic, Mistral, Groq, and others
- **Custom Assistants**: Create specialized AI assistants for your tasks
- **OpenAI-Compatible API**: Local server at `localhost:1337` for other applications
- **Model Context Protocol**: MCP integration for enhanced capabilities
- **Model Context Protocol**: MCP integration for agentic capabilities
- **Privacy First**: Everything runs locally when you want it to
## Build from Source
@ -93,29 +86,6 @@ This handles everything: installs dependencies, builds core components, and laun
- `make test` - Run tests and linting
- `make clean` - Delete everything and start fresh
### Run with Mise (easier)
You can also run with [mise](https://mise.jdx.dev/), which is a bit easier as it ensures Node.js, Rust, and other dependency versions are automatically managed:
```bash
git clone https://github.com/menloresearch/jan
cd jan
# Install mise (if not already installed)
curl https://mise.run | sh
# Install tools and start development
mise install # installs Node.js, Rust, and other tools
mise dev # runs the full development setup
```
**Available mise commands:**
- `mise dev` - Full development setup and launch
- `mise build` - Production build
- `mise test` - Run tests and linting
- `mise clean` - Delete everything and start fresh
- `mise tasks` - List all available tasks
### Manual Commands
```bash

View File

@ -274,6 +274,10 @@ export abstract class AIEngine extends BaseExtension {
*/
abstract delete(modelId: string): Promise<void>
/**
* Updates a model
*/
abstract update(modelId: string, model: Partial<modelInfo>): Promise<void>
/**
* Imports a model
*/

View File

@ -43,6 +43,12 @@ const mkdir = (...args: any[]) => globalThis.core.api?.mkdir({ args })
*/
const rm = (...args: any[]) => globalThis.core.api?.rm({ args })
/**
* Moves a file from the source path to the destination path.
* @returns {Promise<any>} A Promise that resolves when the file is moved successfully.
*/
const mv = (...args: any[]) => globalThis.core.api?.mv({ args })
/**
* Deletes a file from the local file system.
* @param {string} path - The path of the file to delete.
@ -92,6 +98,7 @@ export const fs = {
readdirSync,
mkdir,
rm,
mv,
unlinkSync,
appendFileSync,
copyFile,

View File

@ -91,6 +91,7 @@ export enum FileSystemRoute {
existsSync = 'existsSync',
readdirSync = 'readdirSync',
rm = 'rm',
mv = 'mv',
mkdir = 'mkdir',
readFileSync = 'readFileSync',
writeFileSync = 'writeFileSync',

View File

@ -50,15 +50,72 @@
/docs/shortcuts /docs/ 302
/docs/models /docs/ 302
/integrations/function-calling/interpreter /docs/ 302
/docs/desktop/built-in/tensorrt-llm /docs 302
/docs/desktop/beta /docs/desktop 302
/platforms /docs/desktop 302
/docs/built-in/llama-cpp /docs/desktop/llama-cpp 302
/docs/install-engines /docs/desktop/llama-cpp 302
/docs/local-api /docs/desktop/api-server 302
/docs/local-engines/llama-cpp /docs/desktop/llama-cpp 302
/docs/api-server /docs/desktop/api-server 302
/docs/assistants /docs/desktop/assistants 302
/docs/models/manage-models /docs/desktop/manage-models 302
/docs/data-folder /docs/desktop/data-folder 302
/cortex/vision /handbook/open-superintelligence 302
/docs/models/model-parameters /docs/desktop/model-parameters 302
/docs/remote-models/generic-openai /docs/desktop/remote-models/openai 302
/docs/threads /changelog/2024-01-16-settings-options-right-panel 302
/docs/desktop/docs/data-folder /docs/desktop/data-folder 302
/docs/desktop/docs/desktop/install/linux /docs/desktop/install/linux 302
/docs/desktop/docs/desktop/troubleshooting /docs/desktop/troubleshooting 302
/docs/desktop/linux /docs/desktop/install/linux 302
/docs/desktop/local-engines/llama-cpp /docs/desktop/llama-cpp-server 302
/docs/desktop/models/model-parameters /docs/desktop/model-parameters 302
/docs/desktop/windows /docs/desktop/install/windows 302
/docs/docs/data-folder /docs/desktop/data-folder 302
/docs/docs/desktop/linux /docs/desktop/install/linux 302
/docs/docs/troubleshooting /docs/desktop/troubleshooting 302
/docs/jan-models/jan-nano-32 /docs/desktop/jan-models/jan-nano-32 302
/docs/jan-models/jan-v1 /docs/desktop/jan-models/jan-v1 302
/docs/jan-models/lucy /docs/desktop/jan-models/lucy 302
/docs/llama-cpp /docs/desktop/llama-cpp 302
/docs/manage-models /docs/desktop/manage-models 302
/docs/mcp /docs/desktop/mcp 302
/docs/mcp-examples/data-analysis/e2b /docs/desktop/mcp-examples/data-analysis/e2b 302
/docs/mcp-examples/deepresearch/octagon /docs/desktop/mcp-examples/deepresearch/octagon 302
/docs/mcp-examples/design/canva /docs/desktop/mcp-examples/design/canva 302
/docs/mcp-examples/productivity/linear /docs/desktop/mcp-examples/productivity/linear 302
/docs/mcp-examples/search/exa /docs/desktop/mcp-examples/search/exa 302
/docs/model-parameters /docs/desktop/model-parameters 302
/docs/remote-models/cohere /docs/desktop/remote-models/cohere 302
/docs/remote-models/google /docs/desktop/remote-models/google 302
/docs/remote-models/groq /docs/desktop/remote-models/groq 302
/docs/remote-models/huggingface /docs/desktop/remote-models/huggingface 302
/docs/remote-models/mistralai /docs/desktop/remote-models/mistralai 302
/docs/remote-models/openai /docs/desktop/remote-models/openai 302
/docs/server-examples/continue-dev /docs/desktop/server-examples/continue-dev 302
/docs/server-examples/n8n /docs/desktop/server-examples/n8n 302
/docs/server-troubleshooting /docs/desktop/troubleshooting 302
/docs/privacy-policy /privacy 302
/docs/server-settings /docs/desktop/server-settings 302
/docs/settings /docs/desktop/settings 302
/docs/llama-cpp-server /docs/desktop/llama-cpp-server 302
/docs/install/linux /docs/desktop/install/linux 302
/docs/install/macos /docs/desktop/install/mac 302
/docs/install/windows /docs/desktop/install/windows 302
/docs/mcp-examples/browser/browserbase /docs/desktop/mcp-examples/browser/browserbase 302
/docs/jan-models/jan-nano-128 /docs/desktop/jan-models/jan-nano-128 302
/docs/mcp-examples/search/serper /docs/desktop/mcp-examples/search/serper 302
/docs/mcp-examples/data-analysis/jupyter /docs/desktop/mcp-examples/data-analysis/jupyter 302
/docs/mcp-examples/productivity/todoist /docs/desktop/mcp-examples/productivity/todoist 302
/docs/remote-models/anthropic /docs/desktop/remote-models/anthropic 302
/docs/remote-models/openrouter /docs/desktop/remote-models/openrouter 302
/docs/server-examples/llmcord /docs/desktop/server-examples/llmcord 302
/docs/server-examples/tabby /docs/desktop/server-examples/tabby 302
/docs/built-in/tensorrt-llm /docs/desktop/llama-cpp 302
/docs/desktop/docs/desktop/linux /docs/desktop/install/linux 302
/windows /docs/desktop/install/windows 302
/guides/integrations/continue/ /docs/desktop/server-examples/continue-dev 302
/continue-dev /docs/desktop/server-examples/continue-dev 302
/integrations /docs/desktop/server-examples/continue-dev 302

View File

@ -1,125 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:news="http://www.google.com/schemas/sitemap-news/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:mobile="http://www.google.com/schemas/sitemap-mobile/1.0" xmlns:image="http://www.google.com/schemas/sitemap-image/1.1" xmlns:video="http://www.google.com/schemas/sitemap-video/1.1">
<url><loc>https://jan.ai</loc><lastmod>2025-09-24T03:40:05.491Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/api-reference</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/api-reference/api-reference</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/api-reference/architecture</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/api-reference/configuration</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/api-reference/development</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/api-reference/installation</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/blog</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2023-12-21-faster-inference-across-platform</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-01-16-settings-options-right-panel</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-01-29-local-api-server</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-02-05-jan-data-folder</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-02-10-jan-is-more-stable</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-02-26-home-servers-with-helm</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-03-06-ui-revamp-settings</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-03-11-import-models</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-03-19-nitro-tensorrt-llm-extension</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-04-02-groq-api-integration</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-04-15-new-mistral-extension</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-04-25-llama3-command-r-hugginface</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-05-20-llamacpp-upgrade-new-remote-models</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-05-28-cohere-aya-23-8b-35b-phi-3-medium</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-06-21-nvidia-nim-support</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-07-15-claude-3-5-support</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-09-01-llama3-1-gemma2-support</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-09-17-improved-cpu-performance</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-10-24-jan-stable</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-11-22-jan-bugs</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-11.14-jan-supports-qwen-coder</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-12-03-jan-is-faster</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-12-05-jan-hot-fix-mac</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2024-12-30-jan-new-privacy</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-01-06-key-issues-resolved</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-01-23-deepseek-r1-jan</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-02-18-advanced-llama.cpp-settings</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-03-14-jan-security-patch</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-05-14-jan-qwen3-patch</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-06-19-jan-ui-revamp</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-06-26-jan-nano-mcp</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-07-17-responsive-ui</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-07-31-llamacpp-tutorials</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-08-07-gpt-oss</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-08-14-general-improvs</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-08-28-image-support</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/changelog/2025-09-18-auto-optimize-vision-imports</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/api-server</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/assistants</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/data-folder</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/install/linux</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/install/mac</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/install/windows</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/jan-models/jan-nano-128</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/jan-models/jan-nano-32</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/jan-models/jan-v1</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/jan-models/lucy</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/llama-cpp</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/llama-cpp-server</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/manage-models</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/mcp</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/mcp-examples/browser/browserbase</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/mcp-examples/data-analysis/e2b</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/mcp-examples/data-analysis/jupyter</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/mcp-examples/deepresearch/octagon</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/mcp-examples/design/canva</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/mcp-examples/productivity/linear</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/mcp-examples/productivity/todoist</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/mcp-examples/search/exa</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/mcp-examples/search/serper</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/model-parameters</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/privacy</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/privacy-policy</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/quickstart</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/remote-models/anthropic</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/remote-models/cohere</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/remote-models/google</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/remote-models/groq</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/remote-models/huggingface</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/remote-models/mistralai</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/remote-models/openai</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/remote-models/openrouter</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/server-examples/continue-dev</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/server-examples/llmcord</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/server-examples/n8n</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/server-examples/tabby</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/server-settings</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/server-troubleshooting</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/settings</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/desktop/troubleshooting</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/api-reference</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/api-reference-administration</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/api-reference-authentication</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/api-reference-chat</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/api-reference-chat-conversations</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/api-reference-conversations</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/api-reference-jan-responses</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/api-reference-jan-server</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/architecture</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/configuration</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/development</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/installation</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/docs/server/overview</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/download</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/handbook</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/handbook/betting-on-open-source</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/handbook/open-superintelligence</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/post/benchmarking-nvidia-tensorrt-llm</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/post/bitdefender</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/post/data-is-moat</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/post/deepresearch</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/post/deepseek-r1-locally</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/post/jan-v1-for-research</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/post/offline-chatgpt-alternative</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/post/qwen3-settings</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/post/rag-is-not-enough</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/post/run-ai-models-locally</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/post/run-gpt-oss-locally</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/privacy</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
<url><loc>https://jan.ai/support</loc><lastmod>2025-09-24T03:40:05.492Z</lastmod><changefreq>daily</changefreq><priority>1</priority></url>
</urlset>

View File

@ -16,7 +16,10 @@ const FOOTER_MENUS: FooterMenu[] = [
{
title: 'Company',
links: [
{ name: 'Vision', href: '/', comingSoon: true },
{
name: 'Open Superintelligence',
href: '/handbook/why/open-superintelligence',
},
{ name: 'Handbook', href: '/handbook' },
{ name: 'Community', href: 'https://discord.com/invite/FTk2MvZwJH' },
{ name: 'Careers', href: 'https://menlo.bamboohr.com/careers' },

View File

@ -95,7 +95,7 @@ const Home = () => {
<div className="container mx-auto relative z-10">
<div className="flex justify-center items-center mt-14 lg:mt-20 px-4">
<a
href=""
href={`https://github.com/menloresearch/jan/releases/tag/${lastVersion}`}
target="_blank"
rel="noopener noreferrer"
className="bg-black/40 px-3 lg:px-4 rounded-full h-10 inline-flex items-center max-w-full animate-fade-in delay-100"
@ -109,7 +109,7 @@ const Home = () => {
</span>
</a>
</div>
<div className="mt-10">
<div className="mt-4">
<div className="text-center relative lg:w-1/2 mx-auto">
<div className="flex flex-col lg:flex-row items-center justify-center gap-4 animate-fade-in-up delay-300">
<span>
@ -127,12 +127,17 @@ const Home = () => {
The best of open-source AI in an easy-to-use product.
</p>
</div>
<div className="flex px-4 flex-col lg:flex-row items-center gap-4 w-full justify-center text-center animate-fade-in-up delay-600 mt-8 lg:mt-10">
<div className="flex px-4 flex-col lg:flex-row items-start gap-4 w-full justify-center text-center animate-fade-in-up delay-600 mt-8 lg:mt-10">
<div>
<DropdownButton
size="xxl"
className="w-full !rounded-[20px] lg:w-auto"
lastRelease={lastRelease}
/>
<div className="font-medium text-center mt-2 text-white">
+{totalDownload(release)} downloads
</div>
</div>
<a
href="https://discord.com/invite/FTk2MvZwJH"
target="_blank"
@ -189,7 +194,8 @@ const Home = () => {
</defs>
</svg>
<span className="text-sm">
{formatCompactNumber(discordWidget.presence_count)}
15k+
{/* {formatCompactNumber(discordWidget.presence_count)} */}
</span>
</div>
</Button>
@ -198,7 +204,7 @@ const Home = () => {
</div>
</div>
<div className="absolute w-full bottom-0 left-0 flex justify-center">
<div className="absolute w-full -bottom-10 left-0 flex justify-center">
<img
className="abs animate-float scale-[175%] md:scale-100"
src={CuteRobotFlyingPNG.src}
@ -448,9 +454,10 @@ const Home = () => {
<div className="flex items-center gap-1 ml-3">
<IoMdPeople className="size-5" />
<span className="text-sm">
{formatCompactNumber(
15k+
{/* {formatCompactNumber(
discordWidget.presence_count
)}
)} */}
</span>
</div>
</Button>
@ -483,9 +490,10 @@ const Home = () => {
<div className="flex items-center gap-1 ml-3">
<IoMdPeople className="size-5" />
<span className="text-sm">
{formatCompactNumber(
15k+
{/* {formatCompactNumber(
discordWidget.presence_count
)}
)} */}
</span>
</div>
</Button>

View File

@ -4,7 +4,7 @@ import { useRouter } from 'next/router'
import { cn } from '@/lib/utils'
import { FaDiscord, FaGithub } from 'react-icons/fa'
import { FiDownload } from 'react-icons/fi'
import { FaXTwitter } from 'react-icons/fa6'
import { FaXTwitter, FaLinkedinIn } from 'react-icons/fa6'
import { Button } from './ui/button'
import LogoJanSVG from '@/assets/icons/logo-jan.svg'
@ -113,6 +113,43 @@ const Navbar = ({ noScroll }: { noScroll?: boolean }) => {
</Button>
</a>
</li>
<li>
<div className={cn('flex gap-4', !isLanding && '!text-black')}>
<a
href="https://discord.com/invite/FTk2MvZwJH"
target="_blank"
rel="noopener noreferrer"
className="rounded-lg flex items-center justify-center"
>
<FaDiscord className="size-5" />
</a>
<a
href="https://twitter.com/jandotai"
target="_blank"
rel="noopener noreferrer"
className="rounded-lg flex items-center justify-center"
>
<FaXTwitter className="size-5" />
</a>
<a
href="https://linkedin.com/company/opensuperintelligence"
target="_blank"
rel="noopener noreferrer"
className="rounded-lg flex items-center justify-center"
>
<FaLinkedinIn className="size-5" />
</a>
<a
href="https://github.com/menloresearch/jan"
target="_blank"
rel="noopener noreferrer"
className="rounded-lg flex items-center justify-center"
>
<FaGithub className="size-5" />
</a>
</div>
</li>
</ul>
</nav>
@ -232,6 +269,14 @@ const Navbar = ({ noScroll }: { noScroll?: boolean }) => {
>
<FaXTwitter className="size-5" />
</a>
<a
href="https://linkedin.com/company/opensuperintelligence"
target="_blank"
rel="noopener noreferrer"
className="text-black rounded-lg flex items-center justify-center"
>
<FaLinkedinIn className="size-5" />
</a>
<a
href="https://github.com/menloresearch/jan"
target="_blank"

View File

@ -0,0 +1,283 @@
import React, { useState, useEffect, useCallback } from 'react'
import { AlertCircle, CheckCircle, Clock, RefreshCw } from 'lucide-react'
// Normalized OpenAI service status as consumed by this widget.
interface StatusData {
  // Overall indicator, mapped from the statuspage `status.indicator` field;
  // 'unknown' is the render-time fallback when no data is available.
  status:
    | 'operational'
    | 'degraded'
    | 'partial_outage'
    | 'major_outage'
    | 'under_maintenance'
    | 'unknown'
  // Timestamp string of the last status update (taken from the statuspage
  // payload when present, otherwise a locally generated ISO string).
  lastUpdated: string
  // Most recent incidents (capped to three by the fetch code).
  incidents: Array<{
    name: string
    status: string
    impact: string
  }>
}
/** Icon matching a service status: green check, yellow/red alert, or blue clock. */
const StatusIcon = ({ status }: { status: string }) => {
  if (status === 'operational') {
    return <CheckCircle className="w-5 h-5 text-green-500" />
  }
  if (status === 'degraded' || status === 'partial_outage') {
    return <AlertCircle className="w-5 h-5 text-yellow-500" />
  }
  if (status === 'major_outage') {
    return <AlertCircle className="w-5 h-5 text-red-500" />
  }
  if (status === 'under_maintenance') {
    return <Clock className="w-5 h-5 text-blue-500" />
  }
  // Unrecognized statuses fall back to a neutral gray alert icon.
  return <AlertCircle className="w-5 h-5 text-gray-500" />
}
/** Tailwind badge classes (light + dark variants) for a given service status. */
const getStatusColor = (status: string) => {
  const palette = new Map<string, string>([
    [
      'operational',
      'bg-green-100 text-green-800 border-green-200 dark:bg-green-900/20 dark:text-green-300 dark:border-green-800',
    ],
    [
      'degraded',
      'bg-yellow-100 text-yellow-800 border-yellow-200 dark:bg-yellow-900/20 dark:text-yellow-300 dark:border-yellow-800',
    ],
    [
      'partial_outage',
      'bg-yellow-100 text-yellow-800 border-yellow-200 dark:bg-yellow-900/20 dark:text-yellow-300 dark:border-yellow-800',
    ],
    [
      'major_outage',
      'bg-red-100 text-red-800 border-red-200 dark:bg-red-900/20 dark:text-red-300 dark:border-red-800',
    ],
    [
      'under_maintenance',
      'bg-blue-100 text-blue-800 border-blue-200 dark:bg-blue-900/20 dark:text-blue-300 dark:border-blue-800',
    ],
  ])
  // Unrecognized statuses get a neutral gray badge.
  return (
    palette.get(status) ??
    'bg-gray-100 text-gray-800 border-gray-200 dark:bg-gray-900/20 dark:text-gray-300 dark:border-gray-800'
  )
}
/** Human-readable headline for a given service status. */
const getStatusText = (status: string) => {
  const labels = new Map<string, string>([
    ['operational', 'All Systems Operational'],
    ['degraded', 'Degraded Performance'],
    ['partial_outage', 'Partial Service Outage'],
    ['major_outage', 'Major Service Outage'],
    ['under_maintenance', 'Under Maintenance'],
  ])
  // Anything unrecognized is reported as unknown.
  return labels.get(status) ?? 'Status Unknown'
}
/**
 * Live OpenAI status widget.
 *
 * Fetches OpenAI's public statuspage JSON through a CORS proxy, normalizes
 * the payload into `StatusData`, and re-polls every 2 minutes. Falls back to
 * a second proxy and, as a last resort, to a hard-coded "operational" state
 * so the widget always renders something.
 */
export const OpenAIStatusChecker: React.FC = () => {
  // Normalized status payload; null until the first fetch resolves.
  const [statusData, setStatusData] = useState<StatusData | null>(null)
  const [loading, setLoading] = useState(true)
  // Set only when every fetch strategy failed; drives the fallback messaging.
  const [error, setError] = useState<string | null>(null)
  // Wall-clock time of the last (successful or fallback) refresh, shown in the UI.
  const [lastRefresh, setLastRefresh] = useState<Date>(new Date())
  // Fetch and normalize the status. The empty dependency list keeps the
  // callback's identity stable so the polling effect below registers once.
  // (It calls mapOpenAIStatusClient, declared further down; that is safe
  // because the callback only runs after the component body has executed.)
  const fetchStatus = useCallback(async () => {
    setLoading(true)
    setError(null)
    try {
      console.log('Fetching real OpenAI status...')
      // Tunnel through the allorigins.win CORS proxy to reach the status API
      // from the browser.
      const proxyUrl = 'https://api.allorigins.win/get?url='
      const targetUrl = 'https://status.openai.com/api/v2/status.json'
      const response = await fetch(proxyUrl + encodeURIComponent(targetUrl))
      if (!response.ok) {
        throw new Error(`Proxy returned ${response.status}`)
      }
      // allorigins wraps the upstream response body as a string in `.contents`,
      // so it must be parsed a second time.
      const proxyData = await response.json()
      const openaiData = JSON.parse(proxyData.contents)
      console.log('Real OpenAI data received:', openaiData)
      // Transform real OpenAI data to our StatusData shape (cap incidents at 3).
      const transformedData: StatusData = {
        status: mapOpenAIStatusClient(
          openaiData.status?.indicator || 'operational'
        ),
        lastUpdated: openaiData.page?.updated_at || new Date().toISOString(),
        incidents: (openaiData.incidents || []).slice(0, 3),
      }
      setStatusData(transformedData)
      setLastRefresh(new Date())
      console.log('✅ Real OpenAI status loaded successfully!')
    } catch (err) {
      console.error('Failed to fetch real status:', err)
      // Fallback 1: try an alternative CORS proxy.
      // NOTE(review): cors-anywhere.herokuapp.com normally requires opt-in
      // demo access, so this fallback may never succeed in production — confirm.
      try {
        console.log('Trying alternative proxy...')
        const altResponse = await fetch(
          `https://cors-anywhere.herokuapp.com/https://status.openai.com/api/v2/summary.json`
        )
        if (altResponse.ok) {
          const altData = await altResponse.json()
          setStatusData({
            status: mapOpenAIStatusClient(
              altData.status?.indicator || 'operational'
            ),
            lastUpdated: new Date().toISOString(),
            incidents: [],
          })
          setLastRefresh(new Date())
          console.log('✅ Alternative proxy worked!')
          return
        }
      } catch (altErr) {
        console.log('Alternative proxy also failed')
      }
      // Fallback 2: render an optimistic "operational" state alongside the
      // error notice rather than showing nothing.
      setError('Unable to fetch real-time status')
      setStatusData({
        status: 'operational' as const,
        lastUpdated: new Date().toISOString(),
        incidents: [],
      })
      setLastRefresh(new Date())
      console.log('Using fallback status')
    } finally {
      setLoading(false)
    }
  }, [])
  // Client-side mapping from the statuspage `status.indicator` value to our
  // StatusData['status'] union. Unknown indicators are treated as operational.
  const mapOpenAIStatusClient = (indicator: string): StatusData['status'] => {
    switch (indicator.toLowerCase()) {
      case 'none':
      case 'operational':
        return 'operational'
      case 'minor':
        return 'degraded'
      case 'major':
        return 'partial_outage'
      case 'critical':
        return 'major_outage'
      case 'maintenance':
        return 'under_maintenance'
      default:
        return 'operational' as const // Default to operational
    }
  }
  // Initial fetch, then refresh every 2 minutes for near-real-time updates.
  useEffect(() => {
    fetchStatus()
    const interval = setInterval(fetchStatus, 2 * 60 * 1000)
    return () => clearInterval(interval)
  }, [fetchStatus])
  // Manual refresh handler for the retry/refresh buttons.
  const handleRefresh = () => {
    fetchStatus()
  }
  // First load: show a spinner card until any data (even fallback) arrives.
  if (loading && !statusData) {
    return (
      <div className="bg-white dark:bg-gray-800 rounded-xl shadow-lg p-6 border border-gray-200 dark:border-gray-700">
        <div className="flex items-center justify-center space-x-3">
          <RefreshCw className="w-6 h-6 text-blue-500 animate-spin" />
          <span className="text-lg font-medium text-gray-700 dark:text-gray-300">
            Checking OpenAI Status...
          </span>
        </div>
      </div>
    )
  }
  // Error card with a manual retry button.
  if (error) {
    return (
      <div className="bg-white dark:bg-gray-800 rounded-xl shadow-lg p-6 border border-red-200 dark:border-red-800">
        <div className="flex items-center justify-between">
          <div className="flex items-center space-x-3">
            <AlertCircle className="w-6 h-6 text-red-500" />
            <div>
              <h3 className="text-lg font-semibold text-red-800 dark:text-red-300">
                Unable to Check Status
              </h3>
              <p className="text-red-600 dark:text-red-400">{error}</p>
            </div>
          </div>
          <button
            onClick={handleRefresh}
            className="px-4 py-2 bg-red-100 hover:bg-red-200 dark:bg-red-900/20 dark:hover:bg-red-900/40 text-red-700 dark:text-red-300 rounded-lg font-medium transition-colors"
          >
            Retry
          </button>
        </div>
      </div>
    )
  }
  // Main card: overall badge, per-product quick check, and data-source footer.
  // NOTE(review): the per-product rows all reuse the single overall status —
  // the API call above does not fetch per-component data; confirm intended.
  return (
    <div className="bg-white dark:bg-gray-800 rounded-xl shadow-lg p-6 border border-gray-200 dark:border-gray-700 my-6">
      <div className="flex items-center justify-between mb-4">
        <div className="flex items-center space-x-3">
          <StatusIcon status={statusData?.status || 'unknown'} />
          <div>
            <h3 className="text-xl font-bold text-gray-900 dark:text-gray-100">
              OpenAI Services
            </h3>
            <p className="text-sm text-gray-600 dark:text-gray-400">
              Last updated: {new Date(lastRefresh).toLocaleTimeString()}
            </p>
          </div>
        </div>
        <button
          onClick={handleRefresh}
          disabled={loading}
          className="p-2 hover:bg-gray-100 dark:hover:bg-gray-700 rounded-lg transition-colors disabled:opacity-50"
        >
          <RefreshCw
            className={`w-5 h-5 text-gray-600 dark:text-gray-400 ${loading ? 'animate-spin' : ''}`}
          />
        </button>
      </div>
      <div
        className={`inline-flex items-center px-4 py-2 rounded-full text-sm font-semibold border ${getStatusColor(statusData?.status || 'unknown')}`}
      >
        {getStatusText(statusData?.status || 'unknown')}
      </div>
      <div className="mt-4 p-4 bg-gray-50 dark:bg-gray-700 rounded-lg">
        <h4 className="font-semibold text-gray-900 dark:text-gray-100 mb-2">
          Quick Status Check
        </h4>
        <div className="grid grid-cols-1 sm:grid-cols-3 gap-3 text-sm">
          <div className="flex items-center justify-between">
            <span className="text-gray-600 dark:text-gray-400">ChatGPT</span>
            <StatusIcon status={statusData?.status || 'unknown'} />
          </div>
          <div className="flex items-center justify-between">
            <span className="text-gray-600 dark:text-gray-400">API</span>
            <StatusIcon status={statusData?.status || 'unknown'} />
          </div>
          <div className="flex items-center justify-between">
            <span className="text-gray-600 dark:text-gray-400">Playground</span>
            <StatusIcon status={statusData?.status || 'unknown'} />
          </div>
        </div>
      </div>
      <div className="mt-4 text-xs text-gray-500 dark:text-gray-400 text-center">
        {error
          ? 'Using fallback status • '
          : '🟢 Real-time data from OpenAI • '}
        Updated: {new Date(lastRefresh).toLocaleTimeString()}
        <br />
        <a
          href="/post/is-chatgpt-down-use-jan#-is-chatgpt-down"
          className="text-blue-500 hover:text-blue-600 dark:text-blue-400 dark:hover:text-blue-300 underline"
        >
          View detailed status guide
        </a>
      </div>
    </div>
  )
}

View File

@ -3,7 +3,7 @@ title: Installation
description: Install and deploy Jan Server on Kubernetes using minikube and Helm.
---
## Prerequisites
# Prerequisites
Jan Server requires the following tools installed on your system:

View File

@ -9,7 +9,7 @@
},
"desktop": {
"type": "page",
"title": "Jan Desktop & Mobile"
"title": "Jan Desktop"
},
"server": {
"type": "page",

View File

@ -42,6 +42,5 @@
},
"settings": "Settings",
"data-folder": "Jan Data Folder",
"troubleshooting": "Troubleshooting",
"privacy": "Privacy"
"troubleshooting": "Troubleshooting"
}

View File

@ -22,228 +22,52 @@ keywords:
import { Callout } from 'nextra/components'
import FAQBox from '@/components/FaqBox'
# Jan
![Jan's Cover Image](./_assets/jan-app-new.png)
## Jan's Goal
> We're working towards open superintelligence to make a viable open-source alternative to platforms like ChatGPT
and Claude that anyone can own and run.
## What is Jan Today
Jan is an open-source AI platform that runs on your hardware. We believe AI should be in the hands of many, not
controlled by a few tech giants.
Today, Jan is:
- **A desktop app** that runs AI models locally or connects to cloud providers
- **A model hub** making the latest open-source models accessible
- **A connector system** that lets AI interact with real-world tools via MCP
Tomorrow, Jan aims to be a complete ecosystem where open models rival or exceed closed alternatives.
# Overview
<Callout type="info">
We're building this with the open-source AI community, using the best available tools, and sharing everything
we learn along the way.
We're building [Open Superintelligence](https://jan.ai/handbook/open-superintelligence) together.
</Callout>
## The Jan Ecosystem
Jan is an open-source replacement for ChatGPT:
- AI Models: Use AI models with agentic capabilities
- [Open-source Models](/docs/desktop/manage-models): Run open-source locally
- [Cloud Models](/docs/desktop/remote-models/anthropic): Connect to remote models with API keys
- [Assistants](/docs/desktop/assistants): Create custom AI assistants
- [MCP Servers](/docs/desktop/mcp): Integrate MCP Servers to give agentic capabilities to AI models
- Jan Hub: Browse, install, and [manage models](/docs/desktop/manage-models)
- Local API Server: Expose an [OpenAI-compatible API](/docs/desktop/api-server) from your own machine or server
### Jan Apps
**Available Now:**
- **Desktop**: Full-featured AI workstation for Windows, Mac, and Linux
## Product Suite
**Coming Late 2025:**
- **Mobile**: Jan on your phone
- **Web**: Browser-based access at jan.ai
- **Server**: Self-hosted for teams
- **Extensions**: Browser extension for Chrome-based browsers
Jan is a full [product suite](https://en.wikipedia.org/wiki/Software_suite) that offers an alternative to Big AI:
- [Jan Desktop](/docs/desktop/quickstart): macOS, Windows, and Linux apps with offline mode
- [Jan Web](https://chat.jan.ai): Jan on browser, a direct alternative to chatgpt.com
- Jan Mobile: iOS and Android apps (Coming Soon)
- [Jan Server](/docs/server): deploy locally, in your cloud, or on-prem
- [Jan Models](/docs/models): Open-source models optimized for deep research, tool use, and reasoning
### Jan Model Hub
Making open-source AI accessible to everyone:
- **Easy Downloads**: One-click model installation
- **Jan Models**: Our own models optimized for local use
- **Jan-v1**: 4B reasoning model specialized in web search
- **Research Models**
- **Jan-Nano (32k/128k)**: 4B model for web search with MCP tools
- **Lucy**: 1.7B mobile-optimized for web search
- **Community Models**: Any GGUF from Hugging Face works in Jan
- **Cloud Models**: Connect your API keys for OpenAI, Anthropic, Gemini, and more
### Extending Jan (Coming Soon)
Jan helps you customize and align Open Superintelligence:
- Jan Connectors: Extend Jan with integrations
- Jan Studio: Fine-tune, align, and guardrail
- Evals: Benchmark models across industries, regions, and alignment dimensions
## Principles
### Jan Connectors Hub
Connect AI to the tools you use daily via [Model Context Protocol](./mcp):
- [Open source](https://www.redhat.com/en/blog/open-source-culture-9-core-principles-and-values): [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0) licensed, built in public.
- No [vendor lock-in](https://en.wikipedia.org/wiki/Vendor_lock-in): Switch freely between local and frontier models.
- [Right to Repair](https://en.wikipedia.org/wiki/Right_to_repair): Inspect, audit, and modify your AI stack.
**Creative & Design:**
- **Canva**: Generate and edit designs
**Data & Analysis:**
- **Jupyter**: Run Python notebooks
- **E2B**: Execute code in sandboxes
**Web & Search:**
- **Browserbase & Browser Use**: Browser automation
- **Exa, Serper, Perplexity**: Advanced web search
- **Octagon**: Deep research capabilities
**Productivity:**
- **Linear**: Project management
- **Todoist**: Task management
## Core Features
- **Run Models Locally**: Download any GGUF model from Hugging Face, use OpenAI's gpt-oss models,
or connect to cloud providers
- **OpenAI-Compatible API**: Local server at `localhost:1337` works with tools like
[Continue](./server-examples/continue-dev) and [Cline](https://cline.bot/)
- **Extend with MCP Tools**: Browser automation, web search, data analysis, and design tools, all
through natural language
- **Your Choice of Infrastructure**: Run on your laptop, self-host on your servers (soon), or use
cloud when you need it
## Philosophy
Jan is built to be user-owned:
- **Open Source**: Apache 2.0 license
- **Local First**: Your data stays on your device. Internet is optional
- **Privacy Focused**: We don't collect or sell user data. See our [Privacy Policy](./privacy)
- **No Lock-in**: Export your data anytime. Use any model. Switch between local and cloud
<Callout>
The best AI is the one you control. Not the one that others control for you.
</Callout>
## The Path Forward
### What Works Today
- Run powerful models locally on consumer hardware
- Connect to any cloud provider with your API keys
- Use MCP tools for real-world tasks
- Access transparent model evaluations
### What We're Building
- More specialized models that excel at specific tasks
- Expanded app ecosystem (mobile, web, extensions)
- Richer connector ecosystem
- An evaluation framework to build better models
### The Long-Term Vision
We're working towards open superintelligence where:
- Open models match or exceed closed alternatives
- Anyone can run powerful AI on their own hardware
- The community drives innovation, not corporations
- AI capabilities are owned by users, not rented
<Callout type="warning">
This is an ambitious goal without a guaranteed path. We're betting on the open-source community, improved
hardware, and better techniques, but we're honest that this is a journey, not a destination we've reached.
</Callout>
## Quick Start
1. [Download Jan](./quickstart) for your operating system
2. Choose a model - download locally or add cloud API keys
3. Start chatting or connect tools via MCP
4. Build with our [local API](./api-server)
Jan grows through contribution. It is shaped by many and belongs to everyone who uses it.
## Acknowledgements
Jan is built on the shoulders of giants:
- [Llama.cpp](https://github.com/ggerganov/llama.cpp) for inference
- [Model Context Protocol](https://modelcontextprotocol.io) for tool integration
- The open-source community that makes this possible
> Good artists copy, great artists steal.
## FAQs
Jan exists because we've borrowed, learned, and built on the work of others.
<FAQBox title="What is Jan?">
Jan is an open-source AI platform working towards a viable alternative to Big Tech AI. Today it's a desktop app that runs models locally or connects to cloud providers. Tomorrow it aims to be a complete ecosystem rivaling platforms like ChatGPT and Claude.
</FAQBox>
<FAQBox title="How is this different from other AI platforms?">
Other platforms are models behind APIs you rent. Jan is a complete AI ecosystem you own. Run any model, use real tools through MCP, keep your data private, and never pay subscriptions for local use.
</FAQBox>
<FAQBox title="What models can I use?">
**Jan Models:**
- Jan-Nano (32k/128k) - Research and analysis with MCP integration
- Lucy - Mobile-optimized search (1.7B)
- Jan-v1 - Reasoning and tool use (4B)
**Open Source:**
- OpenAI's gpt-oss models (120b and 20b)
- Any GGUF model from Hugging Face
**Cloud (with your API keys):**
- OpenAI, Anthropic, Mistral, Groq, and more
</FAQBox>
<FAQBox title="What are MCP tools?">
MCP (Model Context Protocol) lets AI interact with real applications. Instead of just generating text, your AI can create designs in Canva, analyze data in Jupyter, browse the web, and execute code - all through conversation.
</FAQBox>
<FAQBox title="Is Jan compatible with my system?">
**Supported OS**:
- [Windows 10+](/docs/desktop/install/windows#compatibility)
- [macOS 12+](/docs/desktop/install/mac#compatibility)
- [Linux (Ubuntu 20.04+)](/docs/desktop/install/linux)
**Hardware**:
- Minimum: 8GB RAM, 10GB storage
- Recommended: 16GB RAM, GPU (NVIDIA/AMD/Intel/Apple), 50GB storage
</FAQBox>
<FAQBox title="How realistic is 'open superintelligence'?">
Honestly? It's ambitious and uncertain. We believe the combination of rapidly improving open models, better consumer hardware, community innovation, and specialized models working together can eventually rival closed platforms. But this is a multi-year journey with no guarantees. What we can guarantee is that we'll keep building in the open, with the community, towards this goal.
</FAQBox>
<FAQBox title="What can Jan actually do today?">
Right now, Jan can:
- Run models like Llama, Mistral, and our own Jan models locally
- Connect to cloud providers if you want more power
- Use MCP tools to create designs, analyze data, browse the web, and more
- Work completely offline once models are downloaded
- Provide an OpenAI-compatible API for developers
</FAQBox>
<FAQBox title="Is Jan really free?">
**Local use**: Always free, no catches
**Cloud models**: You pay providers directly (we add no markup)
**Jan cloud**: Optional paid services coming 2025
The core platform will always be free and open source.
</FAQBox>
<FAQBox title="How does Jan protect privacy?">
- Runs 100% offline once models are downloaded
- All data stored locally in [Jan Data Folder](/docs/desktop/data-folder)
- No telemetry without explicit consent
- Open source code you can audit
<Callout type="warning">
When using cloud providers through Jan, their privacy policies apply.
</Callout>
</FAQBox>
<FAQBox title="Can I self-host Jan?">
Yes. Download directly or build from [source](https://github.com/menloresearch/jan). Jan Server for production deployments coming late 2025.
</FAQBox>
<FAQBox title="When will mobile/web versions launch?">
- **Jan Web**: Beta late 2025
- **Jan Mobile**: Late 2025
- **Jan Server**: Late 2025
All versions will sync seamlessly.
</FAQBox>
<FAQBox title="How can I contribute?">
- Code: [GitHub](https://github.com/menloresearch/jan)
- Community: [Discord](https://discord.gg/FTk2MvZwJH)
- Testing: Help evaluate models and report bugs
- Documentation: Improve guides and tutorials
</FAQBox>
<FAQBox title="Are you hiring?">
Yes! We love hiring from our community. Check [Careers](https://menlo.bamboohr.com/careers).
</FAQBox>
- [llama.cpp](https://github.com/ggerganov/llama.cpp) and [GGML](https://github.com/ggerganov/ggml) for efficient inference
- [r/LocalLLaMA](https://www.reddit.com/r/LocalLLaMA/) for ideas, feedback, and debate
- [Model Context Protocol](https://modelcontextprotocol.io) for MCP integrations
- [PostHog](https://posthog.com/docs) for docs inspiration
- The open-source community for contributions, bug reports, and improvements

View File

@ -1,11 +1,12 @@
---
title: Linux
description: Get started quickly with Jan, an AI chat application that runs 100% offline on your desktop & mobile (*coming soon*).
description: Download Jan on Linux to run AI models locally. Jan is a free, open-source ChatGPT alternative to run offline.
keywords:
[
Jan,
Customizable Intelligence, LLM,
local AI,
Jan on Linux,
privacy focus,
free and open source,
private and offline,
@ -18,15 +19,17 @@ keywords:
installation,
"desktop"
]
twitter:
card: summary_large_image
site: "@jandotai"
title: "Jan on Linux"
description: "Download Jan on Linux to run AI models locally. Jan is a free, open-source ChatGPT alternative to run offline."
---
import FAQBox from '@/components/FaqBox'
import { Tabs, Callout, Steps } from 'nextra/components'
import { Settings } from 'lucide-react'
# Linux Installation
Instructions for installing Jan on Linux.
@ -244,7 +247,7 @@ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64
### Step 2: Enable GPU Acceleration
1. Navigate to **Settings** (<Settings width={16} height={16} style={{display:"inline"}}/>) > **Local Engine** > **Llama.cpp**
2. Select appropriate backend in **llama-cpp Backend**. Details in our [guide](/docs/desktop/local-engines/llama-cpp).
2. Select appropriate backend in **llama-cpp Backend**. Details in our [llama.cpp guide](/docs/desktop/llama-cpp).
<Callout type="info">
CUDA offers better performance than Vulkan.

View File

@ -1,11 +1,11 @@
---
title: Mac
description: Get started quickly with Jan - a local AI that runs on your computer. Install Jan and pick your model to start chatting.
keywords:
description: Download Jan on Mac to run AI models locally. Jan is a free, open-source ChatGPT alternative to run offline.
[
Jan,
Customizable Intelligence, LLM,
local AI,
Jan on Mac,
privacy focus,
free and open source,
private and offline,
@ -18,6 +18,11 @@ keywords:
installation,
"desktop"
]
twitter:
card: summary_large_image
site: "@jandotai"
title: "Jan on Mac"
description: "Download Jan on Mac to run AI models locally. Jan is a free, open-source ChatGPT alternative to run offline."
---
import { Tabs } from 'nextra/components'

View File

@ -1,10 +1,11 @@
---
title: Windows
description: Run AI models locally on your Windows machine with Jan. Quick setup guide for local inference and chat.
description: Download Jan on Windows to run AI models locally. Jan is a free, open-source ChatGPT alternative to run offline.
keywords:
[
Jan,
Customizable Intelligence, LLM,
Jan on Windows,
local AI,
privacy focus,
free and open source,
@ -18,6 +19,11 @@ keywords:
installation,
"desktop"
]
twitter:
card: summary_large_image
site: "@jandotai"
title: "Jan on Windows"
description: "Download Jan on Windows to run AI models locally. Jan is a free, open-source ChatGPT alternative to run offline."
---
import { Tabs, Callout, Steps } from 'nextra/components'

View File

@ -59,7 +59,7 @@ The model and its different model variants are fully supported by Jan.
## Using Jan-Nano-32k
**Step 1**
Download Jan from [here](https://jan.ai/docs/desktop/).
Download Jan from [here](https://jan.ai/download/).
**Step 2**
Go to the Hub Tab, search for Jan-Nano-Gguf, and click on the download button to the best model size for your system.
@ -118,8 +118,8 @@ Here are some example queries to showcase Jan-Nano's web search capabilities:
- 4xA6000 for vllm server (inferencing)
- What frontend should I use?
- Jan Beta (recommended) - Minimalistic and polished interface
- Download link: https://jan.ai/docs/desktop/beta
- Jan (recommended)
- Download link: https://jan.ai/download
- Getting Jinja errors in LM Studio?
- Use Qwen3 template from other LM Studio compatible models

View File

@ -90,7 +90,7 @@ Refer to the following documentation to install the Tabby extension on your favo
Tabby offers an [Answer Engine](https://tabby.tabbyml.com/docs/administration/answer-engine/) on the homepage,
which can leverage the Jan LLM and related contexts like code, documentation, and web pages to answer user questions.
Simply open the Tabby homepage at [localhost:8080](http://localhost:8080) and ask your questions.
Simply open the Tabby homepage at http://localhost:8080 and ask your questions.
### IDE Chat Sidebar

View File

@ -108,7 +108,7 @@ You can help improve Jan by sharing anonymous usage data:
2. You can change this setting at any time
<Callout type="info">
Read more about that we collect with opt-in users at [Privacy](/docs/desktop/privacy).
Read more about what we collect from opt-in users at [Privacy](/privacy).
</Callout>
<br/>
@ -141,7 +141,7 @@ This action cannot be undone.
### Jan Data Folder
Jan stores your data locally in your own filesystem in a universal file format. See detailed [Jan Folder Structure](docs/data-folder#folder-structure).
Jan stores your data locally in your own filesystem in a universal file format. See detailed [Jan Folder Structure](/docs/desktop/data-folder#directory-structure).
**1. Open Jan Data Folder**

View File

@ -328,14 +328,14 @@ This command ensures that the necessary permissions are granted for Jan's instal
When you start a chat with a model and encounter a **Failed to Fetch** or **Something's Amiss** error, here are some possible solutions to resolve it:
**1. Check System & Hardware Requirements**
- Hardware dependencies: Ensure your device meets all [hardware requirements](docs/desktop/troubleshooting#step-1-verify-hardware-and-system-requirements)
- OS: Ensure your operating system meets the minimum requirements ([Mac](/docs/desktop/install/mac#minimum-requirements), [Windows](/docs/desktop/install/windows#compatibility), [Linux](/docs/desktop/install/linux#compatibility))
- Hardware dependencies: Ensure your device meets all [hardware requirements](troubleshooting)
- OS: Ensure your operating system meets the minimum requirements ([Mac](https://www.jan.ai/docs/desktop/install/mac#minimum-requirements), [Windows](/windows#compatibility), [Linux](https://www.jan.ai/docs/desktop/install/linux#compatibility))
- RAM: Choose models that use less than 80% of your available RAM
- For 8GB systems: Use models under 6GB
- For 16GB systems: Use models under 13GB
**2. Check Model Parameters**
- In **Engine Settings** in right sidebar, check your `ngl` ([number of GPU layers](/docs/desktop/models/model-parameters#engine-parameters)) setting to see if it's too high
- In **Engine Settings** in right sidebar, check your `ngl` ([number of GPU layers](/docs/desktop/model-parameters)) setting to see if it's too high
- Start with a lower NGL value and increase gradually based on your GPU memory
**3. Port Conflicts**

View File

@ -1,5 +1,4 @@
{
"index": "Overview",
"open-superintelligence": "Open Superintelligence",
"betting-on-open-source": "Betting on Open-Source"
"why": "Why does Jan exist?"
}

View File

@ -18,31 +18,6 @@ Jan's Handbook is a [living document](https://en.wikipedia.org/wiki/Living_docum
## Why does Jan exist?
### [Open Superintelligence](/handbook/open-superintelligence)
Building superintelligence that belongs to everyone, not just a few tech giants. We believe the future of AI should be open, accessible, and owned by the people who use it.
### [Betting on Open-Source](/handbook/betting-on-open-source)
- [Open Superintelligence](/handbook/open-superintelligence) - Building superintelligence that belongs to everyone, not just a few tech giants. We believe the future of AI should be open, accessible, and owned by the people who use it.
- [Betting on Open-Source](/handbook/betting-on-open-source)
Why we're betting on open-source as the future of AI and technology. Open-source has consistently won in the long term, and AI will be no different.
---
## Quick Links
- **For the curious**: Start with [Open Superintelligence](/handbook/open-superintelligence)
- **For developers**: Learn about [Betting on Open-Source](/handbook/betting-on-open-source)
- **For contributors**: Check out our [GitHub](https://github.com/menloresearch/jan) and [Discord](https://discord.gg/FTk2MvZwJH)
## Our North Star
We're building superintelligence that:
- **Works anywhere**: From your laptop to your data center
- **Belongs to you**: Download it, own it, modify it
- **Scales infinitely**: One person or ten thousand, same platform
- **Improves constantly**: Community-driven development
This isn't just about making AI accessible. It's about ensuring the most transformative technology in human history can be owned by those who use it.
---
_"The future of AI isn't about choosing between local or cloud. It's about having both, and everything in between, working perfectly together."_

View File

@ -0,0 +1,4 @@
{
"open-superintelligence": "Why Jan exists",
"betting-on-open-source": "Why we're betting on open-source"
}

View File

@ -1,11 +1,11 @@
---
title: "Why Open-Source"
title: "Why Jan is betting on Open-Source"
description: "Why we're betting on open-source."
---
# Why Open-Source
AI today is concentrated in the hands of a few companies. They ask for trust, while keeping the levers of control hidden. We think that's a mistake.
AI today is concentrated in the hands of [a few companies](https://stratechery.com/2025/tech-philosophy-and-ai-opportunity/). They ask for trust, while keeping the levers of control hidden. We think that's a mistake.
When you depend on one vendor, your future is tied to their roadmap, their politics, their survival. If they get acquired, pivot, or shut down; you're stuck.
@ -16,9 +16,9 @@ Depending on a closed vendor means giving up more than flexibility:
AI has become critical infrastructure. Nations, enterprises, even small teams rely on it to think and decide. And yet, control sits with a few vendors who decide the terms of access. We believe that's not control. That's dependency dressed up as convenience. One of the most powerful inventions is being steered by a handful of executives. Their values shape what billions can say, build, or ask.
*This cannot stand. It must be changed.*
This can't stand. It must be changed.
## Jan's Bet
## How we see
We don't believe the future of AI should be dictated by a few firms in San Francisco, Beijing, or anywhere else.
@ -30,4 +30,4 @@ That's why we're building Jan, a full product suite:
- Jan Server
- Hub, Store, evals, guardrails, the ecosystem around it
The goal is to be the open-source replacement for ChatGPT and other BigAI products, with models and tools you can run, own, and trust.
The goal is to be the [open-source replacement for ChatGPT](https://jan.ai/) and other BigAI products, with models and tools you can run, own, and trust.

View File

@ -5,9 +5,13 @@ description: "Short answer: Open Superintelligence."
# Why does Jan exist?
> Short answer: Open Superintelligence.
import { Callout } from 'nextra/components'
In 1879, Edison lit a single street in [Menlo Park](https://en.wikipedia.org/wiki/Menlo_Park,_California). What mattered wasn't the bulb. It was that power could reach homes, schools, and factories.
<Callout type="info">
Short answer: Open Superintelligence.
</Callout>
In 1879, [Edison](https://en.wikipedia.org/wiki/Thomas_Edison) lit a single street in [Menlo Park](https://en.wikipedia.org/wiki/Menlo_Park,_California). What mattered wasn't the bulb. It was that power could reach homes, schools, and factories.
Electricity changed the world only when it became universal. Standard plugs, cheap generation, lines everywhere. People stopped talking about electricity and started using light, cold chains, and machines.
@ -19,13 +23,13 @@ Jan exists to push intelligence toward the first path: Open Superintelligence yo
> The world is made, and can be remade.
Every industrial wave redefined critical aspects of our daily lives:
- Factories introduced shift clocks and wage rhythms
- Steam gave way to electricity and standardized parts
- Rail, telegraph, and later networks changed how decisions travel
- Each wave pulled new bargains into being skills, schools, safety nets, labor law
Every industrial wave redefined new defaults of our daily lives:
- [Factories](https://en.wikipedia.org/wiki/Factory) created the modern job
- [Electricity](https://en.wikipedia.org/wiki/Electricity) created the modern home
- [Railroads](https://en.wikipedia.org/wiki/Rail_transport#History) and [telegraphs](https://en.wikipedia.org/wiki/Telegraphy#History) created the modern nation
- [The Internet](https://en.wikipedia.org/wiki/Internet) created the modern world
So what we're interested in is who is going to write the new defaults and share in the gains.
Open Superintelligence will create what comes next. What we're interested in is who is going to write the new defaults and share in the gains.
Technology doesnt choose its path, people do. Power accrues to whoever designs, deploys, and profits from the system:
- If intelligence is closed and centralized, the gains concentrate

Binary file not shown.

After

Width:  |  Height:  |  Size: 328 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 230 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 293 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 402 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 612 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 230 KiB

View File

@ -17,7 +17,7 @@ Jan now supports [NVIDIA TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) i
We've been excited for TensorRT-LLM for a while, and [had a lot of fun implementing it](https://github.com/menloresearch/nitro-tensorrt-llm). As part of the process, we've run some benchmarks, to see how TensorRT-LLM fares on consumer hardware (e.g. [4090s](https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/), [3090s](https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/)) we commonly see in the [Jan's hardware community](https://discord.com/channels/1107178041848909847/1201834752206974996).
<Callout type="info" >
**Give it a try!** Jan's [TensorRT-LLM extension](/docs/desktop/built-in/tensorrt-llm) is available in Jan v0.4.9 and up ([see more](/docs/desktop/built-in/tensorrt-llm)). We precompiled some TensorRT-LLM models for you to try: `Mistral 7b`, `TinyLlama-1.1b`, `TinyJensen-1.1b` 😂
**Give it a try!** Jan's TensorRT-LLM extension is available in Jan v0.4.9. We precompiled some TensorRT-LLM models for you to try: `Mistral 7b`, `TinyLlama-1.1b`, `TinyJensen-1.1b` 😂
Bugs or feedback? Let us know on [GitHub](https://github.com/menloresearch/jan) or via [Discord](https://discord.com/channels/1107178041848909847/1201832734704795688).
</Callout>

View File

@ -0,0 +1,120 @@
---
title: "ChatGPT alternatives that actually replace it"
description: "See the best ChatGPT alternatives in 2025. We've listed tools that are alternatives to ChatGPT."
tags: AI, ChatGPT alternative, ChatGPT alternatives, alternative to chatgpt, Jan, local AI, privacy, open source, offline AI
categories: guides
date: 2025-09-29
ogImage: _assets/chatgpt-alternative-jan.jpeg
twitter:
card: summary_large_image
site: "@jandotai"
title: "ChatGPT alternatives that actually replace it."
description: "See the best ChatGPT alternatives in 2025. We've listed tools that are alternatives to ChatGPT."
image: _assets/chatgpt-alternative-jan.jpeg
---
import { Callout } from 'nextra/components'
import CTABlog from '@/components/Blog/CTA'
# Best ChatGPT Alternatives
ChatGPT works well, but it always needs internet, has usage limits, and isn't private.
If you want options that fit different needs, offline use, privacy, or specialized tasks, see the best alternatives to ChatGPT available for specific use cases.
## Comparison: ChatGPT Alternatives
| ChatGPT Alternative | Offline | Key Strength | Best For |
| ------------------------- | ------- | ---------------------------- | -------------------------- |
| **[Jan](https://jan.ai)** | Yes | Runs Cloud + Offline, open-source | Best overall ChatGPT replacement |
| Claude | - | Strong writing and reasoning | Creative text & code |
| Gemini | - | Integrated with Google | Research tasks, image generation |
| Perplexity | - | Fast, with cited answers | Research and fact-checking |
| LM Studio | Yes | Runs open models on PC | Coding and experiments |
### Jan is the best ChatGPT alternative
![Use Jan to chat with AI models without internet access](./_assets/chatgpt-alternative-jan.jpeg)
*Jan as an open-source alternative to ChatGPT*
Jan is the most complete ChatGPT alternative available today. It enables:
- Use AI online & offline (even on a plane)
- Agentic actions supported
- MCP servers supported for tools
Unlike ChatGPT, it runs on your computer, which means:
- Offline AI capabilities (see [Offline ChatGPT post](https://www.jan.ai/post/offline-chatgpt-alternative) for details)
- 100% private
- Open-source & Free
<Callout> Jan is an [open-source replacement for ChatGPT.](https://www.jan.ai/) </Callout>
### Claude is the most notable online alternative
![Claude](./_assets/claude.jpeg)
Claude has become the main online rival to ChatGPT. It stands out for writing, reasoning, and coding.
- Handles very long documents and context well
- Strong for essays, research papers, and structured text
- Popular with developers for code explanations and debugging
- Cloud-only, no offline mode
- Filters outputs heavily, sometimes too restrictive
### Gemini is Google's integrated alternative
![Gemini](./_assets/gemini.jpeg)
Gemini ties directly into Googles apps and search. Great for users in the Google ecosystem.
- Built into Gmail, Docs, and Google Search
- Good for real-time research and fact-checking
- Strong at pulling web context into answers
- Requires Google account, fully online
- Privacy concerns: all tied to Google services
### Perplexity is the research-focused alternative
![Perplexity](./_assets/perplexity.jpeg)
Perplexity is built for fact-checking and quick research, not creativity.
- Always cites sources for answers
- Strong at summarizing current web info
- Very fast for Q&A style use
- Limited in creativity and open-ended writing
- Cloud-only, daily free usage caps
### LM Studio is the experimental alternative
![LM Studio](./_assets/lm-studio.jpeg)
LM Studio is not a ChatGPT replacement but a local tool for running open models.
- Lets you test and run open-source models on PC
- Offline by default, works without internet
- Flexible setup for developers and technical users
- Requires decent hardware (RAM/VRAM)
LM Studio is not beginner-friendly compared to Jan.
## Choosing the right ChatGPT alternative for you:
- Best overall replacement: [Jan](https://www.jan.ai/)
- For writing & storytelling: Claude
- For research & web knowledge: Perplexity or Gemini
- For productivity & office work: Microsoft Copilot
- For experimentation with open-source models for technical people: LM Studio
Most ChatGPT alternatives are still cloud-based and limited. If you want full privacy, offline use, and no restrictions, the best ChatGPT alternative is [Jan](https://www.jan.ai/).
### Can I use ChatGPT offline?
No. ChatGPT always requires internet. For offline AI, use Jan.
### Whats the best free ChatGPT alternative?
Jan is free, open-source, and runs offline. Others like Claude or Perplexity have limited free tiers but are cloud-based.
### Which ChatGPT alternative is best for writing?
Claude is strong for essays, reports, and structured writing. You could use [open-source models](https://www.jan.ai/post/run-ai-models-locally) in Jan too.
### Which ChatGPT alternative is best for research?
Perplexity and Gemini pull real-time web data with citations.
### Whats the closest full replacement to ChatGPT?
Jan. It runs locally, works offline, and feels like ChatGPT without restrictions.

View File

@ -125,8 +125,8 @@ any version with Model Context Protocol in it (>`v0.6.3`).
**The Key: Assistants + Tools**
Running deep research in Jan can be accomplished by combining [custom assistants](https://jan.ai/docs/assistants)
with [MCP search tools](https://jan.ai/docs/desktop/mcp-examples/search/exa). This pairing allows any model—local or
Running deep research in Jan can be accomplished by combining [custom assistants](https://jan.ai/docs/desktop/assistants)
with [MCP search tools](https://jan.ai/docs/mcp-examples/search/exa). This pairing allows any model—local or
cloud—to follow a systematic research workflow, to create a report similar to that of other providers, with some
visible limitations (for now).

View File

@ -0,0 +1,124 @@
---
title: "If ChatGPT is down, switch to AI that never goes down"
description: "Check if ChatGPT down right now, and learn how to use AI that never goes down."
tags: AI, ChatGPT down, ChatGPT alternative, Jan, local AI, offline AI, ChatGPT at capacity
categories: guides
date: 2025-09-30
ogImage: _assets/is-chatgpt-down.jpg
twitter:
card: summary_large_image
site: "@jandotai"
title: "Realtime Status Checker: Is ChatGPT down?"
description: "Check if ChatGPT is down right now with our real-time status checker, and learn how to use AI that never goes offline."
image: _assets/is-chatgpt-down.jpg
---
import { Callout } from 'nextra/components'
import CTABlog from '@/components/Blog/CTA'
import { OpenAIStatusChecker } from '@/components/OpenAIStatusChecker'
# If ChatGPT is down, switch to AI that never goes down
If you're seeing ChatGPT is down, it could be a good signal to switch to [Jan](https://www.jan.ai/), AI that never goes down.
## 🔴 Realtime Status Checker: Is ChatGPT down?
<Callout>
This live tracker shows if ChatGPT is down right now.
</Callout>
<OpenAIStatusChecker />
### ChatGPT Status Indicators
<div className="grid grid-cols-1 md:grid-cols-2 gap-4 my-6">
<div className="p-4 rounded-lg border border-green-200 bg-green-50 dark:bg-green-900/20 dark:border-green-800">
<div className="flex items-center gap-2 mb-2">
<div className="w-3 h-3 bg-green-500 rounded-full"></div>
<span className="font-semibold text-green-800 dark:text-green-300">Operational</span>
</div>
<p className="text-sm text-green-700 dark:text-green-400">All systems are functioning normally with no reported issues.</p>
</div>
<div className="p-4 rounded-lg border border-yellow-200 bg-yellow-50 dark:bg-yellow-900/20 dark:border-yellow-800">
<div className="flex items-center gap-2 mb-2">
<div className="w-3 h-3 bg-yellow-500 rounded-full"></div>
<span className="font-semibold text-yellow-800 dark:text-yellow-300">Degraded Performance</span>
</div>
<p className="text-sm text-yellow-700 dark:text-yellow-400">Services are running but may be slower than usual.</p>
</div>
<div className="p-4 rounded-lg border border-orange-200 bg-orange-50 dark:bg-orange-900/20 dark:border-orange-800">
<div className="flex items-center gap-2 mb-2">
<div className="w-3 h-3 bg-orange-500 rounded-full"></div>
<span className="font-semibold text-orange-800 dark:text-orange-300">Partial Outage</span>
</div>
<p className="text-sm text-orange-700 dark:text-orange-400">Some features or regions may be experiencing issues.</p>
</div>
<div className="p-4 rounded-lg border border-red-200 bg-red-50 dark:bg-red-900/20 dark:border-red-800">
<div className="flex items-center gap-2 mb-2">
<div className="w-3 h-3 bg-red-500 rounded-full"></div>
<span className="font-semibold text-red-800 dark:text-red-300">Major Outage</span>
</div>
<p className="text-sm text-red-700 dark:text-red-400">Significant service disruption affecting most users.</p>
</div>
</div>
## Skip the downtime with Jan
When ChatGPT is down, Jan keeps working. Jan is an open-source ChatGPT alternative that runs on your computer - no servers, no outages, no waiting.
![Jan running when ChatGPT is down](./_assets/chatgpt-alternative-jan.jpeg)
*Jan works even when ChatGPT doesn't.*
### Why Jan never goes down:
- **Runs locally** - No dependency on external servers
- **Always available** - Works offline, even on flights
- **No capacity limits** - Uses your computer's resources
- **100% private** - Your conversations stay on your device
### Get started in 3 mins:
1. Download Jan: [jan.ai](https://jan.ai)
2. Install a model: Choose from Jan, Qwen, or other top models
3. Start chatting: Similar design to ChatGPT, but always available if you use local models
<Callout type="info">
**Pro tip:** Keep both ChatGPT and Jan. You'll never lose productivity to outages again.
</Callout>
Jan runs AI models locally, so you don't need internet access. That means Jan is unaffected when ChatGPT is down.
### Why does ChatGPT go down?
There could be multiple reasons:
- Too many users at once
- Data center or API downtime
- Planned or unplanned updates
- Limited in some locations
ChatGPT depends on OpenAIs servers. If those go down, so does ChatGPT. Jan users aren't affected by ChatGPT's outages.
### Common ChatGPT Errors
When ChatGPT experiences issues, you might see these error messages:
- "ChatGPT is at capacity right now": Too many users online, try again later
- "Error in message stream": Connection problems with OpenAI servers
- "Something went wrong": General server error, refresh and retry
- "Network error": Internet connectivity issues on your end or OpenAI's
- "Rate limit exceeded": Too many requests sent, wait before trying again
- "This model is currently overloaded": High demand for specific model
## Quick answers about ChatGPT status
### Is ChatGPT down today?
Check the ChatGPT realtime status above. If ChatGPT is down, you'll see it here.
### Why is ChatGPT down?
Usually server overload, maintenance, or outages at OpenAI.
### What does “ChatGPT is at capacity” mean?
Too many users are online at the same time. Youll need to wait or switch to Jan instead.
### Is ChatGPT shutting down?
No, ChatGPT isnt shutting down. Outages are temporary.
### Can I use ChatGPT offline?
No. ChatGPT always requires internet. For [offline AI](https://www.jan.ai/post/offline-chatgpt-alternative), use [Jan](https://jan.ai).

View File

@ -1,7 +1,7 @@
---
title: "Offline ChatGPT: You can't run ChatGPT offline, do this instead"
description: "Learn how to use AI offline with Jan - a free, open-source alternative to ChatGPT that works 100% offline on your computer."
tags: AI, ChatGPT alternative, offline AI, Jan, local AI, privacy
description: "Use offline AI with Jan, a free & open-source alternative to ChatGPT that runs 100% offline."
tags: AI, chatgpt offline, ChatGPT alternative, offline AI, Jan, local AI, privacy
categories: guides
date: 2025-02-08
ogImage: _assets/offline-chatgpt-alternatives-jan.jpg
@ -9,7 +9,7 @@ twitter:
card: summary_large_image
site: "@jandotai"
title: "Offline ChatGPT: You can't run ChatGPT offline, do this instead"
description: "Want to use ChatGPT offline? Learn how to run AI models locally with Jan - free, open-source, and works without internet."
description: "Use offline AI with Jan, a free & open-source alternative to ChatGPT that runs 100% offline."
image: _assets/offline-chatgpt-alternatives-jan.jpg
---
import { Callout } from 'nextra/components'
@ -17,16 +17,20 @@ import CTABlog from '@/components/Blog/CTA'
# Offline ChatGPT: You can't run ChatGPT offline, do this instead
ChatGPT is a cloud-based service that requires internet access. However, it's not the only way to use AI. You can run AI models offline on your device with [Jan](https://jan.ai/). It's completely free, open-source, and gives you 100% offline capability. You can even use AI on a plane!
ChatGPT itself can't run offline. You can't download it. It always needs internet, because it runs on OpenAI's servers.
<Callout>
If you want offline AI, you need local models. The easiest way: [Jan, an open-source replacement of ChatGPT](https://jan.ai/). It's free, open-source, and works 100% offline. With Jan, you can even use AI on a plane.
<Callout type="info">
**Quick Summary:**
- ChatGPT always needs internet - it can't run offline
- Jan lets you run AI models 100% offline on your computer
- It's free and open-source
- Works on Mac, Windows, and Linux
- ChatGPT always needs internet - no offline mode
- Use Jan to use AI models 100% offline
- It's free & open-source, and works on Mac, Windows, and Linux
</Callout>
## How to use AI offline?
Offline AI means the model runs on your computer. So no internet needed, 100% private, and data never leaves your device. With Jan you can run offline AI models locally.
## Jan as an offline ChatGPT alternative
![Use Jan to chat with AI models without internet access](./_assets/offline-chatgpt-alternative-ai-without-internet.jpg)
@ -42,20 +46,22 @@ Go to [jan.ai](https://jan.ai) and download the version for your computer (Mac,
### 2. Download an AI model
You'll need an AI model to use AI offline, so download a model from Jan. Once it's on your computer, you don't need internet anymore.
You'll need an AI model to use AI offline, so download a model from Jan. Once it's on your computer, you don't need internet anymore. You can also use GPT models via Jan - check [running gpt-oss locally](https://www.jan.ai/post/run-gpt-oss-locally) post to see it.
![Choose an AI model that works offline](./_assets/jan-model-selection.jpg "Find the perfect AI model for offline use")
*Select an AI model that matches your needs and computer capabilities*
<Callout>
**Which model should you choose?**
### Which model should you choose?
- For most computers: Try Mistral 7B or DeepSeek - they're similar to ChatGPT 3.5
- For older computers: Use smaller 3B models
- For gaming PCs: You can try larger 13B models
<Callout type="info">
Don't worry about choosing - Jan will automatically recommend models that work well on your computer.
</Callout>
If you'd like to learn more about local AI, check [how to run AI models locally as a beginner](https://www.jan.ai/post/run-ai-models-locally) article.
### 3. Start using AI offline
![Chat with AI offline using Jan's interface](./_assets/run-ai-locally-with-jan.jpg "Experience ChatGPT-like interactions without internet")
@ -71,12 +77,7 @@ Once downloaded, you can use AI anywhere, anytime:
## How to chat with your docs in Jan?
To chat with your docs in Jan, you need to activate experimental mode.
![Activate experimental mode in Jan's settings](./_assets/chat-with-your-docs-offline-ai.jpg "Enable experimental features to chat with your documents")
*Turn on experimental mode in settings to chat with your docs*
After activating experimental mode, simply add your files and ask questions about them.
Simply add your files and ask questions about them.
![Chat with your documents using Jan](./_assets/chat-with-docs-prompt.jpg "Ask questions about your documents offline")
*Chat with your documents privately - no internet needed*
@ -97,17 +98,17 @@ Local AI makes possible offline AI use, so Jan is going to be your first step to
4. **No Server Issues:** No more "ChatGPT is at capacity"
5. **Your Choice of Models:** Use newer models as they come out
**"Is it really free? What's the catch?"**
### "Is Jan really free? What's the catch?"
Yes, it's completely free and open source. Jan is built by developers who believe in making AI accessible to everyone.
**"How does it compare to ChatGPT?"**
### How does Jan compare to ChatGPT?
Modern open-source models like DeepSeek and Mistral are very capable. While they might not match GPT-4, they're perfect for most tasks and getting better every month.
**"Do I need a powerful computer?"**
### "Do I need a powerful computer?"
If your computer is from the last 5 years, it will likely work fine. You need about 8GB of RAM and 10GB of free space for comfortable usage.
**"What about my privacy?"**
Everything stays on your computer. Your conversations, documents, and data never leave your device unless you choose to share them.
### "What about my privacy?"
Everything stays on your computer with Jan. Your conversations, documents, and data never leave your device unless you choose to share them.
Want to learn more about the technical side? Check our detailed [guide on running AI models locally](/post/run-ai-models-locally). It's not required to [use AI offline](https://jan.ai/) but helps understand how it all works.
@ -116,3 +117,20 @@ Want to learn more about the technical side? Check our detailed [guide on runnin
<Callout type="info">
[Join our Discord community](https://discord.gg/Exe46xPMbK) for support and tips on using Jan as your offline ChatGPT alternative.
</Callout>
### FAQ
#### Can I download ChatGPT for offline use?
No. ChatGPT is cloud-only.
#### How to use ChatGPT offline?
You can't. ChatGPT has no offline mode. Use Jan instead for a ChatGPT-like offline experience.
#### Does ChatGPT have internet access?
Yes. It runs in the cloud.
#### What's the best way to use AI offline?
Download Jan and run models like Mistral, DeepSeek, or GPT-OSS locally.
#### What's GPT offline?
OpenAI has open-source models you can run locally but not via ChatGPT. One of them is [gpt-oss](https://www.jan.ai/post/run-gpt-oss-locally) and you can run it via Jan.

View File

@ -22,16 +22,16 @@
},
"devDependencies": {
"@janhq/core": "workspace:*",
"typescript": "^5.3.3",
"vite": "^5.0.0",
"vitest": "^2.0.0",
"zustand": "^5.0.8"
"typescript": "5.9.2",
"vite": "5.4.20",
"vitest": "2.1.9",
"zustand": "5.0.8"
},
"peerDependencies": {
"@janhq/core": "*",
"zustand": "^5.0.0"
"zustand": "5.0.3"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.17.5"
"@modelcontextprotocol/sdk": "1.17.5"
}
}

View File

@ -16,6 +16,9 @@ import {
import { janApiClient, JanChatMessage } from './api'
import { janProviderStore } from './store'
// Jan models support tools via MCP
const JAN_MODEL_CAPABILITIES = ['tools'] as const
export default class JanProviderWeb extends AIEngine {
readonly provider = 'jan'
private activeSessions: Map<string, SessionInfo> = new Map()
@ -24,6 +27,9 @@ export default class JanProviderWeb extends AIEngine {
console.log('Loading Jan Provider Extension...')
try {
// Check and clear invalid Jan models (capabilities mismatch)
this.validateJanModelsLocalStorage()
// Initialize authentication and fetch models
await janApiClient.initialize()
console.log('Jan Provider Extension loaded successfully')
@ -35,6 +41,54 @@ export default class JanProviderWeb extends AIEngine {
super.onLoad()
}
// Verify Jan models capabilities in localStorage
/**
 * Validates the persisted `model-provider` entry in localStorage: if any
 * cached Jan model's capabilities no longer match JAN_MODEL_CAPABILITIES,
 * the entry is cleared and the page reloaded so state is rebuilt cleanly.
 * Any failure is logged and swallowed so startup is never blocked.
 */
private validateJanModelsLocalStorage() {
  const storageKey = 'model-provider'
  try {
    console.log("Validating Jan models in localStorage...")
    const raw = localStorage.getItem(storageKey)
    if (!raw) return

    const parsed = JSON.parse(raw)
    const providers = parsed?.state?.providers
    if (!providers) return

    // Scan every cached Jan model; stop at the first capability mismatch.
    const expected = JSON.stringify(JAN_MODEL_CAPABILITIES)
    let hasInvalidModel = false
    outer: for (const provider of providers) {
      if (provider.provider !== 'jan' || !provider.models) continue
      for (const model of provider.models) {
        console.log(`Checking Jan model: ${model.id}`, model.capabilities)
        if (JSON.stringify(model.capabilities) !== expected) {
          hasInvalidModel = true
          console.log(`Found invalid Jan model: ${model.id}, clearing localStorage`)
          break outer
        }
      }
    }
    if (!hasInvalidModel) return

    // Drop the stale entry; if removal did not stick, overwrite it with an
    // empty provider list instead (preserving the persisted schema version).
    localStorage.removeItem(storageKey)
    if (localStorage.getItem(storageKey)) {
      localStorage.setItem(
        storageKey,
        JSON.stringify({ state: { providers: [] }, version: parsed.version || 3 })
      )
    }
    console.log('Cleared model-provider from localStorage due to invalid Jan capabilities')
    // Reload so the app reinitializes from the now-clean persisted state.
    window.location.reload()
  } catch (error) {
    console.error('Failed to check Jan models:', error)
  }
}
override async onUnload() {
console.log('Unloading Jan Provider Extension...')
@ -64,7 +118,7 @@ export default class JanProviderWeb extends AIEngine {
path: undefined, // Remote model, no local path
owned_by: model.owned_by,
object: model.object,
capabilities: ['tools'], // Jan models support both tools via MCP
capabilities: [...JAN_MODEL_CAPABILITIES],
}
: undefined
)
@ -85,7 +139,7 @@ export default class JanProviderWeb extends AIEngine {
path: undefined, // Remote model, no local path
owned_by: model.owned_by,
object: model.object,
capabilities: ['tools'], // Jan models support both tools via MCP
capabilities: [...JAN_MODEL_CAPABILITIES],
}))
} catch (error) {
console.error('Failed to list Jan models:', error)
@ -332,6 +386,12 @@ export default class JanProviderWeb extends AIEngine {
)
}
/**
 * Not supported: Jan API models are remote, so there is nothing to update
 * locally.
 * @param modelId - id of the model the caller tried to update
 * @param model - ignored partial model payload
 * @throws Error always, naming the rejected model id
 */
async update(modelId: string, model: Partial<modelInfo>): Promise<void> {
  const message = `Update operation not supported for remote Jan API model: ${modelId}`
  throw new Error(message)
}
async import(modelId: string, _opts: ImportOptions): Promise<void> {
throw new Error(
`Import operation not supported for remote Jan API model: ${modelId}`

View File

@ -48,6 +48,18 @@ export class JanAuthService {
* Called on app load to check existing session
*/
async initialize(): Promise<void> {
// Ensure refreshtoken is valid (in case of expired session or secret change)
try {
await refreshToken()
} catch (error) {
console.log('Failed to refresh token on init:', error)
// If refresh fails, logout to clear any invalid state
console.log('Logging out and clearing auth state to clear invalid session...')
await logoutUser()
this.clearAuthState()
this.authBroadcast.broadcastLogout()
}
// Authentication state check
try {
if (!this.isAuthenticated()) {
// Not authenticated - ensure guest access

View File

@ -12,11 +12,11 @@
"build:publish": "rimraf *.tgz --glob || true && yarn build && npm pack && cpx *.tgz ../../pre-install"
},
"devDependencies": {
"cpx": "^1.5.0",
"rimraf": "^6.0.1",
"cpx": "1.5.0",
"rimraf": "6.0.1",
"rolldown": "1.0.0-beta.1",
"run-script-os": "^1.1.6",
"typescript": "^5.3.3"
"run-script-os": "1.1.6",
"typescript": "5.9.2"
},
"dependencies": {
"@janhq/core": "../../core/package.tgz",

View File

@ -15,11 +15,11 @@
"./main": "./dist/module.js"
},
"devDependencies": {
"cpx": "^1.5.0",
"rimraf": "^6.0.1",
"cpx": "1.5.0",
"rimraf": "6.0.1",
"rolldown": "1.0.0-beta.1",
"ts-loader": "^9.5.0",
"typescript": "^5.7.2"
"typescript": "5.9.2"
},
"dependencies": {
"@janhq/core": "../../core/package.tgz"

View File

@ -12,12 +12,12 @@
"build:publish": "rimraf *.tgz --glob || true && yarn build && npm pack && cpx *.tgz ../../pre-install"
},
"devDependencies": {
"cpx": "^1.5.0",
"rimraf": "^6.0.1",
"cpx": "1.5.0",
"rimraf": "6.0.1",
"rolldown": "1.0.0-beta.1",
"run-script-os": "^1.1.6",
"run-script-os": "1.1.6",
"typescript": "5.8.3",
"vitest": "^3.0.6"
"vitest": "3.2.4"
},
"files": [
"dist/*",
@ -26,7 +26,7 @@
],
"dependencies": {
"@janhq/core": "../../core/package.tgz",
"@tauri-apps/api": "^2.5.0"
"@tauri-apps/api": "2.8.0"
},
"bundleDependencies": [],
"installConfig": {

View File

@ -17,24 +17,24 @@
"test:coverage": "vitest run --coverage"
},
"devDependencies": {
"@vitest/ui": "^3.2.4",
"cpx": "^1.5.0",
"jsdom": "^26.1.0",
"rimraf": "^3.0.2",
"@vitest/ui": "2.1.9",
"cpx": "1.5.0",
"jsdom": "26.1.0",
"rimraf": "3.0.2",
"rolldown": "1.0.0-beta.1",
"ts-loader": "^9.5.0",
"typescript": "^5.7.2",
"vitest": "^3.2.4"
"typescript": "5.9.2",
"vitest": "3.2.4"
},
"dependencies": {
"@janhq/core": "../../core/package.tgz",
"@janhq/tauri-plugin-hardware-api": "link:../../src-tauri/plugins/tauri-plugin-hardware",
"@janhq/tauri-plugin-llamacpp-api": "link:../../src-tauri/plugins/tauri-plugin-llamacpp",
"@tauri-apps/api": "^2.5.0",
"@tauri-apps/plugin-http": "^2.5.1",
"@tauri-apps/api": "2.8.0",
"@tauri-apps/plugin-http": "2.5.0",
"@tauri-apps/plugin-log": "^2.6.0",
"fetch-retry": "^5.0.6",
"ulidx": "^2.3.0"
"ulidx": "2.4.1"
},
"engines": {
"node": ">=18.0.0"

View File

@ -1,9 +1,8 @@
import { getJanDataFolderPath, fs, joinPath, events } from '@janhq/core'
import { invoke } from '@tauri-apps/api/core'
import { getProxyConfig } from './util'
import { getProxyConfig, basenameNoExt } from './util'
import { dirname, basename } from '@tauri-apps/api/path'
import { getSystemInfo } from '@janhq/tauri-plugin-hardware-api'
/*
* Reads currently installed backends in janDataFolderPath
*
@ -73,10 +72,7 @@ async function fetchRemoteSupportedBackends(
if (!name.startsWith(prefix)) continue
const backend = name
.replace(prefix, '')
.replace('.tar.gz', '')
.replace('.zip', '')
const backend = basenameNoExt(name).slice(prefix.length)
if (supportedBackends.includes(backend)) {
remote.push({ version, backend })

View File

@ -1155,6 +1155,49 @@ export default class llamacpp_extension extends AIEngine {
}
}
/**
* Update a model with new information.
* @param modelId
* @param model
*/
async update(modelId: string, model: Partial<modelInfo>): Promise<void> {
const modelFolderPath = await joinPath([
await this.getProviderPath(),
'models',
modelId,
])
const modelConfig = await invoke<ModelConfig>('read_yaml', {
path: await joinPath([modelFolderPath, 'model.yml']),
})
const newFolderPath = await joinPath([
await this.getProviderPath(),
'models',
model.id,
])
// Check if newFolderPath exists
if (await fs.existsSync(newFolderPath)) {
throw new Error(`Model with ID ${model.id} already exists`)
}
const newModelConfigPath = await joinPath([newFolderPath, 'model.yml'])
await fs.mv(modelFolderPath, newFolderPath).then(() =>
// now replace what values have previous model name with format
invoke('write_yaml', {
data: {
...modelConfig,
model_path: modelConfig?.model_path?.replace(
`${this.providerId}/models/${modelId}`,
`${this.providerId}/models/${model.id}`
),
mmproj_path: modelConfig?.mmproj_path?.replace(
`${this.providerId}/models/${modelId}`,
`${this.providerId}/models/${model.id}`
),
},
savePath: newModelConfigPath,
})
)
}
override async import(modelId: string, opts: ImportOptions): Promise<void> {
const isValidModelId = (id: string) => {
// only allow alphanumeric, underscore, hyphen, and dot characters in modelId

View File

@ -1,3 +1,23 @@
// File path utilities
// Strips a known compound archive extension (".tar.gz"/".zip", matched
// case-insensitively) from a file path; if none matches, strips the final
// single extension instead. A leading-dot name like ".hidden" is returned
// unchanged.
export function basenameNoExt(filePath: string): string {
  const ARCHIVE_EXTENSIONS = ['.tar.gz', '.zip']
  const lowered = filePath.toLowerCase()
  const matched = ARCHIVE_EXTENSIONS.find((ext) => lowered.endsWith(ext))
  if (matched) {
    return filePath.slice(0, filePath.length - matched.length)
  }
  const dotAt = filePath.lastIndexOf('.')
  return dotAt > 0 ? filePath.slice(0, dotAt) : filePath
}
// Zustand proxy state structure
interface ProxyState {
proxyEnabled: boolean

View File

@ -1,6 +1,6 @@
{
"compilerOptions": {
"target": "es2016",
"target": "es2018",
"module": "ES6",
"moduleResolution": "node",
"outDir": "./dist",

File diff suppressed because it is too large Load Diff

286
mise.toml
View File

@ -1,286 +0,0 @@
[tools]
node = "20"
rust = "1.85.1"
sccache = "latest"
[env]
_.path = ['./node_modules/.bin']
RUSTC_WRAPPER="sccache"
# ============================================================================
# CORE SETUP AND CONFIGURATION TASKS
# ============================================================================
[tasks.config-yarn]
description = "Configure yarn version and settings"
run = [
"corepack enable",
"corepack prepare yarn@4.5.3 --activate",
"yarn --version",
"yarn config set -H enableImmutableInstalls false"
]
[tasks.install]
description = "Install dependencies"
depends = ["config-yarn"]
run = "yarn install"
sources = ['package.json', 'yarn.lock']
outputs = ['node_modules']
[tasks.build-tauri-plugin-api]
description = "Build Tauri plugin API"
depends = ["install"]
run = "yarn build:tauri:plugin:api"
sources = ['src-tauri/plugins/**/*']
outputs = [
'src-tauri/plugins/tauri-plugin-hardware/dist-js',
'src-tauri/plugins/tauri-plugin-llamacpp/dist-js',
]
[tasks.build-core]
description = "Build core package"
depends = ["build-tauri-plugin-api"]
run = "yarn build:core"
sources = ['core/**/*']
outputs = ['core/dist']
[tasks.build-extensions]
description = "Build extensions"
depends = ["build-core"]
run = "yarn build:extensions && yarn build:extensions-web"
sources = ['extensions/**/*']
outputs = ['pre-install/*.tgz']
[tasks.install-and-build]
description = "Install dependencies and build core and extensions (matches Makefile)"
depends = ["build-extensions"]
# ============================================================================
# DEVELOPMENT TASKS
# ============================================================================
[tasks.dev]
description = "Start development server (matches Makefile)"
depends = ["install-and-build"]
run = [
"yarn download:bin",
"yarn dev"
]
[tasks.dev-tauri]
description = "Start development server with Tauri (DEPRECATED - matches Makefile)"
depends = ["install-and-build"]
run = [
"yarn download:bin",
"yarn dev:tauri"
]
# ============================================================================
# WEB APPLICATION DEVELOPMENT TASKS
# ============================================================================
[tasks.dev-web-app]
description = "Start web application development server (matches Makefile)"
depends = ["build-core"]
run = "yarn dev:web-app"
[tasks.build-web-app]
description = "Build web application (matches Makefile)"
depends = ["build-core"]
run = "yarn build:web-app"
[tasks.serve-web-app]
description = "Serve built web application"
run = "yarn serve:web-app"
[tasks.build-serve-web-app]
description = "Build and serve web application (matches Makefile)"
depends = ["build-web-app"]
run = "yarn serve:web-app"
# ============================================================================
# BUILD TASKS
# ============================================================================
[tasks.install-rust-targets]
description = "Install required Rust targets for MacOS universal builds"
run = '''
#!/usr/bin/env bash
# Check if we're on macOS
if [[ "$OSTYPE" == "darwin"* ]]; then
echo "Detected macOS, installing universal build targets..."
rustup target add x86_64-apple-darwin
rustup target add aarch64-apple-darwin
echo "Rust targets installed successfully!"
fi
'''
[tasks.build]
description = "Build complete application (matches Makefile)"
depends = ["install-rust-targets", "install-and-build"]
run = [
"yarn download:bin",
"yarn build"
]
# ============================================================================
# QUALITY ASSURANCE TASKS
# ============================================================================
[tasks.lint]
description = "Run linting (matches Makefile)"
depends = ["build-extensions"]
run = "yarn lint"
# ============================================================================
# RUST TEST COMPONENTS
# ============================================================================
[tasks.test-rust-main]
description = "Test main src-tauri package"
run = "cargo test --manifest-path src-tauri/Cargo.toml --no-default-features --features test-tauri -- --test-threads=1"
[tasks.test-rust-hardware]
description = "Test hardware plugin"
run = "cargo test --manifest-path src-tauri/plugins/tauri-plugin-hardware/Cargo.toml"
[tasks.test-rust-llamacpp]
description = "Test llamacpp plugin"
run = "cargo test --manifest-path src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml"
[tasks.test-rust-utils]
description = "Test utils package"
run = "cargo test --manifest-path src-tauri/utils/Cargo.toml"
[tasks.test-rust]
description = "Run all Rust tests"
depends = ["test-rust-main", "test-rust-hardware", "test-rust-llamacpp", "test-rust-utils"]
# ============================================================================
# JS TEST COMPONENTS
# ============================================================================
[tasks.test-js-setup]
description = "Setup for JS tests"
run = [
"yarn download:bin",
"yarn download:lib",
"yarn copy:assets:tauri",
"yarn build:icon"
]
[tasks.test-js]
description = "Run JS tests"
depends = ["test-js-setup"]
run = "yarn test"
# ============================================================================
# COMBINED TEST TASKS
# ============================================================================
[tasks.test]
description = "Run complete test suite (matches Makefile)"
depends = ["lint", "test-js", "test-rust"]
# ============================================================================
# PARALLEL-FRIENDLY QUALITY ASSURANCE TASKS
# ============================================================================
[tasks.lint-only]
description = "Run linting only (parallel-friendly)"
depends = ["build-extensions"]
run = "yarn lint"
hide = true
[tasks.test-only]
description = "Run tests only (parallel-friendly)"
depends = ["build-extensions", "test-js", "test-rust"]
hide = true
[tasks.qa-parallel]
description = "Run linting and testing in parallel"
depends = ["lint-only", "test-only"]
# ============================================================================
# UTILITY TASKS
# ============================================================================
[tasks.clean]
description = "Clean all build artifacts and dependencies (cross-platform - matches Makefile)"
run = '''
#!/usr/bin/env bash
echo "Cleaning build artifacts and dependencies..."
# Platform detection and cleanup (matches Makefile exactly)
if [[ "$OSTYPE" == "msys" || "$OSTYPE" == "win32" ]]; then
  # Windows cleanup using PowerShell (matches Makefile)
  powershell -Command "Get-ChildItem -Path . -Include node_modules, .next, dist, build, out, .turbo, .yarn -Recurse -Directory | Remove-Item -Recurse -Force" 2>/dev/null || true
  powershell -Command "Get-ChildItem -Path . -Include package-lock.json, tsconfig.tsbuildinfo -Recurse -File | Remove-Item -Recurse -Force" 2>/dev/null || true
  powershell -Command "Remove-Item -Recurse -Force ./pre-install/*.tgz" 2>/dev/null || true
  powershell -Command "Remove-Item -Recurse -Force ./extensions/*/*.tgz" 2>/dev/null || true
  powershell -Command "Remove-Item -Recurse -Force ./electron/pre-install/*.tgz" 2>/dev/null || true
  powershell -Command "Remove-Item -Recurse -Force ./src-tauri/resources" 2>/dev/null || true
  powershell -Command "Remove-Item -Recurse -Force ./src-tauri/target" 2>/dev/null || true
  powershell -Command "if (Test-Path \"\$(\$env:USERPROFILE)\\jan\\extensions\\\") { Remove-Item -Path \"\$(\$env:USERPROFILE)\\jan\\extensions\" -Recurse -Force }" 2>/dev/null || true
elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
  # Linux cleanup (matches Makefile)
  find . -name "node_modules" -type d -prune -exec rm -rf '{}' + 2>/dev/null || true
  find . -name ".next" -type d -exec rm -rf '{}' + 2>/dev/null || true
  find . -name "dist" -type d -exec rm -rf '{}' + 2>/dev/null || true
  find . -name "build" -type d -exec rm -rf '{}' + 2>/dev/null || true
  find . -name "out" -type d -exec rm -rf '{}' + 2>/dev/null || true
  find . -name ".turbo" -type d -exec rm -rf '{}' + 2>/dev/null || true
  find . -name ".yarn" -type d -exec rm -rf '{}' + 2>/dev/null || true
  find . -name "package-lock.json" -type f -exec rm -rf '{}' + 2>/dev/null || true
  rm -rf ./pre-install/*.tgz 2>/dev/null || true
  rm -rf ./extensions/*/*.tgz 2>/dev/null || true
  rm -rf ./electron/pre-install/*.tgz 2>/dev/null || true
  rm -rf ./src-tauri/resources 2>/dev/null || true
  rm -rf ./src-tauri/target 2>/dev/null || true
  rm -rf ~/jan/extensions 2>/dev/null || true
  # Unquoted so that tilde expansion and the glob actually apply; the quoted
  # form "~/.cache/jan*" matched a literal path and never removed the cache.
  rm -rf ~/.cache/jan* 2>/dev/null || true
  rm -rf ./.cache 2>/dev/null || true
else
  # macOS cleanup (matches Makefile)
  find . -name "node_modules" -type d -prune -exec rm -rf '{}' + 2>/dev/null || true
  find . -name ".next" -type d -exec rm -rf '{}' + 2>/dev/null || true
  find . -name "dist" -type d -exec rm -rf '{}' + 2>/dev/null || true
  find . -name "build" -type d -exec rm -rf '{}' + 2>/dev/null || true
  find . -name "out" -type d -exec rm -rf '{}' + 2>/dev/null || true
  find . -name ".turbo" -type d -exec rm -rf '{}' + 2>/dev/null || true
  find . -name ".yarn" -type d -exec rm -rf '{}' + 2>/dev/null || true
  find . -name "package-lock.json" -type f -exec rm -rf '{}' + 2>/dev/null || true
  rm -rf ./pre-install/*.tgz 2>/dev/null || true
  rm -rf ./extensions/*/*.tgz 2>/dev/null || true
  rm -rf ./electron/pre-install/*.tgz 2>/dev/null || true
  rm -rf ./src-tauri/resources 2>/dev/null || true
  rm -rf ./src-tauri/target 2>/dev/null || true
  rm -rf ~/jan/extensions 2>/dev/null || true
  rm -rf ~/Library/Caches/jan* 2>/dev/null || true
fi
echo "Clean completed!"
'''
[tasks.all]
description = "Default target - shows available commands (matches Makefile)"
run = "echo 'Specify a target to run. Use: mise tasks'"
# ============================================================================
# DEVELOPMENT WORKFLOW SHORTCUTS
# ============================================================================
[tasks.setup]
description = "Complete development setup"
depends = ["install-and-build"]
alias = "init"
[tasks.ci]
description = "Run CI pipeline (lint + test sequentially)"
depends = ["test"]
[tasks.ci-parallel]
description = "Run CI pipeline (lint + test in parallel)"
depends = ["qa-parallel"]
alias = "ci-fast"

View File

@ -36,8 +36,8 @@
"download:lib": "node ./scripts/download-lib.mjs",
"download:bin": "node ./scripts/download-bin.mjs",
"download:windows-installer": "node ./scripts/download-win-installer-deps.mjs",
"build:tauri:win32": "yarn download:bin && yarn download:lib && yarn download:windows-installer && yarn tauri build",
"build:tauri:linux": "yarn download:bin && yarn download:lib && NO_STRIP=1 ./src-tauri/build-utils/shim-linuxdeploy.sh yarn tauri build && ./src-tauri/build-utils/buildAppImage.sh",
"build:tauri:win32": "yarn download:bin && yarn download:windows-installer && yarn tauri build",
"build:tauri:linux": "yarn download:bin && NO_STRIP=1 ./src-tauri/build-utils/shim-linuxdeploy.sh yarn tauri build && ./src-tauri/build-utils/buildAppImage.sh",
"build:tauri:darwin": "yarn download:bin && yarn tauri build --target universal-apple-darwin",
"build:tauri": "yarn build:icon && yarn copy:assets:tauri && run-script-os",
"build:tauri:plugin:api": "cd src-tauri/plugins && yarn install && yarn workspaces foreach -Apt run build",

View File

@ -1,4 +1,3 @@
console.log('Script is running')
// scripts/download.js
import https from 'https'
import fs, { copyFile, mkdirSync } from 'fs'
@ -69,7 +68,10 @@ function getPlatformArch() {
arch === 'arm64' ? 'aarch64-apple-darwin' : 'x86_64-apple-darwin'
} else if (platform === 'linux') {
bunPlatform = arch === 'arm64' ? 'linux-aarch64' : 'linux-x64'
uvPlatform = arch === 'arm64' ? 'aarch64-unknown-linux-gnu' : 'x86_64-unknown-linux-gnu'
uvPlatform =
arch === 'arm64'
? 'aarch64-unknown-linux-gnu'
: 'x86_64-unknown-linux-gnu'
} else if (platform === 'win32') {
bunPlatform = 'windows-x64' // Bun has limited Windows support
uvPlatform = 'x86_64-pc-windows-msvc'
@ -81,6 +83,10 @@ function getPlatformArch() {
}
async function main() {
if (process.env.SKIP_BINARIES) {
console.log('Skipping binaries download.')
process.exit(0)
}
console.log('Starting main function')
const platform = os.platform()
const { bunPlatform, uvPlatform } = getPlatformArch()
@ -124,29 +130,45 @@ async function main() {
if (err) {
console.log('Add execution permission failed!', err)
}
});
})
if (platform === 'darwin') {
copyFile(path.join(binDir, 'bun'), path.join(binDir, 'bun-x86_64-apple-darwin'), (err) => {
copyFile(
path.join(binDir, 'bun'),
path.join(binDir, 'bun-x86_64-apple-darwin'),
(err) => {
if (err) {
console.log("Error Found:", err);
console.log('Error Found:', err)
}
})
copyFile(path.join(binDir, 'bun'), path.join(binDir, 'bun-aarch64-apple-darwin'), (err) => {
}
)
copyFile(
path.join(binDir, 'bun'),
path.join(binDir, 'bun-aarch64-apple-darwin'),
(err) => {
if (err) {
console.log("Error Found:", err);
console.log('Error Found:', err)
}
})
copyFile(path.join(binDir, 'bun'), path.join(binDir, 'bun-universal-apple-darwin'), (err) => {
}
)
copyFile(
path.join(binDir, 'bun'),
path.join(binDir, 'bun-universal-apple-darwin'),
(err) => {
if (err) {
console.log("Error Found:", err);
console.log('Error Found:', err)
}
})
}
)
} else if (platform === 'linux') {
copyFile(path.join(binDir, 'bun'), path.join(binDir, 'bun-x86_64-unknown-linux-gnu'), (err) => {
copyFile(
path.join(binDir, 'bun'),
path.join(binDir, 'bun-x86_64-unknown-linux-gnu'),
(err) => {
if (err) {
console.log("Error Found:", err);
console.log('Error Found:', err)
}
})
}
)
}
} catch (err) {
// Expect EEXIST error
@ -157,11 +179,15 @@ async function main() {
path.join(binDir)
)
if (platform === 'win32') {
copyFile(path.join(binDir, 'bun.exe'), path.join(binDir, 'bun-x86_64-pc-windows-msvc.exe'), (err) => {
copyFile(
path.join(binDir, 'bun.exe'),
path.join(binDir, 'bun-x86_64-pc-windows-msvc.exe'),
(err) => {
if (err) {
console.log("Error Found:", err);
console.log('Error Found:', err)
}
})
}
)
}
} catch (err) {
// Expect EEXIST error
@ -176,52 +202,66 @@ async function main() {
await decompress(uvPath, tempBinDir)
}
try {
copySync(
path.join(tempBinDir, `uv-${uvPlatform}`, 'uv'),
path.join(binDir)
)
copySync(path.join(tempBinDir, `uv-${uvPlatform}`, 'uv'), path.join(binDir))
fs.chmod(path.join(binDir, 'uv'), 0o755, (err) => {
if (err) {
console.log('Add execution permission failed!', err)
}
});
})
if (platform === 'darwin') {
copyFile(path.join(binDir, 'uv'), path.join(binDir, 'uv-x86_64-apple-darwin'), (err) => {
copyFile(
path.join(binDir, 'uv'),
path.join(binDir, 'uv-x86_64-apple-darwin'),
(err) => {
if (err) {
console.log("Error Found:", err);
console.log('Error Found:', err)
}
})
copyFile(path.join(binDir, 'uv'), path.join(binDir, 'uv-aarch64-apple-darwin'), (err) => {
}
)
copyFile(
path.join(binDir, 'uv'),
path.join(binDir, 'uv-aarch64-apple-darwin'),
(err) => {
if (err) {
console.log("Error Found:", err);
console.log('Error Found:', err)
}
})
copyFile(path.join(binDir, 'uv'), path.join(binDir, 'uv-universal-apple-darwin'), (err) => {
}
)
copyFile(
path.join(binDir, 'uv'),
path.join(binDir, 'uv-universal-apple-darwin'),
(err) => {
if (err) {
console.log("Error Found:", err);
console.log('Error Found:', err)
}
})
}
)
} else if (platform === 'linux') {
copyFile(path.join(binDir, 'uv'), path.join(binDir, 'uv-x86_64-unknown-linux-gnu'), (err) => {
copyFile(
path.join(binDir, 'uv'),
path.join(binDir, 'uv-x86_64-unknown-linux-gnu'),
(err) => {
if (err) {
console.log("Error Found:", err);
console.log('Error Found:', err)
}
})
}
)
}
} catch (err) {
// Expect EEXIST error
}
try {
copySync(
path.join(tempBinDir, 'uv.exe'),
path.join(binDir)
)
copySync(path.join(tempBinDir, 'uv.exe'), path.join(binDir))
if (platform === 'win32') {
copyFile(path.join(binDir, 'uv.exe'), path.join(binDir, 'uv-x86_64-pc-windows-msvc.exe'), (err) => {
copyFile(
path.join(binDir, 'uv.exe'),
path.join(binDir, 'uv-x86_64-pc-windows-msvc.exe'),
(err) => {
if (err) {
console.log("Error Found:", err);
console.log('Error Found:', err)
}
})
}
)
}
} catch (err) {
// Expect EEXIST error

View File

@ -1,86 +0,0 @@
console.log('Script is running')
// scripts/download-lib.mjs
import https from 'https'
import fs, { mkdirSync } from 'fs'
import os from 'os'
import path from 'path'
import { copySync } from 'cpx'
// Downloads `url` into local file `dest`, transparently following HTTP
// 3xx redirects by recursing on the Location header. Resolves once the
// file stream finishes; rejects with a message string on HTTP/network
// failure (the partial file is unlinked on network error).
function download(url, dest) {
  return new Promise((resolve, reject) => {
    console.log(`Downloading ${url} to ${dest}`)
    const file = fs.createWriteStream(dest)
    const request = https.get(url, (response) => {
      console.log(`Response status code: ${response.statusCode}`)
      const { statusCode, headers } = response
      const isRedirect =
        statusCode >= 300 && statusCode < 400 && headers.location
      if (isRedirect) {
        console.log(`Redirecting to ${headers.location}`)
        // Recursive call to chase the redirect target.
        download(headers.location, dest).then(resolve, reject)
        return
      }
      if (statusCode !== 200) {
        reject(`Failed to get '${url}' (${statusCode})`)
        return
      }
      response.pipe(file)
      file.on('finish', () => {
        file.close(resolve)
      })
    })
    request.on('error', (err) => {
      fs.unlink(dest, () => reject(err.message))
    })
  })
}
/**
 * Downloads the prebuilt Vulkan loader library for the current platform
 * (x64 Linux/Windows only) into scripts/dist, then copies it into the
 * Tauri resources lib directory. No-op on other platforms/architectures.
 */
async function main() {
  console.log('Starting main function')
  const platform = os.platform() // 'darwin', 'linux', 'win32'
  const arch = os.arch() // 'x64', 'arm64', etc.
  // The loader is only published for x64.
  if (arch != 'x64') return

  let filename
  if (platform == 'linux') filename = 'libvulkan.so'
  else if (platform == 'win32') filename = 'vulkan-1.dll'
  else return // macOS and others: nothing to download

  // NOTE(review): the URL path segment was garbled in the source; it is
  // reconstructed here from `filename` — confirm against the catalog layout.
  const url = `https://catalog.jan.ai/${filename}`
  const libDir = 'src-tauri/resources/lib'
  const tempDir = 'scripts/dist'
  try {
    mkdirSync(tempDir)
  } catch (err) {
    // Expect EEXIST error if the directory already exists
  }

  console.log(`Downloading libvulkan...`)
  const savePath = path.join(tempDir, filename)
  // Skip re-download when a previous run already fetched the file.
  if (!fs.existsSync(savePath)) {
    await download(url, savePath)
  }

  // copy to tauri resources
  try {
    copySync(savePath, libDir)
  } catch (err) {
    // Expect EEXIST error
  }
  console.log('Downloads completed.')
}
main().catch((err) => {
  console.error('Error:', err)
  process.exit(1)
})

View File

@ -11,7 +11,7 @@ exclude = ["/examples", "/dist-js", "/guest-js", "/node_modules"]
links = "tauri-plugin-hardware"
[dependencies]
ash = "0.38.0"
vulkano = "0.34"
libc = "0.2"
log = "0.4"
nvml-wrapper = "0.10.0"

View File

@ -1,14 +1,12 @@
use crate::{
helpers::get_jan_libvulkan_path,
types::{CpuStaticInfo, SystemInfo, SystemUsage},
vendor::{nvidia, vulkan},
SYSTEM_INFO,
};
use sysinfo::System;
use tauri::Runtime;
#[tauri::command]
pub fn get_system_info<R: Runtime>(app: tauri::AppHandle<R>) -> SystemInfo {
pub fn get_system_info() -> SystemInfo {
SYSTEM_INFO
.get_or_init(|| {
let mut system = System::new();
@ -19,15 +17,7 @@ pub fn get_system_info<R: Runtime>(app: tauri::AppHandle<R>) -> SystemInfo {
gpu_map.insert(gpu.uuid.clone(), gpu);
}
// try system vulkan first
let paths = vec!["".to_string(), get_jan_libvulkan_path(app.clone())];
let mut vulkan_gpus = vec![];
for path in paths {
vulkan_gpus = vulkan::get_vulkan_gpus(&path);
if !vulkan_gpus.is_empty() {
break;
}
}
let vulkan_gpus = vulkan::get_vulkan_gpus();
for gpu in vulkan_gpus {
match gpu_map.get_mut(&gpu.uuid) {
@ -64,7 +54,7 @@ pub fn get_system_info<R: Runtime>(app: tauri::AppHandle<R>) -> SystemInfo {
}
#[tauri::command]
pub fn get_system_usage<R: Runtime>(app: tauri::AppHandle<R>) -> SystemUsage {
pub fn get_system_usage() -> SystemUsage {
let mut system = System::new();
system.refresh_memory();
@ -81,7 +71,7 @@ pub fn get_system_usage<R: Runtime>(app: tauri::AppHandle<R>) -> SystemUsage {
cpu: cpu_usage,
used_memory: system.used_memory() / 1024 / 1024, // bytes to MiB,
total_memory: system.total_memory() / 1024 / 1024, // bytes to MiB,
gpus: get_system_info(app.clone())
gpus: get_system_info()
.gpus
.iter()
.map(|gpu| gpu.get_usage())

View File

@ -1,20 +0,0 @@
use tauri::{path::BaseDirectory, Manager, Runtime};
/// Resolves the absolute path to the Vulkan loader library bundled with
/// the app: `vulkan-1.dll` on Windows, `libvulkan.so` on Linux.
///
/// Returns an empty string on any other OS, or when Tauri cannot resolve
/// the resource path.
pub fn get_jan_libvulkan_path<R: Runtime>(app: tauri::AppHandle<R>) -> String {
    // Pick the platform-specific library file name.
    let lib_name = if cfg!(target_os = "windows") {
        "vulkan-1.dll"
    } else if cfg!(target_os = "linux") {
        "libvulkan.so"
    } else {
        // No bundled Vulkan loader on other platforms (e.g. macOS).
        return "".to_string();
    };
    // NOTE: this does not work in test mode (mock app)
    // Resolve relative to the app's bundled resource directory.
    match app.path().resolve(
        format!("resources/lib/{}", lib_name),
        BaseDirectory::Resource,
    ) {
        Ok(lib_path) => lib_path.to_string_lossy().to_string(),
        // Resolution failure is treated as "not available".
        Err(_) => "".to_string(),
    }
}

View File

@ -2,12 +2,10 @@ mod commands;
mod constants;
pub mod cpu;
pub mod gpu;
mod helpers;
mod types;
pub mod vendor;
pub use constants::*;
pub use helpers::*;
pub use types::*;
use std::sync::OnceLock;

View File

@ -4,15 +4,13 @@ use tauri::test::mock_app;
#[test]
fn test_system_info() {
let app = mock_app();
let info = get_system_info(app.handle().clone());
let info = get_system_info();
println!("System Static Info: {:?}", info);
}
#[test]
fn test_system_usage() {
let app = mock_app();
let usage = get_system_usage(app.handle().clone());
let usage = get_system_usage();
println!("System Usage Info: {:?}", usage);
}
@ -32,10 +30,10 @@ mod cpu_tests {
// Architecture should be one of the expected values
assert!(
cpu_info.arch == "aarch64" ||
cpu_info.arch == "arm64" ||
cpu_info.arch == "x86_64" ||
cpu_info.arch == std::env::consts::ARCH
cpu_info.arch == "aarch64"
|| cpu_info.arch == "arm64"
|| cpu_info.arch == "x86_64"
|| cpu_info.arch == std::env::consts::ARCH
);
// Extensions should be a valid list (can be empty on non-x86)
@ -78,11 +76,33 @@ mod cpu_tests {
// Check that all extensions are valid x86 feature names
let valid_extensions = [
"fpu", "mmx", "sse", "sse2", "sse3", "ssse3", "sse4_1", "sse4_2",
"pclmulqdq", "avx", "avx2", "avx512_f", "avx512_dq", "avx512_ifma",
"avx512_pf", "avx512_er", "avx512_cd", "avx512_bw", "avx512_vl",
"avx512_vbmi", "avx512_vbmi2", "avx512_vnni", "avx512_bitalg",
"avx512_vpopcntdq", "avx512_vp2intersect", "aes", "f16c"
"fpu",
"mmx",
"sse",
"sse2",
"sse3",
"ssse3",
"sse4_1",
"sse4_2",
"pclmulqdq",
"avx",
"avx2",
"avx512_f",
"avx512_dq",
"avx512_ifma",
"avx512_pf",
"avx512_er",
"avx512_cd",
"avx512_bw",
"avx512_vl",
"avx512_vbmi",
"avx512_vbmi2",
"avx512_vnni",
"avx512_bitalg",
"avx512_vpopcntdq",
"avx512_vp2intersect",
"aes",
"f16c",
];
for ext in &cpu_info.extensions {

View File

@ -12,7 +12,7 @@ fn test_get_nvidia_gpus() {
#[test]
fn test_get_vulkan_gpus() {
let gpus = vulkan::get_vulkan_gpus("");
let gpus = vulkan::get_vulkan_gpus();
for (i, gpu) in gpus.iter().enumerate() {
println!("GPU {}:", i);
println!(" {:?}", gpu);

View File

@ -3,7 +3,6 @@ use super::utils::{estimate_kv_cache_internal, read_gguf_metadata_internal};
use crate::gguf::types::{KVCacheError, KVCacheEstimate, ModelSupportStatus};
use std::collections::HashMap;
use std::fs;
use tauri::Runtime;
use tauri_plugin_hardware::get_system_info;
/// Read GGUF metadata from a model file
#[tauri::command]
@ -49,16 +48,15 @@ pub async fn get_model_size(path: String) -> Result<u64, String> {
}
#[tauri::command]
pub async fn is_model_supported<R: Runtime>(
pub async fn is_model_supported(
path: String,
ctx_size: Option<u32>,
app_handle: tauri::AppHandle<R>,
) -> Result<ModelSupportStatus, String> {
// Get model size
let model_size = get_model_size(path.clone()).await?;
// Get system info
let system_info = get_system_info(app_handle.clone());
let system_info = get_system_info();
log::info!("modelSize: {}", model_size);

View File

@ -3,7 +3,6 @@ use crate::gguf::utils::estimate_kv_cache_internal;
use crate::gguf::utils::read_gguf_metadata_internal;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use tauri::Runtime;
use tauri_plugin_hardware::get_system_info;
#[derive(Serialize, Deserialize, Clone, Debug)]
@ -27,15 +26,14 @@ pub enum ModelMode {
}
#[tauri::command]
pub async fn plan_model_load<R: Runtime>(
pub async fn plan_model_load(
path: String,
memory_mode: String,
mmproj_path: Option<String>,
requested_ctx: Option<u64>,
app: tauri::AppHandle<R>,
) -> Result<ModelPlan, String> {
let model_size = get_model_size(path.clone()).await?;
let sys_info = get_system_info(app.clone());
let sys_info = get_system_info();
let gguf = read_gguf_metadata_internal(path.clone()).await?;
let mut mmproj_size: u64 = 0;

View File

@ -465,7 +465,11 @@ async fn download_single_file(
.await
.map_err(err_to_string)?;
log::info!("Started downloading: {}", item.url);
// Decode URL for better readability in logs
let decoded_url = url::Url::parse(&item.url)
.map(|u| u.to_string())
.unwrap_or_else(|_| item.url.clone());
log::info!("Started downloading: {}", decoded_url);
let client = _get_client_for_item(item, &header_map).map_err(err_to_string)?;
let mut download_delta = 0u64;
let mut initial_progress = 0u64;
@ -584,7 +588,11 @@ async fn download_single_file(
.await
.map_err(err_to_string)?;
log::info!("Finished downloading: {}", item.url);
// Decode URL for better readability in logs
let decoded_url = url::Url::parse(&item.url)
.map(|u| u.to_string())
.unwrap_or_else(|_| item.url.clone());
log::info!("Finished downloading: {}", decoded_url);
Ok(save_path.to_path_buf())
}

View File

@ -33,6 +33,22 @@ pub fn mkdir<R: Runtime>(app_handle: tauri::AppHandle<R>, args: Vec<String>) ->
fs::create_dir_all(&path).map_err(|e| e.to_string())
}
#[tauri::command]
/// Moves/renames a file or directory. Expects `args[0]` = source and
/// `args[1]` = destination (both non-empty, resolved against the app's
/// data directory). Errors if arguments are missing or the source does
/// not exist.
#[tauri::command]
pub fn mv<R: Runtime>(app_handle: tauri::AppHandle<R>, args: Vec<String>) -> Result<(), String> {
    // Validate both arguments up front.
    let (src_arg, dst_arg) = match (args.get(0), args.get(1)) {
        (Some(s), Some(d)) if !s.is_empty() && !d.is_empty() => (s, d),
        _ => {
            return Err("mv error: Invalid argument - source and destination required".to_string())
        }
    };
    let source = resolve_path(app_handle.clone(), src_arg);
    let destination = resolve_path(app_handle, dst_arg);
    if !source.exists() {
        return Err("mv error: Source path does not exist".to_string());
    }
    fs::rename(&source, &destination).map_err(|e| e.to_string())
}
#[tauri::command]
pub fn join_path<R: Runtime>(
app_handle: tauri::AppHandle<R>,

View File

@ -25,7 +25,7 @@ use crate::core::{
mcp::models::McpServerConfig,
state::{AppState, RunningServiceEnum, SharedMcpServers},
};
use jan_utils::can_override_npx;
use jan_utils::{can_override_npx, can_override_uvx};
/// Calculate exponential backoff delay with jitter
///
@ -627,19 +627,20 @@ async fn schedule_mcp_start_task<R: Runtime>(
}
} else {
let mut cmd = Command::new(config_params.command.clone());
if config_params.command.clone() == "npx" && can_override_npx() {
let bun_x_path = format!("{}/bun", bin_path.display());
if config_params.command.clone() == "npx" && can_override_npx(bun_x_path.clone()) {
let mut cache_dir = app_path.clone();
cache_dir.push(".npx");
let bun_x_path = format!("{}/bun", bin_path.display());
cmd = Command::new(bun_x_path);
cmd.arg("x");
cmd.env("BUN_INSTALL", cache_dir.to_str().unwrap().to_string());
}
if config_params.command.clone() == "uvx" {
let uv_path = format!("{}/uv", bin_path.display());
if config_params.command.clone() == "uvx" && can_override_uvx(uv_path.clone()) {
let mut cache_dir = app_path.clone();
cache_dir.push(".uvx");
let bun_x_path = format!("{}/uv", bin_path.display());
cmd = Command::new(bun_x_path);
cmd = Command::new(uv_path);
cmd.arg("tool");
cmd.arg("run");
cmd.env("UV_CACHE_DIR", cache_dir.to_str().unwrap().to_string());

View File

@ -51,6 +51,7 @@ pub fn run() {
core::filesystem::commands::readdir_sync,
core::filesystem::commands::read_file_sync,
core::filesystem::commands::rm,
core::filesystem::commands::mv,
core::filesystem::commands::file_stat,
core::filesystem::commands::write_file_sync,
core::filesystem::commands::write_yaml,
@ -141,8 +142,7 @@ pub fn run() {
#[cfg(all(feature = "deep-link", any(windows, target_os = "linux")))]
{
use tauri_plugin_deep_link::DeepLinkExt;
// Register the deep-link scheme programmatically
app.deep_link().register("jan")?;
app.deep_link().register_all()?;
}
setup_mcp(app);
Ok(())

View File

@ -10,13 +10,11 @@
"linux": {
"appimage": {
"bundleMediaFramework": false,
"files": {
}
"files": {}
},
"deb": {
"files": {
"usr/bin/bun": "resources/bin/bun",
"usr/lib/Jan/resources/lib/libvulkan.so": "resources/lib/libvulkan.so"
"usr/bin/bun": "resources/bin/bun"
}
}
}

View File

@ -7,7 +7,11 @@
"bundle": {
"targets": ["nsis"],
"resources": ["resources/pre-install/**/*", "resources/lib/vulkan-1.dll", "resources/lib/vc_redist.x64.exe", "resources/LICENSE"],
"resources": [
"resources/pre-install/**/*",
"resources/lib/vc_redist.x64.exe",
"resources/LICENSE"
],
"externalBin": ["resources/bin/bun", "resources/bin/uv"],
"windows": {
"nsis": {

View File

@ -1,5 +1,5 @@
/// Checks AVX2 CPU support for npx override with bun binary
pub fn can_override_npx() -> bool {
/// Checks if npx can be overridden with bun binary
pub fn can_override_npx(bun_path: String) -> bool {
// We need to check the CPU for the AVX2 instruction support if we are running under MacOS
// with Intel CPU. We can override `npx` command with `bun` only if CPU is
// supporting AVX2, otherwise we need to use default `npx` binary
@ -13,10 +13,31 @@ pub fn can_override_npx() -> bool {
return false; // we cannot override npx with bun binary
}
}
// Check if bun_path exists
if !std::path::Path::new(bun_path.as_str()).exists() {
#[cfg(feature = "logging")]
log::warn!(
"bun binary not found at '{}', default npx binary will be used",
bun_path
);
return false;
}
true // by default, we can override npx with bun binary
}
/// Checks if uv_path exists and determines if uvx can be overridden with the uv binary
pub fn can_override_uvx(uv_path: String) -> bool {
if !std::path::Path::new(uv_path.as_str()).exists() {
#[cfg(feature = "logging")]
log::warn!(
"uv binary not found at '{}', default uvx binary will be used",
uv_path
);
return false;
}
true // by default, we can override uvx with uv binary
}
/// Setup library paths for different operating systems
pub fn setup_library_path(library_path: Option<&str>, command: &mut tokio::process::Command) {
if let Some(lib_path) = library_path {

View File

@ -18,112 +18,112 @@
"build:serve:web": "yarn build:web && yarn serve:web"
},
"dependencies": {
"@dnd-kit/core": "^6.3.1",
"@dnd-kit/modifiers": "^9.0.0",
"@dnd-kit/sortable": "^10.0.0",
"@dnd-kit/core": "6.3.1",
"@dnd-kit/modifiers": "9.0.0",
"@dnd-kit/sortable": "10.0.0",
"@jan/extensions-web": "link:../extensions-web",
"@janhq/core": "link:../core",
"@radix-ui/react-accordion": "^1.2.10",
"@radix-ui/react-avatar": "^1.1.10",
"@radix-ui/react-dialog": "^1.1.14",
"@radix-ui/react-dropdown-menu": "^2.1.15",
"@radix-ui/react-hover-card": "^1.1.14",
"@radix-ui/react-popover": "^1.1.13",
"@radix-ui/react-progress": "^1.1.4",
"@radix-ui/react-radio-group": "^1.3.7",
"@radix-ui/react-slider": "^1.3.2",
"@radix-ui/react-slot": "^1.2.0",
"@radix-ui/react-switch": "^1.2.2",
"@radix-ui/react-tooltip": "^1.2.4",
"@tabler/icons-react": "^3.33.0",
"@tailwindcss/vite": "^4.1.4",
"@tanstack/react-router": "^1.116.0",
"@tanstack/react-router-devtools": "^1.121.34",
"@tanstack/react-virtual": "^3.13.12",
"@tauri-apps/api": "^2.8.0",
"@radix-ui/react-accordion": "1.2.11",
"@radix-ui/react-avatar": "1.1.10",
"@radix-ui/react-dialog": "1.1.15",
"@radix-ui/react-dropdown-menu": "2.1.16",
"@radix-ui/react-hover-card": "1.1.14",
"@radix-ui/react-popover": "1.1.14",
"@radix-ui/react-progress": "1.1.4",
"@radix-ui/react-radio-group": "1.3.8",
"@radix-ui/react-slider": "1.3.2",
"@radix-ui/react-slot": "1.2.0",
"@radix-ui/react-switch": "1.2.2",
"@radix-ui/react-tooltip": "1.2.4",
"@tabler/icons-react": "3.34.0",
"@tailwindcss/vite": "4.1.4",
"@tanstack/react-router": "1.117.0",
"@tanstack/react-router-devtools": "1.121.34",
"@tanstack/react-virtual": "3.13.12",
"@tauri-apps/api": "2.8.0",
"@tauri-apps/plugin-deep-link": "2.4.3",
"@tauri-apps/plugin-dialog": "^2.2.1",
"@tauri-apps/plugin-http": "^2.2.1",
"@tauri-apps/plugin-opener": "^2.2.7",
"@tauri-apps/plugin-os": "^2.2.1",
"@tauri-apps/plugin-updater": "^2.7.1",
"@types/react-syntax-highlighter": "^15.5.13",
"@types/uuid": "^10.0.0",
"@uiw/react-textarea-code-editor": "^3.1.1",
"class-variance-authority": "^0.7.1",
"culori": "^4.0.1",
"emoji-picker-react": "^4.12.2",
"framer-motion": "^12.23.12",
"fuse.js": "^7.1.0",
"fzf": "^0.5.2",
"i18next": "^25.0.1",
"katex": "^0.16.22",
"lodash.clonedeep": "^4.5.0",
"lodash.debounce": "^4.0.8",
"lucide-react": "^0.536.0",
"motion": "^12.10.5",
"next-themes": "^0.4.6",
"posthog-js": "^1.246.0",
"react": "^19.0.0",
"react-colorful": "^5.6.1",
"react-dom": "^19.0.0",
"react-i18next": "^15.5.1",
"react-joyride": "^2.9.3",
"react-markdown": "^10.1.0",
"react-resizable-panels": "^3.0.3",
"react-syntax-highlighter": "^15.6.1",
"react-syntax-highlighter-virtualized-renderer": "^1.1.0",
"react-textarea-autosize": "^8.5.9",
"rehype-katex": "^7.0.1",
"rehype-raw": "^7.0.0",
"remark-breaks": "^4.0.0",
"remark-emoji": "^5.0.1",
"remark-gfm": "^4.0.1",
"remark-math": "^6.0.0",
"sonner": "^2.0.3",
"tailwindcss": "^4.1.4",
"@tauri-apps/plugin-dialog": "2.2.2",
"@tauri-apps/plugin-http": "2.5.0",
"@tauri-apps/plugin-opener": "2.3.0",
"@tauri-apps/plugin-os": "2.2.1",
"@tauri-apps/plugin-updater": "2.8.1",
"@types/react-syntax-highlighter": "15.5.13",
"@types/uuid": "10.0.0",
"@uiw/react-textarea-code-editor": "3.1.1",
"class-variance-authority": "0.7.1",
"culori": "4.0.1",
"emoji-picker-react": "4.12.2",
"framer-motion": "12.23.12",
"fuse.js": "7.1.0",
"fzf": "0.5.2",
"i18next": "25.0.2",
"katex": "0.16.22",
"lodash.clonedeep": "4.5.0",
"lodash.debounce": "4.0.8",
"lucide-react": "0.536.0",
"motion": "12.18.1",
"next-themes": "0.4.6",
"posthog-js": "1.255.1",
"react": "19.0.0",
"react-colorful": "5.6.1",
"react-dom": "19.0.0",
"react-i18next": "15.5.1",
"react-joyride": "2.9.3",
"react-markdown": "10.1.0",
"react-resizable-panels": "3.0.5",
"react-syntax-highlighter": "15.6.1",
"react-syntax-highlighter-virtualized-renderer": "1.1.0",
"react-textarea-autosize": "8.5.9",
"rehype-katex": "7.0.1",
"rehype-raw": "7.0.0",
"remark-breaks": "4.0.0",
"remark-emoji": "5.0.1",
"remark-gfm": "4.0.1",
"remark-math": "6.0.0",
"sonner": "2.0.5",
"tailwindcss": "4.1.4",
"token.js": "npm:token.js-fork@0.7.27",
"tw-animate-css": "^1.2.7",
"ulidx": "^2.4.1",
"unified": "^11.0.5",
"uuid": "^11.1.0",
"vaul": "^1.1.2",
"zustand": "^5.0.3"
"tw-animate-css": "1.2.8",
"ulidx": "2.4.1",
"unified": "11.0.5",
"uuid": "11.1.0",
"vaul": "1.1.2",
"zustand": "5.0.3"
},
"devDependencies": {
"@eslint/js": "^9.22.0",
"@tanstack/router-plugin": "^1.116.1",
"@testing-library/dom": "^10.4.0",
"@testing-library/jest-dom": "^6.6.3",
"@testing-library/react": "^16.3.0",
"@testing-library/user-event": "^14.6.1",
"@types/culori": "^2.1.1",
"@types/istanbul-lib-report": "^3",
"@types/istanbul-reports": "^3",
"@types/lodash.clonedeep": "^4",
"@types/lodash.debounce": "^4",
"@types/node": "^22.14.1",
"@types/react": "^19.0.10",
"@types/react-dom": "^19.0.4",
"@vitejs/plugin-react": "^4.3.4",
"@eslint/js": "8.57.0",
"@tanstack/router-plugin": "1.117.0",
"@testing-library/dom": "10.4.1",
"@testing-library/jest-dom": "6.8.0",
"@testing-library/react": "16.3.0",
"@testing-library/user-event": "14.6.1",
"@types/culori": "2.1.1",
"@types/istanbul-lib-report": "3.0.3",
"@types/istanbul-reports": "3.0.4",
"@types/lodash.clonedeep": "4.5.9",
"@types/lodash.debounce": "4.0.9",
"@types/node": "22.14.1",
"@types/react": "19.1.2",
"@types/react-dom": "19.1.2",
"@vitejs/plugin-react": "4.4.1",
"@vitest/coverage-v8": "3.2.4",
"clsx": "^2.1.1",
"eslint": "^9.22.0",
"eslint-plugin-react-hooks": "^5.2.0",
"eslint-plugin-react-refresh": "^0.4.19",
"globals": "^16.0.0",
"istanbul-api": "^3.0.0",
"istanbul-lib-coverage": "^3.2.2",
"istanbul-lib-report": "^3.0.1",
"istanbul-reports": "^3.1.7",
"jsdom": "^26.1.0",
"serve": "^14.2.4",
"tailwind-merge": "^3.3.1",
"typescript": "~5.8.3",
"typescript-eslint": "^8.26.1",
"vite": "^6.3.0",
"vite-plugin-node-polyfills": "^0.23.0",
"vite-plugin-pwa": "^1.0.3",
"vitest": "^3.1.3"
"clsx": "2.1.1",
"eslint": "9.25.1",
"eslint-plugin-react-hooks": "5.2.0",
"eslint-plugin-react-refresh": "0.4.20",
"globals": "16.0.0",
"istanbul-api": "3.0.0",
"istanbul-lib-coverage": "2.0.5",
"istanbul-lib-report": "2.0.8",
"istanbul-reports": "3.1.7",
"jsdom": "26.1.0",
"serve": "14.2.5",
"tailwind-merge": "3.3.1",
"typescript": "5.9.2",
"typescript-eslint": "8.31.0",
"vite": "6.3.2",
"vite-plugin-node-polyfills": "0.23.0",
"vite-plugin-pwa": "1.0.3",
"vitest": "3.2.4"
}
}

View File

@ -6,7 +6,7 @@ import {
PopoverTrigger,
} from '@/components/ui/popover'
import { useModelProvider } from '@/hooks/useModelProvider'
import { cn, getProviderTitle } from '@/lib/utils'
import { cn, getProviderTitle, getModelDisplayName } from '@/lib/utils'
import { highlightFzfMatch } from '@/utils/highlight'
import Capabilities from './Capabilities'
import { IconSettings, IconX } from '@tabler/icons-react'
@ -240,7 +240,7 @@ const DropdownModelProvider = ({
// Update display model when selection changes
useEffect(() => {
if (selectedProvider && selectedModel) {
setDisplayModel(selectedModel.id)
setDisplayModel(getModelDisplayName(selectedModel))
} else {
setDisplayModel(t('common:selectAModel'))
}
@ -326,7 +326,7 @@ const DropdownModelProvider = ({
// Create Fzf instance for fuzzy search
const fzfInstance = useMemo(() => {
return new Fzf(searchableItems, {
selector: (item) => item.model.id.toLowerCase(),
selector: (item) => `${getModelDisplayName(item.model)} ${item.model.id}`.toLowerCase(),
})
}, [searchableItems])
@ -390,7 +390,7 @@ const DropdownModelProvider = ({
const handleSelect = useCallback(
async (searchableModel: SearchableModel) => {
// Immediately update display to prevent double-click issues
setDisplayModel(searchableModel.model.id)
setDisplayModel(getModelDisplayName(searchableModel.model))
setSearchValue('')
setOpen(false)
@ -576,7 +576,7 @@ const DropdownModelProvider = ({
/>
</div>
<span className="text-main-view-fg/80 text-sm">
{searchableModel.model.id}
{getModelDisplayName(searchableModel.model)}
</span>
<div className="flex-1"></div>
{capabilities.length > 0 && (
@ -669,7 +669,7 @@ const DropdownModelProvider = ({
className="text-main-view-fg/80 text-sm"
title={searchableModel.model.id}
>
{searchableModel.model.id}
{getModelDisplayName(searchableModel.model)}
</span>
<div className="flex-1"></div>
{capabilities.length > 0 && (

View File

@ -432,9 +432,9 @@ const LeftPanel = () => {
>
<IconFolder
size={16}
className="text-left-panel-fg/70"
className="text-left-panel-fg/70 shrink-0"
/>
<span className="text-sm text-left-panel-fg/90">
<span className="text-sm text-left-panel-fg/90 truncate">
{folder.name}
</span>
</Link>

View File

@ -14,7 +14,7 @@ import { Button } from '@/components/ui/button'
import { DynamicControllerSetting } from '@/containers/dynamicControllerSetting'
import { useModelProvider } from '@/hooks/useModelProvider'
import { useServiceHub } from '@/hooks/useServiceHub'
import { cn } from '@/lib/utils'
import { cn, getModelDisplayName } from '@/lib/utils'
import { useTranslation } from '@/i18n/react-i18next-compat'
type ModelSettingProps = {
@ -261,7 +261,7 @@ export function ModelSetting({
<SheetContent className="h-[calc(100%-8px)] top-1 right-1 rounded-e-md overflow-y-auto">
<SheetHeader>
<SheetTitle>
{t('common:modelSettings.title', { modelId: model.id })}
{t('common:modelSettings.title', { modelId: getModelDisplayName(model) })}
</SheetTitle>
<SheetDescription>
{t('common:modelSettings.description')}

View File

@ -18,7 +18,7 @@ const ScrollToBottom = ({
}) => {
const { t } = useTranslation()
const appMainViewBgColor = useAppearance((state) => state.appMainViewBgColor)
const { showScrollToBottomBtn, scrollToBottom, setIsUserScrolling } =
const { showScrollToBottomBtn, scrollToBottom } =
useThreadScrolling(threadId, scrollContainerRef)
const { messages } = useMessages(
useShallow((state) => ({
@ -50,7 +50,6 @@ const ScrollToBottom = ({
className="bg-main-view-fg/10 px-2 border border-main-view-fg/5 flex items-center justify-center rounded-xl gap-x-2 cursor-pointer pointer-events-auto"
onClick={() => {
scrollToBottom(true)
setIsUserScrolling(false)
}}
>
<p className="text-xs">{t('scrollToBottom')}</p>

View File

@ -0,0 +1,277 @@
import { describe, it, expect, beforeEach, vi } from 'vitest'
import { render, screen } from '@testing-library/react'
import '@testing-library/jest-dom'
import DropdownModelProvider from '../DropdownModelProvider'
import { getModelDisplayName } from '@/lib/utils'
import { useModelProvider } from '@/hooks/useModelProvider'
// Define basic types to avoid missing declarations
type ModelProvider = {
provider: string
active: boolean
models: Array<{
id: string
displayName?: string
capabilities: string[]
}>
settings: unknown[]
}
type Model = {
id: string
displayName?: string
capabilities?: string[]
}
type MockHookReturn = {
providers: ModelProvider[]
selectedProvider: string
selectedModel: Model
getProviderByName: (name: string) => ModelProvider | undefined
selectModelProvider: () => void
getModelBy: (id: string) => Model | undefined
updateProvider: () => void
}
// Mock the dependencies
vi.mock('@/hooks/useModelProvider', () => ({
useModelProvider: vi.fn(),
}))
vi.mock('@/hooks/useThreads', () => ({
useThreads: vi.fn(() => ({
updateCurrentThreadModel: vi.fn(),
})),
}))
vi.mock('@/hooks/useServiceHub', () => ({
useServiceHub: vi.fn(() => ({
models: () => ({
checkMmprojExists: vi.fn(() => Promise.resolve(false)),
checkMmprojExistsAndUpdateOffloadMMprojSetting: vi.fn(() => Promise.resolve()),
}),
})),
}))
vi.mock('@/i18n/react-i18next-compat', () => ({
useTranslation: vi.fn(() => ({
t: (key: string) => key,
})),
}))
vi.mock('@tanstack/react-router', () => ({
useNavigate: vi.fn(() => vi.fn()),
}))
vi.mock('@/hooks/useFavoriteModel', () => ({
useFavoriteModel: vi.fn(() => ({
favoriteModels: [],
})),
}))
vi.mock('@/lib/platform/const', () => ({
PlatformFeatures: {
WEB_AUTO_MODEL_SELECTION: false,
MODEL_PROVIDER_SETTINGS: true,
},
}))
// Mock UI components
vi.mock('@/components/ui/popover', () => ({
Popover: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
PopoverTrigger: ({ children }: { children: React.ReactNode }) => (
<div data-testid="popover-trigger">{children}</div>
),
PopoverContent: ({ children }: { children: React.ReactNode }) => (
<div data-testid="popover-content">{children}</div>
),
}))
vi.mock('../ProvidersAvatar', () => ({
default: ({ provider }: { provider: any }) => (
<div data-testid={`provider-avatar-${provider.provider}`} />
),
}))
vi.mock('../Capabilities', () => ({
default: ({ capabilities }: { capabilities: string[] }) => (
<div data-testid="capabilities">{capabilities.join(',')}</div>
),
}))
vi.mock('../ModelSetting', () => ({
ModelSetting: () => <div data-testid="model-setting" />,
}))
vi.mock('../ModelSupportStatus', () => ({
ModelSupportStatus: () => <div data-testid="model-support-status" />,
}))
describe('DropdownModelProvider - Display Name Integration', () => {
const mockProviders: ModelProvider[] = [
{
provider: 'llamacpp',
active: true,
models: [
{
id: 'model1.gguf',
displayName: 'Custom Model 1',
capabilities: ['completion'],
},
{
id: 'model2-very-long-filename.gguf',
displayName: 'Short Name',
capabilities: ['completion'],
},
{
id: 'model3.gguf',
// No displayName - should fall back to ID
capabilities: ['completion'],
},
],
settings: [],
},
]
const mockSelectedModel = {
id: 'model1.gguf',
displayName: 'Custom Model 1',
capabilities: ['completion'],
}
beforeEach(() => {
vi.clearAllMocks()
// Reset the mock for each test
vi.mocked(useModelProvider).mockReturnValue({
providers: mockProviders,
selectedProvider: 'llamacpp',
selectedModel: mockSelectedModel,
getProviderByName: vi.fn((name: string) =>
mockProviders.find((p: ModelProvider) => p.provider === name)
),
selectModelProvider: vi.fn(),
getModelBy: vi.fn((id: string) =>
mockProviders[0].models.find((m: Model) => m.id === id)
),
updateProvider: vi.fn(),
} as MockHookReturn)
})
it('should display custom model name in the trigger button', () => {
render(<DropdownModelProvider />)
// Should show the display name in both trigger and dropdown
expect(screen.getAllByText('Custom Model 1')).toHaveLength(2) // One in trigger, one in dropdown
// Model ID should not be visible as text (it's only in title attributes)
expect(screen.queryByDisplayValue('model1.gguf')).not.toBeInTheDocument()
})
it('should fall back to model ID when no displayName is set', () => {
vi.mocked(useModelProvider).mockReturnValue({
providers: mockProviders,
selectedProvider: 'llamacpp',
selectedModel: mockProviders[0].models[2], // model3 without displayName
getProviderByName: vi.fn((name: string) =>
mockProviders.find((p: ModelProvider) => p.provider === name)
),
selectModelProvider: vi.fn(),
getModelBy: vi.fn((id: string) =>
mockProviders[0].models.find((m: Model) => m.id === id)
),
updateProvider: vi.fn(),
} as MockHookReturn)
render(<DropdownModelProvider />)
expect(screen.getAllByText('model3.gguf')).toHaveLength(2) // Trigger and dropdown
})
it('should show display names in the model list items', () => {
render(<DropdownModelProvider />)
// Check if the display names are shown in the options
expect(screen.getAllByText('Custom Model 1')).toHaveLength(2) // Selected: Trigger + dropdown
expect(screen.getByText('Short Name')).toBeInTheDocument() // Only in dropdown
expect(screen.getByText('model3.gguf')).toBeInTheDocument() // Only in dropdown
})
it('should use getModelDisplayName utility correctly', () => {
// Test the utility function directly with different model scenarios
const modelWithDisplayName = {
id: 'long-model-name.gguf',
displayName: 'Short Name',
} as Model
const modelWithoutDisplayName = {
id: 'model-without-display-name.gguf',
} as Model
const modelWithEmptyDisplayName = {
id: 'model-with-empty.gguf',
displayName: '',
} as Model
expect(getModelDisplayName(modelWithDisplayName)).toBe('Short Name')
expect(getModelDisplayName(modelWithoutDisplayName)).toBe('model-without-display-name.gguf')
expect(getModelDisplayName(modelWithEmptyDisplayName)).toBe('model-with-empty.gguf')
})
it('should maintain model ID for internal operations while showing display name', () => {
const mockSelectModelProvider = vi.fn()
vi.mocked(useModelProvider).mockReturnValue({
providers: mockProviders,
selectedProvider: 'llamacpp',
selectedModel: mockSelectedModel,
getProviderByName: vi.fn((name: string) =>
mockProviders.find((p: ModelProvider) => p.provider === name)
),
selectModelProvider: mockSelectModelProvider,
getModelBy: vi.fn((id: string) =>
mockProviders[0].models.find((m: Model) => m.id === id)
),
updateProvider: vi.fn(),
} as MockHookReturn)
render(<DropdownModelProvider />)
// Verify that display name is shown in UI
expect(screen.getAllByText('Custom Model 1')).toHaveLength(2) // Trigger + dropdown
// The actual model ID should still be preserved for backend operations
// This would be tested in the click handlers, but that requires more complex mocking
expect(mockSelectedModel.id).toBe('model1.gguf')
})
it('should handle updating display model when selection changes', () => {
// Test that when a new model is selected, the trigger updates correctly
// First render with model1 selected
const { rerender } = render(<DropdownModelProvider />)
// Check trigger shows Custom Model 1
const triggerButton = screen.getByRole('button')
expect(triggerButton).toHaveTextContent('Custom Model 1')
// Update to select model2
vi.mocked(useModelProvider).mockReturnValue({
providers: mockProviders,
selectedProvider: 'llamacpp',
selectedModel: mockProviders[0].models[1], // model2
getProviderByName: vi.fn((name: string) =>
mockProviders.find((p: ModelProvider) => p.provider === name)
),
selectModelProvider: vi.fn(),
getModelBy: vi.fn((id: string) =>
mockProviders[0].models.find((m: Model) => m.id === id)
),
updateProvider: vi.fn(),
} as MockHookReturn)
rerender(<DropdownModelProvider />)
// Check trigger now shows Short Name
expect(triggerButton).toHaveTextContent('Short Name')
// Both models are still visible in the dropdown, so we can't test for absence
expect(screen.getAllByText('Short Name')).toHaveLength(2) // trigger + dropdown
})
})

View File

@ -0,0 +1,184 @@
import { describe, it, expect, beforeEach, vi } from 'vitest'
import { render, screen, fireEvent, waitFor } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { DialogEditModel } from '../dialogs/EditModel'
import { useModelProvider } from '@/hooks/useModelProvider'
import '@testing-library/jest-dom'
// Mock the dependencies
vi.mock('@/hooks/useModelProvider', () => ({
useModelProvider: vi.fn(() => ({
updateProvider: vi.fn(),
setProviders: vi.fn(),
})),
}))
vi.mock('@/hooks/useServiceHub', () => ({
useServiceHub: vi.fn(() => ({
providers: () => ({
getProviders: vi.fn(() => Promise.resolve([])),
}),
})),
}))
vi.mock('@/i18n/react-i18next-compat', () => ({
useTranslation: vi.fn(() => ({
t: (key: string) => key,
})),
}))
vi.mock('sonner', () => ({
toast: {
success: vi.fn(),
error: vi.fn(),
},
}))
// Mock Dialog components
vi.mock('@/components/ui/dialog', () => ({
Dialog: ({ children, open }: { children: React.ReactNode; open: boolean }) =>
open ? <div data-testid="dialog">{children}</div> : null,
DialogContent: ({ children }: { children: React.ReactNode }) => (
<div data-testid="dialog-content">{children}</div>
),
DialogHeader: ({ children }: { children: React.ReactNode }) => (
<div data-testid="dialog-header">{children}</div>
),
DialogTitle: ({ children }: { children: React.ReactNode }) => (
<h1 data-testid="dialog-title">{children}</h1>
),
DialogDescription: ({ children }: { children: React.ReactNode }) => (
<p data-testid="dialog-description">{children}</p>
),
DialogTrigger: ({ children }: { children: React.ReactNode }) => (
<div data-testid="dialog-trigger">{children}</div>
),
}))
vi.mock('@/components/ui/input', () => ({
Input: ({ value, onChange, ...props }: any) => (
<input
value={value}
onChange={onChange}
data-testid="display-name-input"
{...props}
/>
),
}))
vi.mock('@/components/ui/button', () => ({
Button: ({ children, onClick, ...props }: any) => (
<button onClick={onClick} data-testid="button" {...props}>
{children}
</button>
),
}))
// Mock other UI components
vi.mock('@tabler/icons-react', () => ({
IconPencil: () => <div data-testid="pencil-icon" />,
IconCheck: () => <div data-testid="check-icon" />,
IconX: () => <div data-testid="x-icon" />,
IconAlertTriangle: () => <div data-testid="alert-triangle-icon" />,
IconEye: () => <div data-testid="eye-icon" />,
IconTool: () => <div data-testid="tool-icon" />,
IconLoader2: () => <div data-testid="loader-icon" />,
}))
describe('DialogEditModel - Basic Component Tests', () => {
const mockProvider = {
provider: 'llamacpp',
active: true,
models: [
{
id: 'test-model.gguf',
displayName: 'My Custom Model',
capabilities: ['completion'],
},
],
settings: [],
} as any
const mockUpdateProvider = vi.fn()
const mockSetProviders = vi.fn()
beforeEach(() => {
vi.clearAllMocks()
vi.mocked(useModelProvider).mockReturnValue({
updateProvider: mockUpdateProvider,
setProviders: mockSetProviders,
} as any)
})
it('should render without errors', () => {
const { container } = render(
<DialogEditModel
provider={mockProvider}
modelId="test-model.gguf"
/>
)
// Component should render without throwing errors
expect(container).toBeInTheDocument()
})
it('should handle provider without models', () => {
const emptyProvider = {
...mockProvider,
models: [],
} as any
const { container } = render(
<DialogEditModel
provider={emptyProvider}
modelId="test-model.gguf"
/>
)
// Component should handle empty models gracefully
expect(container).toBeInTheDocument()
})
it('should accept provider and modelId props', () => {
const { container } = render(
<DialogEditModel
provider={mockProvider}
modelId="different-model.gguf"
/>
)
expect(container).toBeInTheDocument()
})
it('should not crash with minimal props', () => {
const minimalProvider = {
provider: 'test',
active: false,
models: [],
settings: [],
} as any
expect(() => {
render(
<DialogEditModel
provider={minimalProvider}
modelId="any-model"
/>
)
}).not.toThrow()
})
it('should have mocked dependencies available', () => {
render(
<DialogEditModel
provider={mockProvider}
modelId="test-model.gguf"
/>
)
// Verify our mocks are in place
expect(mockUpdateProvider).toBeDefined()
expect(mockSetProviders).toBeDefined()
})
})

View File

@ -7,6 +7,8 @@ import {
DialogTrigger,
} from '@/components/ui/dialog'
import { Switch } from '@/components/ui/switch'
import { Input } from '@/components/ui/input'
import { Button } from '@/components/ui/button'
import { useModelProvider } from '@/hooks/useModelProvider'
import {
@ -14,12 +16,14 @@ import {
IconEye,
IconTool,
IconAlertTriangle,
IconLoader2,
// IconWorld,
// IconAtom,
// IconCodeCircle2,
} from '@tabler/icons-react'
import { useState, useEffect } from 'react'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { toast } from 'sonner'
// No need to define our own interface, we'll use the existing Model type
type DialogEditModelProps = {
@ -34,6 +38,13 @@ export const DialogEditModel = ({
const { t } = useTranslation()
const { updateProvider } = useModelProvider()
const [selectedModelId, setSelectedModelId] = useState<string>('')
const [displayName, setDisplayName] = useState<string>('')
const [originalDisplayName, setOriginalDisplayName] = useState<string>('')
const [originalCapabilities, setOriginalCapabilities] = useState<
Record<string, boolean>
>({})
const [isOpen, setIsOpen] = useState(false)
const [isLoading, setIsLoading] = useState(false)
const [capabilities, setCapabilities] = useState<Record<string, boolean>>({
completion: false,
vision: false,
@ -45,20 +56,34 @@ export const DialogEditModel = ({
// Initialize with the provided model ID or the first model if available
useEffect(() => {
// Only set the selected model ID if the dialog is not open to prevent switching during downloads
if (!isOpen) {
if (modelId) {
setSelectedModelId(modelId)
} else if (provider.models && provider.models.length > 0) {
setSelectedModelId(provider.models[0].id)
}
}, [provider, modelId])
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [modelId, isOpen]) // Add isOpen dependency to prevent switching when dialog is open
// Handle dialog opening - set the initial model selection
useEffect(() => {
if (isOpen && !selectedModelId) {
if (modelId) {
setSelectedModelId(modelId)
} else if (provider.models && provider.models.length > 0) {
setSelectedModelId(provider.models[0].id)
}
}
}, [isOpen, selectedModelId, modelId, provider.models])
// Get the currently selected model
const selectedModel = provider.models.find(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(m: any) => m.id === selectedModelId
(m: Model) => m.id === selectedModelId
)
// Initialize capabilities from selected model
// Initialize capabilities and display name from selected model
useEffect(() => {
if (selectedModel) {
const modelCapabilities = selectedModel.capabilities || []
@ -70,38 +95,77 @@ export const DialogEditModel = ({
web_search: modelCapabilities.includes('web_search'),
reasoning: modelCapabilities.includes('reasoning'),
})
// Use existing displayName if available, otherwise fall back to model ID
const displayNameValue = (selectedModel as Model & { displayName?: string }).displayName || selectedModel.id
setDisplayName(displayNameValue)
setOriginalDisplayName(displayNameValue)
const originalCaps = {
completion: modelCapabilities.includes('completion'),
vision: modelCapabilities.includes('vision'),
tools: modelCapabilities.includes('tools'),
embeddings: modelCapabilities.includes('embeddings'),
web_search: modelCapabilities.includes('web_search'),
reasoning: modelCapabilities.includes('reasoning'),
}
setOriginalCapabilities(originalCaps)
}
}, [selectedModel])
// Track if capabilities were updated by user action
const [capabilitiesUpdated, setCapabilitiesUpdated] = useState(false)
// Update model capabilities - only update local state
const handleCapabilityChange = (capability: string, enabled: boolean) => {
setCapabilities((prev) => ({
...prev,
[capability]: enabled,
}))
// Mark that capabilities were updated by user action
setCapabilitiesUpdated(true)
}
// Use effect to update the provider when capabilities are explicitly changed by user
useEffect(() => {
// Only run if capabilities were updated by user action and we have a selected model
if (!capabilitiesUpdated || !selectedModel) return
// Handle display name change
const handleDisplayNameChange = (newName: string) => {
setDisplayName(newName)
}
// Reset the flag
setCapabilitiesUpdated(false)
// Check if there are unsaved changes
const hasUnsavedChanges = () => {
const nameChanged = displayName !== originalDisplayName
const capabilitiesChanged =
JSON.stringify(capabilities) !== JSON.stringify(originalCapabilities)
return nameChanged || capabilitiesChanged
}
// Create updated capabilities array from the state
// Handle save changes
const handleSaveChanges = async () => {
if (!selectedModel?.id || isLoading) return
setIsLoading(true)
try {
let updatedModels = provider.models
// Update display name if changed
if (displayName !== originalDisplayName) {
// Update the model in the provider models array with displayName
updatedModels = updatedModels.map((m: Model) => {
if (m.id === selectedModelId) {
return {
...m,
displayName: displayName,
}
}
return m
})
setOriginalDisplayName(displayName)
}
// Update capabilities if changed
if (
JSON.stringify(capabilities) !== JSON.stringify(originalCapabilities)
) {
const updatedCapabilities = Object.entries(capabilities)
.filter(([, isEnabled]) => isEnabled)
.map(([capName]) => capName)
// Find and update the model in the provider
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const updatedModels = provider.models.map((m: any) => {
updatedModels = updatedModels.map((m: Model) => {
if (m.id === selectedModelId) {
return {
...m,
@ -113,26 +177,32 @@ export const DialogEditModel = ({
return m
})
setOriginalCapabilities(capabilities)
}
// Update the provider with the updated models
updateProvider(provider.provider, {
...provider,
models: updatedModels,
})
}, [
capabilitiesUpdated,
capabilities,
provider,
selectedModel,
selectedModelId,
updateProvider,
])
// Show success toast and close dialog
toast.success('Model updated successfully')
setIsOpen(false)
} catch (error) {
console.error('Failed to update model:', error)
toast.error('Failed to update model. Please try again.')
} finally {
setIsLoading(false)
}
}
if (!selectedModel) {
return null
}
return (
<Dialog>
<Dialog open={isOpen} onOpenChange={setIsOpen}>
<DialogTrigger asChild>
<div className="size-6 cursor-pointer flex items-center justify-center rounded hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out">
<IconPencil size={18} className="text-main-view-fg/50" />
@ -148,6 +218,27 @@ export const DialogEditModel = ({
</DialogDescription>
</DialogHeader>
{/* Model Display Name Section */}
<div className="py-1">
<label
htmlFor="display-name"
className="text-sm font-medium mb-3 block"
>
Display Name
</label>
<Input
id="display-name"
value={displayName}
onChange={(e) => handleDisplayNameChange(e.target.value)}
placeholder="Enter display name"
className="w-full"
disabled={isLoading}
/>
<p className="text-xs text-main-view-fg/60 mt-1">
This is the name that will be shown in the interface. The original model file remains unchanged.
</p>
</div>
{/* Warning Banner */}
<div className="bg-main-view-fg/5 border border-main-view-fg/10 rounded-md p-3">
<div className="flex items-start space-x-3">
@ -181,6 +272,7 @@ export const DialogEditModel = ({
onCheckedChange={(checked) =>
handleCapabilityChange('tools', checked)
}
disabled={isLoading}
/>
</div>
@ -197,6 +289,7 @@ export const DialogEditModel = ({
onCheckedChange={(checked) =>
handleCapabilityChange('vision', checked)
}
disabled={isLoading}
/>
</div>
@ -253,6 +346,24 @@ export const DialogEditModel = ({
</div> */}
</div>
</div>
{/* Save Button */}
<div className="flex justify-end pt-4">
<Button
onClick={handleSaveChanges}
disabled={!hasUnsavedChanges() || isLoading}
className="px-4 py-2"
>
{isLoading ? (
<>
<IconLoader2 className="mr-2 h-4 w-4 animate-spin" />
Saving...
</>
) : (
'Save Changes'
)}
</Button>
</div>
</DialogContent>
</Dialog>
)

View File

@ -0,0 +1,182 @@
import { describe, it, expect, beforeEach, vi } from 'vitest'
import { act, renderHook } from '@testing-library/react'
import { useModelProvider } from '../useModelProvider'
// Mock getServiceHub so the store's path-separator lookup is deterministic
// ('/' on every platform) and never touches the real service layer.
vi.mock('@/hooks/useServiceHub', () => ({
  getServiceHub: vi.fn(() => ({
    path: () => ({
      sep: () => '/',
    }),
  })),
}))
// Mock the localStorage key constants used by the persisted zustand store.
vi.mock('@/constants/localStorage', () => ({
  localStorageKey: {
    modelProvider: 'jan-model-provider',
  },
}))
// In-memory localStorage stub; every method is a vi.fn so tests can both
// control persisted state (getItem) and assert on writes (setItem).
const localStorageMock = {
  getItem: vi.fn(),
  setItem: vi.fn(),
  removeItem: vi.fn(),
  clear: vi.fn(),
}
// Replace window.localStorage with the stub; 'writable' so other suites
// could swap it back if needed.
Object.defineProperty(window, 'localStorage', {
  value: localStorageMock,
  writable: true,
})
// Suite covering the displayName feature of the useModelProvider store:
// models may carry a user-chosen displayName that must survive provider
// refreshes (setProviders merges) and must stay optional for legacy models.
describe('useModelProvider - displayName functionality', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // No persisted provider state by default.
    localStorageMock.getItem.mockReturnValue(null)
    // Reset Zustand store to default state so suites don't leak state into
    // each other (the store is module-level and shared across tests).
    act(() => {
      useModelProvider.setState({
        providers: [],
        selectedProvider: 'llamacpp',
        selectedModel: null,
        deletedModels: [],
      })
    })
  })
  it('should handle models without displayName property', () => {
    const { result } = renderHook(() => useModelProvider())
    // A legacy-shaped model: no displayName field at all.
    const provider = {
      provider: 'llamacpp',
      active: true,
      models: [
        {
          id: 'test-model.gguf',
          capabilities: ['completion'],
        },
      ],
      settings: [],
    } as any
    // First add the provider, then update it (since updateProvider only updates existing providers)
    act(() => {
      result.current.addProvider(provider)
    })
    const updatedProvider = result.current.getProviderByName('llamacpp')
    // displayName stays absent rather than being defaulted to the id.
    expect(updatedProvider?.models[0].displayName).toBeUndefined()
    expect(updatedProvider?.models[0].id).toBe('test-model.gguf')
  })
  it('should preserve displayName when merging providers in setProviders', () => {
    const { result } = renderHook(() => useModelProvider())
    // First, set up initial state with displayName via direct state manipulation
    // This simulates the scenario where a user has already customized a display name
    act(() => {
      useModelProvider.setState({
        providers: [
          {
            provider: 'llamacpp',
            active: true,
            models: [
              {
                id: 'test-model.gguf',
                displayName: 'My Custom Model',
                capabilities: ['completion'],
              },
            ],
            settings: [],
          },
        ] as any,
        selectedProvider: 'llamacpp',
        selectedModel: null,
        deletedModels: [],
      })
    })
    // Now simulate setProviders with fresh data (like from server refresh)
    const freshProviders = [
      {
        provider: 'llamacpp',
        active: true,
        persist: true,
        models: [
          {
            id: 'test-model.gguf',
            capabilities: ['completion'],
            // Note: no displayName in fresh data
          },
        ],
        settings: [],
      },
    ] as any
    act(() => {
      result.current.setProviders(freshProviders)
    })
    // The displayName should be preserved from existing state
    const provider = result.current.getProviderByName('llamacpp')
    expect(provider?.models[0].displayName).toBe('My Custom Model')
  })
  it('should provide basic functionality without breaking existing behavior', () => {
    const { result } = renderHook(() => useModelProvider())
    // Test that basic provider operations work
    expect(result.current.providers).toEqual([])
    expect(result.current.selectedProvider).toBe('llamacpp')
    expect(result.current.selectedModel).toBeNull()
    // Test addProvider functionality
    const provider = {
      provider: 'openai',
      active: true,
      models: [],
      settings: [],
    } as any
    act(() => {
      result.current.addProvider(provider)
    })
    expect(result.current.providers).toHaveLength(1)
    expect(result.current.getProviderByName('openai')).toBeDefined()
  })
  it('should handle provider operations with models that have displayName', () => {
    const { result } = renderHook(() => useModelProvider())
    // Test that we can at least get and set providers with displayName models
    const providerWithDisplayName = {
      provider: 'llamacpp',
      active: true,
      models: [
        {
          id: 'test-model.gguf',
          displayName: 'Custom Model Name',
          capabilities: ['completion'],
        },
      ],
      settings: [],
    } as any
    // Set the state directly (simulating what would happen in real usage)
    act(() => {
      useModelProvider.setState({
        providers: [providerWithDisplayName],
        selectedProvider: 'llamacpp',
        selectedModel: null,
        deletedModels: [],
      })
    })
    // Reads must round-trip the custom display name alongside the raw id.
    const provider = result.current.getProviderByName('llamacpp')
    expect(provider?.models[0].displayName).toBe('Custom Model Name')
    expect(provider?.models[0].id).toBe('test-model.gguf')
  })
})

View File

@ -32,7 +32,7 @@ export function useClickOutside<T extends HTMLElement = any>(
)
return () => {
;(events || DEFAULT_EVENTS).forEach((fn) =>
(events || DEFAULT_EVENTS).forEach((fn) =>
document.removeEventListener(fn, listener)
)
}

View File

@ -104,6 +104,7 @@ export const useModelProvider = create<ModelProviderState>()(
...model,
settings: settings,
capabilities: existingModel?.capabilities || model.capabilities,
displayName: existingModel?.displayName || model.displayName,
}
})

View File

@ -1,8 +1,10 @@
import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { useAppState } from './useAppState'
import { useMessages } from './useMessages'
import { useShallow } from 'zustand/react/shallow'
import debounce from 'lodash.debounce'
const VIEWPORT_PADDING = 40 // Offset from viewport bottom for user message positioning
const MAX_DOM_RETRY_ATTEMPTS = 5 // Maximum attempts to find DOM elements before giving up
const DOM_RETRY_DELAY = 100 // Delay in ms between DOM element retry attempts
export const useThreadScrolling = (
threadId: string,
@ -10,18 +12,36 @@ export const useThreadScrolling = (
) => {
const streamingContent = useAppState((state) => state.streamingContent)
const isFirstRender = useRef(true)
const { messages } = useMessages(
useShallow((state) => ({
messages: state.messages[threadId],
}))
)
const wasStreamingRef = useRef(false)
const userIntendedPositionRef = useRef<number | null>(null)
const [isUserScrolling, setIsUserScrolling] = useState(false)
const [isAtBottom, setIsAtBottom] = useState(true)
const [hasScrollbar, setHasScrollbar] = useState(false)
const lastScrollTopRef = useRef(0)
const messagesCount = useMemo(() => messages?.length ?? 0, [messages])
const lastAssistantMessageRef = useRef<HTMLElement | null>(null)
const messageCount = useMessages((state) => state.messages[threadId]?.length ?? 0)
const lastMessageRole = useMessages((state) => {
const msgs = state.messages[threadId]
return msgs && msgs.length > 0 ? msgs[msgs.length - 1].role : null
})
const [paddingHeight, setPaddingHeightInternal] = useState(0)
const setPaddingHeight = setPaddingHeightInternal
const originalPaddingRef = useRef(0)
const getDOMElements = useCallback(() => {
const scrollContainer = scrollContainerRef.current
if (!scrollContainer) return null
const userMessages = scrollContainer.querySelectorAll('[data-message-author-role="user"]')
const assistantMessages = scrollContainer.querySelectorAll('[data-message-author-role="assistant"]')
return {
scrollContainer,
lastUserMessage: userMessages[userMessages.length - 1] as HTMLElement,
lastAssistantMessage: assistantMessages[assistantMessages.length - 1] as HTMLElement,
}
}, [scrollContainerRef])
const showScrollToBottomBtn = !isAtBottom && hasScrollbar
@ -32,20 +52,16 @@ export const useThreadScrolling = (
...(smooth ? { behavior: 'smooth' } : {}),
})
}
}, [])
}, [scrollContainerRef])
const handleScroll = useCallback((e: Event) => {
const target = e.target as HTMLDivElement
const { scrollTop, scrollHeight, clientHeight } = target
// Use a small tolerance to better detect when we're at the bottom
const isBottom = Math.abs(scrollHeight - scrollTop - clientHeight) < 10
const hasScroll = scrollHeight > clientHeight
// Detect if this is a user-initiated scroll
if (Math.abs(scrollTop - lastScrollTopRef.current) > 10) {
setIsUserScrolling(!isBottom)
// If user scrolls during streaming and moves away from bottom, record their intended position
if (streamingContent && !isBottom) {
userIntendedPositionRef.current = scrollTop
}
@ -76,117 +92,129 @@ export const useThreadScrolling = (
setHasScrollbar(hasScroll)
}, [])
// Single useEffect for all auto-scrolling logic
useEffect(() => {
// Track streaming state changes
const isCurrentlyStreaming = !!streamingContent
const justFinishedStreaming =
wasStreamingRef.current && !isCurrentlyStreaming
wasStreamingRef.current = isCurrentlyStreaming
// If streaming just finished and user had an intended position, restore it
if (justFinishedStreaming && userIntendedPositionRef.current !== null) {
// Small delay to ensure DOM has updated
setTimeout(() => {
if (
scrollContainerRef.current &&
userIntendedPositionRef.current !== null
) {
scrollContainerRef.current.scrollTo({
top: userIntendedPositionRef.current,
behavior: 'smooth',
})
userIntendedPositionRef.current = null
setIsUserScrolling(false)
}
}, 100)
return
}
// Clear intended position when streaming starts fresh
if (isCurrentlyStreaming && !wasStreamingRef.current) {
userIntendedPositionRef.current = null
}
// Only auto-scroll when the user is not actively scrolling
// AND either at the bottom OR there's streaming content
if (!isUserScrolling && (streamingContent || isAtBottom) && messagesCount) {
// Use non-smooth scrolling for auto-scroll to prevent jank
scrollToBottom(false)
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [streamingContent, isUserScrolling, messagesCount])
useEffect(() => {
if (streamingContent) {
const interval = setInterval(checkScrollState, 100)
return () => clearInterval(interval)
}
}, [streamingContent, checkScrollState])
// Auto-scroll to bottom when component mounts or thread content changes
useEffect(() => {
const scrollContainer = scrollContainerRef.current
if (!scrollContainer) return
// Always scroll to bottom on first render or when thread changes
if (!scrollContainerRef.current) return
if (isFirstRender.current) {
isFirstRender.current = false
scrollToBottom()
setIsAtBottom(true)
setIsUserScrolling(false)
userIntendedPositionRef.current = null
wasStreamingRef.current = false
scrollToBottom(false)
checkScrollState()
return
}
}, [checkScrollState, scrollToBottom])
const handleDOMScroll = (e: Event) => {
const target = e.target as HTMLDivElement
const { scrollTop, scrollHeight, clientHeight } = target
// Use a small tolerance to better detect when we're at the bottom
const isBottom = Math.abs(scrollHeight - scrollTop - clientHeight) < 10
const hasScroll = scrollHeight > clientHeight
// Detect if this is a user-initiated scroll
if (Math.abs(scrollTop - lastScrollTopRef.current) > 10) {
setIsUserScrolling(!isBottom)
const prevCountRef = useRef(messageCount)
useEffect(() => {
const prevCount = prevCountRef.current
const becameLonger = messageCount > prevCount
const isUserMessage = lastMessageRole === 'user'
// If user scrolls during streaming and moves away from bottom, record their intended position
if (streamingContent && !isBottom) {
userIntendedPositionRef.current = scrollTop
if (becameLonger && messageCount > 0 && isUserMessage) {
const calculatePadding = () => {
const elements = getDOMElements()
if (!elements?.lastUserMessage) return
const viewableHeight = elements.scrollContainer.clientHeight
const userMessageHeight = elements.lastUserMessage.offsetHeight
const calculatedPadding = Math.max(0, viewableHeight - VIEWPORT_PADDING - userMessageHeight)
setPaddingHeight(calculatedPadding)
originalPaddingRef.current = calculatedPadding
// Scroll after padding is applied to the DOM
requestAnimationFrame(() => {
elements.scrollContainer.scrollTo({
top: elements.scrollContainer.scrollHeight,
behavior: 'smooth',
})
})
}
let retryCount = 0
const tryCalculatePadding = () => {
if (getDOMElements()?.lastUserMessage) {
calculatePadding()
} else if (retryCount < MAX_DOM_RETRY_ATTEMPTS) {
retryCount++
setTimeout(tryCalculatePadding, DOM_RETRY_DELAY)
}
}
setIsAtBottom(isBottom)
setHasScrollbar(hasScroll)
lastScrollTopRef.current = scrollTop
tryCalculatePadding()
}
// Use a shorter debounce time for more responsive scrolling
const debouncedScroll = debounce(handleDOMScroll)
prevCountRef.current = messageCount
}, [messageCount, lastMessageRole])
useEffect(() => {
const chatHistoryElement = scrollContainerRef.current
if (chatHistoryElement) {
chatHistoryElement.addEventListener('scroll', debouncedScroll)
return () =>
chatHistoryElement.removeEventListener('scroll', debouncedScroll)
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
const previouslyStreaming = wasStreamingRef.current
const currentlyStreaming = !!streamingContent && streamingContent.thread_id === threadId
const streamingStarted = !previouslyStreaming && currentlyStreaming
const streamingEnded = previouslyStreaming && !currentlyStreaming
const hasPaddingToAdjust = originalPaddingRef.current > 0
// Store the current assistant message when streaming starts
if (streamingStarted) {
const elements = getDOMElements()
lastAssistantMessageRef.current = elements?.lastAssistantMessage || null
}
if (streamingEnded && hasPaddingToAdjust) {
let retryCount = 0
const adjustPaddingWhenReady = () => {
const elements = getDOMElements()
const currentAssistantMessage = elements?.lastAssistantMessage
// Check if a new assistant message has appeared (different from the one before streaming)
const hasNewAssistantMessage = currentAssistantMessage &&
currentAssistantMessage !== lastAssistantMessageRef.current
if (hasNewAssistantMessage && elements?.lastUserMessage) {
const userRect = elements.lastUserMessage.getBoundingClientRect()
const assistantRect = currentAssistantMessage.getBoundingClientRect()
const actualSpacing = assistantRect.top - userRect.bottom
const totalAssistantHeight = currentAssistantMessage.offsetHeight + actualSpacing
const newPadding = Math.max(0, originalPaddingRef.current - totalAssistantHeight)
setPaddingHeight(newPadding)
originalPaddingRef.current = newPadding
lastAssistantMessageRef.current = currentAssistantMessage
} else if (retryCount < MAX_DOM_RETRY_ATTEMPTS) {
retryCount++
setTimeout(adjustPaddingWhenReady, DOM_RETRY_DELAY)
} else {
// Max retries hit - remove padding as fallback
setPaddingHeight(0)
originalPaddingRef.current = 0
}
}
adjustPaddingWhenReady()
}
wasStreamingRef.current = currentlyStreaming
}, [streamingContent, threadId])
// Reset scroll state when thread changes
useEffect(() => {
isFirstRender.current = true
scrollToBottom()
setIsAtBottom(true)
setIsUserScrolling(false)
userIntendedPositionRef.current = null
wasStreamingRef.current = false
setPaddingHeight(0)
originalPaddingRef.current = 0
prevCountRef.current = messageCount
scrollToBottom(false)
checkScrollState()
}, [threadId, checkScrollState, scrollToBottom])
}, [threadId])
return useMemo(
() => ({ showScrollToBottomBtn, scrollToBottom, setIsUserScrolling }),
[showScrollToBottomBtn, scrollToBottom, setIsUserScrolling]
() => ({
showScrollToBottomBtn,
scrollToBottom,
paddingHeight
}),
[showScrollToBottomBtn, scrollToBottom, paddingHeight]
)
}

View File

@ -6,6 +6,7 @@ import {
toGigabytes,
formatMegaBytes,
formatDuration,
getModelDisplayName,
} from '../utils'
describe('getProviderLogo', () => {
@ -200,3 +201,52 @@ describe('formatDuration', () => {
expect(formatDuration(start, 86400000)).toBe('1d 0h 0m 0s') // exactly 1 day
})
})
describe('getModelDisplayName', () => {
  // Build a minimal model record for each case; the cast mirrors how
  // callers hand partially populated models to the helper.
  const modelOf = (id: string, displayName?: string | null): Model =>
    ({ id, displayName } as Model)

  it('returns displayName when it exists', () => {
    expect(
      getModelDisplayName(
        modelOf('llama-3.2-1b-instruct-q4_k_m.gguf', 'My Custom Model')
      )
    ).toBe('My Custom Model')
  })
  it('returns model.id when displayName is undefined', () => {
    expect(
      getModelDisplayName(modelOf('llama-3.2-1b-instruct-q4_k_m.gguf'))
    ).toBe('llama-3.2-1b-instruct-q4_k_m.gguf')
  })
  it('returns model.id when displayName is empty string', () => {
    expect(
      getModelDisplayName(modelOf('llama-3.2-1b-instruct-q4_k_m.gguf', ''))
    ).toBe('llama-3.2-1b-instruct-q4_k_m.gguf')
  })
  it('returns model.id when displayName is null', () => {
    expect(
      getModelDisplayName(modelOf('llama-3.2-1b-instruct-q4_k_m.gguf', null))
    ).toBe('llama-3.2-1b-instruct-q4_k_m.gguf')
  })
  it('handles models with complex display names', () => {
    expect(
      getModelDisplayName(
        modelOf('very-long-model-file-name-with-lots-of-details.gguf', 'Short Name 🤖')
      )
    ).toBe('Short Name 🤖')
  })
  it('handles models with special characters in displayName', () => {
    expect(
      getModelDisplayName(modelOf('model.gguf', 'Model (Version 2.0) - Fine-tuned'))
    ).toBe('Model (Version 2.0) - Fine-tuned')
  })
})

View File

@ -1,11 +1,34 @@
import { type ClassValue, clsx } from 'clsx'
import { twMerge } from 'tailwind-merge'
import { ExtensionManager } from './extension'
import path from "path"
export function cn(...inputs: ClassValue[]) {
return twMerge(clsx(inputs))
}
/**
 * Return the basename of a file path with its extension removed.
 *
 * Compound archive extensions (".tar.gz", ".zip") are stripped as a whole,
 * matched case-insensitively, so "backend-1.2.tar.gz" yields "backend-1.2"
 * rather than "backend-1.2.tar". For any other file only the last extension
 * (as reported by path.extname) is removed. A basename with no extension is
 * returned unchanged.
 *
 * Fixes a bug in the fallback path: when path.extname(base) is '' (no
 * extension), base.slice(0, -0) evaluates to base.slice(0, 0) and returned
 * the empty string instead of the name itself.
 */
export function basenameNoExt(filePath: string): string {
  const base = path.basename(filePath);
  // Handle known multi-part archive extensions first so ".tar.gz" is not
  // truncated to just dropping ".gz".
  const VALID_EXTENSIONS = [".tar.gz", ".zip"];
  for (const ext of VALID_EXTENSIONS) {
    if (base.toLowerCase().endsWith(ext)) {
      return base.slice(0, -ext.length);
    }
  }
  // Fallback: remove only the last extension; guard against the empty
  // extension, where slice(0, -0) would wrongly produce ''.
  const ext = path.extname(base);
  return ext ? base.slice(0, -ext.length) : base;
}
/**
 * Resolve the human-readable name for a model.
 *
 * Prefers the user-assigned displayName; any falsy value (undefined, null,
 * or the empty string) falls back to the raw model id.
 */
export function getModelDisplayName(model: Model): string {
  const { displayName, id } = model
  if (displayName) {
    return displayName
  }
  return id
}
export function getProviderLogo(provider: string) {
switch (provider) {
case 'jan':

View File

@ -13,11 +13,15 @@ export function ToasterProvider() {
alignItems: 'start',
borderColor:
'color-mix(in oklch, var(--app-main-view) 5%, transparent)',
userSelect: 'none',
WebkitUserSelect: 'none',
MozUserSelect: 'none',
msUserSelect: 'none',
},
classNames: {
toast: 'toast',
title: '!text-main-view/90',
description: '!text-main-view/70',
toast: 'toast select-none',
title: '!text-main-view/90 select-none',
description: '!text-main-view/70 select-none',
},
}}
/>

View File

@ -134,25 +134,22 @@ function ProjectContent() {
className="bg-main-view-fg/3 py-2 px-4 rounded-lg"
key={folder.id}
>
<div className="flex items-center gap-4">
<div className="flex items-start gap-3 flex-1">
<div className="flex items-center gap-4 min-w-0">
<div className="flex items-start gap-3 flex-1 min-w-0">
<div className="shrink-0 w-8 h-8 relative flex items-center justify-center bg-main-view-fg/4 rounded-md">
<IconFolder
size={16}
className="text-main-view-fg/50"
/>
</div>
<div className="flex-1 min-w-0">
<div className="flex items-center gap-2">
<h3 className="text-base font-medium text-main-view-fg/80 line-clamp-1">
<div className="flex-1 min-w-0 overflow-hidden">
<div className="flex items-center gap-2 min-w-0">
<h3
className="text-base font-medium text-main-view-fg/80 truncate flex-1 min-w-0"
title={folder.name}
>
{folder.name}
</h3>
<span className="text-xs bg-main-view-fg/10 text-main-view-fg/60 px-2 py-0.5 rounded-full">
{projectThreads.length}{' '}
{projectThreads.length === 1
? t('projects.thread')
: t('projects.threads')}
</span>
</div>
<p className="text-main-view-fg/50 text-xs line-clamp-2 mt-0.5">
{t('projects.updated')}{' '}
@ -161,6 +158,12 @@ function ProjectContent() {
</div>
</div>
<div className="flex items-center">
<span className="text-xs mr-4 bg-main-view-fg/10 text-main-view-fg/60 px-2 py-0.5 rounded-full shrink-0 whitespace-nowrap">
{projectThreads.length}{' '}
{projectThreads.length === 1
? t('projects.thread')
: t('projects.threads')}
</span>
{projectThreads.length > 0 && (
<button
className="size-8 cursor-pointer flex items-center justify-center rounded-md hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out mr-1"

View File

@ -3,7 +3,7 @@ import { Card, CardItem } from '@/containers/Card'
import HeaderPage from '@/containers/HeaderPage'
import SettingsMenu from '@/containers/SettingsMenu'
import { useModelProvider } from '@/hooks/useModelProvider'
import { cn, getProviderTitle } from '@/lib/utils'
import { cn, getProviderTitle, getModelDisplayName } from '@/lib/utils'
import {
createFileRoute,
Link,
@ -41,6 +41,7 @@ import { useLlamacppDevices } from '@/hooks/useLlamacppDevices'
import { PlatformFeatures } from '@/lib/platform/const'
import { PlatformFeature } from '@/lib/platform/types'
import { useBackendUpdater } from '@/hooks/useBackendUpdater'
import { basenameNoExt } from '@/lib/utils'
// as route.threadsDetail
export const Route = createFileRoute('/settings/providers/$providerName')({
@ -382,7 +383,7 @@ function ProviderDetail() {
filters: [
{
name: 'Backend Archives',
extensions: ['tar.gz', 'zip'],
extensions: ['tar.gz', 'zip', 'gz'],
},
],
})
@ -394,9 +395,7 @@ function ProviderDetail() {
await installBackend(selectedFile)
// Extract filename from the selected file path and replace spaces with dashes
const fileName = (
selectedFile.split(/[/\\]/).pop() || selectedFile
).replace(/\s+/g, '-')
const fileName = basenameNoExt(selectedFile).replace(/\s+/g, "-")
toast.success(t('settings:backendInstallSuccess'), {
description: `Llamacpp ${fileName} installed`,
@ -547,7 +546,7 @@ function ProviderDetail() {
)
if (deviceSettingIndex !== -1) {
;(
(
newSettings[deviceSettingIndex]
.controller_props as {
value: string
@ -778,7 +777,7 @@ function ProviderDetail() {
className="font-medium line-clamp-1"
title={model.id}
>
{model.id}
{getModelDisplayName(model)}
</h1>
<Capabilities capabilities={capabilities} />
</div>

View File

@ -22,6 +22,7 @@ import { PlatformFeatures } from '@/lib/platform/const'
import { PlatformFeature } from '@/lib/platform/types'
import ScrollToBottom from '@/containers/ScrollToBottom'
import { PromptProgress } from '@/components/PromptProgress'
import { useThreadScrolling } from '@/hooks/useThreadScrolling'
// as route.threadsDetail
export const Route = createFileRoute('/threads/$threadId')({
@ -51,6 +52,9 @@ function ThreadDetail() {
const thread = useThreads(useShallow((state) => state.threads[threadId]))
const scrollContainerRef = useRef<HTMLDivElement>(null)
// Get padding height for ChatGPT-style message positioning
const { paddingHeight } = useThreadScrolling(threadId, scrollContainerRef)
useEffect(() => {
setCurrentThreadId(threadId)
const assistant = assistants.find(
@ -194,6 +198,12 @@ function ThreadDetail() {
threadId={threadId}
data-test-id="thread-content-text"
/>
{/* Persistent padding element for ChatGPT-style message positioning */}
<div
style={{ height: paddingHeight }}
className="flex-shrink-0"
data-testid="chat-padding"
/>
</div>
</div>
<div

View File

@ -1,7 +1,7 @@
import { describe, it, expect, vi, beforeEach } from 'vitest'
import { DefaultModelsService } from '../models/default'
import type { HuggingFaceRepo, CatalogModel } from '../models/types'
import { EngineManager, Model } from '@janhq/core'
import { EngineManager } from '@janhq/core'
// Mock EngineManager
vi.mock('@janhq/core', () => ({
@ -26,6 +26,7 @@ describe('DefaultModelsService', () => {
const mockEngine = {
list: vi.fn(),
updateSettings: vi.fn(),
update: vi.fn(),
import: vi.fn(),
abortImport: vi.fn(),
delete: vi.fn(),
@ -108,22 +109,41 @@ describe('DefaultModelsService', () => {
describe('updateModel', () => {
it('should update model settings', async () => {
const modelId = 'model1'
const model = {
id: 'model1',
settings: [{ key: 'temperature', value: 0.7 }],
}
await modelsService.updateModel(model as any)
await modelsService.updateModel(modelId, model as any)
expect(mockEngine.updateSettings).toHaveBeenCalledWith(model.settings)
expect(mockEngine.update).not.toHaveBeenCalled()
})
it('should handle model without settings', async () => {
const modelId = 'model1'
const model = { id: 'model1' }
await modelsService.updateModel(model)
await modelsService.updateModel(modelId, model)
expect(mockEngine.updateSettings).not.toHaveBeenCalled()
expect(mockEngine.update).not.toHaveBeenCalled()
})
it('should handle model when modelId differs from model.id', async () => {
const modelId = 'old-model-id'
const model = {
id: 'new-model-id',
settings: [{ key: 'temperature', value: 0.7 }],
}
await modelsService.updateModel(modelId, model as any)
expect(mockEngine.updateSettings).toHaveBeenCalledWith(model.settings)
// Note: Model ID updates are now handled at the provider level in the frontend
// The engine no longer has an update method for model metadata
expect(mockEngine.update).not.toHaveBeenCalled()
})
})

View File

@ -9,7 +9,16 @@ import { DefaultDialogService } from './default'
export class TauriDialogService extends DefaultDialogService {
async open(options?: DialogOpenOptions): Promise<string | string[] | null> {
try {
return await open(options)
console.log('TauriDialogService: Opening dialog with options:', options)
if (options?.filters) {
console.log('TauriDialogService: File filters:', options.filters)
options.filters.forEach((filter, index) => {
console.log(`TauriDialogService: Filter ${index} - Name: "${filter.name}", Extensions:`, filter.extensions)
})
}
const result = await open(options)
console.log('TauriDialogService: Dialog result:', result)
return result
} catch (error) {
console.error('Error opening dialog in Tauri:', error)
return null

View File

@ -19,10 +19,14 @@ export class WebDialogService implements DialogService {
}
if (options?.filters) {
console.log('WebDialogService: Processing file filters:', options.filters)
const extensions = options.filters.flatMap(filter =>
filter.extensions.map(ext => `.${ext}`)
)
input.accept = extensions.join(',')
console.log('WebDialogService: Generated extensions with dots:', extensions)
const acceptString = extensions.join(',')
console.log('WebDialogService: Final accept attribute:', acceptString)
input.accept = acceptString
}
input.onchange = (e) => {

View File

@ -162,12 +162,16 @@ export class DefaultModelsService implements ModelsService {
}
}
async updateModel(model: Partial<CoreModel>): Promise<void> {
if (model.settings)
async updateModel(modelId: string, model: Partial<CoreModel>): Promise<void> {
if (model.settings) {
this.getEngine()?.updateSettings(
model.settings as SettingComponentProps[]
)
}
// Note: Model name/ID updates are handled at the provider level in the frontend
// The engine doesn't have an update method for model metadata
console.log('Model update request processed for modelId:', modelId)
}
async pullModel(
id: string,

View File

@ -99,7 +99,7 @@ export interface ModelsService {
hfToken?: string
): Promise<HuggingFaceRepo | null>
convertHfRepoToCatalogModel(repo: HuggingFaceRepo): CatalogModel
updateModel(model: Partial<CoreModel>): Promise<void>
updateModel(modelId: string, model: Partial<CoreModel>): Promise<void>
pullModel(
id: string,
modelPath: string,

View File

@ -28,6 +28,7 @@ type Model = {
id: string
model?: string
name?: string
displayName?: string
version?: number | string
description?: string
format?: string

2177
yarn.lock

File diff suppressed because it is too large Load Diff