Merge remote-tracking branch 'upstream/dev' into feat/identify-jan-on-openrouter

This commit is contained in:
Zhiqiang ZHOU 2025-07-06 11:54:46 -07:00
commit 9ff3cbe63f
No known key found for this signature in database
GPG Key ID: 445EC770299B03A8
246 changed files with 16479 additions and 11839 deletions

View File

@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Rebuild the Jan AppImage with extra runtime binaries bundled into the AppDir.
# To reproduce https://github.com/menloresearch/jan/pull/5463
# Fail fast: abort on any command failure, unset variable, or pipeline error,
# so a failed download/patch can't silently produce a broken bundle.
set -euo pipefail

make clean

# Cache linuxdeploy under the XDG cache dir so repeated builds skip the download.
TAURI_TOOLKIT_PATH="${XDG_CACHE_HOME:-$HOME/.cache}/tauri"
mkdir -p "$TAURI_TOOLKIT_PATH"
wget https://github.com/linuxdeploy/linuxdeploy/releases/download/1-alpha-20250213-2/linuxdeploy-x86_64.AppImage -O "$TAURI_TOOLKIT_PATH/linuxdeploy-x86_64.AppImage"
chmod +x "$TAURI_TOOLKIT_PATH/linuxdeploy-x86_64.AppImage"

# Patch tauri.conf.json: bundle pre-install resources and sidecar binaries.
# Written via a temp file because jq cannot edit its input in place.
jq '.bundle.resources = ["resources/pre-install/**/*"] | .bundle.externalBin = ["binaries/cortex-server", "resources/bin/uv"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json

make build-tauri

# Copy the bun runtime and native engine libraries into the AppDir.
APPDIR=./src-tauri/target/release/bundle/appimage/Jan.AppDir
cp ./src-tauri/resources/bin/bun "$APPDIR/usr/bin/bun"
mkdir -p "$APPDIR/usr/lib/Jan/binaries/engines"
cp -f ./src-tauri/binaries/deps/*.so* "$APPDIR/usr/lib/Jan/binaries/"
cp -f ./src-tauri/binaries/*.so* "$APPDIR/usr/lib/Jan/binaries/"
cp -rf ./src-tauri/binaries/engines "$APPDIR/usr/lib/Jan/binaries/"

# Locate the tauri-produced AppImage with find instead of parsing `ls` output
# (robust against spaces in names), then repack the patched AppDir over it.
APP_IMAGE=$(find ./src-tauri/target/release/bundle/appimage -maxdepth 1 -name '*.AppImage' | head -1)
echo "$APP_IMAGE"
rm -f "$APP_IMAGE"
/opt/bin/appimagetool "$APPDIR" "$APP_IMAGE"

View File

@ -1,4 +1,20 @@
{
"name": "jan",
"image": "node:20"
"name": "Jan",
"image": "mcr.microsoft.com/devcontainers/base:jammy",
"features": {
"ghcr.io/devcontainers/features/node:1": {
"version": "20"
},
"ghcr.io/devcontainers/features/rust:1": {},
"ghcr.io/devcontainers-extra/features/corepack:1": {}
},
"postCreateCommand": "./.devcontainer/postCreateCommand.sh",
// appimagekit requires fuse to package appimage, to use fuse in the container you need to enable it on the host
"runArgs": [
"--device", "/dev/fuse",
"--cap-add=SYS_ADMIN",
"--security-opt", "apparmor:unconfined"
]
}

View File

@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Devcontainer post-create setup:
# install tauri prerequisites + xdg-utils for xdg-open + libfuse2 for using appimagekit.
# Fail fast so a broken apt/wget step surfaces at container creation time
# instead of as a mysterious build failure later.
set -euo pipefail

sudo apt update
sudo apt install -yqq libwebkit2gtk-4.1-dev \
  build-essential \
  curl \
  wget \
  file \
  libxdo-dev \
  libssl-dev \
  libayatana-appindicator3-dev \
  librsvg2-dev \
  xdg-utils \
  libfuse2

# Install appimagetool to a fixed path referenced by the build script.
sudo mkdir -p /opt/bin
sudo wget https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage -O /opt/bin/appimagetool
sudo chmod +x /opt/bin/appimagetool

24
.github/ISSUE_TEMPLATE/1-bug-report.md vendored Normal file
View File

@ -0,0 +1,24 @@
---
name: 🐛 Bug Report
about: If something isn't working as expected 🤔
title: 'bug: '
type: Bug
---
**Version:** e.g. 0.5.x-xxx
## Describe the Bug
<!-- A clear & concise description of the bug -->
## Steps to Reproduce
1.
## Screenshots / Logs
<!-- You can find logs in: Settings -> General -> Data Folder -> App Logs -->
## Operating System
- [ ] MacOS
- [ ] Windows
- [ ] Linux

View File

@ -0,0 +1,12 @@
---
name: 🚀 Feature Request
about: Suggest an idea for this project 😻!
title: 'idea: '
type: Idea
---
## Problem Statement
<!-- Describe the problem you're facing -->
## Feature Idea
<!-- Describe what you want instead. Examples are welcome! -->

12
.github/ISSUE_TEMPLATE/3-epic.md vendored Normal file
View File

@ -0,0 +1,12 @@
---
name: 🌟 Epic
about: Major building block that advances Jan's goals
title: 'epic: '
type: Epic
---
## Goal
## Tasklist
## Out of scope

13
.github/ISSUE_TEMPLATE/4-goal.md vendored Normal file
View File

@ -0,0 +1,13 @@
---
name: 🎯 Goal
about: External communication of Jan's roadmap and objectives
title: 'goal: '
type: Goal
---
## Goal
## Tasklist
## Out of scope

View File

@ -1,43 +0,0 @@
name: "\U0001F41B Bug Report"
description: "If something isn't working as expected \U0001F914"
labels: [ "type: bug" ]
title: 'bug: [DESCRIPTION]'
body:
- type: input
validations:
required: true
attributes:
label: "Jan version"
description: "**Tip:** The version is in the app's bottom right corner"
placeholder: "e.g. 0.5.x-xxx"
- type: textarea
validations:
required: true
attributes:
label: "Describe the Bug"
description: "A clear & concise description of the bug"
- type: textarea
attributes:
label: "Steps to Reproduce"
description: |
Please list out steps to reproduce the issue
placeholder: |
1. Go to '...'
2. Click on '...'
- type: textarea
attributes:
label: "Screenshots / Logs"
description: |
You can find logs in: ~/jan/logs/app.logs
- type: checkboxes
attributes:
label: "What is your OS?"
options:
- label: MacOS
- label: Windows
- label: Linux

View File

@ -1,7 +1,5 @@
## To encourage contributors to use issue templates, we don't allow blank issues
blank_issues_enabled: true
contact_links:
- name: "\U0001F4AC Jan Discussions"
url: "https://github.com/orgs/menloresearch/discussions/categories/q-a"
about: "Get help, discuss features & roadmap, and share your projects"
- name: Jan Discussions
url: https://github.com/orgs/menloresearch/discussions/categories/q-a
about: Get help, discuss features & roadmap, and share your projects

View File

@ -1,20 +0,0 @@
name: "\U0001F680 Feature Request"
description: "Suggest an idea for this project \U0001F63B!"
title: 'idea: [DESCRIPTION]'
labels: 'type: feature request'
body:
- type: textarea
validations:
required: true
attributes:
label: "Problem Statement"
description: "Describe the problem you're facing"
placeholder: |
I'm always frustrated when ...
- type: textarea
validations:
required: true
attributes:
label: "Feature Idea"
description: "Describe what you want instead. Examples are welcome!"

View File

@ -1,21 +0,0 @@
name: "\U0001F929 Model Request"
description: "Request a new model to be compiled"
title: 'feat: [DESCRIPTION]'
labels: 'type: model request'
body:
- type: markdown
attributes:
value: "**Tip:** Download any HuggingFace model in app ([see guides](https://jan.ai/docs/models/manage-models#add-models)). Use this form for unsupported models only."
- type: textarea
validations:
required: true
attributes:
label: "Model Requests"
description: "If applicable, include the source URL, licenses, and any other relevant information"
- type: checkboxes
attributes:
label: "Which formats?"
options:
- label: GGUF (llama.cpp)
- label: TensorRT (TensorRT-LLM)
- label: ONNX (Onnx Runtime)

View File

@ -1,35 +0,0 @@
---
name: Roadmap
about: Plan Roadmap items with subtasks
title: 'roadmap: '
labels: 'type: planning'
assignees: ''
---
## Goal
## Tasklist
### Frontend
- [ ] link to janhq/jan epics
**Bugs**
- [ ] link to bugs
### Backend
- [ ] link to janhq/cortex.cpp epics
**Bugs**
- [ ] link to bug issues
### Infra
- [ ] link to infra issues
### Administrative / Management
- [ ] link to infra issues
### Marketing
-------
## Resources

17
.github/dependabot.yaml vendored Normal file
View File

@ -0,0 +1,17 @@
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/dependabot-options-reference#package-ecosystem-
version: 2
updates:
- package-ecosystem: "cargo"
directory: "src-tauri"
schedule:
interval: "weekly"
- package-ecosystem: "npm"
directories:
- "/"
- "core"
- "docs"
- "extensions"
- "extensions/*"
- "web-app"
schedule:
interval: "weekly"

16
.github/workflows/issues.yaml vendored Normal file
View File

@ -0,0 +1,16 @@
name: Adds all issues to project board
on:
issues:
types:
- opened
jobs:
add-to-project:
name: Add issue to project
runs-on: ubuntu-latest
steps:
- uses: actions/add-to-project@v1.0.2
with:
project-url: https://github.com/orgs/${{ vars.ORG_NAME }}/projects/${{ vars.JAN_PROJECT_NUMBER }}
github-token: ${{ secrets.AUTO_ADD_TICKET_PAT }}

View File

@ -1,215 +0,0 @@
name: Electron Builder - Nightly / Manual
on:
schedule:
- cron: '0 20 * * 1,2,3' # At 8 PM UTC on Monday, Tuesday, and Wednesday which is 3 AM UTC+7 Tuesday, Wednesday, and Thursday
workflow_dispatch:
inputs:
public_provider:
type: choice
description: 'Public Provider'
options:
- none
- aws-s3
default: none
pull_request:
branches:
- release/**
jobs:
set-public-provider:
runs-on: ubuntu-latest
outputs:
public_provider: ${{ steps.set-public-provider.outputs.public_provider }}
ref: ${{ steps.set-public-provider.outputs.ref }}
steps:
- name: Set public provider
id: set-public-provider
run: |
if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
echo "::set-output name=public_provider::${{ github.event.inputs.public_provider }}"
echo "::set-output name=ref::${{ github.ref }}"
else
if [ "${{ github.event_name }}" == "schedule" ]; then
echo "::set-output name=public_provider::aws-s3"
echo "::set-output name=ref::refs/heads/dev"
elif [ "${{ github.event_name }}" == "push" ]; then
echo "::set-output name=public_provider::aws-s3"
echo "::set-output name=ref::${{ github.ref }}"
elif [ "${{ github.event_name }}" == "pull_request_review" ]; then
echo "::set-output name=public_provider::none"
echo "::set-output name=ref::${{ github.ref }}"
else
echo "::set-output name=public_provider::none"
echo "::set-output name=ref::${{ github.ref }}"
fi
fi
# Job create Update app version based on latest release tag with build number and save to output
get-update-version:
uses: ./.github/workflows/template-get-update-version.yml
build-tauri-macos:
uses: ./.github/workflows/template-tauri-build-macos.yml
secrets: inherit
needs: [get-update-version, set-public-provider]
with:
ref: ${{ needs.set-public-provider.outputs.ref }}
public_provider: ${{ needs.set-public-provider.outputs.public_provider }}
new_version: ${{ needs.get-update-version.outputs.new_version }}
channel: nightly
cortex_api_port: "39261"
build-tauri-windows-x64:
uses: ./.github/workflows/template-tauri-build-windows-x64.yml
secrets: inherit
needs: [get-update-version, set-public-provider]
with:
ref: ${{ needs.set-public-provider.outputs.ref }}
public_provider: ${{ needs.set-public-provider.outputs.public_provider }}
new_version: ${{ needs.get-update-version.outputs.new_version }}
channel: nightly
cortex_api_port: "39261"
build-tauri-linux-x64:
uses: ./.github/workflows/template-tauri-build-linux-x64.yml
secrets: inherit
needs: [get-update-version, set-public-provider]
with:
ref: ${{ needs.set-public-provider.outputs.ref }}
public_provider: ${{ needs.set-public-provider.outputs.public_provider }}
new_version: ${{ needs.get-update-version.outputs.new_version }}
channel: nightly
cortex_api_port: "39261"
sync-temp-to-latest:
needs: [get-update-version, set-public-provider, build-tauri-windows-x64, build-tauri-linux-x64, build-tauri-macos]
runs-on: ubuntu-latest
steps:
- name: Getting the repo
uses: actions/checkout@v3
- name: Install jq
uses: dcarbone/install-jq-action@v2.0.1
- name: create latest.json file
run: |
VERSION=${{ needs.get-update-version.outputs.new_version }}
PUB_DATE=$(date -u +"%Y-%m-%dT%H:%M:%S.%3NZ")
LINUX_SIGNATURE="${{ needs.build-tauri-linux-x64.outputs.APPIMAGE_SIG }}"
LINUX_URL="https://delta.jan.ai/nightly/${{ needs.build-tauri-linux-x64.outputs.APPIMAGE_FILE_NAME }}"
WINDOWS_SIGNATURE="${{ needs.build-tauri-windows-x64.outputs.WIN_SIG }}"
WINDOWS_URL="https://delta.jan.ai/nightly/${{ needs.build-tauri-windows-x64.outputs.FILE_NAME }}"
DARWIN_SIGNATURE="${{ needs.build-tauri-macos.outputs.MAC_UNIVERSAL_SIG }}"
DARWIN_URL="https://delta.jan.ai/nightly/Jan-nightly_${{ needs.get-update-version.outputs.new_version }}.app.tar.gz"
jq --arg version "$VERSION" \
--arg pub_date "$PUB_DATE" \
--arg linux_signature "$LINUX_SIGNATURE" \
--arg linux_url "$LINUX_URL" \
--arg windows_signature "$WINDOWS_SIGNATURE" \
--arg windows_url "$WINDOWS_URL" \
--arg darwin_arm_signature "$DARWIN_SIGNATURE" \
--arg darwin_arm_url "$DARWIN_URL" \
--arg darwin_amd_signature "$DARWIN_SIGNATURE" \
--arg darwin_amd_url "$DARWIN_URL" \
'.version = $version
| .pub_date = $pub_date
| .platforms["linux-x86_64"].signature = $linux_signature
| .platforms["linux-x86_64"].url = $linux_url
| .platforms["windows-x86_64"].signature = $windows_signature
| .platforms["windows-x86_64"].url = $windows_url
| .platforms["darwin-aarch64"].signature = $darwin_arm_signature
| .platforms["darwin-aarch64"].url = $darwin_arm_url
| .platforms["darwin-x86_64"].signature = $darwin_amd_signature
| .platforms["darwin-x86_64"].url = $darwin_amd_url' \
src-tauri/latest.json.template > latest.json
cat latest.json
- name: Sync temp to latest
if: ${{ needs.set-public-provider.outputs.public_provider == 'aws-s3' }}
run: |
aws s3 cp ./latest.json s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-nightly/latest.json
aws s3 sync s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-nightly/ s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/nightly/
env:
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ secrets.DELTA_AWS_REGION }}
AWS_EC2_METADATA_DISABLED: "true"
noti-discord-nightly-and-update-url-readme:
needs: [
build-tauri-macos,
build-tauri-windows-x64,
build-tauri-linux-x64,
get-update-version,
set-public-provider,
sync-temp-to-latest
]
secrets: inherit
if: github.event_name == 'schedule'
uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
with:
ref: refs/heads/dev
build_reason: Nightly
push_to_branch: dev
new_version: ${{ needs.get-update-version.outputs.new_version }}
noti-discord-pre-release-and-update-url-readme:
needs: [
build-tauri-macos,
build-tauri-windows-x64,
build-tauri-linux-x64,
get-update-version,
set-public-provider,
sync-temp-to-latest
]
secrets: inherit
if: github.event_name == 'push'
uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
with:
ref: refs/heads/dev
build_reason: Pre-release
push_to_branch: dev
new_version: ${{ needs.get-update-version.outputs.new_version }}
noti-discord-manual-and-update-url-readme:
needs: [
build-tauri-macos,
build-tauri-windows-x64,
build-tauri-linux-x64,
get-update-version,
set-public-provider,
sync-temp-to-latest
]
secrets: inherit
if: github.event_name == 'workflow_dispatch' && github.event.inputs.public_provider == 'aws-s3'
uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
with:
ref: refs/heads/dev
build_reason: Manual
push_to_branch: dev
new_version: ${{ needs.get-update-version.outputs.new_version }}
# comment-pr-build-url:
# needs: [
# build-tauri-macos,
# build-tauri-windows-x64,
# build-tauri-linux-x64,
# get-update-version,
# set-public-provider,
# sync-temp-to-latest
# ]
# runs-on: ubuntu-latest
# if: github.event_name == 'pull_request_review'
# steps:
# - name: Set up GitHub CLI
# run: |
# curl -sSL https://github.com/cli/cli/releases/download/v2.33.0/gh_2.33.0_linux_amd64.tar.gz | tar xz
# sudo cp gh_2.33.0_linux_amd64/bin/gh /usr/local/bin/
# - name: Comment build URL on PR
# env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# run: |
# PR_URL=${{ github.event.pull_request.html_url }}
# RUN_ID=${{ github.run_id }}
# COMMENT="This is the build for this pull request. You can download it from the Artifacts section here: [Build URL](https://github.com/${{ github.repository }}/actions/runs/${RUN_ID})."
# gh pr comment $PR_URL --body "$COMMENT"

View File

@ -1,131 +0,0 @@
name: Electron Builder - Tag
on:
push:
tags: ["v[0-9]+.[0-9]+.[0-9]+"]
jobs:
# Job create Update app version based on latest release tag with build number and save to output
get-update-version:
uses: ./.github/workflows/template-get-update-version.yml
create-draft-release:
runs-on: ubuntu-latest
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
outputs:
upload_url: ${{ steps.create_release.outputs.upload_url }}
version: ${{ steps.get_version.outputs.version }}
permissions:
contents: write
steps:
- name: Extract tag name without v prefix
id: get_version
run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV && echo "::set-output name=version::${GITHUB_REF#refs/tags/v}"
env:
GITHUB_REF: ${{ github.ref }}
- name: Create Draft Release
id: create_release
uses: softprops/action-gh-release@v2
with:
tag_name: ${{ github.ref_name }}
token: ${{ secrets.GITHUB_TOKEN }}
name: "${{ env.VERSION }}"
draft: true
prerelease: false
build-electron-macos:
uses: ./.github/workflows/template-electron-build-macos.yml
secrets: inherit
needs: [get-update-version]
with:
ref: ${{ github.ref }}
public_provider: github
beta: false
nightly: false
new_version: ${{ needs.get-update-version.outputs.new_version }}
build-electron-windows-x64:
uses: ./.github/workflows/template-electron-build-windows-x64.yml
secrets: inherit
needs: [get-update-version]
with:
ref: ${{ github.ref }}
public_provider: github
beta: false
nightly: false
new_version: ${{ needs.get-update-version.outputs.new_version }}
build-electron-linux-x64:
uses: ./.github/workflows/template-electron-build-linux-x64.yml
secrets: inherit
needs: [get-update-version]
with:
ref: ${{ github.ref }}
public_provider: github
beta: false
nightly: false
new_version: ${{ needs.get-update-version.outputs.new_version }}
# build-tauri-macos:
# uses: ./.github/workflows/template-tauri-build-macos.yml
# secrets: inherit
# needs: [get-update-version, create-draft-release]
# with:
# ref: ${{ github.ref }}
# public_provider: github
# channel: stable
# new_version: ${{ needs.get-update-version.outputs.new_version }}
# upload_url: ${{ needs.create-draft-release.outputs.upload_url }}
# build-tauri-windows-x64:
# uses: ./.github/workflows/template-tauri-build-windows-x64.yml
# secrets: inherit
# needs: [get-update-version, create-draft-release]
# with:
# ref: ${{ github.ref }}
# public_provider: github
# channel: stable
# new_version: ${{ needs.get-update-version.outputs.new_version }}
# upload_url: ${{ needs.create-draft-release.outputs.upload_url }}
# build-tauri-linux-x64:
# uses: ./.github/workflows/template-tauri-build-linux-x64.yml
# secrets: inherit
# needs: [get-update-version, create-draft-release]
# with:
# ref: ${{ github.ref }}
# public_provider: github
# channel: stable
# new_version: ${{ needs.get-update-version.outputs.new_version }}
# upload_url: ${{ needs.create-draft-release.outputs.upload_url }}
update_release_draft:
needs: [
build-electron-windows-x64,
build-electron-linux-x64,
build-electron-macos,
build-tauri-windows-x64,
build-tauri-linux-x64,
build-tauri-macos
]
permissions:
# write permission is required to create a github release
contents: write
# write permission is required for autolabeler
# otherwise, read permission is required at least
pull-requests: write
runs-on: ubuntu-latest
steps:
# (Optional) GitHub Enterprise requires GHE_HOST variable set
#- name: Set GHE_HOST
# run: |
# echo "GHE_HOST=${GITHUB_SERVER_URL##https:\/\/}" >> $GITHUB_ENV
# Drafts your next Release notes as Pull Requests are merged into "master"
- uses: release-drafter/release-drafter@v5
# (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml
# with:
# config-name: my-config.yml
# disable-autolabeler: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -6,8 +6,7 @@ on:
- main
- dev
paths:
- 'electron/**'
- .github/workflows/jan-electron-linter-and-test.yml
- .github/workflows/jan-linter-and-test.yml
- 'web/**'
- 'joi/**'
- 'package.json'
@ -24,8 +23,7 @@ on:
- dev
- release/**
paths:
- 'electron/**'
- .github/workflows/jan-electron-linter-and-test.yml
- .github/workflows/jan-linter-and-test.yml
- 'web/**'
- 'joi/**'
- 'package.json'
@ -67,8 +65,8 @@ jobs:
path: ./coverage/lcov.info
test-on-macos:
if: (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository) || github.event_name == 'push' || github.event_name == 'workflow_dispatch'
runs-on: macos-latest
runs-on: ${{ (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository) && 'macos-latest' || 'macos-selfhosted-12-arm64' }}
if: github.event_name == 'pull_request' || github.event_name == 'push' || github.event_name == 'workflow_dispatch'
steps:
- name: Getting the repo
uses: actions/checkout@v3
@ -81,48 +79,8 @@ jobs:
node-version: 20
- name: Set IS_TEST environment variable
run: |
echo "IS_TEST=true" >> $GITHUB_ENV
- name: 'Cleanup cache'
continue-on-error: true
run: |
rm -rf ~/jan
make clean
- name: Get Commit Message for PR
if: github.event_name == 'pull_request'
run: |
echo "REPORT_PORTAL_DESCRIPTION=${{github.event.after}})" >> $GITHUB_ENV
- name: Get Commit Message for push event
if: github.event_name == 'push'
run: |
echo "REPORT_PORTAL_DESCRIPTION=${{github.sha}})" >> $GITHUB_ENV
# - name: 'Config report portal'
# run: |
# make update-playwright-config REPORT_PORTAL_URL=${{ secrets.REPORT_PORTAL_URL }} REPORT_PORTAL_API_KEY=${{ secrets.REPORT_PORTAL_API_KEY }} REPORT_PORTAL_PROJECT_NAME=${{ secrets.REPORT_PORTAL_PROJECT_NAME }} REPORT_PORTAL_LAUNCH_NAME="Jan App macos" REPORT_PORTAL_DESCRIPTION="${{env.REPORT_PORTAL_DESCRIPTION}}"
- name: Linter and test
run: |
make test
env:
CSC_IDENTITY_AUTO_DISCOVERY: 'false'
test-on-macos-pr-target:
if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository
runs-on: macos-latest
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
if: github.event.pull_request.head.repo.full_name == github.repository
run: echo "IS_TEST=true" >> $GITHUB_ENV
- name: 'Cleanup cache'
continue-on-error: true
@ -154,6 +112,10 @@ jobs:
with:
node-version: 20
- name: Install tauri-driver dependencies
run: |
cargo install tauri-driver --locked
# Clean cache, continue on error
- name: 'Cleanup cache'
shell: powershell
@ -167,37 +129,33 @@ jobs:
}
make clean
- name: Get Commit Message for push event
if: github.event_name == 'push'
shell: bash
run: |
echo "REPORT_PORTAL_DESCRIPTION=${{github.sha}}" >> $GITHUB_ENV
# - name: 'Config report portal'
# shell: bash
# run: |
# make update-playwright-config REPORT_PORTAL_URL=${{ secrets.REPORT_PORTAL_URL }} REPORT_PORTAL_API_KEY=${{ secrets.REPORT_PORTAL_API_KEY }} REPORT_PORTAL_PROJECT_NAME=${{ secrets.REPORT_PORTAL_PROJECT_NAME }} REPORT_PORTAL_LAUNCH_NAME="Jan App Windows ${{ matrix.antivirus-tools }}" REPORT_PORTAL_DESCRIPTION="${{env.REPORT_PORTAL_DESCRIPTION}}"
- name: Linter and test
shell: powershell
run: |
make test
test-on-windows-pr:
if: (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository) || github.event_name == 'workflow_dispatch'
runs-on: windows-latest
if: github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch'
runs-on: ${{ (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository) && 'windows-latest' || 'WINDOWS-11' }}
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: install dependencies
run: |
choco install --yes --no-progress make
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
# Clean cache, continue on error
- name: Install tauri-driver dependencies
run: |
cargo install tauri-driver --locked
- name: 'Cleanup cache'
shell: powershell
continue-on-error: true
@ -210,57 +168,22 @@ jobs:
}
make clean
- name: Get Commit Message for PR
if: github.event_name == 'pull_request'
shell: bash
- name: Install WebView2 Runtime (Bootstrapper)
shell: powershell
run: |
echo "REPORT_PORTAL_DESCRIPTION=${{github.event.after}}" >> $GITHUB_ENV
# - name: 'Config report portal'
# shell: bash
# run: |
# make update-playwright-config REPORT_PORTAL_URL=${{ secrets.REPORT_PORTAL_URL }} REPORT_PORTAL_API_KEY=${{ secrets.REPORT_PORTAL_API_KEY }} REPORT_PORTAL_PROJECT_NAME=${{ secrets.REPORT_PORTAL_PROJECT_NAME }} REPORT_PORTAL_LAUNCH_NAME="Jan App Windows" REPORT_PORTAL_DESCRIPTION="${{env.REPORT_PORTAL_DESCRIPTION}}"
- name: Linter and test
shell: powershell
run: |
make test
test-on-windows-pr-target:
if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository
runs-on: windows-latest
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v1
with:
node-version: 20
# Clean cache, continue on error
- name: 'Cleanup cache'
shell: powershell
continue-on-error: true
run: |
$path = "$Env:APPDATA\jan"
if (Test-Path $path) {
Remove-Item "\\?\$path" -Recurse -Force
} else {
Write-Output "Folder does not exist."
}
make clean
Invoke-WebRequest -Uri 'https://go.microsoft.com/fwlink/p/?LinkId=2124703' -OutFile 'setup.exe'
Start-Process -FilePath setup.exe -Verb RunAs -Wait
- name: Linter and test
shell: powershell
run: |
make test
env:
NODE_OPTIONS: '--max-old-space-size=2048'
test-on-ubuntu:
runs-on: ubuntu-latest
if: (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository) || github.event_name == 'push' || github.event_name == 'workflow_dispatch'
runs-on: ${{ (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository) && 'ubuntu-latest' || 'ubuntu-latest' }}
if: github.event_name == 'pull_request' || github.event_name == 'push' || github.event_name == 'workflow_dispatch'
steps:
- name: Getting the repo
uses: actions/checkout@v3
@ -272,27 +195,21 @@ jobs:
with:
node-version: 20
- name: Install Tauri dependencies
run: |
sudo apt update
sudo apt install -y libglib2.0-dev libatk1.0-dev libpango1.0-dev libgtk-3-dev libsoup-3.0-dev libwebkit2gtk-4.1-dev librsvg2-dev libfuse2 webkit2gtk-driver
- name: Install tauri-driver dependencies
run: |
cargo install tauri-driver --locked
- name: 'Cleanup cache'
continue-on-error: true
run: |
rm -rf ~/jan
make clean
- name: Get Commit Message for PR
if: github.event_name == 'pull_request'
run: |
echo "REPORT_PORTAL_DESCRIPTION=${{github.event.after}}" >> $GITHUB_ENV
- name: Get Commit Message for push event
if: github.event_name == 'push'
run: |
echo "REPORT_PORTAL_DESCRIPTION=${{github.sha}}" >> $GITHUB_ENV
# - name: 'Config report portal'
# shell: bash
# run: |
# make update-playwright-config REPORT_PORTAL_URL=${{ secrets.REPORT_PORTAL_URL }} REPORT_PORTAL_API_KEY=${{ secrets.REPORT_PORTAL_API_KEY }} REPORT_PORTAL_PROJECT_NAME=${{ secrets.REPORT_PORTAL_PROJECT_NAME }} REPORT_PORTAL_LAUNCH_NAME="Jan App Linux" REPORT_PORTAL_DESCRIPTION="${{env.REPORT_PORTAL_DESCRIPTION}}"
- name: Linter and test
run: |
export DISPLAY=$(w -h | awk 'NR==1 {print $2}')
@ -353,29 +270,3 @@ jobs:
# base-lcov-file: './lcov.info'
# send-summary-comment: true
# show-annotations: 'warning'
test-on-ubuntu-pr-target:
runs-on: ubuntu-latest
if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
- name: 'Cleanup cache'
continue-on-error: true
run: |
rm -rf ~/jan
make clean
- name: Linter and test
run: |
export DISPLAY=$(w -h | awk 'NR==1 {print $2}')
echo -e "Display ID: $DISPLAY"
make test

View File

@ -6,7 +6,6 @@ on:
workflow_dispatch:
jobs:
build-and-publish-plugins:
environment: production
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4

View File

@ -1,188 +0,0 @@
name: build-linux-x64
on:
workflow_call:
inputs:
ref:
required: true
type: string
default: 'refs/heads/main'
public_provider:
required: true
type: string
default: none
description: 'none: build only, github: build and publish to github, aws s3: build and publish to aws s3'
new_version:
required: true
type: string
default: ''
aws_s3_prefix:
required: false
type: string
default: '/latest/'
beta:
required: false
type: boolean
default: false
nightly:
required: false
type: boolean
default: false
cortex_api_port:
required: false
type: string
default: null
secrets:
DELTA_AWS_S3_BUCKET_NAME:
required: false
DELTA_AWS_ACCESS_KEY_ID:
required: false
DELTA_AWS_SECRET_ACCESS_KEY:
required: false
jobs:
build-linux-x64:
if: inputs.public_provider == 'github' || inputs.public_provider == 'none'
runs-on: ubuntu-latest
environment: production
permissions:
contents: write
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
ref: ${{ inputs.ref }}
- name: Replace Icons for Beta Build
if: inputs.beta == true && inputs.nightly != true
shell: bash
run: |
rm -rf electron/icons/*
cp electron/icons_dev/jan-beta-512x512.png electron/icons/512x512.png
cp electron/icons_dev/jan-beta.ico electron/icons/icon.ico
cp electron/icons_dev/jan-beta.png electron/icons/icon.png
cp electron/icons_dev/jan-beta-tray@2x.png electron/icons/icon-tray@2x.png
cp electron/icons_dev/jan-beta-tray.png electron/icons/icon-tray.png
- name: Replace Icons for Nightly Build
if: inputs.nightly == true && inputs.beta != true
shell: bash
run: |
rm -rf electron/icons/*
cp electron/icons_dev/jan-nightly-512x512.png electron/icons/512x512.png
cp electron/icons_dev/jan-nightly.ico electron/icons/icon.ico
cp electron/icons_dev/jan-nightly.png electron/icons/icon.png
cp electron/icons_dev/jan-nightly-tray@2x.png electron/icons/icon-tray@2x.png
cp electron/icons_dev/jan-nightly-tray.png electron/icons/icon-tray.png
- name: Installing node
uses: actions/setup-node@v1
with:
node-version: 20
- name: Install jq
uses: dcarbone/install-jq-action@v2.0.1
- name: Update app version base public_provider
if: inputs.public_provider != 'github'
run: |
echo "Version: ${{ inputs.new_version }}"
# Update the version in electron/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' web/package.json > /tmp/package.json
mv /tmp/package.json web/package.json
jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/nightly", "channel": "latest"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-nightly", "channel": "latest"}]' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
cat electron/package.json
chmod +x .github/scripts/rename-app.sh
.github/scripts/rename-app.sh ./electron/package.json nightly
chmod +x .github/scripts/rename-workspace.sh
.github/scripts/rename-workspace.sh ./package.json nightly
echo "------------------------"
cat ./electron/package.json
echo "------------------------"
- name: Change App Name for beta version
if: inputs.beta == true
shell: bash
run: |
chmod +x .github/scripts/rename-app.sh
.github/scripts/rename-app.sh ./electron/package.json beta
chmod +x .github/scripts/rename-workspace.sh
.github/scripts/rename-workspace.sh ./package.json beta
echo "------------------------"
cat ./electron/package.json
echo "------------------------"
cat ./package.json
jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/beta", "channel": "beta"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-beta", "channel": "beta"}]' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
cat electron/package.json
- name: Update app version base on tag
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github'
run: |
jq --arg version "${VERSION_TAG#v}" '.version = $version' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
jq --arg version "${VERSION_TAG#v}" '.version = $version' web/package.json > /tmp/package.json
mv /tmp/package.json web/package.json
env:
VERSION_TAG: ${{ inputs.new_version }}
- name: Build and publish app to aws s3 r2 or github artifactory
if: inputs.public_provider != 'github'
run: |
# check public_provider is true or not
echo "public_provider is ${{ inputs.public_provider }}"
if [ "${{ inputs.public_provider }}" == "none" ]; then
make build
else
make build-and-publish
fi
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
CORTEX_API_PORT: ${{ inputs.cortex_api_port }}
- name: Build and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == false
run: |
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Build and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == true
run: |
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Upload Artifact .deb file
if: inputs.public_provider != 'github'
uses: actions/upload-artifact@v4
with:
name: jan-electron-linux-amd64-${{ inputs.new_version }}-deb
path: ./electron/dist/*.deb
- name: Upload Artifact .AppImage file
if: inputs.public_provider != 'github'
uses: actions/upload-artifact@v4
with:
name: jan-electron-linux-amd64-${{ inputs.new_version }}-AppImage
path: ./electron/dist/*.AppImage

View File

@ -1,234 +0,0 @@
name: build-macos
on:
workflow_call:
inputs:
ref:
required: true
type: string
default: 'refs/heads/main'
public_provider:
required: true
type: string
default: none
description: 'none: build only, github: build and publish to github, aws s3: build and publish to aws s3'
new_version:
required: true
type: string
default: ''
aws_s3_prefix:
required: false
type: string
default: '/latest/'
beta:
required: false
type: boolean
default: false
nightly:
required: false
type: boolean
default: false
cortex_api_port:
required: false
type: string
default: null
secrets:
DELTA_AWS_S3_BUCKET_NAME:
required: false
DELTA_AWS_ACCESS_KEY_ID:
required: false
DELTA_AWS_SECRET_ACCESS_KEY:
required: false
CODE_SIGN_P12_BASE64:
required: false
CODE_SIGN_P12_PASSWORD:
required: false
APPLE_ID:
required: false
APPLE_APP_SPECIFIC_PASSWORD:
required: false
DEVELOPER_ID:
required: false
jobs:
build-macos:
if: inputs.public_provider == 'github' || inputs.public_provider == 'none'
runs-on: macos-latest
environment: production
permissions:
contents: write
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
ref: ${{ inputs.ref }}
- name: Replace Icons for Beta Build
if: inputs.beta == true && inputs.nightly != true
shell: bash
run: |
rm -rf electron/icons/*
cp electron/icons_dev/jan-beta-512x512.png electron/icons/512x512.png
cp electron/icons_dev/jan-beta.ico electron/icons/icon.ico
cp electron/icons_dev/jan-beta.png electron/icons/icon.png
cp electron/icons_dev/jan-beta-tray@2x.png electron/icons/icon-tray@2x.png
cp electron/icons_dev/jan-beta-tray.png electron/icons/icon-tray.png
- name: Replace Icons for Nightly Build
if: inputs.nightly == true && inputs.beta != true
shell: bash
run: |
rm -rf electron/icons/*
cp electron/icons_dev/jan-nightly-512x512.png electron/icons/512x512.png
cp electron/icons_dev/jan-nightly.ico electron/icons/icon.ico
cp electron/icons_dev/jan-nightly.png electron/icons/icon.png
cp electron/icons_dev/jan-nightly-tray@2x.png electron/icons/icon-tray@2x.png
cp electron/icons_dev/jan-nightly-tray.png electron/icons/icon-tray.png
- name: Installing node
uses: actions/setup-node@v1
with:
node-version: 20
- name: Install jq
uses: dcarbone/install-jq-action@v2.0.1
- name: Update app version based on latest release tag with build number
if: inputs.public_provider != 'github'
run: |
echo "Version: ${{ inputs.new_version }}"
# Update the version in electron/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' web/package.json > /tmp/package.json
mv /tmp/package.json web/package.json
jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/nightly", "channel": "latest"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-nightly", "channel": "latest"}]' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
jq --arg teamid "${{ secrets.APPLE_TEAM_ID }}" '.build.mac.notarize.teamId = $teamid' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
# cat electron/package.json
chmod +x .github/scripts/rename-app.sh
.github/scripts/rename-app.sh ./electron/package.json nightly
chmod +x .github/scripts/rename-workspace.sh
.github/scripts/rename-workspace.sh ./package.json nightly
echo "------------------------"
cat ./electron/package.json
echo "------------------------"
- name: Change App Name for beta version
if: inputs.beta == true
shell: bash
run: |
chmod +x .github/scripts/rename-app.sh
.github/scripts/rename-app.sh ./electron/package.json beta
chmod +x .github/scripts/rename-workspace.sh
.github/scripts/rename-workspace.sh ./package.json beta
echo "------------------------"
cat ./electron/package.json
echo "------------------------"
cat ./package.json
jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/beta", "channel": "beta"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-beta", "channel": "beta"}]' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
cat electron/package.json
- name: Update app version base on tag
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github'
run: |
jq --arg version "${VERSION_TAG#v}" '.version = $version' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
jq --arg version "${VERSION_TAG#v}" '.version = $version' web/package.json > /tmp/package.json
mv /tmp/package.json web/package.json
jq --arg teamid "${{ secrets.APPLE_TEAM_ID }}" '.build.mac.notarize.teamId = $teamid' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
cat electron/package.json
env:
VERSION_TAG: ${{ inputs.new_version }}
- name: Get Cer for code signing
run: base64 -d <<< "$CODE_SIGN_P12_BASE64" > /tmp/codesign.p12
shell: bash
env:
CODE_SIGN_P12_BASE64: ${{ secrets.CODE_SIGN_P12_BASE64 }}
- uses: apple-actions/import-codesign-certs@v2
continue-on-error: true
with:
p12-file-base64: ${{ secrets.CODE_SIGN_P12_BASE64 }}
p12-password: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
- name: Build and publish app to aws s3 r2 or github artifactory
if: inputs.public_provider != 'github'
run: |
# check public_provider is true or not
echo "public_provider is ${{ inputs.public_provider }}"
if [ "${{ inputs.public_provider }}" == "none" ]; then
make build
else
make build-and-publish
fi
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CSC_LINK: '/tmp/codesign.p12'
CSC_KEY_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
CSC_IDENTITY_AUTO_DISCOVERY: 'true'
APPLE_ID: ${{ secrets.APPLE_ID }}
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
APP_PATH: '.'
DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: auto
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
CORTEX_API_PORT: ${{ inputs.cortex_api_port }}
- name: Build and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == false
run: |
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CSC_LINK: '/tmp/codesign.p12'
CSC_KEY_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
CSC_IDENTITY_AUTO_DISCOVERY: 'true'
APPLE_ID: ${{ secrets.APPLE_ID }}
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
APP_PATH: '.'
DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Build and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == true
run: |
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CSC_LINK: '/tmp/codesign.p12'
CSC_KEY_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
CSC_IDENTITY_AUTO_DISCOVERY: 'true'
APPLE_ID: ${{ secrets.APPLE_ID }}
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
APP_PATH: '.'
DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: auto
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Upload Artifact
if: inputs.public_provider != 'github'
uses: actions/upload-artifact@v4
with:
name: jan-electron-mac-universal-${{ inputs.new_version }}
path: ./electron/dist/*.dmg

View File

@ -1,230 +0,0 @@
name: build-windows-x64
on:
workflow_call:
inputs:
ref:
required: true
type: string
default: 'refs/heads/main'
public_provider:
required: true
type: string
default: none
description: 'none: build only, github: build and publish to github, aws s3: build and publish to aws s3'
new_version:
required: true
type: string
default: ''
aws_s3_prefix:
required: false
type: string
default: '/latest/'
beta:
required: false
type: boolean
default: false
nightly:
required: false
type: boolean
default: false
cortex_api_port:
required: false
type: string
default: null
secrets:
DELTA_AWS_S3_BUCKET_NAME:
required: false
DELTA_AWS_ACCESS_KEY_ID:
required: false
DELTA_AWS_SECRET_ACCESS_KEY:
required: false
AZURE_KEY_VAULT_URI:
required: false
AZURE_CLIENT_ID:
required: false
AZURE_TENANT_ID:
required: false
AZURE_CLIENT_SECRET:
required: false
AZURE_CERT_NAME:
required: false
jobs:
build-windows-x64:
if: inputs.public_provider == 'github' || inputs.public_provider == 'none'
runs-on: windows-latest
permissions:
contents: write
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
ref: ${{ inputs.ref }}
- name: Replace Icons for Beta Build
if: inputs.beta == true && inputs.nightly != true
shell: bash
run: |
rm -rf electron/icons/*
cp electron/icons_dev/jan-beta-512x512.png electron/icons/512x512.png
cp electron/icons_dev/jan-beta.ico electron/icons/icon.ico
cp electron/icons_dev/jan-beta.png electron/icons/icon.png
cp electron/icons_dev/jan-beta-tray@2x.png electron/icons/icon-tray@2x.png
cp electron/icons_dev/jan-beta-tray.png electron/icons/icon-tray.png
- name: Replace Icons for Nightly Build
if: inputs.nightly == true && inputs.beta != true
shell: bash
run: |
rm -rf electron/icons/*
cp electron/icons_dev/jan-nightly-512x512.png electron/icons/512x512.png
cp electron/icons_dev/jan-nightly.ico electron/icons/icon.ico
cp electron/icons_dev/jan-nightly.png electron/icons/icon.png
cp electron/icons_dev/jan-nightly-tray@2x.png electron/icons/icon-tray@2x.png
cp electron/icons_dev/jan-nightly-tray.png electron/icons/icon-tray.png
- name: Installing node
uses: actions/setup-node@v1
with:
node-version: 20
- name: Install jq
uses: dcarbone/install-jq-action@v2.0.1
- name: Update app version base on tag
if: inputs.public_provider != 'github'
id: version_update
shell: bash
run: |
echo "Version: ${{ inputs.new_version }}"
# Update the version in electron/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' web/package.json > /tmp/package.json
mv /tmp/package.json web/package.json
jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/nightly", "channel": "latest"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-nightly", "channel": "latest"}]' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
jq '.build.win.sign = "./sign.js"' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
cat electron/package.json
chmod +x .github/scripts/rename-app.sh
.github/scripts/rename-app.sh ./electron/package.json nightly
chmod +x .github/scripts/rename-workspace.sh
.github/scripts/rename-workspace.sh ./package.json nightly
chmod +x .github/scripts/rename-uninstaller.sh
.github/scripts/rename-uninstaller.sh nightly
echo "------------------------"
cat ./electron/package.json
echo "------------------------"
cat ./package.json
echo "------------------------"
- name: Change App Name for beta version
if: inputs.beta == true
shell: bash
run: |
chmod +x .github/scripts/rename-app.sh
.github/scripts/rename-app.sh ./electron/package.json beta
chmod +x .github/scripts/rename-workspace.sh
.github/scripts/rename-workspace.sh ./package.json beta
chmod +x .github/scripts/rename-uninstaller.sh
.github/scripts/rename-uninstaller.sh beta
echo "------------------------"
cat ./electron/package.json
echo "------------------------"
cat ./package.json
echo "------------------------"
cat ./electron/scripts/uninstaller.nsh
jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/beta", "channel": "beta"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-beta", "channel": "beta"}]' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
cat electron/package.json
- name: Update app version base on tag
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github'
shell: bash
run: |
jq --arg version "${VERSION_TAG#v}" '.version = $version' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
jq --arg version "${VERSION_TAG#v}" '.version = $version' web/package.json > /tmp/package.json
mv /tmp/package.json web/package.json
jq '.build.win.sign = "./sign.js"' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
env:
VERSION_TAG: ${{ inputs.new_version }}
- name: Install AzureSignTool
run: |
dotnet tool install --global AzureSignTool
- name: Build and publish app to aws s3 r2 or github artifactory
shell: bash
if: inputs.public_provider != 'github'
run: |
# check public_provider is true or not
echo "public_provider is ${{ inputs.public_provider }}"
if [ "${{ inputs.public_provider }}" == "none" ]; then
make build
else
make build-and-publish
fi
env:
AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
AZURE_CERT_NAME: homebrewltd
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: auto
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
CORTEX_API_PORT: ${{ inputs.cortex_api_port }}
- name: Build app and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == false
run: |
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
AZURE_CERT_NAME: homebrewltd
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Build app and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == true
run: |
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: auto
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
# AZURE_CERT_NAME: ${{ secrets.AZURE_CERT_NAME }}
AZURE_CERT_NAME: homebrewltd
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Upload Artifact
if: inputs.public_provider != 'github'
uses: actions/upload-artifact@v4
with:
name: jan-electron-win-x64-${{ inputs.new_version }}
path: ./electron/dist/*.exe

View File

@ -9,7 +9,6 @@ on:
jobs:
get-update-version:
runs-on: ubuntu-latest
environment: production
outputs:
new_version: ${{ steps.version_update.outputs.new_version }}
steps:

View File

@ -26,7 +26,6 @@ on:
jobs:
noti-discord-and-update-url-readme:
environment: production
runs-on: ubuntu-latest
permissions:
contents: write

View File

@ -55,7 +55,6 @@ jobs:
DEB_SIG: ${{ steps.packageinfo.outputs.DEB_SIG }}
APPIMAGE_SIG: ${{ steps.packageinfo.outputs.APPIMAGE_SIG }}
APPIMAGE_FILE_NAME: ${{ steps.packageinfo.outputs.APPIMAGE_FILE_NAME }}
environment: production
permissions:
contents: write
steps:
@ -96,7 +95,7 @@ jobs:
run: |
cargo install ctoml
- name: Install Tauri dependecies
- name: Install Tauri dependencies
run: |
sudo apt update
sudo apt install -y libglib2.0-dev libatk1.0-dev libpango1.0-dev libgtk-3-dev libsoup-3.0-dev libwebkit2gtk-4.1-dev librsvg2-dev libfuse2
@ -151,6 +150,12 @@ jobs:
fi
- name: Build app
run: |
# Pin linuxdeploy version to prevent @tauri-apps/cli-linux-x64-gnu from pulling in an outdated version
TAURI_TOOLKIT_PATH="${XDG_CACHE_HOME:-$HOME/.cache}/tauri"
mkdir -p "$TAURI_TOOLKIT_PATH"
wget https://github.com/linuxdeploy/linuxdeploy/releases/download/1-alpha-20250213-2/linuxdeploy-x86_64.AppImage -O "$TAURI_TOOLKIT_PATH/linuxdeploy-x86_64.AppImage"
chmod +x "$TAURI_TOOLKIT_PATH/linuxdeploy-x86_64.AppImage"
make build-tauri
# Copy engines and bun to appimage
wget https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage -O ./appimagetool

View File

@ -63,7 +63,6 @@ jobs:
outputs:
MAC_UNIVERSAL_SIG: ${{ steps.metadata.outputs.MAC_UNIVERSAL_SIG }}
TAR_NAME: ${{ steps.metadata.outputs.TAR_NAME }}
environment: production
permissions:
contents: write
steps:

View File

@ -1,4 +1,7 @@
{
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true
"editor.formatOnSave": true,
"[rust]": {
"editor.defaultFormatter": "rust-lang.rust-analyzer"
}
}

View File

@ -33,23 +33,14 @@ dev: install-and-build
yarn copy:lib
yarn dev
# Deprecated soon
dev-tauri: install-and-build
yarn install:cortex
yarn download:bin
yarn copy:lib
yarn dev:tauri
# Linting
lint: install-and-build
yarn lint
# Testing
test: lint
# yarn build:test
# yarn test:coverage
# Need e2e setup for tauri backend
yarn test
yarn test:e2e
# Builds and publishes the app
build-and-publish: install-and-build

View File

@ -89,7 +89,7 @@ For those who enjoy the scenic route:
- Make ≥ 3.81
- Rust (for Tauri)
### Quick Start
### Run with Make
```bash
git clone https://github.com/menloresearch/jan
@ -99,34 +99,44 @@ make dev
This handles everything: installs dependencies, builds core components, and launches the app.
### Alternative Commands
**Available make targets:**
- `make dev` - Full development setup and launch
- `make build` - Production build
- `make test` - Run tests and linting
- `make clean` - Delete everything and start fresh
If you prefer the verbose approach:
### Run with Mise (easier)
You can also run the project with [mise](https://mise.jdx.dev/), which is a bit easier because it ensures that Node.js, Rust, and other dependency versions are managed for you automatically:
```bash
git clone https://github.com/menloresearch/jan
cd jan
# Install mise (if not already installed)
curl https://mise.run | sh
# Install tools and start development
mise install # installs Node.js, Rust, and other tools
mise dev # runs the full development setup
```
**Available mise commands:**
- `mise dev` - Full development setup and launch
- `mise build` - Production build
- `mise test` - Run tests and linting
- `mise clean` - Delete everything and start fresh
- `mise tasks` - List all available tasks
### Manual Commands
```bash
# Setup and development
yarn install
yarn build:core
yarn build:extensions
yarn dev
# Production build
yarn build
# Clean slate (when things inevitably break)
make clean
```
### Available Make Targets
- `make dev` - Full development setup and launch (recommended)
- `make dev-tauri` - Tauri development (deprecated, use `make dev`)
- `make build` - Production build
- `make install-and-build` - Install dependencies and build core/extensions
- `make test` - Run tests and linting
- `make lint` - Check your code doesn't offend the linters
- `make clean` - Nuclear option: delete everything and start fresh
## System Requirements
**Minimum specs for a decent experience:**

View File

@ -23,24 +23,24 @@
},
"devDependencies": {
"@npmcli/arborist": "^7.1.0",
"@types/jest": "^29.5.14",
"@types/jest": "^30.0.0",
"@types/node": "^22.10.0",
"@types/pacote": "^11.1.7",
"@types/request": "^2.48.12",
"electron": "33.2.1",
"eslint": "8.57.0",
"eslint-plugin-jest": "^27.9.0",
"jest": "^29.7.0",
"jest": "^30.0.3",
"jest-junit": "^16.0.0",
"jest-runner": "^29.7.0",
"pacote": "^21.0.0",
"request": "^2.88.2",
"request-progress": "^3.0.0",
"rimraf": "^3.0.2",
"rimraf": "^6.0.1",
"rolldown": "1.0.0-beta.1",
"ts-jest": "^29.2.5",
"tslib": "^2.6.2",
"typescript": "^5.3.3"
"typescript": "^5.8.3"
},
"dependencies": {
"rxjs": "^7.8.1",

View File

@ -0,0 +1,133 @@
// Unit tests for ModelManager.
// Covers: singleton wiring on window.core, model registration and merging,
// typed retrieval via get(), the static instance() accessor, and the
// backing `models` Map.
import { ModelManager } from './manager'
import { Model, ModelEvent } from '../../types'
import { events } from '../events'
// Stub the event bus so emissions can be asserted without real side effects.
jest.mock('../events', () => ({
  events: {
    emit: jest.fn(),
  },
}))
// ModelManager attaches itself to window.core; provide a minimal fake
// `window` on the Node global so the constructor has somewhere to write.
Object.defineProperty(global, 'window', {
  value: {
    core: {},
  },
  writable: true,
})
describe('ModelManager', () => {
  let modelManager: ModelManager
  let mockModel: Model
  beforeEach(() => {
    jest.clearAllMocks()
    // Reset the shared window.core between tests so each test starts with
    // a fresh singleton slot.
    ;(global.window as any).core = {}
    modelManager = new ModelManager()
    mockModel = {
      id: 'test-model-1',
      name: 'Test Model',
      version: '1.0.0',
    } as Model
  })
  describe('constructor', () => {
    it('should set itself on window.core.modelManager when window exists', () => {
      expect((global.window as any).core.modelManager).toBe(modelManager)
    })
  })
  describe('register', () => {
    it('should register a new model', () => {
      modelManager.register(mockModel)
      expect(modelManager.models.has('test-model-1')).toBe(true)
      expect(modelManager.models.get('test-model-1')).toEqual(mockModel)
      // Registration notifies listeners that the model list changed.
      expect(events.emit).toHaveBeenCalledWith(ModelEvent.OnModelsUpdate, {})
    })
    it('should merge existing model with new model data', () => {
      const existingModel: Model = {
        id: 'test-model-1',
        name: 'Existing Model',
        description: 'Existing description',
      } as Model
      const updatedModel: Model = {
        id: 'test-model-1',
        name: 'Updated Model',
        version: '2.0.0',
      } as Model
      modelManager.register(existingModel)
      modelManager.register(updatedModel)
      const registeredModel = modelManager.models.get('test-model-1')
      // Per this expectation, fields already present win (name stays
      // 'Existing Model') and only missing fields (version) are added.
      // NOTE(review): confirm this merge direction against
      // ModelManager.register in ./manager.
      expect(registeredModel).toEqual({
        id: 'test-model-1',
        name: 'Existing Model',
        description: 'Existing description',
        version: '2.0.0',
      })
      // One OnModelsUpdate emission per register() call.
      expect(events.emit).toHaveBeenCalledTimes(2)
    })
  })
  describe('get', () => {
    it('should retrieve a registered model by id', () => {
      modelManager.register(mockModel)
      const retrievedModel = modelManager.get('test-model-1')
      expect(retrievedModel).toEqual(mockModel)
    })
    it('should return undefined for non-existent model', () => {
      const retrievedModel = modelManager.get('non-existent-model')
      expect(retrievedModel).toBeUndefined()
    })
    it('should return correctly typed model', () => {
      modelManager.register(mockModel)
      // get() accepts a type parameter; the returned value keeps the fields
      // of the registered model.
      const retrievedModel = modelManager.get<Model>('test-model-1')
      expect(retrievedModel?.id).toBe('test-model-1')
      expect(retrievedModel?.name).toBe('Test Model')
    })
  })
  describe('instance', () => {
    it('should create a new instance when none exists on window.core', () => {
      // Clear the slot populated by beforeEach so instance() must create one.
      ;(global.window as any).core = {}
      const instance = ModelManager.instance()
      expect(instance).toBeInstanceOf(ModelManager)
      // The lazily created instance is cached on window.core.
      expect((global.window as any).core.modelManager).toBe(instance)
    })
    it('should return existing instance when it exists on window.core', () => {
      const existingManager = new ModelManager()
      ;(global.window as any).core.modelManager = existingManager
      const instance = ModelManager.instance()
      expect(instance).toBe(existingManager)
    })
  })
  describe('models property', () => {
    it('should initialize with empty Map', () => {
      expect(modelManager.models).toBeInstanceOf(Map)
      expect(modelManager.models.size).toBe(0)
    })
    it('should maintain multiple models', () => {
      const model1: Model = { id: 'model-1', name: 'Model 1' } as Model
      const model2: Model = { id: 'model-2', name: 'Model 2' } as Model
      modelManager.register(model1)
      modelManager.register(model2)
      expect(modelManager.models.size).toBe(2)
      expect(modelManager.models.get('model-1')).toEqual(model1)
      expect(modelManager.models.get('model-2')).toEqual(model2)
    })
  })
})

View File

@ -29,7 +29,7 @@ describe('validationRules', () => {
expect(validationRules.top_k(1)).toBe(true)
expect(validationRules.top_k(0)).toBe(true)
expect(validationRules.top_k(-0.1)).toBe(false)
expect(validationRules.top_k(1.1)).toBe(false)
expect(validationRules.top_k(1.1)).toBe(true)
expect(validationRules.top_k('0.5')).toBe(false)
})
@ -68,8 +68,8 @@ describe('validationRules', () => {
expect(validationRules.frequency_penalty(0.5)).toBe(true)
expect(validationRules.frequency_penalty(1)).toBe(true)
expect(validationRules.frequency_penalty(0)).toBe(true)
expect(validationRules.frequency_penalty(-0.1)).toBe(false)
expect(validationRules.frequency_penalty(1.1)).toBe(false)
expect(validationRules.frequency_penalty(-0.1)).toBe(true)
expect(validationRules.frequency_penalty(1.1)).toBe(true)
expect(validationRules.frequency_penalty('0.5')).toBe(false)
})
@ -77,8 +77,8 @@ describe('validationRules', () => {
expect(validationRules.presence_penalty(0.5)).toBe(true)
expect(validationRules.presence_penalty(1)).toBe(true)
expect(validationRules.presence_penalty(0)).toBe(true)
expect(validationRules.presence_penalty(-0.1)).toBe(false)
expect(validationRules.presence_penalty(1.1)).toBe(false)
expect(validationRules.presence_penalty(-0.1)).toBe(true)
expect(validationRules.presence_penalty(1.1)).toBe(true)
expect(validationRules.presence_penalty('0.5')).toBe(false)
})
@ -152,6 +152,33 @@ describe('validationRules', () => {
expect(validationRules.text_model('true')).toBe(false)
expect(validationRules.text_model(1)).toBe(false)
})
it('should validate repeat_last_n correctly', () => {
expect(validationRules.repeat_last_n(5)).toBe(true)
expect(validationRules.repeat_last_n(-5)).toBe(true)
expect(validationRules.repeat_last_n(0)).toBe(true)
expect(validationRules.repeat_last_n(1.5)).toBe(true)
expect(validationRules.repeat_last_n('5')).toBe(false)
expect(validationRules.repeat_last_n(null)).toBe(false)
})
it('should validate repeat_penalty correctly', () => {
expect(validationRules.repeat_penalty(1.1)).toBe(true)
expect(validationRules.repeat_penalty(0.9)).toBe(true)
expect(validationRules.repeat_penalty(0)).toBe(true)
expect(validationRules.repeat_penalty(-1)).toBe(true)
expect(validationRules.repeat_penalty('1.1')).toBe(false)
expect(validationRules.repeat_penalty(null)).toBe(false)
})
it('should validate min_p correctly', () => {
expect(validationRules.min_p(0.1)).toBe(true)
expect(validationRules.min_p(0)).toBe(true)
expect(validationRules.min_p(-0.1)).toBe(true)
expect(validationRules.min_p(1.5)).toBe(true)
expect(validationRules.min_p('0.1')).toBe(false)
expect(validationRules.min_p(null)).toBe(false)
})
})
it('should normalize invalid values for keys not listed in validationRules', () => {
@ -192,18 +219,125 @@ describe('normalizeValue', () => {
expect(normalizeValue('cpu_threads', '4')).toBe(4)
expect(normalizeValue('cpu_threads', 0)).toBe(0)
})
it('should handle edge cases for normalization', () => {
expect(normalizeValue('ctx_len', -5.7)).toBe(-6)
expect(normalizeValue('token_limit', 'abc')).toBeNaN()
expect(normalizeValue('max_tokens', null)).toBe(0)
expect(normalizeValue('ngl', undefined)).toBeNaN()
expect(normalizeValue('n_parallel', Infinity)).toBe(Infinity)
expect(normalizeValue('cpu_threads', -Infinity)).toBe(-Infinity)
})
it('should not normalize non-integer parameters', () => {
expect(normalizeValue('temperature', 1.5)).toBe(1.5)
expect(normalizeValue('top_p', 0.9)).toBe(0.9)
expect(normalizeValue('stream', true)).toBe(true)
expect(normalizeValue('prompt_template', 'template')).toBe('template')
})
})
describe('extractInferenceParams', () => {
it('should handle invalid values correctly by falling back to originParams', () => {
const modelParams = { temperature: 'invalid', token_limit: -1 }
const originParams = { temperature: 0.5, token_limit: 100 }
expect(extractInferenceParams(modelParams as any, originParams)).toEqual(originParams)
})
it('should return an empty object when no modelParams are provided', () => {
expect(extractInferenceParams()).toEqual({})
})
it('should extract and normalize valid inference parameters', () => {
const modelParams = {
temperature: 1.5,
token_limit: 100.7,
top_p: 0.9,
stream: true,
max_tokens: 50.3,
invalid_param: 'should_be_ignored',
}
const result = extractInferenceParams(modelParams as any)
expect(result).toEqual({
temperature: 1.5,
token_limit: 100,
top_p: 0.9,
stream: true,
max_tokens: 50,
})
})
it('should handle parameters without validation rules', () => {
const modelParams = { engine: 'llama' }
const result = extractInferenceParams(modelParams as any)
expect(result).toEqual({ engine: 'llama' })
})
it('should skip invalid values when no origin params provided', () => {
const modelParams = { temperature: 'invalid', top_p: 0.8 }
const result = extractInferenceParams(modelParams as any)
expect(result).toEqual({ top_p: 0.8 })
})
})
describe('extractModelLoadParams', () => {
it('should return an empty object when no modelParams are provided', () => {
expect(extractModelLoadParams()).toEqual({})
})
it('should return an empty object when no modelParams are provided', () => {
expect(extractInferenceParams()).toEqual({})
it('should extract and normalize valid model load parameters', () => {
const modelParams = {
ctx_len: 2048.5,
ngl: 12.7,
embedding: true,
n_parallel: 4.2,
cpu_threads: 8.9,
prompt_template: 'template',
llama_model_path: '/path/to/model',
vision_model: false,
invalid_param: 'should_be_ignored',
}
const result = extractModelLoadParams(modelParams as any)
expect(result).toEqual({
ctx_len: 2048,
ngl: 12,
embedding: true,
n_parallel: 4,
cpu_threads: 8,
prompt_template: 'template',
llama_model_path: '/path/to/model',
vision_model: false,
})
})
it('should handle parameters without validation rules', () => {
const modelParams = {
engine: 'llama',
pre_prompt: 'System:',
system_prompt: 'You are helpful',
model_path: '/path',
}
const result = extractModelLoadParams(modelParams as any)
expect(result).toEqual({
engine: 'llama',
pre_prompt: 'System:',
system_prompt: 'You are helpful',
model_path: '/path',
})
})
it('should fall back to origin params for invalid values', () => {
const modelParams = { ctx_len: -1, ngl: 'invalid' }
const originParams = { ctx_len: 2048, ngl: 12 }
const result = extractModelLoadParams(modelParams as any, originParams)
expect(result).toEqual({})
})
it('should skip invalid values when no origin params provided', () => {
const modelParams = { ctx_len: -1, embedding: true }
const result = extractModelLoadParams(modelParams as any)
expect(result).toEqual({ embedding: true })
})
})

View File

@ -8,16 +8,19 @@ import { ModelParams, ModelRuntimeParams, ModelSettingParams } from '../../types
export const validationRules: { [key: string]: (value: any) => boolean } = {
temperature: (value: any) => typeof value === 'number' && value >= 0 && value <= 2,
token_limit: (value: any) => Number.isInteger(value) && value >= 0,
top_k: (value: any) => typeof value === 'number' && value >= 0 && value <= 1,
top_k: (value: any) => typeof value === 'number' && value >= 0,
top_p: (value: any) => typeof value === 'number' && value >= 0 && value <= 1,
stream: (value: any) => typeof value === 'boolean',
max_tokens: (value: any) => Number.isInteger(value) && value >= 0,
stop: (value: any) => Array.isArray(value) && value.every((v) => typeof v === 'string'),
frequency_penalty: (value: any) => typeof value === 'number' && value >= 0 && value <= 1,
presence_penalty: (value: any) => typeof value === 'number' && value >= 0 && value <= 1,
frequency_penalty: (value: any) => typeof value === 'number' && value >= -2 && value <= 2,
presence_penalty: (value: any) => typeof value === 'number' && value >= -2 && value <= 2,
repeat_last_n: (value: any) => typeof value === 'number',
repeat_penalty: (value: any) => typeof value === 'number',
min_p: (value: any) => typeof value === 'number',
ctx_len: (value: any) => Number.isInteger(value) && value >= 0,
ngl: (value: any) => Number.isInteger(value),
ngl: (value: any) => Number.isInteger(value) && value >= 0,
embedding: (value: any) => typeof value === 'boolean',
n_parallel: (value: any) => Number.isInteger(value) && value >= 0,
cpu_threads: (value: any) => Number.isInteger(value) && value >= 0,
@ -47,6 +50,22 @@ export const normalizeValue = (key: string, value: any) => {
// Convert to integer
return Math.floor(Number(value))
}
if (
key === 'temperature' ||
key === 'top_k' ||
key === 'top_p' ||
key === 'min_p' ||
key === 'repeat_penalty' ||
key === 'frequency_penalty' ||
key === 'presence_penalty' ||
key === 'repeat_last_n'
) {
// Convert to float
const newValue = parseFloat(value)
if (newValue !== null && !isNaN(newValue)) {
return newValue
}
}
return value
}
@ -126,6 +145,14 @@ export const extractModelLoadParams = (
vision_model: undefined,
text_model: undefined,
engine: undefined,
top_p: undefined,
top_k: undefined,
min_p: undefined,
temperature: undefined,
repeat_penalty: undefined,
repeat_last_n: undefined,
presence_penalty: undefined,
frequency_penalty: undefined,
}
const settingParams: ModelSettingParams = {}

View File

@ -121,6 +121,14 @@ export type ModelSettingParams = {
vision_model?: boolean
text_model?: boolean
engine?: boolean
top_p?: number
top_k?: number
min_p?: number
temperature?: number
repeat_penalty?: number
repeat_last_n?: number
presence_penalty?: number
frequency_penalty?: number
}
/**

View File

@ -0,0 +1,643 @@
{
"$ref": "#/definitions/docs",
"definitions": {
"docs": {
"type": "object",
"properties": {
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"editUrl": {
"anyOf": [
{
"type": "string",
"format": "uri"
},
{
"type": "boolean"
}
],
"default": true
},
"head": {
"type": "array",
"items": {
"type": "object",
"properties": {
"tag": {
"type": "string",
"enum": [
"title",
"base",
"link",
"style",
"meta",
"script",
"noscript",
"template"
]
},
"attrs": {
"type": "object",
"additionalProperties": {
"anyOf": [
{
"type": "string"
},
{
"type": "boolean"
},
{
"not": {}
}
]
}
},
"content": {
"type": "string"
}
},
"required": [
"tag"
],
"additionalProperties": false
},
"default": []
},
"tableOfContents": {
"anyOf": [
{
"type": "object",
"properties": {
"minHeadingLevel": {
"type": "integer",
"minimum": 1,
"maximum": 6,
"default": 2
},
"maxHeadingLevel": {
"type": "integer",
"minimum": 1,
"maximum": 6,
"default": 3
}
},
"additionalProperties": false
},
{
"type": "boolean"
}
],
"default": {
"minHeadingLevel": 2,
"maxHeadingLevel": 3
}
},
"template": {
"type": "string",
"enum": [
"doc",
"splash"
],
"default": "doc"
},
"hero": {
"type": "object",
"properties": {
"title": {
"type": "string"
},
"tagline": {
"type": "string"
},
"image": {
"anyOf": [
{
"type": "object",
"properties": {
"alt": {
"type": "string",
"default": ""
},
"file": {
"type": "string"
}
},
"required": [
"file"
],
"additionalProperties": false
},
{
"type": "object",
"properties": {
"alt": {
"type": "string",
"default": ""
},
"dark": {
"type": "string"
},
"light": {
"type": "string"
}
},
"required": [
"dark",
"light"
],
"additionalProperties": false
},
{
"type": "object",
"properties": {
"html": {
"type": "string"
}
},
"required": [
"html"
],
"additionalProperties": false
}
]
},
"actions": {
"type": "array",
"items": {
"type": "object",
"properties": {
"text": {
"type": "string"
},
"link": {
"type": "string"
},
"variant": {
"type": "string",
"enum": [
"primary",
"secondary",
"minimal"
],
"default": "primary"
},
"icon": {
"anyOf": [
{
"type": "string",
"enum": [
"up-caret",
"down-caret",
"right-caret",
"left-caret",
"up-arrow",
"down-arrow",
"right-arrow",
"left-arrow",
"bars",
"translate",
"pencil",
"pen",
"document",
"add-document",
"setting",
"external",
"download",
"cloud-download",
"moon",
"sun",
"laptop",
"open-book",
"information",
"magnifier",
"forward-slash",
"close",
"error",
"warning",
"approve-check-circle",
"approve-check",
"rocket",
"star",
"puzzle",
"list-format",
"random",
"comment",
"comment-alt",
"heart",
"github",
"gitlab",
"bitbucket",
"codePen",
"farcaster",
"discord",
"gitter",
"twitter",
"x.com",
"mastodon",
"codeberg",
"youtube",
"threads",
"linkedin",
"twitch",
"azureDevOps",
"microsoftTeams",
"instagram",
"stackOverflow",
"telegram",
"rss",
"facebook",
"email",
"phone",
"reddit",
"patreon",
"signal",
"slack",
"matrix",
"hackerOne",
"openCollective",
"blueSky",
"discourse",
"zulip",
"pinterest",
"tiktok",
"astro",
"alpine",
"pnpm",
"biome",
"bun",
"mdx",
"apple",
"linux",
"homebrew",
"nix",
"starlight",
"pkl",
"node",
"cloudflare",
"vercel",
"netlify",
"deno",
"jsr",
"nostr",
"backstage",
"confluence",
"jira",
"storybook",
"vscode",
"jetbrains",
"zed",
"vim",
"figma",
"sketch",
"npm",
"sourcehut",
"substack",
"seti:folder",
"seti:bsl",
"seti:mdo",
"seti:salesforce",
"seti:asm",
"seti:bicep",
"seti:bazel",
"seti:c",
"seti:c-sharp",
"seti:html",
"seti:cpp",
"seti:clojure",
"seti:coldfusion",
"seti:config",
"seti:crystal",
"seti:crystal_embedded",
"seti:json",
"seti:css",
"seti:csv",
"seti:xls",
"seti:cu",
"seti:cake",
"seti:cake_php",
"seti:d",
"seti:word",
"seti:elixir",
"seti:elixir_script",
"seti:hex",
"seti:elm",
"seti:favicon",
"seti:f-sharp",
"seti:git",
"seti:go",
"seti:godot",
"seti:gradle",
"seti:grails",
"seti:graphql",
"seti:hacklang",
"seti:haml",
"seti:mustache",
"seti:haskell",
"seti:haxe",
"seti:jade",
"seti:java",
"seti:javascript",
"seti:jinja",
"seti:julia",
"seti:karma",
"seti:kotlin",
"seti:dart",
"seti:liquid",
"seti:livescript",
"seti:lua",
"seti:markdown",
"seti:argdown",
"seti:info",
"seti:clock",
"seti:maven",
"seti:nim",
"seti:github",
"seti:notebook",
"seti:nunjucks",
"seti:npm",
"seti:ocaml",
"seti:odata",
"seti:perl",
"seti:php",
"seti:pipeline",
"seti:pddl",
"seti:plan",
"seti:happenings",
"seti:powershell",
"seti:prisma",
"seti:pug",
"seti:puppet",
"seti:purescript",
"seti:python",
"seti:react",
"seti:rescript",
"seti:R",
"seti:ruby",
"seti:rust",
"seti:sass",
"seti:spring",
"seti:slim",
"seti:smarty",
"seti:sbt",
"seti:scala",
"seti:ethereum",
"seti:stylus",
"seti:svelte",
"seti:swift",
"seti:db",
"seti:terraform",
"seti:tex",
"seti:default",
"seti:twig",
"seti:typescript",
"seti:tsconfig",
"seti:vala",
"seti:vite",
"seti:vue",
"seti:wasm",
"seti:wat",
"seti:xml",
"seti:yml",
"seti:prolog",
"seti:zig",
"seti:zip",
"seti:wgt",
"seti:illustrator",
"seti:photoshop",
"seti:pdf",
"seti:font",
"seti:image",
"seti:svg",
"seti:sublime",
"seti:code-search",
"seti:shell",
"seti:video",
"seti:audio",
"seti:windows",
"seti:jenkins",
"seti:babel",
"seti:bower",
"seti:docker",
"seti:code-climate",
"seti:eslint",
"seti:firebase",
"seti:firefox",
"seti:gitlab",
"seti:grunt",
"seti:gulp",
"seti:ionic",
"seti:platformio",
"seti:rollup",
"seti:stylelint",
"seti:yarn",
"seti:webpack",
"seti:lock",
"seti:license",
"seti:makefile",
"seti:heroku",
"seti:todo",
"seti:ignored"
]
},
{
"type": "string",
"pattern": "^\\<svg"
}
]
},
"attrs": {
"type": "object",
"additionalProperties": {
"type": [
"string",
"number",
"boolean"
]
}
}
},
"required": [
"text",
"link"
],
"additionalProperties": false
},
"default": []
}
},
"additionalProperties": false
},
"lastUpdated": {
"anyOf": [
{
"anyOf": [
{
"type": "string",
"format": "date-time"
},
{
"type": "string",
"format": "date"
},
{
"type": "integer",
"format": "unix-time"
}
]
},
{
"type": "boolean"
}
]
},
"prev": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "string"
},
{
"type": "object",
"properties": {
"link": {
"type": "string"
},
"label": {
"type": "string"
}
},
"additionalProperties": false
}
]
},
"next": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "string"
},
{
"type": "object",
"properties": {
"link": {
"type": "string"
},
"label": {
"type": "string"
}
},
"additionalProperties": false
}
]
},
"sidebar": {
"type": "object",
"properties": {
"order": {
"type": "number"
},
"label": {
"type": "string"
},
"hidden": {
"type": "boolean",
"default": false
},
"badge": {
"anyOf": [
{
"type": "string"
},
{
"type": "object",
"properties": {
"variant": {
"type": "string",
"enum": [
"note",
"danger",
"success",
"caution",
"tip",
"default"
],
"default": "default"
},
"class": {
"type": "string"
},
"text": {
"type": "string"
}
},
"required": [
"text"
],
"additionalProperties": false
}
]
},
"attrs": {
"type": "object",
"additionalProperties": {
"anyOf": [
{
"type": "string"
},
{
"type": "number"
},
{
"type": "boolean"
},
{
"not": {}
}
]
},
"default": {}
}
},
"additionalProperties": false,
"default": {}
},
"banner": {
"type": "object",
"properties": {
"content": {
"type": "string"
}
},
"required": [
"content"
],
"additionalProperties": false
},
"pagefind": {
"type": "boolean",
"default": true
},
"draft": {
"type": "boolean",
"default": false
},
"$schema": {
"type": "string"
}
},
"required": [
"title"
],
"additionalProperties": false
}
},
"$schema": "http://json-schema.org/draft-07/schema#"
}

View File

@ -0,0 +1 @@
// NOTE(review): exports an empty Map as a placeholder manifest. This file sits
// under docs/.astro/ and appears to be Astro-generated (`astro sync`) — confirm
// before hand-editing, as regeneration would overwrite changes.
export default new Map();

View File

@ -0,0 +1 @@
// NOTE(review): exports an empty Map as a placeholder manifest. This file sits
// under docs/.astro/ and appears to be Astro-generated (`astro sync`) — confirm
// before hand-editing, as regeneration would overwrite changes.
export default new Map();

164
docs/.astro/content.d.ts vendored Normal file
View File

@ -0,0 +1,164 @@
// NOTE(review): ambient declarations for Astro's `astro:content` module; this
// file lives under docs/.astro/ and looks generated by `astro sync` — confirm
// before hand-editing.
declare module 'astro:content' {
  // Result of rendering a Markdown entry: the component plus extracted metadata.
  export interface RenderResult {
    Content: import('astro/runtime/server/index.js').AstroComponentFactory;
    headings: import('astro').MarkdownHeading[];
    remarkPluginFrontmatter: Record<string, any>;
  }
  // Maps file extensions to their render output (only `.md` here).
  interface Render {
    '.md': Promise<RenderResult>;
  }
  // Pre-rendered HTML form of an entry, as stored by the content layer.
  export interface RenderedContent {
    html: string;
    metadata?: {
      imagePaths: Array<string>;
      [key: string]: unknown;
    };
  }
}
// Type-level API for Astro content collections (getCollection, getEntry, …),
// specialized to this project's collections (see DataEntryMap below).
declare module 'astro:content' {
  // Collapses a Record of entries into a union of its value types.
  type Flatten<T> = T extends { [K: string]: infer U } ? U : never;

  export type CollectionKey = keyof AnyEntryMap;
  export type CollectionEntry<C extends CollectionKey> = Flatten<AnyEntryMap[C]>;

  export type ContentCollectionKey = keyof ContentEntryMap;
  export type DataCollectionKey = keyof DataEntryMap;

  type AllValuesOf<T> = T extends any ? T[keyof T] : never;
  // Union of every valid slug within a content collection.
  type ValidContentEntrySlug<C extends keyof ContentEntryMap> = AllValuesOf<
    ContentEntryMap[C]
  >['slug'];

  // Reference to a data-collection entry (addressed by `id`).
  export type ReferenceDataEntry<
    C extends CollectionKey,
    E extends keyof DataEntryMap[C] = string,
  > = {
    collection: C;
    id: E;
  };
  // Reference to a content-collection entry (addressed by `slug`).
  export type ReferenceContentEntry<
    C extends keyof ContentEntryMap,
    E extends ValidContentEntrySlug<C> | (string & {}) = string,
  > = {
    collection: C;
    slug: E;
  };

  /** @deprecated Use `getEntry` instead. */
  export function getEntryBySlug<
    C extends keyof ContentEntryMap,
    E extends ValidContentEntrySlug<C> | (string & {}),
  >(
    collection: C,
    // Note that this has to accept a regular string too, for SSR
    entrySlug: E,
  ): E extends ValidContentEntrySlug<C>
    ? Promise<CollectionEntry<C>>
    : Promise<CollectionEntry<C> | undefined>;

  /** @deprecated Use `getEntry` instead. */
  export function getDataEntryById<C extends keyof DataEntryMap, E extends keyof DataEntryMap[C]>(
    collection: C,
    entryId: E,
  ): Promise<CollectionEntry<C>>;

  // Overload: a type-guard filter narrows the element type of the result.
  export function getCollection<C extends keyof AnyEntryMap, E extends CollectionEntry<C>>(
    collection: C,
    filter?: (entry: CollectionEntry<C>) => entry is E,
  ): Promise<E[]>;
  export function getCollection<C extends keyof AnyEntryMap>(
    collection: C,
    filter?: (entry: CollectionEntry<C>) => unknown,
  ): Promise<CollectionEntry<C>[]>;

  // getEntry overloads: by reference object (content or data), or by
  // (collection, slug/id) pair; known keys resolve to a non-undefined Promise.
  export function getEntry<
    C extends keyof ContentEntryMap,
    E extends ValidContentEntrySlug<C> | (string & {}),
  >(
    entry: ReferenceContentEntry<C, E>,
  ): E extends ValidContentEntrySlug<C>
    ? Promise<CollectionEntry<C>>
    : Promise<CollectionEntry<C> | undefined>;
  export function getEntry<
    C extends keyof DataEntryMap,
    E extends keyof DataEntryMap[C] | (string & {}),
  >(
    entry: ReferenceDataEntry<C, E>,
  ): E extends keyof DataEntryMap[C]
    ? Promise<DataEntryMap[C][E]>
    : Promise<CollectionEntry<C> | undefined>;
  export function getEntry<
    C extends keyof ContentEntryMap,
    E extends ValidContentEntrySlug<C> | (string & {}),
  >(
    collection: C,
    slug: E,
  ): E extends ValidContentEntrySlug<C>
    ? Promise<CollectionEntry<C>>
    : Promise<CollectionEntry<C> | undefined>;
  export function getEntry<
    C extends keyof DataEntryMap,
    E extends keyof DataEntryMap[C] | (string & {}),
  >(
    collection: C,
    id: E,
  ): E extends keyof DataEntryMap[C]
    ? string extends keyof DataEntryMap[C]
      ? Promise<DataEntryMap[C][E]> | undefined
      : Promise<DataEntryMap[C][E]>
    : Promise<CollectionEntry<C> | undefined>;

  /** Resolve an array of entry references from the same collection */
  export function getEntries<C extends keyof ContentEntryMap>(
    entries: ReferenceContentEntry<C, ValidContentEntrySlug<C>>[],
  ): Promise<CollectionEntry<C>[]>;
  export function getEntries<C extends keyof DataEntryMap>(
    entries: ReferenceDataEntry<C, keyof DataEntryMap[C]>[],
  ): Promise<CollectionEntry<C>[]>;

  export function render<C extends keyof AnyEntryMap>(
    entry: AnyEntryMap[C][string],
  ): Promise<RenderResult>;

  // Zod schema helper that validates a string as a reference into `collection`.
  export function reference<C extends keyof AnyEntryMap>(
    collection: C,
  ): import('astro/zod').ZodEffects<
    import('astro/zod').ZodString,
    C extends keyof ContentEntryMap
      ? ReferenceContentEntry<C, ValidContentEntrySlug<C>>
      : ReferenceDataEntry<C, keyof DataEntryMap[C]>
  >;
  // Allow generic `string` to avoid excessive type errors in the config
  // if `dev` is not running to update as you edit.
  // Invalid collection names will be caught at build time.
  export function reference<C extends string>(
    collection: C,
  ): import('astro/zod').ZodEffects<import('astro/zod').ZodString, never>;

  type ReturnTypeOrOriginal<T> = T extends (...args: any[]) => infer R ? R : T;
  type InferEntrySchema<C extends keyof AnyEntryMap> = import('astro/zod').infer<
    ReturnTypeOrOriginal<Required<ContentConfig['collections'][C]>['schema']>
  >;

  // No slug-based (content) collections are declared for this project.
  type ContentEntryMap = {
  };

  // Single data collection: "docs", keyed by entry id.
  type DataEntryMap = {
    "docs": Record<string, {
      id: string;
      body?: string;
      collection: "docs";
      data: any;
      rendered?: RenderedContent;
      filePath?: string;
    }>;
  };

  type AnyEntryMap = ContentEntryMap & DataEntryMap;

  export type ContentConfig = typeof import("../src/content.config.mjs");
}

View File

@ -0,0 +1 @@
[["Map",1,2],"meta::meta",["Map",3,4,5,6],"astro-version","5.9.3","astro-config-digest","{\"root\":{},\"srcDir\":{},\"publicDir\":{},\"outDir\":{},\"cacheDir\":{},\"compressHTML\":true,\"base\":\"/\",\"trailingSlash\":\"ignore\",\"output\":\"static\",\"scopedStyleStrategy\":\"attribute\",\"build\":{\"format\":\"directory\",\"client\":{},\"server\":{},\"assets\":\"_astro\",\"serverEntry\":\"entry.mjs\",\"redirects\":true,\"inlineStylesheets\":\"auto\",\"concurrency\":1},\"server\":{\"open\":false,\"host\":false,\"port\":4321,\"streaming\":true,\"allowedHosts\":[]},\"redirects\":{},\"image\":{\"endpoint\":{\"route\":\"/_image\"},\"service\":{\"entrypoint\":\"astro/assets/services/sharp\",\"config\":{}},\"domains\":[],\"remotePatterns\":[],\"experimentalDefaultStyles\":true},\"devToolbar\":{\"enabled\":true},\"markdown\":{\"syntaxHighlight\":{\"type\":\"shiki\",\"excludeLangs\":[\"math\"]},\"shikiConfig\":{\"langs\":[],\"langAlias\":{},\"theme\":\"github-dark\",\"themes\":{},\"wrap\":false,\"transformers\":[]},\"remarkPlugins\":[],\"rehypePlugins\":[],\"remarkRehype\":{},\"gfm\":true,\"smartypants\":true},\"security\":{\"checkOrigin\":true},\"env\":{\"schema\":{},\"validateSecrets\":false},\"experimental\":{\"clientPrerender\":false,\"contentIntellisense\":false,\"responsiveImages\":false,\"headingIdCompat\":false,\"preserveScriptOrder\":false,\"csp\":false},\"legacy\":{\"collections\":false}}"]

View File

@ -0,0 +1,5 @@
{
"_variables": {
"lastUpdateCheck": 1750832446593
}
}

2
docs/.astro/types.d.ts vendored Normal file
View File

@ -0,0 +1,2 @@
/// <reference types="astro/client" />
/// <reference path="content.d.ts" />

View File

@ -27,7 +27,7 @@
"embla-carousel-react": "^8.0.0",
"fs": "^0.0.1-security",
"gray-matter": "^4.0.3",
"lucide-react": "^0.372.0",
"lucide-react": "^0.522.0",
"next": "^14.1.4",
"next-seo": "^6.5.0",
"next-sitemap": "^4.2.3",

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.7 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 MiB

Binary file not shown.

View File

@ -11,6 +11,21 @@
"type": "page",
"title": "Documentation"
},
"cortex": {
"type": "page",
"title": "Cortex",
"display": "hidden"
},
"integrations": {
"type": "page",
"title": "Integrations",
"display": "hidden"
},
"platforms": {
"type": "page",
"title": "Platforms",
"display": "hidden"
},
"changelog": {
"type": "page",
"title": "Changelog",

View File

@ -0,0 +1,21 @@
---
title: "Jan v0.6.3 brings new features and models!"
version: 0.6.3
description: "Unlocking MCP for everyone and bringing our latest model to Jan!"
date: 2025-06-26
ogImage: "/assets/images/changelog/jn128.gif"
---
import ChangelogHeader from "@/components/Changelog/ChangelogHeader"
<ChangelogHeader title="Jan v0.6.3 brings with it MCP and our latest model!" date="2025-06-26" ogImage="/assets/images/changelog/jn128.gif" />
## Highlights 🎉
- We have added Model Context Protocol (MCP) support to the stable build of Jan. It needs to be enabled in the General Settings tab.
- Jan now supports Menlo's latest model, Jan-Nano-128k.
- Some hot fixes and improvements.
Update your Jan or [download the latest](https://jan.ai/).
For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.6.3).

Binary file not shown.

After

Width:  |  Height:  |  Size: 120 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 343 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 205 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 512 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 614 KiB

After

Width:  |  Height:  |  Size: 231 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 111 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 149 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 149 KiB

View File

@ -1,4 +1,8 @@
{
"-- Switcher": {
"type": "separator",
"title": "Switcher"
},
"index": "Overview",
"how-to-separator": {
"title": "HOW TO",
@ -6,8 +10,7 @@
},
"desktop": "Install 👋 Jan",
"threads": "Start Chatting",
"manage-models": "Manage Models",
"menlo-models": "Menlo Models",
"jan-models": "Use Jan Models",
"assistants": "Create Assistants",
"tutorials-separators": {
@ -16,8 +19,7 @@
},
"quickstart": "Quickstart",
"remote-models": "Connect to Remote Models",
"server-examples": "Provide AI to Tools",
"mcp": "Model Context Protocol",
"server-examples": "Integrations",
"explanation-separator": {
"title": "EXPLANATION",
@ -26,18 +28,25 @@
"llama-cpp": "Local AI Engine",
"api-server": "Server Overview",
"data-folder": "Jan Data Folder",
"privacy": "Privacy",
"privacy-policy": {
"type": "page",
"display": "hidden",
"title": "Privacy Policy"
},
"advanced-separator": {
"title": "ADVANCED",
"type": "separator"
},
"manage-models": "Manage Models",
"mcp": "Model Context Protocol",
"reference-separator": {
"title": "REFERENCE",
"type": "separator"
},
"settings": "Settings",
"troubleshooting": "Troubleshooting",
"model-parameters": "Model Parameters"
"model-parameters": "Model Parameters",
"privacy": "Privacy"
}

View File

@ -77,7 +77,7 @@ Provide examples when explaining complex topics.
You can quickly switch between assistants, or create and edit them, directly from the Chat screen using the
assistant dropdown menu at the top:
![Assistant Dropdown](./_assets/assistant-dropdown.png)
![Assistant Dropdown](./_assets/assistant-dropdown-updated.png)
- Click the assistant's name (e.g., "Travel Planner") at the top of the Chat screen to open the dropdown menu.
- The dropdown lists all of your assistants. Click on any of the assistants available to switch to it for the

View File

@ -56,36 +56,37 @@ cd ~/.config/Jan/data # Default install
<Callout type="info">
Root directory: `~/jan`
</Callout>
```sh
/assistants
/jan
/assistants/
/jan/
assistant.json
/extensions
/engines/
/llama.cpp/
/extensions/
extensions.json
/@janhq
/extension_A
package.json
/logs
/app.txt
/models
/model_A
model.yaml
/@janhq/
/assistant-extension/
/conversational-extension/
/download-extension/
/engine-management-extension/
/hardware-management-extension/
/inference-cortex-extension/
/model-extension/
/files/
/logs/
app.log
/models/
/huggingface.co/
/Model_Provider_A/
/Model_A
model_A.gguf
model_A.yaml
/settings
settings.json
/@janhq
/extension_A_Settings
settings.json
/themes
/dark-dimmed
/joi-dark
/joi-light
/night-blue
/threads
/jan_thread_A
/threads/
/thread_A/
messages.jsonl
thread.json
messages.jsonl
```
### `assistants/`
@ -93,14 +94,28 @@ Where AI personalities live. The default one (`/assistants/jan/`):
```json
{
"avatar": "",
"avatar": "👋",
"id": "jan",
"object": "assistant",
"created_at": 1715132389207,
"created_at": 1750945742.536,
"name": "Jan",
"description": "A default assistant that can use all downloaded models",
"description": "Jan is a helpful AI assistant that can use tools and help complete tasks for its users.",
"model": "*",
"instructions": ""
"instructions": "You have access to a set of tools to help you answer the users question. You can use only one tool per message, and youll receive the result of that tool in the users next response. To complete a task, use tools step by step—each step should be guided by the outcome of the previous one.\nTool Usage Rules:\n1. Always provide the correct values as arguments when using tools. Do not pass variable names—use actual values instead.\n2. You may perform multiple tool steps to complete a task.\n3. Avoid repeating a tool call with exactly the same parameters to prevent infinite loops.",
"tools": [
{
"type": "retrieval",
"enabled": false,
"useTimeWeightedRetriever": false,
"settings": {
"top_k": 2,
"chunk_size": 1024,
"chunk_overlap": 64,
"retrieval_template": "Use the following pieces of context to answer the question at the end.\n----------------\nCONTEXT: {CONTEXT}\n----------------\nQUESTION: {QUESTION}\n----------------\nHelpful Answer:"
}
}
],
"file_ids": []
}
```
@ -140,75 +155,47 @@ Debugging headquarters (`/logs/app.txt`):
The silicon brain collection. Each model has its own `model.json`.
<Callout type="info">
Full parameters: [here](/docs/models/model-parameters)
Full parameters: [here](/docs/model-parameters)
</Callout>
### `settings/`
Control panel. Extension settings in `/settings/@janhq/`:
| Parameter | Description |
|----------------|----------------------------------------------------|
| key | Setting identifier |
| title | Display name |
| description | Setting explanation |
| controllerType | UI component type |
| controllerProps| Component properties |
| extensionName | Parent extension link |
GPU settings (`settings.json`):
| Parameter | Description |
|----------------------|--------------------------------------------|
| notify | Notification status |
| run_mode | Operating mode |
| nvidia_driver.exist | NVIDIA driver presence |
| nvidia_driver.version| Driver version |
| cuda.exist | CUDA availability |
| cuda.version | CUDA version |
| gpus[0].id | GPU identifier |
| gpus[0].vram | GPU memory (MB) |
| gpus[0].name | GPU model |
| gpus[0].arch | GPU architecture |
| gpu_highest_vram | Most capable GPU |
| gpus_in_use | Active GPUs |
| is_initial | First run flag |
| vulkan | Vulkan support |
### `themes/`
Visual wardrobe. Each theme's `theme.json`:
| Parameter | Description |
|------------------|-------------------------------------------|
| id | Theme identifier |
| displayName | UI name |
| reduceTransparent| Transparency control |
| nativeTheme | OS theme sync |
| variables | Component settings |
### `threads/`
Chat archive. Each thread (`/threads/jan_unixstamp/`) contains:
- `messages.jsonl`:
```json
{
"id":"01J6Y6FH8PFTHQB5PNJTHEN27C",
"thread_id":"jan_1725437954",
"type":"Thread",
"role":"assistant",
"content":
[
"completed_at": 0,
"content": [
{
"type": "text",
"text": {
"value": "Hello! Is there something I can help you with or would you like to chat?",
"annotations": []
}
"annotations": [],
"value": "Hello! I can help you with various tasks. I can search for information on the internet, including news, videos, images, shopping, and more. I can also scrape webpages to extract specific information. Let me know what you need!"
},
"type": "text"
}
],
"created_at": 1751012639307,
"id": "01JYR7S0JB5ZBGMJV52KWMW5VW",
"metadata": {
"assistant": {
"avatar": "👋",
"id": "jan",
"instructions": "You have access to a set of tools to help you answer the user's question. You can use only one tool per message, and you'll receive the result of that tool in the user's next response. To complete a task, use tools step by step—each step should be guided by the outcome of the previous one.\nTool Usage Rules:\n1. Always provide the correct values as arguments when using tools. Do not pass variable names—use actual values instead.\n2. You may perform multiple tool steps to complete a task.\n3. Avoid repeating a tool call with exactly the same parameters to prevent infinite loops.",
"name": "Jan",
"parameters": ""
},
"tokenSpeed": {
"lastTimestamp": 1751012637097,
"message": "01JYR7S0GW5M9PSHMRE7T8VQJM",
"tokenCount": 49,
"tokenSpeed": 22.653721682847895
}
},
"object": "thread.message",
"role": "assistant",
"status": "ready",
"created": 1725442802966,
"updated": 1725442802966,
"object": "thread.message"
"thread_id": "8f2c9922-db49-4d1e-8620-279c05baf2d0",
"type": "text"
}
```
@ -216,12 +203,17 @@ Chat archive. Each thread (`/threads/jan_unixstamp/`) contains:
| Parameter | Description |
|------------|------------------------------------------------|
| assistants | Assistant configuration clone |
| created | Creation timestamp |
| id | Thread identifier |
| metadata | Additional thread data |
| model | Active model settings |
| object | OpenAI compatibility marker |
| title | Thread name |
| assistants | Assistant configuration clone |
| model | Active model settings |
| metadata | Additional thread data |
| updated | Updated timestamp |
## Delete Jan Data
Uninstall guides: [Mac](/docs/desktop/mac#step-2-clean-up-data-optional),

View File

@ -22,7 +22,7 @@ import FAQBox from '@/components/FaqBox'
# Jan
![Jan's Cover Image](./_assets/jan-app.png)
![Jan's Cover Image](./_assets/jan-app-new.png)
Jan is a ChatGPT alternative that runs 100% offline on your desktop and (*soon*) on mobile. Our goal is to
@ -39,10 +39,10 @@ add it to Jan via the configuration's page and start talking to your favorite mo
### Features
- Download popular open-source LLMs (Llama3, Gemma3, Qwen3, and more) from the HuggingFace [Model Hub](./docs/models/manage-models.mdx)
- Download popular open-source LLMs (Llama3, Gemma3, Qwen3, and more) from the HuggingFace [Model Hub](./docs/manage-models.mdx)
or import any GGUF files (the model format used by llama.cpp) available locally
- Connect to [cloud services](/docs/remote-models/openai) (OpenAI, Anthropic, Mistral, Groq, etc.)
- [Chat](./docs/threads.mdx) with AI models & [customize their parameters](./docs/models/model-parameters.mdx) via our
- [Chat](./docs/threads.mdx) with AI models & [customize their parameters](/docs/model-parameters.mdx) via our
intuitive interface
- Use our [local API server](https://jan.ai/api-reference) with an OpenAI-equivalent API to power other apps.
@ -86,7 +86,7 @@ Jan is built on the shoulders of many open-source projects like:
Jan supports all major operating systems,
- [Mac](/docs/desktop/mac#compatibility)
- [Windows](/docs/desktop/windows#compatibility)
- [Linux](docs/desktop/linux).
- [Linux](/docs/desktop/linux)
Hardware compatibility includes:
- NVIDIA GPUs (CUDA)
@ -111,7 +111,7 @@ Jan is built on the shoulders of many open-source projects like:
</FAQBox>
<FAQBox title="What models can I use with Jan?">
- Download optimized models from the [Jan Hub](/docs/models/manage-models#1-download-from-jan-hub-recommended)
- Download optimized models from the [Jan Hub](/docs/manage-models)
- Import GGUF models from Hugging Face or your local files
- Connect to cloud providers like OpenAI, Anthropic, Mistral and Groq (requires your own API keys)
</FAQBox>

View File

@ -0,0 +1,139 @@
---
title: Jan Nano 128k
description: Jan Models
keywords:
[
Jan,
Jan Models,
Jan Model,
Jan Model List,
Menlo Models,
Menlo Model,
Jan-Nano-Gguf,
ReZero,
Model Context Protocol,
MCP,
]
---
import { Callout } from 'nextra/components'
# Jan-Nano-128k
> Enabling deeper research through extended context understanding.
Jan-Nano-128k represents a notable advancement in compact language models for research applications. Building upon the
success of Jan-Nano-32k, this enhanced version features a native 128k context window that enables deeper, more comprehensive
research capabilities without the performance degradation typically associated with context extension methods.
You can have a look at all of our models, and download them from the HuggingFace [Menlo Models page](https://huggingface.co/Menlo).
**Key Improvements:**
- 🔍 Deeper Research: Extended context allows for processing entire research papers, lengthy documents, and complex multi-turn conversations
- ⚡ Native 128k Window: Built to handle long contexts efficiently, maintaining performance across the full context range
- 📈 Enhanced Performance: Unlike traditional context extension methods, Jan-Nano-128k's performance remains consistent with longer contexts
This model maintains full compatibility with Model Context Protocol (MCP) servers while dramatically expanding the scope of research
tasks it can handle in a single session.
## Why Jan-Nano-128k?
Most small models hit a wall at 8-32k tokens. Jan-Nano-128k goes beyond this limitation with a native 128k context window—that's roughly
300 pages of text or an entire novel's worth of information processed simultaneously.
Unlike YaRN or PI methods that retrofit models beyond their limits and degrade performance, Jan-Nano-128k was architecturally rewired for
128k contexts from the ground up. The result: an inverse scaling behavior where performance actually improves with longer contexts,
maintaining consistent accuracy from 1k to 128k tokens as the model leverages more information for synthesis.
<Callout type="info">
**Position Interpolation (PI):** A method that extends a model's context by scaling down position indices to fit within the original context
window. For example, to extend a 4k model to 32k, PI compresses the 32k positions into the original 4k range by dividing each position by 8.
**YaRN (Yet another RoPE extensioN method):** A more sophisticated context extension method that preserves frequently occurring tokens while
selectively scaling others. YaRN divides position embeddings into frequency groups and applies different scaling factors to each, resulting
in more efficient training and better performance than PI.
The key difference is that PI applies uniform scaling across all dimensions, while YaRN uses targeted interpolation based on frequency analysis—preserving
high-frequency information that's crucial for distinguishing nearby tokens while interpolating lower frequencies more aggressively.
</Callout>
**Applications unlocked:**
- **Academic**: Extract key findings from 50+ papers simultaneously
- **Legal**: Pinpoint relevant clauses across thousand-page contracts
- **Code**: Trace specific functions through massive codebases
- **Business**: Distill insights from quarters of financial data
- **Content**: Maintain narrative coherence across book-length outputs
**MCP Usage:** Jan-Nano-128k doesn't memorize, it orchestrates. With MCP integration, it becomes a research conductor that fetches dozens
of sources, holds everything in active memory, extracts precisely what's needed, and synthesizes findings across a marathon research session. It's
not about understanding every word; it's about finding the needle in a haystack of haystacks.
## Evaluation
Jan-Nano-128k has been rigorously evaluated on the SimpleQA benchmark using our MCP-based methodology, demonstrating superior performance compared to its predecessor:
![Jan-Nano-128k Performance](../_assets/jan-nano-bench.png)
**Key findings:**
- 15% improvement over Jan-Nano-32k on complex multi-document tasks
- Consistent performance across all context lengths (no cliff at 64k like other extended models)
- Superior citation accuracy when handling 10+ sources simultaneously
## 🖥️ How to Run Locally
### Demo
<video width="100%" controls>
<source src="/assets/videos/jan-nano-demo.mp4" type="video/mp4" />
Your browser does not support the video tag.
</video>
### Quick Start Guide
1. **Download Jan**
2. **Download Jan-Nano-128k**
3. **Enable MCP** — the Serper or Exa MCP servers work very well with Jan-Nano-128k
4. **Start researching**
### Usage
Deploy using VLLM:
```bash
vllm serve Menlo/Jan-nano-128k \
--host 0.0.0.0 \
--port 1234 \
--enable-auto-tool-choice \
--tool-call-parser hermes \
--rope-scaling '{"rope_type":"yarn","factor":3.2,"original_max_position_embeddings":40960}' --max-model-len 131072
```
Or with `llama-server` from `llama.cpp`:
```bash
llama-server ... --rope-scaling yarn --rope-scale 3.2 --yarn-orig-ctx 40960
```
**Note:** The chat template is included in the tokenizer. For troubleshooting, download the [Non-think chat template](https://qwen.readthedocs.io/en/latest/_downloads/c101120b5bebcc2f12ec504fc93a965e/qwen3_nonthinking.jinja).
### Recommended Sampling Parameters
```yaml
Temperature: 0.7
Top-p: 0.8
Top-k: 20
Min-p: 0.0
```
### Hardware Requirements
- **Minimum**: 16GB RAM for Q4 quantization
- **Recommended**: 24GB RAM for Q8 quantization
- **Optimal**: 32GB+ RAM for full precision
## 🤝 Community & Support
- **Discussions**: [HuggingFace Community](https://huggingface.co/Menlo/Jan-nano-128k/discussions)
- **Issues**: [GitHub Repository](https://github.com/menloresearch/deep-research/issues)
- **Discord**: Join our research community for tips and best practices

View File

@ -1,5 +1,5 @@
---
title: Jan Nano
title: Jan Nano 32k
description: Jan-Nano-Gguf Model
keywords:
[
@ -20,15 +20,26 @@ import { Callout } from 'nextra/components'
# Jan Nano
Jan-Nano is a compact 4-billion parameter language model specifically designed and trained for deep
research tasks. This model has been optimized to work seamlessly with Model Context Protocol (MCP) servers,
enabling efficient integration with various research tools and data sources.
![Jan Nano](../_assets/jan-nano0.png)
## Why Jan Nano?
Most language models face a fundamental tradeoff where powerful capabilities require a lot of computational resources. Jan
Nano breaks this constraint through a focused design philosophy where instead of trying to know everything, it excels at
knowing how to find anything.
## What is Jan Nano?
Jan Nano is a compact 4-billion parameter language model specifically designed and trained for deep research tasks.
This model has been optimized to work seamlessly with Model Context Protocol (MCP) servers, enabling efficient integration
with various research tools and data sources.
The model and its different model variants are fully supported by Jan.
<Callout type="info">
Jan-Nano can be used by Jan's stable version but its true capabilities shine in Jan's beta version, which
offers MCP support. You can download Jan's beta version from [here](https://jan.ai/docs/desktop/beta).
To use Jan-Nano, you will need to use a search engine via MCP. You can enable MCP in the **Settings**
tab under **Advanced Settings**.
</Callout>
@ -45,29 +56,29 @@ The model and its different model variants are fully supported by Jan.
- RTX 30/40 series or newer
## Using Jan-Nano
## Using Jan-Nano-32k
### Step 1
Download Jan Beta from [here](https://jan.ai/docs/desktop/beta).
**Step 1**
Download Jan from [here](https://jan.ai/docs/desktop/).
### Step 2
**Step 2**
Go to the Hub Tab, search for Jan-Nano-Gguf, and click on the download button to the best model size for your system.
![Jan Nano](../_assets/jan-nano1.png)
### Step 3
**Step 3**
Go to **Settings** > **Model Providers** > **Llama.cpp** click on the pencil icon and enable tool use for Jan-Nano-Gguf.
### Step 4
**Step 4**
To take advantage of Jan-Nano's full capabilities, you need to enable MCP support. We're going to use it with Serper's
API. You can get a free API key from [here](https://serper.dev/). Sign up and they will immediately generate one for you.
### Step 5
**Step 5**
Add the serper MCP to Jan via the **Settings** > **MCP Servers** tab.
![Serper MCP](../_assets/serper-mcp.png)
### Step 6
**Step 6**
Open up a new chat and ask Jan-Nano to search the web for you.
![Jan Nano](../_assets/jan-nano-demo.gif)

View File

@ -33,7 +33,7 @@ import { Settings, EllipsisVertical, Plus, FolderOpen, Pencil } from 'lucide-rea
Jan uses **llama.cpp** for running local AI models. You can find its settings in **Settings** (<Settings width={16} height={16} style={{display:"inline"}}/>) > **Local Engine** > **llama.cpp**:
<br/>
![llama.cpp](./_assets/llama.cpp-01.png)
![llama.cpp](./_assets/llama.cpp-01-updated.png)
<br/>
These settings are for advanced users, you would want to check these settings when:
@ -151,6 +151,7 @@ For detailed hardware compatibility, please visit our guide for [Mac](/docs/desk
| **Caching** | - Enable to store recent prompts and responses<br></br>- Improves response time for repeated prompts | Enabled |
| **KV Cache Type** | - KV cache implementation type; controls memory usage and precision trade-off<br></br>- Options:<br></br>• f16 (most stable)<br></br>• q8_0 (balanced)<br></br>• q4_0 (lowest memory) | f16 |
| **mmap** | - Enables memory-mapped model loading<br></br>- Reduces memory usage<br></br>- Recommended for large models | Enabled |
| **Context Shift** | - Automatically shifts the context window when the model is unable to process the entire prompt<br/> - Ensures that the most relevant information is always included <br/> - Recommended for long conversations and multiple tool calls | Disabled |
## Best Practices

View File

@ -21,6 +21,49 @@ import { Callout, Steps } from 'nextra/components'
# Using the Model Context Protocol (MCP) in Jan
```mermaid
graph TD
subgraph "What is MCP?"
You[You using Jan Desktop]
Claude[Jan AI Assistant]
subgraph "Your Connected Tools"
Files[📁 Your Files<br/>Documents, folders,<br/>text files]
Database[📊 Your Data<br/>Spreadsheets,<br/>databases]
WebServices[🌐 Online Services<br/>GitHub, Slack,<br/>Google Drive]
Custom[🔧 Custom Tools<br/>Special programs<br/>you've added]
end
subgraph "What Jan Can Do"
Read[Read & Understand<br/>- View your files<br/>- Check your data<br/>- See updates]
Action[Take Actions<br/>- Search for info<br/>- Create content<br/>- Run commands]
Templates[Use Templates<br/>- Common tasks<br/>- Saved prompts<br/>- Workflows]
end
end
You --> Claude
Claude -->|"Can I see this file?"| Files
Claude -->|"What's in my database?"| Database
Claude -->|"Check my GitHub"| WebServices
Claude -->|"Run this tool"| Custom
Files --> Read
Database --> Read
WebServices --> Action
Custom --> Templates
style You fill:transparent
style Claude fill:transparent
style Files fill:transparent
style Database fill:transparent
style WebServices fill:transparent
style Custom fill:transparent
style Read fill:transparent
style Action fill:transparent
style Templates fill:transparent
```
Jan now supports the **Model Context Protocol (MCP)**, an open standard designed to allow language models to
interact with external tools and data sources.

View File

@ -1,10 +0,0 @@
{
"overview": {
"title": "Overview",
"href": "/docs/menlo-models/overview"
},
"jan-nano": {
"title": "Jan Nano",
"href": "/docs/menlo-models/jan-nano"
}
}

View File

@ -1,40 +0,0 @@
---
title: Overview
description: Jan Models
keywords:
[
Jan,
Jan Models,
Jan Model,
Jan Model List,
Menlo Models,
Menlo Model,
Jan-Nano-Gguf,
ReZero,
Model Context Protocol,
MCP,
]
---
# Menlo Models
At Menlo, we have focused on creating a series of models that are optimized for all sorts of tasks, including
web search, deep research, robotic control, and using MCPs. Our latest model, Jan-Nano-Gguf, is available in Jan
right now providing excellent results on tasks that use MCPs.
You can have a look at all of our models, and download them from the HuggingFace [Menlo Models page](https://huggingface.co/Menlo).
## Jan-Nano-Gguf (Available in Jan right now 🚀)
![Jan Nano](../_assets/jan-nano0.png)
Jan-Nano-Gguf is a 4-billion parameter model that is optimized for deep research tasks. It has been trained on a
variety of datasets and is designed to be used with the Model Context Protocol (MCP) servers.
## ReZero
ReZero (Retry-Zero) is a reinforcement learning framework that improves RAG systems by rewarding LLMs for retrying
failed queries. Traditional RAG approaches struggle when initial searches fail, but ReZero encourages persistence and
alternative strategies. This increases accuracy from 25% to 46.88% in complex information-seeking tasks.

View File

@ -38,8 +38,6 @@ These settings are available in the model settings modal:
| **Repeat Last N** | Number of tokens to consider for repeat penalty. |
| **Repeat Penalty** | Penalize repeating token sequences. |
| **Presence Penalty**| Penalize alpha presence (encourages new topics). |
| **Max Tokens** | Maximum length of the model's response. |
| **Stop Sequences** | Tokens or phrases that will end the model's response. |
| **Frequency Penalty** | Reduces word repetition. |
<br/>

View File

@ -91,7 +91,7 @@ Install all required dependencies and drivers before enabling GPU acceleration.
### Step 4: Customize Assistant Instructions
With your model ready to roll, you can tailor how it responds by tweaking instructions or model configurations
in [Assistant.](/docs/assistants).
through the [Assistants feature](/docs/assistants).
<br/>
@ -104,7 +104,7 @@ these is that you can use them no matter which model you choose.
<br/>
![Add an Assistant Instruction](./_assets/add_assistant.png)
![Add an Assistant Instruction](./_assets/assistant-edit-dialog.png)
<br/>

View File

@ -36,11 +36,15 @@ Follow this [guide](https://continue.dev/docs/quickstart) to install the Continu
To set up Continue for use with Jan's Local Server, you must activate the Jan API Server with your chosen model.
1. Press the `<>` button. Jan will take you to the **Local API Server** section.
1. Press the `⚙️ Settings` button.
2. Setup the server, which includes the **IP Port**, **Cross-Origin-Resource-Sharing (CORS)** and **Verbose Server Logs**.
2. Locate `Local API Server`.
3. Press the **Start Server** button
3. Setup the server, which includes the **IP Port**, **Cross-Origin-Resource-Sharing (CORS)** and **Verbose Server Logs**.
4. Include your user-defined API Key.
5. Press the **Start Server** button
### Step 3: Configure Continue to Use Jan's Local Server
@ -64,30 +68,35 @@ To set up Continue for use with Jan's Local Server, you must activate the Jan AP
</Tabs.Tab>
</Tabs>
```json title="~/.continue/config.json"
{
"models": [
{
"title": "Jan",
"provider": "openai",
"model": "mistral-ins-7b-q4",
"apiKey": "EMPTY",
"apiBase": "http://localhost:1337/v1"
}
]
}
```yaml title="~/.continue/config.yaml"
name: Local Assistant
version: 1.0.0
schema: v1
models:
- name: Jan
provider: openai
model: #MODEL_NAME (e.g. qwen3:0.6b)
apiKey: #YOUR_USER_DEFINED_API_KEY_HERE (e.g. hello)
apiBase: http://localhost:1337/v1
context:
- provider: code
- provider: docs
- provider: diff
- provider: terminal
- provider: problems
- provider: folder
- provider: codebase
```
2. Ensure the file has the following configurations:
- Ensure `openai` is selected as the `provider`.
- Match the `model` with the one enabled in the Jan API Server.
- Set `apiBase` to `http://localhost:1337`.
- Leave the `apiKey` field to `EMPTY`.
- Set `apiBase` to `http://localhost:1337/v1`.
### Step 4: Ensure the Using Model Is Activated in Jan
1. Navigate to `Settings` > `My Models`.
2. Click the **three dots (⋮)** button.
1. Navigate to `Settings` > `Model Providers`.
2. Under Llama.cpp, find the model that you would want to use.
3. Select the **Start Model** button to activate the model.
</Steps>

View File

@ -1,70 +0,0 @@
---
title: Open Interpreter
description: A step-by-step guide on integrating Jan with Open Interpreter.
keywords:
[
Jan,
Customizable Intelligence, LLM,
local AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language models,
Open Interpreter integration,
Open Interpreter,
]
---
import { Callout, Steps } from 'nextra/components'
# Open Interpreter
## Integrate Open Interpreter with Jan
[Open Interpreter](https://github.com/KillianLucas/open-interpreter/) lets LLMs run code (Python, Javascript, Shell, and more) locally. After installing, you can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running `interpreter`. To integrate Open Interpreter with Jan, follow the steps below:
<Steps>
### Step 1: Install Open Interpreter
1. Install Open Interpreter by running:
```bash
pip install open-interpreter
```
2. A Rust compiler is required to install Open Interpreter. If not already installed, run the following command or go to [this page](https://rustup.rs/) if you are running on Windows:
```bash
sudo apt install rustc
```
<Callout type='info'>
The Rust compiler is necessary for building some native extensions that Open Interpreter requires.
</Callout>
### Step 2: Configure Jan's Local API Server
Before using Open Interpreter, configure the model in `Settings` > `My Model` for Jan and activate its local API server.
#### Enabling Jan API Server
1. Click the `<>` button to access the **Local API Server** section in Jan.
2. Configure the server settings, including **IP Port**, **Cross-Origin-Resource-Sharing (CORS)**, and **Verbose Server Logs**.
3. Click **Start Server**.
### Step 3: Set the Open Interpreter Environment
1. For integration, provide the API Base (`http://localhost:1337/v1`) and the model ID (e.g., `mistral-ins-7b-q4`) when running Open Interpreter. For example, see the code below:
```zsh
interpreter --api_base http://localhost:1337/v1 --model mistral-ins-7b-q4
```
> **Open Interpreter is now ready for use!**
</Steps>

View File

@ -33,7 +33,7 @@ bottom left of Jan.
2. Select your preferred model in **Model Selector** in input field & start chatting.
<br/>
![Create New Thread](./_assets/threads-new-chat.png)
![Create New Thread](./_assets/threads-new-chat-updated.png)
## View Your Chat History
@ -51,7 +51,7 @@ thread and a context menu will pop up with the favorite option for you to click
- **Recents**: See your most recently accessed threads for quick navigation.
<br/>
![Favorites and Recents](./_assets/threads-favorites-and-recents.png)
![Favorites and Recents](./_assets/threads-favorites-and-recents-updated.png)
## Edit a Chat Title
@ -61,7 +61,7 @@ thread and a context menu will pop up with the favorite option for you to click
4. Add new title & save
<br/>
![Context Menu](./_assets/threads-context-menu.png)
![Context Menu](./_assets/threads-context-menu-updated.png)
## Delete Thread
@ -77,7 +77,7 @@ When you want to completely remove a thread:
<br/>
![Delete Thread](./_assets/threads-context-menu.png)
![Delete Thread](./_assets/threads-context-menu-updated.png)
### Delete all threads at once

View File

@ -0,0 +1,9 @@
{
"-- Switcher": {
"type": "separator",
"title": "Switcher"
},
"index": {
"display": "hidden"
}
}

View File

@ -0,0 +1,87 @@
---
title: Coming Soon
description: Exciting new features and platforms are on the way. Stay tuned for Jan Web, Jan Mobile, and our API Platform.
keywords:
[
Jan,
Customizable Intelligence, LLM,
local AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language models,
coming soon,
Jan Web,
Jan Mobile,
API Platform,
]
---
import { Callout } from 'nextra/components'
<div className="text-center py-12">
<div className="mb-8">
<h1 className="text-4xl font-bold bg-gradient-to-r from-blue-600 to-purple-600 bg-clip-text text-transparent mb-4 py-2">
🚀 Coming Soon
</h1>
<p className="text-xl text-gray-600 dark:text-gray-300 max-w-2xl mx-auto">
We're working on the next stage of Jan - making our local assistant more powerful and available on more platforms.
</p>
</div>
<div className="grid grid-cols-1 md:grid-cols-3 gap-6 max-w-4xl mx-auto mb-12">
<div className="p-6 border border-gray-200 dark:border-gray-700 rounded-lg bg-gradient-to-br from-blue-50 to-indigo-50 dark:from-blue-900/20 dark:to-indigo-900/20">
<div className="text-3xl mb-3">🌐</div>
<h3 className="text-lg font-semibold mb-2">Jan Web</h3>
<p className="text-sm text-gray-600 dark:text-gray-400">
Access Jan directly from your browser with our powerful web interface
</p>
</div>
<div className="p-6 border border-gray-200 dark:border-gray-700 rounded-lg bg-gradient-to-br from-green-50 to-emerald-50 dark:from-green-900/20 dark:to-emerald-900/20">
<div className="text-3xl mb-3">📱</div>
<h3 className="text-lg font-semibold mb-2">Jan Mobile</h3>
<p className="text-sm text-gray-600 dark:text-gray-400">
Take Jan on the go with our native mobile applications
</p>
</div>
<div className="p-6 border border-gray-200 dark:border-gray-700 rounded-lg bg-gradient-to-br from-purple-50 to-pink-50 dark:from-purple-900/20 dark:to-pink-900/20">
<div className="text-3xl mb-3">⚡</div>
<h3 className="text-lg font-semibold mb-2">API Platform</h3>
<p className="text-sm text-gray-600 dark:text-gray-400">
Integrate Jan's capabilities into your applications with our API
</p>
</div>
</div>
<Callout type="info">
**Stay Updated**: Follow our [GitHub repository](https://github.com/menloresearch/jan) and join our [Discord community](https://discord.com/invite/FTk2MvZwJH) for the latest updates on these exciting releases!
</Callout>
<div className="mt-12">
<h2 className="text-2xl font-semibold mb-6">What to Expect</h2>
<div className="text-left max-w-2xl mx-auto space-y-4">
<div className="flex items-start gap-3">
<span className="text-green-500 text-xl">✓</span>
<div>
<strong>Seamless Experience:</strong> Unified interface across all platforms
</div>
</div>
<div className="flex items-start gap-3">
<span className="text-green-500 text-xl">✓</span>
<div>
<strong>Privacy First:</strong> Same privacy-focused approach you trust
</div>
</div>
<div className="flex items-start gap-3">
<span className="text-green-500 text-xl">✓</span>
<div>
<strong>Developer Friendly:</strong> Robust APIs and comprehensive documentation
</div>
</div>
</div>
</div>
</div>

View File

@ -65,6 +65,54 @@ const config: DocsThemeConfig = {
</div>
),
},
sidebar: {
titleComponent: ({ type, title }) => {
// eslint-disable-next-line react-hooks/rules-of-hooks
const { asPath } = useRouter()
if (type === 'separator' && title === 'Switcher') {
return (
<div className="-mx-2 hidden md:block">
{[
{ title: 'Jan', path: '/docs', Icon: LibraryBig },
{
title: 'Jan Web',
path: '/platforms',
Icon: BrainCircuit,
},
{ title: 'Jan Mobile', path: '/platforms', Icon: Blocks },
{
title: 'API Platform',
path: '/platforms',
Icon: Computer,
},
].map((item) =>
asPath.startsWith(item.path) ? (
<div
key={item.path}
className="group mb-3 flex flex-row items-center gap-3 nx-text-primary-800 dark:nx-text-primary-600"
>
<item.Icon className="w-7 h-7 p-1 border border-gray-200 dark:border-gray-700 rounded nx-bg-primary-100 dark:nx-bg-primary-400/10" />
{item.title}
</div>
) : (
<Link
href={item.path}
key={item.path}
className="group mb-3 flex flex-row items-center gap-3 text-gray-500 hover:text-primary/100"
>
<item.Icon className="w-7 h-7 p-1 border rounded border-gray-200 dark:border-gray-700" />
{item.title}
</Link>
)
)}
</div>
)
}
return title
},
defaultMenuCollapseLevel: 1,
toggleButton: true,
},
toc: {
backToTop: true,
},
@ -83,14 +131,14 @@ const config: DocsThemeConfig = {
name="description"
content={
frontMatter?.description ||
`Run LLMs like Mistral or Llama2 locally and offline on your computer, or connect to remote AI APIs like OpenAIs GPT-4 or Groq.`
`Run LLMs like Mistral or Llama2 locally and offline on your computer, or connect to remote AI APIs like OpenAI's GPT-4 or Groq.`
}
/>
<meta
name="og:description"
content={
frontMatter?.description ||
`Run LLMs like Mistral or Llama2 locally and offline on your computer, or connect to remote AI APIs like OpenAIs GPT-4 or Groq.`
`Run LLMs like Mistral or Llama2 locally and offline on your computer, or connect to remote AI APIs like OpenAI's GPT-4 or Groq.`
}
/>
<link

View File

@ -5503,10 +5503,10 @@ lru-cache@^4.0.1:
pseudomap "^1.0.2"
yallist "^2.1.2"
lucide-react@^0.372.0:
version "0.372.0"
resolved "https://registry.npmjs.org/lucide-react/-/lucide-react-0.372.0.tgz"
integrity sha512-0cKdqmilHXWUwWAWnf6CrrjHD8YaqPMtLrmEHXolZusNTr9epULCsiJwIOHk2q1yFxdEwd96D4zShlAj67UJdA==
lucide-react@^0.522.0:
version "0.522.0"
resolved "https://registry.yarnpkg.com/lucide-react/-/lucide-react-0.522.0.tgz#c0951dd32936b6a7bcc474a829a251fede0bdfbd"
integrity sha512-jnJbw974yZ7rQHHEFKJOlWAefG3ATSCZHANZxIdx8Rk/16siuwjgA4fBULpXEAWx/RlTs3FzmKW/udWUuO0aRw==
lz-string@^1.5.0:
version "1.5.0"

View File

@ -13,7 +13,7 @@
},
"devDependencies": {
"cpx": "^1.5.0",
"rimraf": "^3.0.2",
"rimraf": "^6.0.1",
"rolldown": "1.0.0-beta.1",
"run-script-os": "^1.1.6",
"typescript": "^5.3.3"

View File

@ -17,7 +17,7 @@
},
"devDependencies": {
"cpx": "^1.5.0",
"rimraf": "^3.0.2",
"rimraf": "^6.0.1",
"rolldown": "1.0.0-beta.1",
"ts-loader": "^9.5.0",
"typescript": "^5.7.2"

View File

@ -13,10 +13,10 @@
},
"devDependencies": {
"cpx": "^1.5.0",
"rimraf": "^3.0.2",
"rimraf": "^6.0.1",
"rolldown": "1.0.0-beta.1",
"run-script-os": "^1.1.6",
"typescript": "5.3.3",
"typescript": "5.8.3",
"vitest": "^3.0.6"
},
"files": [

View File

@ -280,7 +280,7 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine {
...(model.id.toLowerCase().includes('jan-nano')
? { reasoning_budget: 0 }
: { reasoning_budget: this.reasoning_budget }),
...(this.context_shift === false
...(this.context_shift !== true // explicit true required to enable context shift
? { 'no-context-shift': true }
: {}),
...(modelSettings.ngl === -1 || modelSettings.ngl === undefined

View File

@ -28,7 +28,7 @@ type Data<T> = {
/**
* Default model sources
*/
const defaultModelSources = ['Menlo/Jan-nano-gguf']
const defaultModelSources = ['Menlo/Jan-nano-gguf', 'Menlo/Jan-nano-128k-gguf']
/**
* A extension for models

209
mise.toml Normal file
View File

@ -0,0 +1,209 @@
[tools]
node = "20"
rust = "1.85.1"
sccache = "latest"
[env]
_.path = ['./node_modules/.bin']
RUSTC_WRAPPER="sccache"
# ============================================================================
# CORE SETUP AND CONFIGURATION TASKS
# ============================================================================
[tasks.config-yarn]
description = "Configure yarn version and settings"
run = [
"corepack enable",
"corepack prepare yarn@4.5.3 --activate",
"yarn --version",
"yarn config set -H enableImmutableInstalls false"
]
[tasks.install]
description = "Install dependencies"
depends = ["config-yarn"]
run = "yarn install"
sources = ['package.json', 'yarn.lock']
outputs = ['node_modules']
[tasks.build-core]
description = "Build core package"
depends = ["install"]
run = "yarn build:core"
sources = ['core/**/*']
outputs = ['core/dist']
[tasks.build-extensions]
description = "Build extensions"
depends = ["build-core"]
run = "yarn build:extensions"
sources = ['extensions/**/*']
outputs = ['pre-install/*.tgz']
[tasks.install-and-build]
description = "Install dependencies and build core and extensions (matches Makefile)"
depends = ["build-extensions"]
# ============================================================================
# DEVELOPMENT TASKS
# ============================================================================
[tasks.dev]
description = "Start development server (matches Makefile)"
depends = ["install-and-build"]
run = [
"yarn install:cortex",
"yarn download:bin",
"yarn copy:lib",
"yarn dev"
]
[tasks.dev-tauri]
description = "Start development server with Tauri (DEPRECATED - matches Makefile)"
depends = ["install-and-build"]
run = [
"yarn install:cortex",
"yarn download:bin",
"yarn copy:lib",
"yarn dev:tauri"
]
# ============================================================================
# BUILD TASKS
# ============================================================================
[tasks.build]
description = "Build complete application (matches Makefile)"
depends = ["install-and-build"]
run = "yarn build"
[tasks.build-tauri]
description = "Build Tauri application (DEPRECATED - matches Makefile)"
depends = ["install-and-build"]
run = [
"yarn copy:lib",
"yarn build"
]
[tasks.build-and-publish]
description = "Build and publish the application (matches Makefile)"
depends = ["install-and-build"]
run = "yarn build"
# ============================================================================
# QUALITY ASSURANCE TASKS
# ============================================================================
[tasks.lint]
description = "Run linting (matches Makefile)"
depends = ["build-extensions"]
run = "yarn lint"
[tasks.test]
description = "Run test suite (matches Makefile)"
depends = ["lint"]
run = "yarn test"
# ============================================================================
# PARALLEL-FRIENDLY QUALITY ASSURANCE TASKS
# ============================================================================
[tasks.lint-only]
description = "Run linting only (parallel-friendly)"
depends = ["build-extensions"]
run = "yarn lint"
hide = true
[tasks.test-only]
description = "Run tests only (parallel-friendly)"
depends = ["build-extensions"]
run = "yarn test"
hide = true
[tasks.qa-parallel]
description = "Run linting and testing in parallel"
depends = ["lint-only", "test-only"]
# ============================================================================
# UTILITY TASKS
# ============================================================================
[tasks.clean]
description = "Clean all build artifacts and dependencies (cross-platform - matches Makefile)"
run = '''
#!/usr/bin/env bash
echo "Cleaning build artifacts and dependencies..."

# Platform detection and cleanup (matches Makefile exactly)
if [[ "$OSTYPE" == "msys" || "$OSTYPE" == "win32" ]]; then
    # Windows cleanup using PowerShell (matches Makefile)
    powershell -Command "Get-ChildItem -Path . -Include node_modules, .next, dist, build, out, .turbo, .yarn -Recurse -Directory | Remove-Item -Recurse -Force" 2>/dev/null || true
    powershell -Command "Get-ChildItem -Path . -Include package-lock.json, tsconfig.tsbuildinfo -Recurse -File | Remove-Item -Recurse -Force" 2>/dev/null || true
    powershell -Command "Remove-Item -Recurse -Force ./pre-install/*.tgz" 2>/dev/null || true
    powershell -Command "Remove-Item -Recurse -Force ./extensions/*/*.tgz" 2>/dev/null || true
    powershell -Command "Remove-Item -Recurse -Force ./electron/pre-install/*.tgz" 2>/dev/null || true
    powershell -Command "Remove-Item -Recurse -Force ./src-tauri/resources" 2>/dev/null || true
    powershell -Command "Remove-Item -Recurse -Force ./src-tauri/target" 2>/dev/null || true
    powershell -Command "if (Test-Path \"\$(\$env:USERPROFILE)\\jan\\extensions\\\") { Remove-Item -Path \"\$(\$env:USERPROFILE)\\jan\\extensions\" -Recurse -Force }" 2>/dev/null || true
elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
    # Linux cleanup (matches Makefile)
    # NOTE(review): unlike the Windows branch, this branch does not remove
    # tsconfig.tsbuildinfo files — confirm against the Makefile.
    find . -name "node_modules" -type d -prune -exec rm -rf '{}' + 2>/dev/null || true
    find . -name ".next" -type d -exec rm -rf '{}' + 2>/dev/null || true
    find . -name "dist" -type d -exec rm -rf '{}' + 2>/dev/null || true
    find . -name "build" -type d -exec rm -rf '{}' + 2>/dev/null || true
    find . -name "out" -type d -exec rm -rf '{}' + 2>/dev/null || true
    find . -name ".turbo" -type d -exec rm -rf '{}' + 2>/dev/null || true
    find . -name ".yarn" -type d -exec rm -rf '{}' + 2>/dev/null || true
    find . -name "package-lock.json" -type f -exec rm -rf '{}' + 2>/dev/null || true
    rm -rf ./pre-install/*.tgz 2>/dev/null || true
    rm -rf ./extensions/*/*.tgz 2>/dev/null || true
    rm -rf ./electron/pre-install/*.tgz 2>/dev/null || true
    rm -rf ./src-tauri/resources 2>/dev/null || true
    rm -rf ./src-tauri/target 2>/dev/null || true
    rm -rf ~/jan/extensions 2>/dev/null || true
    # Must stay unquoted so tilde expansion and the glob both apply; the
    # previous quoted form ("~/.cache/jan*") was passed to rm literally and
    # never matched anything.
    rm -rf ~/.cache/jan* 2>/dev/null || true
else
    # macOS cleanup (matches Makefile)
    find . -name "node_modules" -type d -prune -exec rm -rf '{}' + 2>/dev/null || true
    find . -name ".next" -type d -exec rm -rf '{}' + 2>/dev/null || true
    find . -name "dist" -type d -exec rm -rf '{}' + 2>/dev/null || true
    find . -name "build" -type d -exec rm -rf '{}' + 2>/dev/null || true
    find . -name "out" -type d -exec rm -rf '{}' + 2>/dev/null || true
    find . -name ".turbo" -type d -exec rm -rf '{}' + 2>/dev/null || true
    find . -name ".yarn" -type d -exec rm -rf '{}' + 2>/dev/null || true
    find . -name "package-lock.json" -type f -exec rm -rf '{}' + 2>/dev/null || true
    rm -rf ./pre-install/*.tgz 2>/dev/null || true
    rm -rf ./extensions/*/*.tgz 2>/dev/null || true
    rm -rf ./electron/pre-install/*.tgz 2>/dev/null || true
    rm -rf ./src-tauri/resources 2>/dev/null || true
    rm -rf ./src-tauri/target 2>/dev/null || true
    rm -rf ~/jan/extensions 2>/dev/null || true
    rm -rf ~/Library/Caches/jan* 2>/dev/null || true
fi

echo "Clean completed!"
'''
[tasks.all]
description = "Default target - shows available commands (matches Makefile)"
run = "echo 'Specify a target to run. Use: mise tasks'"
# ============================================================================
# DEVELOPMENT WORKFLOW SHORTCUTS
# ============================================================================
[tasks.setup]
description = "Complete development setup"
depends = ["install-and-build"]
alias = "init"
[tasks.ci]
description = "Run CI pipeline (lint + test sequentially)"
depends = ["test"]
[tasks.ci-parallel]
description = "Run CI pipeline (lint + test in parallel)"
depends = ["qa-parallel"]
alias = "ci-fast"

View File

@ -4,7 +4,8 @@
"workspaces": {
"packages": [
"core",
"web-app"
"web-app",
"tests-e2e-js"
]
},
"scripts": {
@ -13,6 +14,11 @@
"build": "yarn build:web && yarn build:tauri",
"test": "yarn workspace @janhq/web-app test",
"test:coverage": "yarn workspace @janhq/web-app test",
"test:prepare": "yarn build:icon && yarn copy:lib && yarn copy:assets:tauri && yarn build --no-bundle ",
"test:e2e:linux": "yarn test:prepare && xvfb-run yarn workspace tests-e2-js test",
"test:e2e:win32": "yarn test:prepare && yarn workspace tests-e2-js test",
"test:e2e:darwin": "echo 'E2E tests are not supported on macOS yet due to WebDriver limitations'",
"test:e2e": "run-script-os",
"dev:web": "yarn workspace @janhq/web-app dev",
"dev:tauri": "CLEAN=true yarn build:icon && yarn copy:assets:tauri && tauri dev",
"install:cortex:linux:darwin": "cd src-tauri/binaries && ./download.sh",
@ -39,7 +45,7 @@
"cpx": "^1.5.0",
"cross-env": "^7.0.3",
"husky": "^9.1.5",
"jest": "^29.7.0",
"jest": "^30.0.3",
"jest-environment-jsdom": "^29.7.0",
"rimraf": "^3.0.2",
"run-script-os": "^1.1.6",

View File

@ -0,0 +1,345 @@
/**
* Script to find missing i18n keys in Jan components
*
* Usage:
* node scripts/find-missing-i18n-key.js [options]
*
* Options:
* --locale=<locale> Only check a specific locale (e.g. --locale=id)
* --file=<file> Only check a specific file (e.g. --file=common.json)
* --help Show this help message
*/
const fs = require('fs')
const path = require('path')
// Parse command-line flags into a flat options object.
// Recognized flags: --help, --locale=<locale>, --file=<file>.
const args = {}
for (const token of process.argv.slice(2)) {
  if (token === '--help') {
    args.help = true
  } else if (token.startsWith('--locale=')) {
    args.locale = token.split('=')[1]
  } else if (token.startsWith('--file=')) {
    args.file = token.split('=')[1]
  }
}
// When --help was passed, print the usage banner and exit successfully
// before any filesystem scanning happens.
// NOTE: the banner below is user-facing output; keep the option list in
// sync with the flag parsing at the top of this script.
if (args.help) {
  console.log(`
Find missing i18n translations in Jan
A useful script to identify whether the i18n keys used in component files exist in all language files.
Usage:
node scripts/find-missing-i18n-key.js [options]
Options:
--locale=<locale> Only check a specific language (e.g., --locale=id)
--file=<file> Only check a specific file (e.g., --file=common.json)
--help Display help information
Output:
- Generate a report of missing translations
`)
  process.exit(0)
}
// Directories to traverse and their corresponding locales
const DIRS = {
components: {
path: path.join(__dirname, '../web-app/src/components'),
localesDir: path.join(__dirname, '../web-app/src/locales'),
},
containers: {
path: path.join(__dirname, '../web-app/src/containers'),
localesDir: path.join(__dirname, '../web-app/src/locales'),
},
routes: {
path: path.join(__dirname, '../web-app/src/routes'),
localesDir: path.join(__dirname, '../web-app/src/locales'),
},
}
// Regular expressions that extract i18n keys from source files.
// Each pattern captures the key itself in group 1.
const JSX_EXPRESSION_PATTERN = /{t\("([^"]+)"\)}/g // {t("key")}
const TRANS_PROP_PATTERN = /i18nKey="([^"]+)"/g // i18nKey="key"
// t("key") or t('key'), with optional extra arguments after the key.
const T_CALL_PATTERN = /\bt\(\s*["']([^"']+)["']\s*(?:,\s*[^)]+)?\)/g

const i18nPatterns = [JSX_EXPRESSION_PATTERN, TRANS_PROP_PATTERN, T_CALL_PATTERN]
// List the locale subdirectories of `localesDir`, honoring the --locale
// CLI filter when one was given. A missing locales directory is reported
// with a warning and yields an empty list instead of throwing.
function getLocaleDirs(localesDir) {
  try {
    const locales = fs
      .readdirSync(localesDir)
      .filter((entry) => fs.statSync(path.join(localesDir, entry)).isDirectory())
    if (!args.locale) return locales
    return locales.filter((locale) => locale === args.locale)
  } catch (error) {
    if (error.code === 'ENOENT') {
      console.warn(`Warning: Locales directory not found: ${localesDir}`)
      return []
    }
    throw error
  }
}
// Resolve a dot-separated path (e.g. "a.b.c") against a nested object.
// Yields undefined as soon as an intermediate value is null or undefined;
// otherwise returns whatever sits at the final segment (which may itself
// be null).
function getValueByPath(obj, path) {
  return path
    .split('.')
    .reduce((node, segment) => (node == null ? undefined : node[segment]), obj)
}
// Namespaces recognized when a key uses the "namespace.rest" dot form.
const KNOWN_NAMESPACES = [
  'common',
  'settings',
  'systemMonitor',
  'chat',
  'hub',
  'providers',
  'assistants',
  'mcpServers',
  'mcp-servers',
  'toolApproval',
  'tool-approval',
  'updater',
  'setup',
  'logs',
  'provider',
  'model-errors',
]

// Namespaces whose locale file name differs from the namespace itself.
const NAMESPACE_TO_FILE = {
  'systemMonitor': 'system-monitor',
  'mcpServers': 'mcp-servers',
  'mcp-servers': 'mcp-servers',
  'toolApproval': 'tool-approval',
  'tool-approval': 'tool-approval',
  'model-errors': 'model-errors',
}

// Split a raw key into its namespace and the path looked up inside that
// namespace's JSON file. Supports "ns:key.path", "ns.key.path" (only for
// known namespaces), and bare keys, which default to "common".
// NOTE(review): split(':', 2) drops anything after a second colon
// ("ns:a:b" -> keyPath "a") — presumably keys never contain extra colons;
// confirm if such keys exist.
function resolveKey(key) {
  if (key.includes(':')) {
    const [namespace, keyPath] = key.split(':', 2)
    return { namespace, keyPath }
  }
  if (key.includes('.')) {
    const parts = key.split('.')
    if (KNOWN_NAMESPACES.includes(parts[0])) {
      return { namespace: parts[0], keyPath: parts.slice(1).join('.') }
    }
  }
  // No recognizable namespace prefix: treat the whole key as a path in
  // the common namespace.
  return { namespace: 'common', keyPath: key }
}

// Check that `key` exists in every locale's translation file.
// Returns the list of "<locale>/<file>.json" entries where the key is
// missing, or where the file itself is absent or unparseable.
function checkKeyInLocales(key, localeDirs, localesDir) {
  const { namespace, keyPath } = resolveKey(key)
  const fileName = NAMESPACE_TO_FILE[namespace] || namespace
  const missingLocales = []

  localeDirs.forEach((locale) => {
    const filePath = path.join(localesDir, locale, `${fileName}.json`)
    if (!fs.existsSync(filePath)) {
      missingLocales.push(`${locale}/${fileName}.json`)
      return
    }
    try {
      const json = JSON.parse(fs.readFileSync(filePath, 'utf8'))
      // Jan's localization files have flat structure
      // e.g., common.json has { "save": "Save", "cancel": "Cancel" }
      // not nested like { "common": { "save": "Save" } }
      const valueToCheck = getValueByPath(json, keyPath)
      if (valueToCheck === undefined) {
        missingLocales.push(`${locale}/${fileName}.json`)
      }
    } catch (error) {
      console.warn(`Warning: Could not parse ${filePath}: ${error.message}`)
      missingLocales.push(`${locale}/${fileName}.json`)
    }
  })

  return missingLocales
}
// Scan every configured source tree (DIRS) for i18n key usages and collect
// those that are missing from one or more locale files.
// Returns [{ key, missingLocales, file }] entries.
function findMissingI18nKeys() {
  const results = []

  // Paths containing any of these fragments are never scanned.
  const isExcluded = (candidate) =>
    ['.test.', '__mocks__', 'node_modules', '.spec.'].some((fragment) =>
      candidate.includes(fragment)
    )

  // A captured match only counts as a real i18n key when it is non-empty,
  // starts with a letter, and contains no template/interpolation syntax.
  const isUsableKey = (key) =>
    Boolean(key) &&
    !key.includes('${') &&
    !key.includes('{{') &&
    !key.startsWith('$') &&
    key.length >= 2 &&
    key !== '.' &&
    key !== ',' &&
    key !== '-' &&
    /^[a-zA-Z]/.test(key)

  function walk(dir, baseDir, localeDirs, localesDir) {
    if (!fs.existsSync(dir)) {
      console.warn(`Warning: Directory not found: ${dir}`)
      return
    }
    for (const entry of fs.readdirSync(dir)) {
      const filePath = path.join(dir, entry)
      const stat = fs.statSync(filePath)
      // Skip test files, __mocks__ directories, and node_modules.
      if (isExcluded(filePath)) {
        continue
      }
      if (stat.isDirectory()) {
        walk(filePath, baseDir, localeDirs, localesDir) // recurse into subdirectory
        continue
      }
      if (!stat.isFile()) {
        continue
      }
      if (!['.ts', '.tsx', '.js', '.jsx'].includes(path.extname(filePath))) {
        continue
      }
      const content = fs.readFileSync(filePath, 'utf8')
      // Extract every i18n key the known patterns can find in this file.
      for (const pattern of i18nPatterns) {
        let match
        while ((match = pattern.exec(content)) !== null) {
          const key = match[1]
          if (!isUsableKey(key)) {
            continue
          }
          const missingLocales = checkKeyInLocales(key, localeDirs, localesDir)
          if (missingLocales.length > 0) {
            results.push({
              key,
              missingLocales,
              file: path.relative(baseDir, filePath),
            })
          }
        }
      }
    }
  }

  // Walk through all configured directories.
  for (const [name, config] of Object.entries(DIRS)) {
    const localeDirs = getLocaleDirs(config.localesDir)
    if (localeDirs.length === 0) {
      continue
    }
    console.log(
      `\nChecking ${name} directory with ${
        localeDirs.length
      } languages: ${localeDirs.join(', ')}`
    )
    walk(config.path, config.path, localeDirs, config.localesDir)
  }
  return results
}
// Entry point: validate the requested locale (if any), run the scan, and
// print a per-file report of every missing key. Exits 1 when keys are
// missing (so CI can fail) or when an unexpected error occurs.
function main() {
  try {
    if (args.locale) {
      // Fail fast when --locale names a language that does not exist.
      const localesDir = path.join(__dirname, '../web-app/src/locales')
      const localeDirs = getLocaleDirs(localesDir)
      if (!localeDirs.includes(args.locale)) {
        console.error(
          `Error: Language '${args.locale}' not found in ${localesDir}`
        )
        process.exit(1)
      }
    }
    const missingKeys = findMissingI18nKeys()
    if (missingKeys.length === 0) {
      console.log('\n✅ All i18n keys are present!')
      return
    }
    console.log('\nMissing i18n keys:\n')
    // Group results by source file so the report is easier to scan.
    const groupedByFile = {}
    for (const { key, missingLocales, file } of missingKeys) {
      if (!groupedByFile[file]) {
        groupedByFile[file] = []
      }
      groupedByFile[file].push({ key, missingLocales })
    }
    for (const [file, keys] of Object.entries(groupedByFile)) {
      console.log(`📁 File: ${file}`)
      for (const { key, missingLocales } of keys) {
        console.log(` 🔑 Key: ${key}`)
        console.log(' ❌ Missing in:')
        for (const locale of missingLocales) {
          console.log(` - ${locale}`)
        }
        console.log('')
      }
      console.log('-------------------')
    }
    console.log('\n💡 To fix missing translations:')
    console.log('1. Add the missing keys to the appropriate locale files')
    console.log('2. Use yq commands for efficient updates:')
    console.log(
      ' yq -i \'.namespace.key = "Translation"\' web-app/src/locales/<locale>/<file>.json'
    )
    console.log('3. Run this script again to verify all keys are present')
    // Exit code 1 indicates missing keys
    process.exit(1)
  } catch (error) {
    console.error('Error:', error.message)
    console.error(error.stack)
    process.exit(1)
  }
}
main()

View File

@ -0,0 +1,268 @@
/**
* Script to find missing translations in locale files for Jan
*
* Usage:
* node scripts/find-missing-translations.js [options]
*
* Options:
* --locale=<locale> Only check a specific locale (e.g. --locale=id)
* --file=<file> Only check a specific file (e.g. --file=common.json)
* --help Show this help message
*/
const fs = require('fs')
const path = require('path')
// Parse the supported CLI flags (--help, --locale=<x>, --file=<y>) into a
// plain options object; unrecognized arguments are silently ignored.
const args = {}
for (const arg of process.argv.slice(2)) {
  if (arg === '--help') {
    args.help = true
  } else if (arg.startsWith('--locale=')) {
    args.locale = arg.split('=')[1]
  } else if (arg.startsWith('--file=')) {
    args.file = arg.split('=')[1]
  }
}
// Print usage information and exit before doing any work.
if (args.help) {
  console.log(`
Find Missing Translations for Jan
A utility script to identify missing translations across locale files.
Compares non-English locale files to the English ones to find any missing keys.
Usage:
node scripts/find-missing-translations.js [options]
Options:
--locale=<locale> Only check a specific locale (e.g. --locale=id)
--file=<file> Only check a specific file (e.g. --file=common.json)
--help Show this help message
Output:
- Generates a report of missing translations for the web-app
`)
  process.exit(0)
}
// Path to the locales directory
const LOCALES_DIR = path.join(__dirname, '../web-app/src/locales')
// Collect every dotted leaf key in a (possibly nested) object, depth-first,
// in property-insertion order. Objects recurse; primitives become leaves.
function findKeys(obj, parentKey = '') {
  return Object.entries(obj).flatMap(([key, value]) => {
    const fullKey = parentKey ? `${parentKey}.${key}` : key
    return typeof value === 'object' && value !== null
      ? findKeys(value, fullKey)
      : [fullKey]
  })
}
// Walk a dot-separated path through a nested object, returning undefined
// the moment a missing (null/undefined) intermediate value is hit.
function getValueAtPath(obj, path) {
  const segments = path.split('.')
  let cursor = obj
  for (let i = 0; i < segments.length; i += 1) {
    if (cursor === undefined || cursor === null) {
      return undefined
    }
    cursor = cursor[segments[i]]
  }
  return cursor
}
// Compare every non-English locale against the English source files and
// collect any keys missing from them. Returns the result of outputResults
// (true when at least one translation is missing). Exits 1 on bad CLI input
// or an unparseable JSON file.
function checkTranslations() {
  // Get all locale directories (or filter to the specified locale)
  const allLocales = fs.readdirSync(LOCALES_DIR).filter((item) => {
    const stats = fs.statSync(path.join(LOCALES_DIR, item))
    return stats.isDirectory() && item !== 'en' // Exclude English as it's our source
  })
  // Filter to the specified locale if provided
  const locales = args.locale
    ? allLocales.filter((locale) => locale === args.locale)
    : allLocales
  if (args.locale && locales.length === 0) {
    console.error(`Error: Locale '${args.locale}' not found in ${LOCALES_DIR}`)
    process.exit(1)
  }
  console.log(
    `Checking ${locales.length} non-English locale(s): ${locales.join(', ')}`
  )
  // Get all English JSON files
  const englishDir = path.join(LOCALES_DIR, 'en')
  let englishFiles = fs
    .readdirSync(englishDir)
    .filter((file) => file.endsWith('.json') && !file.startsWith('.'))
  // Filter to the specified file if provided
  if (args.file) {
    if (!englishFiles.includes(args.file)) {
      console.error(`Error: File '${args.file}' not found in ${englishDir}`)
      process.exit(1)
    }
    englishFiles = englishFiles.filter((file) => file === args.file)
  }
  // Load and parse each English file individually so a parse failure can name
  // the exact file that is broken (previously the error message printed the
  // directory path instead of the offending file).
  const englishFileContents = englishFiles.map((file) => {
    const filePath = path.join(englishDir, file)
    try {
      return {
        name: file,
        content: JSON.parse(fs.readFileSync(filePath, 'utf8')),
      }
    } catch (e) {
      console.error(`Error: File '${filePath}' is not a valid JSON file`)
      process.exit(1)
    }
  })
  console.log(
    `Checking ${
      englishFileContents.length
    } translation file(s): ${englishFileContents.map((f) => f.name).join(', ')}`
  )
  // Results object to store missing translations, keyed by locale then file.
  const missingTranslations = {}
  // For each locale, check for missing translations
  for (const locale of locales) {
    missingTranslations[locale] = {}
    for (const { name, content: englishContent } of englishFileContents) {
      const localeFilePath = path.join(LOCALES_DIR, locale, name)
      // Check if the file exists in the locale
      if (!fs.existsSync(localeFilePath)) {
        missingTranslations[locale][name] = { file: 'File is missing entirely' }
        continue
      }
      // Load the locale file
      let localeContent
      try {
        localeContent = JSON.parse(fs.readFileSync(localeFilePath, 'utf8'))
      } catch (e) {
        console.error(
          `Error: File '${localeFilePath}' is not a valid JSON file`
        )
        process.exit(1)
      }
      // Find all keys in the English file
      const englishKeys = findKeys(englishContent)
      // Check for missing keys in the locale file
      const missingKeys = []
      for (const key of englishKeys) {
        const englishValue = getValueAtPath(englishContent, key)
        const localeValue = getValueAtPath(localeContent, key)
        if (localeValue === undefined) {
          missingKeys.push({
            key,
            englishValue,
          })
        }
      }
      if (missingKeys.length > 0) {
        missingTranslations[locale][name] = missingKeys
      }
    }
  }
  return outputResults(missingTranslations)
}
// Print a human-readable report of the { locale: { file: missing } } map and
// return whether anything at all was missing.
function outputResults(missingTranslations) {
  let hasMissingTranslations = false
  console.log(`\nMissing Translations Report:\n`)
  for (const [locale, files] of Object.entries(missingTranslations)) {
    const fileNames = Object.keys(files)
    if (fileNames.length === 0) {
      console.log(`${locale}: No missing translations`)
      continue
    }
    hasMissingTranslations = true
    console.log(`📝 ${locale}:`)
    for (const fileName of fileNames) {
      const missingItems = files[fileName]
      if (missingItems.file) {
        // The whole file is absent for this locale.
        console.log(` - ${fileName}: ${missingItems.file}`)
        continue
      }
      console.log(
        ` - ${fileName}: ${missingItems.length} missing translations`
      )
      for (const { key, englishValue } of missingItems) {
        console.log(` ${key}: "${englishValue}"`)
      }
    }
    console.log('')
  }
  return hasMissingTranslations
}
// Top-level driver: run the check and, when translations are missing, print
// guidance for fixing them and exit non-zero so CI checks fail.
function findMissingTranslations() {
  try {
    console.log('Starting translation check for Jan web-app...')
    const hasMissingTranslations = checkTranslations()
    if (!hasMissingTranslations) {
      console.log('\n✅ All translations are complete!')
      return
    }
    console.log('\n✏ To add missing translations:')
    console.log('1. Add the missing keys to the corresponding locale files')
    console.log('2. Translate the English values to the appropriate language')
    console.log(
      '3. You can use yq commands to update JSON files efficiently:'
    )
    console.log(
      ' yq -i \'.namespace.key = "Translation"\' web-app/src/locales/<locale>/<file>.json'
    )
    console.log(
      '4. Run this script again to verify all translations are complete'
    )
    // Exit with error code to fail CI checks
    process.exit(1)
  } catch (error) {
    console.error('Error:', error.message)
    console.error(error.stack)
    process.exit(1)
  }
}
// Run the main function
findMissingTranslations()

View File

@ -19,8 +19,8 @@ tauri-build = { version = "2.0.2", features = [] }
serde_json = "1.0"
serde = { version = "1.0", features = ["derive"] }
log = "0.4"
tauri = { version = "2.4.0", features = [ "protocol-asset", "macos-private-api",
"test",
tauri = { version = "2.5.0", features = [ "protocol-asset", "macos-private-api",
"test"
] }
tauri-plugin-log = "2.0.0-rc"
tauri-plugin-shell = "2.2.0"

View File

@ -3,8 +3,12 @@ use rmcp::{service::RunningService, transport::TokioChildProcess, RoleClient, Se
use serde_json::{Map, Value};
use std::fs;
use std::{collections::HashMap, env, sync::Arc, time::Duration};
use tauri::{AppHandle, Emitter, Runtime, State};
use tokio::{process::Command, sync::Mutex, time::timeout};
use tauri::{AppHandle, Emitter, Manager, Runtime, State};
use tokio::{
process::Command,
sync::Mutex,
time::{sleep, timeout},
};
use super::{cmd::get_jan_data_folder_path, state::AppState};
@ -51,6 +55,58 @@ const DEFAULT_MCP_CONFIG: &str = r#"{
// Timeout for MCP tool calls (30 seconds)
const MCP_TOOL_CALL_TIMEOUT: Duration = Duration::from_secs(30);
// MCP server restart configuration with exponential backoff
const MCP_BASE_RESTART_DELAY_MS: u64 = 1000; // Start with 1 second
const MCP_MAX_RESTART_DELAY_MS: u64 = 30000; // Cap at 30 seconds
const MCP_BACKOFF_MULTIPLIER: f64 = 2.0; // Double the delay each time
/// Calculate exponential backoff delay with jitter
///
/// # Arguments
/// * `attempt` - The current restart attempt number (1-based)
///
/// # Returns
/// * `u64` - Delay in milliseconds, capped at MCP_MAX_RESTART_DELAY_MS
fn calculate_exponential_backoff_delay(attempt: u32) -> u64 {
use std::cmp;
// Calculate base exponential delay: base_delay * multiplier^(attempt-1)
let exponential_delay = (MCP_BASE_RESTART_DELAY_MS as f64)
* MCP_BACKOFF_MULTIPLIER.powi((attempt - 1) as i32);
// Cap the delay at maximum
let capped_delay = cmp::min(exponential_delay as u64, MCP_MAX_RESTART_DELAY_MS);
// Add jitter (±25% randomness) to prevent thundering herd
let jitter_range = (capped_delay as f64 * 0.25) as u64;
let jitter = if jitter_range > 0 {
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
// Use attempt number as seed for deterministic but varied jitter
let mut hasher = DefaultHasher::new();
attempt.hash(&mut hasher);
let hash = hasher.finish();
// Convert hash to jitter value in range [-jitter_range, +jitter_range]
let jitter_offset = (hash % (jitter_range * 2)) as i64 - jitter_range as i64;
jitter_offset
} else {
0
};
// Apply jitter while ensuring delay stays positive and within bounds
let final_delay = cmp::max(
100, // Minimum 100ms delay
cmp::min(
MCP_MAX_RESTART_DELAY_MS,
(capped_delay as i64 + jitter) as u64
)
);
final_delay
}
/// Runs MCP commands by reading configuration from a JSON file and initializing servers
///
/// # Arguments
@ -70,44 +126,359 @@ pub async fn run_mcp_commands<R: Runtime>(
"Load MCP configs from {}",
app_path_str.clone() + "/mcp_config.json"
);
let config_content = std::fs::read_to_string(app_path_str.clone() + "/mcp_config.json")
.map_err(|e| format!("Failed to read config file: {}", e))?;
let config_content = std::fs::read_to_string(app_path_str + "/mcp_config.json")
.map_err(|e| format!("Failed to read config file: {e}"))?;
let mcp_servers: serde_json::Value = serde_json::from_str(&config_content)
.map_err(|e| format!("Failed to parse config: {}", e))?;
.map_err(|e| format!("Failed to parse config: {e}"))?;
let server_map = mcp_servers
.get("mcpServers")
.and_then(Value::as_object)
.ok_or("No mcpServers found in config")?;
if let Some(server_map) = mcp_servers.get("mcpServers").and_then(Value::as_object) {
log::trace!("MCP Servers: {server_map:#?}");
// Collect handles for initial server startup
let mut startup_handles = Vec::new();
for (name, config) in server_map {
if let Some(false) = extract_active_status(config) {
if extract_active_status(config) == Some(false) {
log::trace!("Server {name} is not active, skipping.");
continue;
}
match start_mcp_server(
let app_clone = app.clone();
let servers_clone = servers_state.clone();
let name_clone = name.clone();
let config_clone = config.clone();
// Spawn task for initial startup attempt
let handle = tokio::spawn(async move {
// Only wait for the initial startup attempt, not the monitoring
let result = start_mcp_server_with_restart(
app_clone.clone(),
servers_clone.clone(),
name_clone.clone(),
config_clone.clone(),
Some(3), // Default max restarts for startup
).await;
// If initial startup failed, we still want to continue with other servers
if let Err(e) = &result {
log::error!("Initial startup failed for MCP server {}: {}", name_clone, e);
}
(name_clone, result)
});
startup_handles.push(handle);
}
// Wait for all initial startup attempts to complete
let mut successful_count = 0;
let mut failed_count = 0;
for handle in startup_handles {
match handle.await {
Ok((name, result)) => {
match result {
Ok(_) => {
log::info!("MCP server {} initialized successfully", name);
successful_count += 1;
}
Err(e) => {
log::error!("MCP server {} failed to initialize: {}", name, e);
failed_count += 1;
}
}
}
Err(e) => {
log::error!("Failed to join startup task: {}", e);
failed_count += 1;
}
}
}
log::info!(
"MCP server initialization complete: {} successful, {} failed",
successful_count,
failed_count
);
Ok(())
}
/// Monitor MCP server health without removing it from the HashMap
async fn monitor_mcp_server_handle(
servers_state: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>>,
name: String,
) -> Option<rmcp::service::QuitReason> {
log::info!("Monitoring MCP server {} health", name);
// Monitor server health with periodic checks
loop {
// Small delay between health checks
sleep(Duration::from_secs(5)).await;
// Check if server is still healthy by trying to list tools
let health_check_result = {
let servers = servers_state.lock().await;
if let Some(service) = servers.get(&name) {
// Try to list tools as a health check with a short timeout
match timeout(Duration::from_secs(2), service.list_all_tools()).await {
Ok(Ok(_)) => {
// Server responded successfully
true
}
Ok(Err(e)) => {
log::warn!("MCP server {} health check failed: {}", name, e);
false
}
Err(_) => {
log::warn!("MCP server {} health check timed out", name);
false
}
}
} else {
// Server was removed from HashMap (e.g., by deactivate_mcp_server)
log::info!("MCP server {} no longer in running services", name);
return Some(rmcp::service::QuitReason::Closed);
}
};
if !health_check_result {
// Server failed health check - remove it and return
log::error!("MCP server {} failed health check, removing from active servers", name);
let mut servers = servers_state.lock().await;
if let Some(service) = servers.remove(&name) {
// Try to cancel the service gracefully
let _ = service.cancel().await;
}
return Some(rmcp::service::QuitReason::Closed);
}
}
}
/// Starts an MCP server with restart monitoring (similar to cortex restart)
/// Returns the result of the first start attempt, then continues with restart monitoring
async fn start_mcp_server_with_restart<R: Runtime>(
app: AppHandle<R>,
servers_state: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>>,
name: String,
config: Value,
max_restarts: Option<u32>,
) -> Result<(), String> {
let app_state = app.state::<AppState>();
let restart_counts = app_state.mcp_restart_counts.clone();
let active_servers_state = app_state.mcp_active_servers.clone();
let successfully_connected = app_state.mcp_successfully_connected.clone();
// Store active server config for restart purposes
store_active_server_config(&active_servers_state, &name, &config).await;
let max_restarts = max_restarts.unwrap_or(5);
// Try the first start attempt and return its result
log::info!("Starting MCP server {} (Initial attempt)", name);
let first_start_result = schedule_mcp_start_task(
app.clone(),
servers_state.clone(),
name.clone(),
config.clone(),
)
.await
{
).await;
match first_start_result {
Ok(_) => {
log::info!("Server {name} activated successfully.");
log::info!("MCP server {} started successfully on first attempt", name);
reset_restart_count(&restart_counts, &name).await;
// Check if server was marked as successfully connected (passed verification)
let was_verified = {
let connected = successfully_connected.lock().await;
connected.get(&name).copied().unwrap_or(false)
};
if was_verified {
// Only spawn monitoring task if server passed verification
spawn_server_monitoring_task(
app,
servers_state,
name,
config,
max_restarts,
restart_counts,
successfully_connected,
).await;
Ok(())
} else {
// Server failed verification, don't monitor for restarts
log::error!("MCP server {} failed verification after startup", name);
Err(format!("MCP server {} failed verification after startup", name))
}
}
Err(e) => {
let _ = app.emit(
"mcp-error",
format!("Failed to activate MCP server {name}: {e}"),
);
log::error!("Failed to activate server {name}: {e}");
continue; // Skip to the next server
}
log::error!("Failed to start MCP server {} on first attempt: {}", name, e);
Err(e)
}
}
}
Ok(())
/// Helper function to handle the restart loop logic
async fn start_restart_loop<R: Runtime>(
app: AppHandle<R>,
servers_state: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>>,
name: String,
config: Value,
max_restarts: u32,
restart_counts: Arc<Mutex<HashMap<String, u32>>>,
successfully_connected: Arc<Mutex<HashMap<String, bool>>>,
) {
loop {
let current_restart_count = {
let mut counts = restart_counts.lock().await;
let count = counts.entry(name.clone()).or_insert(0);
*count += 1;
*count
};
if current_restart_count > max_restarts {
log::error!(
"MCP server {} reached maximum restart attempts ({}). Giving up.",
name,
max_restarts
);
if let Err(e) = app.emit("mcp_max_restarts_reached",
serde_json::json!({
"server": name,
"max_restarts": max_restarts
})
) {
log::error!("Failed to emit mcp_max_restarts_reached event: {e}");
}
break;
}
log::info!(
"Restarting MCP server {} (Attempt {}/{})",
name,
current_restart_count,
max_restarts
);
// Calculate exponential backoff delay
let delay_ms = calculate_exponential_backoff_delay(current_restart_count);
log::info!(
"Waiting {}ms before restart attempt {} for MCP server {}",
delay_ms,
current_restart_count,
name
);
sleep(Duration::from_millis(delay_ms)).await;
// Attempt to restart the server
let start_result = schedule_mcp_start_task(
app.clone(),
servers_state.clone(),
name.clone(),
config.clone(),
).await;
match start_result {
Ok(_) => {
log::info!("MCP server {} restarted successfully.", name);
// Check if server passed verification (was marked as successfully connected)
let passed_verification = {
let connected = successfully_connected.lock().await;
connected.get(&name).copied().unwrap_or(false)
};
if !passed_verification {
log::error!(
"MCP server {} failed verification after restart - stopping permanently",
name
);
break;
}
// Reset restart count on successful restart with verification
{
let mut counts = restart_counts.lock().await;
if let Some(count) = counts.get_mut(&name) {
if *count > 0 {
log::info!(
"MCP server {} restarted successfully, resetting restart count from {} to 0.",
name,
*count
);
*count = 0;
}
}
}
// Monitor the server again
let quit_reason = monitor_mcp_server_handle(
servers_state.clone(),
name.clone(),
).await;
log::info!("MCP server {} quit with reason: {:?}", name, quit_reason);
// Check if server was marked as successfully connected
let was_connected = {
let connected = successfully_connected.lock().await;
connected.get(&name).copied().unwrap_or(false)
};
// Only continue restart loop if server was previously connected
if !was_connected {
log::error!(
"MCP server {} failed before establishing successful connection - stopping permanently",
name
);
break;
}
// Determine if we should restart based on quit reason
let should_restart = match quit_reason {
Some(reason) => {
log::warn!("MCP server {} terminated unexpectedly: {:?}", name, reason);
true
}
None => {
log::info!("MCP server {} was manually stopped - not restarting", name);
false
}
};
if !should_restart {
break;
}
// Continue the loop for another restart attempt
}
Err(e) => {
log::error!("Failed to restart MCP server {}: {}", name, e);
// Check if server was marked as successfully connected before
let was_connected = {
let connected = successfully_connected.lock().await;
connected.get(&name).copied().unwrap_or(false)
};
// Only continue restart attempts if server was previously connected
if !was_connected {
log::error!(
"MCP server {} failed restart and was never successfully connected - stopping permanently",
name
);
break;
}
// Continue the loop for another restart attempt
}
}
}
}
#[tauri::command]
@ -119,10 +490,12 @@ pub async fn activate_mcp_server<R: Runtime>(
) -> Result<(), String> {
let servers: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>> =
state.mcp_servers.clone();
start_mcp_server(app, servers, name, config).await
// Use the modified start_mcp_server_with_restart that returns first attempt result
start_mcp_server_with_restart(app, servers, name, config, Some(3)).await
}
async fn start_mcp_server<R: Runtime>(
async fn schedule_mcp_start_task<R: Runtime>(
app: tauri::AppHandle<R>,
servers: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>>,
name: String,
@ -134,9 +507,13 @@ async fn start_mcp_server<R: Runtime>(
.parent()
.expect("Executable must have a parent directory");
let bin_path = exe_parent_path.to_path_buf();
if let Some((command, args, envs)) = extract_command_args(&config) {
let (command, args, envs) = extract_command_args(&config)
.ok_or_else(|| format!("Failed to extract command args from config for {name}"))?;
let mut cmd = Command::new(command.clone());
if command.clone() == "npx" {
if command == "npx" {
let mut cache_dir = app_path.clone();
cache_dir.push(".npx");
let bun_x_path = format!("{}/bun", bin_path.display());
@ -145,7 +522,7 @@ async fn start_mcp_server<R: Runtime>(
cmd.env("BUN_INSTALL", cache_dir.to_str().unwrap().to_string());
}
if command.clone() == "uvx" {
if command == "uvx" {
let mut cache_dir = app_path.clone();
cache_dir.push(".uvx");
let bun_x_path = format!("{}/uv", bin_path.display());
@ -154,10 +531,12 @@ async fn start_mcp_server<R: Runtime>(
cmd.arg("run");
cmd.env("UV_CACHE_DIR", cache_dir.to_str().unwrap().to_string());
}
#[cfg(windows)]
{
cmd.creation_flags(0x08000000); // CREATE_NO_WINDOW: prevents shell window on Windows
}
let app_path_str = app_path.to_str().unwrap().to_string();
let log_file_path = format!("{}/logs/app.log", app_path_str);
match std::fs::OpenOptions::new()
@ -174,7 +553,6 @@ async fn start_mcp_server<R: Runtime>(
};
cmd.kill_on_drop(true);
log::trace!("Command: {cmd:#?}");
args.iter().filter_map(Value::as_str).for_each(|arg| {
@ -186,16 +564,18 @@ async fn start_mcp_server<R: Runtime>(
}
});
let process = TokioChildProcess::new(cmd);
match process {
Ok(p) => {
let service = ().serve(p).await;
let process = TokioChildProcess::new(cmd)
.map_err(|e| {
log::error!("Failed to run command {name}: {e}");
format!("Failed to run command {name}: {e}")
})?;
let service = ().serve(process).await
.map_err(|e| format!("Failed to start MCP server {name}: {e}"))?;
match service {
Ok(running_service) => {
// Get peer info and clone the needed values before moving the service
let (server_name, server_version) = {
let server_info = running_service.peer_info();
let server_info = service.peer_info();
log::trace!("Connected to server: {server_info:#?}");
(
server_info.server_info.name.clone(),
@ -204,9 +584,32 @@ async fn start_mcp_server<R: Runtime>(
};
// Now move the service into the HashMap
servers.lock().await.insert(name.clone(), running_service);
servers.lock().await.insert(name.clone(), service);
log::info!("Server {name} started successfully.");
// Wait a short time to verify the server is stable before marking as connected
// This prevents race conditions where the server quits immediately
let verification_delay = Duration::from_millis(500);
sleep(verification_delay).await;
// Check if server is still running after the verification delay
let server_still_running = {
let servers_map = servers.lock().await;
servers_map.contains_key(&name)
};
if !server_still_running {
return Err(format!("MCP server {} quit immediately after starting", name));
}
// Mark server as successfully connected (for restart policy)
{
let app_state = app.state::<AppState>();
let mut connected = app_state.mcp_successfully_connected.lock().await;
connected.insert(name.clone(), true);
log::info!("Marked MCP server {} as successfully connected", name);
}
// Emit event to the frontend
let event = format!("mcp-connected");
let payload = serde_json::json!({
@ -215,32 +618,48 @@ async fn start_mcp_server<R: Runtime>(
});
app.emit(&event, payload)
.map_err(|e| format!("Failed to emit event: {}", e))?;
}
Err(e) => {
return Err(format!("Failed to start MCP server {name}: {e}"));
}
}
}
Err(e) => {
log::error!("Failed to run command {name}: {e}");
return Err(format!("Failed to run command {name}: {e}"));
}
}
}
Ok(())
}
#[tauri::command]
pub async fn deactivate_mcp_server(state: State<'_, AppState>, name: String) -> Result<(), String> {
log::info!("Deactivating MCP server: {}", name);
// First, mark server as manually deactivated to prevent restart
// Remove from active servers list to prevent restart
{
let mut active_servers = state.mcp_active_servers.lock().await;
active_servers.remove(&name);
log::info!("Removed MCP server {} from active servers list", name);
}
// Mark as not successfully connected to prevent restart logic
{
let mut connected = state.mcp_successfully_connected.lock().await;
connected.insert(name.clone(), false);
log::info!("Marked MCP server {} as not successfully connected", name);
}
// Reset restart count
{
let mut counts = state.mcp_restart_counts.lock().await;
counts.remove(&name);
log::info!("Reset restart count for MCP server {}", name);
}
// Now remove and stop the server
let servers = state.mcp_servers.clone();
let mut servers_map = servers.lock().await;
if let Some(service) = servers_map.remove(&name) {
let service = servers_map.remove(&name)
.ok_or_else(|| format!("Server {} not found", name))?;
// Release the lock before calling cancel
drop(servers_map);
service.cancel().await.map_err(|e| e.to_string())?;
log::info!("Server {name} stopped successfully.");
} else {
return Err(format!("Server {} not found", name));
}
log::info!("Server {name} stopped successfully and marked as deactivated.");
Ok(())
}
@ -270,11 +689,83 @@ pub async fn restart_mcp_servers(app: AppHandle, state: State<'_, AppState>) ->
// Stop the servers
stop_mcp_servers(state.mcp_servers.clone()).await?;
// Restart the servers
run_mcp_commands(&app, servers).await?;
// Restart only previously active servers (like cortex)
restart_active_mcp_servers(&app, servers).await?;
app.emit("mcp-update", "MCP servers updated")
.map_err(|e| format!("Failed to emit event: {}", e))
.map_err(|e| format!("Failed to emit event: {}", e))?;
Ok(())
}
/// Restart only servers that were previously active (like cortex restart behavior)
pub async fn restart_active_mcp_servers<R: Runtime>(
app: &AppHandle<R>,
servers_state: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>>,
) -> Result<(), String> {
let app_state = app.state::<AppState>();
let active_servers = app_state.mcp_active_servers.lock().await;
log::info!("Restarting {} previously active MCP servers", active_servers.len());
for (name, config) in active_servers.iter() {
log::info!("Restarting MCP server: {}", name);
// Start server with restart monitoring - spawn async task
let app_clone = app.clone();
let servers_clone = servers_state.clone();
let name_clone = name.clone();
let config_clone = config.clone();
tauri::async_runtime::spawn(async move {
let _ = start_mcp_server_with_restart(
app_clone,
servers_clone,
name_clone,
config_clone,
Some(3), // Default max restarts for startup
).await;
});
}
Ok(())
}
/// Handle app quit - stop all MCP servers cleanly (like cortex cleanup)
/// Clean shutdown hook for app quit: stop every MCP server and wipe the
/// restart bookkeeping so nothing tries to relaunch afterwards
/// (mirrors the cortex cleanup path).
pub async fn handle_app_quit(state: &AppState) -> Result<(), String> {
    log::info!("App quitting - stopping all MCP servers cleanly");

    // Shut down every running server first; bail out on failure so the
    // bookkeeping below still reflects reality.
    stop_mcp_servers(state.mcp_servers.clone()).await?;

    // Clear the "active" set and the restart counters. Each lock guard is a
    // temporary, so it is held only for the duration of its clear().
    state.mcp_active_servers.lock().await.clear();
    state.mcp_restart_counts.lock().await.clear();

    log::info!("All MCP servers stopped cleanly");
    Ok(())
}
/// Reset MCP restart count for a specific server (like cortex reset)
/// Tauri command: reset the restart counter for a single MCP server back to
/// zero (like the cortex reset command). A server with no recorded counter is
/// treated as already reset and succeeds silently.
#[tauri::command]
pub async fn reset_mcp_restart_count(
    state: State<'_, AppState>,
    server_name: String,
) -> Result<(), String> {
    let mut counts = state.mcp_restart_counts.lock().await;
    if let Some(entry) = counts.get_mut(&server_name) {
        let previous = *entry;
        *entry = 0;
        log::info!(
            "MCP server {} restart count reset from {} to 0.",
            server_name,
            previous
        );
    }
    Ok(())
}
pub async fn stop_mcp_servers(
@ -290,6 +781,7 @@ pub async fn stop_mcp_servers(
drop(servers_map); // Release the lock after stopping
Ok(())
}
#[tauri::command]
pub async fn get_connected_servers(
_app: AppHandle,
@ -366,8 +858,15 @@ pub async fn call_tool(
// Iterate through servers and find the first one that contains the tool
for (_, service) in servers.iter() {
if let Ok(tools) = service.list_all_tools().await {
if tools.iter().any(|t| t.name == tool_name) {
let tools = match service.list_all_tools().await {
Ok(tools) => tools,
Err(_) => continue, // Skip this server if we can't list tools
};
if !tools.iter().any(|t| t.name == tool_name) {
continue; // Tool not found in this server, try next
}
println!("Found tool {} in server", tool_name);
// Call the tool with timeout
@ -377,12 +876,7 @@ pub async fn call_tool(
});
return match timeout(MCP_TOOL_CALL_TIMEOUT, tool_call).await {
Ok(result) => {
match result {
Ok(ok_result) => Ok(ok_result),
Err(e) => Err(e.to_string()),
}
}
Ok(result) => result.map_err(|e| e.to_string()),
Err(_) => Err(format!(
"Tool call '{}' timed out after {} seconds",
tool_name,
@ -390,8 +884,6 @@ pub async fn call_tool(
)),
};
}
}
}
Err(format!("Tool {} not found", tool_name))
}
@ -409,8 +901,7 @@ pub async fn get_mcp_configs(app: AppHandle) -> Result<String, String> {
.map_err(|e| format!("Failed to create default MCP config: {}", e))?;
}
let contents = fs::read_to_string(path).map_err(|e| e.to_string())?;
return Ok(contents);
fs::read_to_string(path).map_err(|e| e.to_string())
}
#[tauri::command]
@ -422,6 +913,100 @@ pub async fn save_mcp_configs(app: AppHandle, configs: String) -> Result<(), Str
fs::write(path, configs).map_err(|e| e.to_string())
}
/// Store active server configuration for restart purposes
/// Remember the configuration of a server that was started, so a later
/// restart can relaunch it with the same settings.
async fn store_active_server_config(
    active_servers_state: &Arc<Mutex<HashMap<String, Value>>>,
    name: &str,
    config: &Value,
) {
    active_servers_state
        .lock()
        .await
        .insert(name.to_string(), config.clone());
}
/// Reset restart count for a server
/// Zero out the restart counter for `name`, creating the entry if absent.
async fn reset_restart_count(
    restart_counts: &Arc<Mutex<HashMap<String, u32>>>,
    name: &str,
) {
    restart_counts.lock().await.insert(name.to_string(), 0);
}
/// Spawn the server monitoring task for handling restarts
/// Spawn a background task that watches a running MCP server and, when it
/// exits unexpectedly after having connected successfully, kicks off the
/// bounded restart loop (`max_restarts` attempts).
///
/// The function only spawns the monitor task and returns immediately; the
/// quit reason is observed asynchronously inside the task.
async fn spawn_server_monitoring_task<R: Runtime>(
    app: AppHandle<R>,
    servers_state: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>>,
    name: String,
    config: Value,
    max_restarts: u32,
    restart_counts: Arc<Mutex<HashMap<String, u32>>>,
    successfully_connected: Arc<Mutex<HashMap<String, bool>>>,
) {
    // The parameters are already owned, so they move straight into the task;
    // the extra `*_clone` copies the previous version made were redundant.
    tauri::async_runtime::spawn(async move {
        // Block until the server's RunningService handle reports why it quit.
        let quit_reason =
            monitor_mcp_server_handle(servers_state.clone(), name.clone()).await;

        log::info!("MCP server {} quit with reason: {:?}", name, quit_reason);

        // Only restart servers that had connected successfully and did not
        // quit through a deliberate stop (see should_restart_server).
        if should_restart_server(&successfully_connected, &name, &quit_reason).await {
            start_restart_loop(
                app,
                servers_state,
                name,
                config,
                max_restarts,
                restart_counts,
                successfully_connected,
            )
            .await;
        }
    });
}
/// Determine if a server should be restarted based on its connection status and quit reason
/// Decide whether a terminated MCP server should be relaunched.
///
/// Policy:
/// - a server that never reached a successful connection is never restarted
///   (it is assumed to be misconfigured and stopped permanently);
/// - a previously connected server is restarted only when it quit with a
///   reason (unexpected termination); a `None` reason means a manual stop.
async fn should_restart_server(
    successfully_connected: &Arc<Mutex<HashMap<String, bool>>>,
    name: &str,
    quit_reason: &Option<rmcp::service::QuitReason>,
) -> bool {
    // Look up the connection flag; absent entries count as "never connected".
    let was_connected = successfully_connected
        .lock()
        .await
        .get(name)
        .copied()
        .unwrap_or(false);

    if !was_connected {
        log::error!(
            "MCP server {} failed before establishing successful connection - stopping permanently",
            name
        );
        return false;
    }

    if let Some(reason) = quit_reason {
        log::warn!("MCP server {} terminated unexpectedly: {:?}", name, reason);
        true
    } else {
        log::info!("MCP server {} was manually stopped - not restarting", name);
        false
    }
}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -197,9 +197,39 @@ fn extract_extension_manifest<R: Read>(
}
pub fn setup_mcp(app: &App) {
let state = app.state::<AppState>().inner();
let state = app.state::<AppState>();
let servers = state.mcp_servers.clone();
let app_handle: tauri::AppHandle = app.handle().clone();
// Setup kill-mcp-servers event listener (similar to cortex kill-sidecar)
let app_handle_for_kill = app_handle.clone();
app_handle.listen("kill-mcp-servers", move |_event| {
let app_handle = app_handle_for_kill.clone();
tauri::async_runtime::spawn(async move {
log::info!("Received kill-mcp-servers event - cleaning up MCP servers");
let app_state = app_handle.state::<AppState>();
// Stop all running MCP servers
if let Err(e) = super::mcp::stop_mcp_servers(app_state.mcp_servers.clone()).await {
log::error!("Failed to stop MCP servers: {}", e);
return;
}
// Clear active servers and restart counts
{
let mut active_servers = app_state.mcp_active_servers.lock().await;
active_servers.clear();
}
{
let mut restart_counts = app_state.mcp_restart_counts.lock().await;
restart_counts.clear();
}
log::info!("MCP servers cleaned up successfully");
});
});
tauri::async_runtime::spawn(async move {
if let Err(e) = run_mcp_commands(&app_handle, servers).await {
log::error!("Failed to run mcp commands: {}", e);
@ -247,7 +277,12 @@ pub fn setup_sidecar(app: &App) -> Result<(), String> {
]);
#[cfg(target_os = "windows")]
{
let resource_dir = app_handle_for_spawn.path().resource_dir().unwrap();
let mut resource_dir = app_handle_for_spawn.path().resource_dir().unwrap();
// If debug
#[cfg(debug_assertions)]
{
resource_dir = resource_dir.join("binaries");
}
let normalized_path = resource_dir.to_string_lossy().replace(r"\\?\", "");
let normalized_pathbuf = PathBuf::from(normalized_path);
cmd = cmd.current_dir(normalized_pathbuf);
@ -256,12 +291,12 @@ pub fn setup_sidecar(app: &App) -> Result<(), String> {
#[cfg(not(target_os = "windows"))]
{
cmd = cmd.env("LD_LIBRARY_PATH", {
let current_app_data_dir = app_handle_for_spawn
.path()
.resource_dir()
.unwrap()
.join("binaries");
let dest = current_app_data_dir.to_str().unwrap();
let mut resource_dir = app_handle_for_spawn.path().resource_dir().unwrap();
#[cfg(not(debug_assertions))]
{
resource_dir = resource_dir.join("binaries");
}
let dest = resource_dir.to_str().unwrap();
let ld_path_env = std::env::var("LD_LIBRARY_PATH").unwrap_or_default();
format!("{}{}{}", ld_path_env, ":", dest)
});

View File

@ -16,6 +16,9 @@ pub struct AppState {
pub download_manager: Arc<Mutex<DownloadManagerState>>,
pub cortex_restart_count: Arc<Mutex<u32>>,
pub cortex_killed_intentionally: Arc<Mutex<bool>>,
pub mcp_restart_counts: Arc<Mutex<HashMap<String, u32>>>,
pub mcp_active_servers: Arc<Mutex<HashMap<String, serde_json::Value>>>,
pub mcp_successfully_connected: Arc<Mutex<HashMap<String, bool>>>,
pub server_handle: Arc<Mutex<Option<ServerHandle>>>,
}
pub fn generate_app_token() -> String {

View File

@ -68,6 +68,7 @@ pub fn run() {
core::mcp::get_mcp_configs,
core::mcp::activate_mcp_server,
core::mcp::deactivate_mcp_server,
core::mcp::reset_mcp_restart_count,
// Threads
core::threads::list_threads,
core::threads::create_thread,
@ -93,6 +94,9 @@ pub fn run() {
download_manager: Arc::new(Mutex::new(DownloadManagerState::default())),
cortex_restart_count: Arc::new(Mutex::new(0)),
cortex_killed_intentionally: Arc::new(Mutex::new(false)),
mcp_restart_counts: Arc::new(Mutex::new(HashMap::new())),
mcp_active_servers: Arc::new(Mutex::new(HashMap::new())),
mcp_successfully_connected: Arc::new(Mutex::new(HashMap::new())),
server_handle: Arc::new(Mutex::new(None)),
})
.setup(|app| {
@ -124,6 +128,7 @@ pub fn run() {
tauri::WindowEvent::CloseRequested { .. } => {
if window.label() == "main" {
window.emit("kill-sidecar", ()).unwrap();
window.emit("kill-mcp-servers", ()).unwrap();
clean_up();
}
}

View File

@ -33,7 +33,7 @@ ${StrLoc}
!define VERSION "jan_version"
!define VERSIONWITHBUILD "jan_build"
!define HOMEPAGE ""
!define INSTALLMODE "currentUser"
!define INSTALLMODE "both"
!define LICENSE ""
!define INSTALLERICON "D:\a\jan\jan\src-tauri\icons\icon.ico"
!define SIDEBARIMAGE ""

View File

@ -17,6 +17,8 @@
"label": "main",
"title": "Jan",
"width": 1024,
"minWidth": 375,
"minHeight": 667,
"height": 800,
"resizable": true,
"fullscreen": false,

1
tests-e2e-js/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
dist

23
tests-e2e-js/package.json Normal file
View File

@ -0,0 +1,23 @@
{
"name": "tests-e2e-js",
"version": "0.0.0",
"private": true,
"type": "module",
"main": "src/main.ts",
"scripts": {
"build": "tsc",
"test": "node --test --test-force-exit --loader ts-node/esm ./src/main.ts"
},
"dependencies": {
"@tauri-e2e/selenium": "0.2.2",
"log4js": "^6.9.1",
"selenium-webdriver": "^4.22.0",
"ts-node": "^10.9.2"
},
"devDependencies": {
"@types/node": "^20.14.9",
"@types/selenium-webdriver": "^4.1.28",
"tsimp": "^2.0.11",
"typescript": "^5.5.2"
}
}

51
tests-e2e-js/src/main.ts Normal file
View File

@ -0,0 +1,51 @@
import assert from 'node:assert'
import { ChildProcess } from 'node:child_process'
import { afterEach, beforeEach, describe, test } from 'node:test'
import { By, until, WebDriver } from 'selenium-webdriver'
import * as e2e from '@tauri-e2e/selenium'
import { default as log4js } from 'log4js'

// E2E smoke tests for the Jan desktop app, driven through tauri-driver /
// selenium. Requires a release build of the app at TAURI_SELENIUM_BINARY.

const logger = log4js.getLogger()
logger.level = 'debug'

process.env.TAURI_WEBDRIVER_LOGLEVEL = 'debug'
process.env.TAURI_WEBDRIVER_BINARY = await e2e.install.PlatformDriver()
process.env.TAURI_SELENIUM_BINARY = '../src-tauri/target/release/Jan.exe'
process.env.SELENIUM_REMOTE_URL = 'http://127.0.0.1:6655'

// @ts-ignore log4js Logger does not structurally match the logger type @tauri-e2e/selenium declares
e2e.setLogger(logger)

describe('Tauri E2E tests', async () => {
  let driver: WebDriver
  let webDriver: ChildProcess

  beforeEach(async () => {
    // Spawn the WebDriver process and give it a moment to start listening.
    webDriver = await e2e.launch.spawnWebDriver()
    await new Promise((r) => setTimeout(r, 1000))
    // Create the driver session against SELENIUM_REMOTE_URL.
    driver = new e2e.selenium.Builder().build()
  })

  afterEach(async () => {
    await e2e.selenium.cleanupSession(driver)
    e2e.launch.killWebDriver(webDriver)
  })

  test('Find hub', async () => {
    // FIX: the selector was missing its closing ']' and so never matched.
    const hub = until.elementLocated(By.css('[data-test-id="menu-common:hub"]'))
    // A WebElementCondition can be passed to driver.wait() directly (no need
    // for the untyped `.fn` access); the app can take a while to boot, hence
    // the generous timeout.
    await driver.wait(hub, 120000)
    const menuElement = await driver.findElement({
      css: '[data-test-id="menu-common:hub"]',
    })
    assert(menuElement !== null, 'Hub menu element should be available')
    await menuElement.isDisplayed()
  })
})

View File

@ -0,0 +1,21 @@
{
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src",
"target": "ESNext",
"module": "ESNext",
"moduleResolution": "Bundler",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": true,
"declaration": true,
"declarationMap": true
},
"include": [
"src/*.ts"
],
"exclude": [
"node_modules",
"dist"
],
}

View File

@ -28,10 +28,11 @@
"@tabler/icons-react": "^3.33.0",
"@tailwindcss/vite": "^4.1.4",
"@tanstack/react-router": "^1.116.0",
"@tanstack/react-router-devtools": "^1.116.0",
"@tanstack/react-router-devtools": "^1.121.34",
"@tauri-apps/api": "^2.5.0",
"@tauri-apps/plugin-deep-link": "~2",
"@tauri-apps/plugin-dialog": "^2.2.1",
"@tauri-apps/plugin-http": "^2.2.1",
"@tauri-apps/plugin-opener": "^2.2.7",
"@tauri-apps/plugin-os": "^2.2.1",
"@tauri-apps/plugin-updater": "^2.7.1",
@ -45,7 +46,7 @@
"i18next": "^25.0.1",
"katex": "^0.16.22",
"lodash.debounce": "^4.0.8",
"lucide-react": "^0.503.0",
"lucide-react": "^0.522.0",
"motion": "^12.10.5",
"next-themes": "^0.4.6",
"posthog-js": "^1.246.0",
@ -65,7 +66,7 @@
"remark-math": "^6.0.0",
"sonner": "^2.0.3",
"tailwindcss": "^4.1.4",
"token.js": "npm:token.js-fork@0.7.9",
"token.js": "npm:token.js-fork@0.7.12",
"tw-animate-css": "^1.2.7",
"ulidx": "^2.4.1",
"unified": "^11.0.5",
@ -87,7 +88,7 @@
"eslint-plugin-react-refresh": "^0.4.19",
"globals": "^16.0.0",
"tailwind-merge": "^3.2.0",
"typescript": "~5.7.2",
"typescript": "~5.8.3",
"typescript-eslint": "^8.26.1",
"vite": "^6.3.0",
"vite-plugin-node-polyfills": "^0.23.0",

View File

@ -3,6 +3,7 @@ import * as DialogPrimitive from '@radix-ui/react-dialog'
import { XIcon } from 'lucide-react'
import { cn } from '@/lib/utils'
import { useTranslation } from '@/i18n/react-i18next-compat'
function Dialog({
...props
@ -58,6 +59,7 @@ function DialogContent({
'aria-describedby': ariaDescribedBy,
...props
}: DialogContentProps) {
const { t } = useTranslation()
return (
<DialogPortal data-slot="dialog-portal">
<DialogOverlay />
@ -65,7 +67,7 @@ function DialogContent({
data-slot="dialog-content"
aria-describedby={ariaDescribedBy}
className={cn(
'bg-main-view max-h-[calc(100%-48px)] overflow-auto border-main-view-fg/10 text-main-view-fg data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 fixed top-[50%] left-[50%] z-50 grid w-full max-w-[calc(100%-2rem)] translate-x-[-50%] translate-y-[-50%] gap-4 rounded-lg border p-6 shadow-lg duration-200 sm:max-w-lg',
'bg-main-view max-h-[calc(100%-80px)] overflow-auto border-main-view-fg/10 text-main-view-fg data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 fixed top-[50%] left-[50%] z-50 grid w-full max-w-[calc(100%-2rem)] translate-x-[-50%] translate-y-[-50%] gap-4 rounded-lg border p-6 shadow-lg duration-200 sm:max-w-lg',
className
)}
{...props}
@ -74,7 +76,7 @@ function DialogContent({
{showCloseButton && (
<DialogPrimitive.Close className="data-[state=open]:text-main-view-fg/50 absolute top-4 right-4 rounded-xs opacity-70 transition-opacity hover:opacity-100 focus:ring-0 focus:outline-0 disabled:pointer-events-none [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4 cursor-pointer">
<XIcon />
<span className="sr-only">Close</span>
<span className="sr-only">{t('close')}</span>
</DialogPrimitive.Close>
)}
</DialogPrimitive.Content>

View File

@ -3,6 +3,7 @@ import * as SheetPrimitive from '@radix-ui/react-dialog'
import { XIcon } from 'lucide-react'
import { cn } from '@/lib/utils'
import { useTranslation } from '@/i18n/react-i18next-compat'
function Sheet({ ...props }: React.ComponentProps<typeof SheetPrimitive.Root>) {
return <SheetPrimitive.Root data-slot="sheet" {...props} />
@ -50,6 +51,7 @@ function SheetContent({
}: React.ComponentProps<typeof SheetPrimitive.Content> & {
side?: 'top' | 'right' | 'bottom' | 'left'
}) {
const { t } = useTranslation()
return (
<SheetPortal>
<SheetOverlay />
@ -72,7 +74,7 @@ function SheetContent({
{children}
<SheetPrimitive.Close className="absolute top-4 text-main-view-fg right-4 rounded-xs opacity-70 transition-opacity hover:opacity-100 focus:ring-0 disabled:pointer-events-none">
<XIcon className="size-4" />
<span className="sr-only">Close</span>
<span className="sr-only">{t('close')}</span>
</SheetPrimitive.Close>
</SheetPrimitive.Content>
</SheetPortal>

View File

@ -5,6 +5,7 @@ export const route = {
assistant: '/assistant',
settings: {
index: '/settings',
model_providers: '/settings/providers',
providers: '/settings/providers/$providerName',
general: '/settings/general',
appearance: '/settings/appearance',

View File

@ -1,7 +1,8 @@
import { Input } from '@/components/ui/input'
import { useLocalApiServer } from '@/hooks/useLocalApiServer'
import { useState, useEffect } from 'react'
import { useState, useEffect, useCallback } from 'react'
import { Eye, EyeOff } from 'lucide-react'
import { useTranslation } from '@/i18n/react-i18next-compat'
interface ApiKeyInputProps {
showError?: boolean
@ -16,23 +17,24 @@ export function ApiKeyInput({
const [inputValue, setInputValue] = useState(apiKey.toString())
const [showPassword, setShowPassword] = useState(false)
const [error, setError] = useState('')
const { t } = useTranslation()
const validateApiKey = (value: string) => {
const validateApiKey = useCallback((value: string) => {
if (!value || value.trim().length === 0) {
setError('API Key is required')
setError(t('common:apiKeyRequired'))
onValidationChange?.(false)
return false
}
setError('')
onValidationChange?.(true)
return true
}
}, [onValidationChange, t])
useEffect(() => {
if (showError) {
validateApiKey(inputValue)
}
}, [showError, inputValue])
}, [showError, inputValue, validateApiKey])
const handleChange = (e: React.ChangeEvent<HTMLInputElement>) => {
const value = e.target.value
@ -67,7 +69,7 @@ export function ApiKeyInput({
? 'border-1 border-destructive focus:border-destructive focus:ring-destructive'
: ''
}`}
placeholder="Enter API Key"
placeholder={t('common:enterApiKey')}
/>
<div className="absolute right-2 top-1/2 transform -translate-y-1/2 flex items-center gap-1">
<button

View File

@ -10,29 +10,35 @@ type CardProps = {
type CardItemProps = {
title?: string | ReactNode
description?: string | ReactNode
descriptionOutside?: string | ReactNode
align?: 'start' | 'center' | 'end'
actions?: ReactNode
column?: boolean
className?: string
classNameWrapperAction?: string
}
export function CardItem({
title,
description,
descriptionOutside,
className,
classNameWrapperAction,
align = 'center',
column,
actions,
}: CardItemProps) {
return (
<>
<div
className={cn(
'flex justify-between mt-2 first:mt-0 border-b border-main-view-fg/5 pb-3 last:border-none last:pb-0 gap-8',
className,
descriptionOutside && 'border-0',
align === 'start' && 'items-start',
align === 'center' && 'items-center',
align === 'end' && 'items-end',
column && 'flex-col gap-y-0 items-start'
column && 'flex-col gap-y-0 items-start',
className
)}
>
<div className="space-y-1.5">
@ -44,9 +50,23 @@ export function CardItem({
)}
</div>
{actions && (
<div className={cn('shrink-0', column && 'w-full')}>{actions}</div>
<div
className={cn(
'shrink-0',
classNameWrapperAction,
column && 'w-full'
)}
>
{actions}
</div>
)}
</div>
{descriptionOutside && (
<span className="text-main-view-fg/70 leading-normal">
{descriptionOutside}
</span>
)}
</>
)
}

View File

@ -23,7 +23,7 @@ import {
IconPlayerStopFilled,
IconX,
} from '@tabler/icons-react'
import { useTranslation } from 'react-i18next'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { useGeneralSetting } from '@/hooks/useGeneralSetting'
import { useModelProvider } from '@/hooks/useModelProvider'
@ -35,7 +35,6 @@ import { ModelLoader } from '@/containers/loaders/ModelLoader'
import DropdownToolsAvailable from '@/containers/DropdownToolsAvailable'
import { getConnectedServers } from '@/services/mcp'
import { stopAllModels } from '@/services/models'
import { useOutOfContextPromiseModal } from './dialogs/OutOfContextDialog'
type ChatInputProps = {
className?: string
@ -55,8 +54,6 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
const { t } = useTranslation()
const { spellCheckChatInput } = useGeneralSetting()
const { showModal, PromiseModal: OutOfContextModal } =
useOutOfContextPromiseModal()
const maxRows = 10
const { selectedModel } = useModelProvider()
@ -107,7 +104,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
return
}
setMessage('')
sendMessage(prompt, showModal)
sendMessage(prompt)
}
useEffect(() => {
@ -368,6 +365,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
rows={1}
maxRows={10}
value={prompt}
data-test-id={'chat-input'}
onChange={(e) => {
setPrompt(e.target.value)
// Count the number of newlines to estimate rows
@ -382,7 +380,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
// When Shift+Enter is pressed, a new line is added (default behavior)
}
}}
placeholder={t('common.placeholder.chatInput')}
placeholder={t('common:placeholder.chatInput')}
autoFocus
spellCheck={spellCheckChatInput}
data-gramm={spellCheckChatInput}
@ -440,7 +438,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
</div>
</TooltipTrigger>
<TooltipContent>
<p>Vision</p>
<p>{t('vision')}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
@ -457,7 +455,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
</div>
</TooltipTrigger>
<TooltipContent>
<p>Embeddings</p>
<p>{t('embeddings')}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
@ -513,7 +511,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
</div>
</TooltipTrigger>
<TooltipContent>
<p>Tools</p>
<p>{t('tools')}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
@ -547,7 +545,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
</div>
</TooltipTrigger>
<TooltipContent>
<p>Reasoning</p>
<p>{t('reasoning')}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
@ -570,6 +568,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
variant={!prompt.trim() ? null : 'default'}
size="icon"
disabled={!prompt.trim()}
data-test-id="send-message-button"
onClick={() => handleSendMesage(prompt)}
>
{streamingContent ? (
@ -599,7 +598,6 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
</div>
</div>
)}
<OutOfContextModal />
</div>
)
}

Some files were not shown because too many files have changed in this diff Show More