Merge branch 'dev' into feat/model-selector

This commit is contained in:
Louis 2025-09-15 12:02:25 +07:00 committed by GitHub
commit 43431c26e7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
92 changed files with 12650 additions and 9690 deletions

View File

@ -0,0 +1,31 @@
---
allowed-tools: Bash(gh issue view:*), Bash(gh search:*), Bash(gh issue list:*), Bash(gh api:*), Bash(gh issue comment:*)
description: Find duplicate GitHub issues
---
Find up to 3 likely duplicate issues for a given GitHub issue.
To do this, follow these steps precisely:
1. Use an agent to check if the GitHub issue (a) is closed, (b) does not need to be deduped (e.g. because it is broad product feedback without a specific solution, or positive feedback), or (c) already has a duplicates comment that you made earlier. If so, do not proceed.
2. Use an agent to view a Github issue, and ask the agent to return a summary of the issue
3. Then, launch 5 parallel agents to search GitHub for duplicates of this issue, using diverse keywords and search approaches, using the summary from #2
4. Next, feed the results from #2 and #3 into another agent, so that it can filter out false positives that are likely not actually duplicates of the original issue. If there are no duplicates remaining, do not proceed.
5. Finally, comment back on the issue with a list of up to three duplicate issues (or zero, if there are no likely duplicates)
Notes (be sure to tell this to your agents, too):
- Use `gh` to interact with Github, rather than web fetch
- Do not use other tools, beyond `gh` (eg. don't use other MCP servers, file edit, etc.)
- Make a todo list first
- For your comment, follow the following format precisely (assuming for this example that you found 3 suspected duplicates):
---
Found 3 possible duplicate issues:
1. <link to issue>
2. <link to issue>
3. <link to issue>
---

View File

@ -0,0 +1,31 @@
# Workflow: automatically look for duplicates of newly opened GitHub issues
# using Claude Code. The actual dedupe logic lives in the /dedupe slash
# command; this workflow only invokes it with the target issue reference.
name: Claude Issue Dedupe
# NOTE(review): a top-level `description` key is not part of the Actions
# workflow schema; GitHub ignores it and linters such as actionlint flag it.
description: Automatically dedupe GitHub issues using Claude Code
on:
  # Run automatically for every newly opened issue.
  issues:
    types: [opened]
  # Allow manual runs against an arbitrary issue number.
  workflow_dispatch:
    inputs:
      issue_number:
        description: 'Issue number to process for duplicate detection'
        required: true
        type: string
jobs:
  claude-dedupe-issues:
    runs-on: ubuntu-latest
    timeout-minutes: 10
    permissions:
      contents: read
      issues: write  # needed so the dedupe command can comment on the issue
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Run Claude Code dedupe
        uses: anthropics/claude-code-base-action@beta
        with:
          # `github.event.issue.number` is set for the `issues` trigger;
          # fall back to the manually supplied input for workflow_dispatch.
          prompt: "/dedupe ${{ github.repository }}/issues/${{ github.event.issue.number || inputs.issue_number }}"
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          # Expose the default token so `gh` works inside the Claude session.
          claude_env: |
            GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -3,26 +3,10 @@ name: Jan Web Server build image and push to Harbor Registry
on:
push:
branches:
- dev
paths:
- '.github/workflows/jan-server-web-ci.yml'
- 'core/**'
- 'web-app/**'
- 'extensions-web/**'
- 'Makefile'
- 'package.json'
- 'Dockerfile'
- dev-web
pull_request:
branches:
- dev
paths:
- '.github/workflows/jan-server-web-ci.yml'
- 'core/**'
- 'web-app/**'
- 'extensions-web/**'
- 'Makefile'
- 'package.json'
- 'Dockerfile'
- dev-web
jobs:
build-and-preview:
@ -53,16 +37,14 @@ jobs:
&& sudo apt update
sudo apt-get install -y jq gettext
- name: Set image tag and service name
- name: Set image tag
id: vars
run: |
SERVICE_NAME=jan-server-web
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
IMAGE_TAG="web:preview-${{ github.sha }}"
else
IMAGE_TAG="web:dev-${{ github.sha }}"
fi
echo "SERVICE_NAME=${SERVICE_NAME}" >> $GITHUB_OUTPUT
echo "IMAGE_TAG=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "FULL_IMAGE=registry.menlo.ai/jan-server/${IMAGE_TAG}" >> $GITHUB_OUTPUT
@ -71,45 +53,6 @@ jobs:
docker build -t ${{ steps.vars.outputs.FULL_IMAGE }} .
- name: Push docker image
if: github.event_name == 'push'
run: |
docker push ${{ steps.vars.outputs.FULL_IMAGE }}
- name: Checkout preview URL repo
if: github.event_name == 'pull_request'
uses: actions/checkout@v4
with:
repository: menloresearch/infra-domains
token: ${{ secrets.PAT_SERVICE_ACCOUNT }}
path: preview-repo
- name: Generate preview manifest
if: github.event_name == 'pull_request'
run: |
cd preview-repo/kubernetes
bash template/generate.sh \
template/preview-url-template.yaml \
preview-url/pr-${{ github.sha }}-${{ steps.vars.outputs.SERVICE_NAME }}.yaml \
${{ github.sha }} \
${{ steps.vars.outputs.SERVICE_NAME }} \
${{ steps.vars.outputs.FULL_IMAGE }} \
80
- name: Commit and push preview manifest
if: github.event_name == 'pull_request'
run: |
cd preview-repo
git config user.name "preview-bot"
git config user.email "preview-bot@users.noreply.github.com"
git add kubernetes/preview-url/pr-${{ github.sha }}-${{ steps.vars.outputs.SERVICE_NAME }}.yaml
git commit -m "feat(preview): add pr-${{ github.sha }}-${{ steps.vars.outputs.SERVICE_NAME }}.yaml"
git push origin main
sleep 180
- name: Comment preview URL on PR
if: github.event_name == 'pull_request'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
uses: mshick/add-pr-comment@v2
with:
message: |
Preview URL: https://pr-${{ github.sha }}-${{ steps.vars.outputs.SERVICE_NAME }}.menlo.ai

View File

@ -0,0 +1,54 @@
# Workflow: build the Jan web app and publish it to Cloudflare Pages
# production whenever the `prod-web` branch is pushed.
name: Jan Web Server deploy to production
on:
  push:
    branches:
      - prod-web
jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      deployments: write
      pull-requests: write
    env:
      # Consumed by the web-app build (API base baked into the bundle).
      JAN_API_BASE: "https://api.jan.ai/jan/v1"
      CLOUDFLARE_PROJECT_NAME: "jan-server-web"
    steps:
      - uses: actions/checkout@v4
      # Bumped from setup-node@v3 (node16 runtime, deprecated by GitHub).
      - uses: actions/setup-node@v4
        with:
          node-version: 20
      - name: Install jq
        uses: dcarbone/install-jq-action@v2.0.1
      # Kept for reference: populate .env from repository secrets based on
      # the keys listed in .env.example.
      # - name: Fill env vars
      #   run: |
      #     env_example_file=".env.example"
      #     touch .env
      #     while IFS= read -r line || [[ -n "$line" ]]; do
      #       if [[ "$line" == *"="* ]]; then
      #         var_name=$(echo $line | cut -d '=' -f 1)
      #         echo $var_name
      #         var_value="$(jq -r --arg key "$var_name" '.[$key]' <<< "$SECRETS")"
      #         echo "$var_name=$var_value" >> .env
      #       fi
      #     done < "$env_example_file"
      #   env:
      #     SECRETS: '${{ toJson(secrets) }}'
      - name: Install dependencies
        run: make config-yarn && yarn install && yarn build:core && make build-web-app
      # NOTE(review): cloudflare/pages-action@v1 is in maintenance mode;
      # Cloudflare recommends wrangler-action — consider migrating.
      - name: Publish to Cloudflare Pages Production
        uses: cloudflare/pages-action@v1
        with:
          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          projectName: ${{ env.CLOUDFLARE_PROJECT_NAME }}
          directory: ./web-app/dist-web
          branch: main
          # Optional: Enable this if you want to have GitHub Deployments triggered
          gitHubToken: ${{ secrets.GITHUB_TOKEN }}

View File

@ -0,0 +1,47 @@
# Workflow: nightly-channel Tauri builds for external (untrusted) PRs
# targeting `dev`. Fans out to per-OS reusable build templates, all fed by
# the shared version-computation template.
name: Tauri Builder - Nightly / External PRs
on:
  pull_request:
    branches:
      - dev
    paths:
      - '.github/workflows/jan-tauri-build-nightly-external.yaml'
      - '.github/workflows/template-tauri-build-*-external.yml'
      - 'src-tauri/**'
      - 'core/**'
      - 'web-app/**'
      - 'extensions/**'
      - 'scripts/**'
      - 'pre-install/**'
      - 'Makefile'
      - 'package.json'
      - 'mise.toml'
jobs:
  # Computes the nightly version string consumed by every build job below.
  get-update-version:
    uses: ./.github/workflows/template-get-update-version.yml
  build-macos:
    uses: ./.github/workflows/template-tauri-build-macos-external.yml
    needs: [get-update-version]
    with:
      ref: ${{ github.ref }}
      new_version: ${{ needs.get-update-version.outputs.new_version }}
      channel: nightly
  build-windows-x64:
    uses: ./.github/workflows/template-tauri-build-windows-x64-external.yml
    needs: [get-update-version]
    with:
      ref: ${{ github.ref }}
      new_version: ${{ needs.get-update-version.outputs.new_version }}
      channel: nightly
  build-linux-x64:
    uses: ./.github/workflows/template-tauri-build-linux-x64-external.yml
    needs: [get-update-version]
    with:
      ref: ${{ github.ref }}
      new_version: ${{ needs.get-update-version.outputs.new_version }}
      channel: nightly
      # NOTE(review): only the linux template declares this input; the macOS
      # and Windows templates don't accept it — confirm this asymmetry is
      # intentional.
      disable_updater: false

View File

@ -0,0 +1,114 @@
# Reusable workflow: build Jan for Linux x64 in "external" mode (no signing
# secrets). Produces .deb and .AppImage artifacts.
name: tauri-build-linux-x64-external
on:
  workflow_call:
    inputs:
      ref:
        required: true
        type: string
        # NOTE(review): `default` is ignored for required inputs.
        default: 'refs/heads/main'
      new_version:
        required: true
        type: string
        default: ''
      channel:
        required: true
        type: string
        default: 'nightly'
        description: 'The channel to use for this job'
      disable_updater:
        required: false
        type: boolean
        default: false
        description: 'If true, builds both .deb and .appimage but disables auto-updater'
jobs:
  build-linux-x64-external:
    runs-on: ubuntu-latest
    steps:
      - name: Getting the repo
        # Bumped from checkout@v3 (node16 runtime, deprecated by GitHub).
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.ref }}
      # Hosted runners are tight on disk; drop large preinstalled toolchains
      # before building.
      - name: Free Disk Space Before Build
        run: |
          echo "Disk space before cleanup:"
          df -h
          sudo rm -rf /usr/local/.ghcup
          sudo rm -rf /opt/hostedtoolcache/CodeQL
          sudo rm -rf /usr/local/lib/android/sdk/ndk
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /opt/ghc
          sudo rm -rf /usr/local/share/boost
          sudo apt-get clean
          echo "Disk space after cleanup:"
          df -h
      - name: Replace Icons for Beta Build
        if: inputs.channel != 'stable'
        shell: bash
        run: |
          cp .github/scripts/icon-${{ inputs.channel }}.png src-tauri/icons/icon.png
      - name: Installing node
        # Bumped from setup-node@v1 (long deprecated).
        uses: actions/setup-node@v4
        with:
          node-version: 20
      - name: Install jq
        uses: dcarbone/install-jq-action@v2.0.1
      - name: Install ctoml
        run: |
          cargo install ctoml
      - name: Install Tauri dependencies
        run: |
          sudo apt update
          sudo apt install -y libglib2.0-dev libatk1.0-dev libpango1.0-dev libgtk-3-dev libsoup-3.0-dev libwebkit2gtk-4.1-dev librsvg2-dev libfuse2
      # Stamp the requested version into tauri.conf.json, web-app/package.json
      # and Cargo.toml; for non-stable channels also rename the app/binary and
      # point the updater at the channel-specific endpoint.
      - name: Update app version
        run: |
          echo "Version: ${{ inputs.new_version }}"
          jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = false' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
          mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
          if [ "${{ inputs.channel }}" != "stable" ]; then
            jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun",
              "usr/lib/Jan-${{ inputs.channel }}/resources/lib/libvulkan.so": "resources/lib/libvulkan.so"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json
            mv /tmp/tauri.linux.conf.json ./src-tauri/tauri.linux.conf.json
          fi
          jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json
          mv /tmp/package.json web-app/package.json
          ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
          ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}"
          if [ "${{ inputs.channel }}" != "stable" ]; then
            jq '.plugins.updater.endpoints = ["https://delta.jan.ai/${{ inputs.channel }}/latest.json"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
            mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
            chmod +x .github/scripts/rename-tauri-app.sh
            .github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }}
            ctoml ./src-tauri/Cargo.toml package.name "Jan-${{ inputs.channel }}"
            # NOTE(review): "devtools" was already appended above, so
            # non-stable builds append it twice — harmless but redundant.
            ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
            chmod +x .github/scripts/rename-workspace.sh
            .github/scripts/rename-workspace.sh ./package.json ${{ inputs.channel }}
          fi
      - name: Build app
        run: |
          make build
        env:
          RELEASE_CHANNEL: '${{ inputs.channel }}'
          AUTO_UPDATER_DISABLED: ${{ inputs.disable_updater && 'true' || 'false' }}
      - name: Upload Artifact
        uses: actions/upload-artifact@v4
        with:
          name: jan-linux-amd64-${{ inputs.new_version }}-deb
          path: ./src-tauri/target/release/bundle/deb/*.deb
      - name: Upload Artifact
        uses: actions/upload-artifact@v4
        with:
          name: jan-linux-amd64-${{ inputs.new_version }}-AppImage
          path: ./src-tauri/target/release/bundle/appimage/*.AppImage

View File

@ -0,0 +1,102 @@
# Reusable workflow: build Jan for macOS (universal binary) in "external"
# mode (no signing/notarization secrets). Produces a .dmg artifact.
name: tauri-build-macos-external
on:
  workflow_call:
    inputs:
      ref:
        required: true
        type: string
        # NOTE(review): `default` is ignored for required inputs.
        default: 'refs/heads/main'
      new_version:
        required: true
        type: string
        default: ''
      channel:
        required: true
        type: string
        default: 'nightly'
        description: 'The channel to use for this job'
jobs:
  build-macos-external:
    runs-on: macos-latest
    steps:
      - name: Getting the repo
        # Bumped from checkout@v3 (node16 runtime, deprecated by GitHub).
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.ref }}
      - name: Replace Icons for Beta Build
        if: inputs.channel != 'stable'
        shell: bash
        run: |
          cp .github/scripts/icon-${{ inputs.channel }}.png src-tauri/icons/icon.png
      - name: Installing node
        # Bumped from setup-node@v1 (long deprecated).
        uses: actions/setup-node@v4
        with:
          node-version: 20
      - name: Install jq
        uses: dcarbone/install-jq-action@v2.0.1
      - name: Install ctoml
        run: |
          cargo install ctoml
      # Download x64 + arm64 builds of bun and uv, then lipo them into
      # universal binaries bundled as Tauri resources.
      - name: Create bun and uv universal
        run: |
          mkdir -p ./src-tauri/resources/bin/
          cd ./src-tauri/resources/bin/
          curl -L -o bun-darwin-x64.zip https://github.com/oven-sh/bun/releases/download/bun-v1.2.10/bun-darwin-x64.zip
          curl -L -o bun-darwin-aarch64.zip https://github.com/oven-sh/bun/releases/download/bun-v1.2.10/bun-darwin-aarch64.zip
          unzip bun-darwin-x64.zip
          unzip bun-darwin-aarch64.zip
          lipo -create -output bun-universal-apple-darwin bun-darwin-x64/bun bun-darwin-aarch64/bun
          cp -f bun-darwin-aarch64/bun bun-aarch64-apple-darwin
          cp -f bun-darwin-x64/bun bun-x86_64-apple-darwin
          cp -f bun-universal-apple-darwin bun
          curl -L -o uv-x86_64.tar.gz https://github.com/astral-sh/uv/releases/download/0.6.17/uv-x86_64-apple-darwin.tar.gz
          curl -L -o uv-arm64.tar.gz https://github.com/astral-sh/uv/releases/download/0.6.17/uv-aarch64-apple-darwin.tar.gz
          tar -xzf uv-x86_64.tar.gz
          tar -xzf uv-arm64.tar.gz
          mv uv-x86_64-apple-darwin uv-x86_64
          mv uv-aarch64-apple-darwin uv-aarch64
          lipo -create -output uv-universal-apple-darwin uv-x86_64/uv uv-aarch64/uv
          cp -f uv-x86_64/uv uv-x86_64-apple-darwin
          cp -f uv-aarch64/uv uv-aarch64-apple-darwin
          cp -f uv-universal-apple-darwin uv
          ls -la
      # Stamp the requested version into tauri.conf.json, web-app/package.json
      # and Cargo.toml; for non-stable channels also rename the app/binary and
      # point the updater at the channel-specific endpoint.
      - name: Update app version
        run: |
          echo "Version: ${{ inputs.new_version }}"
          jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = false' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
          mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
          jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json
          mv /tmp/package.json web-app/package.json
          ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}"
          ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
          if [ "${{ inputs.channel }}" != "stable" ]; then
            jq '.plugins.updater.endpoints = ["https://delta.jan.ai/${{ inputs.channel }}/latest.json"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
            mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
            chmod +x .github/scripts/rename-tauri-app.sh
            .github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }}
            ctoml ./src-tauri/Cargo.toml package.name "Jan-${{ inputs.channel }}"
            # NOTE(review): "devtools" was already appended above, so
            # non-stable builds append it twice — harmless but redundant.
            ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
            chmod +x .github/scripts/rename-workspace.sh
            .github/scripts/rename-workspace.sh ./package.json ${{ inputs.channel }}
          fi
      - name: Build app
        run: |
          # x64 target is needed on top of the native arm64 toolchain to
          # produce the universal bundle.
          rustup target add x86_64-apple-darwin
          make build
        env:
          APP_PATH: '.'
      - name: Upload Artifact
        uses: actions/upload-artifact@v4
        with:
          name: jan-${{ inputs.channel }}-mac-universal-${{ inputs.new_version }}.dmg
          path: |
            ./src-tauri/target/universal-apple-darwin/release/bundle/dmg/*.dmg

View File

@ -0,0 +1,124 @@
# Reusable workflow: build Jan for Windows x64 in "external" mode — uses an
# echo stub in place of code signing. Produces an NSIS installer artifact.
name: tauri-build-windows-x64-external
on:
  workflow_call:
    inputs:
      ref:
        required: true
        type: string
        # NOTE(review): `default` is ignored for required inputs.
        default: 'refs/heads/main'
      new_version:
        required: true
        type: string
        default: ''
      channel:
        required: true
        type: string
        default: 'nightly'
        description: 'The channel to use for this job'
jobs:
  build-windows-x64-external:
    runs-on: windows-latest
    steps:
      - name: Getting the repo
        # Bumped from checkout@v3 (node16 runtime, deprecated by GitHub).
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.ref }}
      - name: Replace Icons for Beta Build
        if: inputs.channel != 'stable'
        shell: bash
        run: |
          cp .github/scripts/icon-${{ inputs.channel }}.png src-tauri/icons/icon.png
      - name: Installing node
        # Bumped from setup-node@v1 (long deprecated).
        uses: actions/setup-node@v4
        with:
          node-version: 20
      - name: Install jq
        uses: dcarbone/install-jq-action@v2.0.1
      - name: Install ctoml
        run: |
          cargo install ctoml
      # Stamp the version everywhere, swap in the custom NSIS template, stub
      # out signing, and (for non-stable channels) rename the app/binary.
      - name: Update app version
        shell: bash
        run: |
          echo "Version: ${{ inputs.new_version }}"
          # Update tauri.conf.json
          jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = false' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
          mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
          jq '.bundle.windows.nsis.template = "tauri.bundle.windows.nsis.template"' ./src-tauri/tauri.windows.conf.json > /tmp/tauri.windows.conf.json
          mv /tmp/tauri.windows.conf.json ./src-tauri/tauri.windows.conf.json
          # External builds have no signing certificate: replace the sign
          # command with a no-op echo.
          jq '.bundle.windows.signCommand = "echo External build - skipping signature: %1"' ./src-tauri/tauri.windows.conf.json > /tmp/tauri.windows.conf.json
          mv /tmp/tauri.windows.conf.json ./src-tauri/tauri.windows.conf.json
          jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json
          mv /tmp/package.json web-app/package.json
          ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}"
          echo "---------Cargo.toml---------"
          cat ./src-tauri/Cargo.toml
          # Derives the base version and a 4-part Windows build version from
          # the semver-ish input, exported via new_base_version /
          # new_build_version.
          generate_build_version() {
            ### Example
            ### input 0.5.6 output will be 0.5.6 and 0.5.6.0
            ### input 0.5.6-rc2-beta output will be 0.5.6 and 0.5.6.2
            ### input 0.5.6-1213 output will be 0.5.6 and 0.5.6.1213
            local new_version="$1"
            local base_version
            local t_value
            # Check if it has a "-"
            if [[ "$new_version" == *-* ]]; then
              base_version="${new_version%%-*}" # part before -
              suffix="${new_version#*-}" # part after -
              # Check if it is rcX-beta
              if [[ "$suffix" =~ ^rc([0-9]+)-beta$ ]]; then
                t_value="${BASH_REMATCH[1]}"
              else
                t_value="$suffix"
              fi
            else
              base_version="$new_version"
              t_value="0"
            fi
            # Export two values
            new_base_version="$base_version"
            new_build_version="${base_version}.${t_value}"
          }
          generate_build_version ${{ inputs.new_version }}
          sed -i "s/jan_version/$new_base_version/g" ./src-tauri/tauri.bundle.windows.nsis.template
          sed -i "s/jan_build/$new_build_version/g" ./src-tauri/tauri.bundle.windows.nsis.template
          if [ "${{ inputs.channel }}" != "stable" ]; then
            jq '.plugins.updater.endpoints = ["https://delta.jan.ai/${{ inputs.channel }}/latest.json"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
            mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
            chmod +x .github/scripts/rename-tauri-app.sh
            .github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }}
            ctoml ./src-tauri/Cargo.toml package.name "Jan-${{ inputs.channel }}"
            ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
            chmod +x .github/scripts/rename-workspace.sh
            .github/scripts/rename-workspace.sh ./package.json ${{ inputs.channel }}
            sed -i "s/jan_productname/Jan-${{ inputs.channel }}/g" ./src-tauri/tauri.bundle.windows.nsis.template
            sed -i "s/jan_mainbinaryname/jan-${{ inputs.channel }}/g" ./src-tauri/tauri.bundle.windows.nsis.template
          else
            sed -i "s/jan_productname/Jan/g" ./src-tauri/tauri.bundle.windows.nsis.template
            sed -i "s/jan_mainbinaryname/jan/g" ./src-tauri/tauri.bundle.windows.nsis.template
          fi
          echo "---------nsis.template---------"
          cat ./src-tauri/tauri.bundle.windows.nsis.template
      - name: Build app
        shell: bash
        run: |
          make build
      - name: Upload Artifact
        uses: actions/upload-artifact@v4
        with:
          name: jan-windows-${{ inputs.new_version }}
          path: |
            ./src-tauri/target/release/bundle/nsis/*.exe

View File

@ -1,186 +0,0 @@
name: Update Cloud API Spec
on:
# Manual trigger with options
workflow_dispatch:
inputs:
commit_changes:
description: 'Commit changes to repository'
required: false
default: 'true'
type: choice
options:
- 'true'
- 'false'
spec_url:
description: 'Custom API spec URL (optional)'
required: false
type: string
create_pr:
description: 'Create pull request for changes'
required: false
default: 'false'
type: choice
options:
- 'true'
- 'false'
# Scheduled updates - runs daily at 2 AM UTC
schedule:
- cron: '0 2 * * *'
# Can be triggered by repository dispatch (webhook from Jan Server)
repository_dispatch:
types: [update-api-spec]
jobs:
update-spec:
name: Update Jan Server API Specification
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
- name: Install dependencies
working-directory: website
run: bun install
- name: Configure Git
run: |
git config --global user.email "github-actions[bot]@users.noreply.github.com"
git config --global user.name "github-actions[bot]"
- name: Update API Specification
id: update_spec
working-directory: website
run: |
# Set custom spec URL if provided
if [ -n "${{ github.event.inputs.spec_url }}" ]; then
export JAN_SERVER_SPEC_URL="${{ github.event.inputs.spec_url }}"
echo "📡 Using custom spec URL: $JAN_SERVER_SPEC_URL"
elif [ -n "${{ github.event.client_payload.spec_url }}" ]; then
export JAN_SERVER_SPEC_URL="${{ github.event.client_payload.spec_url }}"
echo "📡 Using webhook spec URL: $JAN_SERVER_SPEC_URL"
else
export JAN_SERVER_SPEC_URL="${{ secrets.JAN_SERVER_SPEC_URL || 'https://api.jan.ai/api/swagger/doc.json' }}"
echo "📡 Using default spec URL: $JAN_SERVER_SPEC_URL"
fi
# Force update the spec
export FORCE_UPDATE=true
bun run generate:cloud-spec
# Check if there are changes
if git diff --quiet public/openapi/cloud-openapi.json; then
echo "✅ No changes to API specification"
echo "has_changes=false" >> $GITHUB_OUTPUT
else
echo "📝 API specification has been updated"
echo "has_changes=true" >> $GITHUB_OUTPUT
# Get summary of changes
echo "### Changes Summary" >> $GITHUB_STEP_SUMMARY
echo '```diff' >> $GITHUB_STEP_SUMMARY
git diff --stat public/openapi/cloud-openapi.json >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
fi
env:
JAN_SERVER_PROD_URL: ${{ secrets.JAN_SERVER_PROD_URL || 'https://api.jan.ai/v1' }}
JAN_SERVER_STAGING_URL: ${{ secrets.JAN_SERVER_STAGING_URL || 'https://staging-api.jan.ai/v1' }}
- name: Create Pull Request
if: |
steps.update_spec.outputs.has_changes == 'true' &&
(github.event.inputs.create_pr == 'true' || github.event_name == 'repository_dispatch')
uses: peter-evans/create-pull-request@v5
with:
token: ${{ secrets.GITHUB_TOKEN }}
commit-message: "chore: update Jan Server API specification"
title: "chore: update Jan Server API specification"
body: |
## 🤖 Automated API Spec Update
This PR updates the Jan Server API specification.
### Trigger Information
- **Event**: ${{ github.event_name }}
- **Triggered by**: ${{ github.actor }}
- **Timestamp**: ${{ github.event.head_commit.timestamp || github.event.repository.updated_at }}
### What's Changed
The OpenAPI specification for Jan Server has been updated with the latest endpoints and schemas.
### Review Checklist
- [ ] API endpoints are correctly documented
- [ ] Authentication requirements are accurate
- [ ] Model examples are up to date
- [ ] Breaking changes are noted (if any)
---
*This PR was automatically generated by the API spec update workflow.*
branch: update-api-spec-${{ github.run_number }}
delete-branch: true
labels: |
documentation
api
automated
- name: Commit and Push Changes
if: |
steps.update_spec.outputs.has_changes == 'true' &&
github.event.inputs.commit_changes != 'false' &&
github.event.inputs.create_pr != 'true' &&
github.event_name != 'repository_dispatch'
run: |
cd website
git add public/openapi/cloud-openapi.json
git commit -m "chore: update Jan Server API specification [skip ci]
Event: ${{ github.event_name }}
Triggered by: ${{ github.actor }}"
# Only push to dev branch if it's a scheduled run
if [ "${{ github.event_name }}" = "schedule" ] && [ "${{ github.ref }}" = "refs/heads/dev" ]; then
git push origin HEAD:dev
echo "✅ Changes committed to dev branch"
elif [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
git push origin HEAD:${{ github.ref_name }}
echo "✅ Changes committed to ${{ github.ref_name }} branch"
else
echo " Changes prepared but not pushed (event: ${{ github.event_name }})"
fi
- name: Send Notification
if: steps.update_spec.outputs.has_changes == 'true'
continue-on-error: true
run: |
echo "📬 API specification updated successfully"
# You can add Slack/Discord notification here if needed
# Example webhook call:
# curl -X POST ${{ secrets.SLACK_WEBHOOK_URL }} \
# -H 'Content-Type: application/json' \
# -d '{"text": "Jan Server API spec has been updated"}'
- name: Summary
if: always()
run: |
echo "## Workflow Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "- **Status**: ${{ steps.update_spec.outputs.has_changes == 'true' && '✅ Updated' || '⏭️ No changes' }}" >> $GITHUB_STEP_SUMMARY
echo "- **Event**: ${{ github.event_name }}" >> $GITHUB_STEP_SUMMARY
echo "- **Branch**: ${{ github.ref_name }}" >> $GITHUB_STEP_SUMMARY
echo "- **Commit changes**: ${{ github.event.inputs.commit_changes || 'auto' }}" >> $GITHUB_STEP_SUMMARY
echo "- **Create PR**: ${{ github.event.inputs.create_pr || 'false' }}" >> $GITHUB_STEP_SUMMARY

2
.gitignore vendored
View File

@ -13,6 +13,7 @@ core/lib/**
.yarnrc
*.tsbuildinfo
test_results.html
pre-install
# docs
docs/yarn.lock
@ -54,6 +55,7 @@ docs/.next/
## cargo
target
Cargo.lock
src-tauri/resources/
## test
test-data

View File

@ -40,9 +40,11 @@ install-web-app: config-yarn
yarn install
dev-web-app: install-web-app
yarn build:core
yarn dev:web-app
build-web-app: install-web-app
yarn build:core
yarn build:web-app
serve-web-app:
@ -65,6 +67,7 @@ test: lint
cargo test --manifest-path src-tauri/Cargo.toml --no-default-features --features test-tauri -- --test-threads=1
cargo test --manifest-path src-tauri/plugins/tauri-plugin-hardware/Cargo.toml
cargo test --manifest-path src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml
cargo test --manifest-path src-tauri/utils/Cargo.toml
# Builds and publishes the app
build-and-publish: install-and-build

View File

@ -7,6 +7,7 @@ export enum ExtensionTypeEnum {
Inference = 'inference',
Model = 'model',
SystemMonitoring = 'systemMonitoring',
MCP = 'mcp',
HuggingFace = 'huggingFace',
Engine = 'engine',
Hardware = 'hardware',

View File

@ -14,6 +14,11 @@ export { InferenceExtension } from './inference'
*/
export { AssistantExtension } from './assistant'
/**
* MCP extension for managing tools and server communication.
*/
export { MCPExtension } from './mcp'
/**
* Base AI Engines.
*/

View File

@ -0,0 +1,99 @@
import { describe, it, expect, beforeEach } from 'vitest'
import { MCPExtension } from './mcp'
import { ExtensionTypeEnum } from '../extension'
import { MCPTool, MCPToolCallResult } from '../../types'

/**
 * Minimal concrete subclass of the abstract MCPExtension. Every abstract
 * member returns a fixed, deterministic value so the base-class contract
 * can be exercised without a real MCP server.
 */
class TestMCPExtension extends MCPExtension {
  constructor() {
    // NOTE(review): arguments presumably map to BaseExtension's
    // (url, name) parameters — confirm against BaseExtension's constructor.
    super('test://mcp', 'test-mcp')
  }

  // Advertises a single canned tool hosted by 'test-server'.
  async getTools(): Promise<MCPTool[]> {
    return [
      {
        name: 'test_tool',
        description: 'A test tool',
        inputSchema: { type: 'object' },
        server: 'test-server'
      }
    ]
  }

  // Echoes the call back as a single text content item; the empty `error`
  // string indicates success.
  async callTool(toolName: string, args: Record<string, unknown>): Promise<MCPToolCallResult> {
    return {
      error: '',
      content: [{ type: 'text', text: `Called ${toolName} with ${JSON.stringify(args)}` }]
    }
  }

  async getConnectedServers(): Promise<string[]> {
    return ['test-server']
  }

  async refreshTools(): Promise<void> {
    // Mock implementation
  }

  async isHealthy(): Promise<boolean> {
    return true
  }

  async onLoad(): Promise<void> {
    // Mock implementation
  }

  onUnload(): void {
    // Mock implementation
  }
}

describe('MCPExtension', () => {
  let mcpExtension: TestMCPExtension

  // Fresh instance per test so state cannot leak between cases.
  beforeEach(() => {
    mcpExtension = new TestMCPExtension()
  })

  describe('type', () => {
    it('should return MCP extension type', () => {
      expect(mcpExtension.type()).toBe(ExtensionTypeEnum.MCP)
    })
  })

  describe('getTools', () => {
    it('should return array of MCP tools', async () => {
      const tools = await mcpExtension.getTools()
      expect(tools).toHaveLength(1)
      expect(tools[0]).toEqual({
        name: 'test_tool',
        description: 'A test tool',
        inputSchema: { type: 'object' },
        server: 'test-server'
      })
    })
  })

  describe('callTool', () => {
    it('should call tool and return result', async () => {
      const result = await mcpExtension.callTool('test_tool', { param: 'value' })
      expect(result).toEqual({
        error: '',
        content: [{ type: 'text', text: 'Called test_tool with {"param":"value"}' }]
      })
    })
  })

  describe('getConnectedServers', () => {
    it('should return list of connected servers', async () => {
      const servers = await mcpExtension.getConnectedServers()
      expect(servers).toEqual(['test-server'])
    })
  })

  describe('isHealthy', () => {
    it('should return health status', async () => {
      const healthy = await mcpExtension.isHealthy()
      expect(healthy).toBe(true)
    })
  })
})

View File

@ -0,0 +1,21 @@
import { MCPInterface, MCPTool, MCPToolCallResult } from '../../types'
import { BaseExtension, ExtensionTypeEnum } from '../extension'

/**
 * MCP (Model Context Protocol) extension for managing tools and server communication.
 * Concrete subclasses supply the actual transport to MCP servers.
 * @extends BaseExtension
 */
export abstract class MCPExtension extends BaseExtension implements MCPInterface {
  /**
   * MCP extension type.
   */
  type(): ExtensionTypeEnum | undefined {
    return ExtensionTypeEnum.MCP
  }

  /** Returns all MCP tools currently available to this extension. */
  abstract getTools(): Promise<MCPTool[]>

  /**
   * Invokes the named tool with the given arguments.
   * @param toolName - name of the tool to call
   * @param args - tool arguments, keyed by parameter name
   * @returns the tool's result (content items plus any error message)
   */
  abstract callTool(toolName: string, args: Record<string, unknown>): Promise<MCPToolCallResult>

  /** Returns the names of MCP servers currently connected. */
  abstract getConnectedServers(): Promise<string[]>

  /** Refreshes the set of available tools. */
  abstract refreshTools(): Promise<void>

  /** Reports whether the MCP service is currently healthy. */
  abstract isHealthy(): Promise<boolean>
}

View File

@ -10,3 +10,4 @@ export * from './api'
export * from './setting'
export * from './engine'
export * from './hardware'
export * from './mcp'

View File

@ -0,0 +1,2 @@
/**
 * Barrel file: re-exports the MCP entity and interface type definitions.
 */
export * from './mcpEntity'
export * from './mcpInterface'

View File

@ -0,0 +1,24 @@
/**
 * MCP (Model Context Protocol) entities
 */

/** A tool exposed by an MCP server. */
export interface MCPTool {
  // Tool name, as advertised by its server.
  name: string
  // Human-readable description of what the tool does.
  description: string
  // Schema describing the tool's input; exact shape is defined by the
  // advertising server.
  inputSchema: Record<string, unknown>
  // Name of the server that provides this tool.
  server: string
}

/** Result of invoking an MCP tool. */
export interface MCPToolCallResult {
  // Error message; empty when the call succeeded (see mcp extension tests).
  error: string
  // Content items produced by the tool. `type` is optional (e.g. 'text').
  content: Array<{
    type?: string
    text: string
  }>
}

/** Summary of a single MCP server and its connection state. */
export interface MCPServerInfo {
  name: string
  connected: boolean
  // Tools offered by this server, when known.
  tools?: MCPTool[]
}

View File

@ -0,0 +1,32 @@
/**
 * MCP (Model Context Protocol) interface
 *
 * Contract implemented by MCP extensions (see MCPExtension).
 */
import { MCPTool, MCPToolCallResult } from './mcpEntity'

export interface MCPInterface {
  /**
   * Get all available MCP tools
   */
  getTools(): Promise<MCPTool[]>

  /**
   * Call a specific MCP tool
   * @param toolName - name of the tool to invoke
   * @param args - tool arguments, keyed by parameter name
   */
  callTool(toolName: string, args: Record<string, unknown>): Promise<MCPToolCallResult>

  /**
   * Get list of connected MCP servers
   */
  getConnectedServers(): Promise<string[]>

  /**
   * Refresh the list of available tools
   */
  refreshTools(): Promise<void>

  /**
   * Check if MCP service is healthy
   */
  isHealthy(): Promise<boolean>
}

View File

@ -30,5 +30,8 @@
"peerDependencies": {
"@janhq/core": "*",
"zustand": "^5.0.0"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.17.5"
}
}

View File

@ -8,6 +8,7 @@ import type { WebExtensionRegistry } from './types'
export { default as AssistantExtensionWeb } from './assistant-web'
export { default as ConversationalExtensionWeb } from './conversational-web'
export { default as JanProviderWeb } from './jan-provider-web'
export { default as MCPExtensionWeb } from './mcp-web'
// Re-export types
export type {
@ -17,7 +18,8 @@ export type {
WebExtensionLoader,
AssistantWebModule,
ConversationalWebModule,
JanProviderWebModule
JanProviderWebModule,
MCPWebModule
} from './types'
// Extension registry for dynamic loading
@ -25,4 +27,5 @@ export const WEB_EXTENSIONS: WebExtensionRegistry = {
'assistant-web': () => import('./assistant-web'),
'conversational-web': () => import('./conversational-web'),
'jan-provider-web': () => import('./jan-provider-web'),
'mcp-web': () => import('./mcp-web'),
}

View File

@ -3,7 +3,7 @@
* Handles API requests to Jan backend for models and chat completions
*/
import { JanAuthService } from './auth'
import { getSharedAuthService, JanAuthService } from '../shared'
import { JanModel, janProviderStore } from './store'
// JAN_API_BASE is defined in vite.config.ts
@ -18,6 +18,7 @@ export interface JanChatMessage {
content: string
reasoning?: string
reasoning_content?: string
tool_calls?: any[]
}
export interface JanChatCompletionRequest {
@ -30,6 +31,8 @@ export interface JanChatCompletionRequest {
presence_penalty?: number
stream?: boolean
stop?: string | string[]
tools?: any[]
tool_choice?: any
}
export interface JanChatCompletionChoice {
@ -63,6 +66,7 @@ export interface JanChatCompletionChunk {
content?: string
reasoning?: string
reasoning_content?: string
tool_calls?: any[]
}
finish_reason: string | null
}>
@ -73,7 +77,7 @@ export class JanApiClient {
private authService: JanAuthService
private constructor() {
this.authService = JanAuthService.getInstance()
this.authService = getSharedAuthService()
}
static getInstance(): JanApiClient {
@ -83,40 +87,12 @@ export class JanApiClient {
return JanApiClient.instance
}
private async makeAuthenticatedRequest<T>(
url: string,
options: RequestInit = {}
): Promise<T> {
try {
const authHeader = await this.authService.getAuthHeader()
const response = await fetch(url, {
...options,
headers: {
'Content-Type': 'application/json',
...authHeader,
...options.headers,
},
})
if (!response.ok) {
const errorText = await response.text()
throw new Error(`API request failed: ${response.status} ${response.statusText} - ${errorText}`)
}
return response.json()
} catch (error) {
console.error('API request failed:', error)
throw error
}
}
async getModels(): Promise<JanModel[]> {
try {
janProviderStore.setLoadingModels(true)
janProviderStore.clearError()
const response = await this.makeAuthenticatedRequest<JanModelsResponse>(
const response = await this.authService.makeAuthenticatedRequest<JanModelsResponse>(
`${JAN_API_BASE}/models`
)
@ -138,7 +114,7 @@ export class JanApiClient {
try {
janProviderStore.clearError()
return await this.makeAuthenticatedRequest<JanChatCompletionResponse>(
return await this.authService.makeAuthenticatedRequest<JanChatCompletionResponse>(
`${JAN_API_BASE}/chat/completions`,
{
method: 'POST',
@ -240,12 +216,9 @@ export class JanApiClient {
async initialize(): Promise<void> {
try {
await this.authService.initialize()
janProviderStore.setAuthenticated(true)
// Fetch initial models
await this.getModels()
console.log('Jan API client initialized successfully')
} catch (error) {
const errorMessage = error instanceof Error ? error.message : 'Failed to initialize API client'

View File

@ -62,6 +62,7 @@ export default class JanProviderWeb extends AIEngine {
path: undefined, // Remote model, no local path
owned_by: model.owned_by,
object: model.object,
capabilities: ['tools'], // Jan models support both tools via MCP
}))
} catch (error) {
console.error('Failed to list Jan models:', error)
@ -150,6 +151,8 @@ export default class JanProviderWeb extends AIEngine {
presence_penalty: opts.presence_penalty ?? undefined,
stream: opts.stream ?? false,
stop: opts.stop ?? undefined,
tools: opts.tools ?? undefined,
tool_choice: opts.tool_choice ?? undefined,
}
if (opts.stream) {
@ -176,6 +179,7 @@ export default class JanProviderWeb extends AIEngine {
content: choice.message.content,
reasoning: choice.message.reasoning,
reasoning_content: choice.message.reasoning_content,
tool_calls: choice.message.tool_calls,
},
finish_reason: (choice.finish_reason || 'stop') as 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call',
})),
@ -233,6 +237,7 @@ export default class JanProviderWeb extends AIEngine {
content: choice.delta.content,
reasoning: choice.delta.reasoning,
reasoning_content: choice.delta.reasoning_content,
tool_calls: choice.delta.tool_calls,
},
finish_reason: choice.finish_reason,
})),
@ -300,8 +305,9 @@ export default class JanProviderWeb extends AIEngine {
return Array.from(this.activeSessions.values()).map(session => session.model_id)
}
async isToolSupported(): Promise<boolean> {
// Tools are not yet supported
return false
async isToolSupported(modelId: string): Promise<boolean> {
// Jan models support tool calls via MCP
console.log(`Checking tool support for Jan model ${modelId}: supported`);
return true;
}
}

View File

@ -0,0 +1,235 @@
/**
* MCP Web Extension
* Provides Model Context Protocol functionality for web platform
* Uses official MCP TypeScript SDK with proper session handling
*/
import { MCPExtension, MCPTool, MCPToolCallResult } from '@janhq/core'
import { getSharedAuthService, JanAuthService } from '../shared'
import { Client } from '@modelcontextprotocol/sdk/client/index.js'
import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'
import { JanMCPOAuthProvider } from './oauth-provider'
// JAN_API_BASE is defined in vite.config.ts (defaults to 'https://api-dev.jan.ai/jan/v1')
declare const JAN_API_BASE: string
/**
 * MCP Web Extension entry point.
 *
 * Bridges Jan's MCPExtension interface to a remote MCP server reached over
 * Streamable HTTP at `${JAN_API_BASE}/mcp`, using the official MCP
 * TypeScript SDK. Authentication is delegated to the shared JanAuthService
 * through a JanMCPOAuthProvider, so token refresh is handled by the
 * transport automatically.
 */
export default class MCPExtensionWeb extends MCPExtension {
  // Path appended to JAN_API_BASE to reach the MCP endpoint.
  private mcpEndpoint = '/mcp'
  // Tool list cached from the last successful tools/list call.
  private tools: MCPTool[] = []
  // True once initializeTools() has completed successfully.
  private initialized = false
  private authService: JanAuthService
  // SDK client; null until initializeMCPClient() connects, and after onUnload().
  private mcpClient: Client | null = null
  private oauthProvider: JanMCPOAuthProvider

  constructor(
    url: string,
    name: string,
    productName?: string,
    active?: boolean,
    description?: string,
    version?: string
  ) {
    super(url, name, productName, active, description, version)
    // Share one auth service instance across all web extensions.
    this.authService = getSharedAuthService()
    this.oauthProvider = new JanMCPOAuthProvider(this.authService)
  }

  /**
   * Connects the MCP client and fetches the initial tool list.
   * Failures are logged and leave the extension loaded with zero tools.
   */
  async onLoad(): Promise<void> {
    try {
      // Initialize MCP client with OAuth
      await this.initializeMCPClient()

      // Then fetch tools
      await this.initializeTools()
    } catch (error) {
      console.warn('Failed to initialize MCP extension:', error)
      this.tools = []
    }
  }

  /** Drops cached state and closes the MCP client connection. */
  async onUnload(): Promise<void> {
    this.tools = []
    this.initialized = false

    // Close MCP client
    if (this.mcpClient) {
      try {
        await this.mcpClient.close()
      } catch (error) {
        console.warn('Error closing MCP client:', error)
      }
      this.mcpClient = null
    }
  }

  /**
   * (Re)creates the SDK client and connects it to the MCP endpoint.
   * Any existing client is closed first, so this is safe to call again
   * after a connection failure. Throws if the connection cannot be made.
   */
  private async initializeMCPClient(): Promise<void> {
    try {
      // Close existing client if any
      if (this.mcpClient) {
        try {
          await this.mcpClient.close()
        } catch (error) {
          // Ignore close errors
        }
        this.mcpClient = null
      }

      // Create transport with OAuth provider (handles token refresh automatically)
      const transport = new StreamableHTTPClientTransport(
        new URL(`${JAN_API_BASE}${this.mcpEndpoint}`),
        {
          authProvider: this.oauthProvider
          // No sessionId needed - server will generate one automatically
        }
      )

      // Create MCP client
      this.mcpClient = new Client(
        {
          name: 'jan-web-client',
          version: '1.0.0'
        },
        {
          capabilities: {
            tools: {},
            resources: {},
            prompts: {},
            logging: {}
          }
        }
      )

      // Connect to MCP server (OAuth provider handles auth automatically)
      await this.mcpClient.connect(transport)
      console.log('MCP client connected successfully, session ID:', transport.sessionId)
    } catch (error) {
      console.error('Failed to initialize MCP client:', error)
      throw error
    }
  }

  /**
   * Fetches the tool list from the server and caches it in `this.tools`.
   * No-op when already initialized or when the client is not connected.
   * On failure the cache is cleared, `initialized` stays false, and the
   * error is rethrown so callers can decide how to react.
   */
  private async initializeTools(): Promise<void> {
    if (this.initialized || !this.mcpClient) {
      return
    }

    try {
      // Use MCP SDK to list tools
      const result = await this.mcpClient.listTools()
      console.log('MCP tools/list response:', result)

      if (result.tools && Array.isArray(result.tools)) {
        this.tools = result.tools.map((tool) => ({
          name: tool.name,
          description: tool.description || '',
          inputSchema: (tool.inputSchema || {}) as Record<string, unknown>,
          server: 'Jan MCP Server'
        }))
      } else {
        console.warn('No tools found in MCP server response')
        this.tools = []
      }

      this.initialized = true
      console.log(`Initialized MCP extension with ${this.tools.length} tools:`, this.tools.map(t => t.name))
    } catch (error) {
      console.error('Failed to fetch MCP tools:', error)
      this.tools = []
      this.initialized = false
      throw error
    }
  }

  /** Returns cached tools, lazily fetching them on first call. */
  async getTools(): Promise<MCPTool[]> {
    if (!this.initialized) {
      await this.initializeTools()
    }
    return this.tools
  }

  /**
   * Invokes a tool on the MCP server and normalizes the result.
   *
   * Never throws: connection, server-side (`isError`) and transport
   * failures are all reported through the returned `error` field with a
   * text content part. Non-text content items are JSON-stringified.
   */
  async callTool(toolName: string, args: Record<string, unknown>): Promise<MCPToolCallResult> {
    if (!this.mcpClient) {
      return {
        error: 'MCP client not initialized',
        content: [{ type: 'text', text: 'MCP client not initialized' }]
      }
    }

    try {
      // Use MCP SDK to call tool (OAuth provider handles auth automatically)
      const result = await this.mcpClient.callTool({
        name: toolName,
        arguments: args
      })

      console.log(`MCP tool call result for ${toolName}:`, result)

      // Handle tool call result
      if (result.isError) {
        const errorText = Array.isArray(result.content) && result.content.length > 0
          ? (result.content[0].type === 'text' ? (result.content[0] as any).text : 'Tool call failed')
          : 'Tool call failed'

        return {
          error: errorText,
          content: [{ type: 'text', text: errorText }]
        }
      }

      // Convert MCP content to Jan's format
      const content = Array.isArray(result.content)
        ? result.content.map(item => {
            if (item.type === 'text') {
              return { type: 'text' as const, text: (item as any).text }
            } else {
              // For non-text types, convert to text representation
              return { type: 'text' as const, text: JSON.stringify(item) }
            }
          })
        : [{ type: 'text' as const, text: 'No content returned' }]

      return {
        error: '',
        content
      }
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error)
      console.error(`Failed to call MCP tool ${toolName}:`, error)
      return {
        error: errorMessage,
        content: [{ type: 'text', text: errorMessage }]
      }
    }
  }

  /** True when a tools/list round-trip to the server currently succeeds. */
  async isHealthy(): Promise<boolean> {
    if (!this.mcpClient) {
      return false
    }

    try {
      // Try to list tools as health check (OAuth provider handles auth)
      await this.mcpClient.listTools()
      return true
    } catch (error) {
      console.warn('MCP health check failed:', error)
      return false
    }
  }

  /** Lists connected server names (single fixed server in this extension). */
  async getConnectedServers(): Promise<string[]> {
    // Return servers based on MCP client connection status
    return this.mcpClient && this.initialized ? ['Jan MCP Server'] : []
  }

  /** Forces a re-fetch of the tool list; rethrows on failure. */
  async refreshTools(): Promise<void> {
    this.initialized = false
    try {
      await this.initializeTools()
    } catch (error) {
      console.error('Failed to refresh tools:', error)
      throw error
    }
  }
}

View File

@ -0,0 +1,60 @@
import { OAuthClientProvider } from '@modelcontextprotocol/sdk/client/auth.js'
import { OAuthTokens, OAuthClientInformation, OAuthClientMetadata } from '@modelcontextprotocol/sdk/shared/auth.js'
import { JanAuthService } from '../shared/auth'
/**
 * MCP OAuth provider backed by Jan's shared auth service.
 *
 * Only token retrieval is implemented; storage, redirects and the
 * authorization-code flow are intentionally no-ops because the auth
 * service owns token persistence and refresh.
 */
export class JanMCPOAuthProvider implements OAuthClientProvider {
  private authService: JanAuthService

  constructor(authService: JanAuthService) {
    this.authService = authService
  }

  /** Unused: this provider never performs a redirect flow. */
  get redirectUrl(): string {
    return ''
  }

  /** Minimal metadata; `redirect_uris` is required by the interface. */
  get clientMetadata(): OAuthClientMetadata {
    return {
      redirect_uris: [] // Not used, but required by interface
    }
  }

  /** No pre-registered client information is available. */
  async clientInformation(): Promise<OAuthClientInformation | undefined> {
    return undefined
  }

  /**
   * Fetches a currently valid access token from the auth service.
   * Returns undefined (after a warning) when no token can be obtained.
   */
  async tokens(): Promise<OAuthTokens | undefined> {
    let accessToken: string | undefined
    try {
      accessToken = await this.authService.getValidAccessToken()
    } catch (error) {
      console.warn('Failed to get tokens from auth service:', error)
      return undefined
    }
    return accessToken
      ? { access_token: accessToken, token_type: 'Bearer' }
      : undefined
  }

  /** No-op: Jan auth service handles token storage. */
  async saveTokens(): Promise<void> {}

  /** No-op: not handling the authorization flow. */
  redirectToAuthorization(): void {}

  /** No-op: not handling the authorization flow. */
  async saveCodeVerifier(): Promise<void> {}

  /** PKCE is not used by this provider. */
  async codeVerifier(): Promise<string> {
    throw new Error('Code verifier not supported')
  }
}

View File

@ -0,0 +1,12 @@
/**
 * MCP Web Extension Types
 */

/**
 * Loosely-typed MCP API response payload.
 * All fields are optional because server responses vary by tool.
 */
export interface MCPApiResponse {
  // Content parts; `text` is expected when `type` is 'text'.
  content?: Array<{
    type?: string
    text?: string
  }>
  // Raw result value, either a plain string or a structured object.
  result?: string | object
}

View File

@ -1,8 +1,11 @@
/**
* Jan Provider Authentication Service
* Shared Authentication Service
* Handles guest login and token refresh for Jan API
*/
// JAN_API_BASE is defined in vite.config.ts
declare const JAN_API_BASE: string
export interface AuthTokens {
access_token: string
expires_in: number
@ -13,26 +16,17 @@ export interface AuthResponse {
expires_in: number
}
// JAN_API_BASE is defined in vite.config.ts
const AUTH_STORAGE_KEY = 'jan_auth_tokens'
const TOKEN_EXPIRY_BUFFER = 60 * 1000 // 1 minute buffer before actual expiry
export class JanAuthService {
private static instance: JanAuthService
private tokens: AuthTokens | null = null
private tokenExpiryTime: number = 0
private constructor() {
constructor() {
this.loadTokensFromStorage()
}
static getInstance(): JanAuthService {
if (!JanAuthService.instance) {
JanAuthService.instance = new JanAuthService()
}
return JanAuthService.instance
}
private loadTokensFromStorage(): void {
try {
const storedTokens = localStorage.getItem(AUTH_STORAGE_KEY)
@ -167,16 +161,6 @@ export class JanAuthService {
return this.tokens.access_token
}
async initialize(): Promise<void> {
try {
await this.getValidAccessToken()
console.log('Jan auth service initialized successfully')
} catch (error) {
console.error('Failed to initialize Jan auth service:', error)
throw error
}
}
async getAuthHeader(): Promise<{ Authorization: string }> {
const token = await this.getValidAccessToken()
return {
@ -184,7 +168,52 @@ export class JanAuthService {
}
}
/**
 * Performs a JSON HTTP request with the Authorization header attached.
 *
 * Merges caller-supplied `options.headers` over the defaults, throws on
 * any non-2xx status (including the response body text in the message),
 * and logs every failure before rethrowing it to the caller.
 */
async makeAuthenticatedRequest<T>(
  url: string,
  options: RequestInit = {}
): Promise<T> {
  try {
    const headers = {
      'Content-Type': 'application/json',
      ...(await this.getAuthHeader()),
      ...options.headers,
    }
    const response = await fetch(url, { ...options, headers })

    if (response.ok) {
      return response.json()
    }

    const errorText = await response.text()
    throw new Error(`API request failed: ${response.status} ${response.statusText} - ${errorText}`)
  } catch (error) {
    console.error('API request failed:', error)
    throw error
  }
}
/** Discards the stored auth tokens for this session. */
logout(): void {
  this.clearTokens()
}
}
declare global {
interface Window {
janAuthService?: JanAuthService
}
}
/**
 * Returns the process-wide JanAuthService singleton.
 *
 * The instance is created lazily and cached on `window.janAuthService`
 * so every web extension shares the same authentication state.
 */
export function getSharedAuthService(): JanAuthService {
  const existing = window.janAuthService
  if (existing !== undefined) {
    return existing
  }
  const service = new JanAuthService()
  window.janAuthService = service
  return service
}

View File

@ -0,0 +1,3 @@
export { getSharedDB } from './db'
export { JanAuthService, getSharedAuthService } from './auth'
export type { AuthTokens, AuthResponse } from './auth'

View File

@ -2,7 +2,7 @@
* Web Extension Types
*/
import type { AssistantExtension, ConversationalExtension, BaseExtension, AIEngine } from '@janhq/core'
import type { AssistantExtension, ConversationalExtension, BaseExtension, AIEngine, MCPExtension } from '@janhq/core'
type ExtensionConstructorParams = ConstructorParameters<typeof BaseExtension>
@ -18,12 +18,17 @@ export interface JanProviderWebModule {
default: new (...args: ExtensionConstructorParams) => AIEngine
}
export type WebExtensionModule = AssistantWebModule | ConversationalWebModule | JanProviderWebModule
export interface MCPWebModule {
default: new (...args: ExtensionConstructorParams) => MCPExtension
}
export type WebExtensionModule = AssistantWebModule | ConversationalWebModule | JanProviderWebModule | MCPWebModule
export interface WebExtensionRegistry {
'assistant-web': () => Promise<AssistantWebModule>
'conversational-web': () => Promise<ConversationalWebModule>
'jan-provider-web': () => Promise<JanProviderWebModule>
'mcp-web': () => Promise<MCPWebModule>
}
export type WebExtensionName = keyof WebExtensionRegistry

View File

@ -14,6 +14,6 @@ export default defineConfig({
emptyOutDir: false // Don't clean the output directory
},
define: {
JAN_API_BASE: JSON.stringify(process.env.JAN_API_BASE || 'https://api-dev.jan.ai/jan/v1'),
JAN_API_BASE: JSON.stringify(process.env.JAN_API_BASE || 'https://api-dev.jan.ai/v1'),
}
})

View File

@ -36,6 +36,21 @@
"controllerType": "checkbox",
"controllerProps": { "value": true }
},
{
"key": "memory_util",
"title": "Smart Memory utilization",
"description": "Smart memory utilization mode for running local GGUF models",
"controllerType": "dropdown",
"controllerProps": {
"value": "high",
"options": [
{ "value": "high", "name": "High" },
{ "value": "medium", "name": "Medium" },
{ "value": "low", "name": "Low" }
],
"recommended": "high"
}
},
{
"key": "threads",
"title": "Threads",
@ -178,15 +193,6 @@
"value": false
}
},
{
"key": "no_kv_offload",
"title": "Disable KV Offload",
"description": "Disable KV cache offload to GPU (if GPU is used).",
"controllerType": "checkbox",
"controllerProps": {
"value": false
}
},
{
"key": "cache_type_k",
"title": "KV Cache K Type",

View File

@ -1,9 +1,90 @@
import { getJanDataFolderPath, fs, joinPath, events } from '@janhq/core'
import { invoke } from '@tauri-apps/api/core'
import { getProxyConfig } from './util'
import { dirname } from '@tauri-apps/api/path'
import { dirname, basename } from '@tauri-apps/api/path'
import { getSystemInfo } from '@janhq/tauri-plugin-hardware-api'
/*
 * Reads currently installed backends in janDataFolderPath.
 *
 * Scans <janDataFolder>/llamacpp/backends/<version>/<backendType> and
 * returns one { version, backend } entry per backend directory that
 * passes the isBackendInstalled() check. Missing or empty directories
 * yield an empty list rather than an error.
 */
export async function getLocalInstalledBackends(): Promise<
  { version: string; backend: string }[]
> {
  const local: Array<{ version: string; backend: string }> = []
  const janDataFolderPath = await getJanDataFolderPath()
  const backendsDir = await joinPath([
    janDataFolderPath,
    'llamacpp',
    'backends',
  ])

  // No backends folder means nothing has been installed yet.
  if (!(await fs.existsSync(backendsDir))) {
    return local
  }

  const versionDirs = await fs.readdirSync(backendsDir)
  // If the folder cannot be listed we are done.
  if (!versionDirs) {
    return local
  }

  for (const version of versionDirs) {
    const versionPath = await joinPath([backendsDir, version])
    const versionName = await basename(versionPath)

    // Check if versionPath is actually a directory before reading it;
    // stray files in the backends folder are skipped.
    const versionStat = await fs.fileStat(versionPath)
    if (!versionStat?.isDirectory) {
      continue
    }

    const backendTypes = await fs.readdirSync(versionPath)
    // Verify that the backend is really installed before reporting it.
    for (const backendType of backendTypes) {
      const backendName = await basename(backendType)
      // NOTE(review): basename() is applied above, which suggests
      // readdirSync may return full paths here — confirm that
      // isBackendInstalled() expects `backendType` in that same form.
      if (await isBackendInstalled(backendType, versionName)) {
        local.push({ version: versionName, backend: backendName })
      }
    }
  }

  // Label the debug output so it is identifiable in the console.
  console.debug('Locally installed llamacpp backends:', local)
  return local
}
/*
 * Lists backend builds available from the remote release feed.
 *
 * Fetches the repository releases, keeps the 10 newest tags, and
 * returns a { version, backend } entry for every asset named
 * `llama-<tag>-bin-<backend>.tar.gz` whose <backend> appears in
 * `supportedBackends`.
 */
async function fetchRemoteSupportedBackends(
  supportedBackends: string[]
): Promise<{ version: string; backend: string }[]> {
  // Pull the latest releases from the repo.
  const { releases } = await _fetchGithubReleases('menloresearch', 'llama.cpp')

  // Newest tag first (lexicographic); keep only the latest 10 releases.
  const latest = [...releases]
    .sort((a, b) => b.tag_name.localeCompare(a.tag_name))
    .slice(0, 10)

  // Walk the assets and keep only those that match a supported backend.
  const remote: { version: string; backend: string }[] = []
  for (const release of latest) {
    const version = release.tag_name
    const prefix = `llama-${version}-bin-`
    for (const asset of release.assets) {
      if (!asset.name.startsWith(prefix)) continue
      const backend = asset.name.replace(prefix, '').replace('.tar.gz', '')
      if (supportedBackends.includes(backend)) {
        remote.push({ version, backend })
      }
    }
  }
  return remote
}
// folder structure
// <Jan's data folder>/llamacpp/backends/<backend_version>/<backend_type>
@ -76,31 +157,29 @@ export async function listSupportedBackends(): Promise<
} else if (sysType === 'macos-aarch64' || sysType === 'macos-arm64') {
supportedBackends.push('macos-arm64')
}
// get latest backends from Github
const remoteBackendVersions =
await fetchRemoteSupportedBackends(supportedBackends)
const { releases } = await _fetchGithubReleases('menloresearch', 'llama.cpp')
releases.sort((a, b) => b.tag_name.localeCompare(a.tag_name))
releases.splice(10) // keep only the latest 10 releases
let backendVersions = []
for (const release of releases) {
const version = release.tag_name
const prefix = `llama-${version}-bin-`
// NOTE: there is checksum.yml. we can also download it to verify the download
for (const asset of release.assets) {
const name = asset.name
if (!name.startsWith(prefix)) {
continue
}
const backend = name.replace(prefix, '').replace('.tar.gz', '')
if (supportedBackends.includes(backend)) {
backendVersions.push({ version, backend })
}
}
// Get locally installed versions
const localBackendVersions = await getLocalInstalledBackends()
// Use a Map keyed by "${version}|${backend}" for O(1) deduplication.
const mergedMap = new Map<string, { version: string; backend: string }>()
for (const entry of remoteBackendVersions) {
mergedMap.set(`${entry.version}|${entry.backend}`, entry)
}
for (const entry of localBackendVersions) {
mergedMap.set(`${entry.version}|${entry.backend}`, entry)
}
return backendVersions
const merged = Array.from(mergedMap.values())
// Sort newest version first; if versions tie, sort by backend name
merged.sort((a, b) => {
const versionCmp = b.version.localeCompare(a.version)
return versionCmp !== 0 ? versionCmp : a.backend.localeCompare(b.backend)
})
return merged
}
export async function getBackendDir(
@ -279,9 +358,8 @@ async function _getSupportedFeatures() {
if (compareVersions(driverVersion, minCuda12DriverVersion) >= 0)
features.cuda12 = true
}
// Vulkan support check - only discrete GPUs with 6GB+ VRAM
if (gpuInfo.vulkan_info?.api_version && gpuInfo.total_memory >= 6 * 1024) {
// 6GB (total_memory is in MB)
// Vulkan support check
if (gpuInfo.vulkan_info?.api_version) {
features.vulkan = true
}
}
@ -299,21 +377,23 @@ async function _fetchGithubReleases(
const githubUrl = `https://api.github.com/repos/${owner}/${repo}/releases`
try {
const response = await fetch(githubUrl)
if (!response.ok) throw new Error(`GitHub error: ${response.status} ${response.statusText}`)
if (!response.ok)
throw new Error(`GitHub error: ${response.status} ${response.statusText}`)
const releases = await response.json()
return { releases, source: 'github' }
} catch (_err) {
const cdnUrl = 'https://catalog.jan.ai/llama.cpp/releases/releases.json'
const response = await fetch(cdnUrl)
if (!response.ok) {
throw new Error(`Failed to fetch releases from both sources. CDN error: ${response.status} ${response.statusText}`)
throw new Error(
`Failed to fetch releases from both sources. CDN error: ${response.status} ${response.statusText}`
)
}
const releases = await response.json()
return { releases, source: 'cdn' }
}
}
async function _isCudaInstalled(version: string): Promise<boolean> {
const sysInfo = await getSystemInfo()
const os_type = sysInfo.os_type

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -82,12 +82,12 @@ run = [
[tasks.dev-web-app]
description = "Start web application development server (matches Makefile)"
depends = ["install"]
depends = ["build-core"]
run = "yarn dev:web-app"
[tasks.build-web-app]
description = "Build web application (matches Makefile)"
depends = ["install"]
depends = ["build-core"]
run = "yarn build:web-app"
[tasks.serve-web-app]
@ -138,11 +138,56 @@ description = "Run linting (matches Makefile)"
depends = ["build-extensions"]
run = "yarn lint"
[tasks.test]
description = "Run test suite (matches Makefile)"
depends = ["lint"]
# ============================================================================
# RUST TEST COMPONENTS
# ============================================================================
[tasks.test-rust-main]
description = "Test main src-tauri package"
run = "cargo test --manifest-path src-tauri/Cargo.toml --no-default-features --features test-tauri -- --test-threads=1"
[tasks.test-rust-hardware]
description = "Test hardware plugin"
run = "cargo test --manifest-path src-tauri/plugins/tauri-plugin-hardware/Cargo.toml"
[tasks.test-rust-llamacpp]
description = "Test llamacpp plugin"
run = "cargo test --manifest-path src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml"
[tasks.test-rust-utils]
description = "Test utils package"
run = "cargo test --manifest-path src-tauri/utils/Cargo.toml"
[tasks.test-rust]
description = "Run all Rust tests"
depends = ["test-rust-main", "test-rust-hardware", "test-rust-llamacpp", "test-rust-utils"]
# ============================================================================
# JS TEST COMPONENTS
# ============================================================================
[tasks.test-js-setup]
description = "Setup for JS tests"
run = [
"yarn download:bin",
"yarn download:lib",
"yarn copy:assets:tauri",
"yarn build:icon"
]
[tasks.test-js]
description = "Run JS tests"
depends = ["test-js-setup"]
run = "yarn test"
# ============================================================================
# COMBINED TEST TASKS
# ============================================================================
[tasks.test]
description = "Run complete test suite (matches Makefile)"
depends = ["lint", "test-js", "test-rust"]
# ============================================================================
# PARALLEL-FRIENDLY QUALITY ASSURANCE TASKS
# ============================================================================
@ -155,8 +200,7 @@ hide = true
[tasks.test-only]
description = "Run tests only (parallel-friendly)"
depends = ["build-extensions"]
run = "yarn test"
depends = ["build-extensions", "test-js", "test-rust"]
hide = true
[tasks.qa-parallel]

View File

@ -1,6 +1,7 @@
{
"name": "jan-app",
"private": true,
"type": "module",
"workspaces": {
"packages": [
"core",
@ -41,19 +42,15 @@
"devDependencies": {
"@tauri-apps/cli": "^2.7.0",
"@vitest/coverage-v8": "^3.1.3",
"concurrently": "^9.1.0",
"cpx": "^1.5.0",
"cross-env": "^7.0.3",
"happy-dom": "^15.11.6",
"husky": "^9.1.5",
"jsdom": "^26.1.0",
"nyc": "^17.1.0",
"rimraf": "^3.0.2",
"run-script-os": "^1.1.6",
"tar": "^4.4.19",
"unzipper": "^0.12.3",
"vitest": "^3.1.3",
"wait-on": "^7.0.1"
"vitest": "^3.1.3"
},
"version": "0.0.0",
"installConfig": {
@ -62,9 +59,5 @@
"resolutions": {
"yallist": "4.0.0"
},
"packageManager": "yarn@4.5.3",
"dependencies": {
"@tanstack/react-virtual": "^3.13.12",
"download-cli": "^1.1.1"
}
"packageManager": "yarn@4.5.3"
}

View File

View File

@ -231,11 +231,6 @@ async function main() {
console.log('Downloads completed.')
}
// Ensure the downloads directory exists
if (!fs.existsSync('downloads')) {
fs.mkdirSync('downloads')
}
main().catch((err) => {
console.error('Error:', err)
process.exit(1)

2
src-tauri/Cargo.lock generated
View File

@ -2325,6 +2325,7 @@ dependencies = [
"tokio",
"tokio-util",
"url",
"windows-sys 0.60.2",
]
[[package]]
@ -5188,7 +5189,6 @@ dependencies = [
"tauri-plugin",
"thiserror 2.0.12",
"tokio",
"windows-sys 0.60.2",
]
[[package]]

View File

@ -25,13 +25,12 @@ thiserror = "2.0.12"
tokio = { version = "1", features = ["full"] }
reqwest = { version = "0.11", features = ["json", "blocking", "stream"] }
# Windows-specific dependencies
[target.'cfg(windows)'.dependencies]
windows-sys = { version = "0.60.2", features = ["Win32_Storage_FileSystem"] }
# Unix-specific dependencies
[target.'cfg(unix)'.dependencies]
nix = { version = "=0.30.1", features = ["signal", "process"] }
[dev-dependencies]
tempfile = "3.0"
[build-dependencies]
tauri-plugin = { version = "2.3.1", features = ["build"] }

View File

@ -228,3 +228,186 @@ fn parse_memory_value(mem_str: &str) -> ServerResult<i32> {
.into()
})
}
#[cfg(test)]
mod tests {
    //! Unit tests for the device-list parser: memory-pattern detection,
    //! memory-value parsing, and per-line / whole-output device parsing.
    use super::*;

    // --- is_memory_pattern -------------------------------------------------

    #[test]
    fn test_is_memory_pattern_valid() {
        assert!(is_memory_pattern("8128 MiB, 8128 MiB free"));
        assert!(is_memory_pattern("1024 MiB, 512 MiB free"));
        assert!(is_memory_pattern("16384 MiB, 12000 MiB free"));
        assert!(is_memory_pattern("0 MiB, 0 MiB free"));
    }

    #[test]
    fn test_is_memory_pattern_invalid() {
        assert!(!is_memory_pattern("8128 MB, 8128 MB free")); // Wrong unit
        assert!(!is_memory_pattern("8128 MiB 8128 MiB free")); // Missing comma
        assert!(!is_memory_pattern("8128 MiB, 8128 MiB used")); // Wrong second part
        assert!(!is_memory_pattern("not_a_number MiB, 8128 MiB free")); // Invalid number
        assert!(!is_memory_pattern("8128 MiB")); // Missing second part
        assert!(!is_memory_pattern("")); // Empty string
        assert!(!is_memory_pattern("8128 MiB, free")); // Missing number in second part
    }

    // --- find_memory_pattern -----------------------------------------------

    #[test]
    fn test_find_memory_pattern() {
        let text = "Intel(R) Arc(tm) A750 Graphics (DG2) (8128 MiB, 4096 MiB free)";
        let result = find_memory_pattern(text);
        assert!(result.is_some());

        let (start_idx, content) = result.unwrap();
        assert!(start_idx > 0);
        assert_eq!(content, "8128 MiB, 4096 MiB free");
    }

    #[test]
    fn test_find_memory_pattern_multiple_parentheses() {
        let text = "Device (test) with (1024 MiB, 512 MiB free) and (2048 MiB, 1024 MiB free)";
        let result = find_memory_pattern(text);
        assert!(result.is_some());

        let (_, content) = result.unwrap();
        // Should return the LAST valid memory pattern
        assert_eq!(content, "2048 MiB, 1024 MiB free");
    }

    #[test]
    fn test_find_memory_pattern_no_match() {
        let text = "No memory info here";
        assert!(find_memory_pattern(text).is_none());

        let text_with_invalid = "Some text (invalid memory info) here";
        assert!(find_memory_pattern(text_with_invalid).is_none());
    }

    // --- parse_memory_value ------------------------------------------------

    #[test]
    fn test_parse_memory_value() {
        assert_eq!(parse_memory_value("8128 MiB").unwrap(), 8128);
        assert_eq!(parse_memory_value("7721 MiB free").unwrap(), 7721);
        assert_eq!(parse_memory_value("0 MiB").unwrap(), 0);
        assert_eq!(parse_memory_value("24576 MiB").unwrap(), 24576);
    }

    #[test]
    fn test_parse_memory_value_invalid() {
        assert!(parse_memory_value("").is_err());
        assert!(parse_memory_value("not_a_number MiB").is_err());
        assert!(parse_memory_value(" ").is_err());
    }

    // --- parse_device_line: one test per backend prefix (Vulkan/CUDA/SYCL) --

    #[test]
    fn test_parse_device_line_vulkan() {
        let line = "Vulkan0: Intel(R) Arc(tm) A750 Graphics (DG2) (8128 MiB, 8128 MiB free)";
        let result = parse_device_line(line).unwrap();
        assert!(result.is_some());

        let device = result.unwrap();
        assert_eq!(device.id, "Vulkan0");
        // Name keeps its own parenthesized suffix; only the trailing memory
        // parentheses are stripped.
        assert_eq!(device.name, "Intel(R) Arc(tm) A750 Graphics (DG2)");
        assert_eq!(device.mem, 8128);
        assert_eq!(device.free, 8128);
    }

    #[test]
    fn test_parse_device_line_cuda() {
        let line = "CUDA0: NVIDIA GeForce RTX 4090 (24576 MiB, 24000 MiB free)";
        let result = parse_device_line(line).unwrap();
        assert!(result.is_some());

        let device = result.unwrap();
        assert_eq!(device.id, "CUDA0");
        assert_eq!(device.name, "NVIDIA GeForce RTX 4090");
        assert_eq!(device.mem, 24576);
        assert_eq!(device.free, 24000);
    }

    #[test]
    fn test_parse_device_line_sycl() {
        let line = "SYCL0: Intel(R) Arc(TM) A750 Graphics (8000 MiB, 7721 MiB free)";
        let result = parse_device_line(line).unwrap();
        assert!(result.is_some());

        let device = result.unwrap();
        assert_eq!(device.id, "SYCL0");
        assert_eq!(device.name, "Intel(R) Arc(TM) A750 Graphics");
        assert_eq!(device.mem, 8000);
        assert_eq!(device.free, 7721);
    }

    #[test]
    fn test_parse_device_line_malformed() {
        // Malformed lines must yield Ok(None), not an error.
        // Missing colon
        let result = parse_device_line("Vulkan0 Intel Graphics (8128 MiB, 8128 MiB free)").unwrap();
        assert!(result.is_none());

        // Missing memory info
        let result = parse_device_line("Vulkan0: Intel Graphics").unwrap();
        assert!(result.is_none());

        // Invalid memory format
        let result = parse_device_line("Vulkan0: Intel Graphics (invalid memory)").unwrap();
        assert!(result.is_none());
    }

    // --- parse_device_output -----------------------------------------------

    #[test]
    fn test_parse_device_output_valid() {
        let output = r#"
Some header text
Available devices:
Vulkan0: Intel(R) Arc(tm) A750 Graphics (DG2) (8128 MiB, 8128 MiB free)
CUDA0: NVIDIA GeForce RTX 4090 (24576 MiB, 24000 MiB free)
SYCL0: Intel(R) Arc(TM) A750 Graphics (8000 MiB, 7721 MiB free)
Some footer text
"#;

        let result = parse_device_output(output).unwrap();
        assert_eq!(result.len(), 3);

        assert_eq!(result[0].id, "Vulkan0");
        assert_eq!(result[0].name, "Intel(R) Arc(tm) A750 Graphics (DG2)");
        assert_eq!(result[0].mem, 8128);

        assert_eq!(result[1].id, "CUDA0");
        assert_eq!(result[1].name, "NVIDIA GeForce RTX 4090");
        assert_eq!(result[1].mem, 24576);

        assert_eq!(result[2].id, "SYCL0");
        assert_eq!(result[2].name, "Intel(R) Arc(TM) A750 Graphics");
        assert_eq!(result[2].mem, 8000);
    }

    #[test]
    fn test_parse_device_output_no_devices_section() {
        // Without an "Available devices:" marker the parser must error out.
        let output = "Some output without Available devices section";
        let result = parse_device_output(output);
        assert!(result.is_err());
    }

    #[test]
    fn test_parse_device_output_empty_devices() {
        let output = r#"
Some header text
Available devices:
Some footer text
"#;

        let result = parse_device_output(output).unwrap();
        assert_eq!(result.len(), 0);
    }

    #[test]
    fn test_parse_device_output_mixed_valid_invalid() {
        let output = r#"
Available devices:
Vulkan0: Intel(R) Arc(tm) A750 Graphics (DG2) (8128 MiB, 8128 MiB free)
InvalidLine: No memory info
CUDA0: NVIDIA GeForce RTX 4090 (24576 MiB, 24000 MiB free)
AnotherInvalid
"#;

        let result = parse_device_output(output).unwrap();
        assert_eq!(result.len(), 2); // Only valid lines should be parsed
        assert_eq!(result[0].id, "Vulkan0");
        assert_eq!(result[1].id, "CUDA0");
    }
}

View File

@ -1,147 +1,278 @@
use std::path::PathBuf;
use crate::error::{ErrorCode, LlamacppError, ServerResult};
#[cfg(windows)]
use std::os::windows::ffi::OsStrExt;
#[cfg(windows)]
use std::ffi::OsStr;
#[cfg(windows)]
use windows_sys::Win32::Storage::FileSystem::GetShortPathNameW;
/// Get Windows short (8.3) path to avoid issues with spaces and special characters.
///
/// Returns `None` when the API reports failure (e.g. the path does not
/// exist or short names are disabled on the volume).
#[cfg(windows)]
pub fn get_short_path<P: AsRef<std::path::Path>>(path: P) -> Option<String> {
    let wide: Vec<u16> = OsStr::new(path.as_ref())
        .encode_wide()
        .chain(Some(0))
        .collect();

    // First query the required buffer size (in u16 units, including the
    // terminating NUL) instead of assuming MAX_PATH (260) is enough — the
    // old fixed 260-element buffer caused an out-of-bounds slice whenever
    // GetShortPathNameW reported a larger required length.
    let required = unsafe { GetShortPathNameW(wide.as_ptr(), std::ptr::null_mut(), 0) };
    if required == 0 {
        return None;
    }

    let mut buffer = vec![0u16; required as usize];
    let len = unsafe { GetShortPathNameW(wide.as_ptr(), buffer.as_mut_ptr(), buffer.len() as u32) };
    // On success `len` excludes the NUL and fits the buffer; zero or a
    // too-large value means failure (or the path changed between calls).
    if len == 0 || len as usize > buffer.len() {
        return None;
    }
    Some(String::from_utf16_lossy(&buffer[..len as usize]))
}
/// Validate that a binary path exists and is accessible
pub fn validate_binary_path(backend_path: &str) -> ServerResult<PathBuf> {
let server_path_buf = PathBuf::from(backend_path);
if !server_path_buf.exists() {
let err_msg = format!("Binary not found at {:?}", backend_path);
log::error!(
"Server binary not found at expected path: {:?}",
backend_path
);
return Err(LlamacppError::new(
ErrorCode::BinaryNotFound,
"The llama.cpp server binary could not be found.".into(),
Some(err_msg),
)
.into());
}
Ok(server_path_buf)
}
/// Validate model path exists and update args with platform-appropriate path format
pub fn validate_model_path(args: &mut Vec<String>) -> ServerResult<PathBuf> {
let model_path_index = args.iter().position(|arg| arg == "-m").ok_or_else(|| {
LlamacppError::new(
ErrorCode::ModelLoadFailed,
"Model path argument '-m' is missing.".into(),
None,
)
})?;
let model_path = args.get(model_path_index + 1).cloned().ok_or_else(|| {
LlamacppError::new(
ErrorCode::ModelLoadFailed,
"Model path was not provided after '-m' flag.".into(),
None,
)
})?;
let model_path_pb = PathBuf::from(&model_path);
if !model_path_pb.exists() {
let err_msg = format!(
"Invalid or inaccessible model path: {}",
model_path_pb.display()
);
log::error!("{}", &err_msg);
return Err(LlamacppError::new(
ErrorCode::ModelFileNotFound,
"The specified model file does not exist or is not accessible.".into(),
Some(err_msg),
)
.into());
}
// Update the path in args with appropriate format for the platform
#[cfg(windows)]
{
// use short path on Windows
if let Some(short) = get_short_path(&model_path_pb) {
args[model_path_index + 1] = short;
} else {
args[model_path_index + 1] = model_path_pb.display().to_string();
}
}
#[cfg(not(windows))]
{
args[model_path_index + 1] = model_path_pb.display().to_string();
}
Ok(model_path_pb)
}
/// Validate mmproj path exists and update args with platform-appropriate path format
pub fn validate_mmproj_path(args: &mut Vec<String>) -> ServerResult<Option<PathBuf>> {
let mmproj_path_index = match args.iter().position(|arg| arg == "--mmproj") {
Some(index) => index,
None => return Ok(None), // mmproj is optional
};
let mmproj_path = args.get(mmproj_path_index + 1).cloned().ok_or_else(|| {
LlamacppError::new(
ErrorCode::ModelLoadFailed,
"Mmproj path was not provided after '--mmproj' flag.".into(),
None,
)
})?;
let mmproj_path_pb = PathBuf::from(&mmproj_path);
if !mmproj_path_pb.exists() {
let err_msg = format!(
"Invalid or inaccessible mmproj path: {}",
mmproj_path_pb.display()
);
log::error!("{}", &err_msg);
return Err(LlamacppError::new(
ErrorCode::ModelFileNotFound,
"The specified mmproj file does not exist or is not accessible.".into(),
Some(err_msg),
)
.into());
}
#[cfg(windows)]
{
// use short path on Windows
if let Some(short) = get_short_path(&mmproj_path_pb) {
args[mmproj_path_index + 1] = short;
} else {
args[mmproj_path_index + 1] = mmproj_path_pb.display().to_string();
}
}
#[cfg(not(windows))]
{
args[mmproj_path_index + 1] = mmproj_path_pb.display().to_string();
}
Ok(Some(mmproj_path_pb))
}
use std::path::PathBuf;
use crate::error::{ErrorCode, LlamacppError, ServerResult};
#[cfg(windows)]
use jan_utils::path::get_short_path;
/// Validate that a binary path exists and is accessible.
///
/// Returns the path as a `PathBuf` on success, or a `BinaryNotFound`
/// error when nothing exists at `backend_path`.
pub fn validate_binary_path(backend_path: &str) -> ServerResult<PathBuf> {
    let binary = PathBuf::from(backend_path);
    // Happy path first: an existing file is returned untouched.
    if binary.exists() {
        return Ok(binary);
    }
    let err_msg = format!("Binary not found at {:?}", backend_path);
    log::error!(
        "Server binary not found at expected path: {:?}",
        backend_path
    );
    Err(LlamacppError::new(
        ErrorCode::BinaryNotFound,
        "The llama.cpp server binary could not be found.".into(),
        Some(err_msg),
    )
    .into())
}
/// Validate model path exists and update args with platform-appropriate path format.
///
/// Finds the value following the `-m` flag in `args`, verifies the file
/// exists, then rewrites that argument in place — on Windows preferring the
/// 8.3 short path so spaces/special characters survive command-line passing.
/// The first `-m` occurrence wins if the flag is repeated.
///
/// Errors: `ModelLoadFailed` when `-m` is missing or has no value;
/// `ModelFileNotFound` when the referenced file does not exist.
pub fn validate_model_path(args: &mut Vec<String>) -> ServerResult<PathBuf> {
    let model_path_index = args.iter().position(|arg| arg == "-m").ok_or_else(|| {
        LlamacppError::new(
            ErrorCode::ModelLoadFailed,
            "Model path argument '-m' is missing.".into(),
            None,
        )
    })?;
    // The path is the argument immediately after the flag.
    let model_path = args.get(model_path_index + 1).cloned().ok_or_else(|| {
        LlamacppError::new(
            ErrorCode::ModelLoadFailed,
            "Model path was not provided after '-m' flag.".into(),
            None,
        )
    })?;
    let model_path_pb = PathBuf::from(&model_path);
    if !model_path_pb.exists() {
        let err_msg = format!(
            "Invalid or inaccessible model path: {}",
            model_path_pb.display()
        );
        log::error!("{}", &err_msg);
        return Err(LlamacppError::new(
            ErrorCode::ModelFileNotFound,
            "The specified model file does not exist or is not accessible.".into(),
            Some(err_msg),
        )
        .into());
    }
    // Update the path in args with appropriate format for the platform
    #[cfg(windows)]
    {
        // use short path on Windows; fall back to the display form if the
        // 8.3 conversion is unavailable on this volume
        if let Some(short) = get_short_path(&model_path_pb) {
            args[model_path_index + 1] = short;
        } else {
            args[model_path_index + 1] = model_path_pb.display().to_string();
        }
    }
    #[cfg(not(windows))]
    {
        args[model_path_index + 1] = model_path_pb.display().to_string();
    }
    Ok(model_path_pb)
}
/// Validate mmproj path exists and update args with platform-appropriate path format.
///
/// Mirrors `validate_model_path` for the optional `--mmproj` flag: returns
/// `Ok(None)` when the flag is absent, otherwise verifies the file exists
/// and rewrites the argument in place (Windows short path when available).
///
/// Errors: `ModelLoadFailed` when `--mmproj` is present without a value;
/// `ModelFileNotFound` when the referenced file does not exist.
pub fn validate_mmproj_path(args: &mut Vec<String>) -> ServerResult<Option<PathBuf>> {
    let mmproj_path_index = match args.iter().position(|arg| arg == "--mmproj") {
        Some(index) => index,
        None => return Ok(None), // mmproj is optional
    };
    // The path is the argument immediately after the flag.
    let mmproj_path = args.get(mmproj_path_index + 1).cloned().ok_or_else(|| {
        LlamacppError::new(
            ErrorCode::ModelLoadFailed,
            "Mmproj path was not provided after '--mmproj' flag.".into(),
            None,
        )
    })?;
    let mmproj_path_pb = PathBuf::from(&mmproj_path);
    if !mmproj_path_pb.exists() {
        let err_msg = format!(
            "Invalid or inaccessible mmproj path: {}",
            mmproj_path_pb.display()
        );
        log::error!("{}", &err_msg);
        return Err(LlamacppError::new(
            ErrorCode::ModelFileNotFound,
            "The specified mmproj file does not exist or is not accessible.".into(),
            Some(err_msg),
        )
        .into());
    }
    #[cfg(windows)]
    {
        // use short path on Windows; fall back to the display form if the
        // 8.3 conversion is unavailable on this volume
        if let Some(short) = get_short_path(&mmproj_path_pb) {
            args[mmproj_path_index + 1] = short;
        } else {
            args[mmproj_path_index + 1] = mmproj_path_pb.display().to_string();
        }
    }
    #[cfg(not(windows))]
    {
        args[mmproj_path_index + 1] = mmproj_path_pb.display().to_string();
    }
    Ok(Some(mmproj_path_pb))
}
#[cfg(test)]
mod tests {
use super::*;
use tempfile::NamedTempFile;
#[test]
fn test_validate_binary_path_existing() {
let temp_file = NamedTempFile::new().unwrap();
let path = temp_file.path().to_str().unwrap();
let result = validate_binary_path(path);
assert!(result.is_ok());
assert_eq!(result.unwrap(), PathBuf::from(path));
}
#[test]
fn test_validate_binary_path_nonexistent() {
let nonexistent_path = "/tmp/definitely_does_not_exist_123456789";
let result = validate_binary_path(nonexistent_path);
assert!(result.is_err());
}
#[test]
fn test_validate_model_path_valid() {
let temp_file = NamedTempFile::new().unwrap();
let path = temp_file.path().to_str().unwrap();
let mut args = vec!["-m".to_string(), path.to_string(), "--verbose".to_string()];
let result = validate_model_path(&mut args);
assert!(result.is_ok());
assert_eq!(result.unwrap(), PathBuf::from(path));
// Args should be updated with the path
#[cfg(windows)]
{
// On Windows, the path might be converted to short path format
// Just verify that the path in args[1] points to the same file
assert!(PathBuf::from(&args[1]).exists());
}
#[cfg(not(windows))]
{
assert_eq!(args[1], temp_file.path().display().to_string());
}
}
#[test]
fn test_validate_model_path_missing_flag() {
let mut args = vec!["--verbose".to_string(), "value".to_string()];
let result = validate_model_path(&mut args);
assert!(result.is_err());
}
#[test]
fn test_validate_model_path_missing_value() {
let mut args = vec!["-m".to_string()];
let result = validate_model_path(&mut args);
assert!(result.is_err());
}
#[test]
fn test_validate_model_path_nonexistent_file() {
let nonexistent_path = "/tmp/nonexistent_model_123456789.gguf";
let mut args = vec!["-m".to_string(), nonexistent_path.to_string()];
let result = validate_model_path(&mut args);
assert!(result.is_err());
}
#[test]
fn test_validate_mmproj_path_valid() {
let temp_file = NamedTempFile::new().unwrap();
let path = temp_file.path().to_str().unwrap();
let mut args = vec!["--mmproj".to_string(), path.to_string(), "--verbose".to_string()];
let result = validate_mmproj_path(&mut args);
assert!(result.is_ok());
assert!(result.unwrap().is_some());
// Args should be updated with the path
#[cfg(windows)]
{
// On Windows, the path might be converted to short path format
// Just verify that the path in args[1] points to the same file
assert!(PathBuf::from(&args[1]).exists());
}
#[cfg(not(windows))]
{
assert_eq!(args[1], temp_file.path().display().to_string());
}
}
#[test]
fn test_validate_mmproj_path_missing() {
let mut args = vec!["--verbose".to_string(), "value".to_string()];
let result = validate_mmproj_path(&mut args);
assert!(result.is_ok());
assert!(result.unwrap().is_none()); // mmproj is optional
}
#[test]
fn test_validate_mmproj_path_missing_value() {
let mut args = vec!["--mmproj".to_string()];
let result = validate_mmproj_path(&mut args);
assert!(result.is_err());
}
#[test]
fn test_validate_mmproj_path_nonexistent_file() {
let nonexistent_path = "/tmp/nonexistent_mmproj_123456789.gguf";
let mut args = vec!["--mmproj".to_string(), nonexistent_path.to_string()];
let result = validate_mmproj_path(&mut args);
assert!(result.is_err());
}
#[test]
fn test_validate_model_path_multiple_m_flags() {
let temp_file = NamedTempFile::new().unwrap();
let path = temp_file.path().to_str().unwrap();
// Multiple -m flags - should use the first one
let mut args = vec![
"-m".to_string(),
path.to_string(),
"--verbose".to_string(),
"-m".to_string(),
"another_path".to_string()
];
let result = validate_model_path(&mut args);
assert!(result.is_ok());
assert_eq!(result.unwrap(), PathBuf::from(path));
}
#[test]
fn test_validate_mmproj_path_multiple_flags() {
let temp_file = NamedTempFile::new().unwrap();
let path = temp_file.path().to_str().unwrap();
// Multiple --mmproj flags - should use the first one
let mut args = vec![
"--mmproj".to_string(),
path.to_string(),
"--verbose".to_string(),
"--mmproj".to_string(),
"another_path".to_string()
];
let result = validate_mmproj_path(&mut args);
assert!(result.is_ok());
let result_path = result.unwrap();
assert!(result_path.is_some());
assert_eq!(result_path.unwrap(), PathBuf::from(path));
}
}

File diff suppressed because it is too large Load Diff

View File

@ -165,13 +165,6 @@ pub fn read_yaml(app: tauri::AppHandle, path: &str) -> Result<serde_json::Value,
pub fn decompress(app: tauri::AppHandle, path: &str, output_dir: &str) -> Result<(), String> {
let jan_data_folder = crate::core::app::commands::get_jan_data_folder_path(app.clone());
let path_buf = jan_utils::normalize_path(&jan_data_folder.join(path));
if !path_buf.starts_with(&jan_data_folder) {
return Err(format!(
"Error: path {} is not under jan_data_folder {}",
path_buf.to_string_lossy(),
jan_data_folder.to_string_lossy(),
));
}
let output_dir_buf = jan_utils::normalize_path(&jan_data_folder.join(output_dir));
if !output_dir_buf.starts_with(&jan_data_folder) {
@ -191,6 +184,17 @@ pub fn decompress(app: tauri::AppHandle, path: &str, output_dir: &str) -> Result
)
})?;
// Use short path on Windows to handle paths with spaces
#[cfg(windows)]
let file = {
if let Some(short_path) = jan_utils::path::get_short_path(&path_buf) {
fs::File::open(&short_path).map_err(|e| e.to_string())?
} else {
fs::File::open(&path_buf).map_err(|e| e.to_string())?
}
};
#[cfg(not(windows))]
let file = fs::File::open(&path_buf).map_err(|e| e.to_string())?;
if path.ends_with(".tar.gz") {
let tar = flate2::read::GzDecoder::new(file);

View File

@ -12,10 +12,16 @@ reqwest = { version = "0.11", features = ["json"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
sha2 = "0.10"
tokio = { version = "1", features = ["process"] }
tokio = { version = "1", features = ["process", "fs", "macros", "rt"] }
tokio-util = "0.7.14"
url = "2.5"
[target.'cfg(windows)'.dependencies]
windows-sys = { version = "0.60.2", features = ["Win32_Storage_FileSystem"] }
[dev-dependencies]
tempfile = "3.0"
[features]
default = []
logging = ["log"]

View File

@ -84,3 +84,108 @@ pub async fn compute_file_sha256_with_cancellation(
log::debug!("Hash computation completed for {} bytes", total_read);
Ok(hash_hex)
}
#[cfg(test)]
mod tests {
use super::*;
use tokio_util::sync::CancellationToken;
#[test]
fn test_generate_app_token() {
let token1 = generate_app_token();
let token2 = generate_app_token();
// Should be 32 characters long
assert_eq!(token1.len(), 32);
assert_eq!(token2.len(), 32);
// Should be different each time
assert_ne!(token1, token2);
// Should only contain alphanumeric characters
assert!(token1.chars().all(|c| c.is_alphanumeric()));
assert!(token2.chars().all(|c| c.is_alphanumeric()));
}
#[test]
fn test_generate_api_key() {
let model_id = "test-model".to_string();
let api_secret = "test-secret".to_string();
let key1 = generate_api_key(model_id.clone(), api_secret.clone()).unwrap();
let key2 = generate_api_key(model_id.clone(), api_secret.clone()).unwrap();
// Should generate same key for same inputs
assert_eq!(key1, key2);
// Should be base64 encoded (and thus contain base64 characters)
assert!(key1.chars().all(|c| c.is_alphanumeric() || c == '+' || c == '/' || c == '='));
// Different model_id should produce different key
let different_key = generate_api_key("different-model".to_string(), api_secret).unwrap();
assert_ne!(key1, different_key);
}
#[test]
fn test_generate_api_key_empty_inputs() {
let result = generate_api_key("".to_string(), "secret".to_string());
assert!(result.is_ok()); // Should still work with empty model_id
let result = generate_api_key("model".to_string(), "".to_string());
assert!(result.is_ok()); // Should still work with empty secret
}
#[tokio::test]
async fn test_compute_file_sha256_with_cancellation() {
use std::io::Write;
use tempfile::NamedTempFile;
// Create a temporary file with known content
let mut temp_file = NamedTempFile::new().unwrap();
let test_content = b"Hello, World!";
temp_file.write_all(test_content).unwrap();
temp_file.flush().unwrap();
let token = CancellationToken::new();
// Compute hash of the file
let hash = compute_file_sha256_with_cancellation(temp_file.path(), &token).await.unwrap();
// Verify it's a valid hex string
assert_eq!(hash.len(), 64); // SHA256 is 256 bits = 64 hex chars
assert!(hash.chars().all(|c| c.is_ascii_hexdigit()));
// Verify it matches expected SHA256 of "Hello, World!"
let expected = "dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f";
assert_eq!(hash, expected);
}
#[tokio::test]
async fn test_compute_file_sha256_cancellation() {
use std::io::Write;
use tempfile::NamedTempFile;
// Create a temporary file
let mut temp_file = NamedTempFile::new().unwrap();
temp_file.write_all(b"test content").unwrap();
temp_file.flush().unwrap();
let token = CancellationToken::new();
token.cancel(); // Cancel immediately
// Should return cancellation error
let result = compute_file_sha256_with_cancellation(temp_file.path(), &token).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("cancelled"));
}
#[tokio::test]
async fn test_compute_file_sha256_nonexistent_file() {
let token = CancellationToken::new();
let nonexistent_path = Path::new("/nonexistent/file.txt");
let result = compute_file_sha256_with_cancellation(nonexistent_path, &token).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("Failed to open file for hashing"));
}
}

View File

@ -48,3 +48,86 @@ pub fn calculate_exponential_backoff_delay(attempt: u32) -> u64 {
final_delay
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_calculate_exponential_backoff_delay_basic() {
let delay1 = calculate_exponential_backoff_delay(1);
let delay2 = calculate_exponential_backoff_delay(2);
let delay3 = calculate_exponential_backoff_delay(3);
// First attempt should be around base delay (1000ms) ± jitter
assert!(delay1 >= 100 && delay1 <= 2000);
// Second attempt should be roughly double
assert!(delay2 >= 1000 && delay2 <= 4000);
// Third attempt should be roughly quadruple
assert!(delay3 >= 2000 && delay3 <= 6000);
// Generally increasing pattern
assert!(delay1 < delay3);
}
#[test]
fn test_calculate_exponential_backoff_delay_max_cap() {
// Very high attempt numbers should be capped at MAX_RESTART_DELAY_MS
let high_attempt_delay = calculate_exponential_backoff_delay(100);
assert!(high_attempt_delay <= MCP_MAX_RESTART_DELAY_MS);
assert!(high_attempt_delay >= 100); // Minimum delay
}
#[test]
fn test_calculate_exponential_backoff_delay_minimum() {
// Even with jitter, should never go below minimum
for attempt in 1..=10 {
let delay = calculate_exponential_backoff_delay(attempt);
assert!(delay >= 100, "Delay {} for attempt {} is below minimum", delay, attempt);
}
}
#[test]
fn test_calculate_exponential_backoff_delay_deterministic() {
// Same attempt number should produce same delay (deterministic jitter)
let delay1_a = calculate_exponential_backoff_delay(5);
let delay1_b = calculate_exponential_backoff_delay(5);
assert_eq!(delay1_a, delay1_b);
let delay2_a = calculate_exponential_backoff_delay(10);
let delay2_b = calculate_exponential_backoff_delay(10);
assert_eq!(delay2_a, delay2_b);
}
#[test]
fn test_calculate_exponential_backoff_delay_progression() {
// Test the general progression pattern
let mut delays = Vec::new();
for attempt in 1..=8 {
delays.push(calculate_exponential_backoff_delay(attempt));
}
// Should not exceed maximum
for delay in &delays {
assert!(*delay <= MCP_MAX_RESTART_DELAY_MS);
}
// Earlier attempts should generally be smaller than later ones
// (allowing some variance due to jitter)
assert!(delays[0] < delays[6]); // 1st vs 7th attempt
assert!(delays[1] < delays[7]); // 2nd vs 8th attempt
}
#[test]
fn test_constants() {
// Verify our constants are reasonable
assert_eq!(MCP_BASE_RESTART_DELAY_MS, 1000);
assert_eq!(MCP_MAX_RESTART_DELAY_MS, 30000);
assert_eq!(MCP_BACKOFF_MULTIPLIER, 2.0);
// Max should be greater than base
assert!(MCP_MAX_RESTART_DELAY_MS > MCP_BASE_RESTART_DELAY_MS);
}
}

View File

@ -1,76 +1,121 @@
#[cfg(windows)]
use std::path::Prefix;
use std::path::{Component, Path, PathBuf};
/// Normalizes file paths by handling path components, prefixes, and resolving relative paths
/// Based on: https://github.com/rust-lang/cargo/blob/rust-1.67.0/crates/cargo-util/src/paths.rs#L82-L107
pub fn normalize_path(path: &Path) -> PathBuf {
let mut components = path.components().peekable();
let mut ret = if let Some(c @ Component::Prefix(_prefix_component)) = components.peek().cloned()
{
#[cfg(windows)]
// Remove only the Verbatim prefix, but keep the drive letter (e.g., C:\)
match _prefix_component.kind() {
Prefix::VerbatimDisk(disk) => {
components.next(); // skip this prefix
// Re-add the disk prefix (e.g., C:)
let mut pb = PathBuf::new();
pb.push(format!("{}:", disk as char));
pb
}
Prefix::Verbatim(_) | Prefix::VerbatimUNC(_, _) => {
components.next(); // skip this prefix
PathBuf::new()
}
_ => {
components.next();
PathBuf::from(c.as_os_str())
}
}
#[cfg(not(windows))]
{
components.next(); // skip this prefix
PathBuf::from(c.as_os_str())
}
} else {
PathBuf::new()
};
for component in components {
match component {
Component::Prefix(..) => unreachable!(),
Component::RootDir => {
ret.push(component.as_os_str());
}
Component::CurDir => {}
Component::ParentDir => {
ret.pop();
}
Component::Normal(c) => {
ret.push(c);
}
}
}
ret
}
/// Removes file:/ and file:\ prefixes from file paths
pub fn normalize_file_path(path: &str) -> String {
path.replace("file:/", "").replace("file:\\", "")
}
/// Removes prefix from path string with proper formatting
pub fn remove_prefix(path: &str, prefix: &str) -> String {
if !prefix.is_empty() && path.starts_with(prefix) {
let result = path[prefix.len()..].to_string();
if result.is_empty() {
"/".to_string()
} else if result.starts_with('/') {
result
} else {
format!("/{}", result)
}
} else {
path.to_string()
}
}
#[cfg(windows)]
use std::path::Prefix;
use std::path::{Component, Path, PathBuf};
#[cfg(windows)]
use std::os::windows::ffi::OsStrExt;
#[cfg(windows)]
use std::ffi::OsStr;
#[cfg(windows)]
use windows_sys::Win32::Storage::FileSystem::GetShortPathNameW;
/// Normalizes file paths by handling path components, prefixes, and resolving relative paths.
/// Based on: https://github.com/rust-lang/cargo/blob/rust-1.67.0/crates/cargo-util/src/paths.rs#L82-L107
///
/// Purely lexical: `.` components are dropped and `..` pops the previous
/// component without touching the filesystem, so symlinks are NOT resolved.
/// On Windows, verbatim prefixes (`\\?\C:\`, `\\?\UNC\...`) are stripped
/// while keeping the drive letter; other prefixes are preserved as-is.
pub fn normalize_path(path: &Path) -> PathBuf {
    let mut components = path.components().peekable();
    // Seed the result from the (Windows-only in practice) prefix component,
    // if any; `_prefix_component` is unused on non-Windows, hence the `_`.
    let mut ret = if let Some(c @ Component::Prefix(_prefix_component)) = components.peek().cloned()
    {
        #[cfg(windows)]
        // Remove only the Verbatim prefix, but keep the drive letter (e.g., C:\)
        match _prefix_component.kind() {
            Prefix::VerbatimDisk(disk) => {
                components.next(); // skip this prefix
                // Re-add the disk prefix (e.g., C:)
                let mut pb = PathBuf::new();
                pb.push(format!("{}:", disk as char));
                pb
            }
            Prefix::Verbatim(_) | Prefix::VerbatimUNC(_, _) => {
                components.next(); // skip this prefix
                PathBuf::new()
            }
            _ => {
                components.next();
                PathBuf::from(c.as_os_str())
            }
        }
        #[cfg(not(windows))]
        {
            components.next(); // skip this prefix
            PathBuf::from(c.as_os_str())
        }
    } else {
        PathBuf::new()
    };
    for component in components {
        match component {
            // The prefix, if any, was consumed above; it can only be first.
            Component::Prefix(..) => unreachable!(),
            Component::RootDir => {
                ret.push(component.as_os_str());
            }
            // `.` contributes nothing.
            Component::CurDir => {}
            // `..` lexically removes the last pushed component (no-op at root).
            Component::ParentDir => {
                ret.pop();
            }
            Component::Normal(c) => {
                ret.push(c);
            }
        }
    }
    ret
}
/// Removes `file:/` and `file:\` URI-style prefixes from file paths.
///
/// Note: every occurrence is removed, not just a leading one, matching
/// `str::replace` semantics.
pub fn normalize_file_path(path: &str) -> String {
    let without_forward = path.replace("file:/", "");
    without_forward.replace("file:\\", "")
}
/// Removes `prefix` from the front of `path`, normalising the result so it
/// always starts with `/`. A non-matching or empty prefix leaves the path
/// unchanged; stripping the entire path yields `"/"`.
pub fn remove_prefix(path: &str, prefix: &str) -> String {
    // Guard: nothing to strip.
    if prefix.is_empty() || !path.starts_with(prefix) {
        return path.to_string();
    }
    // Safe byte slice: `starts_with` guarantees a char boundary at prefix.len().
    match &path[prefix.len()..] {
        "" => "/".to_string(),
        rest if rest.starts_with('/') => rest.to_string(),
        rest => format!("/{}", rest),
    }
}
/// Get Windows short (8.3) path to avoid issues with spaces and special
/// characters when the path is later passed on a command line.
///
/// Returns `None` when the conversion fails (path does not exist, or 8.3
/// name generation is disabled on the volume).
#[cfg(windows)]
pub fn get_short_path<P: AsRef<std::path::Path>>(path: P) -> Option<String> {
    // NUL-terminated UTF-16 input, as required by the Win32 API.
    let wide: Vec<u16> = OsStr::new(path.as_ref())
        .encode_wide()
        .chain(Some(0))
        .collect();
    // Start at MAX_PATH and grow on demand. GetShortPathNameW returns the
    // required buffer size (including the NUL) when the buffer is too small,
    // and 0 on error. The previous fixed-size version sliced `buffer[..len]`
    // with that *required* length, which panics for paths longer than 260.
    let mut buffer = vec![0u16; 260];
    loop {
        let len = unsafe {
            GetShortPathNameW(wide.as_ptr(), buffer.as_mut_ptr(), buffer.len() as u32)
        };
        if len == 0 {
            // Hard failure (missing path, 8.3 names disabled, ...).
            return None;
        }
        let len = len as usize;
        if len <= buffer.len() {
            // Success: `len` is the character count, excluding the NUL.
            return Some(String::from_utf16_lossy(&buffer[..len]));
        }
        // Buffer too small: `len` is the required size; grow and retry.
        buffer.resize(len, 0);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke test only: exercises get_short_path against %TEMP% without
    // asserting a value, since the 8.3 form is system-dependent and may
    // legitimately be unavailable (None) on some volumes.
    #[cfg(windows)]
    #[test]
    fn test_get_short_path() {
        // Test with a real path that should exist on Windows
        use std::env;
        if let Ok(temp_dir) = env::var("TEMP") {
            let result = get_short_path(&temp_dir);
            // Should return some short path or None (both are valid)
            // We can't assert the exact value as it depends on the system
            println!("Short path result: {:?}", result);
        }
    }
}

View File

@ -73,3 +73,130 @@ pub fn is_memory_pattern(content: &str) -> bool {
&& part.contains("MiB")
})
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_uuid() {
let uuid_bytes = [
0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88
];
let uuid_string = parse_uuid(&uuid_bytes);
assert_eq!(uuid_string, "12345678-9abc-def0-1122-334455667788");
}
#[test]
fn test_parse_uuid_zeros() {
let zero_bytes = [0; 16];
let uuid_string = parse_uuid(&zero_bytes);
assert_eq!(uuid_string, "00000000-0000-0000-0000-000000000000");
}
#[test]
fn test_parse_uuid_max_values() {
let max_bytes = [0xff; 16];
let uuid_string = parse_uuid(&max_bytes);
assert_eq!(uuid_string, "ffffffff-ffff-ffff-ffff-ffffffffffff");
}
#[test]
fn test_parse_c_string() {
let c_string = [b'H' as i8, b'e' as i8, b'l' as i8, b'l' as i8, b'o' as i8, 0, b'W' as i8];
let result = parse_c_string(&c_string);
assert_eq!(result, "Hello");
}
#[test]
fn test_parse_c_string_empty() {
let empty_c_string = [0];
let result = parse_c_string(&empty_c_string);
assert_eq!(result, "");
}
#[test]
fn test_parse_c_string_no_null_terminator() {
let no_null = [b'T' as i8, b'e' as i8, b's' as i8, b't' as i8];
let result = parse_c_string(&no_null);
assert_eq!(result, "Test");
}
#[test]
fn test_parse_c_string_with_negative_values() {
let with_negative = [-1, b'A' as i8, b'B' as i8, 0];
let result = parse_c_string(&with_negative);
// Should convert negative to unsigned byte
assert!(result.len() > 0);
assert!(result.contains('A'));
assert!(result.contains('B'));
}
#[test]
fn test_err_to_string() {
let error_msg = "Something went wrong";
let result = err_to_string(error_msg);
assert_eq!(result, "Error: Something went wrong");
let io_error = std::io::Error::new(std::io::ErrorKind::NotFound, "File not found");
let result = err_to_string(io_error);
assert!(result.starts_with("Error: "));
assert!(result.contains("File not found"));
}
#[test]
fn test_is_memory_pattern_valid() {
assert!(is_memory_pattern("8128 MiB, 8128 MiB free"));
assert!(is_memory_pattern("1024 MiB, 512 MiB free"));
assert!(is_memory_pattern("16384 MiB, 12000 MiB free"));
assert!(is_memory_pattern("0 MiB, 0 MiB free"));
}
#[test]
fn test_is_memory_pattern_invalid() {
assert!(!is_memory_pattern("8128 MB, 8128 MB free")); // Wrong unit
assert!(!is_memory_pattern("8128 MiB 8128 MiB free")); // Missing comma
assert!(!is_memory_pattern("8128 MiB, 8128 MiB used")); // Wrong second part
assert!(!is_memory_pattern("not_a_number MiB, 8128 MiB free")); // Invalid number
assert!(!is_memory_pattern("8128 MiB")); // Missing second part
assert!(!is_memory_pattern("")); // Empty string
assert!(!is_memory_pattern("8128 MiB, free")); // Missing number in second part
}
#[test]
fn test_find_memory_pattern() {
let text = "Loading model... (8128 MiB, 4096 MiB free) completed";
let result = find_memory_pattern(text);
assert!(result.is_some());
let (start_idx, content) = result.unwrap();
assert!(start_idx > 0);
assert_eq!(content, "8128 MiB, 4096 MiB free");
}
#[test]
fn test_find_memory_pattern_multiple_parentheses() {
let text = "Start (not memory) then (1024 MiB, 512 MiB free) and (2048 MiB, 1024 MiB free) end";
let result = find_memory_pattern(text);
assert!(result.is_some());
let (_, content) = result.unwrap();
// Should return the LAST valid memory pattern
assert_eq!(content, "2048 MiB, 1024 MiB free");
}
#[test]
fn test_find_memory_pattern_no_match() {
let text = "No memory info here";
assert!(find_memory_pattern(text).is_none());
let text_with_invalid = "Some text (invalid memory info) here";
assert!(find_memory_pattern(text_with_invalid).is_none());
}
#[test]
fn test_find_memory_pattern_unclosed_parenthesis() {
let text = "Unclosed (8128 MiB, 4096 MiB free";
assert!(find_memory_pattern(text).is_none());
}
}

View File

@ -38,6 +38,7 @@
"@tailwindcss/vite": "^4.1.4",
"@tanstack/react-router": "^1.116.0",
"@tanstack/react-router-devtools": "^1.121.34",
"@tanstack/react-virtual": "^3.13.12",
"@tauri-apps/api": "^2.5.0",
"@tauri-apps/plugin-deep-link": "~2",
"@tauri-apps/plugin-dialog": "^2.2.1",
@ -74,6 +75,7 @@
"react-textarea-autosize": "^8.5.9",
"rehype-katex": "^7.0.1",
"rehype-raw": "^7.0.0",
"remark-breaks": "^4.0.0",
"remark-emoji": "^5.0.1",
"remark-gfm": "^4.0.1",
"remark-math": "^6.0.0",

View File

@ -104,15 +104,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
if (selectedModel && selectedModel?.id) {
try {
// Only check mmproj for llamacpp provider
if (selectedProvider === 'llamacpp') {
const hasLocalMmproj = await serviceHub.models().checkMmprojExists(selectedModel.id)
setHasMmproj(hasLocalMmproj)
}
// For non-llamacpp providers, only check vision capability
else if (
selectedProvider !== 'llamacpp' &&
selectedModel?.capabilities?.includes('vision')
) {
if (selectedModel?.capabilities?.includes('vision')) {
setHasMmproj(true)
} else {
setHasMmproj(false)

View File

@ -119,13 +119,20 @@ const DropdownModelProvider = ({
const model = provider.models[modelIndex]
const capabilities = model.capabilities || []
// Add 'vision' capability if not already present
if (!capabilities.includes('vision')) {
// Add 'vision' capability if not already present AND if user hasn't manually configured capabilities
// Check if model has a custom capabilities config flag
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const hasUserConfiguredCapabilities = (model as any)._userConfiguredCapabilities === true
if (!capabilities.includes('vision') && !hasUserConfiguredCapabilities) {
const updatedModels = [...provider.models]
updatedModels[modelIndex] = {
...model,
capabilities: [...capabilities, 'vision'],
}
// Mark this as auto-detected, not user-configured
_autoDetectedVision: true,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any
updateProvider('llamacpp', { models: updatedModels })
}
@ -139,7 +146,7 @@ const DropdownModelProvider = ({
[getProviderByName, updateProvider, serviceHub]
)
// Initialize model provider only once
// Initialize model provider - avoid race conditions with manual selections
useEffect(() => {
const initializeModel = async () => {
// Auto select model when existing thread is passed
@ -150,11 +157,13 @@ const DropdownModelProvider = ({
}
// Check mmproj existence for llamacpp models
if (model?.provider === 'llamacpp') {
await serviceHub.models().checkMmprojExistsAndUpdateOffloadMMprojSetting(
model.id as string,
updateProvider,
getProviderByName
)
await serviceHub
.models()
.checkMmprojExistsAndUpdateOffloadMMprojSetting(
model.id as string,
updateProvider,
getProviderByName
)
// Also check vision capability
await checkAndUpdateModelVisionCapability(model.id as string)
}
@ -164,11 +173,13 @@ const DropdownModelProvider = ({
if (lastUsed && checkModelExists(lastUsed.provider, lastUsed.model)) {
selectModelProvider(lastUsed.provider, lastUsed.model)
if (lastUsed.provider === 'llamacpp') {
await serviceHub.models().checkMmprojExistsAndUpdateOffloadMMprojSetting(
lastUsed.model,
updateProvider,
getProviderByName
)
await serviceHub
.models()
.checkMmprojExistsAndUpdateOffloadMMprojSetting(
lastUsed.model,
updateProvider,
getProviderByName
)
// Also check vision capability
await checkAndUpdateModelVisionCapability(lastUsed.model)
}
@ -186,19 +197,28 @@ const DropdownModelProvider = ({
}
selectModelProvider('', '')
}
} else if (PlatformFeatures[PlatformFeature.WEB_AUTO_MODEL_SELECTION] && !selectedModel) {
// For web-only builds, always auto-select the first model from jan provider if none is selected
const janProvider = providers.find(
(p) => p.provider === 'jan' && p.active && p.models.length > 0
)
if (janProvider && janProvider.models.length > 0) {
const firstModel = janProvider.models[0]
selectModelProvider(janProvider.provider, firstModel.id)
} else {
// Get current state for web auto-selection check
const currentState = { selectedModel, selectedProvider }
if (
PlatformFeatures[PlatformFeature.WEB_AUTO_MODEL_SELECTION] &&
!currentState.selectedModel &&
!currentState.selectedProvider
) {
// For web-only builds, auto-select the first model from jan provider only if nothing is selected
const janProvider = providers.find(
(p) => p.provider === 'jan' && p.active && p.models.length > 0
)
if (janProvider && janProvider.models.length > 0) {
const firstModel = janProvider.models[0]
selectModelProvider(janProvider.provider, firstModel.id)
}
}
}
}
initializeModel()
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [
model,
selectModelProvider,
@ -210,7 +230,7 @@ const DropdownModelProvider = ({
getProviderByName,
checkAndUpdateModelVisionCapability,
serviceHub,
selectedModel,
// selectedModel and selectedProvider intentionally excluded to prevent race conditions
])
// Update display model when selection changes
@ -376,11 +396,13 @@ const DropdownModelProvider = ({
// Check mmproj existence for llamacpp models
if (searchableModel.provider.provider === 'llamacpp') {
await serviceHub.models().checkMmprojExistsAndUpdateOffloadMMprojSetting(
searchableModel.model.id,
updateProvider,
getProviderByName
)
await serviceHub
.models()
.checkMmprojExistsAndUpdateOffloadMMprojSetting(
searchableModel.model.id,
updateProvider,
getProviderByName
)
// Also check vision capability
await checkAndUpdateModelVisionCapability(searchableModel.model.id)
}
@ -572,7 +594,9 @@ const DropdownModelProvider = ({
{getProviderTitle(providerInfo.provider)}
</span>
</div>
{PlatformFeatures[PlatformFeature.MODEL_PROVIDER_SETTINGS] && (
{PlatformFeatures[
PlatformFeature.MODEL_PROVIDER_SETTINGS
] && (
<div
className="size-6 cursor-pointer flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out"
onClick={(e) => {

View File

@ -1,5 +1,6 @@
import { IconSettings } from '@tabler/icons-react'
import { IconSettings, IconLoader } from '@tabler/icons-react'
import debounce from 'lodash.debounce'
import { useState } from 'react'
import {
Sheet,
@ -9,6 +10,7 @@ import {
SheetTitle,
SheetTrigger,
} from '@/components/ui/sheet'
import { Button } from '@/components/ui/button'
import { DynamicControllerSetting } from '@/containers/dynamicControllerSetting'
import { useModelProvider } from '@/hooks/useModelProvider'
import { useServiceHub } from '@/hooks/useServiceHub'
@ -30,11 +32,154 @@ export function ModelSetting({
const { t } = useTranslation()
const serviceHub = useServiceHub()
const [isPlanning, setIsPlanning] = useState(false)
// Create a debounced version of stopModel that waits 500ms after the last call
const debouncedStopModel = debounce((modelId: string) => {
serviceHub.models().stopModel(modelId)
}, 500)
const handlePlanModelLoad = async () => {
if (provider.provider !== 'llamacpp') {
console.warn('planModelLoad is only available for llamacpp provider')
return
}
setIsPlanning(true)
try {
// Read the model config to get the actual model path and mmproj path
const modelConfig = await serviceHub.app().readYaml<{
model_path: string
mmproj_path?: string
}>(`llamacpp/models/${model.id}/model.yml`)
if (modelConfig && modelConfig.model_path) {
const result = await serviceHub
.models()
.planModelLoad(modelConfig.model_path, modelConfig.mmproj_path)
// Apply the recommended settings to the model sequentially to avoid race conditions
const settingsToUpdate: Array<{
key: string
value: number | boolean
}> = []
if (model.settings?.ngl && result.gpuLayers !== undefined) {
settingsToUpdate.push({ key: 'ngl', value: result.gpuLayers })
}
if (model.settings?.ctx_len && result.maxContextLength !== undefined) {
settingsToUpdate.push({
key: 'ctx_len',
value: result.maxContextLength,
})
}
if (
model.settings?.no_kv_offload &&
result.noOffloadKVCache !== undefined
) {
settingsToUpdate.push({
key: 'no_kv_offload',
value: result.noOffloadKVCache,
})
}
if (
model.settings?.no_kv_offload &&
result.noOffloadKVCache !== undefined
) {
settingsToUpdate.push({
key: 'no_kv_offload',
value: result.noOffloadKVCache,
})
}
if (
model.settings?.mmproj_offload &&
result.offloadMmproj !== undefined
) {
settingsToUpdate.push({
key: 'mmproj_offload',
value: result.offloadMmproj,
})
}
// Apply all settings in a single update to avoid race conditions
if (settingsToUpdate.length > 0) {
handleMultipleSettingsChange(settingsToUpdate)
}
} else {
console.warn('No model_path found in config for', model.id)
}
} catch (error) {
console.error('Error calling planModelLoad:', error)
} finally {
setIsPlanning(false)
}
}
const handleMultipleSettingsChange = (
settingsToUpdate: Array<{ key: string; value: number | boolean }>
) => {
if (!provider) return
// Create a copy of the model with ALL updated settings at once
let updatedModel = { ...model }
settingsToUpdate.forEach(({ key, value }) => {
const existingSetting = updatedModel.settings?.[key] as ProviderSetting
updatedModel = {
...updatedModel,
settings: {
...updatedModel.settings,
[key]: {
...existingSetting,
controller_props: {
...existingSetting?.controller_props,
value: value,
},
} as ProviderSetting,
},
}
})
// Find the model index in the provider's models array
const modelIndex = provider.models.findIndex((m) => m.id === model.id)
if (modelIndex !== -1) {
// Create a copy of the provider's models array
const updatedModels = [...provider.models]
// Update the specific model in the array
updatedModels[modelIndex] = updatedModel as Model
// Update the provider with the new models array
updateProvider(provider.provider, {
models: updatedModels,
})
// Check if any of the updated settings require a model restart
const requiresRestart = settingsToUpdate.some(
({ key }) =>
key === 'ctx_len' ||
key === 'ngl' ||
key === 'chat_template' ||
key === 'offload_mmproj'
)
if (requiresRestart) {
// Check if model is running before stopping it
serviceHub
.models()
.getActiveModels()
.then((activeModels) => {
if (activeModels.includes(model.id)) {
debouncedStopModel(model.id)
}
})
}
}
}
const handleSettingChange = (
key: string,
value: string | boolean | number
@ -72,8 +217,22 @@ export function ModelSetting({
})
// Call debounced stopModel only when updating ctx_len, ngl, chat_template, or offload_mmproj
if (key === 'ctx_len' || key === 'ngl' || key === 'chat_template' || key === 'offload_mmproj') {
debouncedStopModel(model.id)
// and only if the model is currently running
if (
key === 'ctx_len' ||
key === 'ngl' ||
key === 'chat_template' ||
key === 'offload_mmproj'
) {
// Check if model is running before stopping it
serviceHub
.models()
.getActiveModels()
.then((activeModels) => {
if (activeModels.includes(model.id)) {
debouncedStopModel(model.id)
}
})
}
}
}
@ -98,7 +257,43 @@ export function ModelSetting({
<SheetDescription>
{t('common:modelSettings.description')}
</SheetDescription>
{/* Model Load Planning Section - Only show for llamacpp provider */}
{provider.provider === 'llamacpp' && (
<div className="pb-4 border-b border-main-view-fg/10 my-4">
<div>
<div>
<div className="flex items-center gap-2 mb-1">
<h3 className="font-medium">Optimize Settings</h3>
<div className="text-xs bg-main-view-fg/10 border border-main-view-fg/20 text-main-view-fg/70 rounded-full py-0.5 px-2">
<span>{t('mcp-servers:experimental')}</span>
</div>
</div>
<p className="text-main-view-fg/70 text-xs mb-3">
Analyze your system and model, then apply optimal loading
settings automatically
</p>
</div>
<Button
onClick={handlePlanModelLoad}
disabled={isPlanning}
variant="default"
className="w-full"
>
{isPlanning ? (
<>
<IconLoader size={16} className="mr-2 animate-spin" />
Optimizing...
</>
) : (
<>Auto-Optimize Settings</>
)}
</Button>
</div>
</div>
)}
</SheetHeader>
<div className="px-4 space-y-6">
{Object.entries(model.settings || {}).map(([key, value]) => {
const config = value as ProviderSetting

View File

@ -3,6 +3,7 @@ import ReactMarkdown, { Components } from 'react-markdown'
import remarkGfm from 'remark-gfm'
import remarkEmoji from 'remark-emoji'
import remarkMath from 'remark-math'
import remarkBreaks from 'remark-breaks'
import rehypeKatex from 'rehype-katex'
import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter'
import * as prismStyles from 'react-syntax-highlighter/dist/cjs/styles/prism'
@ -162,8 +163,13 @@ function RenderMarkdownComponent({
// Memoize the remarkPlugins to prevent unnecessary re-renders
const remarkPlugins = useMemo(() => {
// Using a simpler configuration to avoid TypeScript errors
return [remarkGfm, remarkMath, remarkEmoji]
}, [])
const basePlugins = [remarkGfm, remarkMath, remarkEmoji]
// Add remark-breaks for user messages to handle single newlines as line breaks
if (isUser) {
basePlugins.push(remarkBreaks)
}
return basePlugins
}, [isUser])
// Memoize the rehypePlugins to prevent unnecessary re-renders
const rehypePlugins = useMemo(() => {

View File

@ -0,0 +1,104 @@
import { useBackendUpdater } from '@/hooks/useBackendUpdater'
import { IconDownload } from '@tabler/icons-react'
import { Button } from '@/components/ui/button'
import { useState, useEffect } from 'react'
import { cn } from '@/lib/utils'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { toast } from 'sonner'
/**
 * Floating notification card offering to update the llamacpp backend.
 *
 * Renders nothing when no update is available, when the user clicked
 * "remind me later", or when auto-update is enabled (the backend then
 * updates itself silently).
 *
 * Fix: the original mirrored `updateState` into a local `useState` via a
 * `useEffect`, which lagged one render behind and mixed mirrored values
 * (`remindMeLater`, `isUpdateAvailable`) with live ones
 * (`autoUpdateEnabled`) in the same visibility checks. The hook state is
 * now read directly, removing the redundant state and the sync effect.
 */
const BackendUpdater = () => {
  const { t } = useTranslation()
  const { updateState, updateBackend, checkForUpdate, setRemindMeLater } =
    useBackendUpdater()

  // Run the update, then suppress the card and report the outcome via toast.
  const handleUpdate = async () => {
    try {
      await updateBackend()
      setRemindMeLater(true)
      toast.success(t('settings:backendUpdater.updateSuccess'))
    } catch (error) {
      console.error('Backend update failed:', error)
      toast.error(t('settings:backendUpdater.updateError'))
    }
  }

  // Check for updates once when the component mounts.
  useEffect(() => {
    checkForUpdate()
  }, [checkForUpdate])

  // Don't show if the user deferred the update or auto update is enabled.
  if (updateState.remindMeLater || updateState.autoUpdateEnabled) return null

  return (
    <>
      {updateState.isUpdateAvailable && (
        <div
          className={cn(
            'fixed z-50 min-w-[300px] bottom-3 right-3 bg-main-view text-main-view-fg flex items-center justify-center border border-main-view-fg/10 rounded-lg shadow-md'
          )}
        >
          <div className="px-0 py-4">
            <div className="px-4">
              <div className="flex items-start gap-2">
                <IconDownload
                  size={20}
                  className="shrink-0 text-main-view-fg/60 mt-1"
                />
                <div>
                  <div className="text-base font-medium">
                    {t('settings:backendUpdater.newBackendVersion', {
                      version: updateState.updateInfo?.newVersion,
                    })}
                  </div>
                  <div className="mt-1 text-main-view-fg/70 font-normal mb-2">
                    {t('settings:backendUpdater.backendUpdateAvailable')}
                  </div>
                </div>
              </div>
            </div>
            <div className="pt-3 px-4">
              <div className="flex gap-x-4 w-full items-center justify-end">
                <div className="flex gap-x-5">
                  <Button
                    variant="link"
                    className="px-0 text-main-view-fg/70 remind-me-later"
                    onClick={() => setRemindMeLater(true)}
                  >
                    {t('settings:backendUpdater.remindMeLater')}
                  </Button>
                  <Button
                    onClick={handleUpdate}
                    disabled={updateState.isUpdating}
                  >
                    {updateState.isUpdating
                      ? t('settings:backendUpdater.updating')
                      : t('settings:backendUpdater.updateNow')}
                  </Button>
                </div>
              </div>
            </div>
          </div>
        </div>
      )}
    </>
  )
}

export default BackendUpdater

View File

@ -13,6 +13,7 @@ import {
IconPencil,
IconEye,
IconTool,
IconAlertTriangle,
// IconWorld,
// IconAtom,
// IconCodeCircle2,
@ -105,6 +106,8 @@ export const DialogEditModel = ({
return {
...m,
capabilities: updatedCapabilities,
// Mark that user has manually configured capabilities
_userConfiguredCapabilities: true,
}
}
return m
@ -145,6 +148,21 @@ export const DialogEditModel = ({
</DialogDescription>
</DialogHeader>
{/* Warning Banner */}
<div className="bg-main-view-fg/5 border border-main-view-fg/10 rounded-md p-3">
<div className="flex items-start space-x-3">
<IconAlertTriangle className="size-5 text-yellow-600 mt-0.5 flex-shrink-0" />
<div className="text-sm text-main-view-fg/80">
<p className="font-medium mb-1 text-base">
{t('providers:editModel.warning.title')}
</p>
<p className="text-main-view-fg/70">
{t('providers:editModel.warning.description')}
</p>
</div>
</div>
</div>
<div className="py-1">
<h3 className="text-sm font-medium mb-3">
{t('providers:editModel.capabilities')}

View File

@ -0,0 +1,568 @@
import {
Dialog,
DialogContent,
DialogDescription,
DialogHeader,
DialogTitle,
DialogTrigger,
DialogFooter,
} from '@/components/ui/dialog'
import { Button } from '@/components/ui/button'
import { Switch } from '@/components/ui/switch'
import { useServiceHub } from '@/hooks/useServiceHub'
import { useState } from 'react'
import { toast } from 'sonner'
import {
IconLoader2,
IconEye,
IconCheck,
IconAlertTriangle,
} from '@tabler/icons-react'
type ImportVisionModelDialogProps = {
provider: ModelProvider
trigger?: React.ReactNode
onSuccess?: (importedModelName?: string) => void
}
/**
 * Dialog for importing a local GGUF model into the given provider,
 * optionally together with an MMPROJ (CLIP) projector file for vision
 * support.
 *
 * Validation rules enforced before import:
 *  - the model file must NOT have `general.architecture === 'clip'`
 *  - the MMPROJ file MUST have `general.architecture === 'clip'`
 *
 * Props:
 *  - provider: target provider; used for duplicate-name detection and import
 *  - trigger:  optional element that opens the dialog (rendered via asChild)
 *  - onSuccess: called with the imported model name after a successful pull
 */
export const ImportVisionModelDialog = ({
  provider,
  trigger,
  onSuccess,
}: ImportVisionModelDialogProps) => {
  const serviceHub = useServiceHub()
  const [open, setOpen] = useState(false)
  const [importing, setImporting] = useState(false)
  const [isVisionModel, setIsVisionModel] = useState(false)
  const [modelFile, setModelFile] = useState<string | null>(null)
  const [mmProjFile, setMmProjFile] = useState<string | null>(null)
  const [modelName, setModelName] = useState('')
  // Validation state is tracked separately per file kind so both cards can
  // show independent spinners / error banners.
  const [validationError, setValidationError] = useState<string | null>(null)
  const [isValidating, setIsValidating] = useState(false)
  const [mmprojValidationError, setMmprojValidationError] = useState<
    string | null
  >(null)
  const [isValidatingMmproj, setIsValidatingMmproj] = useState(false)

  /**
   * Read GGUF metadata for the selected file and record a validation error
   * when its architecture does not match the expected kind.
   * Errors are surfaced through state (not thrown); the import button is
   * disabled while an error is present.
   */
  const validateGgufFile = async (
    filePath: string,
    fileType: 'model' | 'mmproj'
  ): Promise<void> => {
    // Reset the relevant error slot and show the matching spinner.
    if (fileType === 'model') {
      setIsValidating(true)
      setValidationError(null)
    } else {
      setIsValidatingMmproj(true)
      setMmprojValidationError(null)
    }

    try {
      console.log(`Reading GGUF metadata for ${fileType}:`, filePath)

      // Handle validation differently for model files vs mmproj files
      if (fileType === 'model') {
        // For model files, use the standard validateGgufFile method
        if (typeof serviceHub.models().validateGgufFile === 'function') {
          const result = await serviceHub.models().validateGgufFile(filePath)

          if (result.metadata) {
            // Log full metadata for debugging
            console.log(
              `Full GGUF metadata for ${fileType}:`,
              JSON.stringify(result.metadata, null, 2)
            )

            // Check architecture from metadata
            const architecture =
              result.metadata.metadata?.['general.architecture']
            console.log(`${fileType} architecture:`, architecture)

            // Model files should NOT be clip
            if (architecture === 'clip') {
              const errorMessage =
                'This model has CLIP architecture and cannot be imported as a text generation model. CLIP models are designed for vision tasks and require different handling.'
              setValidationError(errorMessage)
              console.error(
                'CLIP architecture detected in model file:',
                architecture
              )
            } else {
              console.log(
                'Model validation passed. Architecture:',
                architecture
              )
            }
          }

          if (!result.isValid) {
            setValidationError(result.error || 'Model validation failed')
            console.error('Model validation failed:', result.error)
          }
        }
      } else {
        // For mmproj files, we need to manually validate since validateGgufFile rejects CLIP models
        try {
          // Import the readGgufMetadata function directly from Tauri
          // NOTE(review): this bypasses the serviceHub abstraction and only
          // works under Tauri (desktop) — confirm web builds never reach
          // this path.
          const { invoke } = await import('@tauri-apps/api/core')
          const metadata = await invoke('plugin:llamacpp|read_gguf_metadata', {
            path: filePath,
          })

          console.log(
            `Full GGUF metadata for ${fileType}:`,
            JSON.stringify(metadata, null, 2)
          )

          // Check if architecture matches expected type
          const architecture = (
            metadata as { metadata?: Record<string, string> }
          ).metadata?.['general.architecture']
          console.log(`${fileType} architecture:`, architecture)

          // MMProj files MUST be clip
          if (architecture !== 'clip') {
            const errorMessage = `This MMProj file has "${architecture}" architecture but should have "clip" architecture. MMProj files must be CLIP models for vision processing.`
            setMmprojValidationError(errorMessage)
            console.error(
              'Non-CLIP architecture detected in mmproj file:',
              architecture
            )
          } else {
            console.log(
              'MMProj validation passed. Architecture:',
              architecture
            )
          }
        } catch (directError) {
          console.error('Failed to validate mmproj file directly:', directError)
          const errorMessage = `Failed to read MMProj metadata: ${
            directError instanceof Error ? directError.message : 'Unknown error'
          }`
          setMmprojValidationError(errorMessage)
        }
      }
    } catch (error) {
      console.error(`Failed to validate ${fileType} file:`, error)
      const errorMessage = `Failed to read ${fileType} metadata: ${error instanceof Error ? error.message : 'Unknown error'}`
      if (fileType === 'model') {
        setValidationError(errorMessage)
      } else {
        setMmprojValidationError(errorMessage)
      }
    } finally {
      // Always clear the spinner for the file kind that was validated.
      if (fileType === 'model') {
        setIsValidating(false)
      } else {
        setIsValidatingMmproj(false)
      }
    }
  }

  // Thin wrappers so call sites read clearly.
  const validateModelFile = async (filePath: string): Promise<void> => {
    await validateGgufFile(filePath, 'model')
  }

  const validateMmprojFile = async (filePath: string): Promise<void> => {
    await validateGgufFile(filePath, 'mmproj')
  }

  /**
   * Open a native file picker for the given file kind, store the chosen
   * path, derive the model name (model files only) and kick off validation.
   */
  const handleFileSelect = async (type: 'model' | 'mmproj') => {
    const selectedFile = await serviceHub.dialog().open({
      multiple: false,
      directory: false,
    })

    if (selectedFile && typeof selectedFile === 'string') {
      // Works for both Windows (\) and POSIX (/) separators.
      const fileName = selectedFile.split(/[\\/]/).pop() || ''
      if (type === 'model') {
        setModelFile(selectedFile)
        // Auto-generate model name from GGUF file
        const sanitizedName = fileName
          .replace(/\s/g, '-')
          .replace(/\.(gguf|GGUF)$/, '')
          .replace(/[^a-zA-Z0-9/_.-]/g, '') // Remove any characters not allowed in model IDs
        setModelName(sanitizedName)
        // Validate the selected model file
        await validateModelFile(selectedFile)
      } else {
        setMmProjFile(selectedFile)
        // Validate the selected mmproj file
        await validateMmprojFile(selectedFile)
      }
    }
  }

  /**
   * Perform the import via serviceHub.models().pullModel after re-checking
   * preconditions (files selected, name derived, no duplicate in provider).
   * On success the form is reset, the dialog closed and onSuccess invoked.
   */
  const handleImport = async () => {
    if (!modelFile) {
      toast.error('Please select a model file')
      return
    }

    if (isVisionModel && !mmProjFile) {
      toast.error('Please select both model and MMPROJ files for vision models')
      return
    }

    if (!modelName) {
      toast.error('Unable to determine model name from file')
      return
    }

    // Check if model already exists
    const modelExists = provider.models.some(
      (model) => model.name === modelName
    )
    if (modelExists) {
      toast.error('Model already exists', {
        description: `${modelName} already imported`,
      })
      return
    }

    setImporting(true)
    try {
      if (isVisionModel && mmProjFile) {
        // Import vision model with both files - let backend calculate SHA256 and sizes
        await serviceHub.models().pullModel(
          modelName,
          modelFile,
          undefined, // modelSha256 - calculated by backend
          undefined, // modelSize - calculated by backend
          mmProjFile // mmprojPath
          // mmprojSha256 and mmprojSize omitted - calculated by backend
        )
      } else {
        // Import regular model - let backend calculate SHA256 and size
        await serviceHub.models().pullModel(modelName, modelFile)
      }

      toast.success('Model imported successfully', {
        description: `${modelName} has been imported`,
      })

      // Reset form and close dialog
      resetForm()
      setOpen(false)
      onSuccess?.(modelName)
    } catch (error) {
      console.error('Import model error:', error)
      toast.error('Failed to import model', {
        description:
          error instanceof Error ? error.message : 'Unknown error occurred',
      })
    } finally {
      setImporting(false)
    }
  }

  // Return every piece of form state to its initial value.
  const resetForm = () => {
    setModelFile(null)
    setMmProjFile(null)
    setModelName('')
    setIsVisionModel(false)
    setValidationError(null)
    setIsValidating(false)
    setMmprojValidationError(null)
    setIsValidatingMmproj(false)
  }

  // Block open/close while an import is in flight; reset the form when the
  // dialog is dismissed.
  const handleOpenChange = (newOpen: boolean) => {
    if (!importing) {
      setOpen(newOpen)
      if (!newOpen) {
        resetForm()
      }
    }
  }

  return (
    <Dialog open={open} onOpenChange={handleOpenChange}>
      <DialogTrigger asChild>{trigger}</DialogTrigger>
      <DialogContent>
        <DialogHeader>
          <DialogTitle className="flex items-center gap-2">
            Import Model
          </DialogTitle>
          <DialogDescription>
            Import a GGUF model file to add it to your collection. Enable vision
            support for models that work with images.
          </DialogDescription>
        </DialogHeader>

        <div className="space-y-6">
          {/* Vision Model Toggle Card */}
          <div className="border border-main-view-fg/10 rounded-lg p-4 space-y-3 bg-main-view-fg/5">
            <div className="flex items-start space-x-3">
              <div className="flex-shrink-0 mt-0.5">
                <IconEye size={20} className="text-accent" />
              </div>
              <div className="flex-1">
                <h3 className="font-medium text-main-view-fg">
                  Vision Model Support
                </h3>
                <p className="text-sm text-main-view-fg/70">
                  Enable if your model supports image understanding (requires
                  MMPROJ file)
                </p>
              </div>
              <Switch
                id="vision-model"
                checked={isVisionModel}
                onCheckedChange={(checked) => {
                  setIsVisionModel(checked)
                  {/* Turning vision off discards any selected mmproj state */}
                  if (!checked) {
                    setMmProjFile(null)
                    setMmprojValidationError(null)
                    setIsValidatingMmproj(false)
                  }
                }}
                className="mt-1"
              />
            </div>
          </div>

          {/* Model Name Preview */}
          {modelName && (
            <div className="bg-main-view-fg/5 rounded-lg p-3">
              <div className="flex items-center gap-2">
                <span className="text-sm font-medium text-main-view-fg/80">
                  Model will be saved as:
                </span>
              </div>
              <p className="text-base font-mono mt-1 text-main-view-fg">
                {modelName}
              </p>
            </div>
          )}

          {/* File Selection Area */}
          <div className="space-y-4">
            {/* Model File Selection */}
            <div className="border border-main-view-fg/10 rounded-lg p-4 space-y-3 bg-main-view-fg/5">
              <div className="flex items-center gap-2">
                <h3 className="font-medium text-main-view-fg">
                  Model File (GGUF)
                </h3>
                <span className="text-xs bg-main-view-fg/10 text-main-view-fg/70 px-2 py-1 rounded">
                  Required
                </span>
              </div>

              {modelFile ? (
                <div className="space-y-2">
                  <div className="bg-accent/10 border border-accent/20 rounded-lg p-3">
                    <div className="flex items-center justify-between">
                      <div className="flex items-center gap-2">
                        {isValidating ? (
                          <IconLoader2
                            size={16}
                            className="text-accent animate-spin"
                          />
                        ) : validationError ? (
                          <IconAlertTriangle
                            size={16}
                            className="text-destructive"
                          />
                        ) : (
                          <IconCheck size={16} className="text-accent" />
                        )}
                        <span className="text-sm font-medium text-main-view-fg">
                          {modelFile.split(/[\\/]/).pop()}
                        </span>
                      </div>
                      <Button
                        variant="link"
                        size="sm"
                        onClick={() => handleFileSelect('model')}
                        disabled={importing || isValidating}
                        className="text-accent hover:text-accent/80"
                      >
                        Change
                      </Button>
                    </div>
                  </div>

                  {/* Validation Error Display */}
                  {validationError && (
                    <div className="bg-destructive/10 border border-destructive/20 rounded-lg p-3">
                      <div className="flex items-start gap-2">
                        <IconAlertTriangle
                          size={16}
                          className="text-destructive mt-0.5 flex-shrink-0"
                        />
                        <div>
                          <p className="text-sm font-medium text-destructive">
                            Model Validation Error
                          </p>
                          <p className="text-sm text-destructive/90 mt-1">
                            {validationError}
                          </p>
                        </div>
                      </div>
                    </div>
                  )}

                  {/* Validation Loading State */}
                  {isValidating && (
                    <div className="bg-blue-50 border border-blue-200 rounded-lg p-3">
                      <div className="flex items-center gap-2">
                        <IconLoader2
                          size={16}
                          className="text-blue-500 animate-spin"
                        />
                        <p className="text-sm text-blue-700">
                          Validating model file...
                        </p>
                      </div>
                    </div>
                  )}
                </div>
              ) : (
                <Button
                  type="button"
                  variant="link"
                  onClick={() => handleFileSelect('model')}
                  disabled={importing}
                  className="w-full h-12 border border-dashed border-main-view-fg/10 bg-main-view text-main-view-fg/50 hover:text-main-view-fg"
                >
                  Select GGUF File
                </Button>
              )}
            </div>

            {/* MMPROJ File Selection - only show if vision model is enabled */}
            {isVisionModel && (
              <div className="border border-main-view-fg/10 rounded-lg p-4 space-y-3 bg-main-view-fg/5">
                <div className="flex items-center gap-2">
                  <h3 className="font-medium text-main-view-fg">MMPROJ File</h3>
                  <span className="text-xs bg-accent/10 text-accent px-2 py-1 rounded">
                    Required for Vision
                  </span>
                </div>

                {mmProjFile ? (
                  <div className="space-y-2">
                    <div className="bg-accent/10 border border-accent/20 rounded-lg p-3">
                      <div className="flex items-center justify-between">
                        <div className="flex items-center gap-2">
                          {isValidatingMmproj ? (
                            <IconLoader2
                              size={16}
                              className="text-accent animate-spin"
                            />
                          ) : mmprojValidationError ? (
                            <IconAlertTriangle
                              size={16}
                              className="text-destructive"
                            />
                          ) : (
                            <IconCheck size={16} className="text-accent" />
                          )}
                          <span className="text-sm font-medium text-main-view-fg">
                            {mmProjFile.split(/[\\/]/).pop()}
                          </span>
                        </div>
                        <Button
                          variant="link"
                          size="sm"
                          onClick={() => handleFileSelect('mmproj')}
                          disabled={importing || isValidatingMmproj}
                          className="text-accent hover:text-accent/80"
                        >
                          Change
                        </Button>
                      </div>
                    </div>

                    {/* MMProj Validation Error Display */}
                    {mmprojValidationError && (
                      <div className="bg-destructive/10 border border-destructive/20 rounded-lg p-3">
                        <div className="flex items-start gap-2">
                          <IconAlertTriangle
                            size={16}
                            className="text-destructive mt-0.5 flex-shrink-0"
                          />
                          <div>
                            <p className="text-sm font-medium text-destructive">
                              MMProj Validation Error
                            </p>
                            <p className="text-sm text-destructive/90 mt-1">
                              {mmprojValidationError}
                            </p>
                          </div>
                        </div>
                      </div>
                    )}

                    {/* MMProj Validation Loading State */}
                    {isValidatingMmproj && (
                      <div className="bg-blue-50 border border-blue-200 rounded-lg p-3">
                        <div className="flex items-center gap-2">
                          <IconLoader2
                            size={16}
                            className="text-blue-500 animate-spin"
                          />
                          <p className="text-sm text-blue-700">
                            Validating MMProj file...
                          </p>
                        </div>
                      </div>
                    )}
                  </div>
                ) : (
                  <Button
                    type="button"
                    variant="link"
                    onClick={() => handleFileSelect('mmproj')}
                    disabled={importing}
                    className="w-full h-12 border border-dashed border-main-view-fg/10 bg-main-view text-main-view-fg/50 hover:text-main-view-fg"
                  >
                    Select MMPROJ File
                  </Button>
                )}
              </div>
            )}
          </div>
        </div>

        <DialogFooter className="flex gap-2 pt-4">
          <Button
            variant="link"
            onClick={() => handleOpenChange(false)}
            disabled={importing}
            className="flex-1"
          >
            Cancel
          </Button>
          {/* Import stays disabled until both files (as required) are
              selected, named, and have passed validation. */}
          <Button
            onClick={handleImport}
            disabled={
              importing ||
              !modelFile ||
              !modelName ||
              (isVisionModel && !mmProjFile) ||
              validationError !== null ||
              isValidating ||
              mmprojValidationError !== null ||
              isValidatingMmproj
            }
            className="flex-1"
          >
            {importing && <IconLoader2 className="mr-2 h-4 w-4 animate-spin" />}
            {importing ? (
              'Importing...'
            ) : (
              <>Import {isVisionModel ? 'Vision ' : ''}Model</>
            )}
          </Button>
        </DialogFooter>
      </DialogContent>
    </Dialog>
  )
}

View File

@ -0,0 +1,352 @@
import { useState, useCallback, useEffect } from 'react'
import { events } from '@janhq/core'
import { ExtensionManager } from '@/lib/extension'
/** Result of an update check reported by the llamacpp extension. */
export interface BackendUpdateInfo {
  // True when a newer backend build than the installed one is available.
  updateNeeded: boolean
  // Version tag of the available backend build (e.g. "b3224").
  newVersion: string
  // Installed backend version, when the extension reports it.
  currentVersion?: string
}

// Minimal shape of one setting entry as returned by getSettings().
interface ExtensionSetting {
  key: string
  controllerProps?: {
    value: unknown
  }
}

// Structural (duck-typed) view of the llamacpp extension surface this hook
// relies on. Every member is optional because the instance is resolved at
// runtime and probed with `in` checks before use.
interface LlamacppExtension {
  getSettings?(): Promise<ExtensionSetting[]>
  checkBackendForUpdates?(): Promise<BackendUpdateInfo>
  updateBackend?(
    targetBackend: string
  ): Promise<{ wasUpdated: boolean; newBackend: string }>
  installBackend?(filePath: string): Promise<void>
  configureBackends?(): Promise<void>
}

/** Aggregate updater UI state, kept in sync across all hook instances. */
export interface BackendUpdateState {
  // Whether an update is currently offered to the user.
  isUpdateAvailable: boolean
  // Details of the offered update, or null when none is available.
  updateInfo: BackendUpdateInfo | null
  // True while an update is being applied.
  isUpdating: boolean
  // True when the user dismissed the notification for now.
  remindMeLater: boolean
  // True when the extension's auto-update setting is on (UI stays hidden).
  autoUpdateEnabled: boolean
}
export const useBackendUpdater = () => {
const [updateState, setUpdateState] = useState<BackendUpdateState>({
isUpdateAvailable: false,
updateInfo: null,
isUpdating: false,
remindMeLater: false,
autoUpdateEnabled: false,
})
// Listen for backend update state sync events
useEffect(() => {
const handleUpdateStateSync = (newState: Partial<BackendUpdateState>) => {
setUpdateState((prev) => ({
...prev,
...newState,
}))
}
events.on('onBackendUpdateStateSync', handleUpdateStateSync)
return () => {
events.off('onBackendUpdateStateSync', handleUpdateStateSync)
}
}, [])
// Check auto update setting from llamacpp extension
useEffect(() => {
const checkAutoUpdateSetting = async () => {
try {
// Get llamacpp extension instance
const allExtensions = ExtensionManager.getInstance().listExtensions()
let llamacppExtension =
ExtensionManager.getInstance().getByName('llamacpp-extension')
if (!llamacppExtension) {
// Try to find by type or other properties
llamacppExtension =
allExtensions.find(
(ext) =>
ext.constructor.name.toLowerCase().includes('llamacpp') ||
(ext.type &&
ext.type()?.toString().toLowerCase().includes('inference'))
) || undefined
}
if (llamacppExtension && 'getSettings' in llamacppExtension) {
const extension = llamacppExtension as LlamacppExtension
const settings = await extension.getSettings?.()
const autoUpdateSetting = settings?.find(
(s) => s.key === 'auto_update_engine'
)
setUpdateState((prev) => ({
...prev,
autoUpdateEnabled:
autoUpdateSetting?.controllerProps?.value === true,
}))
}
} catch (error) {
console.error('Failed to check auto update setting:', error)
}
}
checkAutoUpdateSetting()
}, [])
const syncStateToOtherInstances = useCallback(
(partialState: Partial<BackendUpdateState>) => {
// Emit event to sync state across all useBackendUpdater instances
events.emit('onBackendUpdateStateSync', partialState)
},
[]
)
const checkForUpdate = useCallback(
async (resetRemindMeLater = false) => {
try {
// Reset remindMeLater if requested (e.g., when called from settings)
if (resetRemindMeLater) {
const newState = {
remindMeLater: false,
}
setUpdateState((prev) => ({
...prev,
...newState,
}))
syncStateToOtherInstances(newState)
}
// Get llamacpp extension instance
const allExtensions = ExtensionManager.getInstance().listExtensions()
const llamacppExtension =
ExtensionManager.getInstance().getByName('llamacpp-extension')
let extensionToUse = llamacppExtension
if (!llamacppExtension) {
// Try to find by type or other properties
const possibleExtension = allExtensions.find(
(ext) =>
ext.constructor.name.toLowerCase().includes('llamacpp') ||
(ext.type &&
ext.type()?.toString().toLowerCase().includes('inference'))
)
if (!possibleExtension) {
console.error('LlamaCpp extension not found')
return null
}
extensionToUse = possibleExtension
}
if (!extensionToUse || !('checkBackendForUpdates' in extensionToUse)) {
console.error(
'Extension does not support checkBackendForUpdates method'
)
return null
}
// Call the extension's checkBackendForUpdates method
const extension = extensionToUse as LlamacppExtension
const updateInfo = await extension.checkBackendForUpdates?.()
if (updateInfo?.updateNeeded) {
const newState = {
isUpdateAvailable: true,
remindMeLater: false,
updateInfo,
}
setUpdateState((prev) => ({
...prev,
...newState,
}))
syncStateToOtherInstances(newState)
console.log('Backend update available:', updateInfo?.newVersion)
return updateInfo
} else {
// No update available - reset state
const newState = {
isUpdateAvailable: false,
updateInfo: null,
}
setUpdateState((prev) => ({
...prev,
...newState,
}))
syncStateToOtherInstances(newState)
return null
}
} catch (error) {
console.error('Error checking for backend updates:', error)
// Reset state on error
const newState = {
isUpdateAvailable: false,
updateInfo: null,
}
setUpdateState((prev) => ({
...prev,
...newState,
}))
syncStateToOtherInstances(newState)
return null
}
},
[syncStateToOtherInstances]
)
const setRemindMeLater = useCallback(
(remind: boolean) => {
const newState = {
remindMeLater: remind,
}
setUpdateState((prev) => ({
...prev,
...newState,
}))
syncStateToOtherInstances(newState)
},
[syncStateToOtherInstances]
)
// Update the llamacpp backend to the version reported in updateState.updateInfo.
// Flow: locate the llamacpp extension, read the currently selected backend
// variant from its settings, build a "<newVersion>/<variant>" target string,
// and ask the extension to perform the update. On success the update banner
// state is cleared and synced to other instances; on failure the error is
// rethrown so callers can surface it (e.g. via a toast).
const updateBackend = useCallback(async () => {
// No pending update info means there is nothing to install.
if (!updateState.updateInfo) return
try {
// Flag the UI as busy for the duration of the update.
setUpdateState((prev) => ({
...prev,
isUpdating: true,
}))
// Get llamacpp extension instance
const allExtensions = ExtensionManager.getInstance().listExtensions()
const llamacppExtension =
ExtensionManager.getInstance().getByName('llamacpp-extension')
let extensionToUse = llamacppExtension
if (!llamacppExtension) {
// Try to find by type or other properties
// Fallback heuristic: match by constructor name or an 'inference' type.
// NOTE(review): assumes at most one such extension is registered — confirm.
const possibleExtension = allExtensions.find(
(ext) =>
ext.constructor.name.toLowerCase().includes('llamacpp') ||
(ext.type &&
ext.type()?.toString().toLowerCase().includes('inference'))
)
if (!possibleExtension) {
throw new Error('LlamaCpp extension not found')
}
extensionToUse = possibleExtension
}
// Duck-type check: the fallback lookup may have found an extension that
// does not expose the backend-update API.
if (
!extensionToUse ||
!('getSettings' in extensionToUse) ||
!('updateBackend' in extensionToUse)
) {
throw new Error('Extension does not support backend updates')
}
// Get current backend version to construct target backend string
const extension = extensionToUse as LlamacppExtension
const settings = await extension.getSettings?.()
const currentBackendSetting = settings?.find(
(s) => s.key === 'version_backend'
)
const currentBackend = currentBackendSetting?.controllerProps
?.value as string
if (!currentBackend) {
throw new Error('Current backend not found')
}
// Extract backend type from current backend string (e.g., "b3224/cuda12" -> "cuda12")
const [, backendType] = currentBackend.split('/')
const targetBackendString = `${updateState.updateInfo.newVersion}/${backendType}`
// Call the extension's updateBackend method
const result = await extension.updateBackend?.(targetBackendString)
if (result?.wasUpdated) {
// Reset update state
// Clear the update banner everywhere once the new backend is in place.
const newState = {
isUpdateAvailable: false,
updateInfo: null,
isUpdating: false,
}
setUpdateState((prev) => ({
...prev,
...newState,
}))
syncStateToOtherInstances(newState)
} else {
throw new Error('Backend update failed')
}
} catch (error) {
console.error('Error updating backend:', error)
// Drop the busy flag but keep updateInfo so the user can retry.
setUpdateState((prev) => ({
...prev,
isUpdating: false,
}))
// Rethrow so the caller can present the failure to the user.
throw error
}
}, [updateState.updateInfo, syncStateToOtherInstances])
// Install a llamacpp backend from a local archive file.
// Locates the llamacpp extension (same lookup strategy as updateBackend),
// delegates the install to extension.installBackend(filePath), then calls
// configureBackends() to refresh the backend list shown in settings.
// Errors are logged and rethrown for the caller to handle.
const installBackend = useCallback(async (filePath: string) => {
try {
// Get llamacpp extension instance
const allExtensions = ExtensionManager.getInstance().listExtensions()
const llamacppExtension =
ExtensionManager.getInstance().getByName('llamacpp-extension')
let extensionToUse = llamacppExtension
if (!llamacppExtension) {
// Try to find by type or other properties
// Fallback heuristic: match by constructor name or an 'inference' type.
const possibleExtension = allExtensions.find(
(ext) =>
ext.constructor.name.toLowerCase().includes('llamacpp') ||
(ext.type &&
ext.type()?.toString().toLowerCase().includes('inference'))
)
if (!possibleExtension) {
throw new Error('LlamaCpp extension not found')
}
extensionToUse = possibleExtension
}
// Duck-type check before casting: not every extension can install backends.
if (!extensionToUse || !('installBackend' in extensionToUse)) {
throw new Error('Extension does not support backend installation')
}
// Call the extension's installBackend method
const extension = extensionToUse as LlamacppExtension
await extension.installBackend?.(filePath)
// Refresh backend list to update UI
await extension.configureBackends?.()
} catch (error) {
console.error('Error installing backend:', error)
// Rethrow so the caller can surface the failure (e.g. via a toast).
throw error
}
}, [])
return {
updateState,
checkForUpdate,
updateBackend,
setRemindMeLater,
installBackend,
}
}

View File

@ -93,7 +93,11 @@ export const useModelProvider = create<ModelProviderState>()(
? legacyModels
: models
).find(
(m) => m.id.split(':').slice(0, 2).join(getServiceHub().path().sep()) === model.id
(m) =>
m.id
.split(':')
.slice(0, 2)
.join(getServiceHub().path().sep()) === model.id
)?.settings || model.settings
const existingModel = models.find((m) => m.id === model.id)
return {
@ -227,7 +231,7 @@ export const useModelProvider = create<ModelProviderState>()(
>
}
if (version === 0 && state?.providers) {
if (version <= 1 && state?.providers) {
state.providers.forEach((provider) => {
// Update cont_batching description for llamacpp provider
if (provider.provider === 'llamacpp' && provider.settings) {
@ -270,6 +274,15 @@ export const useModelProvider = create<ModelProviderState>()(
},
}
}
if (!model.settings.no_kv_offload) {
model.settings.no_kv_offload = {
...modelSettings.no_kv_offload,
controller_props: {
...modelSettings.no_kv_offload.controller_props,
},
}
}
})
}
})
@ -277,7 +290,7 @@ export const useModelProvider = create<ModelProviderState>()(
return state
},
version: 1,
version: 2,
}
)
)

View File

@ -182,10 +182,18 @@ export const sendCompletion = async (
'X-Title': 'Jan',
},
}),
// Add Origin header for local providers to avoid CORS issues
...((provider.base_url?.includes('localhost:') || provider.base_url?.includes('127.0.0.1:')) && {
fetch: getServiceHub().providers().fetch(),
defaultHeaders: {
'Origin': 'tauri://localhost',
},
}),
} as ExtendedConfigOptions)
if (
thread.model.id &&
models[providerName]?.models !== true && // Skip if provider accepts any model (models: true)
!Object.values(models[providerName]).flat().includes(thread.model.id) &&
!tokenJS.extendedModelExist(providerName as any, thread.model.id) &&
provider.provider !== 'llamacpp'
@ -389,9 +397,12 @@ export const postMessageProcessing = async (
let toolParameters = {}
if (toolCall.function.arguments.length) {
try {
console.log('Raw tool arguments:', toolCall.function.arguments)
toolParameters = JSON.parse(toolCall.function.arguments)
console.log('Parsed tool parameters:', toolParameters)
} catch (error) {
console.error('Failed to parse tool arguments:', error)
console.error('Raw arguments that failed:', toolCall.function.arguments)
}
}
const approved =
@ -407,9 +418,7 @@ export const postMessageProcessing = async (
const { promise, cancel } = getServiceHub().mcp().callToolWithCancellation({
toolName: toolCall.function.name,
arguments: toolCall.function.arguments.length
? JSON.parse(toolCall.function.arguments)
: {},
arguments: toolCall.function.arguments.length ? toolParameters : {},
})
useAppState.getState().setCancelToolCall(cancel)

View File

@ -21,7 +21,7 @@ export const PlatformFeatures: Record<PlatformFeature, boolean> = {
[PlatformFeature.LOCAL_INFERENCE]: isPlatformTauri(),
// MCP (Model Context Protocol) servers
[PlatformFeature.MCP_SERVERS]: isPlatformTauri(),
[PlatformFeature.MCP_SERVERS]: true,
// Local API server
[PlatformFeature.LOCAL_API_SERVER]: isPlatformTauri(),

View File

@ -144,4 +144,13 @@ export const modelSettings = {
type: 'text',
},
},
no_kv_offload: {
key: 'no_kv_offload',
title: 'Disable KV Offload',
description: 'Disable KV cache offload to GPU (if GPU is used).',
controller_type: 'checkbox',
controller_props: {
value: false,
},
},
}

View File

@ -60,7 +60,11 @@
"tools": "Werkzeuge",
"vision": "Vision",
"embeddings": "Einbettungen",
"notAvailable": "Noch nicht verfügbar"
"notAvailable": "Noch nicht verfügbar",
"warning": {
"title": "Mit Vorsicht fortfahren",
"description": "Das Ändern von Modellfunktionen kann Leistung und Funktionalität beeinträchtigen. Falsche Einstellungen können zu unerwartetem Verhalten oder Fehlern führen."
}
},
"addProvider": "Anbieter hinzufügen",
"addOpenAIProvider": "OpenAI Anbieter hinzufügen",

View File

@ -6,6 +6,11 @@
"noUpdateAvailable": "Du verwendest die neueste Version",
"devVersion": "Entwicklungsversion erkannt",
"updateError": "Fehler beim Suchen nach Updates",
"checkForBackendUpdates": "Llamacpp Updates prüfen",
"checkForBackendUpdatesDesc": "Prüfe, ob eine neuere Version des Llamacpp-Backends verfügbar ist.",
"checkingForBackendUpdates": "Suche nach Llamacpp Updates...",
"noBackendUpdateAvailable": "Du verwendest die neueste Llamacpp Version",
"backendUpdateError": "Fehler beim Suchen nach Llamacpp Updates",
"changeLocation": "Ort ändern",
"copied": "Kopiert",
"copyPath": "Pfad kopieren",
@ -244,5 +249,14 @@
"cancel": "Abbrechen",
"changeLocation": "Ort ändern"
}
},
"backendUpdater": {
"newBackendVersion": "Neue Llamacpp Version {{version}}",
"backendUpdateAvailable": "Llamacpp Update verfügbar",
"remindMeLater": "Später erinnern",
"updating": "Aktualisiere...",
"updateNow": "Jetzt aktualisieren",
"updateSuccess": "Llamacpp erfolgreich aktualisiert",
"updateError": "Fehler beim Aktualisieren von Llamacpp"
}
}

View File

@ -60,7 +60,11 @@
"tools": "Tools",
"vision": "Vision",
"embeddings": "Embeddings",
"notAvailable": "Not available yet"
"notAvailable": "Not available yet",
"warning": {
"title": "Proceed with Caution",
"description": "Modifying model capabilities may affect performance and functionality. Incorrect settings could cause unexpected behavior or errors."
}
},
"addProvider": "Add Provider",
"addOpenAIProvider": "Add OpenAI Provider",

View File

@ -6,6 +6,11 @@
"noUpdateAvailable": "You're running the latest version",
"devVersion": "Development version detected",
"updateError": "Failed to check for updates",
"checkForBackendUpdates": "Check for Llamacpp Updates",
"checkForBackendUpdatesDesc": "Check if a newer version of the Llamacpp backend is available.",
"checkingForBackendUpdates": "Checking for Llamacpp updates...",
"noBackendUpdateAvailable": "You're running the latest Llamacpp version",
"backendUpdateError": "Failed to check for Llamacpp updates",
"changeLocation": "Change Location",
"copied": "Copied",
"copyPath": "Copy Path",
@ -249,5 +254,16 @@
"cancel": "Cancel",
"changeLocation": "Change Location"
}
}
},
"backendUpdater": {
"newBackendVersion": "New Llamacpp Version {{version}}",
"backendUpdateAvailable": "Llamacpp Update Available",
"remindMeLater": "Remind Me Later",
"updating": "Updating...",
"updateNow": "Update Now",
"updateSuccess": "Llamacpp updated successfully",
"updateError": "Failed to update Llamacpp"
},
"backendInstallSuccess": "Backend installed successfully",
"backendInstallError": "Failed to install backend"
}

View File

@ -60,7 +60,11 @@
"tools": "Alat",
"vision": "Visi",
"embeddings": "Embedding",
"notAvailable": "Belum tersedia"
"notAvailable": "Belum tersedia",
"warning": {
"title": "Lanjutkan dengan Hati-hati",
"description": "Memodifikasi kemampuan model dapat mempengaruhi kinerja dan fungsionalitas. Pengaturan yang salah dapat menyebabkan perilaku atau kesalahan yang tidak terduga."
}
},
"addProvider": "Tambah Penyedia",
"addOpenAIProvider": "Tambah Penyedia OpenAI",

View File

@ -6,6 +6,11 @@
"noUpdateAvailable": "Anda menjalankan versi terbaru",
"devVersion": "Versi pengembangan terdeteksi",
"updateError": "Gagal memeriksa pembaruan",
"checkForBackendUpdates": "Periksa Pembaruan Llamacpp",
"checkForBackendUpdatesDesc": "Periksa apakah versi backend Llamacpp yang lebih baru tersedia.",
"checkingForBackendUpdates": "Memeriksa pembaruan Llamacpp...",
"noBackendUpdateAvailable": "Anda menjalankan versi Llamacpp terbaru",
"backendUpdateError": "Gagal memeriksa pembaruan Llamacpp",
"changeLocation": "Ubah Lokasi",
"copied": "Tersalin",
"copyPath": "Salin Jalur",
@ -244,5 +249,14 @@
"cancel": "Batal",
"changeLocation": "Ubah Lokasi"
}
},
"backendUpdater": {
"newBackendVersion": "Versi Llamacpp Baru {{version}}",
"backendUpdateAvailable": "Pembaruan Llamacpp Tersedia",
"remindMeLater": "Ingatkan Saya Nanti",
"updating": "Memperbarui...",
"updateNow": "Perbarui Sekarang",
"updateSuccess": "Llamacpp berhasil diperbarui",
"updateError": "Gagal memperbarui Llamacpp"
}
}

View File

@ -161,7 +161,7 @@
"tools": "Narzędzia",
"vision": "Wizja",
"embeddings": "Osadzenia",
"notAvailable": "Jeszcze nie dostępne"
"notAvailable": "Jeszcze niedostępne"
},
"outOfContextError": {
"truncateInput": "Przytnij Wejście",

View File

@ -60,7 +60,11 @@
"tools": "Narzędzia",
"vision": "Wizja",
"embeddings": "Osadzenia",
"notAvailable": "Jeszcze nie dostępne"
"notAvailable": "Jeszcze niedostępne",
"warning": {
"title": "Zachowaj Ostrożność",
"description": "Modyfikowanie możliwości modelu może wpłynąć na wydajność i funkcjonalność. Nieprawidłowe ustawienia mogą spowodować nieoczekiwane zachowanie lub błędy."
}
},
"addProvider": "Dodaj Dostawcę",
"addOpenAIProvider": "Dodaj Dostawcę OpenAI",

View File

@ -6,6 +6,11 @@
"noUpdateAvailable": "Używasz najnowszej wersji",
"devVersion": "Wykryto wersję deweloperską",
"updateError": "Nie udało się sprawdzić dostępności aktualizacji",
"checkForBackendUpdates": "Sprawdź Aktualizacje Llamacpp",
"checkForBackendUpdatesDesc": "Sprawdza czy dostępna jest nowa wersja backendu Llamacpp.",
"checkingForBackendUpdates": "Sprawdzanie aktualizacji Llamacpp...",
"noBackendUpdateAvailable": "Używasz najnowszej wersji Llamacpp",
"backendUpdateError": "Nie udało się sprawdzić aktualizacji Llamacpp",
"changeLocation": "Zmień Położenie",
"copied": "Skopiowano",
"copyPath": "Skopiuj Ścieżkę",
@ -234,7 +239,7 @@
"reportAnIssueDesc": "Znalazłeś/-aś błąd? Pomóż nam, zgłaszając go na GitHubie.",
"reportIssue": "Zgłoś Problem",
"credits": "Podziękowania",
"creditsDesc1": "Aplikacja Jan jest rozwijana z ❤️ przez Zespół Menlo.",
"creditsDesc1": "Aplikacja 👋 Jan jest rozwijana z ❤️ przez zespół Menlo Research.",
"creditsDesc2": "Szczególne podziękowania dla naszych otwartoźródłowych zależności - zwłaszcza llama.cpp i Tauri - oraz dla naszej wspaniałej społeczności SI."
},
"extensions": {
@ -249,5 +254,14 @@
"cancel": "Anuluj",
"changeLocation": "Zmień Położenie"
}
},
"backendUpdater": {
"newBackendVersion": "Nowa wersja Llamacpp {{version}}",
"backendUpdateAvailable": "Dostępna aktualizacja Llamacpp",
"remindMeLater": "Przypomnij mi później",
"updating": "Aktualizowanie...",
"updateNow": "Aktualizuj teraz",
"updateSuccess": "Llamacpp został pomyślnie zaktualizowany",
"updateError": "Nie udało się zaktualizować Llamacpp"
}
}

View File

@ -60,7 +60,11 @@
"tools": "Công cụ",
"vision": "Thị giác",
"embeddings": "Nhúng",
"notAvailable": "Chưa có"
"notAvailable": "Chưa có",
"warning": {
"title": "Hãy Thận Trọng",
"description": "Việc sửa đổi khả năng của mô hình có thể ảnh hưởng đến hiệu suất và chức năng. Cài đặt không chính xác có thể gây ra hành vi hoặc lỗi không mong muốn."
}
},
"addProvider": "Thêm nhà cung cấp",
"addOpenAIProvider": "Thêm nhà cung cấp OpenAI",

View File

@ -6,6 +6,11 @@
"noUpdateAvailable": "Bạn đang chạy phiên bản mới nhất",
"devVersion": "Đã phát hiện phiên bản phát triển",
"updateError": "Không thể kiểm tra cập nhật",
"checkForBackendUpdates": "Kiểm tra Cập nhật Llamacpp",
"checkForBackendUpdatesDesc": "Kiểm tra xem có phiên bản backend Llamacpp mới hơn không.",
"checkingForBackendUpdates": "Đang kiểm tra cập nhật Llamacpp...",
"noBackendUpdateAvailable": "Bạn đang chạy phiên bản Llamacpp mới nhất",
"backendUpdateError": "Không thể kiểm tra cập nhật Llamacpp",
"changeLocation": "Thay đổi Vị trí",
"copied": "Đã sao chép",
"copyPath": "Sao chép Đường dẫn",
@ -244,5 +249,14 @@
"cancel": "Hủy",
"changeLocation": "Thay đổi vị trí"
}
},
"backendUpdater": {
"newBackendVersion": "Phiên bản Llamacpp mới {{version}}",
"backendUpdateAvailable": "Có cập nhật Llamacpp",
"remindMeLater": "Nhắc tôi sau",
"updating": "Đang cập nhật...",
"updateNow": "Cập nhật ngay",
"updateSuccess": "Cập nhật Llamacpp thành công",
"updateError": "Không thể cập nhật Llamacpp"
}
}

View File

@ -60,7 +60,11 @@
"tools": "工具",
"vision": "视觉",
"embeddings": "嵌入",
"notAvailable": "尚不可用"
"notAvailable": "尚不可用",
"warning": {
"title": "谨慎操作",
"description": "修改模型功能可能会影响性能和功能。错误的设置可能导致意外行为或错误。"
}
},
"addProvider": "添加提供商",
"addOpenAIProvider": "添加 OpenAI 提供商",

View File

@ -6,6 +6,11 @@
"noUpdateAvailable": "您正在运行最新版本",
"devVersion": "检测到开发版本",
"updateError": "检查更新失败",
"checkForBackendUpdates": "检查 Llamacpp 更新",
"checkForBackendUpdatesDesc": "检查是否有更新的 Llamacpp 后端版本。",
"checkingForBackendUpdates": "正在检查 Llamacpp 更新...",
"noBackendUpdateAvailable": "您正在运行最新的 Llamacpp 版本",
"backendUpdateError": "检查 Llamacpp 更新失败",
"changeLocation": "更改位置",
"copied": "已复制",
"copyPath": "复制路径",
@ -244,5 +249,14 @@
"cancel": "取消",
"changeLocation": "更改位置"
}
},
"backendUpdater": {
"newBackendVersion": "新的 Llamacpp 版本 {{version}}",
"backendUpdateAvailable": "Llamacpp 更新可用",
"remindMeLater": "稍后提醒我",
"updating": "正在更新...",
"updateNow": "立即更新",
"updateSuccess": "Llamacpp 更新成功",
"updateError": "更新 Llamacpp 失败"
}
}

View File

@ -60,7 +60,11 @@
"tools": "工具",
"vision": "視覺",
"embeddings": "嵌入",
"notAvailable": "尚不可用"
"notAvailable": "尚不可用",
"warning": {
"title": "請謹慎操作",
"description": "修改模型功能可能會影響效能和功能。錯誤的設定可能導致意外行為或錯誤。"
}
},
"addProvider": "新增提供者",
"addOpenAIProvider": "新增 OpenAI 提供者",

View File

@ -6,6 +6,11 @@
"noUpdateAvailable": "您正在運行最新版本",
"devVersion": "檢測到開發版本",
"updateError": "檢查更新失敗",
"checkForBackendUpdates": "檢查 Llamacpp 更新",
"checkForBackendUpdatesDesc": "檢查是否有更新的 Llamacpp 後端版本。",
"checkingForBackendUpdates": "正在檢查 Llamacpp 更新...",
"noBackendUpdateAvailable": "您正在運行最新的 Llamacpp 版本",
"backendUpdateError": "檢查 Llamacpp 更新失敗",
"changeLocation": "更改位置",
"copied": "已複製",
"copyPath": "複製路徑",
@ -244,5 +249,14 @@
"cancel": "取消",
"changeLocation": "變更位置"
}
},
"backendUpdater": {
"newBackendVersion": "新的 Llamacpp 版本 {{version}}",
"backendUpdateAvailable": "Llamacpp 更新可用",
"remindMeLater": "稍後提醒我",
"updating": "正在更新...",
"updateNow": "立即更新",
"updateSuccess": "Llamacpp 更新成功",
"updateError": "更新 Llamacpp 失敗"
}
}
}

View File

@ -3,6 +3,7 @@ import { createRootRoute, Outlet, useRouterState } from '@tanstack/react-router'
import LeftPanel from '@/containers/LeftPanel'
import DialogAppUpdater from '@/containers/dialogs/AppUpdater'
import BackendUpdater from '@/containers/dialogs/BackendUpdater'
import { Fragment } from 'react/jsx-runtime'
import { AppearanceProvider } from '@/providers/AppearanceProvider'
import { ThemeProvider } from '@/providers/ThemeProvider'
@ -113,6 +114,7 @@ const AppLayout = () => {
{/* Fake absolute panel top to enable window drag */}
<div className="absolute w-full h-10 z-10" data-tauri-drag-region />
<DialogAppUpdater />
{PlatformFeatures[PlatformFeature.LOCAL_INFERENCE] && <BackendUpdater />}
{/* Use ResizablePanelGroup only on larger screens */}
{!isSmallScreen && isLeftPanelOpen ? (
@ -164,7 +166,9 @@ const AppLayout = () => {
</div>
)}
</main>
{PlatformFeatures[PlatformFeature.ANALYTICS] && productAnalyticPrompt && <PromptAnalytic />}
{PlatformFeatures[PlatformFeature.ANALYTICS] && productAnalyticPrompt && (
<PromptAnalytic />
)}
</Fragment>
)
}

View File

@ -23,6 +23,8 @@ import { useTranslation } from '@/i18n/react-i18next-compat'
import { useAppState } from '@/hooks/useAppState'
import { PlatformGuard } from '@/lib/platform/PlatformGuard'
import { PlatformFeature } from '@/lib/platform'
import { isPlatformTauri } from '@/lib/platform/utils'
import { MCPTool } from '@janhq/core'
// Function to mask sensitive values
const maskSensitiveValue = (value: string) => {
@ -91,12 +93,118 @@ export const Route = createFileRoute(route.settings.mcp_servers as any)({
function MCPServers() {
return (
<PlatformGuard feature={PlatformFeature.MCP_SERVERS}>
<MCPServersContent />
{isPlatformTauri() ? <MCPServersDesktop /> : <MCPServersWeb />}
</PlatformGuard>
)
}
function MCPServersContent() {
// Web version of the MCP servers settings page: read-only status view with a
// single permissions toggle, without the desktop-only server management UI.
// Loads the connected-server list and available tools once per serviceHub
// change and renders them inside the standard settings layout.
function MCPServersWeb() {
const { t } = useTranslation()
const serviceHub = useServiceHub()
const { allowAllMCPPermissions, setAllowAllMCPPermissions } = useToolApproval()
// Tools and server names fetched from the MCP service; empty on failure.
const [webMcpTools, setWebMcpTools] = useState<MCPTool[]>([])
const [webMcpServers, setWebMcpServers] = useState<string[]>([])
const [webMcpLoading, setWebMcpLoading] = useState(true)
useEffect(() => {
// Fetch tools and connected servers in parallel; on any error fall back
// to empty lists so the UI shows "not connected" rather than crashing.
async function loadWebMcpData() {
setWebMcpLoading(true)
try {
const [tools, servers] = await Promise.all([
serviceHub.mcp().getTools(),
serviceHub.mcp().getConnectedServers(),
])
setWebMcpTools(tools)
setWebMcpServers(servers)
} catch (error) {
console.error('Failed to load web MCP data:', error)
setWebMcpTools([])
setWebMcpServers([])
} finally {
setWebMcpLoading(false)
}
}
loadWebMcpData()
}, [serviceHub])
return (
<div className="flex flex-col h-full">
<HeaderPage>
<h1 className="font-medium">{t('common:settings')}</h1>
</HeaderPage>
<div className="flex h-full w-full">
<SettingsMenu />
<div className="p-4 w-full h-[calc(100%-32px)] overflow-y-auto">
<div className="flex flex-col justify-between gap-4 gap-y-3 w-full">
{/* Global permission toggle card */}
<Card
header={
<div className="flex flex-col mb-4">
<h1 className="text-main-view-fg/font-medium text-base" className="text-main-view-fg font-medium text-base">
{t('mcp-servers:title')}
</h1>
<p className="text-sm text-main-view-fg/70 mt-1">
MCP tools are automatically available in your chat sessions
</p>
</div>
}
>
<CardItem
title={t('mcp-servers:allowPermissions')}
description={t('mcp-servers:allowPermissionsDesc')}
actions={
<div className="flex-shrink-0 ml-4">
<Switch
checked={allowAllMCPPermissions}
onCheckedChange={setAllowAllMCPPermissions}
/>
</div>
}
/>
</Card>
{/* Connection status plus a flat list of the available tools */}
<Card>
<CardItem
title="MCP Service Status"
description={
webMcpLoading
? "Loading MCP service status..."
: webMcpServers.length > 0
? `Connected to ${webMcpServers.join(', ')}. ${webMcpTools.length} tools available.`
: "MCP service not connected"
}
descriptionOutside={
webMcpTools.length > 0 && !webMcpLoading && (
<div className="mt-2">
<h4 className="text-sm font-medium text-main-view-fg/80 mb-2">Available Tools:</h4>
<div className="grid grid-cols-1 gap-2">
{webMcpTools.map((tool) => (
<div key={tool.name} className="flex items-start gap-2 p-2 bg-main-view-fg/5 rounded">
<div className="flex-1">
<div className="font-medium text-sm">{tool.name}</div>
<div className="text-xs text-main-view-fg/70">{tool.description}</div>
{tool.server && (
<div className="text-xs text-main-view-fg/50 mt-1">Server: {tool.server}</div>
)}
</div>
</div>
))}
</div>
</div>
)
}
/>
</Card>
</div>
</div>
</div>
</div>
)
}
// Desktop version of MCP servers - full server management capabilities
function MCPServersDesktop() {
const { t } = useTranslation()
const serviceHub = useServiceHub()
const {

View File

@ -1,3 +1,4 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { Card, CardItem } from '@/containers/Card'
import HeaderPage from '@/containers/HeaderPage'
import SettingsMenu from '@/containers/SettingsMenu'
@ -15,6 +16,7 @@ import { DynamicControllerSetting } from '@/containers/dynamicControllerSetting'
import { RenderMarkdown } from '@/containers/RenderMarkdown'
import { DialogEditModel } from '@/containers/dialogs/EditModel'
import { DialogAddModel } from '@/containers/dialogs/AddModel'
import { ImportVisionModelDialog } from '@/containers/dialogs/ImportVisionModelDialog'
import { ModelSetting } from '@/containers/ModelSetting'
import { DialogDeleteModel } from '@/containers/dialogs/DeleteModel'
import { FavoriteModelAction } from '@/containers/FavoriteModelAction'
@ -25,7 +27,12 @@ import DeleteProvider from '@/containers/dialogs/DeleteProvider'
import { useServiceHub } from '@/hooks/useServiceHub'
import { localStorageKey } from '@/constants/localStorage'
import { Button } from '@/components/ui/button'
import { IconFolderPlus, IconLoader, IconRefresh } from '@tabler/icons-react'
import {
IconFolderPlus,
IconLoader,
IconRefresh,
IconUpload,
} from '@tabler/icons-react'
import { toast } from 'sonner'
import { useCallback, useEffect, useState } from 'react'
import { predefinedProviders } from '@/consts/providers'
@ -33,6 +40,7 @@ import { useModelLoad } from '@/hooks/useModelLoad'
import { useLlamacppDevices } from '@/hooks/useLlamacppDevices'
import { PlatformFeatures } from '@/lib/platform/const'
import { PlatformFeature } from '@/lib/platform/types'
import { useBackendUpdater } from '@/hooks/useBackendUpdater'
// as route.threadsDetail
export const Route = createFileRoute('/settings/providers/$providerName')({
@ -73,7 +81,10 @@ function ProviderDetail() {
const [activeModels, setActiveModels] = useState<string[]>([])
const [loadingModels, setLoadingModels] = useState<string[]>([])
const [refreshingModels, setRefreshingModels] = useState(false)
const [importingModel, setImportingModel] = useState(false)
const [isCheckingBackendUpdate, setIsCheckingBackendUpdate] = useState(false)
const [isInstallingBackend, setIsInstallingBackend] = useState(false)
const { checkForUpdate: checkForBackendUpdate, installBackend } =
useBackendUpdater()
const { providerName } = useParams({ from: Route.id })
const { getProviderByName, setProviders, updateProvider } = useModelProvider()
const provider = getProviderByName(providerName)
@ -90,67 +101,75 @@ function ProviderDetail() {
!setting.controller_props.value)
)
const handleImportModel = async () => {
if (!provider) {
return
}
setImportingModel(true)
const selectedFile = await serviceHub.dialog().open({
multiple: false,
directory: false,
})
// If the dialog returns a file path, extract just the file name
const fileName =
typeof selectedFile === 'string'
? selectedFile.split(/[\\/]/).pop()?.replace(/\s/g, '-')
: undefined
if (selectedFile && fileName) {
// Check if model already exists
const modelExists = provider.models.some(
(model) => model.name === fileName
)
if (modelExists) {
toast.error('Model already exists', {
description: `${fileName} already imported`,
})
setImportingModel(false)
return
}
const handleModelImportSuccess = async (importedModelName?: string) => {
// Refresh the provider to update the models list
await serviceHub.providers().getProviders().then(setProviders)
// If a model was imported and it might have vision capabilities, check and update
if (importedModelName && providerName === 'llamacpp') {
try {
await serviceHub.models().pullModel(fileName, typeof selectedFile === 'string' ? selectedFile : selectedFile?.[0])
// Refresh the provider to update the models list
await serviceHub.providers().getProviders().then(setProviders)
toast.success(t('providers:import'), {
id: `import-model-${provider.provider}`,
description: t('providers:importModelSuccess', {
provider: fileName,
}),
})
const mmprojExists = await serviceHub
.models()
.checkMmprojExists(importedModelName)
if (mmprojExists) {
// Get the updated provider after refresh
const { getProviderByName, updateProvider: updateProviderState } =
useModelProvider.getState()
const llamacppProvider = getProviderByName('llamacpp')
if (llamacppProvider) {
const modelIndex = llamacppProvider.models.findIndex(
(m: Model) => m.id === importedModelName
)
if (modelIndex !== -1) {
const model = llamacppProvider.models[modelIndex]
const capabilities = model.capabilities || []
// Add 'vision' capability if not already present AND if user hasn't manually configured capabilities
// Check if model has a custom capabilities config flag
const hasUserConfiguredCapabilities =
(model as any)._userConfiguredCapabilities === true
if (
!capabilities.includes('vision') &&
!hasUserConfiguredCapabilities
) {
const updatedModels = [...llamacppProvider.models]
updatedModels[modelIndex] = {
...model,
capabilities: [...capabilities, 'vision'],
// Mark this as auto-detected, not user-configured
_autoDetectedVision: true,
} as any
updateProviderState('llamacpp', { models: updatedModels })
console.log(
`Vision capability added to model after provider refresh: ${importedModelName}`
)
}
}
}
}
} catch (error) {
console.error(t('providers:importModelError'), error)
toast.error(t('providers:importModelError'), {
description:
error instanceof Error ? error.message : 'Unknown error occurred',
})
} finally {
setImportingModel(false)
console.error('Error checking mmproj existence after import:', error)
}
} else {
setImportingModel(false)
}
}
useEffect(() => {
// Initial data fetch
serviceHub.models().getActiveModels().then((models) => setActiveModels(models || []))
serviceHub
.models()
.getActiveModels()
.then((models) => setActiveModels(models || []))
// Set up interval for real-time updates
const intervalId = setInterval(() => {
serviceHub.models().getActiveModels().then((models) => setActiveModels(models || []))
serviceHub
.models()
.getActiveModels()
.then((models) => setActiveModels(models || []))
}, 5000)
return () => clearInterval(intervalId)
@ -199,7 +218,9 @@ function ProviderDetail() {
setRefreshingModels(true)
try {
const modelIds = await serviceHub.providers().fetchModelsFromProvider(provider)
const modelIds = await serviceHub
.providers()
.fetchModelsFromProvider(provider)
// Create new models from the fetched IDs
const newModels: Model[] = modelIds.map((id) => ({
@ -250,42 +271,122 @@ function ProviderDetail() {
}
}
const handleStartModel = (modelId: string) => {
const handleStartModel = async (modelId: string) => {
// Add model to loading state
setLoadingModels((prev) => [...prev, modelId])
if (provider)
// Original: startModel(provider, modelId).then(() => { setActiveModels((prevModels) => [...prevModels, modelId]) })
serviceHub.models().startModel(provider, modelId)
.then(() => {
// Refresh active models after starting
serviceHub.models().getActiveModels().then((models) => setActiveModels(models || []))
})
.catch((error) => {
console.error('Error starting model:', error)
if (error && typeof error === 'object' && 'message' in error) {
setModelLoadError(error)
} else {
setModelLoadError(`${error}`)
}
})
.finally(() => {
// Remove model from loading state
setLoadingModels((prev) => prev.filter((id) => id !== modelId))
})
if (provider) {
try {
// Start the model with plan result
await serviceHub.models().startModel(provider, modelId)
// Refresh active models after starting
serviceHub
.models()
.getActiveModels()
.then((models) => setActiveModels(models || []))
} catch (error) {
console.error('Error starting model:', error)
if (
error &&
typeof error === 'object' &&
'message' in error &&
typeof error.message === 'string'
) {
setModelLoadError({ message: error.message })
} else {
setModelLoadError(typeof error === 'string' ? error : `${error}`)
}
} finally {
// Remove model from loading state
setLoadingModels((prev) => prev.filter((id) => id !== modelId))
}
}
}
const handleStopModel = (modelId: string) => {
// Original: stopModel(modelId).then(() => { setActiveModels((prevModels) => prevModels.filter((model) => model !== modelId)) })
serviceHub.models().stopModel(modelId)
serviceHub
.models()
.stopModel(modelId)
.then(() => {
// Refresh active models after stopping
serviceHub.models().getActiveModels().then((models) => setActiveModels(models || []))
serviceHub
.models()
.getActiveModels()
.then((models) => setActiveModels(models || []))
})
.catch((error) => {
console.error('Error stopping model:', error)
})
}
// Manually check for a llamacpp backend update (llamacpp provider only).
// checkForBackendUpdate(true) forces the check; when an update is found the
// BackendUpdater dialog shows itself, otherwise an info toast is shown.
// The spinner flag is cleared in finally regardless of outcome.
const handleCheckForBackendUpdate = useCallback(async () => {
if (provider?.provider !== 'llamacpp') return
setIsCheckingBackendUpdate(true)
try {
const update = await checkForBackendUpdate(true)
if (!update) {
toast.info(t('settings:noBackendUpdateAvailable'))
}
// If update is available, the BackendUpdater dialog will automatically show
} catch (error) {
console.error('Failed to check for backend updates:', error)
toast.error(t('settings:backendUpdateError'))
} finally {
setIsCheckingBackendUpdate(false)
}
}, [provider, checkForBackendUpdate, t])
// Install a llamacpp backend from a user-selected .tar.gz archive
// (llamacpp provider only). Opens a file dialog, normalizes the chosen path
// (spaces -> dashes, lowercased — presumably to match the extension's
// expected naming; TODO confirm), installs via useBackendUpdater, then
// refreshes settings so the backend list reflects the new install.
const handleInstallBackendFromFile = useCallback(async () => {
if (provider?.provider !== 'llamacpp') return
setIsInstallingBackend(true)
try {
// Open file dialog with filter for .tar.gz files
const selectedFile = await serviceHub.dialog().open({
multiple: false,
directory: false,
filters: [
{
name: 'Backend Archives',
extensions: ['tar.gz'],
},
],
})
// Dialog returns null on cancel; only proceed for a single string path.
if (selectedFile && typeof selectedFile === 'string') {
// Process the file path: replace spaces with dashes and convert to lowercase
const processedFilePath = selectedFile
.replace(/\s+/g, '-')
.toLowerCase()
// Install the backend using the llamacpp extension
await installBackend(processedFilePath)
// Extract filename from the selected file path and replace spaces with dashes
const fileName = (
selectedFile.split(/[/\\]/).pop() || selectedFile
).replace(/\s+/g, '-')
toast.success(t('settings:backendInstallSuccess'), {
description: `Llamacpp ${fileName} installed`,
})
// Refresh settings to update backend configuration
await refreshSettings()
}
} catch (error) {
console.error('Failed to install backend from file:', error)
toast.error(t('settings:backendInstallError'), {
description:
error instanceof Error ? error.message : 'Unknown error occurred',
})
} finally {
setIsInstallingBackend(false)
}
}, [provider, serviceHub, refreshSettings, t, installBackend])
// Check if model provider settings are enabled for this platform
if (!PlatformFeatures[PlatformFeature.MODEL_PROVIDER_SETTINGS]) {
return (
@ -434,10 +535,12 @@ function ProviderDetail() {
}
}
serviceHub.providers().updateSettings(
providerName,
updateObj.settings ?? []
)
serviceHub
.providers()
.updateSettings(
providerName,
updateObj.settings ?? []
)
updateProvider(providerName, {
...provider,
...updateObj,
@ -499,6 +602,60 @@ function ProviderDetail() {
<span> is the recommended backend.</span>
</div>
)}
{setting.key === 'version_backend' &&
provider?.provider === 'llamacpp' && (
<div className="mt-2 flex flex-wrap gap-2">
<Button
variant="link"
size="sm"
className="p-0"
onClick={handleCheckForBackendUpdate}
disabled={isCheckingBackendUpdate}
>
<div className="cursor-pointer flex items-center justify-center rounded-sm hover:bg-main-view-fg/15 bg-main-view-fg/10 transition-all duration-200 ease-in-out px-2 py-1 gap-1">
<IconRefresh
size={12}
className={cn(
'text-main-view-fg/50',
isCheckingBackendUpdate &&
'animate-spin'
)}
/>
<span>
{isCheckingBackendUpdate
? t(
'settings:checkingForBackendUpdates'
)
: t(
'settings:checkForBackendUpdates'
)}
</span>
</div>
</Button>
<Button
variant="link"
size="sm"
className="p-0"
onClick={handleInstallBackendFromFile}
disabled={isInstallingBackend}
>
<div className="cursor-pointer flex items-center justify-center rounded-sm hover:bg-main-view-fg/15 bg-main-view-fg/10 transition-all duration-200 ease-in-out px-2 py-1 gap-1">
<IconUpload
size={12}
className={cn(
'text-main-view-fg/50',
isInstallingBackend && 'animate-pulse'
)}
/>
<span>
{isInstallingBackend
? 'Installing Backend...'
: 'Install Backend from File'}
</span>
</div>
</Button>
</div>
)}
</>
}
actions={actionComponent}
@ -553,32 +710,28 @@ function ProviderDetail() {
</>
)}
{provider && provider.provider === 'llamacpp' && (
<Button
variant="link"
size="sm"
className="hover:no-underline"
disabled={importingModel}
onClick={handleImportModel}
>
<div className="cursor-pointer flex items-center justify-center rounded hover:bg-main-view-fg/15 bg-main-view-fg/10 transition-all duration-200 ease-in-out p-1.5 py-1 gap-1 -mr-2">
{importingModel ? (
<IconLoader
size={18}
className="text-main-view-fg/50 animate-spin"
/>
) : (
<IconFolderPlus
size={18}
className="text-main-view-fg/50"
/>
)}
<span className="text-main-view-fg/70">
{importingModel
? 'Importing...'
: t('providers:import')}
</span>
</div>
</Button>
<ImportVisionModelDialog
provider={provider}
onSuccess={handleModelImportSuccess}
trigger={
<Button
variant="link"
size="sm"
className="hover:no-underline !outline-none focus:outline-none active:outline-none"
asChild
>
<div className="cursor-pointer flex items-center justify-center rounded hover:bg-main-view-fg/15 bg-main-view-fg/10 transition-all duration-200 ease-in-out p-1.5 py-1 gap-1 -mr-2">
<IconFolderPlus
size={18}
className="text-main-view-fg/50"
/>
<span className="text-main-view-fg/70">
{t('providers:import')}
</span>
</div>
</Button>
}
/>
)}
</div>
</div>
@ -603,12 +756,10 @@ function ProviderDetail() {
}
actions={
<div className="flex items-center gap-0.5">
{provider && provider.provider !== 'llamacpp' && (
<DialogEditModel
provider={provider}
modelId={model.id}
/>
)}
<DialogEditModel
provider={provider}
modelId={model.id}
/>
{model.settings && (
<ModelSetting
provider={provider}

View File

@ -1,91 +1,216 @@
import { describe, it, expect, vi, beforeEach } from 'vitest'
import { initializeServiceHub, type ServiceHub } from '../index'
import { isPlatformTauri } from '@/lib/platform/utils'
// Mock platform detection
vi.mock('@/lib/platform/utils', () => ({
isPlatformTauri: vi.fn().mockReturnValue(false)
}))
// Mock @jan/extensions-web to return empty extensions for testing
vi.mock('@jan/extensions-web', () => ({
WEB_EXTENSIONS: {}
}))
// Mock console to avoid noise in tests
vi.spyOn(console, 'log').mockImplementation(() => {})
vi.spyOn(console, 'error').mockImplementation(() => {})
describe('ServiceHub Integration Tests', () => {
let serviceHub: ServiceHub
beforeEach(async () => {
vi.clearAllMocks()
serviceHub = await initializeServiceHub()
})
describe('ServiceHub Initialization', () => {
it('should initialize with web services when not on Tauri', async () => {
vi.mocked(isPlatformTauri).mockReturnValue(false)
serviceHub = await initializeServiceHub()
expect(serviceHub).toBeDefined()
expect(console.log).toHaveBeenCalledWith(
'Initializing service hub for platform:',
'Web'
)
})
it('should initialize with Tauri services when on Tauri', async () => {
vi.mocked(isPlatformTauri).mockReturnValue(true)
serviceHub = await initializeServiceHub()
expect(serviceHub).toBeDefined()
expect(console.log).toHaveBeenCalledWith(
'Initializing service hub for platform:',
'Tauri'
)
})
})
describe('Service Access', () => {
it('should provide access to all required services', () => {
const services = [
'theme', 'window', 'events', 'hardware', 'app', 'analytic',
'messages', 'mcp', 'threads', 'providers', 'models', 'assistants',
'dialog', 'opener', 'updater', 'path', 'core', 'deeplink'
]
services.forEach(serviceName => {
expect(typeof serviceHub[serviceName as keyof ServiceHub]).toBe('function')
expect(serviceHub[serviceName as keyof ServiceHub]()).toBeDefined()
})
})
it('should return same service instance on multiple calls', () => {
const themeService1 = serviceHub.theme()
const themeService2 = serviceHub.theme()
expect(themeService1).toBe(themeService2)
})
})
describe('Basic Service Functionality', () => {
it('should have working theme service', () => {
const theme = serviceHub.theme()
expect(typeof theme.setTheme).toBe('function')
expect(typeof theme.getCurrentWindow).toBe('function')
})
it('should have working events service', () => {
const events = serviceHub.events()
expect(typeof events.emit).toBe('function')
expect(typeof events.listen).toBe('function')
})
})
import { describe, it, expect, vi, beforeEach } from 'vitest'
import { initializeServiceHub, type ServiceHub } from '../index'
import { isPlatformTauri } from '@/lib/platform/utils'
// Mock platform detection
vi.mock('@/lib/platform/utils', () => ({
isPlatformTauri: vi.fn().mockReturnValue(false)
}))
// Mock @jan/extensions-web to return empty extensions for testing
vi.mock('@jan/extensions-web', () => ({
WEB_EXTENSIONS: {}
}))
// Mock @janhq/core EngineManager to prevent initialization issues
vi.mock('@janhq/core', () => ({
EngineManager: {
instance: vi.fn(() => ({
engines: new Map()
}))
}
}))
// Mock token.js to avoid initialization issues
vi.mock('token.js', () => ({
models: {}
}))
// Mock ExtensionManager to avoid initialization issues
vi.mock('@/lib/extension', () => ({
ExtensionManager: {
getInstance: vi.fn(() => ({
getEngine: vi.fn()
}))
}
}))
// Mock dynamic imports for web services
vi.mock('../theme/web', () => ({
WebThemeService: vi.fn().mockImplementation(() => ({
setTheme: vi.fn(),
getCurrentWindow: vi.fn()
}))
}))
vi.mock('../app/web', () => ({
WebAppService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../path/web', () => ({
WebPathService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../core/web', () => ({
WebCoreService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../dialog/web', () => ({
WebDialogService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../events/web', () => ({
WebEventsService: vi.fn().mockImplementation(() => ({
emit: vi.fn(),
listen: vi.fn()
}))
}))
vi.mock('../window/web', () => ({
WebWindowService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../deeplink/web', () => ({
WebDeepLinkService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../providers/web', () => ({
WebProvidersService: vi.fn().mockImplementation(() => ({}))
}))
// Mock dynamic imports for Tauri services
vi.mock('../theme/tauri', () => ({
TauriThemeService: vi.fn().mockImplementation(() => ({
setTheme: vi.fn(),
getCurrentWindow: vi.fn()
}))
}))
vi.mock('../window/tauri', () => ({
TauriWindowService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../events/tauri', () => ({
TauriEventsService: vi.fn().mockImplementation(() => ({
emit: vi.fn(),
listen: vi.fn()
}))
}))
vi.mock('../hardware/tauri', () => ({
TauriHardwareService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../app/tauri', () => ({
TauriAppService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../mcp/tauri', () => ({
TauriMCPService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../providers/tauri', () => ({
TauriProvidersService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../dialog/tauri', () => ({
TauriDialogService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../opener/tauri', () => ({
TauriOpenerService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../updater/tauri', () => ({
TauriUpdaterService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../path/tauri', () => ({
TauriPathService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../core/tauri', () => ({
TauriCoreService: vi.fn().mockImplementation(() => ({}))
}))
vi.mock('../deeplink/tauri', () => ({
TauriDeepLinkService: vi.fn().mockImplementation(() => ({}))
}))
// Mock console to avoid noise in tests
vi.spyOn(console, 'log').mockImplementation(() => {})
vi.spyOn(console, 'error').mockImplementation(() => {})
// Integration suite for the ServiceHub factory: verifies platform-specific
// initialization, service accessor availability, and memoization of instances.
// Relies on the vi.mock declarations above to stub every platform service.
describe('ServiceHub Integration Tests', () => {
  let serviceHub: ServiceHub

  beforeEach(async () => {
    // Reset mock call history and build a fresh hub before each test so
    // assertions on console.log / service calls are not cross-contaminated.
    vi.clearAllMocks()
    serviceHub = await initializeServiceHub()
  })

  describe('ServiceHub Initialization', () => {
    it('should initialize with web services when not on Tauri', async () => {
      // Force web-platform detection, then re-initialize to exercise that path.
      vi.mocked(isPlatformTauri).mockReturnValue(false)
      serviceHub = await initializeServiceHub()

      expect(serviceHub).toBeDefined()
      expect(console.log).toHaveBeenCalledWith(
        'Initializing service hub for platform:',
        'Web'
      )
    })

    it('should initialize with Tauri services when on Tauri', async () => {
      // Force Tauri-platform detection, then re-initialize to exercise that path.
      vi.mocked(isPlatformTauri).mockReturnValue(true)
      serviceHub = await initializeServiceHub()

      expect(serviceHub).toBeDefined()
      expect(console.log).toHaveBeenCalledWith(
        'Initializing service hub for platform:',
        'Tauri'
      )
    })
  })

  describe('Service Access', () => {
    it('should provide access to all required services', () => {
      // Every accessor listed here must exist as a function on the hub and
      // return a defined service object.
      const services = [
        'theme', 'window', 'events', 'hardware', 'app', 'analytic',
        'messages', 'mcp', 'threads', 'providers', 'models', 'assistants',
        'dialog', 'opener', 'updater', 'path', 'core', 'deeplink'
      ]

      services.forEach(serviceName => {
        expect(typeof serviceHub[serviceName as keyof ServiceHub]).toBe('function')
        expect(serviceHub[serviceName as keyof ServiceHub]()).toBeDefined()
      })
    })

    it('should return same service instance on multiple calls', () => {
      // Accessors are expected to memoize: repeated calls yield the same object.
      const themeService1 = serviceHub.theme()
      const themeService2 = serviceHub.theme()
      expect(themeService1).toBe(themeService2)
    })
  })

  describe('Basic Service Functionality', () => {
    it('should have working theme service', () => {
      const theme = serviceHub.theme()
      expect(typeof theme.setTheme).toBe('function')
      expect(typeof theme.getCurrentWindow).toBe('function')
    })

    it('should have working events service', () => {
      const events = serviceHub.events()
      expect(typeof events.emit).toBe('function')
      expect(typeof events.listen).toBe('function')
    })
  })
})

View File

@ -40,6 +40,25 @@ describe('Web-Specific Service Tests', () => {
describe('WebProvidersService', () => {
it('should use browser fetch for API calls', async () => {
// Mock the dependencies before importing
vi.mock('token.js', () => ({
models: {}
}))
vi.mock('@/lib/extension', () => ({
ExtensionManager: {
getInstance: vi.fn(() => ({
getEngine: vi.fn()
}))
}
}))
vi.mock('@janhq/core', () => ({
EngineManager: {
instance: vi.fn(() => ({
engines: new Map()
}))
}
}))
const { WebProvidersService } = await import('../providers/web')
const mockResponse = {
ok: true,
@ -66,7 +85,7 @@ describe('Web-Specific Service Tests', () => {
})
)
expect(models).toEqual(['gpt-4'])
})
}, 10000) // Increase timeout to 10 seconds
})
describe('WebAppService', () => {

View File

@ -157,6 +157,7 @@ class PlatformServiceHub implements ServiceHub {
windowModule,
deepLinkModule,
providersModule,
mcpModule,
] = await Promise.all([
import('./theme/web'),
import('./app/web'),
@ -167,6 +168,7 @@ class PlatformServiceHub implements ServiceHub {
import('./window/web'),
import('./deeplink/web'),
import('./providers/web'),
import('./mcp/web'),
])
this.themeService = new themeModule.WebThemeService()
@ -178,6 +180,7 @@ class PlatformServiceHub implements ServiceHub {
this.windowService = new windowModule.WebWindowService()
this.deepLinkService = new deepLinkModule.WebDeepLinkService()
this.providersService = new providersModule.WebProvidersService()
this.mcpService = new mcpModule.WebMCPService()
}
this.initialized = true

View File

@ -2,9 +2,9 @@
* Default MCP Service - Generic implementation with minimal returns
*/
import { MCPTool } from '@/types/completion'
import { MCPTool, MCPToolCallResult } from '@janhq/core'
import type { MCPServerConfig } from '@/hooks/useMCPServers'
import type { MCPService, MCPConfig, ToolCallResult, ToolCallWithCancellationResult } from './types'
import type { MCPService, MCPConfig, ToolCallWithCancellationResult } from './types'
export class DefaultMCPService implements MCPService {
async updateMCPConfig(configs: string): Promise<void> {
@ -28,7 +28,7 @@ export class DefaultMCPService implements MCPService {
return []
}
async callTool(args: { toolName: string; arguments: object }): Promise<ToolCallResult> {
async callTool(args: { toolName: string; arguments: object }): Promise<MCPToolCallResult> {
console.log('callTool called with args:', args)
return {
error: '',

View File

@ -2,20 +2,15 @@
* MCP Service Types
*/
import { MCPTool } from '@/types/completion'
import { MCPTool, MCPToolCallResult } from '@janhq/core'
import type { MCPServerConfig, MCPServers } from '@/hooks/useMCPServers'
export interface MCPConfig {
mcpServers?: MCPServers
}
export interface ToolCallResult {
error: string
content: { text: string }[]
}
export interface ToolCallWithCancellationResult {
promise: Promise<ToolCallResult>
promise: Promise<MCPToolCallResult>
cancel: () => Promise<void>
token: string
}
@ -26,7 +21,7 @@ export interface MCPService {
getMCPConfig(): Promise<MCPConfig>
getTools(): Promise<MCPTool[]>
getConnectedServers(): Promise<string[]>
callTool(args: { toolName: string; arguments: object }): Promise<ToolCallResult>
callTool(args: { toolName: string; arguments: object }): Promise<MCPToolCallResult>
callToolWithCancellation(args: {
toolName: string
arguments: object

View File

@ -0,0 +1,279 @@
/**
* Web MCP Service - Implementation for web platform
* Uses the MCP extension through ExtensionManager
* Follows OpenAI function calling standards
*/
import type { MCPServerConfig } from '@/hooks/useMCPServers'
import type { MCPService, MCPConfig, ToolCallWithCancellationResult } from './types'
import { ExtensionManager } from '@/lib/extension'
import { ExtensionTypeEnum, MCPExtension, MCPTool, MCPToolCallResult } from '@janhq/core'
export class WebMCPService implements MCPService {
  // Maps cancellation tokens to the AbortController driving each in-flight call.
  private abortControllers: Map<string, AbortController> = new Map()
  // Short-lived cache of the resolved MCP extension to avoid repeated
  // ExtensionManager lookups on hot paths (tool listing, tool calls).
  private extensionCache: MCPExtension | null = null
  private cacheTimestamp = 0
  private readonly CACHE_TTL = 5000 // 5 seconds

  /**
   * Resolve the MCP extension from the ExtensionManager, serving a cached
   * instance while it is younger than CACHE_TTL. Returns null when no MCP
   * extension is registered.
   */
  private getMCPExtension(): MCPExtension | null {
    const now = Date.now()
    if (this.extensionCache && (now - this.cacheTimestamp) < this.CACHE_TTL) {
      return this.extensionCache
    }

    this.extensionCache = ExtensionManager.getInstance().get<MCPExtension>(
      ExtensionTypeEnum.MCP
    ) || null
    this.cacheTimestamp = now
    return this.extensionCache
  }

  // Drop the cached extension so the next access re-queries ExtensionManager.
  private invalidateCache(): void {
    this.extensionCache = null
    this.cacheTimestamp = 0
  }

  /**
   * Validate the supplied MCP configuration string.
   * On the web platform no local config is persisted here; the call only
   * invalidates the extension cache. Throws on a missing/non-string payload.
   */
  async updateMCPConfig(configs: string): Promise<void> {
    if (!configs || typeof configs !== 'string') {
      throw new Error('Invalid MCP configuration provided')
    }
    // For web platform, configuration is handled by the remote API server
    // Invalidate cache to ensure fresh extension retrieval
    this.invalidateCache()
  }

  /**
   * Ask the extension to refresh its tool list; servers themselves are
   * managed remotely on the web platform. Wraps refresh failures with context.
   */
  async restartMCPServers(): Promise<void> {
    // For web platform, servers are managed remotely
    // This triggers a refresh of available tools
    this.invalidateCache()
    const extension = this.getMCPExtension()
    if (extension) {
      try {
        await extension.refreshTools()
      } catch (error) {
        throw new Error(`Failed to restart MCP servers: ${error instanceof Error ? error.message : String(error)}`)
      }
    }
  }

  /** Web platform does not manage local MCP servers, so config is empty. */
  async getMCPConfig(): Promise<MCPConfig> {
    // Return empty config since web platform doesn't manage local MCP servers
    return {}
  }

  /**
   * List available MCP tools via the extension.
   * Returns [] when the extension is missing or the lookup fails (logged).
   */
  async getTools(): Promise<MCPTool[]> {
    const extension = this.getMCPExtension()
    if (!extension) {
      return []
    }

    try {
      return await extension.getTools()
    } catch (error) {
      console.error('Failed to get MCP tools:', error)
      return []
    }
  }

  /**
   * List names of currently connected MCP servers via the extension.
   * Returns [] when the extension is missing or the lookup fails (logged).
   */
  async getConnectedServers(): Promise<string[]> {
    const extension = this.getMCPExtension()
    if (!extension) {
      return []
    }

    try {
      return await extension.getConnectedServers()
    } catch (error) {
      console.error('Failed to get connected servers:', error)
      return []
    }
  }

  /**
   * Invoke a tool by name through the MCP extension.
   * Never rejects: every failure path (bad input, missing extension, malformed
   * extension response, thrown error) is folded into an MCPToolCallResult with
   * a populated `error` field and a text content entry.
   */
  async callTool(args: { toolName: string; arguments: object }): Promise<MCPToolCallResult> {
    // Validate input parameters
    if (!args.toolName || typeof args.toolName !== 'string') {
      return {
        error: 'Invalid tool name provided',
        content: [{ type: 'text', text: 'Tool name must be a non-empty string' }]
      }
    }

    const extension = this.getMCPExtension()
    if (!extension) {
      return {
        error: 'MCP extension not available',
        content: [{ type: 'text', text: 'MCP service is not available' }]
      }
    }

    try {
      const result = await extension.callTool(args.toolName, args.arguments as Record<string, unknown>)

      // Ensure OpenAI-compliant response format
      if (!result.content || !Array.isArray(result.content)) {
        return {
          error: 'Invalid tool response format',
          content: [{ type: 'text', text: 'Tool returned invalid response format' }]
        }
      }

      return {
        error: result.error || '',
        content: result.content.map(item => ({
          type: item.type || 'text',
          text: item.text || JSON.stringify(item)
        }))
      }
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error)
      return {
        error: errorMessage,
        content: [{ type: 'text', text: `Tool execution failed: ${errorMessage}` }]
      }
    }
  }

  /**
   * Start a cancellable tool call. Returns the in-flight promise, a cancel()
   * function, and the cancellation token (caller-supplied or generated).
   * Invalid tool names short-circuit with an already-resolved error result
   * and the sentinel token 'invalid'.
   */
  callToolWithCancellation(args: {
    toolName: string
    arguments: object
    cancellationToken?: string
  }): ToolCallWithCancellationResult {
    // Validate input parameters
    if (!args.toolName || typeof args.toolName !== 'string') {
      const errorResult: MCPToolCallResult = {
        error: 'Invalid tool name provided',
        content: [{ type: 'text', text: 'Tool name must be a non-empty string' }]
      }
      return {
        promise: Promise.resolve(errorResult),
        cancel: async () => {}, // No-op for failed validation
        token: 'invalid'
      }
    }

    const token = args.cancellationToken || this.generateCancellationToken()
    const abortController = new AbortController()
    this.abortControllers.set(token, abortController)

    // Always remove the controller once the call settles, cancelled or not.
    const promise = this.callToolWithAbort(args, abortController.signal)
      .finally(() => {
        this.abortControllers.delete(token)
      })

    return {
      promise,
      cancel: async () => {
        const controller = this.abortControllers.get(token)
        if (controller && !controller.signal.aborted) {
          controller.abort()
        }
        this.abortControllers.delete(token)
      },
      token
    }
  }

  /**
   * Execute a tool call that resolves early with a cancellation result when
   * `signal` aborts. The underlying extension call is not interrupted; its
   * late result is ignored once the signal has aborted. Never rejects.
   */
  private async callToolWithAbort(
    args: { toolName: string; arguments: object },
    signal: AbortSignal
  ): Promise<MCPToolCallResult> {
    // Check if already aborted
    if (signal.aborted) {
      return {
        error: 'Tool call was cancelled',
        content: [{ type: 'text', text: 'Tool call was cancelled by user' }]
      }
    }

    const extension = this.getMCPExtension()
    if (!extension) {
      return {
        error: 'MCP extension not available',
        content: [{ type: 'text', text: 'MCP service is not available' }]
      }
    }

    return new Promise((resolve) => {
      // Abort wins the race: resolve immediately with a cancellation result.
      const abortHandler = () => {
        resolve({
          error: 'Tool call was cancelled',
          content: [{ type: 'text', text: 'Tool call was cancelled by user' }]
        })
      }

      signal.addEventListener('abort', abortHandler, { once: true })

      extension.callTool(args.toolName, args.arguments as Record<string, unknown>)
        .then(result => {
          // Only surface the result if cancellation did not happen first.
          if (!signal.aborted) {
            if (!result.content || !Array.isArray(result.content)) {
              resolve({
                error: 'Invalid tool response format',
                content: [{ type: 'text', text: 'Tool returned invalid response format' }]
              })
              return
            }

            resolve({
              error: result.error || '',
              content: result.content.map(item => ({
                type: item.type || 'text',
                text: item.text || JSON.stringify(item)
              }))
            })
          }
        })
        .catch(error => {
          if (!signal.aborted) {
            const errorMessage = error instanceof Error ? error.message : String(error)
            resolve({
              error: errorMessage,
              content: [{ type: 'text', text: `Tool execution failed: ${errorMessage}` }]
            })
          }
        })
        .finally(() => {
          signal.removeEventListener('abort', abortHandler)
        })
    })
  }

  /** Abort the in-flight call registered under `cancellationToken`, if any. */
  async cancelToolCall(cancellationToken: string): Promise<void> {
    const controller = this.abortControllers.get(cancellationToken)
    if (controller) {
      controller.abort()
      this.abortControllers.delete(cancellationToken)
    }
  }

  /**
   * Activation is handled remotely on web; locally this only refreshes the
   * extension's tool list. `_config` is accepted for interface parity.
   */
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  async activateMCPServer(name: string, _config: MCPServerConfig): Promise<void> {
    // For web platform, server activation is handled remotely
    this.invalidateCache()
    const extension = this.getMCPExtension()
    if (extension) {
      try {
        await extension.refreshTools()
      } catch (error) {
        throw new Error(`Failed to activate MCP server ${name}: ${error instanceof Error ? error.message : String(error)}`)
      }
    }
  }

  /**
   * Deactivation is handled remotely on web; locally this only refreshes the
   * extension's tool list.
   */
  async deactivateMCPServer(name: string): Promise<void> {
    // For web platform, server deactivation is handled remotely
    this.invalidateCache()
    const extension = this.getMCPExtension()
    if (extension) {
      try {
        await extension.refreshTools()
      } catch (error) {
        throw new Error(`Failed to deactivate MCP server ${name}: ${error instanceof Error ? error.message : String(error)}`)
      }
    }
  }

  // Token format: timestamp + random base-36 suffix; uniqueness is
  // probabilistic, not guaranteed (Math.random), which suffices for a
  // per-session cancellation map.
  private generateCancellationToken(): string {
    return `mcp_cancel_${Date.now()}_${Math.random().toString(36).substring(2, 11)}`
  }
}

View File

@ -11,7 +11,14 @@ import {
modelInfo,
} from '@janhq/core'
import { Model as CoreModel } from '@janhq/core'
import type { ModelsService, ModelCatalog, HuggingFaceRepo, CatalogModel } from './types'
import type {
ModelsService,
ModelCatalog,
HuggingFaceRepo,
CatalogModel,
ModelValidationResult,
ModelPlan,
} from './types'
// TODO: Replace this with the actual provider later
const defaultProvider = 'llamacpp'
@ -151,7 +158,9 @@ export class DefaultModelsService implements ModelsService {
async updateModel(model: Partial<CoreModel>): Promise<void> {
if (model.settings)
this.getEngine()?.updateSettings(model.settings as SettingComponentProps[])
this.getEngine()?.updateSettings(
model.settings as SettingComponentProps[]
)
}
async pullModel(
@ -266,7 +275,10 @@ export class DefaultModelsService implements ModelsService {
if (models) await Promise.all(models.map((model) => this.stopModel(model)))
}
async startModel(provider: ProviderObject, model: string): Promise<SessionInfo | undefined> {
async startModel(
provider: ProviderObject,
model: string
): Promise<SessionInfo | undefined> {
const engine = this.getEngine(provider.provider)
if (!engine) return undefined
@ -312,7 +324,10 @@ export class DefaultModelsService implements ModelsService {
async checkMmprojExistsAndUpdateOffloadMMprojSetting(
modelId: string,
updateProvider?: (providerName: string, data: Partial<ModelProvider>) => void,
updateProvider?: (
providerName: string,
data: Partial<ModelProvider>
) => void,
getProviderByName?: (providerName: string) => ModelProvider | undefined
): Promise<{ exists: boolean; settingsUpdated: boolean }> {
let settingsUpdated = false
@ -374,7 +389,8 @@ export class DefaultModelsService implements ModelsService {
(p: { provider: string }) => p.provider === 'llamacpp'
)
const model = llamacppProvider?.models?.find(
(m: { id: string; settings?: Record<string, unknown> }) => m.id === modelId
(m: { id: string; settings?: Record<string, unknown> }) =>
m.id === modelId
)
if (model?.settings) {
@ -429,7 +445,10 @@ export class DefaultModelsService implements ModelsService {
return false
}
async isModelSupported(modelPath: string, ctxSize?: number): Promise<'RED' | 'YELLOW' | 'GREEN' | 'GREY'> {
async isModelSupported(
modelPath: string,
ctxSize?: number
): Promise<'RED' | 'YELLOW' | 'GREEN' | 'GREY'> {
try {
const engine = this.getEngine('llamacpp') as AIEngine & {
isModelSupported?: (
@ -448,4 +467,81 @@ export class DefaultModelsService implements ModelsService {
return 'GREY' // Error state, assume not supported
}
}
}
async validateGgufFile(filePath: string): Promise<ModelValidationResult> {
try {
const engine = this.getEngine('llamacpp') as AIEngine & {
validateGgufFile?: (path: string) => Promise<ModelValidationResult>
}
if (engine && typeof engine.validateGgufFile === 'function') {
return await engine.validateGgufFile(filePath)
}
// If the specific method isn't available, we can fallback to a basic check
console.warn('validateGgufFile method not available in llamacpp engine')
return {
isValid: true, // Assume valid for now
error: 'Validation method not available',
}
} catch (error) {
console.error(`Error validating GGUF file ${filePath}:`, error)
return {
isValid: false,
error: error instanceof Error ? error.message : 'Unknown error',
}
}
}
async planModelLoad(
modelPath: string,
mmprojPath?: string,
requestedCtx?: number
): Promise<ModelPlan> {
try {
const engine = this.getEngine('llamacpp') as AIEngine & {
planModelLoad?: (
path: string,
mmprojPath?: string,
requestedCtx?: number
) => Promise<ModelPlan>
}
if (engine && typeof engine.planModelLoad === 'function') {
// Get the full absolute path to the model file
const janDataFolderPath = await import('@janhq/core').then((core) =>
core.getJanDataFolderPath()
)
const joinPath = await import('@janhq/core').then(
(core) => core.joinPath
)
const fullModelPath = await joinPath([janDataFolderPath, modelPath])
// mmprojPath is currently unused, but included for compatibility
return await engine.planModelLoad(
fullModelPath,
mmprojPath,
requestedCtx
)
}
// Fallback if method is not available
console.warn('planModelLoad method not available in llamacpp engine')
return {
gpuLayers: 0,
maxContextLength: 2048,
noOffloadKVCache: true,
offloadMmproj: false,
mode: 'Unsupported',
}
} catch (error) {
console.error(`Error planning model load for path ${modelPath}:`, error)
return {
gpuLayers: 0,
maxContextLength: 2048,
noOffloadKVCache: true,
offloadMmproj: false,
mode: 'Unsupported',
}
}
}
}

View File

@ -69,10 +69,33 @@ export interface HuggingFaceRepo {
readme?: string
}
export interface GgufMetadata {
version: number
tensor_count: number
metadata: Record<string, string>
}
export interface ModelValidationResult {
isValid: boolean
error?: string
metadata?: GgufMetadata
}
export interface ModelPlan {
gpuLayers: number
maxContextLength: number
noOffloadKVCache: boolean
offloadMmproj: boolean
mode: 'GPU' | 'Hybrid' | 'CPU' | 'Unsupported'
}
export interface ModelsService {
fetchModels(): Promise<modelInfo[]>
fetchModelCatalog(): Promise<ModelCatalog>
fetchHuggingFaceRepo(repoId: string, hfToken?: string): Promise<HuggingFaceRepo | null>
fetchHuggingFaceRepo(
repoId: string,
hfToken?: string
): Promise<HuggingFaceRepo | null>
convertHfRepoToCatalogModel(repo: HuggingFaceRepo): CatalogModel
updateModel(model: Partial<CoreModel>): Promise<void>
pullModel(
@ -95,13 +118,28 @@ export interface ModelsService {
getActiveModels(provider?: string): Promise<string[]>
stopModel(model: string, provider?: string): Promise<void>
stopAllModels(): Promise<void>
startModel(provider: ProviderObject, model: string): Promise<SessionInfo | undefined>
startModel(
provider: ProviderObject,
model: string
): Promise<SessionInfo | undefined>
isToolSupported(modelId: string): Promise<boolean>
checkMmprojExistsAndUpdateOffloadMMprojSetting(
modelId: string,
updateProvider?: (providerName: string, data: Partial<ModelProvider>) => void,
updateProvider?: (
providerName: string,
data: Partial<ModelProvider>
) => void,
getProviderByName?: (providerName: string) => ModelProvider | undefined
): Promise<{ exists: boolean; settingsUpdated: boolean }>
checkMmprojExists(modelId: string): Promise<boolean>
isModelSupported(modelPath: string, ctxSize?: number): Promise<'RED' | 'YELLOW' | 'GREEN' | 'GREY'>
}
isModelSupported(
modelPath: string,
ctxSize?: number
): Promise<'RED' | 'YELLOW' | 'GREEN' | 'GREY'>
validateGgufFile(filePath: string): Promise<ModelValidationResult>
planModelLoad(
modelPath: string,
mmprojPath?: string,
requestedCtx?: number
): Promise<ModelPlan>
}

View File

@ -75,18 +75,31 @@ export class TauriProvidersService extends DefaultProvidersService {
}) as ProviderSetting[],
models: await Promise.all(
models.map(
async (model) =>
({
async (model) => {
let capabilities: string[] = []
// Check for capabilities
if ('capabilities' in model) {
capabilities = model.capabilities as string[]
} else {
// Try to check tool support, but don't let failures block the model
try {
const toolSupported = await value.isToolSupported(model.id)
if (toolSupported) {
capabilities = [ModelCapabilities.TOOLS]
}
} catch (error) {
console.warn(`Failed to check tool support for model ${model.id}:`, error)
// Continue without tool capabilities if check fails
}
}
return {
id: model.id,
model: model.id,
name: model.name,
description: model.description,
capabilities:
'capabilities' in model
? (model.capabilities as string[])
: (await value.isToolSupported(model.id))
? [ModelCapabilities.TOOLS]
: [],
capabilities,
provider: providerName,
settings: Object.values(modelSettings).reduce(
(acc, setting) => {
@ -105,7 +118,8 @@ export class TauriProvidersService extends DefaultProvidersService {
},
{} as Record<string, ProviderSetting>
),
}) as Model
} as Model
}
)
),
}
@ -129,6 +143,12 @@ export class TauriProvidersService extends DefaultProvidersService {
'Content-Type': 'application/json',
}
// Add Origin header for local providers to avoid CORS issues
// Some local providers (like Ollama) require an Origin header
if (provider.base_url.includes('localhost:') || provider.base_url.includes('127.0.0.1:')) {
headers['Origin'] = 'tauri://localhost'
}
// Only add authentication headers if API key is provided
if (provider.api_key) {
headers['x-api-key'] = provider.api_key

View File

@ -30,6 +30,14 @@ export default defineConfig(({ mode }) => {
'@': path.resolve(__dirname, './src'),
},
},
optimizeDeps: {
exclude: ['@jan/extensions-web'],
},
build: {
rollupOptions: {
external: ['@jan/extensions-web'],
},
},
define: {
IS_TAURI: JSON.stringify(process.env.IS_TAURI),
IS_WEB_APP: JSON.stringify(false),

View File

@ -806,6 +806,73 @@
}
}
},
"/jan/v1/mcp": {
"post": {
"security": [
{
"BearerAuth": []
}
],
"description": "Handles Model Context Protocol (MCP) requests over an HTTP stream. The response is sent as a continuous stream of data.",
"consumes": [
"application/json"
],
"produces": [
"text/event-stream"
],
"tags": [
"Jan",
"Jan-MCP"
],
"summary": "MCP streamable endpoint",
"parameters": [
{
"description": "MCP request payload",
"name": "request",
"in": "body",
"required": true,
"schema": {}
}
],
"responses": {
"200": {
"description": "Streamed response (SSE or chunked transfer)",
"schema": {
"type": "string"
}
}
}
}
},
"/jan/v1/models": {
"get": {
"security": [
{
"BearerAuth": []
}
],
"description": "Retrieves a list of available models that can be used for chat completions or other tasks.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Jan",
"Jan-Models"
],
"summary": "List available models",
"responses": {
"200": {
"description": "Successful response",
"schema": {
"$ref": "#/definitions/app_interfaces_http_routes_jan_v1.ModelsResponse"
}
}
}
}
},
"/jan/v1/organizations": {
"get": {
"security": [
@ -2441,6 +2508,37 @@
}
},
"definitions": {
"app_interfaces_http_routes_jan_v1.Model": {
"type": "object",
"properties": {
"created": {
"type": "integer"
},
"id": {
"type": "string"
},
"object": {
"type": "string"
},
"owned_by": {
"type": "string"
}
}
},
"app_interfaces_http_routes_jan_v1.ModelsResponse": {
"type": "object",
"properties": {
"data": {
"type": "array",
"items": {
"$ref": "#/definitions/app_interfaces_http_routes_jan_v1.Model"
}
},
"object": {
"type": "string"
}
}
},
"app_interfaces_http_routes_jan_v1_auth.GetMeResponse": {
"type": "object",
"properties": {

9735
yarn.lock

File diff suppressed because it is too large Load Diff