diff --git a/.github/workflows/template-tauri-build-linux-x64-external.yml b/.github/workflows/template-tauri-build-linux-x64-external.yml index a88c48267..5c39e17c7 100644 --- a/.github/workflows/template-tauri-build-linux-x64-external.yml +++ b/.github/workflows/template-tauri-build-linux-x64-external.yml @@ -70,10 +70,9 @@ jobs: run: | echo "Version: ${{ inputs.new_version }}" jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = false' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json - mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json + mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json if [ "${{ inputs.channel }}" != "stable" ]; then - jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun", - "usr/lib/Jan-${{ inputs.channel }}/resources/lib/libvulkan.so": "resources/lib/libvulkan.so"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json + jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json mv /tmp/tauri.linux.conf.json ./src-tauri/tauri.linux.conf.json fi jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json @@ -83,7 +82,7 @@ jobs: jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-hardware/package.json > /tmp/package.json mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-hardware/package.json - + echo "---------./src-tauri/plugins/tauri-plugin-hardware/package.json---------" cat ./src-tauri/plugins/tauri-plugin-hardware/package.json @@ -96,7 +95,7 @@ jobs: ctoml ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml package.version "${{ inputs.new_version }}" echo "---------./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml---------" cat ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml - + ctoml ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml package.version "${{ inputs.new_version }}" echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml---------" cat ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml @@ -125,7 +124,7 @@ jobs: env: RELEASE_CHANNEL: '${{ inputs.channel }}' AUTO_UPDATER_DISABLED: ${{ inputs.disable_updater && 'true' || 'false' }} - + - name: Upload Artifact uses: actions/upload-artifact@v4 with: @@ -136,4 +135,4 @@ jobs: uses: actions/upload-artifact@v4 with: name: jan-linux-amd64-${{ inputs.new_version }}-AppImage - path: ./src-tauri/target/release/bundle/appimage/*.AppImage \ No newline at end of file + path: ./src-tauri/target/release/bundle/appimage/*.AppImage diff --git a/.github/workflows/template-tauri-build-linux-x64-flatpak.yml b/.github/workflows/template-tauri-build-linux-x64-flatpak.yml index 15d4827f7..d63fae3e7 100644 --- a/.github/workflows/template-tauri-build-linux-x64-flatpak.yml +++ b/.github/workflows/template-tauri-build-linux-x64-flatpak.yml @@ -91,10 +91,9 @@ jobs: echo "Version: ${{ inputs.new_version }}" # Update tauri.conf.json jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = true' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json - mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json + mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json if [ "${{ inputs.channel }}" != "stable" ]; then - jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun", - "usr/lib/Jan-${{ inputs.channel }}/resources/lib/libvulkan.so": "resources/lib/libvulkan.so"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json + 
jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json mv /tmp/tauri.linux.conf.json ./src-tauri/tauri.linux.conf.json fi jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json @@ -104,7 +103,7 @@ jobs: jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-hardware/package.json > /tmp/package.json mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-hardware/package.json - + echo "---------./src-tauri/plugins/tauri-plugin-hardware/package.json---------" cat ./src-tauri/plugins/tauri-plugin-hardware/package.json @@ -117,7 +116,7 @@ jobs: ctoml ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml package.version "${{ inputs.new_version }}" echo "---------./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml---------" cat ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml - + ctoml ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml package.version "${{ inputs.new_version }}" echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml---------" cat ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml @@ -128,7 +127,7 @@ jobs: # Temporarily enable devtool on prod build ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools" - cat ./src-tauri/Cargo.toml + cat ./src-tauri/Cargo.toml # Change app name for beta and nightly builds if [ "${{ inputs.channel }}" != "stable" ]; then @@ -139,7 +138,7 @@ jobs: .github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }} cat ./src-tauri/tauri.conf.json - + # Update Cargo.toml ctoml ./src-tauri/Cargo.toml package.name "Jan-${{ inputs.channel }}" ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools" @@ -184,4 +183,3 @@ jobs: with: name: jan-linux-amd64-flatpak-${{ inputs.new_version }}-AppImage path: ./src-tauri/target/release/bundle/appimage/*.AppImage - diff --git a/.github/workflows/template-tauri-build-linux-x64.yml b/.github/workflows/template-tauri-build-linux-x64.yml index bd9b38369..487571595 100644 --- a/.github/workflows/template-tauri-build-linux-x64.yml +++ b/.github/workflows/template-tauri-build-linux-x64.yml @@ -108,10 +108,9 @@ jobs: echo "Version: ${{ inputs.new_version }}" # Update tauri.conf.json jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = true' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json - mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json + mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json if [ "${{ inputs.channel }}" != "stable" ]; then - jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun", - "usr/lib/Jan-${{ inputs.channel }}/resources/lib/libvulkan.so": "resources/lib/libvulkan.so"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json + jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json mv /tmp/tauri.linux.conf.json ./src-tauri/tauri.linux.conf.json fi jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json @@ -121,7 +120,7 @@ jobs: jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-hardware/package.json > /tmp/package.json mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-hardware/package.json - + echo "---------./src-tauri/plugins/tauri-plugin-hardware/package.json---------" cat 
./src-tauri/plugins/tauri-plugin-hardware/package.json @@ -134,7 +133,7 @@ jobs: ctoml ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml package.version "${{ inputs.new_version }}" echo "---------./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml---------" cat ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml - + ctoml ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml package.version "${{ inputs.new_version }}" echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml---------" cat ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml @@ -156,7 +155,7 @@ jobs: .github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }} cat ./src-tauri/tauri.conf.json - + # Update Cargo.toml ctoml ./src-tauri/Cargo.toml package.name "Jan-${{ inputs.channel }}" ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools" diff --git a/.github/workflows/template-tauri-build-windows-x64.yml b/.github/workflows/template-tauri-build-windows-x64.yml index 643fef5ac..ed00ef90f 100644 --- a/.github/workflows/template-tauri-build-windows-x64.yml +++ b/.github/workflows/template-tauri-build-windows-x64.yml @@ -54,6 +54,8 @@ on: value: ${{ jobs.build-windows-x64.outputs.WIN_SIG }} FILE_NAME: value: ${{ jobs.build-windows-x64.outputs.FILE_NAME }} + MSI_FILE_NAME: + value: ${{ jobs.build-windows-x64.outputs.MSI_FILE_NAME }} jobs: build-windows-x64: @@ -61,6 +63,7 @@ jobs: outputs: WIN_SIG: ${{ steps.metadata.outputs.WIN_SIG }} FILE_NAME: ${{ steps.metadata.outputs.FILE_NAME }} + MSI_FILE_NAME: ${{ steps.metadata.outputs.MSI_FILE_NAME }} permissions: contents: write steps: @@ -189,9 +192,15 @@ jobs: - name: Upload Artifact uses: actions/upload-artifact@v4 with: - name: jan-windows-${{ inputs.new_version }} + name: jan-windows-exe-${{ inputs.new_version }} path: | ./src-tauri/target/release/bundle/nsis/*.exe + - name: Upload Artifact + uses: actions/upload-artifact@v4 + with: + name: jan-windows-msi-${{ inputs.new_version }} + path: | + ./src-tauri/target/release/bundle/msi/*.msi ## Set output filename for windows - name: Set output filename for windows @@ -201,13 +210,18 @@ jobs: if [ "${{ inputs.channel }}" != "stable" ]; then FILE_NAME=Jan-${{ inputs.channel }}_${{ inputs.new_version }}_x64-setup.exe WIN_SIG=$(cat Jan-${{ inputs.channel }}_${{ inputs.new_version }}_x64-setup.exe.sig) + + MSI_FILE="Jan-${{ inputs.channel }}_${{ inputs.new_version }}_x64_en-US.msi" else FILE_NAME=Jan_${{ inputs.new_version }}_x64-setup.exe WIN_SIG=$(cat Jan_${{ inputs.new_version }}_x64-setup.exe.sig) + + MSI_FILE="Jan_${{ inputs.new_version }}_x64_en-US.msi" fi echo "::set-output name=WIN_SIG::$WIN_SIG" echo "::set-output name=FILE_NAME::$FILE_NAME" + echo "::set-output name=MSI_FILE_NAME::$MSI_FILE" id: metadata ## Upload to s3 for nightly and beta @@ -220,6 +234,8 @@ jobs: # Upload for tauri updater aws s3 cp ./${{ steps.metadata.outputs.FILE_NAME }} s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/${{ steps.metadata.outputs.FILE_NAME }} aws s3 cp ./${{ steps.metadata.outputs.FILE_NAME }}.sig s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/${{ steps.metadata.outputs.FILE_NAME }}.sig + + aws s3 cp ./src-tauri/target/release/bundle/msi/${{ steps.metadata.outputs.MSI_FILE_NAME }} s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/${{ steps.metadata.outputs.MSI_FILE_NAME }} env: AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }} @@ -236,3 +252,13 
@@ jobs: asset_path: ./src-tauri/target/release/bundle/nsis/${{ steps.metadata.outputs.FILE_NAME }} asset_name: ${{ steps.metadata.outputs.FILE_NAME }} asset_content_type: application/octet-stream + - name: Upload release assert if public provider is github + if: inputs.public_provider == 'github' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + uses: actions/upload-release-asset@v1.0.1 + with: + upload_url: ${{ inputs.upload_url }} + asset_path: ./src-tauri/target/release/bundle/msi/${{ steps.metadata.outputs.MSI_FILE_NAME }} + asset_name: ${{ steps.metadata.outputs.MSI_FILE_NAME }} + asset_content_type: application/octet-stream diff --git a/.gitignore b/.gitignore index 6b51867ef..e78486abd 100644 --- a/.gitignore +++ b/.gitignore @@ -21,11 +21,13 @@ src-tauri/resources/lib src-tauri/icons !src-tauri/icons/icon.png src-tauri/gen/apple +src-tauri/gen/android src-tauri/resources/bin # Helper tools .opencode OpenCode.md +Claude.md archive/ .cache/ @@ -60,3 +62,4 @@ src-tauri/resources/ ## test test-data llm-docs +.claude/agents diff --git a/Makefile b/Makefile index 9a03ddaad..890f612c8 100644 --- a/Makefile +++ b/Makefile @@ -41,9 +41,25 @@ else @echo "Not macOS; skipping Rust target installation." endif +# Install required Rust targets for Android builds +install-android-rust-targets: + @echo "Checking and installing Android Rust targets..." + @rustup target list --installed | grep -q "aarch64-linux-android" || rustup target add aarch64-linux-android + @rustup target list --installed | grep -q "armv7-linux-androideabi" || rustup target add armv7-linux-androideabi + @rustup target list --installed | grep -q "i686-linux-android" || rustup target add i686-linux-android + @rustup target list --installed | grep -q "x86_64-linux-android" || rustup target add x86_64-linux-android + @echo "Android Rust targets ready!" + +# Install required Rust targets for iOS builds +install-ios-rust-targets: + @echo "Checking and installing iOS Rust targets..." + @rustup target list --installed | grep -q "aarch64-apple-ios" || rustup target add aarch64-apple-ios + @rustup target list --installed | grep -q "aarch64-apple-ios-sim" || rustup target add aarch64-apple-ios-sim + @rustup target list --installed | grep -q "x86_64-apple-ios" || rustup target add x86_64-apple-ios + @echo "iOS Rust targets ready!" + dev: install-and-build yarn download:bin - yarn download:lib yarn dev # Web application targets @@ -58,12 +74,41 @@ build-web-app: install-web-app yarn build:core yarn build:web-app -serve-web-app: +serve-web-app: yarn serve:web-app build-serve-web-app: build-web-app yarn serve:web-app +# Mobile +dev-android: install-and-build install-android-rust-targets + @echo "Setting up Android development environment..." + @if [ ! -d "src-tauri/gen/android" ]; then \ + echo "Android app not initialized. Initializing..."; \ + yarn tauri android init; \ + fi + @echo "Sourcing Android environment setup..." + @bash autoqa/scripts/setup-android-env.sh echo "Android environment ready" + @echo "Starting Android development server..." + yarn dev:android + +dev-ios: install-and-build install-ios-rust-targets + @echo "Setting up iOS development environment..." +ifeq ($(shell uname -s),Darwin) + @if [ ! -d "src-tauri/gen/ios" ]; then \ + echo "iOS app not initialized. Initializing..."; \ + yarn tauri ios init; \ + fi + @echo "Checking iOS development requirements..." + @xcrun --version > /dev/null 2>&1 || (echo "❌ Xcode command line tools not found. 
Install with: xcode-select --install" && exit 1) + @xcrun simctl list devices available | grep -q "iPhone\|iPad" || (echo "❌ No iOS simulators found. Install simulators through Xcode." && exit 1) + @echo "Starting iOS development server..." + yarn dev:ios +else + @echo "❌ iOS development is only supported on macOS" + @exit 1 +endif + # Linting lint: install-and-build yarn lint @@ -71,7 +116,6 @@ lint: install-and-build # Testing test: lint yarn download:bin - yarn download:lib ifeq ($(OS),Windows_NT) yarn download:windows-installer endif diff --git a/README.md b/README.md index 656917634..b2de4407f 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -# Jan - Local AI Assistant +# Jan - Open-source ChatGPT replacement -![Jan AI](docs/src/pages/docs/_assets/jan-app.png) +github jan banner

@@ -12,15 +12,13 @@

- Getting Started - - Docs + Getting Started + - Community - Changelog - Bug reports - - Discord

-Jan is an AI assistant that can run 100% offline on your device. Download and run LLMs with -**full control** and **privacy**. +Jan is bringing the best of open-source AI into an easy-to-use product. Download and run LLMs with **full control** and **privacy**. ## Installation @@ -29,41 +27,36 @@ The easiest way to get started is by downloading one of the following versions f
Platform | Stable | Nightly | Download
Windows | jan.exe | jan.exe
macOS | jan.dmg | jan.dmg
Linux (deb) | jan.deb | jan.deb
Linux (AppImage) | jan.AppImage | jan.AppImage
-Download from [jan.ai](https://jan.ai/) or [GitHub Releases](https://github.com/menloresearch/jan/releases). +Download from [jan.ai](https://jan.ai/) or [GitHub Releases](https://github.com/menloresearch/jan/releases). ## Features -- **Local AI Models**: Download and run LLMs (Llama, Gemma, Qwen, etc.) from HuggingFace -- **Cloud Integration**: Connect to OpenAI, Anthropic, Mistral, Groq, and others +- **Local AI Models**: Download and run LLMs (Llama, Gemma, Qwen, GPT-oss etc.) from HuggingFace +- **Cloud Integration**: Connect to GPT models via OpenAI, Claude models via Anthropic, Mistral, Groq, and others - **Custom Assistants**: Create specialized AI assistants for your tasks - **OpenAI-Compatible API**: Local server at `localhost:1337` for other applications -- **Model Context Protocol**: MCP integration for enhanced capabilities +- **Model Context Protocol**: MCP integration for agentic capabilities - **Privacy First**: Everything runs locally when you want it to ## Build from Source diff --git a/autoqa/scripts/setup-android-env.sh b/autoqa/scripts/setup-android-env.sh new file mode 100755 index 000000000..62adc079f --- /dev/null +++ b/autoqa/scripts/setup-android-env.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +# Android Development Environment Setup for Jan + +# Ensure rustup's Rust toolchain is used instead of Homebrew's +export PATH="$HOME/.cargo/bin:$PATH" + +# Set JAVA_HOME for Android builds +export JAVA_HOME=/opt/homebrew/opt/openjdk@17/libexec/openjdk.jdk/Contents/Home +export PATH="/opt/homebrew/opt/openjdk@17/bin:$PATH" + +export ANDROID_HOME="$HOME/Library/Android/sdk" +export ANDROID_NDK_ROOT="$HOME/Library/Android/sdk/ndk/29.0.14033849" +export NDK_HOME="$HOME/Library/Android/sdk/ndk/29.0.14033849" + +# Add Android tools to PATH +export PATH=$PATH:$ANDROID_HOME/platform-tools:$ANDROID_HOME/tools:$ANDROID_HOME/cmdline-tools/latest/bin:$ANDROID_HOME/emulator:$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin + +# Set up CC and CXX for Android compilation +export CC_aarch64_linux_android="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang" +export CXX_aarch64_linux_android="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang++" +export AR_aarch64_linux_android="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/llvm-ar" +export RANLIB_aarch64_linux_android="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/llvm-ranlib" + +# Additional environment variables for Rust cross-compilation +export CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang" + +# Only set global CC and AR for Android builds (when TAURI_ANDROID_BUILD is set) +if [ "$TAURI_ANDROID_BUILD" = "true" ]; then + export CC="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang" + export AR="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/llvm-ar" + echo "Global CC and AR set for Android build" +fi + +# Create symlinks for Android tools if they don't exist +mkdir -p ~/.local/bin +if [ ! -f ~/.local/bin/aarch64-linux-android-ranlib ]; then + ln -sf $NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/llvm-ranlib ~/.local/bin/aarch64-linux-android-ranlib +fi +if [ ! -f ~/.local/bin/aarch64-linux-android-clang ]; then + ln -sf $NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang ~/.local/bin/aarch64-linux-android-clang +fi +if [ ! 
-f ~/.local/bin/aarch64-linux-android-clang++ ]; then + ln -sf $NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang++ ~/.local/bin/aarch64-linux-android-clang++ +fi + +# Fix the broken clang symlinks by ensuring base clang is available +if [ ! -f ~/.local/bin/clang ]; then + ln -sf $NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/clang ~/.local/bin/clang +fi +if [ ! -f ~/.local/bin/clang++ ]; then + ln -sf $NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/clang++ ~/.local/bin/clang++ +fi + +# Create symlinks for target-specific ar tools +if [ ! -f ~/.local/bin/aarch64-linux-android-ar ]; then + ln -sf $NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/llvm-ar ~/.local/bin/aarch64-linux-android-ar +fi +export PATH="$HOME/.local/bin:$PATH" + +echo "Android environment configured:" +echo "ANDROID_HOME: $ANDROID_HOME" +echo "ANDROID_NDK_ROOT: $ANDROID_NDK_ROOT" +echo "PATH includes NDK toolchain: $(echo $PATH | grep -o "ndk.*bin" || echo "NOT FOUND")" + +# Verify required tools +echo -e "\nChecking required tools:" +which adb && echo "✅ adb found" || echo "❌ adb not found" +which emulator && echo "✅ emulator found" || echo "❌ emulator not found" +which $CC_aarch64_linux_android && echo "✅ Android clang found" || echo "❌ Android clang not found" + +# Show available AVDs +echo -e "\nAvailable Android Virtual Devices:" +emulator -list-avds 2>/dev/null || echo "No AVDs found" + +# Execute the provided command +if [ "$1" ]; then + echo -e "\nExecuting: $@" + exec "$@" +fi \ No newline at end of file diff --git a/core/package.json b/core/package.json index eec56a733..203eaf293 100644 --- a/core/package.json +++ b/core/package.json @@ -27,11 +27,13 @@ "devDependencies": { "@npmcli/arborist": "^7.1.0", "@types/node": "^22.10.0", + "@types/react": "19.1.2", "@vitest/coverage-v8": "^2.1.8", "@vitest/ui": "^2.1.8", "eslint": "8.57.0", "happy-dom": "^15.11.6", "pacote": "^21.0.0", + "react": "19.0.0", "request": "^2.88.2", "request-progress": "^3.0.0", "rimraf": "^6.0.1", @@ -44,5 +46,8 @@ "rxjs": "^7.8.1", "ulidx": "^2.3.0" }, + "peerDependencies": { + "react": "19.0.0" + }, "packageManager": "yarn@4.5.3" } diff --git a/core/rolldown.config.mjs b/core/rolldown.config.mjs index fd3329ee0..fbb2bd351 100644 --- a/core/rolldown.config.mjs +++ b/core/rolldown.config.mjs @@ -10,7 +10,7 @@ export default defineConfig([ sourcemap: true, }, platform: 'browser', - external: ['path'], + external: ['path', 'react', 'react-dom', 'react/jsx-runtime'], define: { NODE: JSON.stringify(`${pkgJson.name}/${pkgJson.node}`), VERSION: JSON.stringify(pkgJson.version), diff --git a/core/src/browser/extensions/conversational.test.ts b/core/src/browser/extensions/conversational.test.ts index c08468905..44d1e9b4f 100644 --- a/core/src/browser/extensions/conversational.test.ts +++ b/core/src/browser/extensions/conversational.test.ts @@ -250,4 +250,4 @@ describe('ConversationalExtension', () => { expect(retrievedAssistant.modelId).toBe('') }) -}) \ No newline at end of file +}) diff --git a/core/src/browser/extensions/engines/LocalOAIEngine.test.ts b/core/src/browser/extensions/engines/LocalOAIEngine.test.ts index 5f2563d56..3523c3ce6 100644 --- a/core/src/browser/extensions/engines/LocalOAIEngine.test.ts +++ b/core/src/browser/extensions/engines/LocalOAIEngine.test.ts @@ -131,4 +131,4 @@ describe('LocalOAIEngine', () => { expect(engine.loadedModel).toBeUndefined() }) }) -}) \ No newline at end of file +}) diff --git a/core/src/browser/extensions/mcp.test.ts 
b/core/src/browser/extensions/mcp.test.ts index ece971809..8ba3f200a 100644 --- a/core/src/browser/extensions/mcp.test.ts +++ b/core/src/browser/extensions/mcp.test.ts @@ -96,4 +96,4 @@ describe('MCPExtension', () => { expect(healthy).toBe(true) }) }) -}) \ No newline at end of file +}) diff --git a/core/src/browser/extensions/mcp.ts b/core/src/browser/extensions/mcp.ts index 7f30a5428..74a008d40 100644 --- a/core/src/browser/extensions/mcp.ts +++ b/core/src/browser/extensions/mcp.ts @@ -1,5 +1,6 @@ -import { MCPInterface, MCPTool, MCPToolCallResult } from '../../types' +import { MCPInterface, MCPTool, MCPToolCallResult, MCPToolComponentProps } from '../../types' import { BaseExtension, ExtensionTypeEnum } from '../extension' +import type { ComponentType } from 'react' /** * MCP (Model Context Protocol) extension for managing tools and server communication. @@ -18,4 +19,16 @@ export abstract class MCPExtension extends BaseExtension implements MCPInterface abstract getConnectedServers(): Promise abstract refreshTools(): Promise abstract isHealthy(): Promise -} \ No newline at end of file + + /** + * Optional method to provide a custom UI component for tools + * @returns A React component or null if no custom component is provided + */ + getToolComponent?(): ComponentType | null + + /** + * Optional method to get the list of tool names that should be disabled by default + * @returns Array of tool names that should be disabled by default for new users + */ + getDefaultDisabledTools?(): Promise +} diff --git a/core/src/browser/models/manager.test.ts b/core/src/browser/models/manager.test.ts index 90626b22e..0e4e728cf 100644 --- a/core/src/browser/models/manager.test.ts +++ b/core/src/browser/models/manager.test.ts @@ -131,4 +131,4 @@ describe('ModelManager', () => { expect(modelManager.models.get('model-2')).toEqual(model2) }) }) -}) \ No newline at end of file +}) diff --git a/core/src/test/setup.ts b/core/src/test/setup.ts index c597a3748..6f3d766bf 100644 --- a/core/src/test/setup.ts +++ b/core/src/test/setup.ts @@ -16,4 +16,4 @@ if (!window.core) { }) } -// Add any other global mocks needed for core tests \ No newline at end of file +// Add any other global mocks needed for core tests diff --git a/core/src/types/mcp/index.ts b/core/src/types/mcp/index.ts index 4ffd501fc..bd809d790 100644 --- a/core/src/types/mcp/index.ts +++ b/core/src/types/mcp/index.ts @@ -1,2 +1,2 @@ export * from './mcpEntity' -export * from './mcpInterface' \ No newline at end of file +export * from './mcpInterface' diff --git a/core/src/types/mcp/mcpEntity.ts b/core/src/types/mcp/mcpEntity.ts index a2259e52e..6c7e0c598 100644 --- a/core/src/types/mcp/mcpEntity.ts +++ b/core/src/types/mcp/mcpEntity.ts @@ -21,4 +21,18 @@ export interface MCPServerInfo { name: string connected: boolean tools?: MCPTool[] -} \ No newline at end of file +} + +/** + * Props for MCP tool UI components + */ +export interface MCPToolComponentProps { + /** List of available MCP tools */ + tools: MCPTool[] + + /** Function to check if a specific tool is currently enabled */ + isToolEnabled: (toolName: string) => boolean + + /** Function to toggle a tool's enabled/disabled state */ + onToolToggle: (toolName: string, enabled: boolean) => void +} diff --git a/core/src/types/mcp/mcpInterface.ts b/core/src/types/mcp/mcpInterface.ts index 15152a83b..a656e9f66 100644 --- a/core/src/types/mcp/mcpInterface.ts +++ b/core/src/types/mcp/mcpInterface.ts @@ -29,4 +29,4 @@ export interface MCPInterface { * Check if MCP service is healthy */ isHealthy(): 
Promise -} \ No newline at end of file +} diff --git a/docs/_redirects b/docs/_redirects index 17342a020..b72c65400 100644 --- a/docs/_redirects +++ b/docs/_redirects @@ -112,6 +112,12 @@ /docs/remote-models/openrouter /docs/desktop/remote-models/openrouter 302 /docs/server-examples/llmcord /docs/desktop/server-examples/llmcord 302 /docs/server-examples/tabby /docs/desktop/server-examples/tabby 302 +/docs/built-in/tensorrt-llm /docs/desktop/llama-cpp 302 +/docs/desktop/docs/desktop/linux /docs/desktop/install/linux 302 +/windows /docs/desktop/install/windows 302 +/docs/quickstart /docs/ 302 +/docs/desktop/mac /docs/desktop/install/mac 302 +/handbook/open-superintelligence /handbook/why/open-superintelligence 302 /guides/integrations/continue/ /docs/desktop/server-examples/continue-dev 302 /continue-dev /docs/desktop/server-examples/continue-dev 302 @@ -130,4 +136,4 @@ /local-server/troubleshooting /docs/desktop/troubleshooting 302 /mcp /docs/desktop/mcp 302 /quickstart /docs/desktop/quickstart 302 -/server-examples/continue-dev /docs/desktop/server-examples/continue-dev 302 \ No newline at end of file +/server-examples/continue-dev /docs/desktop/server-examples/continue-dev 302 diff --git a/docs/plopfile.js b/docs/plopfile.js index a31caa889..0c384af85 100644 --- a/docs/plopfile.js +++ b/docs/plopfile.js @@ -6,7 +6,7 @@ const camelCase = (str) => { return str.replace(/[-_](\w)/g, (_, c) => c.toUpperCase()) } -const categories = ['building-jan', 'research'] +const categories = ['building-jan', 'research', 'guides'] /** * @param {import("plop").NodePlopAPI} plop diff --git a/docs/public/assets/images/general/ai-for-teacher.jpeg b/docs/public/assets/images/general/ai-for-teacher.jpeg new file mode 100644 index 000000000..c600389b3 Binary files /dev/null and b/docs/public/assets/images/general/ai-for-teacher.jpeg differ diff --git a/docs/public/assets/images/general/assistants-ai-for-teachers.jpeg b/docs/public/assets/images/general/assistants-ai-for-teachers.jpeg new file mode 100644 index 000000000..b9aedf135 Binary files /dev/null and b/docs/public/assets/images/general/assistants-ai-for-teachers.jpeg differ diff --git a/docs/public/assets/images/general/chatgpt-alternative-jan.jpeg b/docs/public/assets/images/general/chatgpt-alternative-jan.jpeg new file mode 100644 index 000000000..c4e18913a Binary files /dev/null and b/docs/public/assets/images/general/chatgpt-alternative-jan.jpeg differ diff --git a/docs/src/pages/post/_assets/cover-kernel-benchmarking.png b/docs/public/assets/images/general/cover-kernel-benchmarking.png similarity index 100% rename from docs/src/pages/post/_assets/cover-kernel-benchmarking.png rename to docs/public/assets/images/general/cover-kernel-benchmarking.png diff --git a/docs/src/pages/post/_assets/deepseek-r1-locally-jan.jpg b/docs/public/assets/images/general/deepseek-r1-locally-jan.jpg similarity index 100% rename from docs/src/pages/post/_assets/deepseek-r1-locally-jan.jpg rename to docs/public/assets/images/general/deepseek-r1-locally-jan.jpg diff --git a/docs/src/pages/post/_assets/gpt-oss locally.jpeg b/docs/public/assets/images/general/gpt-oss locally.jpeg similarity index 100% rename from docs/src/pages/post/_assets/gpt-oss locally.jpeg rename to docs/public/assets/images/general/gpt-oss locally.jpeg diff --git a/docs/public/assets/images/general/is-chatgpt-down.jpg b/docs/public/assets/images/general/is-chatgpt-down.jpg new file mode 100644 index 000000000..2a515d344 Binary files /dev/null and b/docs/public/assets/images/general/is-chatgpt-down.jpg 
differ diff --git a/docs/public/assets/images/general/jan-ai-for-teacher.mp4 b/docs/public/assets/images/general/jan-ai-for-teacher.mp4 new file mode 100644 index 000000000..3930e6e26 Binary files /dev/null and b/docs/public/assets/images/general/jan-ai-for-teacher.mp4 differ diff --git a/docs/public/assets/images/general/jan-assistants-ai-for-legal.jpeg b/docs/public/assets/images/general/jan-assistants-ai-for-legal.jpeg new file mode 100644 index 000000000..b49de6380 Binary files /dev/null and b/docs/public/assets/images/general/jan-assistants-ai-for-legal.jpeg differ diff --git a/docs/public/assets/images/general/jan-for-ai-law-assistant-chat.jpeg b/docs/public/assets/images/general/jan-for-ai-law-assistant-chat.jpeg new file mode 100644 index 000000000..5501b5e70 Binary files /dev/null and b/docs/public/assets/images/general/jan-for-ai-law-assistant-chat.jpeg differ diff --git a/docs/src/pages/post/_assets/offline-chatgpt-alternatives-jan.jpg b/docs/public/assets/images/general/offline-chatgpt-alternatives-jan.jpg similarity index 100% rename from docs/src/pages/post/_assets/offline-chatgpt-alternatives-jan.jpg rename to docs/public/assets/images/general/offline-chatgpt-alternatives-jan.jpg diff --git a/docs/src/pages/post/_assets/qwen3-settings-jan-ai.jpeg b/docs/public/assets/images/general/qwen3-30b-settings.jpg similarity index 100% rename from docs/src/pages/post/_assets/qwen3-settings-jan-ai.jpeg rename to docs/public/assets/images/general/qwen3-30b-settings.jpg diff --git a/docs/src/pages/post/_assets/research-result-local.png b/docs/public/assets/images/general/research-result-local.png similarity index 100% rename from docs/src/pages/post/_assets/research-result-local.png rename to docs/public/assets/images/general/research-result-local.png diff --git a/docs/src/pages/post/_assets/run-ai-locally-with-jan.jpg b/docs/public/assets/images/general/run-ai-locally-with-jan.jpg similarity index 100% rename from docs/src/pages/post/_assets/run-ai-locally-with-jan.jpg rename to docs/public/assets/images/general/run-ai-locally-with-jan.jpg diff --git a/docs/public/sitemap-0.xml b/docs/public/sitemap-0.xml deleted file mode 100644 index 131222295..000000000 --- a/docs/public/sitemap-0.xml +++ /dev/null @@ -1,125 +0,0 @@ - - -https://jan.ai2025-09-24T03:40:05.491Zdaily1 -https://jan.ai/api-reference2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/api-reference/api-reference2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/api-reference/architecture2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/api-reference/configuration2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/api-reference/development2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/api-reference/installation2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/blog2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2023-12-21-faster-inference-across-platform2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-01-16-settings-options-right-panel2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-01-29-local-api-server2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-02-05-jan-data-folder2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-02-10-jan-is-more-stable2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-02-26-home-servers-with-helm2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-03-06-ui-revamp-settings2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-03-11-import-models2025-09-24T03:40:05.492Zdaily1 
-https://jan.ai/changelog/2024-03-19-nitro-tensorrt-llm-extension2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-04-02-groq-api-integration2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-04-15-new-mistral-extension2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-04-25-llama3-command-r-hugginface2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-05-20-llamacpp-upgrade-new-remote-models2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-05-28-cohere-aya-23-8b-35b-phi-3-medium2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-06-21-nvidia-nim-support2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-07-15-claude-3-5-support2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-09-01-llama3-1-gemma2-support2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-09-17-improved-cpu-performance2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-10-24-jan-stable2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-11-22-jan-bugs2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-11.14-jan-supports-qwen-coder2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-12-03-jan-is-faster2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-12-05-jan-hot-fix-mac2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2024-12-30-jan-new-privacy2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-01-06-key-issues-resolved2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-01-23-deepseek-r1-jan2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-02-18-advanced-llama.cpp-settings2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-03-14-jan-security-patch2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-05-14-jan-qwen3-patch2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-06-19-jan-ui-revamp2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-06-26-jan-nano-mcp2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-07-17-responsive-ui2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-07-31-llamacpp-tutorials2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-08-07-gpt-oss2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-08-14-general-improvs2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-08-28-image-support2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/changelog/2025-09-18-auto-optimize-vision-imports2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/api-server2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/assistants2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/data-folder2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/install/linux2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/install/mac2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/install/windows2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/jan-models/jan-nano-1282025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/jan-models/jan-nano-322025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/jan-models/jan-v12025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/jan-models/lucy2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/llama-cpp2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/llama-cpp-server2025-09-24T03:40:05.492Zdaily1 
-https://jan.ai/docs/desktop/manage-models2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/mcp2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/mcp-examples/browser/browserbase2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/mcp-examples/data-analysis/e2b2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/mcp-examples/data-analysis/jupyter2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/mcp-examples/deepresearch/octagon2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/mcp-examples/design/canva2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/mcp-examples/productivity/linear2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/mcp-examples/productivity/todoist2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/mcp-examples/search/exa2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/mcp-examples/search/serper2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/model-parameters2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/privacy2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/privacy-policy2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/quickstart2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/remote-models/anthropic2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/remote-models/cohere2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/remote-models/google2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/remote-models/groq2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/remote-models/huggingface2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/remote-models/mistralai2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/remote-models/openai2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/remote-models/openrouter2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/server-examples/continue-dev2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/server-examples/llmcord2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/server-examples/n8n2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/server-examples/tabby2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/server-settings2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/server-troubleshooting2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/settings2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/desktop/troubleshooting2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server/api-reference2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server/api-reference-administration2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server/api-reference-authentication2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server/api-reference-chat2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server/api-reference-chat-conversations2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server/api-reference-conversations2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server/api-reference-jan-responses2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server/api-reference-jan-server2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server/architecture2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server/configuration2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server/development2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/docs/server/installation2025-09-24T03:40:05.492Zdaily1 
-https://jan.ai/docs/server/overview2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/download2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/handbook2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/handbook/betting-on-open-source2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/handbook/open-superintelligence2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/post/benchmarking-nvidia-tensorrt-llm2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/post/bitdefender2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/post/data-is-moat2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/post/deepresearch2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/post/deepseek-r1-locally2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/post/jan-v1-for-research2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/post/offline-chatgpt-alternative2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/post/qwen3-settings2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/post/rag-is-not-enough2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/post/run-ai-models-locally2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/post/run-gpt-oss-locally2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/privacy2025-09-24T03:40:05.492Zdaily1 -https://jan.ai/support2025-09-24T03:40:05.492Zdaily1 - \ No newline at end of file diff --git a/docs/src/components/Blog/index.tsx b/docs/src/components/Blog/index.tsx index ca10cf408..16899df2c 100644 --- a/docs/src/components/Blog/index.tsx +++ b/docs/src/components/Blog/index.tsx @@ -19,6 +19,10 @@ const Blog = () => { name: 'Research', id: 'research', }, + { + name: 'Guides', + id: 'guides', + }, ] return ( diff --git a/docs/src/components/FooterMenu/index.tsx b/docs/src/components/FooterMenu/index.tsx index 68e1e6e78..317def75f 100644 --- a/docs/src/components/FooterMenu/index.tsx +++ b/docs/src/components/FooterMenu/index.tsx @@ -16,7 +16,10 @@ const FOOTER_MENUS: FooterMenu[] = [ { title: 'Company', links: [ - { name: 'Vision', href: '/', comingSoon: true }, + { + name: 'Open Superintelligence', + href: '/handbook/why/open-superintelligence', + }, { name: 'Handbook', href: '/handbook' }, { name: 'Community', href: 'https://discord.com/invite/FTk2MvZwJH' }, { name: 'Careers', href: 'https://menlo.bamboohr.com/careers' }, diff --git a/docs/src/components/Navbar.tsx b/docs/src/components/Navbar.tsx index 51044e9c7..b12fcaa73 100644 --- a/docs/src/components/Navbar.tsx +++ b/docs/src/components/Navbar.tsx @@ -4,7 +4,7 @@ import { useRouter } from 'next/router' import { cn } from '@/lib/utils' import { FaDiscord, FaGithub } from 'react-icons/fa' import { FiDownload } from 'react-icons/fi' -import { FaXTwitter } from 'react-icons/fa6' +import { FaXTwitter, FaLinkedinIn } from 'react-icons/fa6' import { Button } from './ui/button' import LogoJanSVG from '@/assets/icons/logo-jan.svg' @@ -113,6 +113,43 @@ const Navbar = ({ noScroll }: { noScroll?: boolean }) => { + +
  • + +
  • @@ -232,6 +269,14 @@ const Navbar = ({ noScroll }: { noScroll?: boolean }) => { > + + + +} + +const StatusIcon = ({ status }: { status: string }) => { + switch (status) { + case 'operational': + return + case 'degraded': + case 'partial_outage': + return + case 'major_outage': + return + case 'under_maintenance': + return + default: + return + } +} + +const getStatusColor = (status: string) => { + switch (status) { + case 'operational': + return 'bg-green-100 text-green-800 border-green-200 dark:bg-green-900/20 dark:text-green-300 dark:border-green-800' + case 'degraded': + case 'partial_outage': + return 'bg-yellow-100 text-yellow-800 border-yellow-200 dark:bg-yellow-900/20 dark:text-yellow-300 dark:border-yellow-800' + case 'major_outage': + return 'bg-red-100 text-red-800 border-red-200 dark:bg-red-900/20 dark:text-red-300 dark:border-red-800' + case 'under_maintenance': + return 'bg-blue-100 text-blue-800 border-blue-200 dark:bg-blue-900/20 dark:text-blue-300 dark:border-blue-800' + default: + return 'bg-gray-100 text-gray-800 border-gray-200 dark:bg-gray-900/20 dark:text-gray-300 dark:border-gray-800' + } +} + +const getStatusText = (status: string) => { + switch (status) { + case 'operational': + return 'All Systems Operational' + case 'degraded': + return 'Degraded Performance' + case 'partial_outage': + return 'Partial Service Outage' + case 'major_outage': + return 'Major Service Outage' + case 'under_maintenance': + return 'Under Maintenance' + default: + return 'Status Unknown' + } +} + +export const OpenAIStatusChecker: React.FC = () => { + const [statusData, setStatusData] = useState(null) + const [loading, setLoading] = useState(true) + const [error, setError] = useState(null) + const [lastRefresh, setLastRefresh] = useState(new Date()) + + const fetchStatus = useCallback(async () => { + setLoading(true) + setError(null) + + try { + console.log('Fetching real OpenAI status...') + + // Use CORS proxy to fetch real OpenAI status + const proxyUrl = 'https://api.allorigins.win/get?url=' + const targetUrl = 'https://status.openai.com/api/v2/status.json' + + const response = await fetch(proxyUrl + encodeURIComponent(targetUrl)) + + if (!response.ok) { + throw new Error(`Proxy returned ${response.status}`) + } + + const proxyData = await response.json() + const openaiData = JSON.parse(proxyData.contents) + + console.log('Real OpenAI data received:', openaiData) + + // Transform real OpenAI data to our format + const transformedData: StatusData = { + status: mapOpenAIStatusClient( + openaiData.status?.indicator || 'operational' + ), + lastUpdated: openaiData.page?.updated_at || new Date().toISOString(), + incidents: (openaiData.incidents || []).slice(0, 3), + } + + setStatusData(transformedData) + setLastRefresh(new Date()) + console.log('✅ Real OpenAI status loaded successfully!') + } catch (err) { + console.error('Failed to fetch real status:', err) + + // Fallback: try alternative proxy + try { + console.log('Trying alternative proxy...') + const altResponse = await fetch( + `https://cors-anywhere.herokuapp.com/https://status.openai.com/api/v2/summary.json` + ) + + if (altResponse.ok) { + const altData = await altResponse.json() + setStatusData({ + status: mapOpenAIStatusClient( + altData.status?.indicator || 'operational' + ), + lastUpdated: new Date().toISOString(), + incidents: [], + }) + setLastRefresh(new Date()) + console.log('✅ Alternative proxy worked!') + return + } + } catch (altErr) { + console.log('Alternative proxy also failed') + } + + // Final fallback + 
setError('Unable to fetch real-time status') + setStatusData({ + status: 'operational' as const, + lastUpdated: new Date().toISOString(), + incidents: [], + }) + setLastRefresh(new Date()) + console.log('Using fallback status') + } finally { + setLoading(false) + } + }, []) + + // Client-side status mapping function + const mapOpenAIStatusClient = (indicator: string): StatusData['status'] => { + switch (indicator.toLowerCase()) { + case 'none': + case 'operational': + return 'operational' + case 'minor': + return 'degraded' + case 'major': + return 'partial_outage' + case 'critical': + return 'major_outage' + case 'maintenance': + return 'under_maintenance' + default: + return 'operational' as const // Default to operational + } + } + + useEffect(() => { + fetchStatus() + // Refresh every 2 minutes for more real-time updates + const interval = setInterval(fetchStatus, 2 * 60 * 1000) + return () => clearInterval(interval) + }, [fetchStatus]) + + const handleRefresh = () => { + fetchStatus() + } + + if (loading && !statusData) { + return ( +
    +
    + + + Checking OpenAI Status... + +
    +
    + ) + } + + if (error) { + return ( +
    +
    +
    + +
    +

    + Unable to Check Status +

    +

    {error}

    +
    +
    + +
    +
    + ) + } + + return ( +
    + ) +} diff --git a/docs/src/pages/api-reference/installation.mdx b/docs/src/pages/api-reference/installation.mdx index de0609a08..266962089 100644 --- a/docs/src/pages/api-reference/installation.mdx +++ b/docs/src/pages/api-reference/installation.mdx @@ -3,7 +3,7 @@ title: Installation description: Install and deploy Jan Server on Kubernetes using minikube and Helm. --- -## Prerequisites +# Prerequisites Jan Server requires the following tools installed on your system: diff --git a/docs/src/pages/docs/_meta.json b/docs/src/pages/docs/_meta.json index 5b6962032..ee07f5d54 100644 --- a/docs/src/pages/docs/_meta.json +++ b/docs/src/pages/docs/_meta.json @@ -9,7 +9,7 @@ }, "desktop": { "type": "page", - "title": "Jan Desktop & Mobile" + "title": "Jan Desktop" }, "server": { "type": "page", diff --git a/docs/src/pages/docs/desktop/_meta.json b/docs/src/pages/docs/desktop/_meta.json index 36c70cf27..1745297cb 100644 --- a/docs/src/pages/docs/desktop/_meta.json +++ b/docs/src/pages/docs/desktop/_meta.json @@ -42,6 +42,5 @@ }, "settings": "Settings", "data-folder": "Jan Data Folder", - "troubleshooting": "Troubleshooting", - "privacy": "Privacy" + "troubleshooting": "Troubleshooting" } diff --git a/docs/src/pages/docs/desktop/index.mdx b/docs/src/pages/docs/desktop/index.mdx index a6ebed221..852f097a5 100644 --- a/docs/src/pages/docs/desktop/index.mdx +++ b/docs/src/pages/docs/desktop/index.mdx @@ -22,228 +22,52 @@ keywords: import { Callout } from 'nextra/components' import FAQBox from '@/components/FaqBox' -# Jan - -![Jan's Cover Image](./_assets/jan-app-new.png) - -## Jan's Goal - -> We're working towards open superintelligence to make a viable open-source alternative to platforms like ChatGPT -and Claude that anyone can own and run. - -## What is Jan Today - -Jan is an open-source AI platform that runs on your hardware. We believe AI should be in the hands of many, not -controlled by a few tech giants. - -Today, Jan is: -- **A desktop app** that runs AI models locally or connects to cloud providers -- **A model hub** making the latest open-source models accessible -- **A connector system** that lets AI interact with real-world tools via MCP - -Tomorrow, Jan aims to be a complete ecosystem where open models rival or exceed closed alternatives. +# Overview -We're building this with the open-source AI community, using the best available tools, and sharing everything -we learn along the way. +We're building [Open Superintelligence](https://jan.ai/handbook/open-superintelligence) together. 
-## The Jan Ecosystem +Jan is an open-source replacement for ChatGPT: +- AI Models: Use AI models with agentic capabilities + - [Open-source Models](/docs/desktop/manage-models): Run open-source locally + - [Cloud Models](/docs/desktop/remote-models/anthropic): Connect to remote models with API keys +- [Assistants](/docs/desktop/assistants): Create custom AI assistants +- [MCP Servers](/docs/desktop/mcp): Integrate MCP Servers to give agentic capabilities to AI models +- Jan Hub: Browse, install, and [manage models](/docs/desktop/manage-models) +- Local API Server: Expose an [OpenAI-compatible API](/docs/desktop/api-server) from your own machine or server -### Jan Apps -**Available Now:** -- **Desktop**: Full-featured AI workstation for Windows, Mac, and Linux +## Product Suite -**Coming Late 2025:** -- **Mobile**: Jan on your phone -- **Web**: Browser-based access at jan.ai -- **Server**: Self-hosted for teams -- **Extensions**: Browser extension for Chrome-based browsers +Jan is a full [product suite](https://en.wikipedia.org/wiki/Software_suite) that offers an alternative to Big AI: +- [Jan Desktop](/docs/desktop/quickstart): macOS, Windows, and Linux apps with offline mode +- [Jan Web](https://chat.jan.ai): Jan on browser, a direct alternative to chatgpt.com +- Jan Mobile: iOS and Android apps (Coming Soon) +- [Jan Server](/docs/server): deploy locally, in your cloud, or on-prem +- [Jan Models](/docs/models): Open-source models optimized for deep research, tool use, and reasoning -### Jan Model Hub -Making open-source AI accessible to everyone: -- **Easy Downloads**: One-click model installation -- **Jan Models**: Our own models optimized for local use - - **Jan-v1**: 4B reasoning model specialized in web search - - **Research Models** - - **Jan-Nano (32k/128k)**: 4B model for web search with MCP tools - - **Lucy**: 1.7B mobile-optimized for web search -- **Community Models**: Any GGUF from Hugging Face works in Jan -- **Cloud Models**: Connect your API keys for OpenAI, Anthropic, Gemini, and more +### Extending Jan (Coming Soon) +Jan helps you customize and align Open Superintelligence: +- Jan Connectors: Extend Jan with integrations +- Jan Studio: Fine-tune, align, and guardrail +- Evals: Benchmark models across industries, regions, and alignment dimensions +## Principles -### Jan Connectors Hub -Connect AI to the tools you use daily via [Model Context Protocol](./mcp): +- [Open source](https://www.redhat.com/en/blog/open-source-culture-9-core-principles-and-values): [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0) licensed, built in public. +- No [vendor lock-in](https://en.wikipedia.org/wiki/Vendor_lock-in): Switch freely between local and frontier models. +- [Right to Repair](https://en.wikipedia.org/wiki/Right_to_repair): Inspect, audit, and modify your AI stack. 
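The "Local API Server" item in the list above refers to Jan's OpenAI-compatible endpoint, which the README in this diff keeps at `localhost:1337`. As a minimal sketch of what "OpenAI-compatible" means in practice, a client can send a standard chat-completions request to that port; the `/v1/chat/completions` path is assumed from the OpenAI convention, and the model id below is a placeholder for whatever model is actually downloaded in Jan:

```bash
# Minimal sketch: query Jan's local OpenAI-compatible server.
# Assumes the default port 1337 mentioned in the README; the model id is a placeholder.
curl http://localhost:1337/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "jan-nano-4b",
        "messages": [{"role": "user", "content": "Say hello from the local API."}]
      }'
```

This is the same server the README describes as working with tools like Continue and Cline.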
-**Creative & Design:** -- **Canva**: Generate and edit designs - -**Data & Analysis:** -- **Jupyter**: Run Python notebooks -- **E2B**: Execute code in sandboxes - -**Web & Search:** -- **Browserbase & Browser Use**: Browser automation -- **Exa, Serper, Perplexity**: Advanced web search -- **Octagon**: Deep research capabilities - -**Productivity:** -- **Linear**: Project management -- **Todoist**: Task management - -## Core Features - -- **Run Models Locally**: Download any GGUF model from Hugging Face, use OpenAI's gpt-oss models, -or connect to cloud providers -- **OpenAI-Compatible API**: Local server at `localhost:1337` works with tools like -[Continue](./server-examples/continue-dev) and [Cline](https://cline.bot/) -- **Extend with MCP Tools**: Browser automation, web search, data analysis, and design tools, all -through natural language -- **Your Choice of Infrastructure**: Run on your laptop, self-host on your servers (soon), or use -cloud when you need it - -## Philosophy - -Jan is built to be user-owned: -- **Open Source**: Apache 2.0 license -- **Local First**: Your data stays on your device. Internet is optional -- **Privacy Focused**: We don't collect or sell user data. See our [Privacy Policy](./privacy) -- **No Lock-in**: Export your data anytime. Use any model. Switch between local and cloud - - -The best AI is the one you control. Not the one that others control for you. - - -## The Path Forward - -### What Works Today -- Run powerful models locally on consumer hardware -- Connect to any cloud provider with your API keys -- Use MCP tools for real-world tasks -- Access transparent model evaluations - -### What We're Building -- More specialized models that excel at specific tasks -- Expanded app ecosystem (mobile, web, extensions) -- Richer connector ecosystem -- An evaluation framework to build better models - -### The Long-Term Vision -We're working towards open superintelligence where: -- Open models match or exceed closed alternatives -- Anyone can run powerful AI on their own hardware -- The community drives innovation, not corporations -- AI capabilities are owned by users, not rented - - -This is an ambitious goal without a guaranteed path. We're betting on the open-source community, improved -hardware, and better techniques, but we're honest that this is a journey, not a destination we've reached. - - -## Quick Start - -1. [Download Jan](./quickstart) for your operating system -2. Choose a model - download locally or add cloud API keys -3. Start chatting or connect tools via MCP -4. Build with our [local API](./api-server) +Jan grows through contribution. It is shaped by many and belongs to everyone who uses it. ## Acknowledgements -Jan is built on the shoulders of giants: -- [Llama.cpp](https://github.com/ggerganov/llama.cpp) for inference -- [Model Context Protocol](https://modelcontextprotocol.io) for tool integration -- The open-source community that makes this possible +> Good artists copy, great artists steal. -## FAQs +Jan exists because we've borrowed, learned, and built on the work of others. - - Jan is an open-source AI platform working towards a viable alternative to Big Tech AI. Today it's a desktop app that runs models locally or connects to cloud providers. Tomorrow it aims to be a complete ecosystem rivaling platforms like ChatGPT and Claude. - - - - Other platforms are models behind APIs you rent. Jan is a complete AI ecosystem you own. Run any model, use real tools through MCP, keep your data private, and never pay subscriptions for local use. 
- - - - **Jan Models:** - - Jan-Nano (32k/128k) - Research and analysis with MCP integration - - Lucy - Mobile-optimized search (1.7B) - - Jan-v1 - Reasoning and tool use (4B) - - **Open Source:** - - OpenAI's gpt-oss models (120b and 20b) - - Any GGUF model from Hugging Face - - **Cloud (with your API keys):** - - OpenAI, Anthropic, Mistral, Groq, and more - - - - MCP (Model Context Protocol) lets AI interact with real applications. Instead of just generating text, your AI can create designs in Canva, analyze data in Jupyter, browse the web, and execute code - all through conversation. - - - - **Supported OS**: - - [Windows 10+](/docs/desktop/install/windows#compatibility) - - [macOS 12+](/docs/desktop/install/mac#compatibility) - - [Linux (Ubuntu 20.04+)](/docs/desktop/install/linux) - - **Hardware**: - - Minimum: 8GB RAM, 10GB storage - - Recommended: 16GB RAM, GPU (NVIDIA/AMD/Intel/Apple), 50GB storage - - - - Honestly? It's ambitious and uncertain. We believe the combination of rapidly improving open models, better consumer hardware, community innovation, and specialized models working together can eventually rival closed platforms. But this is a multi-year journey with no guarantees. What we can guarantee is that we'll keep building in the open, with the community, towards this goal. - - - - Right now, Jan can: - - Run models like Llama, Mistral, and our own Jan models locally - - Connect to cloud providers if you want more power - - Use MCP tools to create designs, analyze data, browse the web, and more - - Work completely offline once models are downloaded - - Provide an OpenAI-compatible API for developers - - - - **Local use**: Always free, no catches - **Cloud models**: You pay providers directly (we add no markup) - **Jan cloud**: Optional paid services coming 2025 - - The core platform will always be free and open source. - - - - - Runs 100% offline once models are downloaded - - All data stored locally in [Jan Data Folder](/docs/desktop/data-folder) - - No telemetry without explicit consent - - Open source code you can audit - - - When using cloud providers through Jan, their privacy policies apply. - - - - - Yes. Download directly or build from [source](https://github.com/menloresearch/jan). Jan Server for production deployments coming late 2025. - - - - - **Jan Web**: Beta late 2025 - - **Jan Mobile**: Late 2025 - - **Jan Server**: Late 2025 - - All versions will sync seamlessly. - - - - - Code: [GitHub](https://github.com/menloresearch/jan) - - Community: [Discord](https://discord.gg/FTk2MvZwJH) - - Testing: Help evaluate models and report bugs - - Documentation: Improve guides and tutorials - - - - Yes! We love hiring from our community. Check [Careers](https://menlo.bamboohr.com/careers). 
- +- [llama.cpp](https://github.com/ggerganov/llama.cpp) and [GGML](https://github.com/ggerganov/ggml) for efficient inference +- [r/LocalLLaMA](https://www.reddit.com/r/LocalLLaMA/) for ideas, feedback, and debate +- [Model Context Protocol](https://modelcontextprotocol.io) for MCP integrations +- [PostHog](https://posthog.com/docs) for docs inspiration +- The open-source community for contributions, bug reports, and improvements diff --git a/docs/src/pages/docs/desktop/install/linux.mdx b/docs/src/pages/docs/desktop/install/linux.mdx index 2d42a59f1..2d9d39f9f 100644 --- a/docs/src/pages/docs/desktop/install/linux.mdx +++ b/docs/src/pages/docs/desktop/install/linux.mdx @@ -1,11 +1,12 @@ --- title: Linux -description: Get started quickly with Jan, an AI chat application that runs 100% offline on your desktop & mobile (*coming soon*). +description: Download Jan on Linux to run AI models locally. Jan is a free, open-source ChatGPT alternative to run offline. keywords: [ Jan, Customizable Intelligence, LLM, local AI, + Jan on Linux, privacy focus, free and open source, private and offline, @@ -18,15 +19,17 @@ keywords: installation, "desktop" ] +twitter: + card: summary_large_image + site: "@jandotai" + title: "Jan on Linux" + description: "Download Jan on Linux to run AI models locally. Jan is a free, open-source ChatGPT alternative to run offline." --- - import FAQBox from '@/components/FaqBox' import { Tabs, Callout, Steps } from 'nextra/components' import { Settings } from 'lucide-react' - - # Linux Installation Instructions for installing Jan on Linux. @@ -244,7 +247,7 @@ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64 ### Step 2: Enable GPU Acceleration 1. Navigate to **Settings** () > **Local Engine** > **Llama.cpp** -2. Select appropriate backend in **llama-cpp Backend**. Details in our [guide](/docs/desktop/local-engines/llama-cpp). +2. Select appropriate backend in **llama-cpp Backend**. Details in our [llama.cpp guide](/docs/desktop/llama-cpp). CUDA offers better performance than Vulkan. diff --git a/docs/src/pages/docs/desktop/install/mac.mdx b/docs/src/pages/docs/desktop/install/mac.mdx index 827329d6e..b784f2ecf 100644 --- a/docs/src/pages/docs/desktop/install/mac.mdx +++ b/docs/src/pages/docs/desktop/install/mac.mdx @@ -1,11 +1,11 @@ --- title: Mac -description: Get started quickly with Jan - a local AI that runs on your computer. Install Jan and pick your model to start chatting. -keywords: +description: Download Jan on Mac to run AI models locally. Jan is a free, open-source ChatGPT alternative to run offline. [ Jan, Customizable Intelligence, LLM, local AI, + Jan on Mac, privacy focus, free and open source, private and offline, @@ -18,6 +18,11 @@ keywords: installation, "desktop" ] +twitter: + card: summary_large_image + site: "@jandotai" + title: "Jan on Mac" + description: "Download Jan on Mac to run AI models locally. Jan is a free, open-source ChatGPT alternative to run offline." --- import { Tabs } from 'nextra/components' diff --git a/docs/src/pages/docs/desktop/install/windows.mdx b/docs/src/pages/docs/desktop/install/windows.mdx index 2c56e2319..6e858a2b5 100644 --- a/docs/src/pages/docs/desktop/install/windows.mdx +++ b/docs/src/pages/docs/desktop/install/windows.mdx @@ -1,10 +1,11 @@ --- title: Windows -description: Run AI models locally on your Windows machine with Jan. Quick setup guide for local inference and chat. +description: Download Jan on Windows to run AI models locally. Jan is a free, open-source ChatGPT alternative to run offline. 
keywords: [ Jan, Customizable Intelligence, LLM, + Jan on Windows, local AI, privacy focus, free and open source, @@ -18,6 +19,11 @@ keywords: installation, "desktop" ] +twitter: + card: summary_large_image + site: "@jandotai" + title: "Jan on Windows" + description: "Download Jan on Windows to run AI models locally. Jan is a free, open-source ChatGPT alternative to run offline." --- import { Tabs, Callout, Steps } from 'nextra/components' diff --git a/docs/src/pages/docs/desktop/jan-models/jan-nano-32.mdx b/docs/src/pages/docs/desktop/jan-models/jan-nano-32.mdx index b216f3b96..5f1446e42 100644 --- a/docs/src/pages/docs/desktop/jan-models/jan-nano-32.mdx +++ b/docs/src/pages/docs/desktop/jan-models/jan-nano-32.mdx @@ -59,7 +59,7 @@ The model and its different model variants are fully supported by Jan. ## Using Jan-Nano-32k **Step 1** -Download Jan from [here](https://jan.ai/docs/desktop/). +Download Jan from [here](https://jan.ai/download/). **Step 2** Go to the Hub Tab, search for Jan-Nano-Gguf, and click on the download button to the best model size for your system. @@ -118,8 +118,8 @@ Here are some example queries to showcase Jan-Nano's web search capabilities: - 4xA6000 for vllm server (inferencing) - What frontend should I use? - - Jan Beta (recommended) - Minimalistic and polished interface - - Download link: https://jan.ai/docs/desktop/beta + - Jan (recommended) + - Download link: https://jan.ai/download - Getting Jinja errors in LM Studio? - Use Qwen3 template from other LM Studio compatible models diff --git a/docs/src/pages/docs/desktop/server-examples/tabby.mdx b/docs/src/pages/docs/desktop/server-examples/tabby.mdx index 917f40550..f25c89dab 100644 --- a/docs/src/pages/docs/desktop/server-examples/tabby.mdx +++ b/docs/src/pages/docs/desktop/server-examples/tabby.mdx @@ -90,7 +90,7 @@ Refer to the following documentation to install the Tabby extension on your favo Tabby offers an [Answer Engine](https://tabby.tabbyml.com/docs/administration/answer-engine/) on the homepage, which can leverage the Jan LLM and related contexts like code, documentation, and web pages to answer user questions. -Simply open the Tabby homepage at [localhost:8080](http://localhost:8080) and ask your questions. +Simply open the Tabby homepage at http://localhost:8080 and ask your questions. ### IDE Chat Sidebar diff --git a/docs/src/pages/docs/desktop/settings.mdx b/docs/src/pages/docs/desktop/settings.mdx index 6bc750f43..cd4d01ede 100644 --- a/docs/src/pages/docs/desktop/settings.mdx +++ b/docs/src/pages/docs/desktop/settings.mdx @@ -108,7 +108,7 @@ You can help improve Jan by sharing anonymous usage data: 2. You can change this setting at any time -Read more about that we collect with opt-in users at [Privacy](/docs/desktop/privacy). +Read more about that we collect with opt-in users at [Privacy](/privacy).
    @@ -141,7 +141,7 @@ This action cannot be undone. ### Jan Data Folder -Jan stores your data locally in your own filesystem in a universal file format. See detailed [Jan Folder Structure](docs/data-folder#folder-structure). +Jan stores your data locally in your own filesystem in a universal file format. See detailed [Jan Folder Structure](/docs/desktop/data-folder#directory-structure). **1. Open Jan Data Folder** diff --git a/docs/src/pages/docs/desktop/troubleshooting.mdx b/docs/src/pages/docs/desktop/troubleshooting.mdx index 16bbdfa9a..6d6c02703 100644 --- a/docs/src/pages/docs/desktop/troubleshooting.mdx +++ b/docs/src/pages/docs/desktop/troubleshooting.mdx @@ -328,14 +328,14 @@ This command ensures that the necessary permissions are granted for Jan's instal When you start a chat with a model and encounter a **Failed to Fetch** or **Something's Amiss** error, here are some possible solutions to resolve it: **1. Check System & Hardware Requirements** -- Hardware dependencies: Ensure your device meets all [hardware requirements](docs/desktop/troubleshooting#step-1-verify-hardware-and-system-requirements) -- OS: Ensure your operating system meets the minimum requirements ([Mac](/docs/desktop/install/mac#minimum-requirements), [Windows](/docs/desktop/install/windows#compatibility), [Linux](/docs/desktop/install/linux#compatibility)) +- Hardware dependencies: Ensure your device meets all [hardware requirements](troubleshooting) +- OS: Ensure your operating system meets the minimum requirements ([Mac](https://www.jan.ai/docs/desktop/install/mac#minimum-requirements), [Windows](/windows#compatibility), [Linux](https://www.jan.ai/docs/desktop/install/linux#compatibility) - RAM: Choose models that use less than 80% of your available RAM - For 8GB systems: Use models under 6GB - For 16GB systems: Use models under 13GB **2. Check Model Parameters** -- In **Engine Settings** in right sidebar, check your `ngl` ([number of GPU layers](/docs/desktop/models/model-parameters#engine-parameters)) setting to see if it's too high +- In **Engine Settings** in right sidebar, check your `ngl` ([number of GPU layers](/docs/desktop/model-parameters)) setting to see if it's too high - Start with a lower NGL value and increase gradually based on your GPU memory **3. Port Conflicts** diff --git a/docs/src/pages/handbook/_meta.json b/docs/src/pages/handbook/_meta.json index 482ca4f93..4a44a889f 100644 --- a/docs/src/pages/handbook/_meta.json +++ b/docs/src/pages/handbook/_meta.json @@ -1,5 +1,4 @@ { "index": "Overview", - "open-superintelligence": "Open Superintelligence", - "betting-on-open-source": "Betting on Open-Source" + "why": "Why does Jan exist?" } diff --git a/docs/src/pages/handbook/index.mdx b/docs/src/pages/handbook/index.mdx index 2c64eff72..64f0abdf8 100644 --- a/docs/src/pages/handbook/index.mdx +++ b/docs/src/pages/handbook/index.mdx @@ -18,31 +18,6 @@ Jan's Handbook is a [living document](https://en.wikipedia.org/wiki/Living_docum ## Why does Jan exist? -### [Open Superintelligence](/handbook/open-superintelligence) -Building superintelligence that belongs to everyone, not just a few tech giants. We believe the future of AI should be open, accessible, and owned by the people who use it. - -### [Betting on Open-Source](/handbook/betting-on-open-source) +- [Open Superintelligence](/handbook/open-superintelligence) - Building superintelligence that belongs to everyone, not just a few tech giants. We believe the future of AI should be open, accessible, and owned by the people who use it. 
+- [Betting on Open-Source](/handbook/betting-on-open-source) Why we're betting on open-source as the future of AI and technology. Open-source has consistently won in the long term, and AI will be no different. - ---- - -## Quick Links - -- **For the curious**: Start with [Open Superintelligence](/handbook/open-superintelligence) -- **For developers**: Learn about [Betting on Open-Source](/handbook/betting-on-open-source) -- **For contributors**: Check out our [GitHub](https://github.com/menloresearch/jan) and [Discord](https://discord.gg/FTk2MvZwJH) - -## Our North Star - -We're building superintelligence that: - -- **Works anywhere**: From your laptop to your data center -- **Belongs to you**: Download it, own it, modify it -- **Scales infinitely**: One person or ten thousand, same platform -- **Improves constantly**: Community-driven development - -This isn't just about making AI accessible. It's about ensuring the most transformative technology in human history can be owned by those who use it. - ---- - -_"The future of AI isn't about choosing between local or cloud. It's about having both, and everything in between, working perfectly together."_ diff --git a/docs/src/pages/handbook/why/_meta.json b/docs/src/pages/handbook/why/_meta.json new file mode 100644 index 000000000..b201b2e5f --- /dev/null +++ b/docs/src/pages/handbook/why/_meta.json @@ -0,0 +1,4 @@ +{ + "open-superintelligence": "Why Jan exists", + "betting-on-open-source": "Why we're betting on open-source" +} diff --git a/docs/src/pages/handbook/betting-on-open-source.mdx b/docs/src/pages/handbook/why/betting-on-open-source.mdx similarity index 73% rename from docs/src/pages/handbook/betting-on-open-source.mdx rename to docs/src/pages/handbook/why/betting-on-open-source.mdx index a0560d53e..bac971cbc 100644 --- a/docs/src/pages/handbook/betting-on-open-source.mdx +++ b/docs/src/pages/handbook/why/betting-on-open-source.mdx @@ -1,11 +1,11 @@ --- -title: "Why Open-Source" +title: "Why Jan is betting on Open-Source" description: "Why we're betting on open-source." --- # Why Open-Source -AI today is concentrated in the hands of a few companies. They ask for trust, while keeping the levers of control hidden. We think that's a mistake. +AI today is concentrated in the hands of [a few companies](https://stratechery.com/2025/tech-philosophy-and-ai-opportunity/). They ask for trust, while keeping the levers of control hidden. We think that's a mistake. When you depend on one vendor, your future is tied to their roadmap, their politics, their survival. If they get acquired, pivot, or shut down; you're stuck. @@ -16,9 +16,9 @@ Depending on a closed vendor means giving up more than flexibility: AI has become critical infrastructure. Nations, enterprises, even small teams rely on it to think and decide. And yet, control sits with a few vendors who decide the terms of access. We believe that's not control. That's dependency dressed up as convenience. One of the most powerful invention is being steered by a handful of executives. Their values shape what billions can say, build, or ask. -*This cannot stand. It must be changed.* +This can't stand. It must be changed. -## Jan's Bet +## How we see We don't believe the future of AI should be dictated by a few firms in San Francisco, Beijing, or anywhere else. 
@@ -30,4 +30,4 @@ That's why we're building Jan, a full product suite: - Jan Server - Hub, Store, evals, guardrails, the ecosystem around it -The goal is to be the open-source replacement for ChatGPT and other BigAI products, with models and tools you can run, own, and trust. +The goal is to be the [open-source replacement for ChatGPT](https://jan.ai/) and other BigAI products, with models and tools you can run, own, and trust. diff --git a/docs/src/pages/handbook/open-superintelligence.mdx b/docs/src/pages/handbook/why/open-superintelligence.mdx similarity index 67% rename from docs/src/pages/handbook/open-superintelligence.mdx rename to docs/src/pages/handbook/why/open-superintelligence.mdx index 5174f712b..68ab46311 100644 --- a/docs/src/pages/handbook/open-superintelligence.mdx +++ b/docs/src/pages/handbook/why/open-superintelligence.mdx @@ -5,9 +5,13 @@ description: "Short answer: Open Superintelligence." # Why does Jan exist? -> Short answer: Open Superintelligence. +import { Callout } from 'nextra/components' -In 1879, Edison lit a single street in [Menlo Park](https://en.wikipedia.org/wiki/Menlo_Park,_California). What mattered wasn't the bulb. It was that power could reach homes, schools, and factories. + +Short answer: Open Superintelligence. + + +In 1879, [Edison](https://en.wikipedia.org/wiki/Thomas_Edison) lit a single street in [Menlo Park](https://en.wikipedia.org/wiki/Menlo_Park,_California). What mattered wasn't the bulb. It was that power could reach homes, schools, and factories. Electricity changed the world only when it became universal. Standard plugs, cheap generation, lines everywhere. People stopped talking about electricity and started using light, cold chains, and machines. @@ -19,13 +23,13 @@ Jan exists to push intelligence toward the first path: Open Superintelligence yo > The world is made, and can be remade. -Every industrial wave redefined critical aspects of our daily lives: -- Factories introduced shift clocks and wage rhythms -- Steam gave way to electricity and standardized parts -- Rail, telegraph, and later networks changed how decisions travel -- Each wave pulled new bargains into being skills, schools, safety nets, labor law +Every industrial wave redefined new defaults of our daily lives: +- [Factories](https://en.wikipedia.org/wiki/Factory) created the modern job +- [Electricity](https://en.wikipedia.org/wiki/Electricity) created the modern home +- [Railroads](https://en.wikipedia.org/wiki/Rail_transport#History) and [telegraphs](https://en.wikipedia.org/wiki/Telegraphy#History) created the modern nation +- [The Internet](https://en.wikipedia.org/wiki/Internet) created the modern world -So what we're interested in is who is going to write the new defaults and share in the gains. +Open Superintelligence will create what comes next. What we're interested in is who is going to write the new defaults and share in the gains. Technology doesn’t choose its path, people do. 
Power accrues to whoever designs, deploys, and profits from the system: - If intelligence is closed and centralized, the gains concentrate diff --git a/docs/src/pages/post/_assets/claude.jpeg b/docs/src/pages/post/_assets/claude.jpeg new file mode 100644 index 000000000..1b5dc1158 Binary files /dev/null and b/docs/src/pages/post/_assets/claude.jpeg differ diff --git a/docs/src/pages/post/_assets/create-assistant-1.jpeg b/docs/src/pages/post/_assets/create-assistant-1.jpeg new file mode 100644 index 000000000..4c005b6ed Binary files /dev/null and b/docs/src/pages/post/_assets/create-assistant-1.jpeg differ diff --git a/docs/src/pages/post/_assets/gemini.jpeg b/docs/src/pages/post/_assets/gemini.jpeg new file mode 100644 index 000000000..acd333607 Binary files /dev/null and b/docs/src/pages/post/_assets/gemini.jpeg differ diff --git a/docs/src/pages/post/_assets/jan-assistant-for-law.png b/docs/src/pages/post/_assets/jan-assistant-for-law.png new file mode 100644 index 000000000..298d92f0f Binary files /dev/null and b/docs/src/pages/post/_assets/jan-assistant-for-law.png differ diff --git a/docs/src/pages/post/_assets/jan-for-ai-law-assistant-chat.jpeg b/docs/src/pages/post/_assets/jan-for-ai-law-assistant-chat.jpeg new file mode 100644 index 000000000..5501b5e70 Binary files /dev/null and b/docs/src/pages/post/_assets/jan-for-ai-law-assistant-chat.jpeg differ diff --git a/docs/src/pages/post/_assets/lm-studio.jpeg b/docs/src/pages/post/_assets/lm-studio.jpeg new file mode 100644 index 000000000..ee6ee3825 Binary files /dev/null and b/docs/src/pages/post/_assets/lm-studio.jpeg differ diff --git a/docs/src/pages/post/_assets/perplexity.jpeg b/docs/src/pages/post/_assets/perplexity.jpeg new file mode 100644 index 000000000..6a73f9b55 Binary files /dev/null and b/docs/src/pages/post/_assets/perplexity.jpeg differ diff --git a/docs/src/pages/post/_meta.json b/docs/src/pages/post/_meta.json index e5472dd61..1962ffa9e 100644 --- a/docs/src/pages/post/_meta.json +++ b/docs/src/pages/post/_meta.json @@ -20,5 +20,10 @@ "title": "Research", "display": "normal", "href": "/blog?category=research" + }, + "guides-cat": { + "title": "Guides", + "display": "normal", + "href": "/blog?category=guides" } } diff --git a/docs/src/pages/post/ai-for-law.mdx b/docs/src/pages/post/ai-for-law.mdx new file mode 100644 index 000000000..a42b70ced --- /dev/null +++ b/docs/src/pages/post/ai-for-law.mdx @@ -0,0 +1,123 @@ +--- +title: "Private AI for legal professionals who need confidentiality" +description: "It's possible to use AI without risking client data. Jan helps lawyers save time while keeping clients safe." +tags: AI, ai for law, ai for lawyers, ChatGPT alternative, Jan, local AI, offline AI +categories: guides +date: 2025-09-30 +ogImage: assets/images/general/jan-for-ai-law-assistant-chat.jpeg +twitter: + card: summary_large_image + site: "@jandotai" + title: "Private AI for legal professionals who need confidentiality" + description: "It's possible to use AI without risking client data. Jan helps lawyers save time while keeping clients safe." + image: assets/images/general/jan-assistants-ai-for-legal.jpeg +--- +import { Callout } from 'nextra/components' +import CTABlog from '@/components/Blog/CTA' +import { OpenAIStatusChecker } from '@/components/OpenAIStatusChecker' + +# Private AI for legal professionals who need confidentiality + +![AI for Law](/assets/images/general/jan-for-ai-law-assistant-chat.jpeg) + +Yes, it's possible to use AI in legal work without risking client data. 
+ + +Client trust depends on privacy. Sending documents into public AI tools risks compliance and reputation. + + +Start by [downloading Jan](/download) and installing the **Jan v1 model**. Once installed, you can create assistants tailored to your practice and keep contracts, case notes, and client files under your control. + + +**Why use Jan for legal tasks** +- Runs locally on your hardware, no cloud uploads +- Keeps chats and interactions private +- Works offline once installed +- Lets you build assistants for your own workflows + + +--- + +## Create your assistant + +Once Jan is installed with the **Jan v1 model**, onboarding will guide you through downloading and setup. + +Click **Create assistant** to start: +![Create your first AI assistant in Jan](./_assets/create-assistant-1.jpeg) +*Create your first assistant in Jan* + +Add an assistant name and prompt: +![Jan assistant for contract review](./_assets/jan-assistant-for-law.png) +*Example of a Jan assistant for contract review* + +You can create assistants using specific prompts. Below are examples for common legal workflows. + +--- + +## Contract review assistant + +AI can help lawyers move faster through long contracts by pointing out what matters most. + +**Prompt for Jan:** +> You are a contract review assistant. +> When I paste a contract: +> - Highlight risky or unusual clauses +> - Flag ambiguous or missing terms +> - Summarize the agreement in plain English for a non-lawyer client +> Format your response with sections: **Risks**, **Ambiguities/Missing**, **Summary**. +> Do not provide legal advice. + +--- + +## Drafting assistant + +Use AI to create first drafts of NDAs, service agreements, or client letters. You still refine the output, but AI saves time on boilerplate. + +**Prompt for Jan:** +> You are a drafting assistant. +> When asked to draft a legal agreement or client letter: +> - Produce a professional first version +> - Use clear, concise language +> - Leave placeholders like [Party Name], [Date], [Amount] for details +> - Structure output with headings, numbered clauses, and consistent formatting +> Do not provide legal advice. + +--- + +## Case preparation assistant + +Case prep often means reading hundreds of pages. AI can turn depositions, discovery files, or judgments into concise notes. + +![Jan legal case preparation assistant](./_assets/jan-for-ai-law-assistant-chat.jpeg) +*Jan chat interface for case preparation — process documents and extract key information* + +**Prompt for Jan:** +> You are a case preparation assistant. +> When I provide case materials: +> - Extract key facts, issues, and arguments +> - Present them as bullet points under headings: **Facts**, **Issues**, **Arguments** +> - Keep summaries concise (under 500 words unless I request more) +> Use plain English, no speculation or legal conclusions. + +--- + +## Knowledge management assistant + +Law firms accumulate memos, policies, and precedents. AI can help organize and retrieve them quickly. + +**Prompt for Jan:** +> You are a knowledge management assistant. +> When I ask questions about internal documents: +> - Return concise summaries or direct excerpts +> - Always cite the source (e.g., “Policy Manual, Section 4”) +> - If not found in provided material, reply “Not found in documents.” +> Do not invent information. + +--- + +## Final note + +AI in legal practice is not about replacing lawyers. It’s about handling repetitive tasks safely so you can focus on real decisions. 
+With private AI, you gain efficiency without compromising client confidentiality. + + diff --git a/docs/src/pages/post/ai-for-teachers.mdx b/docs/src/pages/post/ai-for-teachers.mdx new file mode 100644 index 000000000..cd2c2b60b --- /dev/null +++ b/docs/src/pages/post/ai-for-teachers.mdx @@ -0,0 +1,134 @@ +--- +title: "AI for teachers who care about student privacy" +description: "Use AI in teaching without risking student data. Jan helps teachers plan lessons, grade faster, and communicate with parents." +tags: AI, ai for teachers, ChatGPT alternative, Jan, local AI, offline AI, education +categories: guides +date: 2025-10-01 +ogImage: assets/images/general/ai-for-teacher.jpeg +twitter: + card: summary_large_image + site: "@jandotai" + title: "AI for teachers who care about student privacy" + description: "Use AI in teaching without risking student data. Jan helps teachers plan lessons, grade faster, and communicate with parents." + image: assets/images/general/ai-for-teacher.jpeg +--- +import { Callout } from 'nextra/components' +import CTABlog from '@/components/Blog/CTA' + +# AI for teachers who care about student privacy + +![AI for teachers](/assets/images/general/ai-for-teacher.jpeg) + +AI can help teachers handle the work that piles up outside class. It can draft a lesson outline, suggest feedback on essays, or turn notes into a polite parent email. These are the tasks that usually stretch into evenings and weekends. + + +Most AI tools like ChatGPT run in the cloud. Sharing lesson plans, student writing, or parent details there risks compliance and trust. + + +That's where Jan comes in: +- [Download Jan](/download) +- You get the same time-saving help +- Your data never leaves your device. + + +*See how teachers use Jan for AI-powered lesson planning and grading* + + +**Why use Jan for teaching** +- Runs locally, no cloud servers +- Keeps lesson plans and student data private +- Works offline once installed +- Lets you build assistants for your daily teaching tasks + + +--- + +## Create your assistant + +Once Jan is installed, click **Create assistant** and add one of the prompts below. Each assistant is for a specific classroom task. + +![Create your first AI assistant in Jan](/assets/images/general/assistants-ai-for-teachers.jpeg) + +--- + +## Lesson planning assistant + +AI can draft lesson outlines in minutes. You adapt and refine them for your students. + +**Prompt for Jan:** +> You are a lesson planning assistant. +> When I give you a topic or subject: +> - Suggest a lesson outline with objectives, activities, and discussion questions +> - Adjust for different grade levels if I specify +> - Keep plans practical and realistic for a classroom setting + +Example ask: For Grade 6 science on ecosystems. Objectives: define food chains, explain producer/consumer roles. Activity: group poster on an ecosystem. Questions: How would removing one species affect the whole system? + +--- + +## Grading support assistant + +AI won't replace your judgment, but it can make feedback faster and more consistent. + +**Prompt for Jan:** +> You are a grading support assistant. +> When I paste student writing or answers: +> - Highlight strengths and areas for improvement +> - Suggest short, constructive feedback I can reuse +> - Keep tone supportive and professional +> Do not assign final grades. + +Example: For a history essay. Strength: clear thesis. Improvement: weak evidence. Feedback: "Great thesis and structure. Next time, support your points with specific historical examples." 
+ +--- + +## Parent communication assistant + +Writing parent emails is important but time-consuming. + +**Prompt for Jan:** +> You are a parent communication assistant. +> When I give you key points about a student: +> - Draft a polite and empathetic email to parents +> - Use clear and professional language +> - Keep tone supportive, not overly formal +> Only include details I provide. + +Example: Notes: “Student is falling behind on homework, otherwise engaged in class.” - Output: a short, encouraging message suggesting a check-in at home. + +--- + +## Classroom resources assistant + +Generate quizzes, worksheets, or practice activities at short notice. + +**Prompt for Jan:** +> You are a classroom resource assistant. +> When I provide a topic or subject: +> - Generate sample quiz questions (multiple choice and short answer) +> - Suggest short practice activities +> - Provide answer keys separately +> Keep material age-appropriate for the level I specify. + +Example: For Grade 4 fractions. 5 multiple-choice questions with answer key, plus a quick worksheet with 3 practice problems. + +--- + +## Getting started + +1. [Download Jan](/download). +2. Install the Jan model (guided in-app) +3. Create your first assistant using one of the prompts above +4. Test with non-sensitive examples first +5. Use it in real classroom tasks once you're comfortable + +--- + +## Final note + +AI isn't here to replace teachers. It's here to take repetitive tasks off your plate so you can focus on teaching. With Jan, you can use AI confidently without risking student privacy. + + diff --git a/docs/src/pages/post/benchmarking-nvidia-tensorrt-llm.mdx b/docs/src/pages/post/benchmarking-nvidia-tensorrt-llm.mdx index 0d4bc9aa2..9fa67ea07 100644 --- a/docs/src/pages/post/benchmarking-nvidia-tensorrt-llm.mdx +++ b/docs/src/pages/post/benchmarking-nvidia-tensorrt-llm.mdx @@ -17,7 +17,7 @@ Jan now supports [NVIDIA TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) i We've been excited for TensorRT-LLM for a while, and [had a lot of fun implementing it](https://github.com/menloresearch/nitro-tensorrt-llm). As part of the process, we've run some benchmarks, to see how TensorRT-LLM fares on consumer hardware (e.g. [4090s](https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/), [3090s](https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/)) we commonly see in the [Jan's hardware community](https://discord.com/channels/1107178041848909847/1201834752206974996). - **Give it a try!** Jan's [TensorRT-LLM extension](/docs/desktop/built-in/tensorrt-llm) is available in Jan v0.4.9 and up ([see more](/docs/desktop/built-in/tensorrt-llm)). We precompiled some TensorRT-LLM models for you to try: `Mistral 7b`, `TinyLlama-1.1b`, `TinyJensen-1.1b` 😂 + **Give it a try!** Jan's TensorRT-LLM extension is available in Jan v0.4.9. We precompiled some TensorRT-LLM models for you to try: `Mistral 7b`, `TinyLlama-1.1b`, `TinyJensen-1.1b` 😂 Bugs or feedback? Let us know on [GitHub](https://github.com/menloresearch/jan) or via [Discord](https://discord.com/channels/1107178041848909847/1201832734704795688). diff --git a/docs/src/pages/post/chatgpt-alternatives.mdx b/docs/src/pages/post/chatgpt-alternatives.mdx new file mode 100644 index 000000000..36f44e5c3 --- /dev/null +++ b/docs/src/pages/post/chatgpt-alternatives.mdx @@ -0,0 +1,120 @@ +--- +title: "ChatGPT alternatives that actually replace it" +description: "See the best ChatGPT alternatives in 2025. We've listed tools that are alternatives to ChatGPT." 
+tags: AI, ChatGPT alternative, ChatGPT alternatives, alternative to chatgpt, Jan, local AI, privacy, open source, offline AI
+categories: guides
+date: 2025-09-29
+ogImage: assets/images/general/chatgpt-alternative-jan.jpeg
+twitter:
+  card: summary_large_image
+  site: "@jandotai"
+  title: "ChatGPT alternatives that actually replace it"
+  description: "See the best ChatGPT alternatives in 2025. We've listed tools that are alternatives to ChatGPT."
+  image: assets/images/general/chatgpt-alternative-jan.jpeg
+---
+import { Callout } from 'nextra/components'
+import CTABlog from '@/components/Blog/CTA'
+
+# Best ChatGPT Alternatives
+
+ChatGPT works well, but it always needs internet, has usage limits, and isn't private.
+
+If you want options that fit different needs, such as offline use, privacy, or specialized tasks, here are the best alternatives to ChatGPT for specific use cases.
+
+## Comparison: ChatGPT Alternatives
+
+| ChatGPT Alternative | Offline | Key Strength | Best For |
+| ------------------------- | ------- | ---------------------------- | -------------------------- |
+| **[Jan](https://jan.ai)** | Yes | Runs Cloud + Offline, open-source | Best overall ChatGPT replacement |
+| Claude | - | Strong writing and reasoning | Creative text & code |
+| Gemini | - | Integrated with Google | Research tasks, image generation |
+| Perplexity | - | Fast, with cited answers | Research and fact-checking |
+| LM Studio | Yes | Runs open models on PC | Coding and experiments |
+
+### Jan is the best ChatGPT alternative
+
+![Use Jan to chat with AI models without internet access](/assets/images/general/chatgpt-alternative-jan.jpeg)
+*Jan as an open-source alternative to ChatGPT*
+
+Jan is the most complete ChatGPT alternative available today. It enables:
+- AI use online & offline (even on a plane)
+- Agentic actions
+- MCP servers for tools
+
+Unlike ChatGPT, it runs on your computer, which means:
+- Offline AI capabilities (see the [Offline ChatGPT post](https://www.jan.ai/post/offline-chatgpt-alternative) for details)
+- 100% private
+- Open-source & free
+
+Jan is an [open-source replacement for ChatGPT](https://www.jan.ai/).
+
+### Claude is the most notable online alternative
+![Claude](./_assets/claude.jpeg)
+
+Claude has become the main online rival to ChatGPT. It stands out for writing, reasoning, and coding.
+
+- Handles very long documents and context well
+- Strong for essays, research papers, and structured text
+- Popular with developers for code explanations and debugging
+- Cloud-only, no offline mode
+- Filters outputs heavily, sometimes too restrictive
+
+### Gemini is Google's integrated alternative
+![Gemini](./_assets/gemini.jpeg)
+
+Gemini ties directly into Google's apps and search. Great for users in the Google ecosystem.
+
+- Built into Gmail, Docs, and Google Search
+- Good for real-time research and fact-checking
+- Strong at pulling web context into answers
+- Requires a Google account, fully online
+- Privacy concerns: all tied to Google services
+
+### Perplexity is the research-focused alternative
+![Perplexity](./_assets/perplexity.jpeg)
+
+Perplexity is built for fact-checking and quick research, not creativity.
+ +- Always cites sources for answers +- Strong at summarizing current web info +- Very fast for Q&A style use +- Limited in creativity and open-ended writing +- Cloud-only, daily free usage caps + +### LM Studio is the experimental alternative + +![LM Studio](./_assets/lm-studio.jpeg) + +LM Studio is not a ChatGPT replacement but a local tool for running open models. + +- Lets you test and run open-source models on PC +- Offline by default, works without internet +- Flexible setup for developers and technical users +- Requires decent hardware (RAM/VRAM) + +LM Studio is not beginner-friendly compared to Jan. + +## Choosing the right ChatGPT alternative for you: + +- Best overall replacement: [Jan](https://www.jan.ai/) +- For writing & storytelling: Claude +- For research & web knowledge: Perplexity or Gemini +- For productivity & office work: Microsoft Copilot +- For experimentation with open-source models for technical people: LM Studio + +Most ChatGPT alternatives are still cloud-based and limited. If you want full privacy, offline use, and no restrictions, the best ChatGPT alternative is [Jan](https://www.jan.ai/). + +### Can I use ChatGPT offline? +No. ChatGPT always requires internet. For offline AI, use Jan. + +### What’s the best free ChatGPT alternative? +Jan is free, open-source, and runs offline. Others like Claude or Perplexity have limited free tiers but are cloud-based. + +### Which ChatGPT alternative is best for writing? +Claude is strong for essays, reports, and structured writing. You could use [open-source models](https://www.jan.ai/post/run-ai-models-locally) in Jan too. + +### Which ChatGPT alternative is best for research? +Perplexity and Gemini pull real-time web data with citations. + +### What’s the closest full replacement to ChatGPT? +Jan. It runs locally, works offline, and feels like ChatGPT without restrictions. \ No newline at end of file diff --git a/docs/src/pages/post/deepresearch.mdx b/docs/src/pages/post/deepresearch.mdx index 11edd4f04..50cfc19ad 100644 --- a/docs/src/pages/post/deepresearch.mdx +++ b/docs/src/pages/post/deepresearch.mdx @@ -4,13 +4,13 @@ description: "A simple guide to replicating Deep Research results for free, with tags: AI, local models, Jan, GGUF, Deep Research, local AI categories: guides date: 2025-08-04 -ogImage: _assets/research-result-local.png +ogImage: assets/images/general/research-result-local.png twitter: card: summary_large_image site: "@jandotai" title: "Replicating Deep Research with Jan" description: "Learn how to replicate Deep Research results with Jan." - image: _assets/research-result-local.jpg + image: assets/images/general/research-result-local.png --- import { Callout } from 'nextra/components' @@ -125,8 +125,8 @@ any version with Model Context Protocol in it (>`v0.6.3`). **The Key: Assistants + Tools** -Running deep research in Jan can be accomplished by combining [custom assistants](https://jan.ai/docs/assistants) -with [MCP search tools](https://jan.ai/docs/desktop/mcp-examples/search/exa). This pairing allows any model—local or +Running deep research in Jan can be accomplished by combining [custom assistants](https://jan.ai/docs/desktop/assistants) +with [MCP search tools](https://jan.ai/docs/mcp-examples/search/exa). This pairing allows any model—local or cloud—to follow a systematic research workflow, to create a report similar to that of other providers, with some visible limitations (for now). 
diff --git a/docs/src/pages/post/deepseek-r1-locally.mdx b/docs/src/pages/post/deepseek-r1-locally.mdx index c9fb229b5..6d09532e9 100644 --- a/docs/src/pages/post/deepseek-r1-locally.mdx +++ b/docs/src/pages/post/deepseek-r1-locally.mdx @@ -4,7 +4,7 @@ description: "A straightforward guide to running DeepSeek R1 locally regardless tags: DeepSeek, R1, local AI, Jan, GGUF, Qwen, Llama categories: guides date: 2025-01-31 -ogImage: assets/deepseek-r1-locally-jan.jpg +ogImage: assets/images/general/deepseek-r1-locally-jan.jpg twitter: card: summary_large_image site: "@jandotai" @@ -17,7 +17,7 @@ import CTABlog from '@/components/Blog/CTA' # Run DeepSeek R1 locally on your device (Beginner-Friendly Guide) -![DeepSeek R1 running locally in Jan AI interface, showing the chat interface and model settings](./_assets/deepseek-r1-locally-jan.jpg) +![DeepSeek R1 running locally in Jan AI interface, showing the chat interface and model settings](/assets/images/general/deepseek-r1-locally-jan.jpg) DeepSeek R1 is one of the best open-source models in the market right now, and you can run DeepSeek R1 on your own computer! diff --git a/docs/src/pages/post/how-we-benchmark-kernels.mdx b/docs/src/pages/post/how-we-benchmark-kernels.mdx index dca80b095..6d5f6d947 100644 --- a/docs/src/pages/post/how-we-benchmark-kernels.mdx +++ b/docs/src/pages/post/how-we-benchmark-kernels.mdx @@ -3,7 +3,7 @@ title: "How we (try to) benchmark GPU kernels accurately" description: "We present the process behind how we decided to benchmark GPU kernels and iteratively improved our benchmarking pipeline" tags: "" categories: research -ogImage: "./_assets/cover-kernel-benchmarking.png" +ogImage: assets/images/general/cover-kernel-benchmarking.png date: 2025-09-17 --- diff --git a/docs/src/pages/post/is-chatgpt-down-use-jan.mdx b/docs/src/pages/post/is-chatgpt-down-use-jan.mdx new file mode 100644 index 000000000..dfd9843fd --- /dev/null +++ b/docs/src/pages/post/is-chatgpt-down-use-jan.mdx @@ -0,0 +1,124 @@ +--- +title: "If ChatGPT is down, switch to AI that never goes down" +description: "Check if ChatGPT down right now, and learn how to use AI that never goes down." +tags: AI, ChatGPT down, ChatGPT alternative, Jan, local AI, offline AI, ChatGPT at capacity +categories: guides +date: 2025-09-30 +ogImage: assets/images/general/is-chatgpt-down.jpg +twitter: + card: summary_large_image + site: "@jandotai" + title: "Realtime Status: Is ChatGPT down?" + description: "Check if ChatGPT is down right now with our real-time status checker, and learn how to use AI that never goes offline." + image: assets/images/general/is-chatgpt-down.jpg +--- +import { Callout } from 'nextra/components' +import CTABlog from '@/components/Blog/CTA' +import { OpenAIStatusChecker } from '@/components/OpenAIStatusChecker' + +# If ChatGPT is down, switch to AI that never goes down + +If you're seeing ChatGPT is down, it could be a good signal to switch to [Jan](https://www.jan.ai/), AI that never goes down. + +## 🔴 Realtime Status: Is ChatGPT down? + +This live tracker shows if ChatGPT is down right now. + + + +### ChatGPT Status Indicators + +
+- **Operational**: All systems are functioning normally with no reported issues.
+- **Degraded Performance**: Services are running but may be slower than usual.
+- **Partial Outage**: Some features or regions may be experiencing issues.
+- **Major Outage**: Significant service disruption affecting most users.
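The live tracker above is rendered by the `OpenAIStatusChecker` component. As a rough illustration of how such a checker can work, and not the component's actual implementation, the sketch below assumes OpenAI's status page exposes the standard Statuspage summary endpoint at `https://status.openai.com/api/v2/status.json`; verify that endpoint and the field names before relying on them.

```typescript
// Sketch only: poll a public Statuspage-style endpoint and map its indicator
// onto the four labels shown above. The URL and response shape are assumptions
// based on the common Statuspage API, not taken from Jan's codebase.
type StatusLabel =
  | 'Operational'
  | 'Degraded Performance'
  | 'Partial Outage'
  | 'Major Outage'

const INDICATOR_TO_LABEL: Record<string, StatusLabel> = {
  none: 'Operational',
  minor: 'Degraded Performance',
  major: 'Partial Outage',
  critical: 'Major Outage',
}

export async function fetchOpenAIStatus(): Promise<StatusLabel> {
  const res = await fetch('https://status.openai.com/api/v2/status.json')
  if (!res.ok) throw new Error(`Status page returned ${res.status}`)
  const body = (await res.json()) as { status?: { indicator?: string } }
  return INDICATOR_TO_LABEL[body.status?.indicator ?? 'none'] ?? 'Operational'
}
```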
+
+## Skip the downtime with Jan
+
+When ChatGPT is down, Jan keeps working. Jan is an open-source ChatGPT alternative that runs on your computer - no servers, no outages, no waiting.
+
+![Jan running when ChatGPT is down](/assets/images/general/is-chatgpt-down.jpg)
+*Jan works even when ChatGPT doesn't.*
+
+### Why Jan never goes down:
+- **Runs locally** - No dependency on external servers
+- **Always available** - Works offline, even on flights
+- **No capacity limits** - Uses your computer's resources
+- **100% private** - Your conversations stay on your device
+
+### Get started in 3 mins:
+1. Download Jan: [jan.ai](https://jan.ai)
+2. Install a model: Choose from Jan, Qwen, or other top models
+3. Start chatting: Similar design to ChatGPT, but always available if you use local models
+
+
+**Pro tip:** Keep both ChatGPT and Jan. You'll never lose productivity to outages again.
+
+
+Jan runs AI models locally, so you don't need internet access. That means Jan is unaffected when ChatGPT is down.
+
+### Why does ChatGPT go down?
+There could be multiple reasons:
+- Too many users at once
+- Data center or API downtime
+- Planned or unplanned updates
+- Limited in some locations
+
+ChatGPT depends on OpenAI's servers. If those go down, so does ChatGPT. Jan users aren't affected by ChatGPT outages.
+
+### Common ChatGPT Errors
+
+When ChatGPT experiences issues, you might see these error messages:
+
+- "ChatGPT is at capacity right now": Too many users online, try again later
+- "Error in message stream": Connection problems with OpenAI servers
+- "Something went wrong": General server error, refresh and retry
+- "Network error": Internet connectivity issues on your end or OpenAI's
+- "Rate limit exceeded": Too many requests sent, wait before trying again
+- "This model is currently overloaded": High demand for a specific model
+
+## Quick answers about ChatGPT status
+
+### Is ChatGPT down?
+Check the realtime ChatGPT status above. [See if ChatGPT is down right now.](/post/is-chatgpt-down-use-jan#-realtime-status-is-chatgpt-down)
+
+### Why is ChatGPT down?
+Usually server overload, maintenance, or outages at OpenAI.
+
+### What does "ChatGPT is at capacity" mean?
+Too many users are online at the same time. You'll need to wait or switch to Jan instead.
+
+### Is ChatGPT shutting down?
+No, ChatGPT isn't shutting down. Outages are temporary.
+
+### Can I use ChatGPT offline?
+No. ChatGPT always requires internet. For [offline AI](https://www.jan.ai/post/offline-chatgpt-alternative), use [Jan](https://jan.ai).
\ No newline at end of file
diff --git a/docs/src/pages/post/offline-chatgpt-alternative.mdx b/docs/src/pages/post/offline-chatgpt-alternative.mdx
index d73bec712..7f94cc23e 100644
--- a/docs/src/pages/post/offline-chatgpt-alternative.mdx
+++ b/docs/src/pages/post/offline-chatgpt-alternative.mdx
@@ -1,32 +1,36 @@
 ---
 title: "Offline ChatGPT: You can't run ChatGPT offline, do this instead"
-description: "Learn how to use AI offline with Jan - a free, open-source alternative to ChatGPT that works 100% offline on your computer."
-tags: AI, ChatGPT alternative, offline AI, Jan, local AI, privacy
+description: "Use offline AI with Jan, a free & open-source alternative to ChatGPT that runs 100% offline."
+tags: AI, chatgpt offline, ChatGPT alternative, offline AI, Jan, local AI, privacy
 categories: guides
 date: 2025-02-08
-ogImage: _assets/offline-chatgpt-alternatives-jan.jpg
+ogImage: assets/images/general/offline-chatgpt-alternatives-jan.jpg
 twitter:
   card: summary_large_image
   site: "@jandotai"
   title: "Offline ChatGPT: You can't run ChatGPT offline, do this instead"
-  description: "Want to use ChatGPT offline? Learn how to run AI models locally with Jan - free, open-source, and works without internet."
-  image: _assets/offline-chatgpt-alternatives-jan.jpg
+  description: "Use offline AI with Jan, a free & open-source alternative to ChatGPT that runs 100% offline."
+  image: assets/images/general/offline-chatgpt-alternatives-jan.jpg
 ---
 import { Callout } from 'nextra/components'
 import CTABlog from '@/components/Blog/CTA'
 
 # Offline ChatGPT: You can't run ChatGPT offline, do this instead
 
-ChatGPT is a cloud-based service that requires internet access. However, it's not the only way to use AI. You can run AI models offline on your device with [Jan](https://jan.ai/). It's completely free, open-source, and gives you 100% offline capability. You can even use AI on a plane!
+ChatGPT itself can't run offline. You can't download it, and it always needs internet, because it runs on OpenAI's servers.
 
-
+If you want offline AI, you need local models. The easiest way: [Jan, an open-source replacement for ChatGPT](https://jan.ai/). It's free, open-source, and works 100% offline. With Jan, you can even use AI on a plane.
+
+
 **Quick Summary:**
-- ChatGPT always needs internet - it can't run offline
-- Jan lets you run AI models 100% offline on your computer
-- It's free and open-source
-- Works on Mac, Windows, and Linux
+- ChatGPT always needs internet - no offline mode
+- Use Jan to run AI models 100% offline
+- It's free & open-source, and works on Mac, Windows, and Linux
 
+## How to use AI offline?
+Offline AI means the model runs on your computer. So no internet needed, 100% private, and data never leaves your device. With Jan, you can run these models locally.
+
 ## Jan as an offline ChatGPT alternative
 
 ![Use Jan to chat with AI models without internet access](./_assets/offline-chatgpt-alternative-ai-without-internet.jpg)
@@ -42,23 +46,25 @@ Go to [jan.ai](https://jan.ai) and download the version for your computer (Mac,
 
 ### 2. Download an AI model
 
-You'll need an AI model to use AI offline, so download a model from Jan. Once it's on your computer, you don't need internet anymore.
+You'll need an AI model to use AI offline, so download a model from Jan. Once it's on your computer, you don't need internet anymore. You can also run OpenAI's gpt-oss models via Jan - see the [running gpt-oss locally](https://www.jan.ai/post/run-gpt-oss-locally) post for details.
 
 ![Choose an AI model that works offline](./_assets/jan-model-selection.jpg "Find the perfect AI model for offline use")
 *Select an AI model that matches your needs and computer capabilities*
 
-
-**Which model should you choose?**
+### Which model should you choose?
 - For most computers: Try Mistral 7B or DeepSeek - they're similar to ChatGPT 3.5
 - For older computers: Use smaller 3B models
 - For gaming PCs: You can try larger 13B models
+
 Don't worry about choosing - Jan will automatically recommend models that work well on your computer.
 
+If you'd like to learn more about local AI, check the [how to run AI models locally as a beginner](https://www.jan.ai/post/run-ai-models-locally) article.
+
 ### 3.
Start using AI offline -![Chat with AI offline using Jan's interface](./_assets/run-ai-locally-with-jan.jpg "Experience ChatGPT-like interactions without internet") +![Chat with AI offline using Jan's interface](/assets/images/general/run-ai-locally-with-jan.jpg "Experience ChatGPT-like interactions without internet") *Use Jan's clean interface to chat with AI - no internet required* Once downloaded, you can use AI anywhere, anytime: @@ -71,12 +77,7 @@ Once downloaded, you can use AI anywhere, anytime: ## How to chat with your docs in Jan? -To chat with your docs in Jan, you need to activate experimental mode. - -![Activate experimental mode in Jan's settings](./_assets/chat-with-your-docs-offline-ai.jpg "Enable experimental features to chat with your documents") -*Turn on experimental mode in settings to chat with your docs* - -After activating experimental mode, simply add your files and ask questions about them. +Simply add your files and ask questions about them. ![Chat with your documents using Jan](./_assets/chat-with-docs-prompt.jpg "Ask questions about your documents offline") *Chat with your documents privately - no internet needed* @@ -97,17 +98,17 @@ Local AI makes possible offline AI use, so Jan is going to be your first step to 4. **No Server Issues:** No more "ChatGPT is at capacity" 5. **Your Choice of Models:** Use newer models as they come out -**"Is it really free? What's the catch?"** +### "Is Jan really free? What's the catch?" Yes, it's completely free and open source. Jan is built by developers who believe in making AI accessible to everyone. -**"How does it compare to ChatGPT?"** +### How does Jan compare to ChatGPT?" Modern open-source models like DeepSeek and Mistral are very capable. While they might not match GPT-4, they're perfect for most tasks and getting better every month. -**"Do I need a powerful computer?"** +### "Do I need a powerful computer?" If your computer is from the last 5 years, it will likely work fine. You need about 8GB of RAM and 10GB of free space for comfortable usage. -**"What about my privacy?"** -Everything stays on your computer. Your conversations, documents, and data never leave your device unless you choose to share them. +### "What about my privacy?" +Everything stays on your computer with Jan. Your conversations, documents, and data never leave your device unless you choose to share them. Want to learn more about the technical side? Check our detailed [guide on running AI models locally](/post/run-ai-models-locally). It's not required to [use AI offline](https://jan.ai/) but helps understand how it all works. @@ -116,3 +117,20 @@ Want to learn more about the technical side? Check our detailed [guide on runnin [Join our Discord community](https://discord.gg/Exe46xPMbK) for support and tips on using Jan as your offline ChatGPT alternative. + +### FAQ + +#### Can I download ChatGPT for offline use? +No. ChatGPT is cloud-only. + +#### How to use ChatGPT offline? +You can't. ChatGPT has no offline mode. Use Jan instead for a ChatGPT-like offline experience. + +#### Does ChatGPT have internet access? +Yes. It runs in the cloud. + +#### What's the best way to use AI offline? +Download Jan and run models like Mistral, DeepSeek, or GPT-OSS locally. + +#### What's GPT offline? +OpenAI has open-source models you can run locally but not via ChatGPT. One of them is [gpt-oss](https://www.jan.ai/post/run-gpt-oss-locally) and you can run it via Jan. 
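For developers, the same offline setup is also scriptable: Jan can expose an OpenAI-compatible local API server (enable it in Jan's settings; `localhost:1337` is the default address mentioned elsewhere in these docs). The sketch below is illustrative rather than taken from Jan's documentation: the model id is a placeholder for whatever you have downloaded, and if you configured an API key for the local server, add it as a Bearer token.

```typescript
// Minimal sketch: call Jan's OpenAI-compatible local server while offline.
// Assumes the local API server is enabled on its default address
// (http://localhost:1337) and that the model id below matches a model you
// have downloaded in Jan -- treat both as placeholders to adjust.
async function askLocalJan(prompt: string): Promise<string> {
  const response = await fetch('http://localhost:1337/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'mistral-7b-instruct', // placeholder: use an id from your Jan model list
      messages: [{ role: 'user', content: prompt }],
    }),
  })
  if (!response.ok) throw new Error(`Local server returned ${response.status}`)
  const data = await response.json()
  return data.choices[0].message.content
}

// Example call: works with no internet connection once the model is downloaded.
askLocalJan('Draft a short packing list for a weekend trip.')
  .then(console.log)
  .catch(console.error)
```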
\ No newline at end of file diff --git a/docs/src/pages/post/qwen3-settings.mdx b/docs/src/pages/post/qwen3-settings.mdx index c4635451c..07af8b9ba 100644 --- a/docs/src/pages/post/qwen3-settings.mdx +++ b/docs/src/pages/post/qwen3-settings.mdx @@ -50,7 +50,7 @@ Thinking mode is powerful, but greedy decoding kills its output. It'll repeat or ## Quick summary -![Qwen3 settings](./_assets/qwen3-settings-jan-ai.jpeg) +![Qwen3 settings](/assets/images/general/qwen3-30b-settings.jpg) ### Non-thinking mode (`enable_thinking=False`) diff --git a/docs/src/pages/post/run-ai-models-locally.mdx b/docs/src/pages/post/run-ai-models-locally.mdx index efe8bc594..315d9aad2 100644 --- a/docs/src/pages/post/run-ai-models-locally.mdx +++ b/docs/src/pages/post/run-ai-models-locally.mdx @@ -4,7 +4,7 @@ description: "A straightforward guide to running AI models locally on your compu tags: AI, local models, Jan, GGUF, privacy, local AI categories: guides date: 2025-01-31 -ogImage: assets/run-ai-locally-with-jan.jpg +ogImage: assets/images/general/run-ai-locally-with-jan.jpg twitter: card: summary_large_image site: "@jandotai" @@ -35,7 +35,7 @@ Most people think running AI models locally is complicated. It's not. Anyone can That's all to run your first AI model locally! -![Jan's simple and clean chat interface for local AI](./_assets/run-ai-locally-with-jan.jpg "Jan's easy-to-use chat interface after installation") +![Jan's simple and clean chat interface for local AI](/assets/images/general/run-ai-locally-with-jan.jpg "Jan's easy-to-use chat interface after installation") *Jan's easy-to-use chat interface after installation.* Keep reading to learn key terms of local AI and the things you should know before running AI models locally. diff --git a/docs/src/pages/post/run-gpt-oss-locally.mdx b/docs/src/pages/post/run-gpt-oss-locally.mdx index 5f71e8b45..795738644 100644 --- a/docs/src/pages/post/run-gpt-oss-locally.mdx +++ b/docs/src/pages/post/run-gpt-oss-locally.mdx @@ -4,21 +4,19 @@ description: "Complete 5-minute beginner guide to running OpenAI's gpt-oss local tags: OpenAI, gpt-oss, local AI, Jan, privacy, Apache-2.0, llama.cpp, Ollama, LM Studio categories: guides date: 2025-08-06 -ogImage: assets/gpt-oss%20locally.jpeg +ogImage: assets/images/general/gpt-oss locally.jpeg twitter: card: summary_large_image site: "@jandotai" title: "Run OpenAI's gpt-oss Locally in 5 Minutes (Beginner Guide)" description: "Complete 5-minute beginner guide to running OpenAI's gpt-oss locally with Jan AI for private, offline conversations." - image: assets/gpt-oss%20locally.jpeg + image: assets/images/general/gpt-oss locally.jpeg --- import { Callout } from 'nextra/components' import CTABlog from '@/components/Blog/CTA' # Run OpenAI's gpt-oss Locally in 5 mins -![gpt-oss running locally in Jan interface](./_assets/gpt-oss%20locally.jpeg) - OpenAI launched [gpt-oss](https://openai.com/index/introducing-gpt-oss/), marking their return to open-source AI after GPT-2. This model is designed to run locally on consumer hardware. This guide shows you how to install and run gpt-oss on your computer for private, offline AI conversations. ## What is gpt-oss? 
diff --git a/docs/theme.config.tsx b/docs/theme.config.tsx index 8b71c4cca..f3d1ab69c 100644 --- a/docs/theme.config.tsx +++ b/docs/theme.config.tsx @@ -107,14 +107,15 @@ const config: DocsThemeConfig = { head: function useHead() { const { title, frontMatter } = useConfig() const { asPath } = useRouter() - const titleTemplate = - (asPath.includes('/desktop') + const titleTemplate = asPath.includes('/post/') + ? (frontMatter?.title || title) + : (asPath.includes('/desktop') ? 'Jan Desktop' : asPath.includes('/server') ? 'Jan Server' : 'Jan') + - ' - ' + - (frontMatter?.title || title) + ' - ' + + (frontMatter?.title || title) return ( diff --git a/extensions-web/package.json b/extensions-web/package.json index 232ba13fa..aa536e9fe 100644 --- a/extensions-web/package.json +++ b/extensions-web/package.json @@ -22,6 +22,9 @@ }, "devDependencies": { "@janhq/core": "workspace:*", + "@tabler/icons-react": "^3.34.0", + "@types/react": "19.1.2", + "react": "19.0.0", "typescript": "5.9.2", "vite": "5.4.20", "vitest": "2.1.9", @@ -29,6 +32,8 @@ }, "peerDependencies": { "@janhq/core": "*", + "@tabler/icons-react": "*", + "react": "19.0.0", "zustand": "5.0.3" }, "dependencies": { diff --git a/extensions-web/src/conversational-web/const.ts b/extensions-web/src/conversational-web/const.ts index 0ad7e9049..4a860c500 100644 --- a/extensions-web/src/conversational-web/const.ts +++ b/extensions-web/src/conversational-web/const.ts @@ -14,4 +14,4 @@ export const DEFAULT_ASSISTANT = { name: 'Jan', avatar: '👋', created_at: 1747029866.542, -} \ No newline at end of file +} diff --git a/extensions-web/src/conversational-web/extension.ts b/extensions-web/src/conversational-web/extension.ts index 7c31f1c31..0e01e2ca3 100644 --- a/extensions-web/src/conversational-web/extension.ts +++ b/extensions-web/src/conversational-web/extension.ts @@ -11,6 +11,9 @@ import { } from '@janhq/core' import { RemoteApi } from './api' import { getDefaultAssistant, ObjectParser, combineConversationItemsToMessages } from './utils' +import { ApiError } from '../shared/types/errors' + +const CONVERSATION_NOT_FOUND_EVENT = 'conversation-not-found' export default class ConversationalExtensionWeb extends ConversationalExtension { private remoteApi: RemoteApi | undefined @@ -111,6 +114,15 @@ export default class ConversationalExtensionWeb extends ConversationalExtension return messages } catch (error) { console.error('Failed to list messages:', error) + // Check if it's a 404 error (conversation not found) + if (error instanceof ApiError && error.isNotFound()) { + // Trigger a navigation event to redirect to home + // We'll use a custom event that the web app can listen to + window.dispatchEvent(new CustomEvent(CONVERSATION_NOT_FOUND_EVENT, { + detail: { threadId, error: error.message } + })) + } + return [] } } diff --git a/extensions-web/src/jan-provider-web/api.ts b/extensions-web/src/jan-provider-web/api.ts index 436ee06b6..97a9608f2 100644 --- a/extensions-web/src/jan-provider-web/api.ts +++ b/extensions-web/src/jan-provider-web/api.ts @@ -5,9 +5,45 @@ import { getSharedAuthService, JanAuthService } from '../shared' import { JanModel, janProviderStore } from './store' +import { ApiError } from '../shared/types/errors' // JAN_API_BASE is defined in vite.config.ts +// Constants +const TEMPORARY_CHAT_ID = 'temporary-chat' + +/** + * Determines the appropriate API endpoint and request payload based on chat type + * @param request - The chat completion request + * @returns Object containing endpoint URL and processed request payload + */ 
+function getChatCompletionConfig(request: JanChatCompletionRequest, stream: boolean = false) { + const isTemporaryChat = request.conversation_id === TEMPORARY_CHAT_ID + + // For temporary chats, use the stateless /chat/completions endpoint + // For regular conversations, use the stateful /conv/chat/completions endpoint + const endpoint = isTemporaryChat + ? `${JAN_API_BASE}/chat/completions` + : `${JAN_API_BASE}/conv/chat/completions` + + const payload = { + ...request, + stream, + ...(isTemporaryChat ? { + // For temporary chat: don't store anything, remove conversation metadata + conversation_id: undefined, + } : { + // For regular chat: store everything, use conversation metadata + store: true, + store_reasoning: true, + conversation: request.conversation_id, + conversation_id: undefined, + }) + } + + return { endpoint, payload, isTemporaryChat } +} + export interface JanModelsResponse { object: string data: JanModel[] @@ -102,7 +138,8 @@ export class JanApiClient { return models } catch (error) { - const errorMessage = error instanceof Error ? error.message : 'Failed to fetch models' + const errorMessage = error instanceof ApiError ? error.message : + error instanceof Error ? error.message : 'Failed to fetch models' janProviderStore.setError(errorMessage) janProviderStore.setLoadingModels(false) throw error @@ -115,22 +152,18 @@ export class JanApiClient { try { janProviderStore.clearError() + const { endpoint, payload } = getChatCompletionConfig(request, false) + return await this.authService.makeAuthenticatedRequest( - `${JAN_API_BASE}/conv/chat/completions`, + endpoint, { method: 'POST', - body: JSON.stringify({ - ...request, - stream: false, - store: true, - store_reasoning: true, - conversation: request.conversation_id, - conversation_id: undefined, - }), + body: JSON.stringify(payload), } ) } catch (error) { - const errorMessage = error instanceof Error ? error.message : 'Failed to create chat completion' + const errorMessage = error instanceof ApiError ? error.message : + error instanceof Error ? error.message : 'Failed to create chat completion' janProviderStore.setError(errorMessage) throw error } @@ -144,23 +177,17 @@ export class JanApiClient { ): Promise { try { janProviderStore.clearError() - + const authHeader = await this.authService.getAuthHeader() - - const response = await fetch(`${JAN_API_BASE}/conv/chat/completions`, { + const { endpoint, payload } = getChatCompletionConfig(request, true) + + const response = await fetch(endpoint, { method: 'POST', headers: { 'Content-Type': 'application/json', ...authHeader, }, - body: JSON.stringify({ - ...request, - stream: true, - store: true, - store_reasoning: true, - conversation: request.conversation_id, - conversation_id: undefined, - }), + body: JSON.stringify(payload), }) if (!response.ok) { @@ -216,7 +243,8 @@ export class JanApiClient { reader.releaseLock() } } catch (error) { - const err = error instanceof Error ? error : new Error('Unknown error occurred') + const err = error instanceof ApiError ? error : + error instanceof Error ? error : new Error('Unknown error occurred') janProviderStore.setError(err.message) onError?.(err) throw err @@ -230,7 +258,8 @@ export class JanApiClient { await this.getModels() console.log('Jan API client initialized successfully') } catch (error) { - const errorMessage = error instanceof Error ? error.message : 'Failed to initialize API client' + const errorMessage = error instanceof ApiError ? error.message : + error instanceof Error ? 
error.message : 'Failed to initialize API client' janProviderStore.setError(errorMessage) throw error } finally { @@ -239,4 +268,4 @@ export class JanApiClient { } } -export const janApiClient = JanApiClient.getInstance() \ No newline at end of file +export const janApiClient = JanApiClient.getInstance() diff --git a/extensions-web/src/jan-provider-web/index.ts b/extensions-web/src/jan-provider-web/index.ts index 70cbf7770..4d3a4008a 100644 --- a/extensions-web/src/jan-provider-web/index.ts +++ b/extensions-web/src/jan-provider-web/index.ts @@ -1 +1 @@ -export { default } from './provider' \ No newline at end of file +export { default } from './provider' diff --git a/extensions-web/src/jan-provider-web/provider.ts b/extensions-web/src/jan-provider-web/provider.ts index cfbe18e2e..3375fd351 100644 --- a/extensions-web/src/jan-provider-web/provider.ts +++ b/extensions-web/src/jan-provider-web/provider.ts @@ -15,6 +15,7 @@ import { } from '@janhq/core' // cspell: disable-line import { janApiClient, JanChatMessage } from './api' import { janProviderStore } from './store' +import { ApiError } from '../shared/types/errors' // Jan models support tools via MCP const JAN_MODEL_CAPABILITIES = ['tools'] as const @@ -192,7 +193,8 @@ export default class JanProviderWeb extends AIEngine { console.error(`Failed to unload Jan session ${sessionId}:`, error) return { success: false, - error: error instanceof Error ? error.message : 'Unknown error', + error: error instanceof ApiError ? error.message : + error instanceof Error ? error.message : 'Unknown error', } } } diff --git a/extensions-web/src/jan-provider-web/store.ts b/extensions-web/src/jan-provider-web/store.ts index 02cc70686..2ff341147 100644 --- a/extensions-web/src/jan-provider-web/store.ts +++ b/extensions-web/src/jan-provider-web/store.ts @@ -92,4 +92,4 @@ export const janProviderStore = { useJanProviderStore.getState().clearError(), reset: () => useJanProviderStore.getState().reset(), -} \ No newline at end of file +} diff --git a/extensions-web/src/mcp-web/components/WebSearchButton.tsx b/extensions-web/src/mcp-web/components/WebSearchButton.tsx new file mode 100644 index 000000000..86fa08906 --- /dev/null +++ b/extensions-web/src/mcp-web/components/WebSearchButton.tsx @@ -0,0 +1,54 @@ +import { useMemo, useCallback } from 'react' +import { IconWorld } from '@tabler/icons-react' +import { MCPToolComponentProps } from '@janhq/core' + +// List of tool names considered as web search tools +const WEB_SEARCH_TOOL_NAMES = ['google_search', 'scrape']; + +export const WebSearchButton = ({ + tools, + isToolEnabled, + onToolToggle, +}: MCPToolComponentProps) => { + const webSearchTools = useMemo( + () => tools.filter((tool) => WEB_SEARCH_TOOL_NAMES.includes(tool.name)), + [tools] + ) + + // Early return if no web search tools available + if (webSearchTools.length === 0) { + return null + } + + // Check if all web search tools are enabled + const isEnabled = useMemo( + () => webSearchTools.every((tool) => isToolEnabled(tool.name)), + [webSearchTools, isToolEnabled] + ) + + const handleToggle = useCallback(() => { + // Toggle all web search tools at once + const newState = !isEnabled + webSearchTools.forEach((tool) => { + onToolToggle(tool.name, newState) + }) + }, [isEnabled, webSearchTools, onToolToggle]) + + return ( + + ) +} diff --git a/extensions-web/src/mcp-web/components/index.ts b/extensions-web/src/mcp-web/components/index.ts new file mode 100644 index 000000000..7f9bc47da --- /dev/null +++ b/extensions-web/src/mcp-web/components/index.ts @@ 
-0,0 +1 @@ +export { WebSearchButton } from './WebSearchButton' diff --git a/extensions-web/src/mcp-web/index.ts b/extensions-web/src/mcp-web/index.ts index 5e13846a7..3d588753f 100644 --- a/extensions-web/src/mcp-web/index.ts +++ b/extensions-web/src/mcp-web/index.ts @@ -4,11 +4,13 @@ * Uses official MCP TypeScript SDK with proper session handling */ -import { MCPExtension, MCPTool, MCPToolCallResult } from '@janhq/core' +import { MCPExtension, MCPTool, MCPToolCallResult, MCPToolComponentProps } from '@janhq/core' import { getSharedAuthService, JanAuthService } from '../shared' import { Client } from '@modelcontextprotocol/sdk/client/index.js' import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js' import { JanMCPOAuthProvider } from './oauth-provider' +import { WebSearchButton } from './components' +import type { ComponentType } from 'react' // JAN_API_BASE is defined in vite.config.ts (defaults to 'https://api-dev.jan.ai/jan/v1') declare const JAN_API_BASE: string @@ -232,4 +234,27 @@ export default class MCPExtensionWeb extends MCPExtension { throw error } } -} \ No newline at end of file + + /** + * Provides a custom UI component for web search tools + * @returns The WebSearchButton component + */ + getToolComponent(): ComponentType | null { + return WebSearchButton + } + + /** + * Returns the list of tool names that should be disabled by default for new users + * All MCP web tools are disabled by default to prevent accidental API usage + * @returns Array of tool names to disable by default + */ + async getDefaultDisabledTools(): Promise { + try { + const tools = await this.getTools() + return tools.map(tool => tool.name) + } catch (error) { + console.error('Failed to get default disabled tools:', error) + return [] + } + } +} diff --git a/extensions-web/src/mcp-web/oauth-provider.ts b/extensions-web/src/mcp-web/oauth-provider.ts index fd37c3ece..7d14264d6 100644 --- a/extensions-web/src/mcp-web/oauth-provider.ts +++ b/extensions-web/src/mcp-web/oauth-provider.ts @@ -57,4 +57,4 @@ export class JanMCPOAuthProvider implements OAuthClientProvider { async codeVerifier(): Promise { throw new Error('Code verifier not supported') } -} \ No newline at end of file +} diff --git a/extensions-web/src/shared/auth/service.ts b/extensions-web/src/shared/auth/service.ts index 1895ff8c4..eb15c4893 100644 --- a/extensions-web/src/shared/auth/service.ts +++ b/extensions-web/src/shared/auth/service.ts @@ -16,6 +16,7 @@ import { logoutUser, refreshToken, guestLogin } from './api' import { AuthProviderRegistry } from './registry' import { AuthBroadcast } from './broadcast' import type { ProviderType } from './providers' +import { ApiError } from '../types/errors' const authProviderRegistry = new AuthProviderRegistry() @@ -160,7 +161,7 @@ export class JanAuthService { this.tokenExpiryTime = Date.now() + tokens.expires_in * 1000 } catch (error) { console.error('Failed to refresh access token:', error) - if (error instanceof Error && error.message.includes('401')) { + if (error instanceof ApiError && error.isStatus(401)) { await this.handleSessionExpired() } throw error @@ -305,9 +306,7 @@ export class JanAuthService { if (!response.ok) { const errorText = await response.text() - throw new Error( - `API request failed: ${response.status} ${response.statusText} - ${errorText}` - ) + throw new ApiError(response.status, response.statusText, errorText) } return response.json() @@ -418,7 +417,7 @@ export class JanAuthService { ) } catch (error) { console.error('Failed to 
fetch user profile:', error) - if (error instanceof Error && error.message.includes('401')) { + if (error instanceof ApiError && error.isStatus(401)) { // Authentication failed - handle session expiry await this.handleSessionExpired() return null diff --git a/extensions-web/src/shared/types/errors.ts b/extensions-web/src/shared/types/errors.ts new file mode 100644 index 000000000..650507a7b --- /dev/null +++ b/extensions-web/src/shared/types/errors.ts @@ -0,0 +1,50 @@ +/** + * Shared error types for API responses + */ + +export class ApiError extends Error { + public readonly status: number + public readonly statusText: string + public readonly responseText: string + + constructor(status: number, statusText: string, responseText: string, message?: string) { + super(message || `API request failed: ${status} ${statusText} - ${responseText}`) + this.name = 'ApiError' + this.status = status + this.statusText = statusText + this.responseText = responseText + + // Maintains proper stack trace for where our error was thrown (only available on V8) + if ((Error as any).captureStackTrace) { + (Error as any).captureStackTrace(this, ApiError) + } + } + + /** + * Check if this is a specific HTTP status code + */ + isStatus(code: number): boolean { + return this.status === code + } + + /** + * Check if this is a 404 Not Found error + */ + isNotFound(): boolean { + return this.status === 404 + } + + /** + * Check if this is a client error (4xx) + */ + isClientError(): boolean { + return this.status >= 400 && this.status < 500 + } + + /** + * Check if this is a server error (5xx) + */ + isServerError(): boolean { + return this.status >= 500 && this.status < 600 + } +} diff --git a/extensions-web/src/types.ts b/extensions-web/src/types.ts index 47ef0be71..4d52032c9 100644 --- a/extensions-web/src/types.ts +++ b/extensions-web/src/types.ts @@ -38,4 +38,4 @@ export interface IndexedDBConfig { keyPath: string indexes?: { name: string; keyPath: string | string[]; unique?: boolean }[] }[] -} \ No newline at end of file +} diff --git a/extensions-web/src/types/global.d.ts b/extensions-web/src/types/global.d.ts index a6e82d759..8d70d398b 100644 --- a/extensions-web/src/types/global.d.ts +++ b/extensions-web/src/types/global.d.ts @@ -2,4 +2,4 @@ export {} declare global { declare const JAN_API_BASE: string -} \ No newline at end of file +} diff --git a/extensions-web/src/vite-env.d.ts b/extensions-web/src/vite-env.d.ts index 151aa6856..11f02fe2a 100644 --- a/extensions-web/src/vite-env.d.ts +++ b/extensions-web/src/vite-env.d.ts @@ -1 +1 @@ -/// \ No newline at end of file +/// diff --git a/extensions-web/tsconfig.json b/extensions-web/tsconfig.json index e90dd4997..b39b50ee5 100644 --- a/extensions-web/tsconfig.json +++ b/extensions-web/tsconfig.json @@ -3,6 +3,7 @@ "target": "ES2020", "module": "ESNext", "moduleResolution": "bundler", + "jsx": "react-jsx", "allowSyntheticDefaultImports": true, "esModuleInterop": true, "strict": true, diff --git a/extensions-web/vite.config.ts b/extensions-web/vite.config.ts index 89cfb7d0e..8c144b0ab 100644 --- a/extensions-web/vite.config.ts +++ b/extensions-web/vite.config.ts @@ -9,11 +9,11 @@ export default defineConfig({ fileName: 'index' }, rollupOptions: { - external: ['@janhq/core', 'zustand'] + external: ['@janhq/core', 'zustand', 'react', 'react-dom', 'react/jsx-runtime', '@tabler/icons-react'] }, emptyOutDir: false // Don't clean the output directory }, define: { JAN_API_BASE: JSON.stringify(process.env.JAN_API_BASE || 'https://api-dev.jan.ai/v1'), } -}) \ No newline 
at end of file +}) diff --git a/extensions/llamacpp-extension/src/backend.ts b/extensions/llamacpp-extension/src/backend.ts index d60ecc138..a313e01c6 100644 --- a/extensions/llamacpp-extension/src/backend.ts +++ b/extensions/llamacpp-extension/src/backend.ts @@ -1,9 +1,8 @@ import { getJanDataFolderPath, fs, joinPath, events } from '@janhq/core' import { invoke } from '@tauri-apps/api/core' -import { getProxyConfig } from './util' +import { getProxyConfig, basenameNoExt } from './util' import { dirname, basename } from '@tauri-apps/api/path' import { getSystemInfo } from '@janhq/tauri-plugin-hardware-api' - /* * Reads currently installed backends in janDataFolderPath * @@ -73,10 +72,7 @@ async function fetchRemoteSupportedBackends( if (!name.startsWith(prefix)) continue - const backend = name - .replace(prefix, '') - .replace('.tar.gz', '') - .replace('.zip', '') + const backend = basenameNoExt(name).slice(prefix.length) if (supportedBackends.includes(backend)) { remote.push({ version, backend }) diff --git a/extensions/llamacpp-extension/src/type.d.ts b/extensions/llamacpp-extension/src/type.d.ts new file mode 100644 index 000000000..88fc84a17 --- /dev/null +++ b/extensions/llamacpp-extension/src/type.d.ts @@ -0,0 +1,12 @@ +export {} + +declare global { + interface RequestInit { + /** + * Tauri HTTP plugin option for connection timeout in milliseconds. + */ + connectTimeout?: number + } +} + + diff --git a/extensions/llamacpp-extension/src/util.ts b/extensions/llamacpp-extension/src/util.ts index 1511eafec..b72766579 100644 --- a/extensions/llamacpp-extension/src/util.ts +++ b/extensions/llamacpp-extension/src/util.ts @@ -1,3 +1,23 @@ +// File path utilities +export function basenameNoExt(filePath: string): string { + const VALID_EXTENSIONS = [".tar.gz", ".zip"]; + + // handle VALID extensions first + for (const ext of VALID_EXTENSIONS) { + if (filePath.toLowerCase().endsWith(ext)) { + return filePath.slice(0, -ext.length); + } + } + + // fallback: remove only the last extension + const lastDotIndex = filePath.lastIndexOf('.'); + if (lastDotIndex > 0) { + return filePath.slice(0, lastDotIndex); + } + + return filePath; +} + // Zustand proxy state structure interface ProxyState { proxyEnabled: boolean diff --git a/extensions/yarn.lock b/extensions/yarn.lock index 0751c0069..f4a58c14f 100644 --- a/extensions/yarn.lock +++ b/extensions/yarn.lock @@ -342,41 +342,41 @@ __metadata: "@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension": version: 0.1.10 - resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=f9bdfe&locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension" + resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=f15485&locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension" dependencies: rxjs: "npm:^7.8.1" ulidx: "npm:^2.3.0" - checksum: 10c0/417ea9bd3e5b53264596d2ee816c3e24299f8b721f6ea951d078342555da457ebca4d5b1e116bf187ac77ec0a9e3341211d464f4ffdbd2a3915139523688d41d + checksum: 10c0/257621cb56db31a4dd3a2b509ec4c61217022e74bbd39cf6a1a172073654b9a65eee94ef9c1b4d4f5d2231d159c8818cb02846f3d88fe14f102f43169ad3737c languageName: node linkType: hard "@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension": version: 0.1.10 - resolution: 
"@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=f9bdfe&locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension" + resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=f15485&locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension" dependencies: rxjs: "npm:^7.8.1" ulidx: "npm:^2.3.0" - checksum: 10c0/417ea9bd3e5b53264596d2ee816c3e24299f8b721f6ea951d078342555da457ebca4d5b1e116bf187ac77ec0a9e3341211d464f4ffdbd2a3915139523688d41d + checksum: 10c0/257621cb56db31a4dd3a2b509ec4c61217022e74bbd39cf6a1a172073654b9a65eee94ef9c1b4d4f5d2231d159c8818cb02846f3d88fe14f102f43169ad3737c languageName: node linkType: hard "@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fdownload-extension%40workspace%3Adownload-extension": version: 0.1.10 - resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=f9bdfe&locator=%40janhq%2Fdownload-extension%40workspace%3Adownload-extension" + resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=f15485&locator=%40janhq%2Fdownload-extension%40workspace%3Adownload-extension" dependencies: rxjs: "npm:^7.8.1" ulidx: "npm:^2.3.0" - checksum: 10c0/417ea9bd3e5b53264596d2ee816c3e24299f8b721f6ea951d078342555da457ebca4d5b1e116bf187ac77ec0a9e3341211d464f4ffdbd2a3915139523688d41d + checksum: 10c0/257621cb56db31a4dd3a2b509ec4c61217022e74bbd39cf6a1a172073654b9a65eee94ef9c1b4d4f5d2231d159c8818cb02846f3d88fe14f102f43169ad3737c languageName: node linkType: hard "@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fllamacpp-extension%40workspace%3Allamacpp-extension": version: 0.1.10 - resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=f9bdfe&locator=%40janhq%2Fllamacpp-extension%40workspace%3Allamacpp-extension" + resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=f15485&locator=%40janhq%2Fllamacpp-extension%40workspace%3Allamacpp-extension" dependencies: rxjs: "npm:^7.8.1" ulidx: "npm:^2.3.0" - checksum: 10c0/417ea9bd3e5b53264596d2ee816c3e24299f8b721f6ea951d078342555da457ebca4d5b1e116bf187ac77ec0a9e3341211d464f4ffdbd2a3915139523688d41d + checksum: 10c0/257621cb56db31a4dd3a2b509ec4c61217022e74bbd39cf6a1a172073654b9a65eee94ef9c1b4d4f5d2231d159c8818cb02846f3d88fe14f102f43169ad3737c languageName: node linkType: hard diff --git a/package.json b/package.json index 50eb8ecaf..cf3767e66 100644 --- a/package.json +++ b/package.json @@ -12,6 +12,8 @@ "scripts": { "lint": "yarn workspace @janhq/web-app lint", "dev": "yarn dev:tauri", + "ios": "yarn tauri ios dev", + "android": "yarn tauri android dev", "build": "yarn build:web && yarn build:tauri", "test": "vitest run", "test:watch": "vitest", @@ -24,12 +26,18 @@ "serve:web-app": "yarn workspace @janhq/web-app serve:web", "build:serve:web-app": "yarn build:web-app && yarn serve:web-app", "dev:tauri": "yarn build:icon && yarn copy:assets:tauri && cross-env IS_CLEAN=true tauri dev", + "dev:ios": "yarn build:extensions-web && yarn copy:assets:mobile && RUSTC_WRAPPER= yarn tauri ios dev --features mobile", + "dev:android": "yarn build:extensions-web && yarn copy:assets:mobile && cross-env IS_CLEAN=true TAURI_ANDROID_BUILD=true yarn tauri android dev --features mobile", + "build:android": "yarn build:icon && yarn copy:assets:mobile && cross-env IS_CLEAN=true TAURI_ANDROID_BUILD=true yarn tauri android build -- --no-default-features --features mobile", + "build:ios": "yarn copy:assets:mobile && yarn tauri ios build -- 
--no-default-features --features mobile", + "build:ios:device": "yarn build:icon && yarn copy:assets:mobile && yarn tauri ios build -- --no-default-features --features mobile --export-method debugging", "copy:assets:tauri": "cpx \"pre-install/*.tgz\" \"src-tauri/resources/pre-install/\" && cpx \"LICENSE\" \"src-tauri/resources/\"", + "copy:assets:mobile": "cpx \"pre-install/*.tgz\" \"src-tauri/resources/pre-install/\" && cpx \"LICENSE\" \"src-tauri/resources/\"", "download:lib": "node ./scripts/download-lib.mjs", "download:bin": "node ./scripts/download-bin.mjs", "download:windows-installer": "node ./scripts/download-win-installer-deps.mjs", - "build:tauri:win32": "yarn download:bin && yarn download:lib && yarn download:windows-installer && yarn tauri build", - "build:tauri:linux": "yarn download:bin && yarn download:lib && NO_STRIP=1 ./src-tauri/build-utils/shim-linuxdeploy.sh yarn tauri build && ./src-tauri/build-utils/buildAppImage.sh", + "build:tauri:win32": "yarn download:bin && yarn download:windows-installer && yarn tauri build", + "build:tauri:linux": "yarn download:bin && NO_STRIP=1 ./src-tauri/build-utils/shim-linuxdeploy.sh yarn tauri build && ./src-tauri/build-utils/buildAppImage.sh", "build:tauri:darwin": "yarn download:bin && yarn tauri build --target universal-apple-darwin", "build:tauri": "yarn build:icon && yarn copy:assets:tauri && run-script-os", "build:tauri:plugin:api": "cd src-tauri/plugins && yarn install && yarn workspaces foreach -Apt run build", @@ -58,7 +66,9 @@ "hoistingLimits": "workspaces" }, "resolutions": { - "yallist": "4.0.0" + "yallist": "4.0.0", + "@types/react": "19.1.2", + "@types/react-dom": "19.1.2" }, "packageManager": "yarn@4.5.3" } diff --git a/scripts/download-bin.mjs b/scripts/download-bin.mjs index 36e17b3f0..68f09bf5f 100644 --- a/scripts/download-bin.mjs +++ b/scripts/download-bin.mjs @@ -1,4 +1,3 @@ -console.log('Script is running') // scripts/download.js import https from 'https' import fs, { copyFile, mkdirSync } from 'fs' @@ -69,7 +68,10 @@ function getPlatformArch() { arch === 'arm64' ? 'aarch64-apple-darwin' : 'x86_64-apple-darwin' } else if (platform === 'linux') { bunPlatform = arch === 'arm64' ? 'linux-aarch64' : 'linux-x64' - uvPlatform = arch === 'arm64' ? 'aarch64-unknown-linux-gnu' : 'x86_64-unknown-linux-gnu' + uvPlatform = + arch === 'arm64' + ? 
'aarch64-unknown-linux-gnu' + : 'x86_64-unknown-linux-gnu' } else if (platform === 'win32') { bunPlatform = 'windows-x64' // Bun has limited Windows support uvPlatform = 'x86_64-pc-windows-msvc' @@ -81,6 +83,10 @@ function getPlatformArch() { } async function main() { + if (process.env.SKIP_BINARIES) { + console.log('Skipping binaries download.') + process.exit(0) + } console.log('Starting main function') const platform = os.platform() const { bunPlatform, uvPlatform } = getPlatformArch() @@ -100,13 +106,11 @@ async function main() { } // Adjust these URLs based on latest releases - const bunVersion = '1.2.10' // Example Bun version - const bunUrl = `https://github.com/oven-sh/bun/releases/download/bun-v${bunVersion}/bun-${bunPlatform}.zip` + const bunUrl = `https://github.com/oven-sh/bun/releases/latest/download/bun-${bunPlatform}.zip` - const uvVersion = '0.6.17' // Example UV version - let uvUrl = `https://github.com/astral-sh/uv/releases/download/${uvVersion}/uv-${uvPlatform}.tar.gz` + let uvUrl = `https://github.com/astral-sh/uv/releases/latest/download/uv-${uvPlatform}.tar.gz` if (platform === 'win32') { - uvUrl = `https://github.com/astral-sh/uv/releases/download/${uvVersion}/uv-${uvPlatform}.zip` + uvUrl = `https://github.com/astral-sh/uv/releases/latest/download/uv-${uvPlatform}.zip` } console.log(`Downloading Bun for ${bunPlatform}...`) @@ -124,29 +128,45 @@ async function main() { if (err) { console.log('Add execution permission failed!', err) } - }); + }) if (platform === 'darwin') { - copyFile(path.join(binDir, 'bun'), path.join(binDir, 'bun-x86_64-apple-darwin'), (err) => { - if (err) { - console.log("Error Found:", err); - } - }) - copyFile(path.join(binDir, 'bun'), path.join(binDir, 'bun-aarch64-apple-darwin'), (err) => { - if (err) { - console.log("Error Found:", err); - } - }) - copyFile(path.join(binDir, 'bun'), path.join(binDir, 'bun-universal-apple-darwin'), (err) => { + copyFile( + path.join(binDir, 'bun'), + path.join(binDir, 'bun-x86_64-apple-darwin'), + (err) => { if (err) { - console.log("Error Found:", err); + console.log('Error Found:', err) } - }) - } else if (platform === 'linux') { - copyFile(path.join(binDir, 'bun'), path.join(binDir, 'bun-x86_64-unknown-linux-gnu'), (err) => { - if (err) { - console.log("Error Found:", err); } - }) + ) + copyFile( + path.join(binDir, 'bun'), + path.join(binDir, 'bun-aarch64-apple-darwin'), + (err) => { + if (err) { + console.log('Error Found:', err) + } + } + ) + copyFile( + path.join(binDir, 'bun'), + path.join(binDir, 'bun-universal-apple-darwin'), + (err) => { + if (err) { + console.log('Error Found:', err) + } + } + ) + } else if (platform === 'linux') { + copyFile( + path.join(binDir, 'bun'), + path.join(binDir, 'bun-x86_64-unknown-linux-gnu'), + (err) => { + if (err) { + console.log('Error Found:', err) + } + } + ) } } catch (err) { // Expect EEXIST error @@ -157,11 +177,15 @@ async function main() { path.join(binDir) ) if (platform === 'win32') { - copyFile(path.join(binDir, 'bun.exe'), path.join(binDir, 'bun-x86_64-pc-windows-msvc.exe'), (err) => { - if (err) { - console.log("Error Found:", err); + copyFile( + path.join(binDir, 'bun.exe'), + path.join(binDir, 'bun-x86_64-pc-windows-msvc.exe'), + (err) => { + if (err) { + console.log('Error Found:', err) + } } - }) + ) } } catch (err) { // Expect EEXIST error @@ -176,52 +200,66 @@ async function main() { await decompress(uvPath, tempBinDir) } try { - copySync( - path.join(tempBinDir, `uv-${uvPlatform}`, 'uv'), - path.join(binDir) - ) + copySync(path.join(tempBinDir, 
`uv-${uvPlatform}`, 'uv'), path.join(binDir)) fs.chmod(path.join(binDir, 'uv'), 0o755, (err) => { if (err) { console.log('Add execution permission failed!', err) } - }); + }) if (platform === 'darwin') { - copyFile(path.join(binDir, 'uv'), path.join(binDir, 'uv-x86_64-apple-darwin'), (err) => { - if (err) { - console.log("Error Found:", err); + copyFile( + path.join(binDir, 'uv'), + path.join(binDir, 'uv-x86_64-apple-darwin'), + (err) => { + if (err) { + console.log('Error Found:', err) + } } - }) - copyFile(path.join(binDir, 'uv'), path.join(binDir, 'uv-aarch64-apple-darwin'), (err) => { - if (err) { - console.log("Error Found:", err); + ) + copyFile( + path.join(binDir, 'uv'), + path.join(binDir, 'uv-aarch64-apple-darwin'), + (err) => { + if (err) { + console.log('Error Found:', err) + } } - }) - copyFile(path.join(binDir, 'uv'), path.join(binDir, 'uv-universal-apple-darwin'), (err) => { - if (err) { - console.log("Error Found:", err); + ) + copyFile( + path.join(binDir, 'uv'), + path.join(binDir, 'uv-universal-apple-darwin'), + (err) => { + if (err) { + console.log('Error Found:', err) + } } - }) + ) } else if (platform === 'linux') { - copyFile(path.join(binDir, 'uv'), path.join(binDir, 'uv-x86_64-unknown-linux-gnu'), (err) => { - if (err) { - console.log("Error Found:", err); + copyFile( + path.join(binDir, 'uv'), + path.join(binDir, 'uv-x86_64-unknown-linux-gnu'), + (err) => { + if (err) { + console.log('Error Found:', err) + } } - }) + ) } } catch (err) { // Expect EEXIST error } try { - copySync( - path.join(tempBinDir, 'uv.exe'), - path.join(binDir) - ) + copySync(path.join(tempBinDir, 'uv.exe'), path.join(binDir)) if (platform === 'win32') { - copyFile(path.join(binDir, 'uv.exe'), path.join(binDir, 'uv-x86_64-pc-windows-msvc.exe'), (err) => { - if (err) { - console.log("Error Found:", err); + copyFile( + path.join(binDir, 'uv.exe'), + path.join(binDir, 'uv-x86_64-pc-windows-msvc.exe'), + (err) => { + if (err) { + console.log('Error Found:', err) + } } - }) + ) } } catch (err) { // Expect EEXIST error diff --git a/scripts/download-lib.mjs b/scripts/download-lib.mjs deleted file mode 100644 index d2086b36e..000000000 --- a/scripts/download-lib.mjs +++ /dev/null @@ -1,86 +0,0 @@ -console.log('Script is running') -// scripts/download-lib.mjs -import https from 'https' -import fs, { mkdirSync } from 'fs' -import os from 'os' -import path from 'path' -import { copySync } from 'cpx' - -function download(url, dest) { - return new Promise((resolve, reject) => { - console.log(`Downloading ${url} to ${dest}`) - const file = fs.createWriteStream(dest) - https - .get(url, (response) => { - console.log(`Response status code: ${response.statusCode}`) - if ( - response.statusCode >= 300 && - response.statusCode < 400 && - response.headers.location - ) { - // Handle redirect - const redirectURL = response.headers.location - console.log(`Redirecting to ${redirectURL}`) - download(redirectURL, dest).then(resolve, reject) // Recursive call - return - } else if (response.statusCode !== 200) { - reject(`Failed to get '${url}' (${response.statusCode})`) - return - } - response.pipe(file) - file.on('finish', () => { - file.close(resolve) - }) - }) - .on('error', (err) => { - fs.unlink(dest, () => reject(err.message)) - }) - }) -} - -async function main() { - console.log('Starting main function') - const platform = os.platform() // 'darwin', 'linux', 'win32' - const arch = os.arch() // 'x64', 'arm64', etc. 
- - if (arch != 'x64') return - - let filename - if (platform == 'linux') - filename = 'libvulkan.so' - else if (platform == 'win32') - filename = 'vulkan-1.dll' - else - return - - const url = `https://catalog.jan.ai/${filename}` - - const libDir = 'src-tauri/resources/lib' - const tempDir = 'scripts/dist' - - try { - mkdirSync('scripts/dist') - } catch (err) { - // Expect EEXIST error if the directory already exists - } - - console.log(`Downloading libvulkan...`) - const savePath = path.join(tempDir, filename) - if (!fs.existsSync(savePath)) { - await download(url, savePath) - } - - // copy to tauri resources - try { - copySync(savePath, libDir) - } catch (err) { - // Expect EEXIST error - } - - console.log('Downloads completed.') -} - -main().catch((err) => { - console.error('Error:', err) - process.exit(1) -}) diff --git a/src-tauri/.cargo/config.toml b/src-tauri/.cargo/config.toml index 830adb1f1..9089e8115 100644 --- a/src-tauri/.cargo/config.toml +++ b/src-tauri/.cargo/config.toml @@ -3,3 +3,20 @@ # see https://github.com/tauri-apps/tauri/pull/4383#issuecomment-1212221864 __TAURI_WORKSPACE__ = "true" ENABLE_SYSTEM_TRAY_ICON = "false" + +[target.aarch64-linux-android] +linker = "aarch64-linux-android21-clang" +ar = "llvm-ar" +rustflags = ["-C", "link-arg=-fuse-ld=lld"] + +[target.armv7-linux-androideabi] +linker = "armv7a-linux-androideabi21-clang" +ar = "llvm-ar" + +[target.x86_64-linux-android] +linker = "x86_64-linux-android21-clang" +ar = "llvm-ar" + +[target.i686-linux-android] +linker = "i686-linux-android21-clang" +ar = "llvm-ar" diff --git a/src-tauri/.gitignore b/src-tauri/.gitignore index 40726cbe0..02bc782bf 100644 --- a/src-tauri/.gitignore +++ b/src-tauri/.gitignore @@ -2,6 +2,7 @@ # will have compiled files and executables /target/ /gen/schemas +/gen/android binaries !binaries/download.sh !binaries/download.bat \ No newline at end of file diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index 85a90422a..da2ca059e 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -85,6 +85,19 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "getrandom 0.3.3", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -149,11 +162,11 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "ash" -version = "0.38.0+1.3.281" +version = "0.37.3+1.3.251" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb44936d800fea8f016d7f2311c6a4f97aebd5dc86f09906139ec848cf3a46f" +checksum = "39e9c3835d686b0a6084ab4234fcd1b07dbf6e4767dce60874b12356a25ecd4a" dependencies = [ - "libloading 0.8.8", + "libloading 0.7.4", ] [[package]] @@ -166,7 +179,7 @@ dependencies = [ "futures-channel", "futures-util", "rand 0.9.2", - "raw-window-handle", + "raw-window-handle 0.6.2", "serde", "serde_repr", "tokio", @@ -510,6 +523,20 @@ name = "bytemuck" version = "1.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f154e572231cb6ba2bd1176980827e3d5dc04cc183a75dea38109fbdd672d29" +dependencies = 
[ + "proc-macro2", + "quote", + "syn 2.0.104", +] [[package]] name = "byteorder" @@ -802,11 +829,22 @@ checksum = "fa95a34622365fa5bbf40b20b75dba8dfa8c94c734aea8ac9a5ca38af14316f1" dependencies = [ "bitflags 2.9.1", "core-foundation 0.10.1", - "core-graphics-types", + "core-graphics-types 0.2.0", "foreign-types 0.5.0", "libc", ] +[[package]] +name = "core-graphics-types" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45390e6114f68f718cc7a830514a96f903cccd70d02a8f6d9f643ac4ba45afaf" +dependencies = [ + "bitflags 1.3.2", + "core-foundation 0.9.4", + "libc", +] + [[package]] name = "core-graphics-types" version = "0.2.0" @@ -845,6 +883,15 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-queue" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.21" @@ -1888,13 +1935,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +dependencies = [ + "bytemuck", + "cfg-if", + "crunchy", +] + [[package]] name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.8", ] [[package]] @@ -2619,6 +2677,15 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4" +[[package]] +name = "malloc_buf" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" +dependencies = [ + "libc", +] + [[package]] name = "markup5ever" version = "0.14.1" @@ -2747,7 +2814,7 @@ dependencies = [ "log", "ndk-sys", "num_enum", - "raw-window-handle", + "raw-window-handle 0.6.2", "thiserror 1.0.69", ] @@ -2869,6 +2936,15 @@ dependencies = [ "libloading 0.8.8", ] +[[package]] +name = "objc" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" +dependencies = [ + "malloc_buf", +] + [[package]] name = "objc-sys" version = "0.3.5" @@ -3177,6 +3253,15 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-src" +version = "300.5.2+3.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d270b79e2926f5150189d475bc7e9d2c69f9c4697b185fa917d5a32b792d21b4" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" version = "0.9.109" @@ -3185,6 +3270,7 @@ checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" dependencies = [ "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] @@ -3900,6 +3986,12 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "raw-window-handle" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2ff9a1f06a88b01621b7ae906ef0211290d1c8a168a15542486a8f61c0833b9" + [[package]] name = "raw-window-handle" version = "0.6.2" @@ -4090,7 +4182,7 @@ 
dependencies = [ "objc2-app-kit", "objc2-core-foundation", "objc2-foundation 0.3.1", - "raw-window-handle", + "raw-window-handle 0.6.2", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -4759,7 +4851,7 @@ dependencies = [ "objc2 0.5.2", "objc2-foundation 0.2.2", "objc2-quartz-core 0.2.2", - "raw-window-handle", + "raw-window-handle 0.6.2", "redox_syscall", "wasm-bindgen", "web-sys", @@ -5028,7 +5120,7 @@ dependencies = [ "objc2-foundation 0.3.1", "once_cell", "parking_lot", - "raw-window-handle", + "raw-window-handle 0.6.2", "scopeguard", "tao-macros", "unicode-segmentation", @@ -5103,7 +5195,7 @@ dependencies = [ "objc2-web-kit", "percent-encoding", "plist", - "raw-window-handle", + "raw-window-handle 0.6.2", "reqwest 0.12.22", "serde", "serde_json", @@ -5233,7 +5325,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37e5858cc7b455a73ab4ea2ebc08b5be33682c00ff1bf4cad5537d4fb62499d9" dependencies = [ "log", - "raw-window-handle", + "raw-window-handle 0.6.2", "rfd", "serde", "serde_json", @@ -5280,6 +5372,7 @@ dependencies = [ "sysinfo", "tauri", "tauri-plugin", + "vulkano", ] [[package]] @@ -5323,6 +5416,7 @@ dependencies = [ "sysinfo", "tauri", "tauri-plugin", + "tauri-plugin-hardware", "thiserror 2.0.12", "tokio", ] @@ -5488,7 +5582,7 @@ dependencies = [ "objc2 0.6.1", "objc2-ui-kit", "objc2-web-kit", - "raw-window-handle", + "raw-window-handle 0.6.2", "serde", "serde_json", "tauri-utils", @@ -5514,7 +5608,7 @@ dependencies = [ "objc2-foundation 0.3.1", "once_cell", "percent-encoding", - "raw-window-handle", + "raw-window-handle 0.6.2", "softbuffer", "tao", "tauri-runtime", @@ -5638,6 +5732,15 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + [[package]] name = "time" version = "0.3.41" @@ -6153,6 +6256,15 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "vk-parse" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81086c28be67a8759cd80cbb3c8f7b520e0874605fc5eb74d5a1c9c2d1878e79" +dependencies = [ + "xml-rs", +] + [[package]] name = "vswhom" version = "0.1.0" @@ -6182,6 +6294,48 @@ dependencies = [ "memchr", ] +[[package]] +name = "vulkano" +version = "0.34.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a26f2897a92a30931fceef3d6d1156a1089d9681fb2be73be92bbf24ae2ddf2" +dependencies = [ + "ahash 0.8.12", + "ash", + "bytemuck", + "core-graphics-types 0.1.3", + "crossbeam-queue", + "half", + "heck 0.4.1", + "indexmap 2.10.0", + "libloading 0.8.8", + "objc", + "once_cell", + "parking_lot", + "proc-macro2", + "quote", + "raw-window-handle 0.5.2", + "regex", + "serde", + "serde_json", + "smallvec", + "thread_local", + "vk-parse", + "vulkano-macros", +] + +[[package]] +name = "vulkano-macros" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52be622d364272fd77e298e7f68e8547ae66e7687cb86eb85335412cee7e3965" +dependencies = [ + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "walkdir" version = "2.5.0" @@ -6516,7 +6670,7 @@ dependencies = [ "objc2-app-kit", "objc2-core-foundation", "objc2-foundation 0.3.1", - "raw-window-handle", + 
"raw-window-handle 0.6.2", "windows-sys 0.59.0", "windows-version", ] @@ -7088,7 +7242,7 @@ dependencies = [ "objc2-web-kit", "once_cell", "percent-encoding", - "raw-window-handle", + "raw-window-handle 0.6.2", "sha2", "soup3", "tao-macros", @@ -7143,6 +7297,12 @@ dependencies = [ "rustix", ] +[[package]] +name = "xml-rs" +version = "0.8.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fd8403733700263c6eb89f192880191f1b83e332f7a20371ddcf421c4a337c7" + [[package]] name = "yoke" version = "0.8.0" diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index 51e84f880..43738b032 100644 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -22,7 +22,19 @@ default = [ "tauri/macos-private-api", "tauri/tray-icon", "tauri/test", - "tauri/custom-protocol" + "tauri/custom-protocol", + "desktop", +] +hardware = ["dep:tauri-plugin-hardware"] +deep-link = ["dep:tauri-plugin-deep-link"] +desktop = [ + "deep-link", + "hardware" +] +mobile = [ + "tauri/protocol-asset", + "tauri/test", + "tauri/wry", ] test-tauri = [ "tauri/wry", @@ -31,6 +43,7 @@ test-tauri = [ "tauri/macos-private-api", "tauri/tray-icon", "tauri/test", + "desktop", ] [build-dependencies] @@ -46,7 +59,7 @@ hyper = { version = "0.14", features = ["server"] } jan-utils = { path = "./utils" } libloading = "0.8.7" log = "0.4" -reqwest = { version = "0.11", features = ["json", "blocking", "stream"] } +reqwest = { version = "0.11", features = ["json", "blocking", "stream", "native-tls-vendored"] } rmcp = { version = "0.6.0", features = [ "client", "transport-sse-client", @@ -60,11 +73,11 @@ serde_json = "1.0" serde_yaml = "0.9.34" tar = "0.4" zip = "0.6" -tauri-plugin-deep-link = { version = "2.3.4" } tauri-plugin-dialog = "2.2.1" -tauri-plugin-hardware = { path = "./plugins/tauri-plugin-hardware" } -tauri-plugin-http = { version = "2", features = ["unsafe-headers"] } +tauri-plugin-deep-link = { version = "2", optional = true } +tauri-plugin-hardware = { path = "./plugins/tauri-plugin-hardware", optional = true } tauri-plugin-llamacpp = { path = "./plugins/tauri-plugin-llamacpp" } +tauri-plugin-http = { version = "2", features = ["unsafe-headers"] } tauri-plugin-log = "2.0.0-rc" tauri-plugin-opener = "2.2.7" tauri-plugin-os = "2.2.1" @@ -94,4 +107,26 @@ windows-sys = { version = "0.60.2", features = ["Win32_Storage_FileSystem"] } [target.'cfg(not(any(target_os = "android", target_os = "ios")))'.dependencies] tauri-plugin-updater = "2" once_cell = "1.18" -tauri-plugin-single-instance = { version = "2.3.4", features = ["deep-link"] } +tauri-plugin-single-instance = { version = "2", features = ["deep-link"] } + +[target.'cfg(any(target_os = "android", target_os = "ios"))'.dependencies] +tauri-plugin-dialog = { version = "2.2.1", default-features = false } +tauri-plugin-http = { version = "2", default-features = false } +tauri-plugin-log = { version = "2.0.0-rc", default-features = false } +tauri-plugin-opener = { version = "2.2.7", default-features = false } +tauri-plugin-os = { version = "2.2.1", default-features = false } +tauri-plugin-shell = { version = "2.2.0", default-features = false } +tauri-plugin-store = { version = "2", default-features = false } + +# Release profile optimizations for minimal binary size +[profile.release] +opt-level = "z" # Optimize for size +lto = "fat" # Aggressive Link Time Optimization +strip = "symbols" # Strip debug symbols for smaller binary +codegen-units = 1 # Reduce parallel codegen for better optimization +panic = "abort" # Don't unwind on panic, saves space 
+overflow-checks = false # Disable overflow checks for size +debug = false # No debug info +debug-assertions = false # No debug assertions +incremental = false # Disable incremental compilation for release +rpath = false # Don't include rpath diff --git a/src-tauri/capabilities/default.json b/src-tauri/capabilities/default.json index e594bf023..5c5e7d48d 100644 --- a/src-tauri/capabilities/default.json +++ b/src-tauri/capabilities/default.json @@ -18,11 +18,10 @@ "os:default", "opener:default", "log:default", - "updater:default", "dialog:default", - "deep-link:default", "core:webview:allow-create-webview-window", "opener:allow-open-url", + "store:default", { "identifier": "http:default", "allow": [ @@ -54,9 +53,6 @@ "url": "http://0.0.0.0:*" } ] - }, - "store:default", - "llamacpp:default", - "hardware:default" + } ] } diff --git a/src-tauri/capabilities/desktop.json b/src-tauri/capabilities/desktop.json new file mode 100644 index 000000000..41be646d3 --- /dev/null +++ b/src-tauri/capabilities/desktop.json @@ -0,0 +1,63 @@ +{ + "$schema": "../gen/schemas/desktop-schema.json", + "identifier": "desktop", + "description": "enables the default permissions for desktop platforms", + "windows": ["main"], + "remote": { + "urls": ["http://*"] + }, + "platforms": ["linux", "macOS", "windows"], + "permissions": [ + "core:default", + "core:webview:allow-set-webview-zoom", + "core:window:allow-start-dragging", + "core:window:allow-set-theme", + "shell:allow-spawn", + "shell:allow-open", + "core:app:allow-set-app-theme", + "core:window:allow-set-focus", + "os:default", + "opener:default", + "log:default", + "dialog:default", + "core:webview:allow-create-webview-window", + "opener:allow-open-url", + "store:default", + "llamacpp:default", + "deep-link:default", + "hardware:default", + + { + "identifier": "http:default", + "allow": [ + { + "url": "https://*:*" + }, + { + "url": "http://*:*" + } + ], + "deny": [] + }, + { + "identifier": "shell:allow-execute", + "allow": [] + }, + { + "identifier": "opener:allow-open-url", + "description": "opens the default permissions for the core module", + "windows": ["*"], + "allow": [ + { + "url": "https://*" + }, + { + "url": "http://127.0.0.1:*" + }, + { + "url": "http://0.0.0.0:*" + } + ] + } + ] +} \ No newline at end of file diff --git a/src-tauri/capabilities/mobile.json b/src-tauri/capabilities/mobile.json new file mode 100644 index 000000000..fdbda476a --- /dev/null +++ b/src-tauri/capabilities/mobile.json @@ -0,0 +1,58 @@ +{ + "$schema": "../gen/schemas/mobile-schema.json", + "identifier": "mobile", + "description": "enables the default permissions for mobile platforms", + "windows": ["main"], + "remote": { + "urls": ["http://*"] + }, + "permissions": [ + "core:default", + "core:webview:allow-set-webview-zoom", + "core:window:allow-start-dragging", + "core:window:allow-set-theme", + "shell:allow-spawn", + "shell:allow-open", + "core:app:allow-set-app-theme", + "core:window:allow-set-focus", + "os:default", + "opener:default", + "log:default", + "dialog:default", + "core:webview:allow-create-webview-window", + "opener:allow-open-url", + "store:default", + { + "identifier": "http:default", + "allow": [ + { + "url": "https://*:*" + }, + { + "url": "http://*:*" + } + ], + "deny": [] + }, + { + "identifier": "shell:allow-execute", + "allow": [] + }, + { + "identifier": "opener:allow-open-url", + "description": "opens the default permissions for the core module", + "windows": ["*"], + "allow": [ + { + "url": "https://*" + }, + { + "url": "http://127.0.0.1:*" + }, + 
{ + "url": "http://0.0.0.0:*" + } + ] + } + ] +} \ No newline at end of file diff --git a/src-tauri/capabilities/system-monitor-window.json b/src-tauri/capabilities/system-monitor-window.json index 740bb82cc..68a75e9fb 100644 --- a/src-tauri/capabilities/system-monitor-window.json +++ b/src-tauri/capabilities/system-monitor-window.json @@ -3,6 +3,7 @@ "identifier": "system-monitor-window", "description": "enables permissions for the system monitor window", "windows": ["system-monitor-window"], + "platforms": ["linux", "macOS", "windows"], "permissions": [ "core:default", "core:window:allow-start-dragging", diff --git a/src-tauri/gen/android/app/src/main/assets/resources/LICENSE b/src-tauri/gen/android/app/src/main/assets/resources/LICENSE new file mode 100644 index 000000000..d614b967f --- /dev/null +++ b/src-tauri/gen/android/app/src/main/assets/resources/LICENSE @@ -0,0 +1,19 @@ +Jan + +Copyright 2025 Menlo Research + +This product includes software developed by Menlo Research (https://menlo.ai). + +Licensed under the Apache License, Version 2.0 (the "License"); +You may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Attribution is requested in user-facing documentation and materials, where appropriate. \ No newline at end of file diff --git a/src-tauri/plugins/tauri-plugin-hardware/Cargo.toml b/src-tauri/plugins/tauri-plugin-hardware/Cargo.toml index eb74d32d1..5e6f983fc 100644 --- a/src-tauri/plugins/tauri-plugin-hardware/Cargo.toml +++ b/src-tauri/plugins/tauri-plugin-hardware/Cargo.toml @@ -11,15 +11,19 @@ exclude = ["/examples", "/dist-js", "/guest-js", "/node_modules"] links = "tauri-plugin-hardware" [dependencies] -ash = "0.38.0" libc = "0.2" log = "0.4" -nvml-wrapper = "0.10.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" sysinfo = "0.34.2" tauri = { version = "2.5.0", default-features = false, features = ["test"] } +# Desktop-only dependencies +[target.'cfg(not(any(target_os = "android", target_os = "ios")))'.dependencies] +vulkano = "0.34" +ash = "0.37" +nvml-wrapper = "0.10.0" + # Windows-specific dependencies [target.'cfg(windows)'.dependencies] libloading = "0.8" diff --git a/src-tauri/plugins/tauri-plugin-hardware/src/commands.rs b/src-tauri/plugins/tauri-plugin-hardware/src/commands.rs index 56e78f1c1..ac13eb7f2 100644 --- a/src-tauri/plugins/tauri-plugin-hardware/src/commands.rs +++ b/src-tauri/plugins/tauri-plugin-hardware/src/commands.rs @@ -1,14 +1,12 @@ use crate::{ - helpers::get_jan_libvulkan_path, types::{CpuStaticInfo, SystemInfo, SystemUsage}, vendor::{nvidia, vulkan}, SYSTEM_INFO, }; use sysinfo::System; -use tauri::Runtime; #[tauri::command] -pub fn get_system_info(app: tauri::AppHandle) -> SystemInfo { +pub fn get_system_info() -> SystemInfo { SYSTEM_INFO .get_or_init(|| { let mut system = System::new(); @@ -19,15 +17,7 @@ pub fn get_system_info(app: tauri::AppHandle) -> SystemInfo { gpu_map.insert(gpu.uuid.clone(), gpu); } - // try system vulkan first - let paths = vec!["".to_string(), get_jan_libvulkan_path(app.clone())]; - let mut vulkan_gpus = vec![]; - for path in paths { - vulkan_gpus = vulkan::get_vulkan_gpus(&path); - if 
!vulkan_gpus.is_empty() { - break; - } - } + let vulkan_gpus = vulkan::get_vulkan_gpus(); for gpu in vulkan_gpus { match gpu_map.get_mut(&gpu.uuid) { @@ -64,7 +54,7 @@ pub fn get_system_info(app: tauri::AppHandle) -> SystemInfo { } #[tauri::command] -pub fn get_system_usage(app: tauri::AppHandle) -> SystemUsage { +pub fn get_system_usage() -> SystemUsage { let mut system = System::new(); system.refresh_memory(); @@ -81,7 +71,7 @@ pub fn get_system_usage(app: tauri::AppHandle) -> SystemUsage { cpu: cpu_usage, used_memory: system.used_memory() / 1024 / 1024, // bytes to MiB, total_memory: system.total_memory() / 1024 / 1024, // bytes to MiB, - gpus: get_system_info(app.clone()) + gpus: get_system_info() .gpus .iter() .map(|gpu| gpu.get_usage()) diff --git a/src-tauri/plugins/tauri-plugin-hardware/src/helpers.rs b/src-tauri/plugins/tauri-plugin-hardware/src/helpers.rs deleted file mode 100644 index 22bcc8669..000000000 --- a/src-tauri/plugins/tauri-plugin-hardware/src/helpers.rs +++ /dev/null @@ -1,20 +0,0 @@ -use tauri::{path::BaseDirectory, Manager, Runtime}; - -pub fn get_jan_libvulkan_path(app: tauri::AppHandle) -> String { - let lib_name = if cfg!(target_os = "windows") { - "vulkan-1.dll" - } else if cfg!(target_os = "linux") { - "libvulkan.so" - } else { - return "".to_string(); - }; - - // NOTE: this does not work in test mode (mock app) - match app.path().resolve( - format!("resources/lib/{}", lib_name), - BaseDirectory::Resource, - ) { - Ok(lib_path) => lib_path.to_string_lossy().to_string(), - Err(_) => "".to_string(), - } -} diff --git a/src-tauri/plugins/tauri-plugin-hardware/src/lib.rs b/src-tauri/plugins/tauri-plugin-hardware/src/lib.rs index 228a3731e..3a069892e 100644 --- a/src-tauri/plugins/tauri-plugin-hardware/src/lib.rs +++ b/src-tauri/plugins/tauri-plugin-hardware/src/lib.rs @@ -2,12 +2,10 @@ mod commands; mod constants; pub mod cpu; pub mod gpu; -mod helpers; mod types; pub mod vendor; pub use constants::*; -pub use helpers::*; pub use types::*; use std::sync::OnceLock; diff --git a/src-tauri/plugins/tauri-plugin-hardware/src/tests.rs b/src-tauri/plugins/tauri-plugin-hardware/src/tests.rs index 1d4975104..f27554579 100644 --- a/src-tauri/plugins/tauri-plugin-hardware/src/tests.rs +++ b/src-tauri/plugins/tauri-plugin-hardware/src/tests.rs @@ -4,15 +4,13 @@ use tauri::test::mock_app; #[test] fn test_system_info() { - let app = mock_app(); - let info = get_system_info(app.handle().clone()); + let info = get_system_info(); println!("System Static Info: {:?}", info); } #[test] fn test_system_usage() { - let app = mock_app(); - let usage = get_system_usage(app.handle().clone()); + let usage = get_system_usage(); println!("System Usage Info: {:?}", usage); } @@ -23,23 +21,23 @@ mod cpu_tests { #[test] fn test_cpu_static_info_new() { let cpu_info = CpuStaticInfo::new(); - + // Test that all fields are populated assert!(!cpu_info.name.is_empty()); assert_ne!(cpu_info.name, "unknown"); // Should have detected a CPU name assert!(cpu_info.core_count > 0); assert!(!cpu_info.arch.is_empty()); - + // Architecture should be one of the expected values assert!( - cpu_info.arch == "aarch64" || - cpu_info.arch == "arm64" || - cpu_info.arch == "x86_64" || - cpu_info.arch == std::env::consts::ARCH + cpu_info.arch == "aarch64" + || cpu_info.arch == "arm64" + || cpu_info.arch == "x86_64" + || cpu_info.arch == std::env::consts::ARCH ); - + // Extensions should be a valid list (can be empty on non-x86) - + println!("CPU Info: {:?}", cpu_info); } @@ -48,7 +46,7 @@ mod cpu_tests { // Test that 
multiple calls return consistent information let info1 = CpuStaticInfo::new(); let info2 = CpuStaticInfo::new(); - + assert_eq!(info1.name, info2.name); assert_eq!(info1.core_count, info2.core_count); assert_eq!(info1.arch, info2.arch); @@ -72,19 +70,41 @@ mod cpu_tests { #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] fn test_x86_extensions() { let cpu_info = CpuStaticInfo::new(); - + // On x86/x86_64, we should always have at least FPU assert!(cpu_info.extensions.contains(&"fpu".to_string())); - + // Check that all extensions are valid x86 feature names let valid_extensions = [ - "fpu", "mmx", "sse", "sse2", "sse3", "ssse3", "sse4_1", "sse4_2", - "pclmulqdq", "avx", "avx2", "avx512_f", "avx512_dq", "avx512_ifma", - "avx512_pf", "avx512_er", "avx512_cd", "avx512_bw", "avx512_vl", - "avx512_vbmi", "avx512_vbmi2", "avx512_vnni", "avx512_bitalg", - "avx512_vpopcntdq", "avx512_vp2intersect", "aes", "f16c" + "fpu", + "mmx", + "sse", + "sse2", + "sse3", + "ssse3", + "sse4_1", + "sse4_2", + "pclmulqdq", + "avx", + "avx2", + "avx512_f", + "avx512_dq", + "avx512_ifma", + "avx512_pf", + "avx512_er", + "avx512_cd", + "avx512_bw", + "avx512_vl", + "avx512_vbmi", + "avx512_vbmi2", + "avx512_vnni", + "avx512_bitalg", + "avx512_vpopcntdq", + "avx512_vp2intersect", + "aes", + "f16c", ]; - + for ext in &cpu_info.extensions { assert!( valid_extensions.contains(&ext.as_str()), @@ -98,7 +118,7 @@ mod cpu_tests { #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] fn test_non_x86_extensions() { let cpu_info = CpuStaticInfo::new(); - + // On non-x86 architectures, extensions should be empty assert!(cpu_info.extensions.is_empty()); } @@ -106,15 +126,15 @@ mod cpu_tests { #[test] fn test_arch_detection() { let cpu_info = CpuStaticInfo::new(); - + // Architecture should be a valid string assert!(!cpu_info.arch.is_empty()); - + // Should be one of the common architectures let common_archs = ["x86_64", "aarch64", "arm", "arm64", "x86"]; let is_common_arch = common_archs.iter().any(|&arch| cpu_info.arch == arch); let is_compile_time_arch = cpu_info.arch == std::env::consts::ARCH; - + assert!( is_common_arch || is_compile_time_arch, "Unexpected architecture: {}", @@ -125,11 +145,11 @@ mod cpu_tests { #[test] fn test_cpu_info_serialization() { let cpu_info = CpuStaticInfo::new(); - + // Test that the struct can be serialized (since it derives Serialize) let serialized = serde_json::to_string(&cpu_info); assert!(serialized.is_ok()); - + let json_str = serialized.unwrap(); assert!(json_str.contains("name")); assert!(json_str.contains("core_count")); diff --git a/src-tauri/plugins/tauri-plugin-hardware/src/vendor/amd.rs b/src-tauri/plugins/tauri-plugin-hardware/src/vendor/amd.rs index 62d90ca1b..7521fd2b0 100644 --- a/src-tauri/plugins/tauri-plugin-hardware/src/vendor/amd.rs +++ b/src-tauri/plugins/tauri-plugin-hardware/src/vendor/amd.rs @@ -126,13 +126,13 @@ mod windows_impl { pub iOSDisplayIndex: c_int, } - type ADL_MAIN_MALLOC_CALLBACK = Option *mut c_void>; - type ADL_MAIN_CONTROL_CREATE = unsafe extern "C" fn(ADL_MAIN_MALLOC_CALLBACK, c_int) -> c_int; - type ADL_MAIN_CONTROL_DESTROY = unsafe extern "C" fn() -> c_int; - type ADL_ADAPTER_NUMBEROFADAPTERS_GET = unsafe extern "C" fn(*mut c_int) -> c_int; - type ADL_ADAPTER_ADAPTERINFO_GET = unsafe extern "C" fn(*mut AdapterInfo, c_int) -> c_int; - type ADL_ADAPTER_ACTIVE_GET = unsafe extern "C" fn(c_int, *mut c_int) -> c_int; - type ADL_GET_DEDICATED_VRAM_USAGE = + type AdlMainMallocCallback = Option *mut c_void>; + type ADLMAINCONTROLCREATE = unsafe 
extern "C" fn(AdlMainMallocCallback, c_int) -> c_int; + type ADLMAINCONTROLDESTROY = unsafe extern "C" fn() -> c_int; + type AdlAdapterNumberofadaptersGet = unsafe extern "C" fn(*mut c_int) -> c_int; + type AdlAdapterAdapterinfoGet = unsafe extern "C" fn(*mut AdapterInfo, c_int) -> c_int; + type AdlAdapterActiveGet = unsafe extern "C" fn(c_int, *mut c_int) -> c_int; + type AdlGetDedicatedVramUsage = unsafe extern "C" fn(*mut c_void, c_int, *mut c_int) -> c_int; // === ADL Memory Allocator === @@ -144,24 +144,24 @@ mod windows_impl { unsafe { let lib = Library::new("atiadlxx.dll").or_else(|_| Library::new("atiadlxy.dll"))?; - let adl_main_control_create: Symbol = - lib.get(b"ADL_Main_Control_Create")?; - let adl_main_control_destroy: Symbol = - lib.get(b"ADL_Main_Control_Destroy")?; - let adl_adapter_number_of_adapters_get: Symbol = - lib.get(b"ADL_Adapter_NumberOfAdapters_Get")?; - let adl_adapter_adapter_info_get: Symbol = - lib.get(b"ADL_Adapter_AdapterInfo_Get")?; - let adl_adapter_active_get: Symbol = - lib.get(b"ADL_Adapter_Active_Get")?; - let adl_get_dedicated_vram_usage: Symbol = + let adlmaincontrolcreate: Symbol = + lib.get(b"AdlMainControlCreate")?; + let adlmaincontroldestroy: Symbol = + lib.get(b"AdlMainControlDestroy")?; + let adl_adapter_number_of_adapters_get: Symbol = + lib.get(b"AdlAdapterNumberofadaptersGet")?; + let adl_adapter_adapter_info_get: Symbol = + lib.get(b"AdlAdapterAdapterinfoGet")?; + let AdlAdapterActiveGet: Symbol = + lib.get(b"AdlAdapterActiveGet")?; + let AdlGetDedicatedVramUsage: Symbol = lib.get(b"ADL2_Adapter_DedicatedVRAMUsage_Get")?; // TODO: try to put nullptr here. then we don't need direct libc dep - if adl_main_control_create(Some(adl_malloc), 1) != 0 { + if adlmaincontrolcreate(Some(adl_malloc), 1) != 0 { return Err("ADL initialization error!".into()); } - // NOTE: after this call, we must call ADL_Main_Control_Destroy + // NOTE: after this call, we must call AdlMainControlDestroy // whenver we encounter an error let mut num_adapters: c_int = 0; @@ -184,11 +184,11 @@ mod windows_impl { for adapter in adapter_info.iter() { let mut is_active = 0; - adl_adapter_active_get(adapter.iAdapterIndex, &mut is_active); + AdlAdapterActiveGet(adapter.iAdapterIndex, &mut is_active); if is_active != 0 { let mut vram_mb = 0; - let _ = adl_get_dedicated_vram_usage( + let _ = AdlGetDedicatedVramUsage( ptr::null_mut(), adapter.iAdapterIndex, &mut vram_mb, @@ -202,7 +202,7 @@ mod windows_impl { } } - adl_main_control_destroy(); + adlmaincontroldestroy(); Ok(vram_usages) } diff --git a/src-tauri/plugins/tauri-plugin-hardware/src/vendor/nvidia.rs b/src-tauri/plugins/tauri-plugin-hardware/src/vendor/nvidia.rs index 006ca66ba..083c0fdae 100644 --- a/src-tauri/plugins/tauri-plugin-hardware/src/vendor/nvidia.rs +++ b/src-tauri/plugins/tauri-plugin-hardware/src/vendor/nvidia.rs @@ -1,7 +1,13 @@ -use crate::types::{GpuInfo, GpuUsage, Vendor}; -use nvml_wrapper::{error::NvmlError, Nvml}; -use std::sync::OnceLock; +use crate::types::{GpuInfo, GpuUsage}; +#[cfg(not(any(target_os = "android", target_os = "ios")))] +use { + crate::types::Vendor, + nvml_wrapper::{error::NvmlError, Nvml}, + std::sync::OnceLock, +}; + +#[cfg(not(any(target_os = "android", target_os = "ios")))] static NVML: OnceLock> = OnceLock::new(); #[derive(Debug, Clone, serde::Serialize)] @@ -10,11 +16,13 @@ pub struct NvidiaInfo { pub compute_capability: String, } +#[cfg(not(any(target_os = "android", target_os = "ios")))] fn get_nvml() -> Option<&'static Nvml> { NVML.get_or_init(|| { + // Try to initialize 
NVML, with fallback for Linux let result = Nvml::init().or_else(|e| { - // fallback if cfg!(target_os = "linux") { + log::debug!("NVML init failed, trying Linux fallback: {}", e); let lib_path = std::ffi::OsStr::new("libnvidia-ml.so.1"); Nvml::builder().lib_path(lib_path).init() } else { @@ -22,11 +30,13 @@ fn get_nvml() -> Option<&'static Nvml> { } }); - // NvmlError doesn't implement Copy, so we have to store an Option in OnceLock match result { - Ok(nvml) => Some(nvml), + Ok(nvml) => { + log::debug!("NVML initialized successfully"); + Some(nvml) + } Err(e) => { - log::error!("Unable to initialize NVML: {}", e); + log::debug!("Unable to initialize NVML: {}", e); None } } @@ -36,70 +46,111 @@ fn get_nvml() -> Option<&'static Nvml> { impl GpuInfo { pub fn get_usage_nvidia(&self) -> GpuUsage { - let index = match self.nvidia_info { - Some(ref nvidia_info) => nvidia_info.index, - None => { - log::error!("get_usage_nvidia() called on non-NVIDIA GPU"); - return self.get_usage_unsupported(); - } - }; - let closure = || -> Result { - let nvml = get_nvml().ok_or(NvmlError::Unknown)?; - let device = nvml.device_by_index(index)?; - let mem_info = device.memory_info()?; - Ok(GpuUsage { - uuid: self.uuid.clone(), - used_memory: mem_info.used / 1024 / 1024, // bytes to MiB - total_memory: mem_info.total / 1024 / 1024, // bytes to MiB - }) - }; - closure().unwrap_or_else(|e| { - log::error!("Failed to get memory usage for NVIDIA GPU {}: {}", index, e); - self.get_usage_unsupported() + #[cfg(any(target_os = "android", target_os = "ios"))] + { + log::warn!("NVIDIA GPU usage detection is not supported on mobile platforms"); + return self.get_usage_unsupported(); + } + + #[cfg(not(any(target_os = "android", target_os = "ios")))] + { + let index = match &self.nvidia_info { + Some(nvidia_info) => nvidia_info.index, + None => { + log::error!("get_usage_nvidia() called on non-NVIDIA GPU"); + return self.get_usage_unsupported(); + } + }; + + self.get_nvidia_memory_usage(index) + .unwrap_or_else(|e| { + log::error!("Failed to get memory usage for NVIDIA GPU {}: {}", index, e); + self.get_usage_unsupported() + }) + } + } + + #[cfg(not(any(target_os = "android", target_os = "ios")))] + fn get_nvidia_memory_usage(&self, index: u32) -> Result { + let nvml = get_nvml().ok_or(NvmlError::Unknown)?; + let device = nvml.device_by_index(index)?; + let mem_info = device.memory_info()?; + + Ok(GpuUsage { + uuid: self.uuid.clone(), + used_memory: mem_info.used / (1024 * 1024), // bytes to MiB + total_memory: mem_info.total / (1024 * 1024), // bytes to MiB }) } } pub fn get_nvidia_gpus() -> Vec { - let closure = || -> Result, NvmlError> { - let nvml = get_nvml().ok_or(NvmlError::Unknown)?; - let num_gpus = nvml.device_count()?; - let driver_version = nvml.sys_driver_version()?; + #[cfg(any(target_os = "android", target_os = "ios"))] + { + // On mobile platforms, NVIDIA GPU detection is not supported + log::info!("NVIDIA GPU detection is not supported on mobile platforms"); + vec![] + } - let mut gpus = Vec::with_capacity(num_gpus as usize); - for i in 0..num_gpus { - let device = nvml.device_by_index(i)?; - gpus.push(GpuInfo { - name: device.name()?, - total_memory: device.memory_info()?.total / 1024 / 1024, // bytes to MiB - vendor: Vendor::NVIDIA, - uuid: { - let mut uuid = device.uuid()?; - if uuid.starts_with("GPU-") { - uuid = uuid[4..].to_string(); - } - uuid - }, - driver_version: driver_version.clone(), - nvidia_info: Some(NvidiaInfo { - index: i, - compute_capability: { - let cc = device.cuda_compute_capability()?; - 
format!("{}.{}", cc.major, cc.minor) - }, - }), - vulkan_info: None, - }); - } - - Ok(gpus) - }; - - match closure() { - Ok(gpus) => gpus, - Err(e) => { - log::error!("Failed to get NVIDIA GPUs: {}", e); - vec![] - } + #[cfg(not(any(target_os = "android", target_os = "ios")))] + { + get_nvidia_gpus_internal() } } + +#[cfg(not(any(target_os = "android", target_os = "ios")))] +fn get_nvidia_gpus_internal() -> Vec { + let nvml = match get_nvml() { + Some(nvml) => nvml, + None => { + log::debug!("NVML not available"); + return vec![]; + } + }; + + let (num_gpus, driver_version) = match (nvml.device_count(), nvml.sys_driver_version()) { + (Ok(count), Ok(version)) => (count, version), + (Err(e), _) | (_, Err(e)) => { + log::error!("Failed to get NVIDIA system info: {}", e); + return vec![]; + } + }; + + let mut gpus = Vec::with_capacity(num_gpus as usize); + + for i in 0..num_gpus { + match create_gpu_info(nvml, i, &driver_version) { + Ok(gpu_info) => gpus.push(gpu_info), + Err(e) => log::warn!("Failed to get info for NVIDIA GPU {}: {}", i, e), + } + } + + gpus +} + +#[cfg(not(any(target_os = "android", target_os = "ios")))] +fn create_gpu_info(nvml: &Nvml, index: u32, driver_version: &str) -> Result { + let device = nvml.device_by_index(index)?; + let memory_info = device.memory_info()?; + let compute_capability = device.cuda_compute_capability()?; + + let uuid = device.uuid()?; + let clean_uuid = if uuid.starts_with("GPU-") { + uuid[4..].to_string() + } else { + uuid + }; + + Ok(GpuInfo { + name: device.name()?, + total_memory: memory_info.total / (1024 * 1024), // bytes to MiB + vendor: Vendor::NVIDIA, + uuid: clean_uuid, + driver_version: driver_version.to_string(), + nvidia_info: Some(NvidiaInfo { + index, + compute_capability: format!("{}.{}", compute_capability.major, compute_capability.minor), + }), + vulkan_info: None, + }) +} diff --git a/src-tauri/plugins/tauri-plugin-hardware/src/vendor/tests.rs b/src-tauri/plugins/tauri-plugin-hardware/src/vendor/tests.rs index 078efe91b..d683e4d91 100644 --- a/src-tauri/plugins/tauri-plugin-hardware/src/vendor/tests.rs +++ b/src-tauri/plugins/tauri-plugin-hardware/src/vendor/tests.rs @@ -12,10 +12,122 @@ fn test_get_nvidia_gpus() { #[test] fn test_get_vulkan_gpus() { - let gpus = vulkan::get_vulkan_gpus(""); + let gpus = vulkan::get_vulkan_gpus(); for (i, gpu) in gpus.iter().enumerate() { println!("GPU {}:", i); println!(" {:?}", gpu); println!(" {:?}", gpu.get_usage()); } } + +#[cfg(not(any(target_os = "android", target_os = "ios")))] +#[test] +fn test_get_vulkan_gpus_on_desktop() { + let gpus = vulkan::get_vulkan_gpus(); + + // Test that function returns without panicking on desktop platforms + assert!(gpus.len() >= 0); + + // If GPUs are found, verify they have valid properties + for (i, gpu) in gpus.iter().enumerate() { + println!("Desktop GPU {}:", i); + println!(" Name: {}", gpu.name); + println!(" Vendor: {:?}", gpu.vendor); + println!(" Total Memory: {} MB", gpu.total_memory); + println!(" UUID: {}", gpu.uuid); + println!(" Driver Version: {}", gpu.driver_version); + + // Verify that GPU properties are not empty/default values + assert!(!gpu.name.is_empty(), "GPU name should not be empty"); + assert!(!gpu.uuid.is_empty(), "GPU UUID should not be empty"); + + // Test vulkan-specific info is present + if let Some(vulkan_info) = &gpu.vulkan_info { + println!(" Vulkan API Version: {}", vulkan_info.api_version); + println!(" Device Type: {}", vulkan_info.device_type); + assert!(!vulkan_info.api_version.is_empty(), "Vulkan API version should not 
be empty"); + assert!(!vulkan_info.device_type.is_empty(), "Device type should not be empty"); + } + } +} + +#[cfg(target_os = "android")] +#[test] +fn test_get_vulkan_gpus_on_android() { + let gpus = vulkan::get_vulkan_gpus(); + + // Test that function returns without panicking on Android + assert!(gpus.len() >= 0); + + // Android-specific validation + for (i, gpu) in gpus.iter().enumerate() { + println!("Android GPU {}:", i); + println!(" Name: {}", gpu.name); + println!(" Vendor: {:?}", gpu.vendor); + println!(" Total Memory: {} MB", gpu.total_memory); + println!(" UUID: {}", gpu.uuid); + println!(" Driver Version: {}", gpu.driver_version); + + // Verify C string parsing works correctly with i8 on Android + assert!(!gpu.name.is_empty(), "GPU name should not be empty on Android"); + assert!(!gpu.uuid.is_empty(), "GPU UUID should not be empty on Android"); + + // Android devices should typically have Adreno, Mali, or PowerVR GPUs + // The name parsing should handle i8 char arrays correctly + assert!( + gpu.name.chars().all(|c| c.is_ascii() || c.is_ascii_control()), + "GPU name should contain valid characters when parsed from i8 array" + ); + + if let Some(vulkan_info) = &gpu.vulkan_info { + println!(" Vulkan API Version: {}", vulkan_info.api_version); + println!(" Device Type: {}", vulkan_info.device_type); + // Verify API version parsing works with Android's i8 char arrays + assert!( + vulkan_info.api_version.matches('.').count() >= 2, + "API version should be in format X.Y.Z" + ); + } + } +} + +#[cfg(target_os = "ios")] +#[test] +fn test_get_vulkan_gpus_on_ios() { + let gpus = vulkan::get_vulkan_gpus(); + + // Note: iOS doesn't support Vulkan natively, so this might return empty + // But the function should still work without crashing + assert!(gpus.len() >= 0); + + // iOS-specific validation (if any Vulkan implementation is available via MoltenVK) + for (i, gpu) in gpus.iter().enumerate() { + println!("iOS GPU {}:", i); + println!(" Name: {}", gpu.name); + println!(" Vendor: {:?}", gpu.vendor); + println!(" Total Memory: {} MB", gpu.total_memory); + println!(" UUID: {}", gpu.uuid); + println!(" Driver Version: {}", gpu.driver_version); + + // Verify C string parsing works correctly with i8 on iOS + assert!(!gpu.name.is_empty(), "GPU name should not be empty on iOS"); + assert!(!gpu.uuid.is_empty(), "GPU UUID should not be empty on iOS"); + + // iOS devices should typically have Apple GPU (if Vulkan is available via MoltenVK) + // The name parsing should handle i8 char arrays correctly + assert!( + gpu.name.chars().all(|c| c.is_ascii() || c.is_ascii_control()), + "GPU name should contain valid characters when parsed from i8 array" + ); + + if let Some(vulkan_info) = &gpu.vulkan_info { + println!(" Vulkan API Version: {}", vulkan_info.api_version); + println!(" Device Type: {}", vulkan_info.device_type); + // Verify API version parsing works with iOS's i8 char arrays + assert!( + vulkan_info.api_version.matches('.').count() >= 2, + "API version should be in format X.Y.Z" + ); + } + } +} diff --git a/src-tauri/plugins/tauri-plugin-hardware/src/vendor/vulkan.rs b/src-tauri/plugins/tauri-plugin-hardware/src/vendor/vulkan.rs index 6a9bf21aa..372e11037 100644 --- a/src-tauri/plugins/tauri-plugin-hardware/src/vendor/vulkan.rs +++ b/src-tauri/plugins/tauri-plugin-hardware/src/vendor/vulkan.rs @@ -1,5 +1,13 @@ -use crate::types::{GpuInfo, Vendor}; -use ash::{vk, Entry}; +use crate::types::GpuInfo; + +#[cfg(not(any(target_os = "android", target_os = "ios")))] +use { + crate::types::Vendor, + 
vulkano::device::physical::PhysicalDeviceType, + vulkano::instance::{Instance, InstanceCreateInfo}, + vulkano::memory::MemoryHeapFlags, + vulkano::VulkanLibrary, +}; #[derive(Debug, Clone, serde::Serialize)] pub struct VulkanInfo { @@ -9,6 +17,7 @@ pub struct VulkanInfo { pub device_id: u32, } +#[cfg(not(any(target_os = "android", target_os = "ios")))] fn parse_uuid(bytes: &[u8; 16]) -> String { format!( "{:02x}{:02x}{:02x}{:02x}-\ @@ -35,96 +44,79 @@ fn parse_uuid(bytes: &[u8; 16]) -> String { ) } -pub fn get_vulkan_gpus(lib_path: &str) -> Vec { - match get_vulkan_gpus_internal(lib_path) { - Ok(gpus) => gpus, - Err(e) => { - log::error!("Failed to get Vulkan GPUs: {:?}", e); - vec![] +pub fn get_vulkan_gpus() -> Vec { + #[cfg(any(target_os = "android", target_os = "ios"))] + { + log::info!("Vulkan GPU detection is not supported on mobile platforms"); + vec![] + } + + #[cfg(not(any(target_os = "android", target_os = "ios")))] + { + match get_vulkan_gpus_internal() { + Ok(gpus) => gpus, + Err(e) => { + log::error!("Failed to get Vulkan GPUs: {:?}", e); + vec![] + } } } } -fn parse_c_string(buf: &[i8]) -> String { - unsafe { std::ffi::CStr::from_ptr(buf.as_ptr()) } - .to_str() - .unwrap_or_default() - .to_string() -} +#[cfg(not(any(target_os = "android", target_os = "ios")))] +fn get_vulkan_gpus_internal() -> Result, Box> { + let library = VulkanLibrary::new()?; -fn get_vulkan_gpus_internal(lib_path: &str) -> Result, Box> { - let entry = if lib_path.is_empty() { - unsafe { Entry::load()? } - } else { - unsafe { Entry::load_from(lib_path)? } - }; - let app_info = vk::ApplicationInfo { - api_version: vk::make_api_version(0, 1, 1, 0), - ..Default::default() - }; - let create_info = vk::InstanceCreateInfo { - p_application_info: &app_info, - ..Default::default() - }; - let instance = unsafe { entry.create_instance(&create_info, None)? }; + let instance = Instance::new( + library, + InstanceCreateInfo { + application_name: Some("Jan GPU Detection".into()), + application_version: vulkano::Version::V1_1, + ..Default::default() + }, + )?; let mut device_info_list = vec![]; - for (i, device) in unsafe { instance.enumerate_physical_devices()? 
} - .iter() - .enumerate() - { - // create a chain of properties struct for VkPhysicalDeviceProperties2(3) - // https://registry.khronos.org/vulkan/specs/latest/man/html/VkPhysicalDeviceProperties2.html - // props2 -> driver_props -> id_props - let mut id_props = vk::PhysicalDeviceIDProperties::default(); - let mut driver_props = vk::PhysicalDeviceDriverProperties { - p_next: &mut id_props as *mut _ as *mut std::ffi::c_void, - ..Default::default() - }; - let mut props2 = vk::PhysicalDeviceProperties2 { - p_next: &mut driver_props as *mut _ as *mut std::ffi::c_void, - ..Default::default() - }; - unsafe { - instance.get_physical_device_properties2(*device, &mut props2); - } + for (i, physical_device) in instance.enumerate_physical_devices()?.enumerate() { + let properties = physical_device.properties(); - let props = props2.properties; - if props.device_type == vk::PhysicalDeviceType::CPU { + if properties.device_type == PhysicalDeviceType::Cpu { continue; } + let memory_properties = physical_device.memory_properties(); + let total_memory: u64 = memory_properties + .memory_heaps + .iter() + .filter(|heap| heap.flags.intersects(MemoryHeapFlags::DEVICE_LOCAL)) + .map(|heap| heap.size / (1024 * 1024)) + .sum(); + + let device_uuid = physical_device.properties().device_uuid.unwrap_or([0; 16]); + let driver_version = format!("{}", properties.driver_version); + let device_info = GpuInfo { - name: parse_c_string(&props.device_name), - total_memory: unsafe { instance.get_physical_device_memory_properties(*device) } - .memory_heaps - .iter() - .filter(|heap| heap.flags.contains(vk::MemoryHeapFlags::DEVICE_LOCAL)) - .map(|heap| heap.size / (1024 * 1024)) - .sum(), - vendor: Vendor::from_vendor_id(props.vendor_id), - uuid: parse_uuid(&id_props.device_uuid), - driver_version: parse_c_string(&driver_props.driver_info), + name: properties.device_name.clone(), + total_memory, + vendor: Vendor::from_vendor_id(properties.vendor_id), + uuid: parse_uuid(&device_uuid), + driver_version, nvidia_info: None, vulkan_info: Some(VulkanInfo { index: i as u64, - device_type: format!("{:?}", props.device_type), + device_type: format!("{:?}", properties.device_type), api_version: format!( "{}.{}.{}", - vk::api_version_major(props.api_version), - vk::api_version_minor(props.api_version), - vk::api_version_patch(props.api_version) + properties.api_version.major, + properties.api_version.minor, + properties.api_version.patch ), - device_id: props.device_id, + device_id: properties.device_id, }), }; device_info_list.push(device_info); } - unsafe { - instance.destroy_instance(None); - } - Ok(device_info_list) } diff --git a/src-tauri/plugins/tauri-plugin-llamacpp/src/gguf/commands.rs b/src-tauri/plugins/tauri-plugin-llamacpp/src/gguf/commands.rs index c636fa8bd..03e949eba 100644 --- a/src-tauri/plugins/tauri-plugin-llamacpp/src/gguf/commands.rs +++ b/src-tauri/plugins/tauri-plugin-llamacpp/src/gguf/commands.rs @@ -3,7 +3,6 @@ use super::utils::{estimate_kv_cache_internal, read_gguf_metadata_internal}; use crate::gguf::types::{KVCacheError, KVCacheEstimate, ModelSupportStatus}; use std::collections::HashMap; use std::fs; -use tauri::Runtime; use tauri_plugin_hardware::get_system_info; /// Read GGUF metadata from a model file #[tauri::command] @@ -49,16 +48,15 @@ pub async fn get_model_size(path: String) -> Result { } #[tauri::command] -pub async fn is_model_supported( +pub async fn is_model_supported( path: String, ctx_size: Option, - app_handle: tauri::AppHandle, ) -> Result { // Get model size let model_size = 
get_model_size(path.clone()).await?; // Get system info - let system_info = get_system_info(app_handle.clone()); + let system_info = get_system_info(); log::info!("modelSize: {}", model_size); diff --git a/src-tauri/plugins/tauri-plugin-llamacpp/src/gguf/model_planner.rs b/src-tauri/plugins/tauri-plugin-llamacpp/src/gguf/model_planner.rs index 118894871..14642af60 100644 --- a/src-tauri/plugins/tauri-plugin-llamacpp/src/gguf/model_planner.rs +++ b/src-tauri/plugins/tauri-plugin-llamacpp/src/gguf/model_planner.rs @@ -3,7 +3,6 @@ use crate::gguf::utils::estimate_kv_cache_internal; use crate::gguf::utils::read_gguf_metadata_internal; use serde::{Deserialize, Serialize}; use std::collections::HashMap; -use tauri::Runtime; use tauri_plugin_hardware::get_system_info; #[derive(Serialize, Deserialize, Clone, Debug)] @@ -27,15 +26,14 @@ pub enum ModelMode { } #[tauri::command] -pub async fn plan_model_load( +pub async fn plan_model_load( path: String, memory_mode: String, mmproj_path: Option, requested_ctx: Option, - app: tauri::AppHandle, ) -> Result { let model_size = get_model_size(path.clone()).await?; - let sys_info = get_system_info(app.clone()); + let sys_info = get_system_info(); let gguf = read_gguf_metadata_internal(path.clone()).await?; let mut mmproj_size: u64 = 0; diff --git a/src-tauri/plugins/tauri-plugin-llamacpp/src/gguf/utils.rs b/src-tauri/plugins/tauri-plugin-llamacpp/src/gguf/utils.rs index 50e3f4a14..cdbbf92d5 100644 --- a/src-tauri/plugins/tauri-plugin-llamacpp/src/gguf/utils.rs +++ b/src-tauri/plugins/tauri-plugin-llamacpp/src/gguf/utils.rs @@ -62,6 +62,7 @@ pub async fn estimate_kv_cache_internal( ctx_size: Option, ) -> Result { log::info!("Received ctx_size parameter: {:?}", ctx_size); + log::info!("Received model metadata:\n{:?}", &meta); let arch = meta .get("general.architecture") .ok_or(KVCacheError::ArchitectureNotFound)?; @@ -94,15 +95,43 @@ pub async fn estimate_kv_cache_internal( let key_len_key = format!("{}.attention.key_length", arch); let val_len_key = format!("{}.attention.value_length", arch); - let key_len = meta + let mut key_len = meta .get(&key_len_key) .and_then(|s| s.parse::().ok()) .unwrap_or(0); - let val_len = meta + let mut val_len = meta .get(&val_len_key) .and_then(|s| s.parse::().ok()) .unwrap_or(0); + // Fallback: calculate from embedding_length if key/val lengths not found + if key_len == 0 || val_len == 0 { + let emb_len_key = format!("{}.embedding_length", arch); + let emb_len = meta + .get(&emb_len_key) + .and_then(|s| s.parse::().ok()) + .unwrap_or(0); + + if emb_len > 0 && n_head > 0 { + // For most transformers: head_dim = embedding_length / total_heads + let total_heads = meta + .get(&n_head_key) + .and_then(|s| s.parse::().ok()) + .unwrap_or(n_head); + + let head_dim = emb_len / total_heads; + key_len = head_dim; + val_len = head_dim; + + log::info!( + "Calculated key_len and val_len from embedding_length: {} / {} heads = {} per head", + emb_len, + total_heads, + head_dim + ); + } + } + if key_len == 0 || val_len == 0 { return Err(KVCacheError::EmbeddingLengthInvalid); } diff --git a/src-tauri/plugins/tauri-plugin-llamacpp/src/process.rs b/src-tauri/plugins/tauri-plugin-llamacpp/src/process.rs index 3de983c51..06d83fcb0 100644 --- a/src-tauri/plugins/tauri-plugin-llamacpp/src/process.rs +++ b/src-tauri/plugins/tauri-plugin-llamacpp/src/process.rs @@ -1,8 +1,6 @@ use std::collections::HashSet; -use std::time::Duration; use sysinfo::{Pid, System}; use tauri::{Manager, Runtime, State}; -use tokio::time::timeout; use 
crate::state::{LlamacppState, SessionInfo}; use jan_utils::generate_random_port; @@ -56,6 +54,8 @@ pub async fn get_random_available_port( pub async fn graceful_terminate_process(child: &mut tokio::process::Child) { use nix::sys::signal::{kill, Signal}; use nix::unistd::Pid; + use std::time::Duration; + use tokio::time::timeout; if let Some(raw_pid) = child.id() { let raw_pid = raw_pid as i32; diff --git a/src-tauri/resources/LICENSE b/src-tauri/resources/LICENSE new file mode 100644 index 000000000..d614b967f --- /dev/null +++ b/src-tauri/resources/LICENSE @@ -0,0 +1,19 @@ +Jan + +Copyright 2025 Menlo Research + +This product includes software developed by Menlo Research (https://menlo.ai). + +Licensed under the Apache License, Version 2.0 (the "License"); +You may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Attribution is requested in user-facing documentation and materials, where appropriate. \ No newline at end of file diff --git a/src-tauri/src/core/app/commands.rs b/src-tauri/src/core/app/commands.rs index ba3e493b3..0d9c66c12 100644 --- a/src-tauri/src/core/app/commands.rs +++ b/src-tauri/src/core/app/commands.rs @@ -58,8 +58,8 @@ pub fn get_app_configurations(app_handle: tauri::AppHandle) -> Ap } #[tauri::command] -pub fn update_app_configuration( - app_handle: tauri::AppHandle, +pub fn update_app_configuration( + app_handle: tauri::AppHandle, configuration: AppConfiguration, ) -> Result<(), String> { let configuration_file = get_configuration_file_path(app_handle); @@ -155,13 +155,13 @@ pub fn default_data_folder_path(app_handle: tauri::AppHandle) -> } #[tauri::command] -pub fn get_user_home_path(app: AppHandle) -> String { +pub fn get_user_home_path(app: AppHandle) -> String { return get_app_configurations(app.clone()).data_folder; } #[tauri::command] -pub fn change_app_data_folder( - app_handle: tauri::AppHandle, +pub fn change_app_data_folder( + app_handle: tauri::AppHandle, new_data_folder: String, ) -> Result<(), String> { // Get current data folder path diff --git a/src-tauri/src/core/downloads/commands.rs b/src-tauri/src/core/downloads/commands.rs index f2187046a..6d50ed1a3 100644 --- a/src-tauri/src/core/downloads/commands.rs +++ b/src-tauri/src/core/downloads/commands.rs @@ -3,12 +3,12 @@ use super::models::DownloadItem; use crate::core::app::commands::get_jan_data_folder_path; use crate::core::state::AppState; use std::collections::HashMap; -use tauri::State; +use tauri::{Runtime, State}; use tokio_util::sync::CancellationToken; #[tauri::command] -pub async fn download_files( - app: tauri::AppHandle, +pub async fn download_files( + app: tauri::AppHandle, state: State<'_, AppState>, items: Vec, task_id: &str, diff --git a/src-tauri/src/core/downloads/helpers.rs b/src-tauri/src/core/downloads/helpers.rs index 137bbdd3d..d3d8f6b7c 100644 --- a/src-tauri/src/core/downloads/helpers.rs +++ b/src-tauri/src/core/downloads/helpers.rs @@ -6,7 +6,7 @@ use reqwest::header::{HeaderMap, HeaderName, HeaderValue}; use std::collections::HashMap; use std::path::Path; use std::time::Duration; -use tauri::Emitter; +use tauri::{Emitter, Runtime}; use tokio::fs::File; use 
tokio::io::AsyncWriteExt; use tokio_util::sync::CancellationToken; @@ -25,7 +25,7 @@ pub fn err_to_string(e: E) -> String { async fn validate_downloaded_file( item: &DownloadItem, save_path: &Path, - app: &tauri::AppHandle, + app: &tauri::AppHandle, cancel_token: &CancellationToken, ) -> Result<(), String> { // Skip validation if no verification data is provided @@ -298,7 +298,7 @@ pub async fn _get_file_size( /// Downloads multiple files in parallel with individual progress tracking pub async fn _download_files_internal( - app: tauri::AppHandle, + app: tauri::AppHandle, items: &[DownloadItem], headers: &HashMap, task_id: &str, @@ -423,7 +423,7 @@ pub async fn _download_files_internal( /// Downloads a single file without blocking other downloads async fn download_single_file( - app: tauri::AppHandle, + app: tauri::AppHandle, item: &DownloadItem, header_map: &HeaderMap, save_path: &std::path::Path, @@ -465,7 +465,11 @@ async fn download_single_file( .await .map_err(err_to_string)?; - log::info!("Started downloading: {}", item.url); + // Decode URL for better readability in logs + let decoded_url = url::Url::parse(&item.url) + .map(|u| u.to_string()) + .unwrap_or_else(|_| item.url.clone()); + log::info!("Started downloading: {}", decoded_url); let client = _get_client_for_item(item, &header_map).map_err(err_to_string)?; let mut download_delta = 0u64; let mut initial_progress = 0u64; @@ -584,7 +588,11 @@ async fn download_single_file( .await .map_err(err_to_string)?; - log::info!("Finished downloading: {}", item.url); + // Decode URL for better readability in logs + let decoded_url = url::Url::parse(&item.url) + .map(|u| u.to_string()) + .unwrap_or_else(|_| item.url.clone()); + log::info!("Finished downloading: {}", decoded_url); Ok(save_path.to_path_buf()) } diff --git a/src-tauri/src/core/extensions/commands.rs b/src-tauri/src/core/extensions/commands.rs index 784c71f46..4c5a44a53 100644 --- a/src-tauri/src/core/extensions/commands.rs +++ b/src-tauri/src/core/extensions/commands.rs @@ -1,24 +1,24 @@ use std::fs; use std::path::PathBuf; -use tauri::AppHandle; +use tauri::{AppHandle, Runtime}; use crate::core::app::commands::get_jan_data_folder_path; use crate::core::setup; #[tauri::command] -pub fn get_jan_extensions_path(app_handle: tauri::AppHandle) -> PathBuf { +pub fn get_jan_extensions_path(app_handle: tauri::AppHandle) -> PathBuf { get_jan_data_folder_path(app_handle).join("extensions") } #[tauri::command] -pub fn install_extensions(app: AppHandle) { +pub fn install_extensions(app: AppHandle) { if let Err(err) = setup::install_extensions(app, true) { log::error!("Failed to install extensions: {}", err); } } #[tauri::command] -pub fn get_active_extensions(app: AppHandle) -> Vec { +pub fn get_active_extensions(app: AppHandle) -> Vec { let mut path = get_jan_extensions_path(app); path.push("extensions.json"); log::info!("get jan extensions, path: {:?}", path); diff --git a/src-tauri/src/core/filesystem/commands.rs b/src-tauri/src/core/filesystem/commands.rs index a37cc00ec..fe44052b8 100644 --- a/src-tauri/src/core/filesystem/commands.rs +++ b/src-tauri/src/core/filesystem/commands.rs @@ -140,7 +140,7 @@ pub fn readdir_sync( #[tauri::command] pub fn write_yaml( - app: tauri::AppHandle, + app: tauri::AppHandle, data: serde_json::Value, save_path: &str, ) -> Result<(), String> { @@ -161,7 +161,7 @@ pub fn write_yaml( } #[tauri::command] -pub fn read_yaml(app: tauri::AppHandle, path: &str) -> Result { +pub fn read_yaml(app: tauri::AppHandle, path: &str) -> Result { let jan_data_folder = 
crate::core::app::commands::get_jan_data_folder_path(app.clone()); let path = jan_utils::normalize_path(&jan_data_folder.join(path)); if !path.starts_with(&jan_data_folder) { @@ -178,7 +178,7 @@ pub fn read_yaml(app: tauri::AppHandle, path: &str) -> Result Result<(), String> { +pub fn decompress(app: tauri::AppHandle, path: &str, output_dir: &str) -> Result<(), String> { let jan_data_folder = crate::core::app::commands::get_jan_data_folder_path(app.clone()); let path_buf = jan_utils::normalize_path(&jan_data_folder.join(path)); diff --git a/src-tauri/src/core/mcp/commands.rs b/src-tauri/src/core/mcp/commands.rs index 3bef12149..a86db598e 100644 --- a/src-tauri/src/core/mcp/commands.rs +++ b/src-tauri/src/core/mcp/commands.rs @@ -80,7 +80,7 @@ pub async fn deactivate_mcp_server(state: State<'_, AppState>, name: String) -> } #[tauri::command] -pub async fn restart_mcp_servers(app: AppHandle, state: State<'_, AppState>) -> Result<(), String> { +pub async fn restart_mcp_servers(app: AppHandle, state: State<'_, AppState>) -> Result<(), String> { let servers = state.mcp_servers.clone(); // Stop the servers stop_mcp_servers(state.mcp_servers.clone()).await?; @@ -119,7 +119,7 @@ pub async fn reset_mcp_restart_count( #[tauri::command] pub async fn get_connected_servers( - _app: AppHandle, + _app: AppHandle, state: State<'_, AppState>, ) -> Result, String> { let servers = state.mcp_servers.clone(); @@ -293,7 +293,7 @@ pub async fn cancel_tool_call( } #[tauri::command] -pub async fn get_mcp_configs(app: AppHandle) -> Result { +pub async fn get_mcp_configs(app: AppHandle) -> Result { let mut path = get_jan_data_folder_path(app); path.push("mcp_config.json"); @@ -308,7 +308,7 @@ pub async fn get_mcp_configs(app: AppHandle) -> Result { } #[tauri::command] -pub async fn save_mcp_configs(app: AppHandle, configs: String) -> Result<(), String> { +pub async fn save_mcp_configs(app: AppHandle, configs: String) -> Result<(), String> { let mut path = get_jan_data_folder_path(app); path.push("mcp_config.json"); log::info!("save mcp configs, path: {:?}", path); diff --git a/src-tauri/src/core/mcp/helpers.rs b/src-tauri/src/core/mcp/helpers.rs index 80a8b5f86..48c92ba2c 100644 --- a/src-tauri/src/core/mcp/helpers.rs +++ b/src-tauri/src/core/mcp/helpers.rs @@ -25,7 +25,7 @@ use crate::core::{ mcp::models::McpServerConfig, state::{AppState, RunningServiceEnum, SharedMcpServers}, }; -use jan_utils::can_override_npx; +use jan_utils::{can_override_npx, can_override_uvx}; /// Calculate exponential backoff delay with jitter /// @@ -627,19 +627,31 @@ async fn schedule_mcp_start_task( } } else { let mut cmd = Command::new(config_params.command.clone()); - if config_params.command.clone() == "npx" && can_override_npx() { + let bun_x_path = if cfg!(windows) { + bin_path.join("bun.exe") + } else { + bin_path.join("bun") + }; + if config_params.command.clone() == "npx" + && can_override_npx(bun_x_path.display().to_string()) + { let mut cache_dir = app_path.clone(); cache_dir.push(".npx"); - let bun_x_path = format!("{}/bun", bin_path.display()); - cmd = Command::new(bun_x_path); + cmd = Command::new(bun_x_path.display().to_string()); cmd.arg("x"); cmd.env("BUN_INSTALL", cache_dir.to_str().unwrap().to_string()); } - if config_params.command.clone() == "uvx" { + + let uv_path = if cfg!(windows) { + bin_path.join("uv.exe") + } else { + bin_path.join("uv") + }; + if config_params.command.clone() == "uvx" && can_override_uvx(uv_path.display().to_string()) + { let mut cache_dir = app_path.clone(); cache_dir.push(".uvx"); - let 
bun_x_path = format!("{}/uv", bin_path.display()); - cmd = Command::new(bun_x_path); + cmd = Command::new(uv_path); cmd.arg("tool"); cmd.arg("run"); cmd.env("UV_CACHE_DIR", cache_dir.to_str().unwrap().to_string()); @@ -934,3 +946,47 @@ pub async fn should_restart_server( } } } + +// Add a new server configuration to the MCP config file +pub fn add_server_config( + app_handle: tauri::AppHandle, + server_key: String, + server_value: Value, +) -> Result<(), String> { + add_server_config_with_path(app_handle, server_key, server_value, None) +} + +// Add a new server configuration to the MCP config file with custom path support +pub fn add_server_config_with_path( + app_handle: tauri::AppHandle, + server_key: String, + server_value: Value, + config_filename: Option<&str>, +) -> Result<(), String> { + let config_filename = config_filename.unwrap_or("mcp_config.json"); + let config_path = get_jan_data_folder_path(app_handle).join(config_filename); + + let mut config: Value = serde_json::from_str( + &std::fs::read_to_string(&config_path) + .map_err(|e| format!("Failed to read config file: {e}"))?, + ) + .map_err(|e| format!("Failed to parse config: {e}"))?; + + config + .as_object_mut() + .ok_or("Config root is not an object")? + .entry("mcpServers") + .or_insert_with(|| Value::Object(serde_json::Map::new())) + .as_object_mut() + .ok_or("mcpServers is not an object")? + .insert(server_key, server_value); + + std::fs::write( + &config_path, + serde_json::to_string_pretty(&config) + .map_err(|e| format!("Failed to serialize config: {e}"))?, + ) + .map_err(|e| format!("Failed to write config file: {e}"))?; + + Ok(()) +} diff --git a/src-tauri/src/core/mcp/tests.rs b/src-tauri/src/core/mcp/tests.rs index 081a188e8..d973ce647 100644 --- a/src-tauri/src/core/mcp/tests.rs +++ b/src-tauri/src/core/mcp/tests.rs @@ -1,9 +1,10 @@ -use super::helpers::run_mcp_commands; +use super::helpers::{add_server_config, add_server_config_with_path, run_mcp_commands}; use crate::core::app::commands::get_jan_data_folder_path; use crate::core::state::SharedMcpServers; use std::collections::HashMap; use std::fs::File; use std::io::Write; +use std::path::PathBuf; use std::sync::Arc; use tauri::test::mock_app; use tokio::sync::Mutex; @@ -27,8 +28,7 @@ async fn test_run_mcp_commands() { .expect("Failed to write to config file"); // Call the run_mcp_commands function - let servers_state: SharedMcpServers = - Arc::new(Mutex::new(HashMap::new())); + let servers_state: SharedMcpServers = Arc::new(Mutex::new(HashMap::new())); let result = run_mcp_commands(app.handle(), servers_state).await; // Assert that the function returns Ok(()) @@ -37,3 +37,188 @@ async fn test_run_mcp_commands() { // Clean up the mock config file std::fs::remove_file(&config_path).expect("Failed to remove config file"); } + +#[test] +fn test_add_server_config_new_file() { + let app = mock_app(); + let app_path = get_jan_data_folder_path(app.handle().clone()); + let config_path = app_path.join("mcp_config_test_new.json"); + + // Ensure the directory exists + if let Some(parent) = config_path.parent() { + std::fs::create_dir_all(parent).expect("Failed to create parent directory"); + } + + // Create initial config file with empty mcpServers + let mut file = File::create(&config_path).expect("Failed to create config file"); + file.write_all(b"{\"mcpServers\":{}}") + .expect("Failed to write to config file"); + drop(file); + + // Test adding a new server config + let server_value = serde_json::json!({ + "command": "npx", + "args": ["-y", "test-server"], + "env": { 
"TEST_API_KEY": "test_key" }, + "active": false + }); + + let result = add_server_config_with_path( + app.handle().clone(), + "test_server".to_string(), + server_value.clone(), + Some("mcp_config_test_new.json"), + ); + + assert!(result.is_ok(), "Failed to add server config: {:?}", result); + + // Verify the config was added correctly + let config_content = std::fs::read_to_string(&config_path) + .expect("Failed to read config file"); + let config: serde_json::Value = serde_json::from_str(&config_content) + .expect("Failed to parse config"); + + assert!(config["mcpServers"]["test_server"].is_object()); + assert_eq!(config["mcpServers"]["test_server"]["command"], "npx"); + assert_eq!(config["mcpServers"]["test_server"]["args"][0], "-y"); + assert_eq!(config["mcpServers"]["test_server"]["args"][1], "test-server"); + + // Clean up + std::fs::remove_file(&config_path).expect("Failed to remove config file"); +} + +#[test] +fn test_add_server_config_existing_servers() { + let app = mock_app(); + let app_path = get_jan_data_folder_path(app.handle().clone()); + let config_path = app_path.join("mcp_config_test_existing.json"); + + // Ensure the directory exists + if let Some(parent) = config_path.parent() { + std::fs::create_dir_all(parent).expect("Failed to create parent directory"); + } + + // Create config file with existing server + let initial_config = serde_json::json!({ + "mcpServers": { + "existing_server": { + "command": "existing_command", + "args": ["arg1"], + "active": true + } + } + }); + + let mut file = File::create(&config_path).expect("Failed to create config file"); + file.write_all(serde_json::to_string_pretty(&initial_config).unwrap().as_bytes()) + .expect("Failed to write to config file"); + drop(file); + + // Add new server + let new_server_value = serde_json::json!({ + "command": "new_command", + "args": ["new_arg"], + "active": false + }); + + let result = add_server_config_with_path( + app.handle().clone(), + "new_server".to_string(), + new_server_value, + Some("mcp_config_test_existing.json"), + ); + + assert!(result.is_ok(), "Failed to add server config: {:?}", result); + + // Verify both servers exist + let config_content = std::fs::read_to_string(&config_path) + .expect("Failed to read config file"); + let config: serde_json::Value = serde_json::from_str(&config_content) + .expect("Failed to parse config"); + + // Check existing server is still there + assert!(config["mcpServers"]["existing_server"].is_object()); + assert_eq!(config["mcpServers"]["existing_server"]["command"], "existing_command"); + + // Check new server was added + assert!(config["mcpServers"]["new_server"].is_object()); + assert_eq!(config["mcpServers"]["new_server"]["command"], "new_command"); + + // Clean up + std::fs::remove_file(&config_path).expect("Failed to remove config file"); +} + +#[test] +fn test_add_server_config_missing_config_file() { + let app = mock_app(); + let app_path = get_jan_data_folder_path(app.handle().clone()); + + // Ensure the directory exists + if let Some(parent) = app_path.parent() { + std::fs::create_dir_all(parent).ok(); + } + std::fs::create_dir_all(&app_path).ok(); + + let config_path = app_path.join("mcp_config.json"); + + // Ensure the file doesn't exist + if config_path.exists() { + std::fs::remove_file(&config_path).ok(); + } + + let server_value = serde_json::json!({ + "command": "test", + "args": [], + "active": false + }); + + let result = add_server_config( + app.handle().clone(), + "test".to_string(), + server_value, + ); + + assert!(result.is_err(), 
"Expected error when config file doesn't exist"); + assert!(result.unwrap_err().contains("Failed to read config file")); +} + +#[cfg(not(target_os = "windows"))] +#[test] +fn test_bin_path_construction_with_join() { + // Test that PathBuf::join properly constructs paths + let bin_path = PathBuf::from("/usr/local/bin"); + let bun_path = bin_path.join("bun"); + + assert_eq!(bun_path.to_string_lossy(), "/usr/local/bin/bun"); + + // Test conversion to String via display() + let bun_path_str = bun_path.display().to_string(); + assert_eq!(bun_path_str, "/usr/local/bin/bun"); +} + +#[cfg(not(target_os = "windows"))] +#[test] +fn test_uv_path_construction_with_join() { + // Test that PathBuf::join properly constructs paths for uv + let bin_path = PathBuf::from("/usr/local/bin"); + let uv_path = bin_path.join("uv"); + + assert_eq!(uv_path.to_string_lossy(), "/usr/local/bin/uv"); + + // Test conversion to String via display() + let uv_path_str = uv_path.display().to_string(); + assert_eq!(uv_path_str, "/usr/local/bin/uv"); +} + +#[cfg(target_os = "windows")] +#[test] +fn test_bin_path_construction_windows() { + // Test Windows-style paths + let bin_path = PathBuf::from(r"C:\Program Files\bin"); + let bun_path = bin_path.join("bun.exe"); + + assert_eq!(bun_path.to_string_lossy(), r"C:\Program Files\bin\bun.exe"); + + let bun_path_str = bun_path.display().to_string(); + assert_eq!(bun_path_str, r"C:\Program Files\bin\bun.exe"); +} diff --git a/src-tauri/src/core/server/commands.rs b/src-tauri/src/core/server/commands.rs index 85450bee5..286d40cc1 100644 --- a/src-tauri/src/core/server/commands.rs +++ b/src-tauri/src/core/server/commands.rs @@ -14,12 +14,12 @@ pub async fn start_server( api_key: String, trusted_hosts: Vec, proxy_timeout: u64, -) -> Result { +) -> Result { let server_handle = state.server_handle.clone(); let plugin_state: State = app_handle.state(); let sessions = plugin_state.llama_server_process.clone(); - proxy::start_server( + let actual_port = proxy::start_server( server_handle, sessions, host, @@ -31,7 +31,7 @@ pub async fn start_server( ) .await .map_err(|e| e.to_string())?; - Ok(true) + Ok(actual_port) } #[tauri::command] diff --git a/src-tauri/src/core/server/proxy.rs b/src-tauri/src/core/server/proxy.rs index 4baf36503..12398ac02 100644 --- a/src-tauri/src/core/server/proxy.rs +++ b/src-tauri/src/core/server/proxy.rs @@ -715,7 +715,7 @@ pub async fn start_server( proxy_api_key: String, trusted_hosts: Vec>, proxy_timeout: u64, -) -> Result> { +) -> Result> { let mut handle_guard = server_handle.lock().await; if handle_guard.is_some() { return Err("Server is already running".into()); @@ -767,7 +767,9 @@ pub async fn start_server( }); *handle_guard = Some(server_task); - Ok(true) + let actual_port = addr.port(); + log::info!("Jan API server started successfully on port {}", actual_port); + Ok(actual_port) } pub async fn stop_server( diff --git a/src-tauri/src/core/setup.rs b/src-tauri/src/core/setup.rs index c88e62a8d..38eca440e 100644 --- a/src-tauri/src/core/setup.rs +++ b/src-tauri/src/core/setup.rs @@ -3,39 +3,27 @@ use std::{ fs::{self, File}, io::Read, path::PathBuf, + sync::Arc, }; use tar::Archive; +use tauri::{ + App, Emitter, Manager, Runtime, Wry +}; + +#[cfg(desktop)] use tauri::{ menu::{Menu, MenuItem, PredefinedMenuItem}, tray::{MouseButton, MouseButtonState, TrayIcon, TrayIconBuilder, TrayIconEvent}, - App, Emitter, Manager, }; -use tauri_plugin_store::StoreExt; -// use tokio::sync::Mutex; -// use tokio::time::{sleep, Duration}; // Using tokio::sync::Mutex -// // 
MCP +use tauri_plugin_store::Store; + +use crate::core::mcp::helpers::add_server_config; -// MCP use super::{ - app::commands::get_jan_data_folder_path, extensions::commands::get_jan_extensions_path, - mcp::helpers::run_mcp_commands, state::AppState, + extensions::commands::get_jan_extensions_path, mcp::helpers::run_mcp_commands, state::AppState, }; -pub fn install_extensions(app: tauri::AppHandle, force: bool) -> Result<(), String> { - let mut store_path = get_jan_data_folder_path(app.clone()); - store_path.push("store.json"); - let store = app.store(store_path).expect("Store not initialized"); - let stored_version = store - .get("version") - .and_then(|v| v.as_str().map(String::from)) - .unwrap_or_default(); - - let app_version = app - .config() - .version - .clone() - .unwrap_or_else(|| "".to_string()); - +pub fn install_extensions(app: tauri::AppHandle, force: bool) -> Result<(), String> { let extensions_path = get_jan_extensions_path(app.clone()); let pre_install_path = app .path() @@ -50,13 +38,8 @@ pub fn install_extensions(app: tauri::AppHandle, force: bool) -> Result<(), Stri if std::env::var("IS_CLEAN").is_ok() { clean_up = true; } - log::info!( - "Installing extensions. Clean up: {}, Stored version: {}, App version: {}", - clean_up, - stored_version, - app_version - ); - if !clean_up && stored_version == app_version && extensions_path.exists() { + log::info!("Installing extensions. Clean up: {}", clean_up); + if !clean_up && extensions_path.exists() { return Ok(()); } @@ -160,10 +143,36 @@ pub fn install_extensions(app: tauri::AppHandle, force: bool) -> Result<(), Stri ) .map_err(|e| e.to_string())?; - // Store the new app version - store.set("version", serde_json::json!(app_version)); - store.save().expect("Failed to save store"); + Ok(()) +} +// Migrate MCP servers configuration +pub fn migrate_mcp_servers( + app_handle: tauri::AppHandle, + store: Arc>, +) -> Result<(), String> { + let mcp_version = store + .get("mcp_version") + .and_then(|v| v.as_i64()) + .unwrap_or_else(|| 0); + if mcp_version < 1 { + log::info!("Migrating MCP schema version 1"); + let result = add_server_config( + app_handle, + "exa".to_string(), + serde_json::json!({ + "command": "npx", + "args": ["-y", "exa-mcp-server"], + "env": { "EXA_API_KEY": "YOUR_EXA_API_KEY_HERE" }, + "active": false + }), + ); + if let Err(e) = result { + log::error!("Failed to add server config: {}", e); + } + } + store.set("mcp_version", 1); + store.save().expect("Failed to save store"); Ok(()) } @@ -197,10 +206,10 @@ pub fn extract_extension_manifest( Ok(None) } -pub fn setup_mcp(app: &App) { +pub fn setup_mcp(app: &App) { let state = app.state::(); let servers = state.mcp_servers.clone(); - let app_handle: tauri::AppHandle = app.handle().clone(); + let app_handle = app.handle().clone(); tauri::async_runtime::spawn(async move { if let Err(e) = run_mcp_commands(&app_handle, servers).await { log::error!("Failed to run mcp commands: {}", e); @@ -211,6 +220,7 @@ pub fn setup_mcp(app: &App) { }); } +#[cfg(desktop)] pub fn setup_tray(app: &App) -> tauri::Result { let show_i = MenuItem::with_id(app.handle(), "open", "Open Jan", true, None::<&str>)?; let quit_i = MenuItem::with_id(app.handle(), "quit", "Quit", true, None::<&str>)?; diff --git a/src-tauri/src/core/system/commands.rs b/src-tauri/src/core/system/commands.rs index a8b58d745..f5e9d7618 100644 --- a/src-tauri/src/core/system/commands.rs +++ b/src-tauri/src/core/system/commands.rs @@ -1,6 +1,6 @@ use std::fs; use std::path::PathBuf; -use tauri::{AppHandle, Manager, State}; 
+use tauri::{AppHandle, Manager, Runtime, State}; use tauri_plugin_llamacpp::cleanup_llama_processes; use crate::core::app::commands::{ @@ -11,13 +11,16 @@ use crate::core::mcp::helpers::clean_up_mcp_servers; use crate::core::state::AppState; #[tauri::command] -pub fn factory_reset(app_handle: tauri::AppHandle, state: State<'_, AppState>) { - // close window - let windows = app_handle.webview_windows(); - for (label, window) in windows.iter() { - window.close().unwrap_or_else(|_| { - log::warn!("Failed to close window: {:?}", label); - }); +pub fn factory_reset(app_handle: tauri::AppHandle, state: State<'_, AppState>) { + // close window (not available on mobile platforms) + #[cfg(not(any(target_os = "ios", target_os = "android")))] + { + let windows = app_handle.webview_windows(); + for (label, window) in windows.iter() { + window.close().unwrap_or_else(|_| { + log::warn!("Failed to close window: {:?}", label); + }); + } } let data_folder = get_jan_data_folder_path(app_handle.clone()); log::info!("Factory reset, removing data folder: {:?}", data_folder); @@ -46,12 +49,12 @@ pub fn factory_reset(app_handle: tauri::AppHandle, state: State<'_, AppState>) { } #[tauri::command] -pub fn relaunch(app: AppHandle) { +pub fn relaunch(app: AppHandle) { app.restart() } #[tauri::command] -pub fn open_app_directory(app: AppHandle) { +pub fn open_app_directory(app: AppHandle) { let app_path = app.path().app_data_dir().unwrap(); if cfg!(target_os = "windows") { std::process::Command::new("explorer") @@ -93,7 +96,7 @@ pub fn open_file_explorer(path: String) { } #[tauri::command] -pub async fn read_logs(app: AppHandle) -> Result { +pub async fn read_logs(app: AppHandle) -> Result { let log_path = get_jan_data_folder_path(app).join("logs").join("app.log"); if log_path.exists() { let content = fs::read_to_string(log_path).map_err(|e| e.to_string())?; diff --git a/src-tauri/src/core/threads/commands.rs b/src-tauri/src/core/threads/commands.rs index a9012193a..44ac1964d 100644 --- a/src-tauri/src/core/threads/commands.rs +++ b/src-tauri/src/core/threads/commands.rs @@ -127,7 +127,6 @@ pub async fn create_message( .ok_or("Missing thread_id")?; id.to_string() }; - ensure_thread_dir_exists(app_handle.clone(), &thread_id)?; let path = get_messages_path(app_handle.clone(), &thread_id); if message.get("id").is_none() { @@ -140,6 +139,9 @@ pub async fn create_message( let lock = get_lock_for_thread(&thread_id).await; let _guard = lock.lock().await; + // Ensure directory exists right before file operations to handle race conditions + ensure_thread_dir_exists(app_handle.clone(), &thread_id)?; + let mut file: File = fs::OpenOptions::new() .create(true) .append(true) @@ -148,6 +150,9 @@ pub async fn create_message( let data = serde_json::to_string(&message).map_err(|e| e.to_string())?; writeln!(file, "{}", data).map_err(|e| e.to_string())?; + + // Explicitly flush to ensure data is written before returning + file.flush().map_err(|e| e.to_string())?; } Ok(message) diff --git a/src-tauri/src/core/threads/helpers.rs b/src-tauri/src/core/threads/helpers.rs index 0edcf41b2..76d2c2e59 100644 --- a/src-tauri/src/core/threads/helpers.rs +++ b/src-tauri/src/core/threads/helpers.rs @@ -3,7 +3,7 @@ use std::io::{BufRead, BufReader, Write}; use tauri::Runtime; // For async file write serialization -use once_cell::sync::Lazy; +use std::sync::OnceLock; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::Mutex; @@ -11,12 +11,12 @@ use tokio::sync::Mutex; use super::utils::{get_messages_path, get_thread_metadata_path}; 
// Global per-thread locks for message file writes -pub static MESSAGE_LOCKS: Lazy>>>> = - Lazy::new(|| Mutex::new(HashMap::new())); +pub static MESSAGE_LOCKS: OnceLock>>>> = OnceLock::new(); /// Get a lock for a specific thread to ensure thread-safe message file operations pub async fn get_lock_for_thread(thread_id: &str) -> Arc> { - let mut locks = MESSAGE_LOCKS.lock().await; + let locks = MESSAGE_LOCKS.get_or_init(|| Mutex::new(HashMap::new())); + let mut locks = locks.lock().await; let lock = locks .entry(thread_id.to_string()) .or_insert_with(|| Arc::new(Mutex::new(()))) diff --git a/src-tauri/src/core/threads/mod.rs b/src-tauri/src/core/threads/mod.rs index fb76bee8c..25225d538 100644 --- a/src-tauri/src/core/threads/mod.rs +++ b/src-tauri/src/core/threads/mod.rs @@ -13,7 +13,6 @@ pub mod commands; mod constants; pub mod helpers; -pub mod models; pub mod utils; #[cfg(test)] diff --git a/src-tauri/src/core/threads/models.rs b/src-tauri/src/core/threads/models.rs deleted file mode 100644 index 5038c6def..000000000 --- a/src-tauri/src/core/threads/models.rs +++ /dev/null @@ -1,103 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct Thread { - pub id: String, - pub object: String, - pub title: String, - pub assistants: Vec, - pub created: i64, - pub updated: i64, - pub metadata: Option, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct ThreadMessage { - pub id: String, - pub object: String, - pub thread_id: String, - pub assistant_id: Option, - pub attachments: Option>, - pub role: String, - pub content: Vec, - pub status: String, - pub created_at: i64, - pub completed_at: i64, - pub metadata: Option, - pub type_: Option, - pub error_code: Option, - pub tool_call_id: Option, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct Attachment { - pub file_id: Option, - pub tools: Option>, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -#[serde(tag = "type")] -pub enum Tool { - #[serde(rename = "file_search")] - FileSearch, - #[serde(rename = "code_interpreter")] - CodeInterpreter, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct ThreadContent { - pub type_: String, - pub text: Option, - pub image_url: Option, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct ContentValue { - pub value: String, - pub annotations: Vec, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct ImageContentValue { - pub detail: Option, - pub url: Option, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct ThreadAssistantInfo { - pub id: String, - pub name: String, - pub model: ModelInfo, - pub instructions: Option, - pub tools: Option>, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct ModelInfo { - pub id: String, - pub name: String, - pub settings: serde_json::Value, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -#[serde(tag = "type")] -pub enum AssistantTool { - #[serde(rename = "code_interpreter")] - CodeInterpreter, - #[serde(rename = "retrieval")] - Retrieval, - #[serde(rename = "function")] - Function { - name: String, - description: Option, - parameters: Option, - }, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct ThreadState { - pub has_more: bool, - pub waiting_for_response: bool, - pub error: Option, - pub last_message: Option, -} diff --git a/src-tauri/src/core/threads/tests.rs b/src-tauri/src/core/threads/tests.rs index 5b4aaec57..8d3524d06 100644 --- a/src-tauri/src/core/threads/tests.rs +++ 
b/src-tauri/src/core/threads/tests.rs @@ -1,4 +1,3 @@ -use crate::core::app::commands::get_jan_data_folder_path; use super::commands::*; use serde_json::json; @@ -9,11 +8,18 @@ use tauri::test::{mock_app, MockRuntime}; // Helper to create a mock app handle with a temp data dir fn mock_app_with_temp_data_dir() -> (tauri::App, PathBuf) { let app = mock_app(); - let data_dir = get_jan_data_folder_path(app.handle().clone()); + // Create a unique test directory to avoid race conditions between parallel tests + let unique_id = std::thread::current().id(); + let timestamp = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_nanos(); + let data_dir = std::env::current_dir() + .unwrap_or_else(|_| PathBuf::from(".")) + .join(format!("test-data-{:?}-{}", unique_id, timestamp)); println!("Mock app data dir: {}", data_dir.display()); - // Patch get_data_dir to use temp dir (requires get_data_dir to be overridable or injectable) - // For now, we assume get_data_dir uses tauri::api::path::app_data_dir(&app_handle) - // and that we can set the environment variable to redirect it. + // Ensure the unique test directory exists + let _ = fs::create_dir_all(&data_dir); (app, data_dir) } @@ -82,7 +88,7 @@ async fn test_create_and_list_messages() { let messages = list_messages(app.handle().clone(), thread_id.clone()) .await .unwrap(); - assert!(messages.len() > 0); + assert!(messages.len() > 0, "Expected at least one message, but got none. Thread ID: {}", thread_id); assert_eq!(messages[0]["role"], "user"); // Clean up diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index dad155875..abd12ddb7 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -10,11 +10,10 @@ use jan_utils::generate_app_token; use std::{collections::HashMap, sync::Arc}; use tauri::{Emitter, Manager, RunEvent}; use tauri_plugin_llamacpp::cleanup_llama_processes; +use tauri_plugin_store::StoreExt; use tokio::sync::Mutex; -use crate::core::setup::setup_tray; - -#[cfg_attr(mobile, tauri::mobile_entry_point)] +#[cfg_attr(all(mobile, any(target_os = "android", target_os = "ios")), tauri::mobile_entry_point)] pub fn run() { let mut builder = tauri::Builder::default(); #[cfg(desktop)] @@ -22,29 +21,29 @@ pub fn run() { builder = builder.plugin(tauri_plugin_single_instance::init(|_app, argv, _cwd| { println!("a new app instance was opened with {argv:?} and the deep link event was already triggered"); // when defining deep link schemes at runtime, you must also check `argv` here - let arg = argv.iter().find(|arg| arg.starts_with("jan://")); - if let Some(deep_link) = arg { - println!("deep link: {deep_link}"); - // handle the deep link, e.g., emit an event to the webview - _app.app_handle().emit("deep-link", deep_link).unwrap(); - if let Some(window) = _app.app_handle().get_webview_window("main") { - let _ = window.set_focus(); - } - } })); } - let app = builder + let mut app_builder = builder .plugin(tauri_plugin_os::init()) - .plugin(tauri_plugin_deep_link::init()) .plugin(tauri_plugin_dialog::init()) .plugin(tauri_plugin_opener::init()) .plugin(tauri_plugin_http::init()) .plugin(tauri_plugin_store::Builder::new().build()) - .plugin(tauri_plugin_updater::Builder::new().build()) .plugin(tauri_plugin_shell::init()) - .plugin(tauri_plugin_llamacpp::init()) - .plugin(tauri_plugin_hardware::init()) + .plugin(tauri_plugin_llamacpp::init()); + + #[cfg(feature = "deep-link")] + { + app_builder = app_builder.plugin(tauri_plugin_deep_link::init()); + } + + #[cfg(not(any(target_os = "android", target_os = 
"ios")))] + { + app_builder = app_builder.plugin(tauri_plugin_hardware::init()); + } + + let app = app_builder .invoke_handler(tauri::generate_handler![ // FS commands - Deperecate soon core::filesystem::commands::join_path, @@ -120,21 +119,6 @@ pub fn run() { server_handle: Arc::new(Mutex::new(None)), tool_call_cancellations: Arc::new(Mutex::new(HashMap::new())), }) - .on_window_event(|window, event| match event { - tauri::WindowEvent::CloseRequested { api, .. } => { - if option_env!("ENABLE_SYSTEM_TRAY_ICON").unwrap_or("false") == "true" { - #[cfg(target_os = "macos")] - window - .app_handle() - .set_activation_policy(tauri::ActivationPolicy::Accessory) - .unwrap(); - - window.hide().unwrap(); - api.prevent_close(); - } - } - _ => {} - }) .setup(|app| { app.handle().plugin( tauri_plugin_log::Builder::default() @@ -149,22 +133,51 @@ pub fn run() { ]) .build(), )?; - app.handle() - .plugin(tauri_plugin_updater::Builder::new().build())?; - // Install extensions - if let Err(e) = setup::install_extensions(app.handle().clone(), false) { + #[cfg(not(any(target_os = "ios", target_os = "android")))] + app.handle().plugin(tauri_plugin_updater::Builder::new().build())?; + + // Start migration + let mut store_path = get_jan_data_folder_path(app.handle().clone()); + store_path.push("store.json"); + let store = app + .handle() + .store(store_path) + .expect("Store not initialized"); + let stored_version = store + .get("version") + .and_then(|v| v.as_str().map(String::from)) + .unwrap_or_default(); + let app_version = app + .config() + .version + .clone() + .unwrap_or_else(|| "".to_string()); + // Migrate extensions + if let Err(e) = + setup::install_extensions(app.handle().clone(), stored_version != app_version) + { log::error!("Failed to install extensions: {}", e); } - if option_env!("ENABLE_SYSTEM_TRAY_ICON").unwrap_or("false") == "true" { - log::info!("Enabling system tray icon"); - let _ = setup_tray(app); + // Migrate MCP servers + if let Err(e) = setup::migrate_mcp_servers(app.handle().clone(), store.clone()) { + log::error!("Failed to migrate MCP servers: {}", e); } - #[cfg(any(windows, target_os = "linux"))] + // Store the new app version + store.set("version", serde_json::json!(app_version)); + store.save().expect("Failed to save store"); + // Migration completed + + #[cfg(desktop)] + if option_env!("ENABLE_SYSTEM_TRAY_ICON").unwrap_or("false") == "true" { + log::info!("Enabling system tray icon"); + let _ = setup::setup_tray(app); + } + + #[cfg(all(feature = "deep-link", any(windows, target_os = "linux")))] { use tauri_plugin_deep_link::DeepLinkExt; - app.deep_link().register_all()?; } setup_mcp(app); @@ -179,12 +192,15 @@ pub fn run() { // This is called when the app is actually exiting (e.g., macOS dock quit) // We can't prevent this, so run cleanup quickly let app_handle = app.clone(); - // Hide window immediately - if let Some(window) = app_handle.get_webview_window("main") { - let _ = window.hide(); - } tokio::task::block_in_place(|| { tauri::async_runtime::block_on(async { + // Hide window immediately (not available on mobile platforms) + if let Some(window) = app_handle.get_webview_window("main") { + #[cfg(not(any(target_os = "ios", target_os = "android")))] + { let _ = window.hide(); } + let _ = window.emit("kill-mcp-servers", ()); + } + // Quick cleanup with shorter timeout let state = app_handle.state::(); let _ = clean_up_mcp_servers(state).await; diff --git a/src-tauri/tauri b/src-tauri/tauri new file mode 100755 index 000000000..f944754d6 --- /dev/null +++ b/src-tauri/tauri @@ 
-0,0 +1,2 @@ +#!/usr/bin/env node +import('../node_modules/@tauri-apps/cli/tauri.js'); \ No newline at end of file diff --git a/src-tauri/tauri.android.conf.json b/src-tauri/tauri.android.conf.json new file mode 100644 index 000000000..a0b795207 --- /dev/null +++ b/src-tauri/tauri.android.conf.json @@ -0,0 +1,20 @@ +{ + "identifier": "jan.ai.app", + "build": { + "devUrl": null, + "frontendDist": "../web-app/dist" + }, + "app": { + "security": { + "capabilities": ["mobile"] + } + }, + "plugins": {}, + "bundle": { + "resources": ["resources/LICENSE"], + "externalBin": [], + "android": { + "minSdkVersion": 24 + } + } +} \ No newline at end of file diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index 6aaa66bb7..b0df3fc2f 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -40,6 +40,7 @@ } ], "security": { + "capabilities": ["default"], "csp": { "default-src": "'self' customprotocol: asset: http://localhost:* http://127.0.0.1:* ws://localhost:* ws://127.0.0.1:*", "connect-src": "ipc: http://ipc.localhost http://127.0.0.1:* ws://localhost:* ws://127.0.0.1:* https: http:", @@ -72,10 +73,10 @@ "windows": { "installMode": "passive" } - }, - "deep-link": { "schemes": ["jan"] } + } }, "bundle": { + "publisher": "Menlo Research Pte. Ltd.", "active": true, "createUpdaterArtifacts": false, "icon": [ diff --git a/src-tauri/tauri.ios.conf.json b/src-tauri/tauri.ios.conf.json new file mode 100644 index 000000000..546cb4950 --- /dev/null +++ b/src-tauri/tauri.ios.conf.json @@ -0,0 +1,21 @@ +{ + "build": { + "devUrl": null, + "frontendDist": "../web-app/dist" + }, + "identifier": "jan.ai.app", + "app": { + "security": { + "capabilities": ["mobile"] + } + }, + "plugins": {}, + "bundle": { + "active": true, + "iOS": { + "developmentTeam": "" + }, + "resources": ["resources/LICENSE"], + "externalBin": [] + } +} \ No newline at end of file diff --git a/src-tauri/tauri.linux.conf.json b/src-tauri/tauri.linux.conf.json index 80e7446ff..85f39ba50 100644 --- a/src-tauri/tauri.linux.conf.json +++ b/src-tauri/tauri.linux.conf.json @@ -1,18 +1,20 @@ { + "app": { + "security": { + "capabilities": ["desktop", "system-monitor-window"] + } + }, "bundle": { "targets": ["deb", "appimage"], - "resources": ["resources/pre-install/**/*", "resources/LICENSE"], - "externalBin": ["resources/bin/uv"], + "resources": ["resources/LICENSE"], "linux": { "appimage": { "bundleMediaFramework": false, - "files": { - } + "files": {} }, "deb": { "files": { - "usr/bin/bun": "resources/bin/bun", - "usr/lib/Jan/resources/lib/libvulkan.so": "resources/lib/libvulkan.so" + "usr/bin/bun": "resources/bin/bun" } } } diff --git a/src-tauri/tauri.macos.conf.json b/src-tauri/tauri.macos.conf.json index d7d80f669..2113bd0fa 100644 --- a/src-tauri/tauri.macos.conf.json +++ b/src-tauri/tauri.macos.conf.json @@ -1,7 +1,11 @@ { + "app": { + "security": { + "capabilities": ["desktop", "system-monitor-window"] + } + }, "bundle": { "targets": ["app", "dmg"], - "resources": ["resources/pre-install/**/*", "resources/LICENSE"], - "externalBin": ["resources/bin/bun", "resources/bin/uv"] + "resources": ["resources/LICENSE"] } } diff --git a/src-tauri/tauri.windows.conf.json b/src-tauri/tauri.windows.conf.json index 16cb9b10a..91e2eb374 100644 --- a/src-tauri/tauri.windows.conf.json +++ b/src-tauri/tauri.windows.conf.json @@ -1,7 +1,17 @@ { + "app": { + "security": { + "capabilities": ["desktop"] + } + }, + "bundle": { "targets": ["nsis"], - "resources": ["resources/pre-install/**/*", "resources/lib/vulkan-1.dll", 
"resources/lib/vc_redist.x64.exe", "resources/LICENSE"], + "resources": [ + "resources/pre-install/**/*", + "resources/lib/vc_redist.x64.exe", + "resources/LICENSE" + ], "externalBin": ["resources/bin/bun", "resources/bin/uv"], "windows": { "nsis": { diff --git a/src-tauri/utils/src/system.rs b/src-tauri/utils/src/system.rs index d4ebc79af..efb137550 100644 --- a/src-tauri/utils/src/system.rs +++ b/src-tauri/utils/src/system.rs @@ -1,5 +1,5 @@ -/// Checks AVX2 CPU support for npx override with bun binary -pub fn can_override_npx() -> bool { +/// Checks if npx can be overridden with bun binary +pub fn can_override_npx(bun_path: String) -> bool { // We need to check the CPU for the AVX2 instruction support if we are running under MacOS // with Intel CPU. We can override `npx` command with `bun` only if CPU is // supporting AVX2, otherwise we need to use default `npx` binary @@ -13,10 +13,31 @@ pub fn can_override_npx() -> bool { return false; // we cannot override npx with bun binary } } - + // Check if bun_path exists + if !std::path::Path::new(bun_path.as_str()).exists() { + #[cfg(feature = "logging")] + log::warn!( + "bun binary not found at '{}', default npx binary will be used", + bun_path + ); + return false; + } true // by default, we can override npx with bun binary } +/// Checks if uv_path exists and determines if uvx can be overridden with the uv binary +pub fn can_override_uvx(uv_path: String) -> bool { + if !std::path::Path::new(uv_path.as_str()).exists() { + #[cfg(feature = "logging")] + log::warn!( + "uv binary not found at '{}', default uvx binary will be used", + uv_path + ); + return false; + } + true // by default, we can override uvx with uv binary +} + /// Setup library paths for different operating systems pub fn setup_library_path(library_path: Option<&str>, command: &mut tokio::process::Command) { if let Some(lib_path) = library_path { @@ -60,7 +81,6 @@ pub fn setup_library_path(library_path: Option<&str>, command: &mut tokio::proce pub fn setup_windows_process_flags(command: &mut tokio::process::Command) { #[cfg(all(windows, target_arch = "x86_64"))] { - use std::os::windows::process::CommandExt; const CREATE_NO_WINDOW: u32 = 0x0800_0000; const CREATE_NEW_PROCESS_GROUP: u32 = 0x0000_0200; command.creation_flags(CREATE_NO_WINDOW | CREATE_NEW_PROCESS_GROUP); diff --git a/web-app/index.html b/web-app/index.html index fc264d096..dd2e76ee6 100644 --- a/web-app/index.html +++ b/web-app/index.html @@ -1,12 +1,26 @@ - + - - + + - + Jan +
    diff --git a/web-app/package.json b/web-app/package.json index da7849f87..88bbe411a 100644 --- a/web-app/package.json +++ b/web-app/package.json @@ -21,8 +21,8 @@ "@dnd-kit/core": "6.3.1", "@dnd-kit/modifiers": "9.0.0", "@dnd-kit/sortable": "10.0.0", - "@jan/extensions-web": "link:../extensions-web", - "@janhq/core": "link:../core", + "@jan/extensions-web": "workspace:*", + "@janhq/core": "workspace:*", "@radix-ui/react-accordion": "1.2.11", "@radix-ui/react-avatar": "1.1.10", "@radix-ui/react-dialog": "1.1.15", diff --git a/web-app/src/__tests__/i18n.test.ts b/web-app/src/__tests__/i18n.test.ts index 644bc019d..262d93194 100644 --- a/web-app/src/__tests__/i18n.test.ts +++ b/web-app/src/__tests__/i18n.test.ts @@ -49,4 +49,4 @@ describe('i18n module', () => { expect(i18nModule[exportName]).toBeDefined() }) }) -}) \ No newline at end of file +}) diff --git a/web-app/src/__tests__/main.test.tsx b/web-app/src/__tests__/main.test.tsx index c105482bf..aec753d56 100644 --- a/web-app/src/__tests__/main.test.tsx +++ b/web-app/src/__tests__/main.test.tsx @@ -76,4 +76,4 @@ describe('main.tsx', () => { await import('../main') }).rejects.toThrow() }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/dialog.test.tsx b/web-app/src/components/ui/__tests__/dialog.test.tsx index b4c1f5aab..aeb0cbf52 100644 --- a/web-app/src/components/ui/__tests__/dialog.test.tsx +++ b/web-app/src/components/ui/__tests__/dialog.test.tsx @@ -416,4 +416,4 @@ describe('Dialog Components', () => { expect(screen.getByText('Dialog description')).toHaveAttribute('data-slot', 'dialog-description') expect(screen.getByText('Footer button').closest('div')).toHaveAttribute('data-slot', 'dialog-footer') }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/dropdown-menu.test.tsx b/web-app/src/components/ui/__tests__/dropdown-menu.test.tsx index 7b0da6f76..541ae0f93 100644 --- a/web-app/src/components/ui/__tests__/dropdown-menu.test.tsx +++ b/web-app/src/components/ui/__tests__/dropdown-menu.test.tsx @@ -853,4 +853,4 @@ describe('DropdownMenu Components', () => { expect(handleItemClick).toHaveBeenCalledTimes(1) }) }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/dropdrawer.test.tsx b/web-app/src/components/ui/__tests__/dropdrawer.test.tsx index 6203d9f4e..cef88b9d9 100644 --- a/web-app/src/components/ui/__tests__/dropdrawer.test.tsx +++ b/web-app/src/components/ui/__tests__/dropdrawer.test.tsx @@ -530,4 +530,4 @@ describe('DropDrawer Component', () => { expect(trigger).toHaveAttribute('aria-haspopup', 'dialog') }) }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/hover-card.test.tsx b/web-app/src/components/ui/__tests__/hover-card.test.tsx index 71e78cb7f..5e0a39628 100644 --- a/web-app/src/components/ui/__tests__/hover-card.test.tsx +++ b/web-app/src/components/ui/__tests__/hover-card.test.tsx @@ -165,4 +165,4 @@ describe('HoverCard Components', () => { expect(screen.getByText('Hover content')).toBeDefined() }) }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/input.test.tsx b/web-app/src/components/ui/__tests__/input.test.tsx index 2ae18adad..ddf2fa7db 100644 --- a/web-app/src/components/ui/__tests__/input.test.tsx +++ b/web-app/src/components/ui/__tests__/input.test.tsx @@ -93,4 +93,4 @@ describe('Input', () => { fireEvent.blur(input) expect(handleBlur).toHaveBeenCalledTimes(1) }) -}) \ No newline at end of file +}) diff --git 
a/web-app/src/components/ui/__tests__/popover.test.tsx b/web-app/src/components/ui/__tests__/popover.test.tsx index cec809bb7..b76d1ce04 100644 --- a/web-app/src/components/ui/__tests__/popover.test.tsx +++ b/web-app/src/components/ui/__tests__/popover.test.tsx @@ -436,4 +436,4 @@ describe('Popover Components', () => { }) }) }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/progress.test.tsx b/web-app/src/components/ui/__tests__/progress.test.tsx index daa4b5c05..90a7bc70f 100644 --- a/web-app/src/components/ui/__tests__/progress.test.tsx +++ b/web-app/src/components/ui/__tests__/progress.test.tsx @@ -84,4 +84,4 @@ describe('Progress', () => { // For values over 100, the transform should be positive expect(indicator?.style.transform).toContain('translateX(--50%)') }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/radio-group.test.tsx b/web-app/src/components/ui/__tests__/radio-group.test.tsx index a788931d8..1cb85e7c6 100644 --- a/web-app/src/components/ui/__tests__/radio-group.test.tsx +++ b/web-app/src/components/ui/__tests__/radio-group.test.tsx @@ -59,4 +59,4 @@ describe('RadioGroup', () => { expect(screen.getByLabelText('HTTP')).toBeChecked() expect(screen.getByLabelText('SSE')).not.toBeChecked() }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/sheet.test.tsx b/web-app/src/components/ui/__tests__/sheet.test.tsx index dc21bbe66..988e512c0 100644 --- a/web-app/src/components/ui/__tests__/sheet.test.tsx +++ b/web-app/src/components/ui/__tests__/sheet.test.tsx @@ -260,4 +260,4 @@ describe('Sheet Components', () => { expect(screen.getByText('Main Content')).toBeInTheDocument() expect(screen.getByText('Close')).toBeInTheDocument() }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/skeleton.test.tsx b/web-app/src/components/ui/__tests__/skeleton.test.tsx index 273be182e..39d9535a7 100644 --- a/web-app/src/components/ui/__tests__/skeleton.test.tsx +++ b/web-app/src/components/ui/__tests__/skeleton.test.tsx @@ -61,4 +61,4 @@ describe('Skeleton', () => { expect(skeleton).toHaveClass('w-full') expect(skeleton).toHaveClass('bg-red-500') }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/slider.test.tsx b/web-app/src/components/ui/__tests__/slider.test.tsx index 5fd72f766..2a15441b2 100644 --- a/web-app/src/components/ui/__tests__/slider.test.tsx +++ b/web-app/src/components/ui/__tests__/slider.test.tsx @@ -190,4 +190,4 @@ describe('Slider', () => { expect(thumb).toHaveClass('border-accent', 'bg-main-view', 'rounded-full') }) }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/sonner.test.tsx b/web-app/src/components/ui/__tests__/sonner.test.tsx index 72aca5526..8b2fc762d 100644 --- a/web-app/src/components/ui/__tests__/sonner.test.tsx +++ b/web-app/src/components/ui/__tests__/sonner.test.tsx @@ -90,4 +90,4 @@ describe('Toaster Component', () => { expect(toaster).toHaveAttribute('data-rich-colors', 'true') expect(toaster).toHaveAttribute('data-close-button', 'true') }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/switch.test.tsx b/web-app/src/components/ui/__tests__/switch.test.tsx index d872dbc11..0db35d716 100644 --- a/web-app/src/components/ui/__tests__/switch.test.tsx +++ b/web-app/src/components/ui/__tests__/switch.test.tsx @@ -189,4 +189,4 @@ describe('Switch', () => { const switchElement = 
document.querySelector('[data-slot="switch"]') expect(switchElement).toHaveClass('data-[state=unchecked]:bg-main-view-fg/20') }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/textarea.test.tsx b/web-app/src/components/ui/__tests__/textarea.test.tsx index 6daf09e4d..806cf9515 100644 --- a/web-app/src/components/ui/__tests__/textarea.test.tsx +++ b/web-app/src/components/ui/__tests__/textarea.test.tsx @@ -113,4 +113,4 @@ describe('Textarea', () => { const textarea = screen.getByRole('textbox') expect(textarea).toHaveAttribute('cols', '50') }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/__tests__/tooltip.test.tsx b/web-app/src/components/ui/__tests__/tooltip.test.tsx index 4221751d4..46c68e0b8 100644 --- a/web-app/src/components/ui/__tests__/tooltip.test.tsx +++ b/web-app/src/components/ui/__tests__/tooltip.test.tsx @@ -111,4 +111,4 @@ describe('Tooltip Components', () => { expect(screen.getByText('First')).toBeInTheDocument() expect(screen.getByText('Second')).toBeInTheDocument() }) -}) \ No newline at end of file +}) diff --git a/web-app/src/components/ui/radio-group.tsx b/web-app/src/components/ui/radio-group.tsx index b5fa0593a..e604ac901 100644 --- a/web-app/src/components/ui/radio-group.tsx +++ b/web-app/src/components/ui/radio-group.tsx @@ -39,4 +39,4 @@ const RadioGroupItem = React.forwardRef< }) RadioGroupItem.displayName = RadioGroupPrimitive.Item.displayName -export { RadioGroup, RadioGroupItem } \ No newline at end of file +export { RadioGroup, RadioGroupItem } diff --git a/web-app/src/constants/__tests__/windows.test.ts b/web-app/src/constants/__tests__/windows.test.ts index f9d388f98..3f55184e1 100644 --- a/web-app/src/constants/__tests__/windows.test.ts +++ b/web-app/src/constants/__tests__/windows.test.ts @@ -33,4 +33,4 @@ describe('windows constants', () => { expect(value.length).toBeGreaterThan(0) }) }) -}) \ No newline at end of file +}) diff --git a/web-app/src/constants/chat.ts b/web-app/src/constants/chat.ts new file mode 100644 index 000000000..e1649049f --- /dev/null +++ b/web-app/src/constants/chat.ts @@ -0,0 +1,6 @@ +/** + * Chat-related constants + */ + +export const TEMPORARY_CHAT_ID = 'temporary-chat' +export const TEMPORARY_CHAT_QUERY_ID = 'temporary-chat' diff --git a/web-app/src/containers/ChatInput.tsx b/web-app/src/containers/ChatInput.tsx index cba580ebd..95bdc9b39 100644 --- a/web-app/src/containers/ChatInput.tsx +++ b/web-app/src/containers/ChatInput.tsx @@ -4,7 +4,6 @@ import TextareaAutosize from 'react-textarea-autosize' import { cn } from '@/lib/utils' import { usePrompt } from '@/hooks/usePrompt' import { useThreads } from '@/hooks/useThreads' -import { useThreadManagement } from '@/hooks/useThreadManagement' import { useCallback, useEffect, useRef, useState } from 'react' import { Button } from '@/components/ui/button' import { @@ -38,6 +37,9 @@ import { useTools } from '@/hooks/useTools' import { TokenCounter } from '@/components/TokenCounter' import { useMessages } from '@/hooks/useMessages' import { useShallow } from 'zustand/react/shallow' +import { McpExtensionToolLoader } from './McpExtensionToolLoader' +import { ExtensionTypeEnum, MCPExtension } from '@janhq/core' +import { ExtensionManager } from '@/lib/extension' type ChatInputProps = { className?: string @@ -65,8 +67,6 @@ const ChatInput = ({ const prompt = usePrompt((state) => state.prompt) const setPrompt = usePrompt((state) => state.setPrompt) const currentThreadId = useThreads((state) => state.currentThreadId) - const 
updateThread = useThreads((state) => state.updateThread) - const { getFolderById } = useThreadManagement() const { t } = useTranslation() const spellCheckChatInput = useGeneralSetting( (state) => state.spellCheckChatInput @@ -171,7 +171,13 @@ const ChatInput = ({ // Check if there are active MCP servers const hasActiveMCPServers = connectedServers.length > 0 || tools.length > 0 - const handleSendMesage = (prompt: string) => { + // Get MCP extension and its custom component + const extensionManager = ExtensionManager.getInstance() + const mcpExtension = extensionManager.get(ExtensionTypeEnum.MCP) + const MCPToolComponent = mcpExtension?.getToolComponent?.() + + + const handleSendMesage = async (prompt: string) => { if (!selectedModel) { setMessage('Please select a model to start chatting.') return @@ -183,31 +189,10 @@ const ChatInput = ({ sendMessage( prompt, true, - uploadedFiles.length > 0 ? uploadedFiles : undefined + uploadedFiles.length > 0 ? uploadedFiles : undefined, + projectId ) setUploadedFiles([]) - - // Handle project assignment for new threads - if (projectId && !currentThreadId) { - const project = getFolderById(projectId) - if (project) { - // Use setTimeout to ensure the thread is created first - setTimeout(() => { - const newCurrentThreadId = useThreads.getState().currentThreadId - if (newCurrentThreadId) { - updateThread(newCurrentThreadId, { - metadata: { - project: { - id: project.id, - name: project.name, - updated_at: project.updated_at, - }, - }, - }) - } - }, 100) - } - } } useEffect(() => { @@ -719,60 +704,72 @@ const ChatInput = ({ {selectedModel?.capabilities?.includes('tools') && hasActiveMCPServers && ( - - - + ) : ( + // Use default tools dropdown + + -
[Extraction-garbled span: the remainder of this ChatInput.tsx hunk. Recoverable intent: the existing tools button markup (a Tooltip/TooltipTrigger wrapping the DropdownToolsAvailable trigger button, with a badge showing toolsCount capped at '99+' and a {t('tools')} tooltip label) is removed and re-added with deeper indentation only, so it now renders solely in the "Use default tools dropdown" branch of the new MCPToolComponent conditional introduced above; the surrounding JSX tags were lost during extraction.]
    + ) )} {selectedModel?.capabilities?.includes('web_search') && ( diff --git a/web-app/src/containers/DropdownModelProvider.tsx b/web-app/src/containers/DropdownModelProvider.tsx index 8f9ea35a8..a8614f89d 100644 --- a/web-app/src/containers/DropdownModelProvider.tsx +++ b/web-app/src/containers/DropdownModelProvider.tsx @@ -6,7 +6,7 @@ import { PopoverTrigger, } from '@/components/ui/popover' import { useModelProvider } from '@/hooks/useModelProvider' -import { cn, getProviderTitle } from '@/lib/utils' +import { cn, getProviderTitle, getModelDisplayName } from '@/lib/utils' import { highlightFzfMatch } from '@/utils/highlight' import Capabilities from './Capabilities' import { IconSettings, IconX } from '@tabler/icons-react' @@ -240,7 +240,7 @@ const DropdownModelProvider = ({ // Update display model when selection changes useEffect(() => { if (selectedProvider && selectedModel) { - setDisplayModel(selectedModel.id) + setDisplayModel(getModelDisplayName(selectedModel)) } else { setDisplayModel(t('common:selectAModel')) } @@ -326,7 +326,7 @@ const DropdownModelProvider = ({ // Create Fzf instance for fuzzy search const fzfInstance = useMemo(() => { return new Fzf(searchableItems, { - selector: (item) => item.model.id.toLowerCase(), + selector: (item) => `${getModelDisplayName(item.model)} ${item.model.id}`.toLowerCase(), }) }, [searchableItems]) @@ -390,7 +390,7 @@ const DropdownModelProvider = ({ const handleSelect = useCallback( async (searchableModel: SearchableModel) => { // Immediately update display to prevent double-click issues - setDisplayModel(searchableModel.model.id) + setDisplayModel(getModelDisplayName(searchableModel.model)) setSearchValue('') setOpen(false) @@ -576,7 +576,7 @@ const DropdownModelProvider = ({ /> - {searchableModel.model.id} + {getModelDisplayName(searchableModel.model)}
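Note: the hunks above and below replace raw model.id rendering with getModelDisplayName (and feed both the display name and the id into the Fzf selector so either string is searchable), but the helper itself is not part of this diff. A minimal sketch of what it could look like, assuming it lives in @/lib/utils alongside cn and getProviderTitle and that catalog models may carry an optional human-readable name:

// Hypothetical sketch — the real getModelDisplayName in @/lib/utils is not shown in this diff.
interface ModelLike {
  id: string
  name?: string // assumed optional display name; absent for providers that only expose ids
}

export function getModelDisplayName(model: ModelLike): string {
  const name = model.name?.trim()
  // Prefer the human-readable name when present, otherwise fall back to the raw model id.
  return name && name.length > 0 ? name : model.id
}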
    {capabilities.length > 0 && ( @@ -669,7 +669,7 @@ const DropdownModelProvider = ({ className="text-main-view-fg/80 text-sm" title={searchableModel.model.id} > - {searchableModel.model.id} + {getModelDisplayName(searchableModel.model)}
    {capabilities.length > 0 && ( diff --git a/web-app/src/containers/HeaderPage.tsx b/web-app/src/containers/HeaderPage.tsx index 7c47e9273..91dbe4c7a 100644 --- a/web-app/src/containers/HeaderPage.tsx +++ b/web-app/src/containers/HeaderPage.tsx @@ -1,27 +1,69 @@ import { useLeftPanel } from '@/hooks/useLeftPanel' import { cn } from '@/lib/utils' -import { IconLayoutSidebar } from '@tabler/icons-react' -import { ReactNode } from '@tanstack/react-router' +import { useMobileScreen, useSmallScreen } from '@/hooks/useMediaQuery' +import { IconLayoutSidebar, IconMessage, IconMessageFilled } from '@tabler/icons-react' +import { ReactNode } from 'react' +import { useRouter } from '@tanstack/react-router' +import { route } from '@/constants/routes' +import { PlatformFeatures } from '@/lib/platform/const' +import { PlatformFeature } from '@/lib/platform/types' +import { TEMPORARY_CHAT_QUERY_ID } from '@/constants/chat' type HeaderPageProps = { children?: ReactNode } const HeaderPage = ({ children }: HeaderPageProps) => { const { open, setLeftPanel } = useLeftPanel() + const isMobile = useMobileScreen() + const isSmallScreen = useSmallScreen() + const router = useRouter() + const currentPath = router.state.location.pathname + + const isHomePage = currentPath === route.home + + // Parse temporary chat flag from URL search params directly to avoid invariant errors + const searchString = window.location.search + const urlSearchParams = new URLSearchParams(searchString) + const isTemporaryChat = isHomePage && urlSearchParams.get(TEMPORARY_CHAT_QUERY_ID) === 'true' + + const handleChatToggle = () => { + console.log('Chat toggle clicked!', { isTemporaryChat, isHomePage, currentPath }) + if (isHomePage) { + if (isTemporaryChat) { + console.log('Switching to regular chat') + router.navigate({ to: route.home, search: {} }) + } else { + console.log('Switching to temporary chat') + router.navigate({ to: route.home, search: { [TEMPORARY_CHAT_QUERY_ID]: true } }) + } + } + } return (
[Extraction-garbled span: the HeaderPage.tsx return block, whose JSX tags were lost. Recoverable intent: when the left panel is closed, a button with IconLayoutSidebar reopens it; {children} is rendered inside a wrapper; and, per the added comment, a temporary-chat toggle button (IconMessage / IconMessageFilled, wired to handleChatToggle) is shown only when PlatformFeatures[PlatformFeature.TEMPORARY_CHAT] is enabled and the current route is the home page.]
    ) diff --git a/web-app/src/containers/LeftPanel.tsx b/web-app/src/containers/LeftPanel.tsx index 24f3bf911..a7be18576 100644 --- a/web-app/src/containers/LeftPanel.tsx +++ b/web-app/src/containers/LeftPanel.tsx @@ -1,4 +1,4 @@ -import { Link, useRouterState } from '@tanstack/react-router' +import { Link, useRouterState, useNavigate } from '@tanstack/react-router' import { useLeftPanel } from '@/hooks/useLeftPanel' import { cn } from '@/lib/utils' import { @@ -58,6 +58,9 @@ const mainMenus = [ route: route.project, isEnabled: true, }, +] + +const secondaryMenus = [ { title: 'common:assistants', icon: IconClipboardSmile, @@ -82,6 +85,7 @@ const LeftPanel = () => { const open = useLeftPanel((state) => state.open) const setLeftPanel = useLeftPanel((state) => state.setLeftPanel) const { t } = useTranslation() + const navigate = useNavigate() const [searchTerm, setSearchTerm] = useState('') const { isAuthenticated } = useAuth() @@ -150,6 +154,7 @@ const LeftPanel = () => { } }, [setLeftPanel, open]) + const currentPath = useRouterState({ select: (state) => state.location.pathname, }) @@ -159,7 +164,7 @@ const LeftPanel = () => { const getFilteredThreads = useThreads((state) => state.getFilteredThreads) const threads = useThreads((state) => state.threads) - const { folders, addFolder, updateFolder, deleteFolder, getFolderById } = + const { folders, addFolder, updateFolder, getFolderById } = useThreadManagement() // Project dialog states @@ -200,19 +205,21 @@ const LeftPanel = () => { setDeleteProjectConfirmOpen(true) } - const confirmProjectDelete = () => { - if (deletingProjectId) { - deleteFolder(deletingProjectId) - setDeleteProjectConfirmOpen(false) - setDeletingProjectId(null) - } + const handleProjectDeleteClose = () => { + setDeleteProjectConfirmOpen(false) + setDeletingProjectId(null) } - const handleProjectSave = (name: string) => { + const handleProjectSave = async (name: string) => { if (editingProjectKey) { - updateFolder(editingProjectKey, name) + await updateFolder(editingProjectKey, name) } else { - addFolder(name) + const newProject = await addFolder(name) + // Navigate to the newly created project + navigate({ + to: '/project/$projectId', + params: { projectId: newProject.id }, + }) } setProjectDialogOpen(false) setEditingProjectKey(null) @@ -234,7 +241,7 @@ const LeftPanel = () => { return ( <> {/* Backdrop overlay for small screens */} - {isSmallScreen && open && ( + {isSmallScreen && open && !IS_IOS && !IS_ANDROID && (
    { @@ -257,7 +264,7 @@ const LeftPanel = () => { isResizableContext && 'h-full w-full', // Small screen context: fixed positioning and styling isSmallScreen && - 'fixed h-[calc(100%-16px)] bg-app z-50 rounded-sm border border-left-panel-fg/10 m-2 px-1 w-48', + 'fixed h-full pb-[calc(env(safe-area-inset-bottom)+env(safe-area-inset-top))] bg-main-view z-50 md:border border-left-panel-fg/10 px-1 w-full md:w-48', // Default context: original styling !isResizableContext && !isSmallScreen && @@ -487,7 +494,7 @@ const LeftPanel = () => { )}
    -
    +
    {favoritedThreads.length > 0 && ( <> @@ -607,6 +614,44 @@ const LeftPanel = () => {
    + + {secondaryMenus.map((menu) => { + if (!menu.isEnabled) { + return null + } + + // Regular menu items must have route and icon + if (!menu.route || !menu.icon) return null + + const isActive = (() => { + // Settings routes + if (menu.route.includes(route.settings.index)) { + return currentPath.includes(route.settings.index) + } + + // Default exact match for other routes + return currentPath === menu.route + })() + return ( + isSmallScreen && setLeftPanel(false)} + data-test-id={`menu-${menu.title}`} + activeOptions={{ exact: true }} + className={cn( + 'flex items-center gap-1.5 cursor-pointer hover:bg-left-panel-fg/10 py-1 px-1 rounded', + isActive && 'bg-left-panel-fg/10' + )} + > + + + {t(menu.title)} + + + ) + })} + {PlatformFeatures[PlatformFeature.AUTHENTICATION] && (
    @@ -633,8 +678,8 @@ const LeftPanel = () => { /> | null +} + +export const McpExtensionToolLoader = ({ + tools, + hasActiveMCPServers, + selectedModelHasTools, + initialMessage, + MCPToolComponent, +}: McpExtensionToolLoaderProps) => { + // Get tool management hooks + const { isToolDisabled, setToolDisabledForThread, setDefaultDisabledTools, getDefaultDisabledTools } = useToolAvailable() + const { getCurrentThread } = useThreads() + const currentThread = getCurrentThread() + + // Handle tool toggle for custom component + const handleToolToggle = (toolName: string, enabled: boolean) => { + if (initialMessage) { + const currentDefaults = getDefaultDisabledTools() + if (enabled) { + setDefaultDisabledTools(currentDefaults.filter((name) => name !== toolName)) + } else { + setDefaultDisabledTools([...currentDefaults, toolName]) + } + } else if (currentThread?.id) { + setToolDisabledForThread(currentThread.id, toolName, enabled) + } + } + + const isToolEnabled = (toolName: string): boolean => { + if (initialMessage) { + return !getDefaultDisabledTools().includes(toolName) + } else if (currentThread?.id) { + return !isToolDisabled(currentThread.id, toolName) + } + return false + } + + // Only render if we have the custom MCP component and conditions are met + if (!selectedModelHasTools || !hasActiveMCPServers || !MCPToolComponent) { + return null + } + + return ( + + ) +} diff --git a/web-app/src/containers/ModelSetting.tsx b/web-app/src/containers/ModelSetting.tsx index 9a3bfd814..079b735aa 100644 --- a/web-app/src/containers/ModelSetting.tsx +++ b/web-app/src/containers/ModelSetting.tsx @@ -14,7 +14,7 @@ import { Button } from '@/components/ui/button' import { DynamicControllerSetting } from '@/containers/dynamicControllerSetting' import { useModelProvider } from '@/hooks/useModelProvider' import { useServiceHub } from '@/hooks/useServiceHub' -import { cn } from '@/lib/utils' +import { cn, getModelDisplayName } from '@/lib/utils' import { useTranslation } from '@/i18n/react-i18next-compat' type ModelSettingProps = { @@ -261,7 +261,7 @@ export function ModelSetting({ - {t('common:modelSettings.title', { modelId: model.id })} + {t('common:modelSettings.title', { modelId: getModelDisplayName(model) })} {t('common:modelSettings.description')} diff --git a/web-app/src/containers/RenderMarkdown.tsx b/web-app/src/containers/RenderMarkdown.tsx index 31d08cf10..c941b512d 100644 --- a/web-app/src/containers/RenderMarkdown.tsx +++ b/web-app/src/containers/RenderMarkdown.tsx @@ -1,4 +1,3 @@ -/* eslint-disable react-hooks/exhaustive-deps */ import ReactMarkdown, { Components } from 'react-markdown' import remarkGfm from 'remark-gfm' import remarkEmoji from 'remark-emoji' diff --git a/web-app/src/containers/SettingsMenu.tsx b/web-app/src/containers/SettingsMenu.tsx index da0e94870..78389233d 100644 --- a/web-app/src/containers/SettingsMenu.tsx +++ b/web-app/src/containers/SettingsMenu.tsx @@ -30,12 +30,15 @@ const SettingsMenu = () => { // On web: exclude llamacpp provider as it's not available const activeProviders = providers.filter((provider) => { if (!provider.active) return false - + // On web version, hide llamacpp provider - if (!PlatformFeatures[PlatformFeature.LOCAL_INFERENCE] && provider.provider === 'llama.cpp') { + if ( + !PlatformFeatures[PlatformFeature.LOCAL_INFERENCE] && + provider.provider === 'llama.cpp' + ) { return false } - + return true }) @@ -92,7 +95,7 @@ const SettingsMenu = () => { title: 'common:keyboardShortcuts', route: route.settings.shortcuts, hasSubMenu: false, - 
isEnabled: true, + isEnabled: PlatformFeatures[PlatformFeature.SHORTCUT], }, { title: 'common:hardware', @@ -137,7 +140,7 @@ const SettingsMenu = () => { return ( <> - )} -
    - +
    + +
    + + {t(menu.title)} + + {menu.hasSubMenu && ( + + )} +
    + - {/* Sub-menu for model providers */} - {menu.hasSubMenu && expandedProviders && ( -
    - {activeProviders.map((provider) => { - const isActive = matches.some( - (match) => - match.routeId === '/settings/providers/$providerName' && - 'providerName' in match.params && - match.params.providerName === provider.provider - ) + {/* Sub-menu for model providers */} + {menu.hasSubMenu && expandedProviders && ( +
    + {activeProviders.map((provider) => { + const isActive = matches.some( + (match) => + match.routeId === + '/settings/providers/$providerName' && + 'providerName' in match.params && + match.params.providerName === provider.provider + ) - return ( -
    - - )} -
    + ) + })} +
    + )} +
    ) })}
    diff --git a/web-app/src/containers/SetupScreen.tsx b/web-app/src/containers/SetupScreen.tsx index bce474836..dadc16362 100644 --- a/web-app/src/containers/SetupScreen.tsx +++ b/web-app/src/containers/SetupScreen.tsx @@ -6,6 +6,8 @@ import HeaderPage from './HeaderPage' import { isProd } from '@/lib/version' import { useTranslation } from '@/i18n/react-i18next-compat' import { localStorageKey } from '@/constants/localStorage' +import { PlatformFeatures } from '@/lib/platform/const' +import { PlatformFeature } from '@/lib/platform' function SetupScreen() { const { t } = useTranslation() @@ -21,7 +23,7 @@ function SetupScreen() {
    -
    +

    {t('setup:welcome')} @@ -31,22 +33,24 @@ function SetupScreen() {

    - -
    -

    - {t('setup:localModel')} -

    -
    - - } - >
    + {PlatformFeatures[PlatformFeature.LOCAL_INFERENCE] && ( + +
    +

    + {t('setup:localModel')} +

    +
    + + } + /> + )} } - > + />
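Note: SetupScreen (above), SettingsMenu, and HeaderPage all gate UI behind PlatformFeatures[PlatformFeature.X] lookups (LOCAL_INFERENCE, SHORTCUT, TEMPORARY_CHAT, AUTHENTICATION), but the flag table itself is not in this diff. A minimal sketch of the assumed shape, so the JSX conditions read as plain boolean lookups; the enum string values below are placeholders, not the project's real ones:

// Assumed shape only — the real definitions live under @/lib/platform and are not shown here.
export enum PlatformFeature {
  LOCAL_INFERENCE = 'local-inference', // placeholder value
  SHORTCUT = 'shortcut', // placeholder value
  TEMPORARY_CHAT = 'temporary-chat', // placeholder value
  AUTHENTICATION = 'authentication', // placeholder value
}

// Resolved once per build target (e.g. desktop vs. web vs. mobile) and imported as a constant.
export const PlatformFeatures: Record<PlatformFeature, boolean> = {
  [PlatformFeature.LOCAL_INFERENCE]: true,
  [PlatformFeature.SHORTCUT]: true,
  [PlatformFeature.TEMPORARY_CHAT]: false,
  [PlatformFeature.AUTHENTICATION]: false,
}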
    diff --git a/web-app/src/containers/ThinkingBlock.tsx b/web-app/src/containers/ThinkingBlock.tsx index 68ab8644f..211fda9ff 100644 --- a/web-app/src/containers/ThinkingBlock.tsx +++ b/web-app/src/containers/ThinkingBlock.tsx @@ -3,6 +3,7 @@ import { create } from 'zustand' import { RenderMarkdown } from './RenderMarkdown' import { useAppState } from '@/hooks/useAppState' import { useTranslation } from '@/i18n/react-i18next-compat' +import { extractThinkingContent } from '@/lib/utils' interface Props { text: string @@ -43,19 +44,6 @@ const ThinkingBlock = ({ id, text }: Props) => { setThinkingState(id, newExpandedState) } - // Extract thinking content from either format - const extractThinkingContent = (text: string) => { - return text - .replace(/<\/?think>/g, '') - .replace(/<\|channel\|>analysis<\|message\|>/g, '') - .replace(/<\|start\|>assistant<\|channel\|>final<\|message\|>/g, '') - .replace(/assistant<\|channel\|>final<\|message\|>/g, '') - .replace(/<\|channel\|>/g, '') // remove any remaining channel markers - .replace(/<\|message\|>/g, '') // remove any remaining message markers - .replace(/<\|start\|>/g, '') // remove any remaining start markers - .trim() - } - const thinkingContent = extractThinkingContent(text) if (!thinkingContent) return null diff --git a/web-app/src/containers/ThreadList.tsx b/web-app/src/containers/ThreadList.tsx index b58d1872a..a8dadbb62 100644 --- a/web-app/src/containers/ThreadList.tsx +++ b/web-app/src/containers/ThreadList.tsx @@ -23,7 +23,7 @@ import { useThreads } from '@/hooks/useThreads' import { useThreadManagement } from '@/hooks/useThreadManagement' import { useLeftPanel } from '@/hooks/useLeftPanel' import { useMessages } from '@/hooks/useMessages' -import { cn } from '@/lib/utils' +import { cn, extractThinkingContent } from '@/lib/utils' import { useSmallScreen } from '@/hooks/useMediaQuery' import { @@ -47,9 +47,11 @@ const SortableItem = memo( ({ thread, variant, + currentProjectId, }: { thread: Thread variant?: 'default' | 'project' + currentProjectId?: string }) => { const { attributes, @@ -108,6 +110,18 @@ const SortableItem = memo( return (thread.title || '').replace(/]*>|<\/span>/g, '') }, [thread.title]) + const availableProjects = useMemo(() => { + return folders + .filter((f) => { + // Exclude the current project page we're on + if (f.id === currentProjectId) return false + // Exclude the project this thread is already assigned to + if (f.id === thread.metadata?.project?.id) return false + return true + }) + .sort((a, b) => b.updated_at - a.updated_at) + }, [folders, currentProjectId, thread.metadata?.project?.id]) + const assignThreadToProject = (threadId: string, projectId: string) => { const project = getFolderById(projectId) if (project && updateThread) { @@ -167,14 +181,10 @@ const SortableItem = memo( )} > {thread.title || t('common:newThread')} - {variant === 'project' && ( - <> - {variant === 'project' && getLastMessageInfo?.content && ( -
    - {getLastMessageInfo.content} -
    - )} - + {variant === 'project' && getLastMessageInfo?.content && ( + + {extractThinkingContent(getLastMessageInfo.content)} + )}
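Note: extractThinkingContent was previously a private helper inside ThinkingBlock.tsx (its body is visible in the removed lines above) and is now imported from @/lib/utils by both ThinkingBlock and ThreadList, which uses it here to strip reasoning markers from the last-message preview. The shared helper presumably keeps the same body; reproduced below for reference, with only the move into @/lib/utils assumed:

// Extract thinking content from either format (body carried over from the removed ThinkingBlock helper).
export const extractThinkingContent = (text: string): string => {
  return text
    .replace(/<\/?think>/g, '')
    .replace(/<\|channel\|>analysis<\|message\|>/g, '')
    .replace(/<\|start\|>assistant<\|channel\|>final<\|message\|>/g, '')
    .replace(/assistant<\|channel\|>final<\|message\|>/g, '')
    .replace(/<\|channel\|>/g, '') // remove any remaining channel markers
    .replace(/<\|message\|>/g, '') // remove any remaining message markers
    .replace(/<\|start\|>/g, '') // remove any remaining start markers
    .trim()
}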
    @@ -185,7 +195,10 @@ const SortableItem = memo( { e.preventDefault() e.stopPropagation() @@ -227,29 +240,27 @@ const SortableItem = memo( Add to project - {folders.length === 0 ? ( + {availableProjects.length === 0 ? ( No projects available ) : ( - folders - .sort((a, b) => b.updated_at - a.updated_at) - .map((folder) => ( - { - e.stopPropagation() - assignThreadToProject(thread.id, folder.id) - }} - > - - - {folder.name} - - - )) + availableProjects.map((folder) => ( + { + e.stopPropagation() + assignThreadToProject(thread.id, folder.id) + }} + > + + + {folder.name} + + + )) )} {thread.metadata?.project && ( <> @@ -297,9 +308,10 @@ type ThreadListProps = { isFavoriteSection?: boolean variant?: 'default' | 'project' showDate?: boolean + currentProjectId?: string } -function ThreadList({ threads, variant = 'default' }: ThreadListProps) { +function ThreadList({ threads, variant = 'default', currentProjectId }: ThreadListProps) { const sortedThreads = useMemo(() => { return threads.sort((a, b) => { return (b.updated || 0) - (a.updated || 0) @@ -323,7 +335,7 @@ function ThreadList({ threads, variant = 'default' }: ThreadListProps) { strategy={verticalListSortingStrategy} > {sortedThreads.map((thread, index) => ( - + ))} diff --git a/web-app/src/containers/ThreadPadding.tsx b/web-app/src/containers/ThreadPadding.tsx new file mode 100644 index 000000000..3f4c725c3 --- /dev/null +++ b/web-app/src/containers/ThreadPadding.tsx @@ -0,0 +1,19 @@ +import { useThreadScrolling } from '@/hooks/useThreadScrolling' + +export const ThreadPadding = ({ + threadId, + scrollContainerRef, +}: { + threadId: string + scrollContainerRef: React.RefObject +}) => { + // Get padding height for ChatGPT-style message positioning + const { paddingHeight } = useThreadScrolling(threadId, scrollContainerRef) + return ( +
    + ) +} diff --git a/web-app/src/containers/__tests__/AvatarEmoji.test.tsx b/web-app/src/containers/__tests__/AvatarEmoji.test.tsx index ea44b95a0..e0ec9488a 100644 --- a/web-app/src/containers/__tests__/AvatarEmoji.test.tsx +++ b/web-app/src/containers/__tests__/AvatarEmoji.test.tsx @@ -121,4 +121,4 @@ describe('AvatarEmoji Component', () => { const img = screen.getByRole('img') expect(img).toHaveAttribute('alt', 'Custom avatar') }) -}) \ No newline at end of file +}) diff --git a/web-app/src/containers/__tests__/ChatInput.simple.test.tsx b/web-app/src/containers/__tests__/ChatInput.simple.test.tsx index a1c71baa8..8992edbc0 100644 --- a/web-app/src/containers/__tests__/ChatInput.simple.test.tsx +++ b/web-app/src/containers/__tests__/ChatInput.simple.test.tsx @@ -36,4 +36,4 @@ describe('ChatInput Simple Tests', () => { const sendButton = screen.getByTestId('send-message-button') expect(sendButton).toHaveTextContent('Send') }) -}) \ No newline at end of file +}) diff --git a/web-app/src/containers/__tests__/ChatInput.test.tsx b/web-app/src/containers/__tests__/ChatInput.test.tsx index 50b0b6172..1d296e15d 100644 --- a/web-app/src/containers/__tests__/ChatInput.test.tsx +++ b/web-app/src/containers/__tests__/ChatInput.test.tsx @@ -9,6 +9,7 @@ import { useAppState } from '@/hooks/useAppState' import { useGeneralSetting } from '@/hooks/useGeneralSetting' import { useModelProvider } from '@/hooks/useModelProvider' import { useChat } from '@/hooks/useChat' +import type { ThreadModel } from '@/types/threads' // Mock dependencies with mutable state let mockPromptState = { @@ -138,18 +139,70 @@ vi.mock('../MovingBorder', () => ({ vi.mock('../DropdownModelProvider', () => ({ __esModule: true, - default: () =>
    Model Dropdown
    , + default: () =>
    Model Dropdown
    , +})) + +vi.mock('../loaders/ModelLoader', () => ({ + ModelLoader: () =>
    Model Loader
    , })) vi.mock('../DropdownToolsAvailable', () => ({ __esModule: true, default: ({ children }: { children: (isOpen: boolean, toolsCount: number) => React.ReactNode }) => { - return
    {children(false, 0)}
    + return
    {children(false, 0)}
    }, })) -vi.mock('../loaders/ModelLoader', () => ({ - ModelLoader: () =>
    Loading...
    , +vi.mock('@/components/ui/button', () => ({ + Button: ({ children, onClick, disabled, ...props }: any) => ( + + ), +})) + +vi.mock('@/components/ui/tooltip', () => ({ + Tooltip: ({ children }: { children: React.ReactNode }) =>
    {children}
    , + TooltipContent: ({ children }: { children: React.ReactNode }) =>
    {children}
    , + TooltipProvider: ({ children }: { children: React.ReactNode }) =>
    {children}
    , + TooltipTrigger: ({ children }: { children: React.ReactNode }) =>
    {children}
    , +})) + +vi.mock('react-textarea-autosize', () => ({ + default: ({ value, onChange, onKeyDown, placeholder, disabled, className, minRows, maxRows, onHeightChange, ...props }: any) => ( +