Compare commits

..

No commits in common. "dev" and "release/v0.6.5" have entirely different histories.

1601 changed files with 46390 additions and 125480 deletions

View File

@ -1,31 +0,0 @@
---
allowed-tools: Bash(gh issue view:*), Bash(gh search:*), Bash(gh issue list:*), Bash(gh api:*), Bash(gh issue comment:*)
description: Find duplicate GitHub issues
---
Find up to 3 likely duplicate issues for a given GitHub issue.
To do this, follow these steps precisely:
1. Use an agent to check if the Github issue (a) is closed, (b) does not need to be deduped (eg. because it is broad product feedback without a specific solution, or positive feedback), or (c) already has a duplicates comment that you made earlier. If so, do not proceed.
2. Use an agent to view a Github issue, and ask the agent to return a summary of the issue
3. Then, launch 5 parallel agents to search GitHub for duplicates of this issue, using diverse keywords and search approaches, using the summary from step 2
4. Next, feed the summary from step 2 and the search results from step 3 into another agent, so that it can filter out false positives that are likely not actually duplicates of the original issue. If there are no duplicates remaining, do not proceed.
5. Finally, comment back on the issue with a list of up to three duplicate issues (or zero, if there are no likely duplicates)
Notes (be sure to tell this to your agents, too):
- Use `gh` to interact with Github, rather than web fetch
- Do not use other tools, beyond `gh` (eg. don't use other MCP servers, file edit, etc.)
- Make a todo list first
- For your comment, follow the following format precisely (assuming for this example that you found 3 suspected duplicates):
---
Found 3 possible duplicate issues:
1. <link to issue>
2. <link to issue>
3. <link to issue>
---

View File

@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Rebuild the Jan AppImage with extra runtime binaries bundled into the AppDir.
# To reproduce https://github.com/menloresearch/jan/pull/5463
#
# Fail fast on command errors, unset variables, and pipeline failures so a
# broken download or build step cannot silently produce a bad AppImage.
set -euo pipefail

make clean

# Tauri's bundler looks for linuxdeploy in the XDG cache directory; pre-seed it.
TAURI_TOOLKIT_PATH="${XDG_CACHE_HOME:-$HOME/.cache}/tauri"
mkdir -p "$TAURI_TOOLKIT_PATH"
wget https://github.com/linuxdeploy/linuxdeploy/releases/download/1-alpha-20250213-2/linuxdeploy-x86_64.AppImage -O "$TAURI_TOOLKIT_PATH/linuxdeploy-x86_64.AppImage"
chmod +x "$TAURI_TOOLKIT_PATH/linuxdeploy-x86_64.AppImage"

# Patch tauri.conf.json so the bundle picks up the pre-install resources and
# the external cortex-server/uv binaries, then run the Tauri build.
jq '.bundle.resources = ["resources/pre-install/**/*"] | .bundle.externalBin = ["binaries/cortex-server", "resources/bin/uv"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
make build-tauri

# Copy bun plus the engine shared libraries into the unpacked AppDir.
APP_DIR=./src-tauri/target/release/bundle/appimage/Jan.AppDir
cp ./src-tauri/resources/bin/bun "$APP_DIR/usr/bin/bun"
mkdir -p "$APP_DIR/usr/lib/Jan/binaries/engines"
cp -f ./src-tauri/binaries/deps/*.so* "$APP_DIR/usr/lib/Jan/binaries/"
cp -f ./src-tauri/binaries/*.so* "$APP_DIR/usr/lib/Jan/binaries/"
cp -rf ./src-tauri/binaries/engines "$APP_DIR/usr/lib/Jan/binaries/"

# Locate the generated AppImage with a glob (parsing `ls` output is fragile —
# ShellCheck SC2012) and repack the modified AppDir over it.
BUNDLE_DIR=./src-tauri/target/release/bundle/appimage
APP_IMAGE=$(printf '%s\n' "$BUNDLE_DIR"/*AppImage* | head -n 1)
echo "$APP_IMAGE"
rm -f "$APP_IMAGE"
/opt/bin/appimagetool "$APP_DIR" "$APP_IMAGE"

View File

@ -14,3 +14,7 @@ sudo apt install -yqq libwebkit2gtk-4.1-dev \
librsvg2-dev \ librsvg2-dev \
xdg-utils \ xdg-utils \
libfuse2 libfuse2
sudo mkdir -p /opt/bin
sudo wget https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage -O /opt/bin/appimagetool
sudo chmod +x /opt/bin/appimagetool

View File

@ -2,7 +2,6 @@
name: 🚀 Feature Request name: 🚀 Feature Request
about: Suggest an idea for this project 😻! about: Suggest an idea for this project 😻!
title: 'idea: ' title: 'idea: '
type: Idea
--- ---
## Problem Statement ## Problem Statement

View File

@ -1,27 +1,12 @@
--- ---
name: 🌟 Epic name: 🌟 Epic
about: User stories and specs about: Major building block that advances Jan's goals
title: 'epic: ' title: 'epic: '
type: Epic type: Epic
--- ---
## User Stories ## Goal
- As a [user type], I can [do something] so that [outcome] ## Tasklist
## Not in scope ## Out of scope
-
## User Flows & Designs
- Key user flows
- Figma link
- Edge cases
- Error states
## Engineering Decisions
- **Technical Approach:** Brief outline of the solution.
- **Key Trade-offs:** What's been considered/rejected and why. ## Dependencies
- **Dependencies:** APIs, services, libraries, teams.

View File

@ -1,24 +1,13 @@
--- ---
name: 🎯 Goal name: 🎯 Goal
about: Roadmap goals for our users about: External communication of Jan's roadmap and objectives
title: 'goal: ' title: 'goal: '
type: Goal type: Goal
--- ---
## 🎯 Goal ## Goal
<!-- Short description of our goal -->
## 📖 Context ## Tasklist
<!-- Give a description of our current context -->
## ✅ Scope ## Out of scope
<!-- High-level description of what we are going to deliver -->
## ❌ Out of Scope
<!-- What we are not targeting / delivering / discussing in this goal -->
## 🛠 Deliverables
<!-- What we are the tangible deliverables for this goal -->
## ❓Open questions
<!-- What are we not sure about and need to discuss more -->

View File

@ -1,5 +1,5 @@
blank_issues_enabled: true blank_issues_enabled: true
contact_links: contact_links:
- name: Jan Discussions - name: Jan Discussions
url: https://github.com/orgs/janhq/discussions/categories/q-a url: https://github.com/orgs/menloresearch/discussions/categories/q-a
about: Get help, discuss features & roadmap, and share your projects about: Get help, discuss features & roadmap, and share your projects

View File

@ -1,19 +1,17 @@
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/dependabot-options-reference#package-ecosystem- # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/dependabot-options-reference#package-ecosystem-
version: 2 version: 2
updates: updates:
- package-ecosystem: 'cargo' - package-ecosystem: "cargo"
directory: 'src-tauri' directory: "src-tauri"
schedule: schedule:
interval: 'weekly' interval: "weekly"
open-pull-requests-limit: 0 - package-ecosystem: "npm"
- package-ecosystem: 'npm'
directories: directories:
- '/' - "/"
- 'core' - "core"
- 'docs' - "docs"
- 'extensions' - "extensions"
- 'extensions/*' - "extensions/*"
- 'web-app' - "web-app"
schedule: schedule:
interval: 'weekly' interval: "weekly"
open-pull-requests-limit: 0

28
.github/scripts/electron-checksum.py vendored Normal file
View File

@ -0,0 +1,28 @@
import hashlib
import base64
import sys


def hash_file(file_path):
    """Return the base64-encoded SHA-512 digest of the file at ``file_path``.

    The file is consumed incrementally in 1 MB chunks so that arbitrarily
    large binaries can be hashed without loading them fully into memory.
    """
    digest = hashlib.sha512()
    with open(file_path, 'rb') as stream:
        # iter() with a b'' sentinel yields chunks until EOF.
        for chunk in iter(lambda: stream.read(1024 * 1024), b''):
            digest.update(chunk)
    # Emit the digest as base64 text (presumably the format the consumer of
    # this checksum expects -- the script only prints it).
    return base64.b64encode(digest.digest()).decode('utf-8')


if __name__ == "__main__":
    # Require exactly one argument: the path of the file to hash.
    if len(sys.argv) < 2:
        print("Usage: python3 script.py <file_path>")
        sys.exit(1)
    file_path = sys.argv[1]
    hash_base64_output = hash_file(file_path)
    print(hash_base64_output)

View File

@ -7,7 +7,6 @@ on:
jobs: jobs:
assign_milestone: assign_milestone:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
permissions: permissions:
pull-requests: write pull-requests: write
issues: write issues: write

View File

@ -1,37 +0,0 @@
# Manually-triggered AutoQA run: accepts installer download URLs for each OS
# and forwards them to the shared autoqa-template.yml reusable workflow.
name: Manual trigger AutoQA Test Runner

on:
  workflow_dispatch:
    inputs:
      jan_app_url_windows:
        description: 'URL to download Jan app for Windows (.exe)'
        required: true
        type: string
        default: 'https://delta.jan.ai/nightly/Jan-nightly_0.6.5-758_x64-setup.exe'
      jan_app_url_ubuntu:
        description: 'URL to download Jan app for Ubuntu (.deb)'
        required: true
        type: string
        default: 'https://delta.jan.ai/nightly/Jan-nightly_0.6.5-758_amd64.deb'
      jan_app_url_macos:
        description: 'URL to download Jan app for macOS (.dmg)'
        required: true
        type: string
        default: 'https://delta.jan.ai/nightly/Jan-nightly_0.6.5-758_universal.dmg'
      is_nightly:
        description: 'Is this a nightly build?'
        required: true
        type: boolean
        default: true

jobs:
  # Delegate the actual per-OS test matrix to the reusable template workflow.
  call-autoqa-template:
    uses: ./.github/workflows/autoqa-template.yml
    with:
      jan_app_windows_source: ${{ inputs.jan_app_url_windows }}
      jan_app_ubuntu_source: ${{ inputs.jan_app_url_ubuntu }}
      jan_app_macos_source: ${{ inputs.jan_app_url_macos }}
      is_nightly: ${{ inputs.is_nightly }}
      source_type: 'url'
    secrets:
      RP_TOKEN: ${{ secrets.RP_TOKEN }}

View File

@ -1,330 +0,0 @@
name: AutoQA Migration (Manual)
on:
workflow_dispatch:
inputs:
old_windows_installer:
description: 'Windows OLD installer URL or path (.exe)'
required: true
type: string
new_windows_installer:
description: 'Windows NEW installer URL or path (.exe)'
required: true
type: string
old_ubuntu_installer:
description: 'Ubuntu OLD installer URL or path (.deb)'
required: false
type: string
default: ''
new_ubuntu_installer:
description: 'Ubuntu NEW installer URL or path (.deb)'
required: false
type: string
default: ''
old_macos_installer:
description: 'macOS OLD installer URL or path (.dmg)'
required: false
type: string
default: ''
new_macos_installer:
description: 'macOS NEW installer URL or path (.dmg)'
required: false
type: string
default: ''
migration_test_case:
description: 'Specific migration test case key (leave empty to run all)'
required: false
type: string
default: ''
max_turns:
description: 'Maximum turns per test phase'
required: false
type: number
default: 65
jobs:
migration-windows:
runs-on: windows-11-nvidia-gpu
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python 3.13
uses: actions/setup-python@v4
with:
python-version: '3.13'
- name: Clean existing Jan installations
shell: powershell
run: |
.\autoqa\scripts\windows_cleanup.ps1 -IsNightly $false
- name: Download OLD and NEW installers
shell: powershell
run: |
# Download OLD installer using existing script
.\autoqa\scripts\windows_download.ps1 `
-WorkflowInputUrl "${{ inputs.old_windows_installer }}" `
-WorkflowInputIsNightly "false" `
-RepoVariableUrl "" `
-RepoVariableIsNightly "" `
-DefaultUrl "" `
-DefaultIsNightly ""
$oldSrc = Join-Path $env:TEMP 'jan-installer.exe'
$oldOut = Join-Path $env:TEMP 'jan-old.exe'
Copy-Item -Path $oldSrc -Destination $oldOut -Force
# Download NEW installer using existing script
.\autoqa\scripts\windows_download.ps1 `
-WorkflowInputUrl "${{ inputs.new_windows_installer }}" `
-WorkflowInputIsNightly "false" `
-RepoVariableUrl "" `
-RepoVariableIsNightly "" `
-DefaultUrl "" `
-DefaultIsNightly ""
$newSrc = Join-Path $env:TEMP 'jan-installer.exe'
$newOut = Join-Path $env:TEMP 'jan-new.exe'
Copy-Item -Path $newSrc -Destination $newOut -Force
Write-Host "OLD installer: $oldOut"
Write-Host "NEW installer: $newOut"
echo "OLD_VERSION=$oldOut" | Out-File -FilePath $env:GITHUB_ENV -Append
echo "NEW_VERSION=$newOut" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install Python dependencies
working-directory: autoqa
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run migration tests (Windows)
working-directory: autoqa
shell: powershell
env:
RP_TOKEN: ${{ secrets.RP_TOKEN }}
ENABLE_REPORTPORTAL: 'true'
RP_ENDPOINT: 'https://reportportal.menlo.ai'
RP_PROJECT: 'default_personal'
run: |
$case = "${{ inputs.migration_test_case }}"
$caseArg = ""
if ($case -and $case.Trim() -ne "") { $caseArg = "--migration-test-case `"$case`"" }
python main.py --enable-migration-test --old-version "$env:OLD_VERSION" --new-version "$env:NEW_VERSION" --max-turns ${{ inputs.max_turns }} $caseArg
- name: Upload screen recordings
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: migration-recordings-${{ github.run_number }}-windows
path: autoqa/recordings/
- name: Upload trajectories
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: migration-trajectories-${{ github.run_number }}-windows
path: autoqa/trajectories/
- name: Cleanup after tests
if: always()
shell: powershell
run: |
.\autoqa\scripts\windows_post_cleanup.ps1 -IsNightly $false
migration-ubuntu:
if: inputs.old_ubuntu_installer != '' && inputs.new_ubuntu_installer != ''
runs-on: ubuntu-22-04-nvidia-gpu
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python 3.13
uses: actions/setup-python@v4
with:
python-version: '3.13'
- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y \
x11-utils \
python3-tk \
python3-dev \
wmctrl \
xdotool \
libnss3-dev \
libgconf-2-4 \
libxss1 \
libasound2 \
libxtst6 \
libgtk-3-0 \
libgbm-dev \
libxshmfence1 \
libxrandr2 \
libpangocairo-1.0-0 \
libatk1.0-0 \
libcairo-gobject2 \
libgdk-pixbuf2.0-0 \
gnome-screenshot \
xvfb
- name: Setup script permissions
run: |
chmod +x autoqa/scripts/setup_permissions.sh || true
./autoqa/scripts/setup_permissions.sh || true
- name: Clean existing Jan installations
run: |
./autoqa/scripts/ubuntu_cleanup.sh
- name: Download OLD and NEW installers
run: |
set -e
# Download OLD installer using existing script
./autoqa/scripts/ubuntu_download.sh \
"${{ inputs.old_ubuntu_installer }}" \
"false" \
"" \
"" \
"" \
""
cp /tmp/jan-installer.deb /tmp/jan-old.deb
# Download NEW installer using existing script
./autoqa/scripts/ubuntu_download.sh \
"${{ inputs.new_ubuntu_installer }}" \
"false" \
"" \
"" \
"" \
""
cp /tmp/jan-installer.deb /tmp/jan-new.deb
echo "OLD_VERSION=/tmp/jan-old.deb" >> $GITHUB_ENV
echo "NEW_VERSION=/tmp/jan-new.deb" >> $GITHUB_ENV
- name: Install Python dependencies
working-directory: autoqa
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run migration tests (Ubuntu)
working-directory: autoqa
run: |
case="${{ inputs.migration_test_case }}"
caseArg=""
if [ -n "${case}" ]; then caseArg="--migration-test-case \"${case}\""; fi
xvfb-run -a python main.py --enable-migration-test --old-version "${OLD_VERSION}" --new-version "${NEW_VERSION}" --max-turns ${{ inputs.max_turns }} ${caseArg}
- name: Upload screen recordings
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: migration-recordings-${{ github.run_number }}-ubuntu
path: autoqa/recordings/
- name: Upload trajectories
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: migration-trajectories-${{ github.run_number }}-ubuntu
path: autoqa/trajectories/
- name: Cleanup after tests
if: always()
run: |
./autoqa/scripts/ubuntu_post_cleanup.sh "false"
migration-macos:
if: inputs.old_macos_installer != '' && inputs.new_macos_installer != ''
runs-on: macos-selfhosted-15-arm64-cua
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python 3.13
uses: actions/setup-python@v4
with:
python-version: '3.13'
- name: Setup script permissions
run: |
chmod +x autoqa/scripts/setup_permissions.sh || true
./autoqa/scripts/setup_permissions.sh || true
- name: Clean existing Jan installations
run: |
./autoqa/scripts/macos_cleanup.sh
- name: Download OLD and NEW installers
run: |
set -e
# Download OLD installer using existing script
./autoqa/scripts/macos_download.sh \
"${{ inputs.old_macos_installer }}" \
"false" \
"" \
"" \
"" \
""
cp /tmp/jan-installer.dmg /tmp/jan-old.dmg
# Download NEW installer using existing script
./autoqa/scripts/macos_download.sh \
"${{ inputs.new_macos_installer }}" \
"false" \
"" \
"" \
"" \
""
cp /tmp/jan-installer.dmg /tmp/jan-new.dmg
echo "OLD_VERSION=/tmp/jan-old.dmg" >> $GITHUB_ENV
echo "NEW_VERSION=/tmp/jan-new.dmg" >> $GITHUB_ENV
- name: Install Python dependencies
working-directory: autoqa
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run migration tests (macOS)
working-directory: autoqa
run: |
case="${{ inputs.migration_test_case }}"
caseArg=""
if [ -n "${case}" ]; then caseArg="--migration-test-case \"${case}\""; fi
python main.py --enable-migration-test --old-version "${OLD_VERSION}" --new-version "${NEW_VERSION}" --max-turns ${{ inputs.max_turns }} ${caseArg}
- name: Upload screen recordings
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: migration-recordings-${{ github.run_number }}-macos
path: autoqa/recordings/
- name: Upload trajectories
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: migration-trajectories-${{ github.run_number }}-macos
path: autoqa/trajectories/
- name: Cleanup after tests
if: always()
run: |
./autoqa/scripts/macos_post_cleanup.sh

View File

@ -1,121 +0,0 @@
name: AutoQA Reliability (Manual)
on:
workflow_dispatch:
inputs:
source_type:
description: 'App source type (url)'
required: true
type: choice
options: [url]
default: url
jan_app_windows_source:
description: 'Windows installer URL path (used when source_type=url or to select artifact)'
required: true
type: string
default: 'https://catalog.jan.ai/windows/Jan_0.6.8_x64-setup.exe'
jan_app_ubuntu_source:
description: 'Ubuntu .deb URL path'
required: true
type: string
default: 'https://delta.jan.ai/nightly/Jan-nightly_0.6.4-728_amd64.deb'
jan_app_macos_source:
description: 'macOS .dmg URL path'
required: true
type: string
default: 'https://delta.jan.ai/nightly/Jan-nightly_0.6.4-728_universal.dmg'
is_nightly:
description: 'Is the app a nightly build?'
required: true
type: boolean
default: true
reliability_phase:
description: 'Reliability phase'
required: true
type: choice
options: [development, deployment]
default: development
reliability_runs:
description: 'Custom runs (0 uses phase default)'
required: true
type: number
default: 0
reliability_test_path:
description: 'Test file path (relative to autoqa working directory)'
required: true
type: string
default: 'tests/base/settings/app-data.txt'
jobs:
reliability-windows:
runs-on: windows-11-nvidia-gpu
timeout-minutes: 60
env:
DEFAULT_JAN_APP_URL: 'https://catalog.jan.ai/windows/Jan_0.6.8_x64-setup.exe'
DEFAULT_IS_NIGHTLY: 'false'
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python 3.13
uses: actions/setup-python@v4
with:
python-version: '3.13'
- name: Clean existing Jan installations
shell: powershell
run: |
.\autoqa\scripts\windows_cleanup.ps1 -IsNightly "${{ inputs.is_nightly }}"
- name: Download/Prepare Jan app
shell: powershell
run: |
.\autoqa\scripts\windows_download.ps1 `
-WorkflowInputUrl "${{ inputs.jan_app_windows_source }}" `
-WorkflowInputIsNightly "${{ inputs.is_nightly }}" `
-RepoVariableUrl "${{ vars.JAN_APP_URL }}" `
-RepoVariableIsNightly "${{ vars.IS_NIGHTLY }}" `
-DefaultUrl "$env:DEFAULT_JAN_APP_URL" `
-DefaultIsNightly "$env:DEFAULT_IS_NIGHTLY"
- name: Install Jan app
shell: powershell
run: |
.\autoqa\scripts\windows_install.ps1 -IsNightly "$env:IS_NIGHTLY"
- name: Install Python dependencies
working-directory: autoqa
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run reliability tests
working-directory: autoqa
shell: powershell
run: |
$runs = "${{ inputs.reliability_runs }}"
$runsArg = ""
if ([int]$runs -gt 0) { $runsArg = "--reliability-runs $runs" }
python main.py --enable-reliability-test --reliability-phase "${{ inputs.reliability_phase }}" --reliability-test-path "${{ inputs.reliability_test_path }}" $runsArg
- name: Upload screen recordings
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: reliability-recordings-${{ github.run_number }}-${{ runner.os }}
path: autoqa/recordings/
- name: Upload trajectories
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: reliability-trajectories-${{ github.run_number }}-${{ runner.os }}
path: autoqa/trajectories/
- name: Cleanup after tests
if: always()
shell: powershell
run: |
.\autoqa\scripts\windows_post_cleanup.ps1 -IsNightly "${{ inputs.is_nightly }}"

View File

@ -1,471 +0,0 @@
name: Auto QA Test Runner Template
on:
workflow_call:
inputs:
jan_app_windows_source:
description: 'Windows app source - can be URL or local path'
required: true
type: string
jan_app_ubuntu_source:
description: 'Ubuntu app source - can be URL or local path'
required: true
type: string
jan_app_macos_source:
description: 'macOS app source - can be URL or local path'
required: true
type: string
is_nightly:
description: 'Is this a nightly build?'
required: true
type: boolean
default: true
source_type:
description: 'Source type: url or local'
required: true
type: string
default: 'url'
artifact_name_windows:
description: 'Windows artifact name (only needed for local)'
required: false
type: string
default: ''
artifact_name_ubuntu:
description: 'Ubuntu artifact name (only needed for local)'
required: false
type: string
default: ''
artifact_name_macos:
description: 'macOS artifact name (only needed for local)'
required: false
type: string
default: ''
secrets:
RP_TOKEN:
description: 'ReportPortal API token'
required: true
jobs:
windows:
runs-on: windows-11-nvidia-gpu
timeout-minutes: 60
env:
DEFAULT_JAN_APP_URL: 'https://catalog.jan.ai/windows/Jan-nightly_0.6.5-758_x64-setup.exe'
DEFAULT_IS_NIGHTLY: 'true'
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python 3.13
uses: actions/setup-python@v4
with:
python-version: '3.13'
- name: Download artifact (if source_type is local)
if: inputs.source_type == 'local'
uses: actions/download-artifact@v4
with:
name: ${{ inputs.artifact_name_windows }}
path: ${{ runner.temp }}/windows-artifact
- name: Clean existing Jan installations
shell: powershell
run: |
.\autoqa\scripts\windows_cleanup.ps1 -IsNightly "${{ inputs.is_nightly }}"
- name: Download/Prepare Jan app
shell: powershell
run: |
if ("${{ inputs.source_type }}" -eq "local") {
# Find the exe file in the artifact
$exeFile = Get-ChildItem -Path "${{ runner.temp }}/windows-artifact" -Recurse -Filter "*.exe" | Select-Object -First 1
if ($exeFile) {
Write-Host "[SUCCESS] Found local installer: $($exeFile.FullName)"
Copy-Item -Path $exeFile.FullName -Destination "$env:TEMP\jan-installer.exe" -Force
Write-Host "[SUCCESS] Installer copied to: $env:TEMP\jan-installer.exe"
# Don't set JAN_APP_PATH here - let the install script set it to the correct installed app path
echo "IS_NIGHTLY=${{ inputs.is_nightly }}" >> $env:GITHUB_ENV
} else {
Write-Error "[FAILED] No .exe file found in artifact"
exit 1
}
} else {
# Use the existing download script for URLs
.\autoqa\scripts\windows_download.ps1 `
-WorkflowInputUrl "${{ inputs.jan_app_windows_source }}" `
-WorkflowInputIsNightly "${{ inputs.is_nightly }}" `
-RepoVariableUrl "${{ vars.JAN_APP_URL }}" `
-RepoVariableIsNightly "${{ vars.IS_NIGHTLY }}" `
-DefaultUrl "$env:DEFAULT_JAN_APP_URL" `
-DefaultIsNightly "$env:DEFAULT_IS_NIGHTLY"
}
- name: Install Jan app
shell: powershell
run: |
.\autoqa\scripts\windows_install.ps1 -IsNightly "$env:IS_NIGHTLY"
- name: Install Python dependencies
working-directory: autoqa
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run Auto QA Tests
working-directory: autoqa
shell: powershell
env:
RP_TOKEN: ${{ secrets.RP_TOKEN }}
ENABLE_REPORTPORTAL: 'true'
RP_ENDPOINT: 'https://reportportal.menlo.ai'
RP_PROJECT: 'default_personal'
MAX_TURNS: '50'
DELAY_BETWEEN_TESTS: '3'
LAUNCH_NAME: 'CI AutoQA Run Windows - ${{ github.run_number }} - ${{ github.ref_name }}'
run: |
.\scripts\run_tests.ps1 -JanAppPath "$env:JAN_APP_PATH" -ProcessName "$env:JAN_PROCESS_NAME" -RpToken "$env:RP_TOKEN"
- name: Collect Jan logs for artifact upload
if: always()
shell: powershell
run: |
$logDirs = @(
"$env:APPDATA\Jan-nightly\data\logs",
"$env:APPDATA\Jan\data\logs"
)
$dest = "autoqa\jan-logs"
mkdir $dest -Force | Out-Null
foreach ($dir in $logDirs) {
if (Test-Path $dir) {
Copy-Item "$dir\*.log" $dest -Force -ErrorAction SilentlyContinue
}
}
- name: Upload screen recordings
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: ${{ inputs.is_nightly && 'jan-nightly' || 'jan' }}-recordings-${{ github.run_number }}-${{ runner.os }}
path: autoqa/recordings/
- name: Upload Jan logs
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ inputs.is_nightly && 'jan-nightly' || 'jan' }}-logs-${{ github.run_number }}-${{ runner.os }}
path: autoqa/jan-logs/
- name: Cleanup after tests
if: always()
shell: powershell
run: |
.\autoqa\scripts\windows_post_cleanup.ps1 -IsNightly "${{ inputs.is_nightly }}"
ubuntu:
runs-on: ubuntu-22-04-nvidia-gpu
timeout-minutes: 60
env:
DEFAULT_JAN_APP_URL: 'https://delta.jan.ai/nightly/Jan-nightly_0.6.4-728_amd64.deb'
DEFAULT_IS_NIGHTLY: 'true'
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python 3.13
uses: actions/setup-python@v4
with:
python-version: '3.13'
- name: Download artifact (if source_type is local)
if: inputs.source_type == 'local'
uses: actions/download-artifact@v4
with:
name: ${{ inputs.artifact_name_ubuntu }}
path: ${{ runner.temp }}/ubuntu-artifact
- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y \
x11-utils \
python3-tk \
python3-dev \
wmctrl \
xdotool \
libnss3-dev \
libgconf-2-4 \
libxss1 \
libasound2 \
libxtst6 \
libgtk-3-0 \
libgbm-dev \
libxshmfence1 \
libxrandr2 \
libpangocairo-1.0-0 \
libatk1.0-0 \
libcairo-gobject2 \
libgdk-pixbuf2.0-0 \
gnome-screenshot
- name: Setup script permissions
run: |
chmod +x autoqa/scripts/setup_permissions.sh
./autoqa/scripts/setup_permissions.sh
- name: Clean existing Jan installations
run: |
./autoqa/scripts/ubuntu_cleanup.sh
- name: Download/Prepare Jan app
run: |
if [ "${{ inputs.source_type }}" = "local" ]; then
# Find the deb file in the artifact
DEB_FILE=$(find "${{ runner.temp }}/ubuntu-artifact" -name "*.deb" -type f | head -1)
if [ -n "$DEB_FILE" ]; then
echo "[SUCCESS] Found local installer: $DEB_FILE"
cp "$DEB_FILE" "/tmp/jan-installer.deb"
echo "[SUCCESS] Installer copied to: /tmp/jan-installer.deb"
echo "JAN_APP_PATH=/tmp/jan-installer.deb" >> $GITHUB_ENV
echo "IS_NIGHTLY=${{ inputs.is_nightly }}" >> $GITHUB_ENV
if [ "${{ inputs.is_nightly }}" = "true" ]; then
echo "JAN_PROCESS_NAME=Jan-nightly" >> $GITHUB_ENV
else
echo "JAN_PROCESS_NAME=Jan" >> $GITHUB_ENV
fi
else
echo "[FAILED] No .deb file found in artifact"
exit 1
fi
else
# Use the existing download script for URLs
./autoqa/scripts/ubuntu_download.sh \
"${{ inputs.jan_app_ubuntu_source }}" \
"${{ inputs.is_nightly }}" \
"${{ vars.JAN_APP_URL_LINUX }}" \
"${{ vars.IS_NIGHTLY }}" \
"$DEFAULT_JAN_APP_URL" \
"$DEFAULT_IS_NIGHTLY"
# Set the correct environment variables for the test runner
echo "JAN_APP_PATH=/tmp/jan-installer.deb" >> $GITHUB_ENV
if [ "${{ inputs.is_nightly }}" = "true" ]; then
echo "JAN_PROCESS_NAME=Jan-nightly" >> $GITHUB_ENV
else
echo "JAN_PROCESS_NAME=Jan" >> $GITHUB_ENV
fi
fi
- name: Install Jan app
run: |
./autoqa/scripts/ubuntu_install.sh "$IS_NIGHTLY"
- name: Install Python dependencies
working-directory: autoqa
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run Auto QA Tests
working-directory: autoqa
env:
RP_TOKEN: ${{ secrets.RP_TOKEN }}
ENABLE_REPORTPORTAL: 'true'
RP_ENDPOINT: 'https://reportportal.menlo.ai'
RP_PROJECT: 'default_personal'
MAX_TURNS: '50'
DELAY_BETWEEN_TESTS: '3'
LAUNCH_NAME: 'CI AutoQA Run Ubuntu - ${{ github.run_number }} - ${{ github.ref_name }}'
run: |
./scripts/run_tests.sh "$JAN_APP_PATH" "$JAN_PROCESS_NAME" "$RP_TOKEN" "ubuntu"
- name: Collect Jan logs for artifact upload
if: always()
run: |
mkdir -p autoqa/jan-logs
cp ~/.local/share/Jan-nightly/data/logs/*.log autoqa/jan-logs/ 2>/dev/null || true
cp ~/.local/share/Jan/data/logs/*.log autoqa/jan-logs/ 2>/dev/null || true
- name: Upload screen recordings
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: ${{ inputs.is_nightly && 'jan-nightly' || 'jan' }}-recordings-${{ github.run_number }}-${{ runner.os }}
path: autoqa/recordings/
- name: Upload Jan logs
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ inputs.is_nightly && 'jan-nightly' || 'jan' }}-logs-${{ github.run_number }}-${{ runner.os }}
path: autoqa/jan-logs/
- name: Cleanup after tests
if: always()
run: |
./autoqa/scripts/ubuntu_post_cleanup.sh "$IS_NIGHTLY"
macos:
runs-on: macos-selfhosted-15-arm64-cua
timeout-minutes: 60
env:
DEFAULT_JAN_APP_URL: 'https://delta.jan.ai/nightly/Jan-nightly_0.6.4-728_universal.dmg'
DEFAULT_IS_NIGHTLY: 'true'
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python 3.13
uses: actions/setup-python@v4
with:
python-version: '3.13'
- name: Download artifact (if source_type is local)
if: inputs.source_type == 'local'
uses: actions/download-artifact@v4
with:
name: ${{ inputs.artifact_name_macos }}
path: ${{ runner.temp }}/macos-artifact
- name: Setup script permissions
run: |
chmod +x autoqa/scripts/setup_permissions.sh
./autoqa/scripts/setup_permissions.sh
- name: Clean existing Jan installations
run: |
./autoqa/scripts/macos_cleanup.sh
- name: Download/Prepare Jan app
run: |
if [ "${{ inputs.source_type }}" = "local" ]; then
# Find the dmg file in the artifact
DMG_FILE=$(find "${{ runner.temp }}/macos-artifact" -name "*.dmg" -type f | head -1)
if [ -n "$DMG_FILE" ]; then
echo "[SUCCESS] Found local installer: $DMG_FILE"
cp "$DMG_FILE" "/tmp/jan-installer.dmg"
echo "[SUCCESS] Installer copied to: /tmp/jan-installer.dmg"
echo "JAN_APP_PATH=/tmp/jan-installer.dmg" >> $GITHUB_ENV
echo "IS_NIGHTLY=${{ inputs.is_nightly }}" >> $GITHUB_ENV
if [ "${{ inputs.is_nightly }}" = "true" ]; then
echo "PROCESS_NAME=Jan-nightly" >> $GITHUB_ENV
else
echo "PROCESS_NAME=Jan" >> $GITHUB_ENV
fi
else
echo "[FAILED] No .dmg file found in artifact"
exit 1
fi
else
# Use the existing download script for URLs
./autoqa/scripts/macos_download.sh \
"${{ inputs.jan_app_macos_source }}" \
"${{ inputs.is_nightly }}" \
"${{ vars.JAN_APP_URL }}" \
"${{ vars.IS_NIGHTLY }}" \
"$DEFAULT_JAN_APP_URL" \
"$DEFAULT_IS_NIGHTLY"
# Set the correct environment variables for the test runner
echo "JAN_APP_PATH=/tmp/jan-installer.dmg" >> $GITHUB_ENV
if [ "${{ inputs.is_nightly }}" = "true" ]; then
echo "PROCESS_NAME=Jan-nightly" >> $GITHUB_ENV
else
echo "PROCESS_NAME=Jan" >> $GITHUB_ENV
fi
fi
- name: Install Jan app
run: |
./autoqa/scripts/macos_install.sh
- name: Install system dependencies
run: |
echo "Installing system dependencies for macOS..."
# Check if Homebrew is available
if command -v brew >/dev/null 2>&1; then
echo "Homebrew is available"
# Install python-tk if not available
python3 -c "import tkinter" 2>/dev/null || {
echo "Installing python-tk via Homebrew..."
brew install python-tk || true
}
else
echo "Homebrew not available, checking if tkinter works..."
python3 -c "import tkinter" || {
echo "[WARNING] tkinter not available and Homebrew not found"
echo "This may cause issues with mouse control"
}
fi
echo "System dependencies check completed"
- name: Install Python dependencies
run: |
cd autoqa
echo "Installing Python dependencies..."
pip install --upgrade pip
pip install -r requirements.txt
echo "[SUCCESS] Python dependencies installed"
- name: Setup ReportPortal environment
run: |
echo "Setting up ReportPortal environment..."
echo "RP_TOKEN=${{ secrets.RP_TOKEN }}" >> $GITHUB_ENV
echo "ReportPortal environment configured"
- name: Run E2E tests
env:
RP_TOKEN: ${{ secrets.RP_TOKEN }}
ENABLE_REPORTPORTAL: 'true'
RP_ENDPOINT: 'https://reportportal.menlo.ai'
RP_PROJECT: 'default_personal'
MAX_TURNS: '50'
DELAY_BETWEEN_TESTS: '3'
LAUNCH_NAME: 'CI AutoQA Run Macos - ${{ github.run_number }} - ${{ github.ref_name }}'
run: |
cd autoqa
echo "Starting E2E test execution..."
echo "Environment variables:"
echo "JAN_APP_PATH: $JAN_APP_PATH"
echo "PROCESS_NAME: $PROCESS_NAME"
echo "IS_NIGHTLY: $IS_NIGHTLY"
./scripts/run_tests.sh "$JAN_APP_PATH" "$PROCESS_NAME" "$RP_TOKEN" "macos"
- name: Collect Jan logs for artifact upload
if: always()
run: |
mkdir -p autoqa/jan-logs
cp ~/Library/Application\ Support/Jan-nightly/data/logs/*.log autoqa/jan-logs/ 2>/dev/null || true
cp ~/Library/Application\ Support/Jan/data/logs/*.log autoqa/jan-logs/ 2>/dev/null || true
- name: Upload screen recordings
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: ${{ inputs.is_nightly && 'jan-nightly' || 'jan' }}-recordings-${{ github.run_number }}-${{ runner.os }}
path: autoqa/recordings/
- name: Upload Jan logs
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ inputs.is_nightly && 'jan-nightly' || 'jan' }}-logs-${{ github.run_number }}-${{ runner.os }}
path: autoqa/jan-logs/
- name: Cleanup after tests
if: always()
run: |
./autoqa/scripts/macos_post_cleanup.sh

View File

@ -1,31 +0,0 @@
# Workflow: automatically flag likely duplicate GitHub issues using Claude Code.
# Runs on every newly opened issue, or on demand for a given issue number.
name: Claude Issue Dedupe
description: Automatically dedupe GitHub issues using Claude Code
on:
  issues:
    types: [opened]
  # Manual trigger: re-run duplicate detection against an existing issue.
  workflow_dispatch:
    inputs:
      issue_number:
        description: 'Issue number to process for duplicate detection'
        required: true
        type: string
jobs:
  claude-dedupe-issues:
    runs-on: ubuntu-latest
    timeout-minutes: 10  # hard stop so a stuck Claude run cannot hang forever
    permissions:
      contents: read
      issues: write  # required so the dedupe run can comment duplicate links on the issue
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Run Claude Code dedupe
        uses: anthropics/claude-code-base-action@beta
        with:
          # /dedupe is a repo-local slash command (see the dedupe command file in this
          # repo); falls back to inputs.issue_number when not triggered by an issue event.
          prompt: "/dedupe ${{ github.repository }}/issues/${{ github.event.issue.number || inputs.issue_number }}"
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          claude_env: |
            GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,145 +0,0 @@
name: Jan Astro Docs
on:
push:
branches:
- dev
paths:
- 'website/**'
- '.github/workflows/jan-astro-docs.yml'
pull_request:
paths:
- 'website/**'
- '.github/workflows/jan-astro-docs.yml'
# Review gh actions docs if you want to further define triggers, paths, etc
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#on
workflow_dispatch:
inputs:
update_cloud_spec:
description: 'Update Jan Server API specification'
required: false
default: 'false'
type: choice
options:
- 'true'
- 'false'
schedule:
# Run daily at 2 AM UTC to sync with Jan Server updates
- cron: '0 2 * * *'
jobs:
deploy:
name: Deploy to CloudFlare Pages
env:
CLOUDFLARE_PROJECT_NAME: astro-docs # docs.jan.ai
runs-on: ubuntu-latest
permissions:
contents: write
deployments: write
pull-requests: write
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: 20
- uses: oven-sh/setup-bun@v2
- name: Install jq
uses: dcarbone/install-jq-action@v2.0.1
- name: Fill env vars
continue-on-error: true
working-directory: website
run: |
env_example_file=".env.example"
touch .env
while IFS= read -r line || [[ -n "$line" ]]; do
if [[ "$line" == *"="* ]]; then
var_name=$(echo $line | cut -d '=' -f 1)
echo $var_name
var_value="$(jq -r --arg key "$var_name" '.[$key]' <<< "$SECRETS")"
echo "$var_name=$var_value" >> .env
fi
done < "$env_example_file"
env:
SECRETS: '${{ toJson(secrets) }}'
- name: Install dependencies
working-directory: website
run: bun install
- name: Update Jan Server API Spec (Scheduled/Manual)
if: github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.update_cloud_spec == 'true')
working-directory: website
continue-on-error: true
run: |
echo "📡 Updating Jan Server API specification..."
bun run generate:cloud-spec
# Check if the spec file was updated
if git diff --quiet public/openapi/cloud-openapi.json; then
echo "✅ No changes to API specification"
else
echo "📝 API specification updated"
# Commit the changes if this is a scheduled run on main branch
if [ "${{ github.event_name }}" = "schedule" ] && [ "${{ github.ref }}" = "refs/heads/dev" ]; then
git config --local user.email "github-actions[bot]@users.noreply.github.com"
git config --local user.name "github-actions[bot]"
git add public/openapi/cloud-openapi.json
git commit -m "chore: update Jan Server API specification [skip ci]"
git push
fi
fi
env:
JAN_SERVER_SPEC_URL: ${{ secrets.JAN_SERVER_SPEC_URL || 'https://api.jan.ai/api/swagger/doc.json' }}
JAN_SERVER_PROD_URL: ${{ secrets.JAN_SERVER_PROD_URL || 'https://api.jan.ai/v1' }}
- name: Build website
working-directory: website
run: |
# For PR and regular pushes, skip cloud spec generation in prebuild
# It will use the existing committed spec or fallback
if [ "${{ github.event_name }}" = "pull_request" ] || [ "${{ github.event_name }}" = "push" ]; then
echo "Using existing cloud spec for build"
export SKIP_CLOUD_SPEC_UPDATE=true
fi
bun run build
env:
SKIP_CLOUD_SPEC_UPDATE: ${{ github.event_name == 'pull_request' || github.event_name == 'push' }}
- name: copy redirects and headers
continue-on-error: true
working-directory: website
run: |
cp _redirects dist/_redirects
cp _headers dist/_headers
- name: Publish to Cloudflare Pages PR Preview and Staging
if: github.event_name == 'pull_request'
uses: cloudflare/pages-action@v1
with:
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
projectName: ${{ env.CLOUDFLARE_PROJECT_NAME }}
directory: ./website/dist
# Optional: Enable this if you want to have GitHub Deployments triggered
gitHubToken: ${{ secrets.GITHUB_TOKEN }}
id: deployCloudflarePages
- uses: mshick/add-pr-comment@v2
if: github.event_name == 'pull_request'
with:
message: |
Preview URL Astro Docs: ${{ steps.deployCloudflarePages.outputs.url }}
- name: Publish to Cloudflare Pages Production
if: (github.event_name == 'push' && github.ref == 'refs/heads/dev') || (github.event_name == 'workflow_dispatch' && github.ref == 'refs/heads/dev')
uses: cloudflare/pages-action@v1
with:
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
projectName: ${{ env.CLOUDFLARE_PROJECT_NAME }}
directory: ./website/dist
branch: main
# Optional: Enable this if you want to have GitHub Deployments triggered
gitHubToken: ${{ secrets.GITHUB_TOKEN }}

View File

@ -0,0 +1,63 @@
# Workflow: rebuild and publish the docs site to Cloudflare Pages whenever a
# GitHub release is published, edited, or released.
name: Deploy Docs on new release
on:
  release:
    types:
      - published
      - edited
      - released
jobs:
  deploy:
    name: Deploy to CloudFlare Pages
    env:
      CLOUDFLARE_PROJECT_NAME: docs
    runs-on: ubuntu-latest
    permissions:
      contents: write
      deployments: write
      pull-requests: write
    steps:
      # NOTE(review): this always builds from the dev branch, not the release
      # tag that triggered the run — confirm that is intentional, since the
      # published docs may not match the tagged release contents.
      - uses: actions/checkout@v4
        with:
          ref: dev
      - uses: actions/setup-node@v3
        with:
          node-version: 18
      - name: Install jq
        uses: dcarbone/install-jq-action@v2.0.1
      # Populate .env from repository secrets: every VAR named in .env.example
      # is looked up by name in the secrets JSON and written out as VAR=value.
      - name: Fill env vars
        working-directory: docs
        run: |
          env_example_file=".env.example"
          touch .env
          while IFS= read -r line || [[ -n "$line" ]]; do
            if [[ "$line" == *"="* ]]; then
              var_name=$(echo $line | cut -d '=' -f 1)
              echo $var_name
              var_value="$(jq -r --arg key "$var_name" '.[$key]' <<< "$SECRETS")"
              echo "$var_name=$var_value" >> .env
            fi
          done < "$env_example_file"
        env:
          SECRETS: '${{ toJson(secrets) }}'
      - name: Install dependencies
        working-directory: docs
        run: yarn install
      # Production build; _redirects/_headers must be copied next to the static
      # output so Cloudflare Pages picks up routing and header rules.
      - name: Build website
        working-directory: docs
        run: export NODE_ENV=production && yarn build && cp _redirects out/_redirects && cp _headers out/_headers
      - name: Publish to Cloudflare Pages Production
        uses: cloudflare/pages-action@v1
        with:
          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          projectName: ${{ env.CLOUDFLARE_PROJECT_NAME }}
          directory: ./docs/out
          branch: main
          # Optional: Enable this if you want to have GitHub Deployments triggered
          gitHubToken: ${{ secrets.GITHUB_TOKEN }}

View File

@ -26,10 +26,10 @@ jobs:
deployments: write deployments: write
pull-requests: write pull-requests: write
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- uses: actions/setup-node@v3 - uses: actions/setup-node@v3
with: with:
node-version: 20 node-version: 18
- name: Install jq - name: Install jq
uses: dcarbone/install-jq-action@v2.0.1 uses: dcarbone/install-jq-action@v2.0.1
@ -53,9 +53,6 @@ jobs:
- name: Install dependencies - name: Install dependencies
working-directory: docs working-directory: docs
run: yarn install run: yarn install
- name: Clean output directory
working-directory: docs
run: rm -rf out/* .next/*
- name: Build website - name: Build website
working-directory: docs working-directory: docs
run: export NODE_ENV=production && yarn build && cp _redirects out/_redirects && cp _headers out/_headers run: export NODE_ENV=production && yarn build && cp _redirects out/_redirects && cp _headers out/_headers
@ -79,7 +76,7 @@ jobs:
Preview URL: ${{ steps.deployCloudflarePages.outputs.url }} Preview URL: ${{ steps.deployCloudflarePages.outputs.url }}
- name: Publish to Cloudflare Pages Production - name: Publish to Cloudflare Pages Production
if: (github.event_name == 'push' && github.ref == 'refs/heads/dev') || (github.event_name == 'workflow_dispatch' && github.ref == 'refs/heads/dev') || (github.event_name == 'workflow_dispatch' && startsWith(github.ref, 'refs/heads/release/')) if: (github.event_name == 'push' && github.ref == 'refs/heads/dev') || (github.event_name == 'workflow_dispatch' && github.ref == 'refs/heads/dev')
uses: cloudflare/pages-action@v1 uses: cloudflare/pages-action@v1
with: with:
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}

View File

@ -0,0 +1,215 @@
name: Electron Builder - Nightly / Manual
on:
schedule:
- cron: '0 20 * * 1,2,3' # At 8 PM UTC on Monday, Tuesday, and Wednesday which is 3 AM UTC+7 Tuesday, Wednesday, and Thursday
workflow_dispatch:
inputs:
public_provider:
type: choice
description: 'Public Provider'
options:
- none
- aws-s3
default: none
pull_request:
branches:
- release/**
jobs:
set-public-provider:
runs-on: ubuntu-latest
outputs:
public_provider: ${{ steps.set-public-provider.outputs.public_provider }}
ref: ${{ steps.set-public-provider.outputs.ref }}
steps:
# Decide where build artifacts get published and which git ref to build from.
# Step outputs: public_provider ('none' or 'aws-s3') and ref, consumed by the
# downstream build-tauri-* jobs via needs.set-public-provider.outputs.*.
- name: Set public provider
  id: set-public-provider
  run: |
    # The `::set-output` workflow command is deprecated and disabled on
    # current GitHub-hosted runners; write key=value lines to $GITHUB_OUTPUT
    # instead. Output names are unchanged, so consumers are unaffected.
    if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
      echo "public_provider=${{ github.event.inputs.public_provider }}" >> $GITHUB_OUTPUT
      echo "ref=${{ github.ref }}" >> $GITHUB_OUTPUT
    else
      if [ "${{ github.event_name }}" == "schedule" ]; then
        # Nightly cron always publishes to S3 from dev.
        echo "public_provider=aws-s3" >> $GITHUB_OUTPUT
        echo "ref=refs/heads/dev" >> $GITHUB_OUTPUT
      elif [ "${{ github.event_name }}" == "push" ]; then
        echo "public_provider=aws-s3" >> $GITHUB_OUTPUT
        echo "ref=${{ github.ref }}" >> $GITHUB_OUTPUT
      elif [ "${{ github.event_name }}" == "pull_request_review" ]; then
        # PR builds are never published.
        echo "public_provider=none" >> $GITHUB_OUTPUT
        echo "ref=${{ github.ref }}" >> $GITHUB_OUTPUT
      else
        echo "public_provider=none" >> $GITHUB_OUTPUT
        echo "ref=${{ github.ref }}" >> $GITHUB_OUTPUT
      fi
    fi
# Job create Update app version based on latest release tag with build number and save to output
get-update-version:
uses: ./.github/workflows/template-get-update-version.yml
build-tauri-macos:
uses: ./.github/workflows/template-tauri-build-macos.yml
secrets: inherit
needs: [get-update-version, set-public-provider]
with:
ref: ${{ needs.set-public-provider.outputs.ref }}
public_provider: ${{ needs.set-public-provider.outputs.public_provider }}
new_version: ${{ needs.get-update-version.outputs.new_version }}
channel: nightly
cortex_api_port: "39261"
build-tauri-windows-x64:
uses: ./.github/workflows/template-tauri-build-windows-x64.yml
secrets: inherit
needs: [get-update-version, set-public-provider]
with:
ref: ${{ needs.set-public-provider.outputs.ref }}
public_provider: ${{ needs.set-public-provider.outputs.public_provider }}
new_version: ${{ needs.get-update-version.outputs.new_version }}
channel: nightly
cortex_api_port: "39261"
build-tauri-linux-x64:
uses: ./.github/workflows/template-tauri-build-linux-x64.yml
secrets: inherit
needs: [get-update-version, set-public-provider]
with:
ref: ${{ needs.set-public-provider.outputs.ref }}
public_provider: ${{ needs.set-public-provider.outputs.public_provider }}
new_version: ${{ needs.get-update-version.outputs.new_version }}
channel: nightly
cortex_api_port: "39261"
sync-temp-to-latest:
needs: [get-update-version, set-public-provider, build-tauri-windows-x64, build-tauri-linux-x64, build-tauri-macos]
runs-on: ubuntu-latest
steps:
- name: Getting the repo
uses: actions/checkout@v3
- name: Install jq
uses: dcarbone/install-jq-action@v2.0.1
- name: create latest.json file
run: |
VERSION=${{ needs.get-update-version.outputs.new_version }}
PUB_DATE=$(date -u +"%Y-%m-%dT%H:%M:%S.%3NZ")
LINUX_SIGNATURE="${{ needs.build-tauri-linux-x64.outputs.APPIMAGE_SIG }}"
LINUX_URL="https://delta.jan.ai/nightly/${{ needs.build-tauri-linux-x64.outputs.APPIMAGE_FILE_NAME }}"
WINDOWS_SIGNATURE="${{ needs.build-tauri-windows-x64.outputs.WIN_SIG }}"
WINDOWS_URL="https://delta.jan.ai/nightly/${{ needs.build-tauri-windows-x64.outputs.FILE_NAME }}"
DARWIN_SIGNATURE="${{ needs.build-tauri-macos.outputs.MAC_UNIVERSAL_SIG }}"
DARWIN_URL="https://delta.jan.ai/nightly/Jan-nightly_${{ needs.get-update-version.outputs.new_version }}.app.tar.gz"
jq --arg version "$VERSION" \
--arg pub_date "$PUB_DATE" \
--arg linux_signature "$LINUX_SIGNATURE" \
--arg linux_url "$LINUX_URL" \
--arg windows_signature "$WINDOWS_SIGNATURE" \
--arg windows_url "$WINDOWS_URL" \
--arg darwin_arm_signature "$DARWIN_SIGNATURE" \
--arg darwin_arm_url "$DARWIN_URL" \
--arg darwin_amd_signature "$DARWIN_SIGNATURE" \
--arg darwin_amd_url "$DARWIN_URL" \
'.version = $version
| .pub_date = $pub_date
| .platforms["linux-x86_64"].signature = $linux_signature
| .platforms["linux-x86_64"].url = $linux_url
| .platforms["windows-x86_64"].signature = $windows_signature
| .platforms["windows-x86_64"].url = $windows_url
| .platforms["darwin-aarch64"].signature = $darwin_arm_signature
| .platforms["darwin-aarch64"].url = $darwin_arm_url
| .platforms["darwin-x86_64"].signature = $darwin_amd_signature
| .platforms["darwin-x86_64"].url = $darwin_amd_url' \
src-tauri/latest.json.template > latest.json
cat latest.json
- name: Sync temp to latest
if: ${{ needs.set-public-provider.outputs.public_provider == 'aws-s3' }}
run: |
aws s3 cp ./latest.json s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-nightly/latest.json
aws s3 sync s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-nightly/ s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/nightly/
env:
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ secrets.DELTA_AWS_REGION }}
AWS_EC2_METADATA_DISABLED: "true"
noti-discord-nightly-and-update-url-readme:
needs: [
build-tauri-macos,
build-tauri-windows-x64,
build-tauri-linux-x64,
get-update-version,
set-public-provider,
sync-temp-to-latest
]
secrets: inherit
if: github.event_name == 'schedule'
uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
with:
ref: refs/heads/dev
build_reason: Nightly
push_to_branch: dev
new_version: ${{ needs.get-update-version.outputs.new_version }}
noti-discord-pre-release-and-update-url-readme:
needs: [
build-tauri-macos,
build-tauri-windows-x64,
build-tauri-linux-x64,
get-update-version,
set-public-provider,
sync-temp-to-latest
]
secrets: inherit
if: github.event_name == 'push'
uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
with:
ref: refs/heads/dev
build_reason: Pre-release
push_to_branch: dev
new_version: ${{ needs.get-update-version.outputs.new_version }}
noti-discord-manual-and-update-url-readme:
needs: [
build-tauri-macos,
build-tauri-windows-x64,
build-tauri-linux-x64,
get-update-version,
set-public-provider,
sync-temp-to-latest
]
secrets: inherit
if: github.event_name == 'workflow_dispatch' && github.event.inputs.public_provider == 'aws-s3'
uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
with:
ref: refs/heads/dev
build_reason: Manual
push_to_branch: dev
new_version: ${{ needs.get-update-version.outputs.new_version }}
# comment-pr-build-url:
# needs: [
# build-tauri-macos,
# build-tauri-windows-x64,
# build-tauri-linux-x64,
# get-update-version,
# set-public-provider,
# sync-temp-to-latest
# ]
# runs-on: ubuntu-latest
# if: github.event_name == 'pull_request_review'
# steps:
# - name: Set up GitHub CLI
# run: |
# curl -sSL https://github.com/cli/cli/releases/download/v2.33.0/gh_2.33.0_linux_amd64.tar.gz | tar xz
# sudo cp gh_2.33.0_linux_amd64/bin/gh /usr/local/bin/
# - name: Comment build URL on PR
# env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# run: |
# PR_URL=${{ github.event.pull_request.html_url }}
# RUN_ID=${{ github.run_id }}
# COMMENT="This is the build for this pull request. You can download it from the Artifacts section here: [Build URL](https://github.com/${{ github.repository }}/actions/runs/${RUN_ID})."
# gh pr comment $PR_URL --body "$COMMENT"

131
.github/workflows/jan-electron-build.yml vendored Normal file
View File

@ -0,0 +1,131 @@
name: Electron Builder - Tag
on:
push:
tags: ["v[0-9]+.[0-9]+.[0-9]+"]
jobs:
# Job create Update app version based on latest release tag with build number and save to output
get-update-version:
uses: ./.github/workflows/template-get-update-version.yml
create-draft-release:
runs-on: ubuntu-latest
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
outputs:
upload_url: ${{ steps.create_release.outputs.upload_url }}
version: ${{ steps.get_version.outputs.version }}
permissions:
contents: write
steps:
# Strip the leading 'v' from the pushed tag (v1.2.3 -> 1.2.3). The value is
# exposed twice: as the VERSION env var (used for the release name below) and
# as the step output `version` (consumed via create-draft-release's outputs).
- name: Extract tag name without v prefix
  id: get_version
  # `::set-output` is deprecated and disabled on current runners; append a
  # key=value line to $GITHUB_OUTPUT instead. Output name is unchanged.
  run: |
    echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV
    echo "version=${GITHUB_REF#refs/tags/v}" >> $GITHUB_OUTPUT
  env:
    GITHUB_REF: ${{ github.ref }}
- name: Create Draft Release
id: create_release
uses: softprops/action-gh-release@v2
with:
tag_name: ${{ github.ref_name }}
token: ${{ secrets.GITHUB_TOKEN }}
name: "${{ env.VERSION }}"
draft: true
prerelease: false
build-electron-macos:
uses: ./.github/workflows/template-electron-build-macos.yml
secrets: inherit
needs: [get-update-version]
with:
ref: ${{ github.ref }}
public_provider: github
beta: false
nightly: false
new_version: ${{ needs.get-update-version.outputs.new_version }}
build-electron-windows-x64:
uses: ./.github/workflows/template-electron-build-windows-x64.yml
secrets: inherit
needs: [get-update-version]
with:
ref: ${{ github.ref }}
public_provider: github
beta: false
nightly: false
new_version: ${{ needs.get-update-version.outputs.new_version }}
build-electron-linux-x64:
uses: ./.github/workflows/template-electron-build-linux-x64.yml
secrets: inherit
needs: [get-update-version]
with:
ref: ${{ github.ref }}
public_provider: github
beta: false
nightly: false
new_version: ${{ needs.get-update-version.outputs.new_version }}
# build-tauri-macos:
# uses: ./.github/workflows/template-tauri-build-macos.yml
# secrets: inherit
# needs: [get-update-version, create-draft-release]
# with:
# ref: ${{ github.ref }}
# public_provider: github
# channel: stable
# new_version: ${{ needs.get-update-version.outputs.new_version }}
# upload_url: ${{ needs.create-draft-release.outputs.upload_url }}
# build-tauri-windows-x64:
# uses: ./.github/workflows/template-tauri-build-windows-x64.yml
# secrets: inherit
# needs: [get-update-version, create-draft-release]
# with:
# ref: ${{ github.ref }}
# public_provider: github
# channel: stable
# new_version: ${{ needs.get-update-version.outputs.new_version }}
# upload_url: ${{ needs.create-draft-release.outputs.upload_url }}
# build-tauri-linux-x64:
# uses: ./.github/workflows/template-tauri-build-linux-x64.yml
# secrets: inherit
# needs: [get-update-version, create-draft-release]
# with:
# ref: ${{ github.ref }}
# public_provider: github
# channel: stable
# new_version: ${{ needs.get-update-version.outputs.new_version }}
# upload_url: ${{ needs.create-draft-release.outputs.upload_url }}
# Publishes/refreshes the draft release notes once all build jobs finish.
update_release_draft:
  # BUG FIX: the previous `needs` list also named build-tauri-windows-x64,
  # build-tauri-linux-x64 and build-tauri-macos, but those jobs are commented
  # out in this workflow. A `needs` entry referencing a job that does not
  # exist makes the entire workflow file invalid, so only the electron jobs
  # that actually run may be listed here. Re-add the tauri jobs if/when they
  # are uncommented above.
  needs: [
      build-electron-windows-x64,
      build-electron-linux-x64,
      build-electron-macos
    ]
  permissions:
    # write permission is required to create a github release
    contents: write
    # write permission is required for autolabeler
    # otherwise, read permission is required at least
    pull-requests: write
  runs-on: ubuntu-latest
  steps:
    # (Optional) GitHub Enterprise requires GHE_HOST variable set
    #- name: Set GHE_HOST
    #  run: |
    #    echo "GHE_HOST=${GITHUB_SERVER_URL##https:\/\/}" >> $GITHUB_ENV
    # Drafts your next Release notes as Pull Requests are merged into "master"
    - uses: release-drafter/release-drafter@v5
      # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml
      # with:
      #   config-name: my-config.yml
      #   disable-autolabeler: true
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -0,0 +1,435 @@
name: Test - Linter & Playwright
on:
workflow_dispatch:
push:
branches:
- main
- dev
paths:
- 'electron/**'
- .github/workflows/jan-electron-linter-and-test.yml
- 'web/**'
- 'joi/**'
- 'package.json'
- 'node_modules/**'
- 'yarn.lock'
- 'core/**'
- 'extensions/**'
- '!README.md'
- 'Makefile'
pull_request:
branches:
- main
- dev
- release/**
paths:
- 'electron/**'
- .github/workflows/jan-electron-linter-and-test.yml
- 'web/**'
- 'joi/**'
- 'package.json'
- 'node_modules/**'
- 'yarn.lock'
- 'Makefile'
- 'extensions/**'
- 'core/**'
- 'src-tauri/**'
- 'web-app/**'
- '!README.md'
jobs:
base_branch_cov:
runs-on: ubuntu-latest
continue-on-error: true
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.base_ref }}
- name: Use Node.js 20.x
uses: actions/setup-node@v3
with:
node-version: 20
- name: Install dependencies
run: |
make config-yarn
yarn
yarn build:core
- name: Run test coverage
run: yarn test:coverage
- name: Upload code coverage for ref branch
uses: actions/upload-artifact@v4
with:
name: ref-lcov.info
path: ./coverage/lcov.info
test-on-macos:
if: (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository) || github.event_name == 'push' || github.event_name == 'workflow_dispatch'
runs-on: macos-latest
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
- name: Set IS_TEST environment variable
run: |
echo "IS_TEST=true" >> $GITHUB_ENV
- name: 'Cleanup cache'
continue-on-error: true
run: |
rm -rf ~/jan
make clean
# Record which commit this run reports on; REPORT_PORTAL_DESCRIPTION is read
# by the (currently commented-out) Report Portal config step below.
# BUG FIX: both echo lines previously appended a stray ')' after the
# expression (e.g. "...${{github.event.after}})"), producing a value with an
# unmatched paren. The sibling jobs in this workflow (windows/ubuntu) write
# the bare SHA, so the paren is dropped here for correctness and consistency.
- name: Get Commit Message for PR
  if: github.event_name == 'pull_request'
  run: |
    echo "REPORT_PORTAL_DESCRIPTION=${{github.event.after}}" >> $GITHUB_ENV
- name: Get Commit Message for push event
  if: github.event_name == 'push'
  run: |
    echo "REPORT_PORTAL_DESCRIPTION=${{github.sha}}" >> $GITHUB_ENV
# - name: 'Config report portal'
# run: |
# make update-playwright-config REPORT_PORTAL_URL=${{ secrets.REPORT_PORTAL_URL }} REPORT_PORTAL_API_KEY=${{ secrets.REPORT_PORTAL_API_KEY }} REPORT_PORTAL_PROJECT_NAME=${{ secrets.REPORT_PORTAL_PROJECT_NAME }} REPORT_PORTAL_LAUNCH_NAME="Jan App macos" REPORT_PORTAL_DESCRIPTION="${{env.REPORT_PORTAL_DESCRIPTION}}"
- name: Linter and test
run: |
make test
env:
CSC_IDENTITY_AUTO_DISCOVERY: 'false'
test-on-macos-pr-target:
if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository
runs-on: macos-latest
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
- name: 'Cleanup cache'
continue-on-error: true
run: |
rm -rf ~/jan
make clean
- name: Linter and test
run: |
make test
env:
CSC_IDENTITY_AUTO_DISCOVERY: 'false'
test-on-windows:
if: github.event_name == 'push'
strategy:
fail-fast: false
matrix:
antivirus-tools: ['mcafee', 'default-windows-security', 'bit-defender']
runs-on: windows-desktop-${{ matrix.antivirus-tools }}
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
- name: Install tauri-driver dependencies
run: |
cargo install tauri-driver --locked
# Clean cache, continue on error
- name: 'Cleanup cache'
shell: powershell
continue-on-error: true
run: |
$path = "$Env:APPDATA\jan"
if (Test-Path $path) {
Remove-Item "\\?\$path" -Recurse -Force
} else {
Write-Output "Folder does not exist."
}
make clean
- name: Get Commit Message for push event
if: github.event_name == 'push'
shell: bash
run: |
echo "REPORT_PORTAL_DESCRIPTION=${{github.sha}}" >> $GITHUB_ENV
# - name: 'Config report portal'
# shell: bash
# run: |
# make update-playwright-config REPORT_PORTAL_URL=${{ secrets.REPORT_PORTAL_URL }} REPORT_PORTAL_API_KEY=${{ secrets.REPORT_PORTAL_API_KEY }} REPORT_PORTAL_PROJECT_NAME=${{ secrets.REPORT_PORTAL_PROJECT_NAME }} REPORT_PORTAL_LAUNCH_NAME="Jan App Windows ${{ matrix.antivirus-tools }}" REPORT_PORTAL_DESCRIPTION="${{env.REPORT_PORTAL_DESCRIPTION}}"
- name: Linter and test
shell: powershell
run: |
make test
test-on-windows-pr:
if: (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository) || github.event_name == 'workflow_dispatch'
runs-on: windows-latest
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: actions/cache@v4 # v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('**/yarn.lock') }}
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
- name: Install tauri-driver dependencies
run: |
cargo install tauri-driver --locked
# Clean cache, continue on error
- name: 'Cleanup cache'
shell: powershell
continue-on-error: true
run: |
$path = "$Env:APPDATA\jan"
if (Test-Path $path) {
Remove-Item "\\?\$path" -Recurse -Force
} else {
Write-Output "Folder does not exist."
}
make clean
- name: Get Commit Message for PR
if: github.event_name == 'pull_request'
shell: bash
run: |
echo "REPORT_PORTAL_DESCRIPTION=${{github.event.after}}" >> $GITHUB_ENV
# - name: 'Config report portal'
# shell: bash
# run: |
# make update-playwright-config REPORT_PORTAL_URL=${{ secrets.REPORT_PORTAL_URL }} REPORT_PORTAL_API_KEY=${{ secrets.REPORT_PORTAL_API_KEY }} REPORT_PORTAL_PROJECT_NAME=${{ secrets.REPORT_PORTAL_PROJECT_NAME }} REPORT_PORTAL_LAUNCH_NAME="Jan App Windows" REPORT_PORTAL_DESCRIPTION="${{env.REPORT_PORTAL_DESCRIPTION}}"
- name: Install Prerequisites
shell: 'powershell'
# https://github.com/actions/runner-images/issues/9538
# https://github.com/microsoft/playwright/pull/30009/files
# https://github.com/tauri-apps/wry/issues/1268
# Evergreen Bootstrapper
# The Bootstrapper is a tiny installer that downloads
# the Evergreen Runtime matching device architecture
# and installs it locally.
# https://developer.microsoft.com/en-us/microsoft-edge/webview2/consumer/?form=MA13LH
run: |
Invoke-WebRequest -Uri 'https://go.microsoft.com/fwlink/p/?LinkId=2124703' -OutFile 'setup.exe'
Start-Process -FilePath setup.exe -Verb RunAs -Wait
- name: Linter and test
shell: powershell
run: |
make test
test-on-windows-pr-target:
if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository
runs-on: windows-latest
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v1
with:
node-version: 20
- name: Install tauri-driver dependencies
run: |
cargo install tauri-driver --locked
# Clean cache, continue on error
- name: 'Cleanup cache'
shell: powershell
continue-on-error: true
run: |
$path = "$Env:APPDATA\jan"
if (Test-Path $path) {
Remove-Item "\\?\$path" -Recurse -Force
} else {
Write-Output "Folder does not exist."
}
make clean
- name: Linter and test
shell: powershell
run: |
make test
test-on-ubuntu:
runs-on: ubuntu-latest
if: (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository) || github.event_name == 'push' || github.event_name == 'workflow_dispatch'
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
- name: Install Tauri dependencies
run: |
sudo apt update
sudo apt install -y libglib2.0-dev libatk1.0-dev libpango1.0-dev libgtk-3-dev libsoup-3.0-dev libwebkit2gtk-4.1-dev librsvg2-dev libfuse2 webkit2gtk-driver
- name: Install tauri-driver dependencies
run: |
cargo install tauri-driver --locked
- name: 'Cleanup cache'
continue-on-error: true
run: |
rm -rf ~/jan
make clean
- name: Get Commit Message for PR
if: github.event_name == 'pull_request'
run: |
echo "REPORT_PORTAL_DESCRIPTION=${{github.event.after}}" >> $GITHUB_ENV
- name: Get Commit Message for push event
if: github.event_name == 'push'
run: |
echo "REPORT_PORTAL_DESCRIPTION=${{github.sha}}" >> $GITHUB_ENV
# - name: 'Config report portal'
# shell: bash
# run: |
# make update-playwright-config REPORT_PORTAL_URL=${{ secrets.REPORT_PORTAL_URL }} REPORT_PORTAL_API_KEY=${{ secrets.REPORT_PORTAL_API_KEY }} REPORT_PORTAL_PROJECT_NAME=${{ secrets.REPORT_PORTAL_PROJECT_NAME }} REPORT_PORTAL_LAUNCH_NAME="Jan App Linux" REPORT_PORTAL_DESCRIPTION="${{env.REPORT_PORTAL_DESCRIPTION}}"
- name: Linter and test
run: |
export DISPLAY=$(w -h | awk 'NR==1 {print $2}')
echo -e "Display ID: $DISPLAY"
make test
- uses: actions/upload-artifact@v4
if: always()
with:
name: playwright-report
path: electron/playwright-report/
retention-days: 2
# coverage-check:
# runs-on: ubuntu-latest
# needs: base_branch_cov
# continue-on-error: true
# if: (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository) || github.event_name == 'push' || github.event_name == 'workflow_dispatch'
# steps:
# - name: Getting the repo
# uses: actions/checkout@v3
# with:
# fetch-depth: 0
# - name: Installing node
# uses: actions/setup-node@v3
# with:
# node-version: 20
# - name: Install yarn
# run: npm install -g yarn
# - name: 'Cleanup cache'
# continue-on-error: true
# run: |
# rm -rf ~/jan
# make clean
# - name: Download code coverage report from base branch
# uses: actions/download-artifact@v4
# with:
# name: ref-lcov.info
# - name: Linter and test coverage
# run: |
# export DISPLAY=$(w -h | awk 'NR==1 {print $2}')
# echo -e "Display ID: $DISPLAY"
# make lint
# yarn build:test
# yarn test:coverage
# - name: Generate Code Coverage report
# id: code-coverage
# uses: barecheck/code-coverage-action@v1
# with:
# github-token: ${{ secrets.GITHUB_TOKEN }}
# lcov-file: './coverage/lcov.info'
# base-lcov-file: './lcov.info'
# send-summary-comment: true
# show-annotations: 'warning'
test-on-ubuntu-pr-target:
runs-on: ubuntu-latest
if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
- name: Install Tauri dependencies
run: |
sudo apt update
sudo apt install -y libglib2.0-dev libatk1.0-dev libpango1.0-dev libgtk-3-dev libsoup-3.0-dev libwebkit2gtk-4.1-dev librsvg2-dev libfuse2 webkit2gtk-driver
- name: Install tauri-driver dependencies
run: |
cargo install tauri-driver --locked
- name: 'Cleanup cache'
continue-on-error: true
run: |
rm -rf ~/jan
make clean
- name: Linter and test
run: |
export DISPLAY=$(w -h | awk 'NR==1 {print $2}')
echo -e "Display ID: $DISPLAY"
make test

View File

@ -1,257 +0,0 @@
name: Linter & Test
on:
workflow_dispatch:
push:
branches:
- main
- dev
paths:
- .github/workflows/jan-linter-and-test.yml
- 'web/**'
- 'joi/**'
- 'package.json'
- 'node_modules/**'
- 'yarn.lock'
- 'core/**'
- 'extensions/**'
- '!README.md'
- 'Makefile'
pull_request:
branches:
- main
- dev
- release/**
paths:
- .github/workflows/jan-linter-and-test.yml
- 'web/**'
- 'joi/**'
- 'package.json'
- 'node_modules/**'
- 'yarn.lock'
- 'Makefile'
- 'extensions/**'
- 'core/**'
- 'src-tauri/**'
- 'web-app/**'
- '!README.md'
jobs:
base_branch_cov:
if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository
runs-on: ubuntu-latest
continue-on-error: true
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.base_ref }}
- name: Use Node.js 20.x
uses: actions/setup-node@v3
with:
node-version: 20
- name: 'Cleanup cache'
continue-on-error: true
run: |
rm -rf ~/jan
make clean
- name: Install dependencies
run: |
make lint
- name: Run test coverage
run: |
yarn test:coverage
- name: Upload code coverage for ref branch
uses: actions/upload-artifact@v4
with:
name: ref-lcov.info
path: coverage/lcov.info
test-on-macos:
runs-on: ${{ (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository) && 'macos-latest' || 'macos-selfhosted-15-arm64' }}
if: github.event_name == 'pull_request' || github.event_name == 'push' || github.event_name == 'workflow_dispatch'
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
- name: 'Cleanup cache'
continue-on-error: true
run: |
rm -rf ~/jan
make clean
- name: Linter and test
run: |
make test
env:
CSC_IDENTITY_AUTO_DISCOVERY: 'false'
test-on-windows:
if: github.event_name == 'push'
strategy:
fail-fast: false
matrix:
antivirus-tools: ['mcafee', 'default-windows-security', 'bit-defender']
runs-on: windows-desktop-${{ matrix.antivirus-tools }}
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
# Clean cache, continue on error
- name: 'Cleanup cache'
shell: powershell
continue-on-error: true
run: |
$path = "$Env:APPDATA\jan"
if (Test-Path $path) {
Remove-Item "\\?\$path" -Recurse -Force
} else {
Write-Output "Folder does not exist."
}
make clean
- name: Linter and test
shell: powershell
run: |
make test
test-on-windows-pr:
if: github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch'
runs-on: 'windows-latest'
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: install dependencies
run: |
choco install --yes --no-progress make
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
- name: 'Cleanup cache'
shell: powershell
continue-on-error: true
run: |
$path = "$Env:APPDATA\jan"
if (Test-Path $path) {
Remove-Item "\\?\$path" -Recurse -Force
} else {
Write-Output "Folder does not exist."
}
make clean
- name: Install WebView2 Runtime (Bootstrapper)
shell: powershell
run: |
Invoke-WebRequest -Uri 'https://go.microsoft.com/fwlink/p/?LinkId=2124703' -OutFile 'setup.exe'
Start-Process -FilePath setup.exe -Verb RunAs -Wait
- name: Linter and test
shell: powershell
run: |
make test
env:
NODE_OPTIONS: '--max-old-space-size=2048'
test-on-ubuntu:
runs-on: ${{ (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository) && 'ubuntu-latest' || 'ubuntu-latest' }}
if: github.event_name == 'pull_request' || github.event_name == 'push' || github.event_name == 'workflow_dispatch'
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
- name: Install Tauri dependencies
run: |
sudo apt update
sudo apt install -y libglib2.0-dev libatk1.0-dev libpango1.0-dev libgtk-3-dev libsoup-3.0-dev libwebkit2gtk-4.1-dev librsvg2-dev libfuse2 webkit2gtk-driver
- name: 'Cleanup cache'
continue-on-error: true
run: |
rm -rf ~/jan
make clean
- name: Linter and test
run: |
export DISPLAY=$(w -h | awk 'NR==1 {print $2}')
echo -e "Display ID: $DISPLAY"
make test
- uses: actions/upload-artifact@v4
if: always()
with:
name: playwright-report
path: electron/playwright-report/
retention-days: 2
coverage-check:
if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository
runs-on: ubuntu-latest
needs: base_branch_cov
continue-on-error: true
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
- name: 'Cleanup cache'
continue-on-error: true
run: |
rm -rf ~/jan
make clean
- name: Install dependencies
run: |
make lint
- name: Run test coverage
run: |
yarn test:coverage
- name: Download code coverage report from base branch
uses: actions/download-artifact@v4
with:
name: ref-lcov.info
- name: Generate Code Coverage report
id: code-coverage
uses: barecheck/code-coverage-action@v1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
lcov-file: './coverage/lcov.info'
base-lcov-file: './lcov.info'
send-summary-comment: true
show-annotations: 'warning'

View File

@ -1,60 +0,0 @@
name: Jan Web Server build image and push to Harbor Registry
on:
push:
branches:
- dev-web
pull_request:
branches:
- dev-web
jobs:
build-and-preview:
runs-on: [ubuntu-24-04-docker]
env:
MENLO_PLATFORM_BASE_URL: "https://api-dev.jan.ai/v1"
permissions:
pull-requests: write
contents: write
steps:
- name: Checkout source repo
uses: actions/checkout@v4
- name: Login to Harbor Registry
uses: docker/login-action@v3
with:
registry: registry.menlo.ai
username: ${{ secrets.HARBOR_USERNAME }}
password: ${{ secrets.HARBOR_PASSWORD }}
- name: Install dependencies
run: |
(type -p wget >/dev/null || (sudo apt update && sudo apt install wget -y)) \
&& sudo mkdir -p -m 755 /etc/apt/keyrings \
&& out=$(mktemp) && wget -nv -O$out https://cli.github.com/packages/githubcli-archive-keyring.gpg \
&& cat $out | sudo tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \
&& sudo chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \
&& sudo mkdir -p -m 755 /etc/apt/sources.list.d \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
&& sudo apt update
sudo apt-get install -y jq gettext
- name: Set image tag
id: vars
run: |
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
IMAGE_TAG="web:preview-${{ github.sha }}"
else
IMAGE_TAG="web:dev-${{ github.sha }}"
fi
echo "IMAGE_TAG=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "FULL_IMAGE=registry.menlo.ai/jan-server/${IMAGE_TAG}" >> $GITHUB_OUTPUT
- name: Build docker image
run: |
docker build --build-arg MENLO_PLATFORM_BASE_URL=${{ env.MENLO_PLATFORM_BASE_URL }} -t ${{ steps.vars.outputs.FULL_IMAGE }} .
- name: Push docker image
if: github.event_name == 'push'
run: |
docker push ${{ steps.vars.outputs.FULL_IMAGE }}

View File

@ -1,58 +0,0 @@
name: Jan Web Server deploy to production
on:
push:
branches:
- prod-web
jobs:
build-and-deploy:
runs-on: ubuntu-latest
permissions:
contents: write
deployments: write
pull-requests: write
env:
MENLO_PLATFORM_BASE_URL: "https://api.jan.ai/v1"
GA_MEASUREMENT_ID: "G-YK53MX8M8M"
CLOUDFLARE_PROJECT_NAME: "jan-server-web"
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v3
with:
node-version: 20
- name: Install jq
uses: dcarbone/install-jq-action@v2.0.1
# - name: Fill env vars
# run: |
# env_example_file=".env.example"
# touch .env
# while IFS= read -r line || [[ -n "$line" ]]; do
# if [[ "$line" == *"="* ]]; then
# var_name=$(echo $line | cut -d '=' -f 1)
# echo $var_name
# var_value="$(jq -r --arg key "$var_name" '.[$key]' <<< "$SECRETS")"
# echo "$var_name=$var_value" >> .env
# fi
# done < "$env_example_file"
# env:
# SECRETS: '${{ toJson(secrets) }}'
- name: Install dependencies
run: make config-yarn && yarn install && yarn build:core && make build-web-app
env:
MENLO_PLATFORM_BASE_URL: ${{ env.MENLO_PLATFORM_BASE_URL }}
GA_MEASUREMENT_ID: ${{ env.GA_MEASUREMENT_ID }}
- name: Publish to Cloudflare Pages Production
uses: cloudflare/pages-action@v1
with:
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
projectName: ${{ env.CLOUDFLARE_PROJECT_NAME }}
directory: ./web-app/dist-web
branch: main
# Optional: Enable this if you want to have GitHub Deployments triggered
gitHubToken: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,60 +0,0 @@
name: Jan Web Server build image and push to Harbor Registry
on:
push:
branches:
- stag-web
pull_request:
branches:
- stag-web
jobs:
build-and-preview:
runs-on: [ubuntu-24-04-docker]
env:
MENLO_PLATFORM_BASE_URL: "https://api-stag.jan.ai/v1"
permissions:
pull-requests: write
contents: write
steps:
- name: Checkout source repo
uses: actions/checkout@v4
- name: Login to Harbor Registry
uses: docker/login-action@v3
with:
registry: registry.menlo.ai
username: ${{ secrets.HARBOR_USERNAME }}
password: ${{ secrets.HARBOR_PASSWORD }}
- name: Install dependencies
run: |
(type -p wget >/dev/null || (sudo apt update && sudo apt install wget -y)) \
&& sudo mkdir -p -m 755 /etc/apt/keyrings \
&& out=$(mktemp) && wget -nv -O$out https://cli.github.com/packages/githubcli-archive-keyring.gpg \
&& cat $out | sudo tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \
&& sudo chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \
&& sudo mkdir -p -m 755 /etc/apt/sources.list.d \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
&& sudo apt update
sudo apt-get install -y jq gettext
- name: Set image tag
id: vars
run: |
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
IMAGE_TAG="web:preview-${{ github.sha }}"
else
IMAGE_TAG="web:stag-${{ github.sha }}"
fi
echo "IMAGE_TAG=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "FULL_IMAGE=registry.menlo.ai/jan-server/${IMAGE_TAG}" >> $GITHUB_OUTPUT
- name: Build docker image
run: |
docker build --build-arg MENLO_PLATFORM_BASE_URL=${{ env.MENLO_PLATFORM_BASE_URL }} -t ${{ steps.vars.outputs.FULL_IMAGE }} .
- name: Push docker image
if: github.event_name == 'push'
run: |
docker push ${{ steps.vars.outputs.FULL_IMAGE }}

View File

@ -1,20 +0,0 @@
name: Tauri Builder Flatpak
on:
workflow_dispatch:
inputs:
version:
description: 'Version to build. For example: 0.6.8'
required: false
jobs:
build-linux-x64:
uses: ./.github/workflows/template-tauri-build-linux-x64-flatpak.yml
secrets: inherit
with:
ref: ${{ github.ref }}
public_provider: none
channel: stable
new_version: ${{ inputs.version }}
disable_updater: true

View File

@ -1,46 +0,0 @@
name: Tauri Builder - Nightly / External PRs
on:
pull_request:
branches:
- dev
paths:
- '.github/workflows/jan-tauri-build-nightly-external.yaml'
- '.github/workflows/template-tauri-build-*-external.yml'
- 'src-tauri/**'
- 'core/**'
- 'web-app/**'
- 'extensions/**'
- 'scripts/**'
- 'pre-install/**'
- 'Makefile'
- 'package.json'
jobs:
get-update-version:
uses: ./.github/workflows/template-get-update-version.yml
build-macos:
uses: ./.github/workflows/template-tauri-build-macos-external.yml
needs: [get-update-version]
with:
ref: ${{ github.ref }}
new_version: ${{ needs.get-update-version.outputs.new_version }}
channel: nightly
build-windows-x64:
uses: ./.github/workflows/template-tauri-build-windows-x64-external.yml
needs: [get-update-version]
with:
ref: ${{ github.ref }}
new_version: ${{ needs.get-update-version.outputs.new_version }}
channel: nightly
build-linux-x64:
uses: ./.github/workflows/template-tauri-build-linux-x64-external.yml
needs: [get-update-version]
with:
ref: ${{ github.ref }}
new_version: ${{ needs.get-update-version.outputs.new_version }}
channel: nightly
disable_updater: false

View File

@ -12,35 +12,13 @@ on:
- none - none
- aws-s3 - aws-s3
default: none default: none
disable_updater:
type: boolean
description: 'If true, builds both .deb and .appimage but disables auto-updater'
default: false
pull_request: pull_request:
branches: branches:
- release/** - release/**
- dev
paths:
- '.github/workflows/jan-tauri-build-nightly.yaml'
- '.github/workflows/template-get-update-version.yml'
- '.github/workflows/template-tauri-build-macos.yml'
- '.github/workflows/template-tauri-build-windows-x64.yml'
- '.github/workflows/template-tauri-build-linux-x64.yml'
- '.github/workflows/template-noti-discord-and-update-url-readme.yml'
- 'src-tauri/**'
- 'core/**'
- 'web-app/**'
- 'extensions/**'
- 'scripts/**'
- 'pre-install/**'
- 'Makefile'
- 'package.json'
jobs: jobs:
set-public-provider: set-public-provider:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository
outputs: outputs:
public_provider: ${{ steps.set-public-provider.outputs.public_provider }} public_provider: ${{ steps.set-public-provider.outputs.public_provider }}
ref: ${{ steps.set-public-provider.outputs.ref }} ref: ${{ steps.set-public-provider.outputs.ref }}
@ -68,13 +46,11 @@ jobs:
fi fi
# Job create Update app version based on latest release tag with build number and save to output # Job create Update app version based on latest release tag with build number and save to output
get-update-version: get-update-version:
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository
uses: ./.github/workflows/template-get-update-version.yml uses: ./.github/workflows/template-get-update-version.yml
build-macos: build-macos:
uses: ./.github/workflows/template-tauri-build-macos.yml uses: ./.github/workflows/template-tauri-build-macos.yml
needs: [get-update-version, set-public-provider] needs: [get-update-version, set-public-provider]
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository
secrets: inherit secrets: inherit
with: with:
ref: ${{ needs.set-public-provider.outputs.ref }} ref: ${{ needs.set-public-provider.outputs.ref }}
@ -87,7 +63,6 @@ jobs:
uses: ./.github/workflows/template-tauri-build-windows-x64.yml uses: ./.github/workflows/template-tauri-build-windows-x64.yml
secrets: inherit secrets: inherit
needs: [get-update-version, set-public-provider] needs: [get-update-version, set-public-provider]
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository
with: with:
ref: ${{ needs.set-public-provider.outputs.ref }} ref: ${{ needs.set-public-provider.outputs.ref }}
public_provider: ${{ needs.set-public-provider.outputs.public_provider }} public_provider: ${{ needs.set-public-provider.outputs.public_provider }}
@ -98,14 +73,12 @@ jobs:
uses: ./.github/workflows/template-tauri-build-linux-x64.yml uses: ./.github/workflows/template-tauri-build-linux-x64.yml
secrets: inherit secrets: inherit
needs: [get-update-version, set-public-provider] needs: [get-update-version, set-public-provider]
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository
with: with:
ref: ${{ needs.set-public-provider.outputs.ref }} ref: ${{ needs.set-public-provider.outputs.ref }}
public_provider: ${{ needs.set-public-provider.outputs.public_provider }} public_provider: ${{ needs.set-public-provider.outputs.public_provider }}
new_version: ${{ needs.get-update-version.outputs.new_version }} new_version: ${{ needs.get-update-version.outputs.new_version }}
channel: nightly channel: nightly
cortex_api_port: '39261' cortex_api_port: '39261'
disable_updater: ${{ github.event.inputs.disable_updater == 'true' }}
sync-temp-to-latest: sync-temp-to-latest:
needs: needs:
@ -117,7 +90,6 @@ jobs:
build-macos, build-macos,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository
steps: steps:
- name: Getting the repo - name: Getting the repo
uses: actions/checkout@v3 uses: actions/checkout@v3
@ -168,62 +140,62 @@ jobs:
AWS_DEFAULT_REGION: ${{ secrets.DELTA_AWS_REGION }} AWS_DEFAULT_REGION: ${{ secrets.DELTA_AWS_REGION }}
AWS_EC2_METADATA_DISABLED: 'true' AWS_EC2_METADATA_DISABLED: 'true'
# noti-discord-nightly-and-update-url-readme: noti-discord-nightly-and-update-url-readme:
# needs: needs:
# [ [
# build-macos, build-macos,
# build-windows-x64, build-windows-x64,
# build-linux-x64, build-linux-x64,
# get-update-version, get-update-version,
# set-public-provider, set-public-provider,
# sync-temp-to-latest, sync-temp-to-latest,
# ] ]
# secrets: inherit secrets: inherit
# if: github.event_name == 'schedule' if: github.event_name == 'schedule'
# uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
# with: with:
# ref: refs/heads/dev ref: refs/heads/dev
# build_reason: Nightly build_reason: Nightly
# push_to_branch: dev push_to_branch: dev
# new_version: ${{ needs.get-update-version.outputs.new_version }} new_version: ${{ needs.get-update-version.outputs.new_version }}
# noti-discord-pre-release-and-update-url-readme: noti-discord-pre-release-and-update-url-readme:
# needs: needs:
# [ [
# build-macos, build-macos,
# build-windows-x64, build-windows-x64,
# build-linux-x64, build-linux-x64,
# get-update-version, get-update-version,
# set-public-provider, set-public-provider,
# sync-temp-to-latest, sync-temp-to-latest,
# ] ]
# secrets: inherit secrets: inherit
# if: github.event_name == 'push' if: github.event_name == 'push'
# uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
# with: with:
# ref: refs/heads/dev ref: refs/heads/dev
# build_reason: Pre-release build_reason: Pre-release
# push_to_branch: dev push_to_branch: dev
# new_version: ${{ needs.get-update-version.outputs.new_version }} new_version: ${{ needs.get-update-version.outputs.new_version }}
# noti-discord-manual-and-update-url-readme: noti-discord-manual-and-update-url-readme:
# needs: needs:
# [ [
# build-macos, build-macos,
# build-windows-x64, build-windows-x64,
# build-linux-x64, build-linux-x64,
# get-update-version, get-update-version,
# set-public-provider, set-public-provider,
# sync-temp-to-latest, sync-temp-to-latest,
# ] ]
# secrets: inherit secrets: inherit
# if: github.event_name == 'workflow_dispatch' && github.event.inputs.public_provider == 'aws-s3' if: github.event_name == 'workflow_dispatch' && github.event.inputs.public_provider == 'aws-s3'
# uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
# with: with:
# ref: refs/heads/dev ref: refs/heads/dev
# build_reason: Manual build_reason: Manual
# push_to_branch: dev push_to_branch: dev
# new_version: ${{ needs.get-update-version.outputs.new_version }} new_version: ${{ needs.get-update-version.outputs.new_version }}
comment-pr-build-url: comment-pr-build-url:
needs: needs:

View File

@ -32,7 +32,6 @@ jobs:
name: "${{ env.VERSION }}" name: "${{ env.VERSION }}"
draft: true draft: true
prerelease: false prerelease: false
generate_release_notes: true
build-macos: build-macos:
uses: ./.github/workflows/template-tauri-build-macos.yml uses: ./.github/workflows/template-tauri-build-macos.yml
@ -82,11 +81,11 @@ jobs:
VERSION=${{ needs.get-update-version.outputs.new_version }} VERSION=${{ needs.get-update-version.outputs.new_version }}
PUB_DATE=$(date -u +"%Y-%m-%dT%H:%M:%S.%3NZ") PUB_DATE=$(date -u +"%Y-%m-%dT%H:%M:%S.%3NZ")
LINUX_SIGNATURE="${{ needs.build-linux-x64.outputs.APPIMAGE_SIG }}" LINUX_SIGNATURE="${{ needs.build-linux-x64.outputs.APPIMAGE_SIG }}"
LINUX_URL="https://github.com/janhq/jan/releases/download/v${{ needs.get-update-version.outputs.new_version }}/${{ needs.build-linux-x64.outputs.APPIMAGE_FILE_NAME }}" LINUX_URL="https://github.com/menloresearch/jan/releases/download/v${{ needs.get-update-version.outputs.new_version }}/${{ needs.build-linux-x64.outputs.APPIMAGE_FILE_NAME }}"
WINDOWS_SIGNATURE="${{ needs.build-windows-x64.outputs.WIN_SIG }}" WINDOWS_SIGNATURE="${{ needs.build-windows-x64.outputs.WIN_SIG }}"
WINDOWS_URL="https://github.com/janhq/jan/releases/download/v${{ needs.get-update-version.outputs.new_version }}/${{ needs.build-windows-x64.outputs.FILE_NAME }}" WINDOWS_URL="https://github.com/menloresearch/jan/releases/download/v${{ needs.get-update-version.outputs.new_version }}/${{ needs.build-windows-x64.outputs.FILE_NAME }}"
DARWIN_SIGNATURE="${{ needs.build-macos.outputs.MAC_UNIVERSAL_SIG }}" DARWIN_SIGNATURE="${{ needs.build-macos.outputs.MAC_UNIVERSAL_SIG }}"
DARWIN_URL="https://github.com/janhq/jan/releases/download/v${{ needs.get-update-version.outputs.new_version }}/${{ needs.build-macos.outputs.TAR_NAME }}" DARWIN_URL="https://github.com/menloresearch/jan/releases/download/v${{ needs.get-update-version.outputs.new_version }}/${{ needs.build-macos.outputs.TAR_NAME }}"
jq --arg version "$VERSION" \ jq --arg version "$VERSION" \
--arg pub_date "$PUB_DATE" \ --arg pub_date "$PUB_DATE" \
@ -119,4 +118,28 @@ jobs:
upload_url: ${{ needs.create-draft-release.outputs.upload_url }} upload_url: ${{ needs.create-draft-release.outputs.upload_url }}
asset_path: ./latest.json asset_path: ./latest.json
asset_name: latest.json asset_name: latest.json
asset_content_type: text/json asset_content_type: text/json
update_release_draft:
needs: [build-macos, build-windows-x64, build-linux-x64]
permissions:
# write permission is required to create a github release
contents: write
# write permission is required for autolabeler
# otherwise, read permission is required at least
pull-requests: write
runs-on: ubuntu-latest
steps:
# (Optional) GitHub Enterprise requires GHE_HOST variable set
#- name: Set GHE_HOST
# run: |
# echo "GHE_HOST=${GITHUB_SERVER_URL##https:\/\/}" >> $GITHUB_ENV
# Drafts your next Release notes as Pull Requests are merged into "master"
- uses: release-drafter/release-drafter@v5
# (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml
# with:
# config-name: my-config.yml
# disable-autolabeler: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -0,0 +1,186 @@
name: build-linux-x64
on:
workflow_call:
inputs:
ref:
required: true
type: string
default: 'refs/heads/main'
public_provider:
required: true
type: string
default: none
description: 'none: build only, github: build and publish to github, aws s3: build and publish to aws s3'
new_version:
required: true
type: string
default: ''
aws_s3_prefix:
required: false
type: string
default: '/latest/'
beta:
required: false
type: boolean
default: false
nightly:
required: false
type: boolean
default: false
cortex_api_port:
required: false
type: string
default: null
secrets:
DELTA_AWS_S3_BUCKET_NAME:
required: false
DELTA_AWS_ACCESS_KEY_ID:
required: false
DELTA_AWS_SECRET_ACCESS_KEY:
required: false
jobs:
build-linux-x64:
if: inputs.public_provider == 'github' || inputs.public_provider == 'none'
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
ref: ${{ inputs.ref }}
- name: Replace Icons for Beta Build
if: inputs.beta == true && inputs.nightly != true
shell: bash
run: |
rm -rf electron/icons/*
cp electron/icons_dev/jan-beta-512x512.png electron/icons/512x512.png
cp electron/icons_dev/jan-beta.ico electron/icons/icon.ico
cp electron/icons_dev/jan-beta.png electron/icons/icon.png
cp electron/icons_dev/jan-beta-tray@2x.png electron/icons/icon-tray@2x.png
cp electron/icons_dev/jan-beta-tray.png electron/icons/icon-tray.png
- name: Replace Icons for Nightly Build
if: inputs.nightly == true && inputs.beta != true
shell: bash
run: |
rm -rf electron/icons/*
cp electron/icons_dev/jan-nightly-512x512.png electron/icons/512x512.png
cp electron/icons_dev/jan-nightly.ico electron/icons/icon.ico
cp electron/icons_dev/jan-nightly.png electron/icons/icon.png
cp electron/icons_dev/jan-nightly-tray@2x.png electron/icons/icon-tray@2x.png
cp electron/icons_dev/jan-nightly-tray.png electron/icons/icon-tray.png
- name: Installing node
uses: actions/setup-node@v1
with:
node-version: 20
- name: Install jq
uses: dcarbone/install-jq-action@v2.0.1
- name: Update app version base public_provider
if: inputs.public_provider != 'github'
run: |
echo "Version: ${{ inputs.new_version }}"
# Update the version in electron/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' web/package.json > /tmp/package.json
mv /tmp/package.json web/package.json
jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/nightly", "channel": "latest"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-nightly", "channel": "latest"}]' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
cat electron/package.json
chmod +x .github/scripts/rename-app.sh
.github/scripts/rename-app.sh ./electron/package.json nightly
chmod +x .github/scripts/rename-workspace.sh
.github/scripts/rename-workspace.sh ./package.json nightly
echo "------------------------"
cat ./electron/package.json
echo "------------------------"
- name: Change App Name for beta version
if: inputs.beta == true
shell: bash
run: |
chmod +x .github/scripts/rename-app.sh
.github/scripts/rename-app.sh ./electron/package.json beta
chmod +x .github/scripts/rename-workspace.sh
.github/scripts/rename-workspace.sh ./package.json beta
echo "------------------------"
cat ./electron/package.json
echo "------------------------"
cat ./package.json
jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/beta", "channel": "beta"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-beta", "channel": "beta"}]' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
cat electron/package.json
- name: Update app version base on tag
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github'
run: |
jq --arg version "${VERSION_TAG#v}" '.version = $version' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
jq --arg version "${VERSION_TAG#v}" '.version = $version' web/package.json > /tmp/package.json
mv /tmp/package.json web/package.json
env:
VERSION_TAG: ${{ inputs.new_version }}
- name: Build and publish app to aws s3 r2 or github artifactory
if: inputs.public_provider != 'github'
run: |
# check public_provider is true or not
echo "public_provider is ${{ inputs.public_provider }}"
if [ "${{ inputs.public_provider }}" == "none" ]; then
make build
else
make build-and-publish
fi
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
CORTEX_API_PORT: ${{ inputs.cortex_api_port }}
- name: Build and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == false
run: |
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Build and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == true
run: |
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Upload Artifact .deb file
if: inputs.public_provider != 'github'
uses: actions/upload-artifact@v4
with:
name: jan-electron-linux-amd64-${{ inputs.new_version }}-deb
path: ./electron/dist/*.deb
- name: Upload Artifact .AppImage file
if: inputs.public_provider != 'github'
uses: actions/upload-artifact@v4
with:
name: jan-electron-linux-amd64-${{ inputs.new_version }}-AppImage
path: ./electron/dist/*.AppImage

View File

@ -0,0 +1,233 @@
# Reusable (workflow_call) pipeline: builds the macOS universal Electron app,
# optionally code-signs/notarizes it, and publishes to GitHub releases or S3/R2
# depending on `public_provider`. Channel branding (beta/nightly) is applied by
# rewriting icons, package.json metadata, and electron-builder publish config.
name: build-macos
on:
  workflow_call:
    inputs:
      # Git ref to check out and build.
      ref:
        required: true
        type: string
        default: 'refs/heads/main'
      # Gates both the job itself (see `if:` on the job) and which
      # build/publish steps run below.
      public_provider:
        required: true
        type: string
        default: none
        description: 'none: build only, github: build and publish to github, aws s3: build and publish to aws s3'
      # Version string written into electron/package.json and web/package.json.
      new_version:
        required: true
        type: string
        default: ''
      aws_s3_prefix:
        required: false
        type: string
        default: '/latest/'
      # beta/nightly are mutually exclusive channel flags (enforced only by the
      # `if:` conditions on the icon steps, not validated here).
      beta:
        required: false
        type: boolean
        default: false
      nightly:
        required: false
        type: boolean
        default: false
      cortex_api_port:
        required: false
        type: string
        default: null
    secrets:
      DELTA_AWS_S3_BUCKET_NAME:
        required: false
      DELTA_AWS_ACCESS_KEY_ID:
        required: false
      DELTA_AWS_SECRET_ACCESS_KEY:
        required: false
      # Apple signing/notarization material; all optional so the workflow can
      # still run unsigned local-style builds.
      CODE_SIGN_P12_BASE64:
        required: false
      CODE_SIGN_P12_PASSWORD:
        required: false
      APPLE_ID:
        required: false
      APPLE_APP_SPECIFIC_PASSWORD:
        required: false
      DEVELOPER_ID:
        required: false
jobs:
  build-macos:
    # Only 'github' and 'none' providers build on macOS; other providers skip.
    if: inputs.public_provider == 'github' || inputs.public_provider == 'none'
    runs-on: macos-latest
    permissions:
      contents: write
    steps:
      - name: Getting the repo
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
      # Swap in channel-specific icons so beta installs are visually distinct.
      - name: Replace Icons for Beta Build
        if: inputs.beta == true && inputs.nightly != true
        shell: bash
        run: |
          rm -rf electron/icons/*
          cp electron/icons_dev/jan-beta-512x512.png electron/icons/512x512.png
          cp electron/icons_dev/jan-beta.ico electron/icons/icon.ico
          cp electron/icons_dev/jan-beta.png electron/icons/icon.png
          cp electron/icons_dev/jan-beta-tray@2x.png electron/icons/icon-tray@2x.png
          cp electron/icons_dev/jan-beta-tray.png electron/icons/icon-tray.png
      - name: Replace Icons for Nightly Build
        if: inputs.nightly == true && inputs.beta != true
        shell: bash
        run: |
          rm -rf electron/icons/*
          cp electron/icons_dev/jan-nightly-512x512.png electron/icons/512x512.png
          cp electron/icons_dev/jan-nightly.ico electron/icons/icon.ico
          cp electron/icons_dev/jan-nightly.png electron/icons/icon.png
          cp electron/icons_dev/jan-nightly-tray@2x.png electron/icons/icon-tray@2x.png
          cp electron/icons_dev/jan-nightly-tray.png electron/icons/icon-tray.png
      - name: Installing node
        uses: actions/setup-node@v1
        with:
          node-version: 20
      - name: Install jq
        uses: dcarbone/install-jq-action@v2.0.1
      # Nightly path: stamp the version, point electron-builder's publish
      # config at the nightly delta endpoint + S3 staging path, and rename the
      # app/workspace to the nightly variant.
      - name: Update app version based on latest release tag with build number
        if: inputs.public_provider != 'github'
        run: |
          echo "Version: ${{ inputs.new_version }}"
          # Update the version in electron/package.json
          jq --arg version "${{ inputs.new_version }}" '.version = $version' electron/package.json > /tmp/package.json
          mv /tmp/package.json electron/package.json
          jq --arg version "${{ inputs.new_version }}" '.version = $version' web/package.json > /tmp/package.json
          mv /tmp/package.json web/package.json
          jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/nightly", "channel": "latest"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-nightly", "channel": "latest"}]' electron/package.json > /tmp/package.json
          mv /tmp/package.json electron/package.json
          jq --arg teamid "${{ secrets.APPLE_TEAM_ID }}" '.build.mac.notarize.teamId = $teamid' electron/package.json > /tmp/package.json
          mv /tmp/package.json electron/package.json
          # cat electron/package.json
          chmod +x .github/scripts/rename-app.sh
          .github/scripts/rename-app.sh ./electron/package.json nightly
          chmod +x .github/scripts/rename-workspace.sh
          .github/scripts/rename-workspace.sh ./package.json nightly
          echo "------------------------"
          cat ./electron/package.json
          echo "------------------------"
      # Beta path: rename app/workspace and retarget publish config at the
      # beta channel. NOTE(review): this can run in addition to the nightly
      # step above when public_provider != 'github' and beta == true — the
      # last jq write wins; confirm the caller never passes that combination.
      - name: Change App Name for beta version
        if: inputs.beta == true
        shell: bash
        run: |
          chmod +x .github/scripts/rename-app.sh
          .github/scripts/rename-app.sh ./electron/package.json beta
          chmod +x .github/scripts/rename-workspace.sh
          .github/scripts/rename-workspace.sh ./package.json beta
          echo "------------------------"
          cat ./electron/package.json
          echo "------------------------"
          cat ./package.json
          jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/beta", "channel": "beta"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-beta", "channel": "beta"}]' electron/package.json > /tmp/package.json
          mv /tmp/package.json electron/package.json
          cat electron/package.json
      # Stable/tagged path: derive the version from the pushed tag
      # (VERSION_TAG with its leading 'v' stripped).
      - name: Update app version base on tag
        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github'
        run: |
          jq --arg version "${VERSION_TAG#v}" '.version = $version' electron/package.json > /tmp/package.json
          mv /tmp/package.json electron/package.json
          jq --arg version "${VERSION_TAG#v}" '.version = $version' web/package.json > /tmp/package.json
          mv /tmp/package.json web/package.json
          jq --arg teamid "${{ secrets.APPLE_TEAM_ID }}" '.build.mac.notarize.teamId = $teamid' electron/package.json > /tmp/package.json
          mv /tmp/package.json electron/package.json
          cat electron/package.json
        env:
          VERSION_TAG: ${{ inputs.new_version }}
      # Decode the signing certificate to disk for electron-builder (CSC_LINK).
      - name: Get Cer for code signing
        run: base64 -d <<< "$CODE_SIGN_P12_BASE64" > /tmp/codesign.p12
        shell: bash
        env:
          CODE_SIGN_P12_BASE64: ${{ secrets.CODE_SIGN_P12_BASE64 }}
      # continue-on-error: builds still proceed (unsigned) when certs are absent.
      - uses: apple-actions/import-codesign-certs@v2
        continue-on-error: true
        with:
          p12-file-base64: ${{ secrets.CODE_SIGN_P12_BASE64 }}
          p12-password: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
      - name: Build and publish app to aws s3 r2 or github artifactory
        if: inputs.public_provider != 'github'
        run: |
          # check public_provider is true or not
          echo "public_provider is ${{ inputs.public_provider }}"
          if [ "${{ inputs.public_provider }}" == "none" ]; then
            make build
          else
            make build-and-publish
          fi
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          CSC_LINK: '/tmp/codesign.p12'
          CSC_KEY_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
          CSC_IDENTITY_AUTO_DISCOVERY: 'true'
          APPLE_ID: ${{ secrets.APPLE_ID }}
          APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
          APP_PATH: '.'
          DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
          AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: auto
          AWS_EC2_METADATA_DISABLED: 'true'
          AWS_MAX_ATTEMPTS: '5'
          POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
          POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
          CORTEX_API_PORT: ${{ inputs.cortex_api_port }}
      # Tagged stable release -> GitHub releases (no AWS env needed).
      - name: Build and publish app to github
        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == false
        run: |
          make build-and-publish
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          CSC_LINK: '/tmp/codesign.p12'
          CSC_KEY_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
          CSC_IDENTITY_AUTO_DISCOVERY: 'true'
          APPLE_ID: ${{ secrets.APPLE_ID }}
          APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
          APP_PATH: '.'
          DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
          POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
          POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
      # Tagged beta release -> GitHub releases plus S3 delta upload, hence the
      # extra AWS credentials compared with the stable step above.
      - name: Build and publish app to github
        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == true
        run: |
          make build-and-publish
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          CSC_LINK: '/tmp/codesign.p12'
          CSC_KEY_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
          CSC_IDENTITY_AUTO_DISCOVERY: 'true'
          APPLE_ID: ${{ secrets.APPLE_ID }}
          APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
          APP_PATH: '.'
          DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
          AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: auto
          AWS_EC2_METADATA_DISABLED: 'true'
          AWS_MAX_ATTEMPTS: '5'
          POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
          POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
      # Non-published builds are kept as CI artifacts for manual testing.
      - name: Upload Artifact
        if: inputs.public_provider != 'github'
        uses: actions/upload-artifact@v4
        with:
          name: jan-electron-mac-universal-${{ inputs.new_version }}
          path: ./electron/dist/*.dmg

View File

@ -0,0 +1,230 @@
# Reusable (workflow_call) pipeline: builds the Windows x64 Electron app,
# signs it via AzureSignTool (Azure Key Vault), and publishes to GitHub
# releases or S3/R2 depending on `public_provider`. Mirrors build-macos but
# with Windows signing and an uninstaller rename step.
name: build-windows-x64
on:
  workflow_call:
    inputs:
      # Git ref to check out and build.
      ref:
        required: true
        type: string
        default: 'refs/heads/main'
      public_provider:
        required: true
        type: string
        default: none
        description: 'none: build only, github: build and publish to github, aws s3: build and publish to aws s3'
      new_version:
        required: true
        type: string
        default: ''
      aws_s3_prefix:
        required: false
        type: string
        default: '/latest/'
      beta:
        required: false
        type: boolean
        default: false
      nightly:
        required: false
        type: boolean
        default: false
      cortex_api_port:
        required: false
        type: string
        default: null
    secrets:
      DELTA_AWS_S3_BUCKET_NAME:
        required: false
      DELTA_AWS_ACCESS_KEY_ID:
        required: false
      DELTA_AWS_SECRET_ACCESS_KEY:
        required: false
      # Azure Key Vault credentials used by AzureSignTool for code signing.
      AZURE_KEY_VAULT_URI:
        required: false
      AZURE_CLIENT_ID:
        required: false
      AZURE_TENANT_ID:
        required: false
      AZURE_CLIENT_SECRET:
        required: false
      AZURE_CERT_NAME:
        required: false
jobs:
  build-windows-x64:
    if: inputs.public_provider == 'github' || inputs.public_provider == 'none'
    runs-on: windows-latest
    permissions:
      contents: write
    steps:
      - name: Getting the repo
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
      # Swap in channel-specific icons so beta installs are visually distinct.
      - name: Replace Icons for Beta Build
        if: inputs.beta == true && inputs.nightly != true
        shell: bash
        run: |
          rm -rf electron/icons/*
          cp electron/icons_dev/jan-beta-512x512.png electron/icons/512x512.png
          cp electron/icons_dev/jan-beta.ico electron/icons/icon.ico
          cp electron/icons_dev/jan-beta.png electron/icons/icon.png
          cp electron/icons_dev/jan-beta-tray@2x.png electron/icons/icon-tray@2x.png
          cp electron/icons_dev/jan-beta-tray.png electron/icons/icon-tray.png
      - name: Replace Icons for Nightly Build
        if: inputs.nightly == true && inputs.beta != true
        shell: bash
        run: |
          rm -rf electron/icons/*
          cp electron/icons_dev/jan-nightly-512x512.png electron/icons/512x512.png
          cp electron/icons_dev/jan-nightly.ico electron/icons/icon.ico
          cp electron/icons_dev/jan-nightly.png electron/icons/icon.png
          cp electron/icons_dev/jan-nightly-tray@2x.png electron/icons/icon-tray@2x.png
          cp electron/icons_dev/jan-nightly-tray.png electron/icons/icon-tray.png
      - name: Installing node
        uses: actions/setup-node@v1
        with:
          node-version: 20
      - name: Install jq
        uses: dcarbone/install-jq-action@v2.0.1
      # Nightly path: stamp the version, wire the custom signing hook
      # (.build.win.sign = ./sign.js), point publish config at the nightly
      # endpoints, and rename app/workspace/uninstaller to the nightly variant.
      - name: Update app version base on tag
        if: inputs.public_provider != 'github'
        id: version_update
        shell: bash
        run: |
          echo "Version: ${{ inputs.new_version }}"
          # Update the version in electron/package.json
          jq --arg version "${{ inputs.new_version }}" '.version = $version' electron/package.json > /tmp/package.json
          mv /tmp/package.json electron/package.json
          jq --arg version "${{ inputs.new_version }}" '.version = $version' web/package.json > /tmp/package.json
          mv /tmp/package.json web/package.json
          jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/nightly", "channel": "latest"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-nightly", "channel": "latest"}]' electron/package.json > /tmp/package.json
          mv /tmp/package.json electron/package.json
          jq '.build.win.sign = "./sign.js"' electron/package.json > /tmp/package.json
          mv /tmp/package.json electron/package.json
          cat electron/package.json
          chmod +x .github/scripts/rename-app.sh
          .github/scripts/rename-app.sh ./electron/package.json nightly
          chmod +x .github/scripts/rename-workspace.sh
          .github/scripts/rename-workspace.sh ./package.json nightly
          chmod +x .github/scripts/rename-uninstaller.sh
          .github/scripts/rename-uninstaller.sh nightly
          echo "------------------------"
          cat ./electron/package.json
          echo "------------------------"
          cat ./package.json
          echo "------------------------"
      # Beta path: rename app/workspace/uninstaller and retarget publish
      # config at the beta channel. NOTE(review): may run after the nightly
      # step when public_provider != 'github' and beta == true; the later jq
      # write wins — confirm callers never pass that combination.
      - name: Change App Name for beta version
        if: inputs.beta == true
        shell: bash
        run: |
          chmod +x .github/scripts/rename-app.sh
          .github/scripts/rename-app.sh ./electron/package.json beta
          chmod +x .github/scripts/rename-workspace.sh
          .github/scripts/rename-workspace.sh ./package.json beta
          chmod +x .github/scripts/rename-uninstaller.sh
          .github/scripts/rename-uninstaller.sh beta
          echo "------------------------"
          cat ./electron/package.json
          echo "------------------------"
          cat ./package.json
          echo "------------------------"
          cat ./electron/scripts/uninstaller.nsh
          jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/beta", "channel": "beta"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-beta", "channel": "beta"}]' electron/package.json > /tmp/package.json
          mv /tmp/package.json electron/package.json
          cat electron/package.json
      # Stable/tagged path: derive the version from the pushed tag
      # (VERSION_TAG with its leading 'v' stripped) and wire the sign hook.
      - name: Update app version base on tag
        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github'
        shell: bash
        run: |
          jq --arg version "${VERSION_TAG#v}" '.version = $version' electron/package.json > /tmp/package.json
          mv /tmp/package.json electron/package.json
          jq --arg version "${VERSION_TAG#v}" '.version = $version' web/package.json > /tmp/package.json
          mv /tmp/package.json web/package.json
          jq '.build.win.sign = "./sign.js"' electron/package.json > /tmp/package.json
          mv /tmp/package.json electron/package.json
        env:
          VERSION_TAG: ${{ inputs.new_version }}
      # AzureSignTool is invoked by the ./sign.js hook during electron-builder.
      - name: Install AzureSignTool
        run: |
          dotnet tool install --global AzureSignTool
      - name: Build and publish app to aws s3 r2 or github artifactory
        shell: bash
        if: inputs.public_provider != 'github'
        run: |
          # check public_provider is true or not
          echo "public_provider is ${{ inputs.public_provider }}"
          if [ "${{ inputs.public_provider }}" == "none" ]; then
            make build
          else
            make build-and-publish
          fi
        env:
          AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
          AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
          AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
          AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
          AZURE_CERT_NAME: homebrewltd
          AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: auto
          AWS_EC2_METADATA_DISABLED: 'true'
          AWS_MAX_ATTEMPTS: '5'
          POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
          POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
          CORTEX_API_PORT: ${{ inputs.cortex_api_port }}
      # Tagged stable release -> GitHub releases.
      - name: Build app and publish app to github
        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == false
        run: |
          make build-and-publish
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
          AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
          AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
          AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
          AZURE_CERT_NAME: homebrewltd
          POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
          POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
      # Tagged beta release -> GitHub releases plus S3 delta upload.
      - name: Build app and publish app to github
        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == true
        run: |
          make build-and-publish
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: auto
          AWS_EC2_METADATA_DISABLED: 'true'
          AWS_MAX_ATTEMPTS: '5'
          AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
          AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
          AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
          AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
          # AZURE_CERT_NAME: ${{ secrets.AZURE_CERT_NAME }}
          AZURE_CERT_NAME: homebrewltd
          POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
          POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
      # Non-published builds are kept as CI artifacts for manual testing.
      - name: Upload Artifact
        if: inputs.public_provider != 'github'
        uses: actions/upload-artifact@v4
        with:
          name: jan-electron-win-x64-${{ inputs.new_version }}
          path: ./electron/dist/*.exe

View File

@ -29,7 +29,7 @@ jobs:
local max_retries=3 local max_retries=3
local tag local tag
while [ $retries -lt $max_retries ]; do while [ $retries -lt $max_retries ]; do
tag=$(curl -s https://api.github.com/repos/janhq/jan/releases/latest | jq -r .tag_name) tag=$(curl -s https://api.github.com/repos/menloresearch/jan/releases/latest | jq -r .tag_name)
if [ -n "$tag" ] && [ "$tag" != "null" ]; then if [ -n "$tag" ] && [ "$tag" != "null" ]; then
echo $tag echo $tag
return return

View File

@ -50,6 +50,6 @@ jobs:
- macOS Universal: https://delta.jan.ai/nightly/Jan-nightly_{{ VERSION }}_universal.dmg - macOS Universal: https://delta.jan.ai/nightly/Jan-nightly_{{ VERSION }}_universal.dmg
- Linux Deb: https://delta.jan.ai/nightly/Jan-nightly_{{ VERSION }}_amd64.deb - Linux Deb: https://delta.jan.ai/nightly/Jan-nightly_{{ VERSION }}_amd64.deb
- Linux AppImage: https://delta.jan.ai/nightly/Jan-nightly_{{ VERSION }}_amd64.AppImage - Linux AppImage: https://delta.jan.ai/nightly/Jan-nightly_{{ VERSION }}_amd64.AppImage
- Github action run: https://github.com/janhq/jan/actions/runs/{{ GITHUB_RUN_ID }} - Github action run: https://github.com/menloresearch/jan/actions/runs/{{ GITHUB_RUN_ID }}
env: env:
DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }} DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}

View File

@ -1,138 +0,0 @@
# Reusable (workflow_call) pipeline: Tauri Linux x64 build that produces .deb
# and .AppImage bundles as CI artifacts only (no publishing, no update
# artifacts — createUpdaterArtifacts is forced to false below).
name: tauri-build-linux-x64-external
on:
  workflow_call:
    inputs:
      # Git ref to check out and build.
      ref:
        required: true
        type: string
        default: 'refs/heads/main'
      # Version stamped into tauri.conf.json, package.json files, and
      # the Cargo.toml of the app and its plugins.
      new_version:
        required: true
        type: string
        default: ''
      channel:
        required: true
        type: string
        default: 'nightly'
        description: 'The channel to use for this job'
      disable_updater:
        required: false
        type: boolean
        default: false
        description: 'If true, builds both .deb and .appimage but disables auto-updater'
jobs:
  build-linux-x64-external:
    runs-on: ubuntu-latest
    steps:
      - name: Getting the repo
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
      # Hosted runners can run out of disk during the Rust + web build; drop
      # large preinstalled toolchains first.
      - name: Free Disk Space Before Build
        run: |
          echo "Disk space before cleanup:"
          df -h
          sudo rm -rf /usr/local/.ghcup
          sudo rm -rf /opt/hostedtoolcache/CodeQL
          sudo rm -rf /usr/local/lib/android/sdk/ndk
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /opt/ghc
          sudo rm -rf /usr/local/share/boost
          sudo apt-get clean
          echo "Disk space after cleanup:"
          df -h
      # Non-stable channels get a channel-specific icon.
      - name: Replace Icons for Beta Build
        if: inputs.channel != 'stable'
        shell: bash
        run: |
          cp .github/scripts/icon-${{ inputs.channel }}.png src-tauri/icons/icon.png
      - name: Installing node
        uses: actions/setup-node@v1
        with:
          node-version: 20
      - name: Install jq
        uses: dcarbone/install-jq-action@v2.0.1
      # ctoml is used below to edit Cargo.toml keys from the shell.
      - name: Install ctoml
        run: |
          cargo install ctoml
      - name: Install Tauri dependencies
        run: |
          sudo apt update
          sudo apt install -y libglib2.0-dev libatk1.0-dev libpango1.0-dev libgtk-3-dev libsoup-3.0-dev libwebkit2gtk-4.1-dev librsvg2-dev libfuse2 libayatana-appindicator3-dev
      # Stamp the version everywhere (tauri.conf.json, web-app, both tauri
      # plugins, and the app Cargo.toml); updater artifacts are disabled for
      # this "external" variant. Non-stable channels additionally bundle bun
      # into the .deb, point the updater endpoint at the channel's latest.json,
      # and rename the app/workspace to the channel variant.
      - name: Update app version
        run: |
          echo "Version: ${{ inputs.new_version }}"
          jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = false' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
          mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
          if [ "${{ inputs.channel }}" != "stable" ]; then
            jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json
            mv /tmp/tauri.linux.conf.json ./src-tauri/tauri.linux.conf.json
          fi
          jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json
          mv /tmp/package.json web-app/package.json
          # Update tauri plugin versions
          jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-hardware/package.json > /tmp/package.json
          mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-hardware/package.json
          echo "---------./src-tauri/plugins/tauri-plugin-hardware/package.json---------"
          cat ./src-tauri/plugins/tauri-plugin-hardware/package.json
          jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-llamacpp/package.json > /tmp/package.json
          mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
          echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/package.json---------"
          cat ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
          ctoml ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml package.version "${{ inputs.new_version }}"
          echo "---------./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml---------"
          cat ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml
          ctoml ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml package.version "${{ inputs.new_version }}"
          echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml---------"
          cat ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml
          ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}"
          echo "---------./src-tauri/Cargo.toml---------"
          cat ./src-tauri/Cargo.toml
          ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
          if [ "${{ inputs.channel }}" != "stable" ]; then
            jq '.plugins.updater.endpoints = ["https://delta.jan.ai/${{ inputs.channel }}/latest.json"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
            mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
            chmod +x .github/scripts/rename-tauri-app.sh
            .github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }}
            ctoml ./src-tauri/Cargo.toml package.name "Jan-${{ inputs.channel }}"
            ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
            chmod +x .github/scripts/rename-workspace.sh
            .github/scripts/rename-workspace.sh ./package.json ${{ inputs.channel }}
          fi
      - name: Build app
        run: |
          make build
        env:
          RELEASE_CHANNEL: '${{ inputs.channel }}'
          AUTO_UPDATER_DISABLED: ${{ inputs.disable_updater && 'true' || 'false' }}
      # Bundles are only exposed as CI artifacts; this workflow never publishes.
      - name: Upload Artifact
        uses: actions/upload-artifact@v4
        with:
          name: jan-linux-amd64-${{ inputs.new_version }}-deb
          path: ./src-tauri/target/release/bundle/deb/*.deb
      - name: Upload Artifact
        uses: actions/upload-artifact@v4
        with:
          name: jan-linux-amd64-${{ inputs.new_version }}-AppImage
          path: ./src-tauri/target/release/bundle/appimage/*.AppImage

View File

@ -1,185 +0,0 @@
# Reusable (workflow_call) pipeline: Tauri Linux x64 build (flatpak variant)
# producing .deb and .AppImage bundles with updater artifacts enabled; the
# AppImage is additionally signed with the Tauri updater signing key.
name: tauri-build-linux-x64-flatpak
on:
  workflow_call:
    inputs:
      # Git ref to check out and build.
      ref:
        required: true
        type: string
        default: 'refs/heads/main'
      public_provider:
        required: true
        type: string
        default: none
        description: 'none: build only, github: build and publish to github, aws s3: build and publish to aws s3'
      new_version:
        required: true
        type: string
        default: ''
      cortex_api_port:
        required: false
        type: string
        default: ''
      upload_url:
        required: false
        type: string
        default: ''
      channel:
        required: true
        type: string
        default: 'nightly'
        description: 'The channel to use for this job'
      disable_updater:
        required: false
        type: boolean
        default: false
        description: 'If true, builds both .deb and .appimage but disables auto-updater'
    secrets:
      DELTA_AWS_S3_BUCKET_NAME:
        required: false
      DELTA_AWS_ACCESS_KEY_ID:
        required: false
      DELTA_AWS_SECRET_ACCESS_KEY:
        required: false
      # Tauri updater signing key pair material (used by `tauri signer sign`).
      TAURI_SIGNING_PRIVATE_KEY:
        required: false
      TAURI_SIGNING_PRIVATE_KEY_PASSWORD:
        required: false
jobs:
  build-linux-x64:
    runs-on: ubuntu-22.04
    permissions:
      contents: write
    steps:
      - name: Getting the repo
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
      # Hosted runners can run out of disk during the Rust + web build; drop
      # large preinstalled toolchains first.
      - name: Free Disk Space Before Build
        run: |
          echo "Disk space before cleanup:"
          df -h
          sudo rm -rf /usr/local/.ghcup
          sudo rm -rf /opt/hostedtoolcache/CodeQL
          sudo rm -rf /usr/local/lib/android/sdk/ndk
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /opt/ghc
          sudo rm -rf /usr/local/share/boost
          sudo apt-get clean
          echo "Disk space after cleanup:"
          df -h
      - name: Installing node
        uses: actions/setup-node@v1
        with:
          node-version: 20
      - name: Install jq
        uses: dcarbone/install-jq-action@v2.0.1
      # ctoml is used below to edit Cargo.toml keys from the shell.
      - name: Install ctoml
        run: |
          cargo install ctoml
      - name: Install Tauri dependencies
        run: |
          sudo apt update
          sudo apt install -y libglib2.0-dev libatk1.0-dev libpango1.0-dev libgtk-3-dev libsoup-3.0-dev libwebkit2gtk-4.1-dev librsvg2-dev libfuse2
      # Stamp the version everywhere (tauri.conf.json, web-app, both tauri
      # plugins, and the app Cargo.toml). Unlike the "external" variant,
      # createUpdaterArtifacts is enabled here. Non-stable channels also bundle
      # bun into the .deb, point the updater endpoint at the channel's
      # latest.json, and rename the app/workspace to the channel variant.
      - name: Update app version base public_provider
        run: |
          echo "Version: ${{ inputs.new_version }}"
          # Update tauri.conf.json
          jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = true' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
          mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
          if [ "${{ inputs.channel }}" != "stable" ]; then
            jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json
            mv /tmp/tauri.linux.conf.json ./src-tauri/tauri.linux.conf.json
          fi
          jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json
          mv /tmp/package.json web-app/package.json
          # Update tauri plugin versions
          jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-hardware/package.json > /tmp/package.json
          mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-hardware/package.json
          echo "---------./src-tauri/plugins/tauri-plugin-hardware/package.json---------"
          cat ./src-tauri/plugins/tauri-plugin-hardware/package.json
          jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-llamacpp/package.json > /tmp/package.json
          mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
          echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/package.json---------"
          cat ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
          ctoml ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml package.version "${{ inputs.new_version }}"
          echo "---------./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml---------"
          cat ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml
          ctoml ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml package.version "${{ inputs.new_version }}"
          echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml---------"
          cat ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml
          ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}"
          echo "---------./src-tauri/Cargo.toml---------"
          cat ./src-tauri/Cargo.toml
          # Temporarily enable devtool on prod build
          ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
          cat ./src-tauri/Cargo.toml
          # Change app name for beta and nightly builds
          if [ "${{ inputs.channel }}" != "stable" ]; then
            jq '.plugins.updater.endpoints = ["https://delta.jan.ai/${{ inputs.channel }}/latest.json"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
            mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
            chmod +x .github/scripts/rename-tauri-app.sh
            .github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }}
            cat ./src-tauri/tauri.conf.json
            # Update Cargo.toml
            ctoml ./src-tauri/Cargo.toml package.name "Jan-${{ inputs.channel }}"
            ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
            echo "------------------"
            cat ./src-tauri/Cargo.toml
            chmod +x .github/scripts/rename-workspace.sh
            .github/scripts/rename-workspace.sh ./package.json ${{ inputs.channel }}
            cat ./package.json
          fi
      # Build, then sign the produced AppImage with the Tauri updater key so
      # the in-app updater will accept it.
      - name: Build app
        run: |
          make build
          APP_IMAGE=./src-tauri/target/release/bundle/appimage/$(ls ./src-tauri/target/release/bundle/appimage/ | grep AppImage | head -1)
          yarn tauri signer sign \
            --private-key "$TAURI_SIGNING_PRIVATE_KEY" \
            --password "$TAURI_SIGNING_PRIVATE_KEY_PASSWORD" \
            "$APP_IMAGE"
        env:
          RELEASE_CHANNEL: '${{ inputs.channel }}'
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
          POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
          TAURI_SIGNING_PRIVATE_KEY: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY }}
          TAURI_SIGNING_PRIVATE_KEY_PASSWORD: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY_PASSWORD }}
          AUTO_UPDATER_DISABLED: ${{ inputs.disable_updater && 'true' || 'false' }}
      # Publish app
      ## Artifacts, for dev and test
      - name: Upload Artifact
        if: inputs.public_provider != 'github'
        uses: actions/upload-artifact@v4
        with:
          name: jan-linux-amd64-flatpak-${{ inputs.new_version }}-deb
          path: ./src-tauri/target/release/bundle/deb/*.deb
      - name: Upload Artifact
        if: inputs.public_provider != 'github'
        uses: actions/upload-artifact@v4
        with:
          name: jan-linux-amd64-flatpak-${{ inputs.new_version }}-AppImage
          path: ./src-tauri/target/release/bundle/appimage/*.AppImage

View File

@ -28,11 +28,6 @@ on:
type: string type: string
default: 'nightly' default: 'nightly'
description: 'The channel to use for this job' description: 'The channel to use for this job'
disable_updater:
required: false
type: boolean
default: false
description: 'If true, builds both .deb and .appimage but disables auto-updater'
secrets: secrets:
DELTA_AWS_S3_BUCKET_NAME: DELTA_AWS_S3_BUCKET_NAME:
required: false required: false
@ -44,6 +39,8 @@ on:
required: false required: false
TAURI_SIGNING_PRIVATE_KEY_PASSWORD: TAURI_SIGNING_PRIVATE_KEY_PASSWORD:
required: false required: false
TAURI_SIGNING_PUBLIC_KEY:
required: false
outputs: outputs:
DEB_SIG: DEB_SIG:
value: ${{ jobs.build-linux-x64.outputs.DEB_SIG }} value: ${{ jobs.build-linux-x64.outputs.DEB_SIG }}
@ -53,7 +50,7 @@ on:
value: ${{ jobs.build-linux-x64.outputs.APPIMAGE_FILE_NAME }} value: ${{ jobs.build-linux-x64.outputs.APPIMAGE_FILE_NAME }}
jobs: jobs:
build-linux-x64: build-linux-x64:
runs-on: ubuntu-latest runs-on: ubuntu-22.04
outputs: outputs:
DEB_SIG: ${{ steps.packageinfo.outputs.DEB_SIG }} DEB_SIG: ${{ steps.packageinfo.outputs.DEB_SIG }}
APPIMAGE_SIG: ${{ steps.packageinfo.outputs.APPIMAGE_SIG }} APPIMAGE_SIG: ${{ steps.packageinfo.outputs.APPIMAGE_SIG }}
@ -101,51 +98,36 @@ jobs:
- name: Install Tauri dependencies - name: Install Tauri dependencies
run: | run: |
sudo apt update sudo apt update
sudo apt install -y libglib2.0-dev libatk1.0-dev libpango1.0-dev libgtk-3-dev libsoup-3.0-dev libwebkit2gtk-4.1-dev librsvg2-dev libfuse2 libayatana-appindicator3-dev sudo apt install -y libglib2.0-dev libatk1.0-dev libpango1.0-dev libgtk-3-dev libsoup-3.0-dev libwebkit2gtk-4.1-dev librsvg2-dev libfuse2
- name: Update app version base public_provider - name: Update app version base public_provider
run: | run: |
echo "Version: ${{ inputs.new_version }}" echo "Version: ${{ inputs.new_version }}"
# Update tauri.conf.json # Update tauri.conf.json
jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = true' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = true | .bundle.resources = ["resources/pre-install/**/*"] | .bundle.externalBin = ["binaries/cortex-server", "resources/bin/uv"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
if [ "${{ inputs.channel }}" != "stable" ]; then if [ "${{ inputs.channel }}" != "stable" ]; then
jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun",
mv /tmp/tauri.linux.conf.json ./src-tauri/tauri.linux.conf.json "usr/lib/Jan-${{ inputs.channel }}/binaries": "binaries/deps",
"usr/lib/Jan-${{ inputs.channel }}/binaries/engines": "binaries/engines",
"usr/lib/Jan-${{ inputs.channel }}/binaries/libvulkan.so": "binaries/libvulkan.so"}' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
else
jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun",
"usr/lib/Jan/binaries": "binaries/deps",
"usr/lib/Jan/binaries/engines": "binaries/engines",
"usr/lib/Jan/binaries/libvulkan.so": "binaries/libvulkan.so"}' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
fi fi
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json
mv /tmp/package.json web-app/package.json mv /tmp/package.json web-app/package.json
# Update tauri plugin versions
jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-hardware/package.json > /tmp/package.json
mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-hardware/package.json
echo "---------./src-tauri/plugins/tauri-plugin-hardware/package.json---------"
cat ./src-tauri/plugins/tauri-plugin-hardware/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-llamacpp/package.json > /tmp/package.json
mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/package.json---------"
cat ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
ctoml ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml---------"
cat ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml
ctoml ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml---------"
cat ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml
ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/Cargo.toml---------"
cat ./src-tauri/Cargo.toml
# Temporarily enable devtool on prod build # Temporarily enable devtool on prod build
ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools" ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
cat ./src-tauri/Cargo.toml cat ./src-tauri/Cargo.toml
ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}"
cat ./src-tauri/Cargo.toml
# Change app name for beta and nightly builds # Change app name for beta and nightly builds
if [ "${{ inputs.channel }}" != "stable" ]; then if [ "${{ inputs.channel }}" != "stable" ]; then
jq '.plugins.updater.endpoints = ["https://delta.jan.ai/${{ inputs.channel }}/latest.json"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json jq '.plugins.updater.endpoints = ["https://delta.jan.ai/${{ inputs.channel }}/latest.json"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
@ -155,7 +137,7 @@ jobs:
.github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }} .github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }}
cat ./src-tauri/tauri.conf.json cat ./src-tauri/tauri.conf.json
# Update Cargo.toml # Update Cargo.toml
ctoml ./src-tauri/Cargo.toml package.name "Jan-${{ inputs.channel }}" ctoml ./src-tauri/Cargo.toml package.name "Jan-${{ inputs.channel }}"
ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools" ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
@ -168,22 +150,56 @@ jobs:
fi fi
- name: Build app - name: Build app
run: | run: |
make build # Pin linuxdeploy version to prevent @tauri-apps/cli-linux-x64-gnu from pulling in an outdated version
TAURI_TOOLKIT_PATH="${XDG_CACHE_HOME:-$HOME/.cache}/tauri"
mkdir -p "$TAURI_TOOLKIT_PATH"
wget https://github.com/linuxdeploy/linuxdeploy/releases/download/1-alpha-20250213-2/linuxdeploy-x86_64.AppImage -O "$TAURI_TOOLKIT_PATH/linuxdeploy-x86_64.AppImage"
chmod +x "$TAURI_TOOLKIT_PATH/linuxdeploy-x86_64.AppImage"
APP_IMAGE=./src-tauri/target/release/bundle/appimage/$(ls ./src-tauri/target/release/bundle/appimage/ | grep AppImage | head -1) make build-tauri
yarn tauri signer sign \ # Copy engines and bun to appimage
--private-key "$TAURI_SIGNING_PRIVATE_KEY" \ wget https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage -O ./appimagetool
--password "$TAURI_SIGNING_PRIVATE_KEY_PASSWORD" \ chmod +x ./appimagetool
"$APP_IMAGE" if [ "${{ inputs.channel }}" != "stable" ]; then
ls ./src-tauri/target/release/bundle/appimage/
cp ./src-tauri/resources/bin/bun ./src-tauri/target/release/bundle/appimage/Jan-${{ inputs.channel }}.AppDir/usr/bin/bun
mkdir -p ./src-tauri/target/release/bundle/appimage/Jan-${{ inputs.channel }}.AppDir/usr/lib/Jan-${{ inputs.channel }}/binaries/engines
cp -f ./src-tauri/binaries/deps/*.so* ./src-tauri/target/release/bundle/appimage/Jan-${{ inputs.channel }}.AppDir/usr/lib/Jan-${{ inputs.channel }}/binaries/
cp -f ./src-tauri/binaries/*.so* ./src-tauri/target/release/bundle/appimage/Jan-${{ inputs.channel }}.AppDir/usr/lib/Jan-${{ inputs.channel }}/binaries/
cp -rf ./src-tauri/binaries/engines ./src-tauri/target/release/bundle/appimage/Jan-${{ inputs.channel }}.AppDir/usr/lib/Jan-${{ inputs.channel }}/binaries/
APP_IMAGE=./src-tauri/target/release/bundle/appimage/$(ls ./src-tauri/target/release/bundle/appimage/ | grep .AppImage | head -1)
echo $APP_IMAGE
rm -f $APP_IMAGE
./appimagetool ./src-tauri/target/release/bundle/appimage/Jan-${{ inputs.channel }}.AppDir $APP_IMAGE
yarn tauri signer sign \
--private-key "$TAURI_SIGNING_PRIVATE_KEY" \
--password "$TAURI_SIGNING_PRIVATE_KEY_PASSWORD" \
"$APP_IMAGE"
else
cp ./src-tauri/resources/bin/bun ./src-tauri/target/release/bundle/appimage/Jan.AppDir/usr/bin/bun
mkdir -p ./src-tauri/target/release/bundle/appimage/Jan.AppDir/usr/lib/Jan/binaries/engines
cp -f ./src-tauri/binaries/deps/*.so* ./src-tauri/target/release/bundle/appimage/Jan.AppDir/usr/lib/Jan/binaries/
cp -f ./src-tauri/binaries/*.so* ./src-tauri/target/release/bundle/appimage/Jan.AppDir/usr/lib/Jan/binaries/
cp -rf ./src-tauri/binaries/engines ./src-tauri/target/release/bundle/appimage/Jan.AppDir/usr/lib/Jan/binaries/
APP_IMAGE=./src-tauri/target/release/bundle/appimage/$(ls ./src-tauri/target/release/bundle/appimage/ | grep AppImage | head -1)
echo $APP_IMAGE
rm -f $APP_IMAGE
./appimagetool ./src-tauri/target/release/bundle/appimage/Jan.AppDir $APP_IMAGE
yarn tauri signer sign \
--private-key "$TAURI_SIGNING_PRIVATE_KEY" \
--password "$TAURI_SIGNING_PRIVATE_KEY_PASSWORD" \
"$APP_IMAGE"
fi
env: env:
RELEASE_CHANNEL: '${{ inputs.channel }}'
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }} POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }} POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
# CORTEX_API_PORT: ${{ inputs.cortex_api_port }}
TAURI_SIGNING_PRIVATE_KEY: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY }} TAURI_SIGNING_PRIVATE_KEY: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY }}
TAURI_SIGNING_PRIVATE_KEY_PASSWORD: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY_PASSWORD }} TAURI_SIGNING_PRIVATE_KEY_PASSWORD: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY_PASSWORD }}
AUTO_UPDATER_DISABLED: ${{ inputs.disable_updater && 'true' || 'false' }} TAURI_SIGNING_PUBLIC_KEY: ${{ secrets.TAURI_SIGNING_PUBLIC_KEY }}
# Publish app # Publish app
## Artifacts, for dev and test ## Artifacts, for dev and test
@ -201,8 +217,8 @@ jobs:
name: jan-linux-amd64-${{ inputs.new_version }}-AppImage name: jan-linux-amd64-${{ inputs.new_version }}-AppImage
path: ./src-tauri/target/release/bundle/appimage/*.AppImage path: ./src-tauri/target/release/bundle/appimage/*.AppImage
## Set output filename for linux ## create zip file and latest-linux.yml for linux electron auto updater
- name: Set output filename for linux - name: Create zip file and latest-linux.yml for linux electron auto updater
id: packageinfo id: packageinfo
run: | run: |
cd ./src-tauri/target/release/bundle cd ./src-tauri/target/release/bundle
@ -219,6 +235,35 @@ jobs:
APPIMAGE_SIG=$(cat appimage/Jan_${{ inputs.new_version }}_amd64.AppImage.sig) APPIMAGE_SIG=$(cat appimage/Jan_${{ inputs.new_version }}_amd64.AppImage.sig)
fi fi
DEB_FILE_SIZE=$(stat -c%s deb/$DEB_FILE_NAME)
APPIMAGE_FILE_SIZE=$(stat -c%s appimage/$APPIMAGE_FILE_NAME)
echo "deb file size: $DEB_FILE_SIZE"
echo "appimage file size: $APPIMAGE_FILE_SIZE"
DEB_SH512_CHECKSUM=$(python3 ../../../../.github/scripts/electron-checksum.py deb/$DEB_FILE_NAME)
APPIMAGE_SH512_CHECKSUM=$(python3 ../../../../.github/scripts/electron-checksum.py appimage/$APPIMAGE_FILE_NAME)
echo "deb sh512 checksum: $DEB_SH512_CHECKSUM"
echo "appimage sh512 checksum: $APPIMAGE_SH512_CHECKSUM"
CURRENT_TIME=$(date -u +"%Y-%m-%dT%H:%M:%S.%3NZ")
echo "releaseDate: $CURRENT_TIME"
# Create latest-linux.yml file
echo "version: ${{ inputs.new_version }}" > latest-linux.yml
echo "files:" >> latest-linux.yml
echo " - url: $DEB_FILE_NAME" >> latest-linux.yml
echo " sha512: $DEB_SH512_CHECKSUM" >> latest-linux.yml
echo " size: $DEB_FILE_SIZE" >> latest-linux.yml
echo " - url: $APPIMAGE_FILE_NAME" >> latest-linux.yml
echo " sha512: $APPIMAGE_SH512_CHECKSUM" >> latest-linux.yml
echo " size: $APPIMAGE_FILE_SIZE" >> latest-linux.yml
echo "path: $APPIMAGE_FILE_NAME" >> latest-linux.yml
echo "sha512: $APPIMAGE_SH512_CHECKSUM" >> latest-linux.yml
echo "releaseDate: $CURRENT_TIME" >> latest-linux.yml
cat latest-linux.yml
cp latest-linux.yml beta-linux.yml
echo "DEB_SIG=$DEB_SIG" >> $GITHUB_OUTPUT echo "DEB_SIG=$DEB_SIG" >> $GITHUB_OUTPUT
echo "APPIMAGE_SIG=$APPIMAGE_SIG" >> $GITHUB_OUTPUT echo "APPIMAGE_SIG=$APPIMAGE_SIG" >> $GITHUB_OUTPUT
echo "DEB_FILE_NAME=$DEB_FILE_NAME" >> $GITHUB_OUTPUT echo "DEB_FILE_NAME=$DEB_FILE_NAME" >> $GITHUB_OUTPUT
@ -230,6 +275,10 @@ jobs:
run: | run: |
cd ./src-tauri/target/release/bundle cd ./src-tauri/target/release/bundle
# Upload for electron updater for nightly
aws s3 cp ./latest-linux.yml s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/latest-linux.yml
aws s3 cp ./beta-linux.yml s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/beta-linux.yml
# Upload for tauri updater # Upload for tauri updater
aws s3 cp ./appimage/Jan-${{ inputs.channel }}_${{ inputs.new_version }}_amd64.AppImage s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/Jan-${{ inputs.channel }}_${{ inputs.new_version }}_amd64.AppImage aws s3 cp ./appimage/Jan-${{ inputs.channel }}_${{ inputs.new_version }}_amd64.AppImage s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/Jan-${{ inputs.channel }}_${{ inputs.new_version }}_amd64.AppImage
aws s3 cp ./deb/Jan-${{ inputs.channel }}_${{ inputs.new_version }}_amd64.deb s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/Jan-${{ inputs.channel }}_${{ inputs.new_version }}_amd64.deb aws s3 cp ./deb/Jan-${{ inputs.channel }}_${{ inputs.new_version }}_amd64.deb s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/Jan-${{ inputs.channel }}_${{ inputs.new_version }}_amd64.deb
@ -241,6 +290,28 @@ jobs:
AWS_DEFAULT_REGION: ${{ secrets.DELTA_AWS_REGION }} AWS_DEFAULT_REGION: ${{ secrets.DELTA_AWS_REGION }}
AWS_EC2_METADATA_DISABLED: 'true' AWS_EC2_METADATA_DISABLED: 'true'
## Upload to github release for stable release
- name: Upload release assert if public provider is github
if: inputs.channel == 'stable'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
uses: actions/upload-release-asset@v1.0.1
with:
upload_url: ${{ inputs.upload_url }}
asset_path: ./src-tauri/target/release/bundle/latest-linux.yml
asset_name: latest-linux.yml
asset_content_type: text/yaml
- name: Upload release assert if public provider is github
if: inputs.channel == 'beta'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
uses: actions/upload-release-asset@v1.0.1
with:
upload_url: ${{ inputs.upload_url }}
asset_path: ./src-tauri/target/release/bundle/beta-linux.yml
asset_name: beta-linux.yml
asset_content_type: text/yaml
- name: Upload release assert if public provider is github - name: Upload release assert if public provider is github
if: inputs.public_provider == 'github' if: inputs.public_provider == 'github'
env: env:

View File

@ -1,103 +0,0 @@
name: tauri-build-macos-external
on:
workflow_call:
inputs:
ref:
required: true
type: string
default: 'refs/heads/main'
new_version:
required: true
type: string
default: ''
channel:
required: true
type: string
default: 'nightly'
description: 'The channel to use for this job'
jobs:
build-macos-external:
runs-on: macos-latest
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
ref: ${{ inputs.ref }}
- name: Replace Icons for Beta Build
if: inputs.channel != 'stable'
shell: bash
run: |
cp .github/scripts/icon-${{ inputs.channel }}.png src-tauri/icons/icon.png
- name: Installing node
uses: actions/setup-node@v1
with:
node-version: 20
- name: Install jq
uses: dcarbone/install-jq-action@v2.0.1
- name: Install ctoml
run: |
cargo install ctoml
- name: Update app version
run: |
echo "Version: ${{ inputs.new_version }}"
jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = false' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json
mv /tmp/package.json web-app/package.json
# Update tauri plugin versions
jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-hardware/package.json > /tmp/package.json
mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-hardware/package.json
echo "---------./src-tauri/plugins/tauri-plugin-hardware/package.json---------"
cat ./src-tauri/plugins/tauri-plugin-hardware/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-llamacpp/package.json > /tmp/package.json
mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/package.json---------"
cat ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
ctoml ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml---------"
cat ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml
ctoml ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml---------"
cat ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml
ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/Cargo.toml---------"
cat ./src-tauri/Cargo.toml
ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
if [ "${{ inputs.channel }}" != "stable" ]; then
jq '.plugins.updater.endpoints = ["https://delta.jan.ai/${{ inputs.channel }}/latest.json"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
chmod +x .github/scripts/rename-tauri-app.sh
.github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }}
ctoml ./src-tauri/Cargo.toml package.name "Jan-${{ inputs.channel }}"
ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
chmod +x .github/scripts/rename-workspace.sh
.github/scripts/rename-workspace.sh ./package.json ${{ inputs.channel }}
fi
- name: Build app
run: |
make build
env:
APP_PATH: '.'
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: jan-${{ inputs.channel }}-mac-universal-${{ inputs.new_version }}.dmg
path: |
./src-tauri/target/universal-apple-darwin/release/bundle/dmg/*.dmg

View File

@ -49,13 +49,11 @@ on:
required: false required: false
TAURI_SIGNING_PRIVATE_KEY_PASSWORD: TAURI_SIGNING_PRIVATE_KEY_PASSWORD:
required: false required: false
TAURI_SIGNING_PUBLIC_KEY:
required: false
outputs: outputs:
MAC_UNIVERSAL_SIG: MAC_UNIVERSAL_SIG:
value: ${{ jobs.build-macos.outputs.MAC_UNIVERSAL_SIG }} value: ${{ jobs.build-macos.outputs.MAC_UNIVERSAL_SIG }}
FILE_NAME:
value: ${{ jobs.build-macos.outputs.FILE_NAME }}
DMG_NAME:
value: ${{ jobs.build-macos.outputs.DMG_NAME }}
TAR_NAME: TAR_NAME:
value: ${{ jobs.build-macos.outputs.TAR_NAME }} value: ${{ jobs.build-macos.outputs.TAR_NAME }}
@ -64,8 +62,6 @@ jobs:
runs-on: macos-latest runs-on: macos-latest
outputs: outputs:
MAC_UNIVERSAL_SIG: ${{ steps.metadata.outputs.MAC_UNIVERSAL_SIG }} MAC_UNIVERSAL_SIG: ${{ steps.metadata.outputs.MAC_UNIVERSAL_SIG }}
FILE_NAME: ${{ steps.metadata.outputs.FILE_NAME }}
DMG_NAME: ${{ steps.metadata.outputs.DMG_NAME }}
TAR_NAME: ${{ steps.metadata.outputs.TAR_NAME }} TAR_NAME: ${{ steps.metadata.outputs.TAR_NAME }}
permissions: permissions:
contents: write contents: write
@ -92,6 +88,31 @@ jobs:
run: | run: |
cargo install ctoml cargo install ctoml
- name: Create bun and uv universal
run: |
mkdir -p ./src-tauri/resources/bin/
cd ./src-tauri/resources/bin/
curl -L -o bun-darwin-x64.zip https://github.com/oven-sh/bun/releases/download/bun-v1.2.10/bun-darwin-x64.zip
curl -L -o bun-darwin-aarch64.zip https://github.com/oven-sh/bun/releases/download/bun-v1.2.10/bun-darwin-aarch64.zip
unzip bun-darwin-x64.zip
unzip bun-darwin-aarch64.zip
lipo -create -output bun-universal-apple-darwin bun-darwin-x64/bun bun-darwin-aarch64/bun
cp -f bun-darwin-aarch64/bun bun-aarch64-apple-darwin
cp -f bun-darwin-x64/bun bun-x86_64-apple-darwin
cp -f bun-universal-apple-darwin bun
curl -L -o uv-x86_64.tar.gz https://github.com/astral-sh/uv/releases/download/0.6.17/uv-x86_64-apple-darwin.tar.gz
curl -L -o uv-arm64.tar.gz https://github.com/astral-sh/uv/releases/download/0.6.17/uv-aarch64-apple-darwin.tar.gz
tar -xzf uv-x86_64.tar.gz
tar -xzf uv-arm64.tar.gz
mv uv-x86_64-apple-darwin uv-x86_64
mv uv-aarch64-apple-darwin uv-aarch64
lipo -create -output uv-universal-apple-darwin uv-x86_64/uv uv-aarch64/uv
cp -f uv-x86_64/uv uv-x86_64-apple-darwin
cp -f uv-aarch64/uv uv-aarch64-apple-darwin
cp -f uv-universal-apple-darwin uv
ls -la
- name: Update app version based on latest release tag with build number - name: Update app version based on latest release tag with build number
run: | run: |
echo "Version: ${{ inputs.new_version }}" echo "Version: ${{ inputs.new_version }}"
@ -101,30 +122,7 @@ jobs:
jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json
mv /tmp/package.json web-app/package.json mv /tmp/package.json web-app/package.json
# Update tauri plugin versions
jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-hardware/package.json > /tmp/package.json
mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-hardware/package.json
echo "---------./src-tauri/plugins/tauri-plugin-hardware/package.json---------"
cat ./src-tauri/plugins/tauri-plugin-hardware/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-llamacpp/package.json > /tmp/package.json
mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/package.json---------"
cat ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
ctoml ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml---------"
cat ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml
ctoml ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml---------"
cat ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml
ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}" ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/Cargo.toml---------"
cat ./src-tauri/Cargo.toml cat ./src-tauri/Cargo.toml
# Temporarily enable devtool on prod build # Temporarily enable devtool on prod build
@ -165,12 +163,14 @@ jobs:
- name: Build app - name: Build app
run: | run: |
make build rustup target add x86_64-apple-darwin
make build-tauri
env: env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
APP_PATH: '.' APP_PATH: '.'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }} POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }} POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
# CORTEX_API_PORT: ${{ inputs.cortex_api_port }}
APPLE_CERTIFICATE: ${{ secrets.CODE_SIGN_P12_BASE64 }} APPLE_CERTIFICATE: ${{ secrets.CODE_SIGN_P12_BASE64 }}
APPLE_CERTIFICATE_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }} APPLE_CERTIFICATE_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
APPLE_API_ISSUER: ${{ secrets.NOTARY_ISSUER }} APPLE_API_ISSUER: ${{ secrets.NOTARY_ISSUER }}
@ -178,6 +178,7 @@ jobs:
APPLE_API_KEY_PATH: /tmp/notary-key.p8 APPLE_API_KEY_PATH: /tmp/notary-key.p8
TAURI_SIGNING_PRIVATE_KEY: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY }} TAURI_SIGNING_PRIVATE_KEY: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY }}
TAURI_SIGNING_PRIVATE_KEY_PASSWORD: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY_PASSWORD }} TAURI_SIGNING_PRIVATE_KEY_PASSWORD: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY_PASSWORD }}
TAURI_SIGNING_PUBLIC_KEY: ${{ secrets.TAURI_SIGNING_PUBLIC_KEY }}
# Publish app # Publish app
@ -190,8 +191,8 @@ jobs:
path: | path: |
./src-tauri/target/universal-apple-darwin/release/bundle/dmg/*.dmg ./src-tauri/target/universal-apple-darwin/release/bundle/dmg/*.dmg
## Set output filename for mac ## create zip file and latest-mac.yml for mac electron auto updater
- name: Set output filename for mac - name: create zip file and latest-mac.yml for mac electron auto updater
run: | run: |
cd ./src-tauri/target/universal-apple-darwin/release/bundle/macos cd ./src-tauri/target/universal-apple-darwin/release/bundle/macos
if [ "${{ inputs.channel }}" != "stable" ]; then if [ "${{ inputs.channel }}" != "stable" ]; then
@ -208,6 +209,27 @@ jobs:
TAR_NAME=Jan.app.tar.gz TAR_NAME=Jan.app.tar.gz
fi fi
FILE_SIZE=$(stat -f%z $FILE_NAME)
echo "size: $FILE_SIZE"
SH512_CHECKSUM=$(python3 ../../../../../../.github/scripts/electron-checksum.py $FILE_NAME)
echo "sha512: $SH512_CHECKSUM"
CURRENT_TIME=$(date -u +"%Y-%m-%dT%H:%M:%S.%3NZ")
echo "releaseDate: $CURRENT_TIME"
# Create latest-mac.yml file
echo "version: ${{ inputs.new_version }}" > latest-mac.yml
echo "files:" >> latest-mac.yml
echo " - url: $FILE_NAME" >> latest-mac.yml
echo " sha512: $SH512_CHECKSUM" >> latest-mac.yml
echo " size: $FILE_SIZE" >> latest-mac.yml
echo "path: $FILE_NAME" >> latest-mac.yml
echo "sha512: $SH512_CHECKSUM" >> latest-mac.yml
echo "releaseDate: $CURRENT_TIME" >> latest-mac.yml
cat latest-mac.yml
cp latest-mac.yml beta-mac.yml
echo "::set-output name=MAC_UNIVERSAL_SIG::$MAC_UNIVERSAL_SIG" echo "::set-output name=MAC_UNIVERSAL_SIG::$MAC_UNIVERSAL_SIG"
echo "::set-output name=FILE_NAME::$FILE_NAME" echo "::set-output name=FILE_NAME::$FILE_NAME"
echo "::set-output name=DMG_NAME::$DMG_NAME" echo "::set-output name=DMG_NAME::$DMG_NAME"
@ -220,6 +242,12 @@ jobs:
run: | run: |
cd ./src-tauri/target/universal-apple-darwin/release/bundle cd ./src-tauri/target/universal-apple-darwin/release/bundle
# Upload for electron updater for nightly
aws s3 cp ./macos/latest-mac.yml s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/latest-mac.yml
aws s3 cp ./macos/beta-mac.yml s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/beta-mac.yml
aws s3 cp ./macos/jan-${{ inputs.channel }}-mac-universal-${{ inputs.new_version }}.zip s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/jan-${{ inputs.channel }}-mac-universal-${{ inputs.new_version }}.zip
# aws s3 cp ./macos/jan-${{ inputs.channel }}-mac-universal-${{ inputs.new_version }}.zip.sig s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/jan-${{ inputs.channel }}-mac-universal-${{ inputs.new_version }}.zip.sig
# Upload for tauri updater # Upload for tauri updater
aws s3 cp ./dmg/Jan-${{ inputs.channel }}_${{ inputs.new_version }}_universal.dmg s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/Jan-${{ inputs.channel }}_${{ inputs.new_version }}_universal.dmg aws s3 cp ./dmg/Jan-${{ inputs.channel }}_${{ inputs.new_version }}_universal.dmg s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/Jan-${{ inputs.channel }}_${{ inputs.new_version }}_universal.dmg
aws s3 cp ./macos/Jan-${{ inputs.channel }}.app.tar.gz s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/Jan-${{ inputs.channel }}_${{ inputs.new_version }}.app.tar.gz aws s3 cp ./macos/Jan-${{ inputs.channel }}.app.tar.gz s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/Jan-${{ inputs.channel }}_${{ inputs.new_version }}.app.tar.gz
@ -230,6 +258,29 @@ jobs:
AWS_DEFAULT_REGION: ${{ secrets.DELTA_AWS_REGION }} AWS_DEFAULT_REGION: ${{ secrets.DELTA_AWS_REGION }}
AWS_EC2_METADATA_DISABLED: 'true' AWS_EC2_METADATA_DISABLED: 'true'
## Upload to github release for stable release
- name: Upload release assert if public provider is github
if: inputs.channel == 'stable'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
uses: actions/upload-release-asset@v1.0.1
with:
upload_url: ${{ inputs.upload_url }}
asset_path: ./src-tauri/target/universal-apple-darwin/release/bundle/macos/latest-mac.yml
asset_name: latest-mac.yml
asset_content_type: text/yaml
- name: Upload release assert if public provider is github
if: inputs.channel == 'beta'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
uses: actions/upload-release-asset@v1.0.1
with:
upload_url: ${{ inputs.upload_url }}
asset_path: ./src-tauri/target/universal-apple-darwin/release/bundle/macos/beta-mac.yml
asset_name: beta-mac.yml
asset_content_type: text/yaml
- name: Upload release assert if public provider is github - name: Upload release assert if public provider is github
if: inputs.public_provider == 'github' if: inputs.public_provider == 'github'
env: env:

View File

@ -1,156 +0,0 @@
name: tauri-build-windows-x64-external
on:
workflow_call:
inputs:
ref:
required: true
type: string
default: 'refs/heads/main'
new_version:
required: true
type: string
default: ''
channel:
required: true
type: string
default: 'nightly'
description: 'The channel to use for this job'
jobs:
build-windows-x64-external:
runs-on: windows-latest
steps:
- name: Getting the repo
uses: actions/checkout@v3
with:
ref: ${{ inputs.ref }}
- name: Replace Icons for Beta Build
if: inputs.channel != 'stable'
shell: bash
run: |
cp .github/scripts/icon-${{ inputs.channel }}.png src-tauri/icons/icon.png
- name: Installing node
uses: actions/setup-node@v1
with:
node-version: 20
- name: Install jq
uses: dcarbone/install-jq-action@v2.0.1
- name: Install ctoml
run: |
cargo install ctoml
- name: Update app version
shell: bash
run: |
echo "Version: ${{ inputs.new_version }}"
# Update tauri.conf.json
jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = false' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
jq '.bundle.windows.nsis.template = "tauri.bundle.windows.nsis.template"' ./src-tauri/tauri.windows.conf.json > /tmp/tauri.windows.conf.json
mv /tmp/tauri.windows.conf.json ./src-tauri/tauri.windows.conf.json
jq '.bundle.windows.signCommand = "echo External build - skipping signature: %1"' ./src-tauri/tauri.windows.conf.json > /tmp/tauri.windows.conf.json
mv /tmp/tauri.windows.conf.json ./src-tauri/tauri.windows.conf.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json
mv /tmp/package.json web-app/package.json
# Update tauri plugin versions
jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-hardware/package.json > /tmp/package.json
mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-hardware/package.json
echo "---------./src-tauri/plugins/tauri-plugin-hardware/package.json---------"
cat ./src-tauri/plugins/tauri-plugin-hardware/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-llamacpp/package.json > /tmp/package.json
mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/package.json---------"
cat ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
ctoml ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml---------"
cat ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml
ctoml ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml---------"
cat ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml
ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/Cargo.toml---------"
cat ./src-tauri/Cargo.toml
generate_build_version() {
### Examble
### input 0.5.6 output will be 0.5.6 and 0.5.6.0
### input 0.5.6-rc2-beta output will be 0.5.6 and 0.5.6.2
### input 0.5.6-1213 output will be 0.5.6 and and 0.5.6.1213
local new_version="$1"
local base_version
local t_value
# Check if it has a "-"
if [[ "$new_version" == *-* ]]; then
base_version="${new_version%%-*}" # part before -
suffix="${new_version#*-}" # part after -
# Check if it is rcX-beta
if [[ "$suffix" =~ ^rc([0-9]+)-beta$ ]]; then
t_value="${BASH_REMATCH[1]}"
else
t_value="$suffix"
fi
else
base_version="$new_version"
t_value="0"
fi
# Export two values
new_base_version="$base_version"
new_build_version="${base_version}.${t_value}"
}
generate_build_version ${{ inputs.new_version }}
sed -i "s/jan_version/$new_base_version/g" ./src-tauri/tauri.bundle.windows.nsis.template
sed -i "s/jan_build/$new_build_version/g" ./src-tauri/tauri.bundle.windows.nsis.template
if [ "${{ inputs.channel }}" != "stable" ]; then
jq '.plugins.updater.endpoints = ["https://delta.jan.ai/${{ inputs.channel }}/latest.json"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
# Update product name
jq --arg name "Jan-${{ inputs.channel }}" '.productName = $name' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
chmod +x .github/scripts/rename-tauri-app.sh
.github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }}
echo "---------tauri.conf.json---------"
cat ./src-tauri/tauri.conf.json
# Update Cargo.toml
ctoml ./src-tauri/Cargo.toml package.name "Jan-${{ inputs.channel }}"
ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
echo "------------------"
cat ./src-tauri/Cargo.toml
chmod +x .github/scripts/rename-workspace.sh
.github/scripts/rename-workspace.sh ./package.json ${{ inputs.channel }}
cat ./package.json
sed -i "s/jan_productname/Jan-${{ inputs.channel }}/g" ./src-tauri/tauri.bundle.windows.nsis.template
sed -i "s/jan_mainbinaryname/jan-${{ inputs.channel }}/g" ./src-tauri/tauri.bundle.windows.nsis.template
else
sed -i "s/jan_productname/Jan/g" ./src-tauri/tauri.bundle.windows.nsis.template
sed -i "s/jan_mainbinaryname/jan/g" ./src-tauri/tauri.bundle.windows.nsis.template
fi
echo "---------nsis.template---------"
cat ./src-tauri/tauri.bundle.windows.nsis.template
- name: Build app
shell: bash
run: |
make build
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: jan-windows-${{ inputs.new_version }}
path: |
./src-tauri/target/release/bundle/nsis/*.exe

View File

@ -49,13 +49,13 @@ on:
required: false required: false
TAURI_SIGNING_PRIVATE_KEY_PASSWORD: TAURI_SIGNING_PRIVATE_KEY_PASSWORD:
required: false required: false
TAURI_SIGNING_PUBLIC_KEY:
required: false
outputs: outputs:
WIN_SIG: WIN_SIG:
value: ${{ jobs.build-windows-x64.outputs.WIN_SIG }} value: ${{ jobs.build-windows-x64.outputs.WIN_SIG }}
FILE_NAME: FILE_NAME:
value: ${{ jobs.build-windows-x64.outputs.FILE_NAME }} value: ${{ jobs.build-windows-x64.outputs.FILE_NAME }}
MSI_FILE_NAME:
value: ${{ jobs.build-windows-x64.outputs.MSI_FILE_NAME }}
jobs: jobs:
build-windows-x64: build-windows-x64:
@ -63,7 +63,6 @@ jobs:
outputs: outputs:
WIN_SIG: ${{ steps.metadata.outputs.WIN_SIG }} WIN_SIG: ${{ steps.metadata.outputs.WIN_SIG }}
FILE_NAME: ${{ steps.metadata.outputs.FILE_NAME }} FILE_NAME: ${{ steps.metadata.outputs.FILE_NAME }}
MSI_FILE_NAME: ${{ steps.metadata.outputs.MSI_FILE_NAME }}
permissions: permissions:
contents: write contents: write
steps: steps:
@ -96,55 +95,29 @@ jobs:
run: | run: |
echo "Version: ${{ inputs.new_version }}" echo "Version: ${{ inputs.new_version }}"
# Update tauri.conf.json # Update tauri.conf.json
jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = true' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = true | .bundle.windows.nsis.template = "tauri.bundle.windows.nsis.template"' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
jq '.bundle.windows.nsis.template = "tauri.bundle.windows.nsis.template"' ./src-tauri/tauri.windows.conf.json > /tmp/tauri.windows.conf.json
mv /tmp/tauri.windows.conf.json ./src-tauri/tauri.windows.conf.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json
mv /tmp/package.json web-app/package.json mv /tmp/package.json web-app/package.json
# Add sign commands to tauri.windows.conf.json
jq '.bundle.windows.signCommand = "powershell -ExecutionPolicy Bypass -File ./sign.ps1 %1"' ./src-tauri/tauri.windows.conf.json > /tmp/tauri.windows.conf.json
mv /tmp/tauri.windows.conf.json ./src-tauri/tauri.windows.conf.json
# Update tauri plugin versions
jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-hardware/package.json > /tmp/package.json
mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-hardware/package.json
echo "---------./src-tauri/plugins/tauri-plugin-hardware/package.json---------"
cat ./src-tauri/plugins/tauri-plugin-hardware/package.json
jq --arg version "${{ inputs.new_version }}" '.version = $version' ./src-tauri/plugins/tauri-plugin-llamacpp/package.json > /tmp/package.json
mv /tmp/package.json ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/package.json---------"
cat ./src-tauri/plugins/tauri-plugin-llamacpp/package.json
ctoml ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml---------"
cat ./src-tauri/plugins/tauri-plugin-hardware/Cargo.toml
ctoml ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml---------"
cat ./src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml
ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}" ctoml ./src-tauri/Cargo.toml package.version "${{ inputs.new_version }}"
echo "---------./src-tauri/Cargo.toml---------" echo "---------Cargo.toml---------"
cat ./src-tauri/Cargo.toml cat ./src-tauri/Cargo.toml
generate_build_version() { generate_build_version() {
### Example ### Examble
### input 0.5.6 output will be 0.5.6 and 0.5.6.0 ### input 0.5.6 output will be 0.5.6 and 0.5.6.0
### input 0.5.6-rc2-beta output will be 0.5.6 and 0.5.6.2 ### input 0.5.6-rc2-beta output will be 0.5.6 and 0.5.6.2
### input 0.5.6-1213 output will be 0.5.6 and and 0.5.6.1213 ### input 0.5.6-1213 output will be 0.5.6 and and 0.5.6.1213
local new_version="$1" local new_version="$1"
local base_version local base_version
local t_value local t_value
# Check if it has a "-" # Check if it has a "-"
if [[ "$new_version" == *-* ]]; then if [[ "$new_version" == *-* ]]; then
base_version="${new_version%%-*}" # part before - base_version="${new_version%%-*}" # part before -
suffix="${new_version#*-}" # part after - suffix="${new_version#*-}" # part after -
# Check if it is rcX-beta # Check if it is rcX-beta
if [[ "$suffix" =~ ^rc([0-9]+)-beta$ ]]; then if [[ "$suffix" =~ ^rc([0-9]+)-beta$ ]]; then
t_value="${BASH_REMATCH[1]}" t_value="${BASH_REMATCH[1]}"
@ -155,16 +128,14 @@ jobs:
base_version="$new_version" base_version="$new_version"
t_value="0" t_value="0"
fi fi
# Export two values # Export two values
new_base_version="$base_version" new_base_version="$base_version"
new_build_version="${base_version}.${t_value}" new_build_version="${base_version}.${t_value}"
} }
generate_build_version ${{ inputs.new_version }} generate_build_version ${{ inputs.new_version }}
sed -i "s/jan_version/$new_base_version/g" ./src-tauri/tauri.bundle.windows.nsis.template sed -i "s/jan_version/$new_base_version/g" ./src-tauri/tauri.bundle.windows.nsis.template
sed -i "s/jan_build/$new_build_version/g" ./src-tauri/tauri.bundle.windows.nsis.template sed -i "s/jan_build/$new_build_version/g" ./src-tauri/tauri.bundle.windows.nsis.template
echo "---------tauri.windows.conf.json---------"
cat ./src-tauri/tauri.windows.conf.json
# Temporarily enable devtool on prod build # Temporarily enable devtool on prod build
ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools" ctoml ./src-tauri/Cargo.toml dependencies.tauri.features[] "devtools"
@ -172,13 +143,8 @@ jobs:
# Change app name for beta and nightly builds # Change app name for beta and nightly builds
if [ "${{ inputs.channel }}" != "stable" ]; then if [ "${{ inputs.channel }}" != "stable" ]; then
# Update updater endpoint
jq '.plugins.updater.endpoints = ["https://delta.jan.ai/${{ inputs.channel }}/latest.json"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json jq '.plugins.updater.endpoints = ["https://delta.jan.ai/${{ inputs.channel }}/latest.json"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
# Update product name
jq --arg name "Jan-${{ inputs.channel }}" '.productName = $name' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
chmod +x .github/scripts/rename-tauri-app.sh chmod +x .github/scripts/rename-tauri-app.sh
.github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }} .github/scripts/rename-tauri-app.sh ./src-tauri/tauri.conf.json ${{ inputs.channel }}
@ -195,6 +161,7 @@ jobs:
chmod +x .github/scripts/rename-workspace.sh chmod +x .github/scripts/rename-workspace.sh
.github/scripts/rename-workspace.sh ./package.json ${{ inputs.channel }} .github/scripts/rename-workspace.sh ./package.json ${{ inputs.channel }}
cat ./package.json cat ./package.json
sed -i "s/jan_productname/Jan-${{ inputs.channel }}/g" ./src-tauri/tauri.bundle.windows.nsis.template sed -i "s/jan_productname/Jan-${{ inputs.channel }}/g" ./src-tauri/tauri.bundle.windows.nsis.template
sed -i "s/jan_mainbinaryname/jan-${{ inputs.channel }}/g" ./src-tauri/tauri.bundle.windows.nsis.template sed -i "s/jan_mainbinaryname/jan-${{ inputs.channel }}/g" ./src-tauri/tauri.bundle.windows.nsis.template
else else
@ -211,7 +178,10 @@ jobs:
- name: Build app - name: Build app
shell: bash shell: bash
run: | run: |
make build curl -L -o ./src-tauri/binaries/vcomp140.dll https://catalog.jan.ai/vcomp140.dll
curl -L -o ./src-tauri/binaries/msvcp140_codecvt_ids.dll https://catalog.jan.ai/msvcp140_codecvt_ids.dll
ls ./src-tauri/binaries
make build-tauri
env: env:
AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }} AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
@ -225,42 +195,54 @@ jobs:
AWS_MAX_ATTEMPTS: '5' AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }} POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }} POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
# CORTEX_API_PORT: ${{ inputs.cortex_api_port }}
TAURI_SIGNING_PRIVATE_KEY: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY }} TAURI_SIGNING_PRIVATE_KEY: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY }}
TAURI_SIGNING_PRIVATE_KEY_PASSWORD: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY_PASSWORD }} TAURI_SIGNING_PRIVATE_KEY_PASSWORD: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY_PASSWORD }}
TAURI_SIGNING_PUBLIC_KEY: ${{ secrets.TAURI_SIGNING_PUBLIC_KEY }}
- name: Upload Artifact - name: Upload Artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: jan-windows-exe-${{ inputs.new_version }} name: jan-windows-${{ inputs.new_version }}
path: | path: |
./src-tauri/target/release/bundle/nsis/*.exe ./src-tauri/target/release/bundle/nsis/*.exe
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: jan-windows-msi-${{ inputs.new_version }}
path: |
./src-tauri/target/release/bundle/msi/*.msi
## Set output filename for windows ## create zip file and latest.yml for windows electron auto updater
- name: Set output filename for windows - name: create zip file and latest.yml for windows electron auto updater
shell: bash shell: bash
run: | run: |
cd ./src-tauri/target/release/bundle/nsis cd ./src-tauri/target/release/bundle/nsis
if [ "${{ inputs.channel }}" != "stable" ]; then if [ "${{ inputs.channel }}" != "stable" ]; then
FILE_NAME=Jan-${{ inputs.channel }}_${{ inputs.new_version }}_x64-setup.exe FILE_NAME=Jan-${{ inputs.channel }}_${{ inputs.new_version }}_x64-setup.exe
WIN_SIG=$(cat Jan-${{ inputs.channel }}_${{ inputs.new_version }}_x64-setup.exe.sig) WIN_SIG=$(cat Jan-${{ inputs.channel }}_${{ inputs.new_version }}_x64-setup.exe.sig)
MSI_FILE="Jan-${{ inputs.channel }}_${{ inputs.new_version }}_x64_en-US.msi"
else else
FILE_NAME=Jan_${{ inputs.new_version }}_x64-setup.exe FILE_NAME=Jan_${{ inputs.new_version }}_x64-setup.exe
WIN_SIG=$(cat Jan_${{ inputs.new_version }}_x64-setup.exe.sig) WIN_SIG=$(cat Jan_${{ inputs.new_version }}_x64-setup.exe.sig)
MSI_FILE="Jan_${{ inputs.new_version }}_x64_en-US.msi"
fi fi
FILE_SIZE=$(stat -c %s $FILE_NAME)
echo "size: $FILE_SIZE"
SH512_CHECKSUM=$(python3 ../../../../../.github/scripts/electron-checksum.py $FILE_NAME)
echo "sha512: $SH512_CHECKSUM"
CURRENT_TIME=$(date -u +"%Y-%m-%dT%H:%M:%S.%3NZ")
echo "releaseDate: $CURRENT_TIME"
# Create latest.yml file
echo "version: ${{ inputs.new_version }}" > latest.yml
echo "files:" >> latest.yml
echo " - url: $FILE_NAME" >> latest.yml
echo " sha512: $SH512_CHECKSUM" >> latest.yml
echo " size: $FILE_SIZE" >> latest.yml
echo "path: $FILE_NAME" >> latest.yml
echo "sha512: $SH512_CHECKSUM" >> latest.yml
echo "releaseDate: $CURRENT_TIME" >> latest.yml
cat latest.yml
cp latest.yml beta.yml
echo "::set-output name=WIN_SIG::$WIN_SIG" echo "::set-output name=WIN_SIG::$WIN_SIG"
echo "::set-output name=FILE_NAME::$FILE_NAME" echo "::set-output name=FILE_NAME::$FILE_NAME"
echo "::set-output name=MSI_FILE_NAME::$MSI_FILE"
id: metadata id: metadata
## Upload to s3 for nightly and beta ## Upload to s3 for nightly and beta
@ -270,6 +252,10 @@ jobs:
run: | run: |
cd ./src-tauri/target/release/bundle/nsis cd ./src-tauri/target/release/bundle/nsis
# Upload for electron updater for nightly
aws s3 cp ./latest.yml s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/latest.yml
aws s3 cp ./beta.yml s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/beta.yml
# Upload for tauri updater # Upload for tauri updater
aws s3 cp ./${{ steps.metadata.outputs.FILE_NAME }} s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/${{ steps.metadata.outputs.FILE_NAME }} aws s3 cp ./${{ steps.metadata.outputs.FILE_NAME }} s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/${{ steps.metadata.outputs.FILE_NAME }}
aws s3 cp ./${{ steps.metadata.outputs.FILE_NAME }}.sig s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/${{ steps.metadata.outputs.FILE_NAME }}.sig aws s3 cp ./${{ steps.metadata.outputs.FILE_NAME }}.sig s3://${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}/temp-${{ inputs.channel }}/${{ steps.metadata.outputs.FILE_NAME }}.sig
@ -279,6 +265,29 @@ jobs:
AWS_DEFAULT_REGION: ${{ secrets.DELTA_AWS_REGION }} AWS_DEFAULT_REGION: ${{ secrets.DELTA_AWS_REGION }}
AWS_EC2_METADATA_DISABLED: 'true' AWS_EC2_METADATA_DISABLED: 'true'
## Upload to github release for stable release
- name: Upload release assert if public provider is github
if: inputs.channel == 'stable'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
uses: actions/upload-release-asset@v1.0.1
with:
upload_url: ${{ inputs.upload_url }}
asset_path: ./src-tauri/target/release/bundle/nsis/latest.yml
asset_name: latest.yml
asset_content_type: text/yaml
- name: Upload release assert if public provider is github
if: inputs.channel == 'beta'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
uses: actions/upload-release-asset@v1.0.1
with:
upload_url: ${{ inputs.upload_url }}
asset_path: ./src-tauri/target/release/bundle/nsis/beta.yml
asset_name: beta.yml
asset_content_type: text/yaml
- name: Upload release assert if public provider is github - name: Upload release assert if public provider is github
if: inputs.public_provider == 'github' if: inputs.public_provider == 'github'
env: env:

65
.gitignore vendored
View File

@ -1,65 +1,52 @@
.idea
.env .env
.idea
# Jan inference
error.log error.log
node_modules node_modules
*.tgz *.tgz
!charts/server/charts/*.tgz
dist dist
build build
.DS_Store .DS_Store
electron/renderer
electron/models
electron/docs
electron/engines
electron/themes
electron/playwright-report
server/pre-install
package-lock.json package-lock.json
coverage coverage
*.log *.log
core/lib/** core/lib/**
# Turborepo
.turbo
electron/test-data
electron/test-results
core/test_results.html
coverage
.yarn .yarn
.yarnrc .yarnrc
*.tsbuildinfo
test_results.html test_results.html
pre-install *.tsbuildinfo
electron/shared/**
# docs # docs
docs/yarn.lock docs/yarn.lock
electron/.version.bak
src-tauri/binaries/engines/cortex.llamacpp
src-tauri/resources/themes
src-tauri/resources/lib src-tauri/resources/lib
src-tauri/Cargo.lock
src-tauri/icons src-tauri/icons
!src-tauri/icons/icon.png !src-tauri/icons/icon.png
src-tauri/gen/apple src-tauri/gen/apple
src-tauri/gen/android
src-tauri/resources/bin src-tauri/resources/bin
# Helper tools # Helper tools
.opencode .opencode
OpenCode.md OpenCode.md
Claude.md archive/
archive/
.cache/
# auto qa
autoqa/trajectories
autoqa/recordings
autoqa/__pycache__
# Astro / Starlight specific
website/dist/
website/.astro/
website/src/content/config.ts.timestamp-*
# Nextra specific
docs/out/
docs/.next/
# General Node.js
**/node_modules
**/.env
**/.env.*
**/npm-debug.log*
**/yarn-debug.log*
**/yarn-error.log*
**/pnpm-debug.log*
## cargo
target
Cargo.lock
src-tauri/resources/
## test
test-data
llm-docs
.claude/agents

5
.vscode/extensions.json vendored Normal file
View File

@ -0,0 +1,5 @@
{
"recommendations": [
"esbenp.prettier-vscode"
]
}

7
.vscode/settings.json vendored Normal file
View File

@ -0,0 +1,7 @@
{
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"[rust]": {
"editor.defaultFormatter": "rust-lang.rust-analyzer"
}
}

View File

@ -1,252 +1,32 @@
# Contributing to Jan # Contributing to jan
First off, thank you for considering contributing to Jan. It's people like you that make Jan such an amazing project. First off, thank you for considering contributing to jan. It's people like you that make jan such an amazing project.
Jan is an AI assistant that can run 100% offline on your device. Think ChatGPT, but private, local, and under your complete control. If you're thinking about contributing, you're already awesome - let's make AI accessible to everyone, one commit at a time.
## Quick Links to Component Guides
- **[Web App](./web-app/CONTRIBUTING.md)** - React UI and logic
- **[Core SDK](./core/CONTRIBUTING.md)** - TypeScript SDK and extension system
- **[Extensions](./extensions/CONTRIBUTING.md)** - Supportive modules for the frontend
- **[Tauri Backend](./src-tauri/CONTRIBUTING.md)** - Rust native integration
- **[Tauri Plugins](./src-tauri/plugins/CONTRIBUTING.md)** - Hardware and system plugins
## How Jan Actually Works
Jan is a desktop app that runs local AI models. Here's how the components actually connect:
```
┌──────────────────────────────────────────────────────────┐
│ Web App (Frontend) │
│ (web-app/) │
│ • React UI │
│ • Chat Interface │
│ • Settings Pages │
│ • Model Hub │
└────────────┬─────────────────────────────┬───────────────┘
│ │
│ imports │ imports
▼ ▼
┌──────────────────────┐ ┌──────────────────────┐
│ Core SDK │ │ Extensions │
│ (core/) │ │ (extensions/) │
│ │ │ │
│ • TypeScript APIs │◄─────│ • Assistant Mgmt │
│ • Extension System │ uses │ • Conversations │
│ • Event Bus │ │ • Downloads │
│ • Type Definitions │ │ • LlamaCPP │
└──────────┬───────────┘ └───────────┬──────────┘
│ │
│ ┌──────────────────────┐ │
│ │ Web App │ │
│ └──────────┬───────────┘ │
│ │ │
└──────────────┼───────────────┘
Tauri IPC
(invoke commands)
┌───────────────────────────────────────────────────────────┐
│ Tauri Backend (Rust) │
│ (src-tauri/) │
│ │
│ • Window Management • File System Access │
│ • Process Control • System Integration │
│ • IPC Command Handler • Security & Permissions │
└───────────────────────────┬───────────────────────────────┘
┌───────────────────────────────────────────────────────────┐
│ Tauri Plugins (Rust) │
│ (src-tauri/plugins/) │
│ │
│ ┌──────────────────┐ ┌──────────────────┐ │
│ │ Hardware Plugin │ │ LlamaCPP Plugin │ │
│ │ │ │ │ │
│ │ • CPU/GPU Info │ │ • Process Mgmt │ │
│ │ • Memory Stats │ │ • Model Loading │ │
│ │ • System Info │ │ • Inference │ │
│ └──────────────────┘ └──────────────────┘ │
└───────────────────────────────────────────────────────────┘
```
### The Communication Flow
1. **JavaScript Layer Relationships**:
- Web App imports Core SDK and Extensions as JavaScript modules
- Extensions use Core SDK for shared functionality
- All run in the browser/webview context
2. **All Three → Backend**: Through Tauri IPC
- **Web App** → Backend: `await invoke('app_command', data)`
- **Core SDK** → Backend: `await invoke('core_command', data)`
- **Extensions** → Backend: `await invoke('ext_command', data)`
- Each component can independently call backend commands
3. **Backend → Plugins**: Native Rust integration
- Backend loads plugins as Rust libraries
- Direct function calls, no IPC overhead
4. **Response Flow**:
- Plugin → Backend → IPC → Requester (Web App/Core/Extension) → UI updates
### Real-World Example: Loading a Model
Here's what actually happens when you click "Download Llama 3":
1. **Web App** (`web-app/`) - User clicks download button
2. **Extension** (`extensions/download-extension`) - Handles the download logic
3. **Tauri Backend** (`src-tauri/`) - Actually downloads the file to disk
4. **Extension** (`extensions/llamacpp-extension`) - Prepares model for loading
5. **Tauri Plugin** (`src-tauri/plugins/llamacpp`) - Starts llama.cpp process
6. **Hardware Plugin** (`src-tauri/plugins/hardware`) - Detects GPU, optimizes settings
7. **Model ready!** - User can start chatting
## Project Structure
```
jan/
├── web-app/ # React frontend (what users see)
├── src-tauri/ # Rust backend (system integration)
│ ├── src/core/ # Core Tauri commands
│ └── plugins/ # Tauri plugins (hardware, llamacpp)
├── core/ # TypeScript SDK (API layer)
├── extensions/ # JavaScript extensions
│ ├── assistant-extension/
│ ├── conversational-extension/
│ ├── download-extension/
│ └── llamacpp-extension/
├── docs/ # Documentation website
├── website/ # Marketing website
├── autoqa/ # Automated testing
├── scripts/ # Build utilities
├── package.json # Root workspace configuration
├── Makefile # Build automation commands
├── LICENSE # Apache 2.0 license
└── README.md # Project overview
```
## Development Setup
### The Scenic Route (Build from Source)
**Prerequisites:**
- Node.js ≥ 20.0.0
- Yarn ≥ 1.22.0
- Rust (for Tauri)
- Make ≥ 3.81
**Option 1: The Easy Way (Make)**
```bash
git clone https://github.com/janhq/jan
cd jan
make dev
```
## How Can I Contribute? ## How Can I Contribute?
### Reporting Bugs ### Reporting Bugs
- **Ensure the bug was not already reported** by searching on GitHub under [Issues](https://github.com/janhq/jan/issues) - **Ensure the bug was not already reported** by searching on GitHub under [Issues](https://github.com/menloresearch/jan/issues).
- If you're unable to find an open issue addressing the problem, [open a new one](https://github.com/janhq/jan/issues/new) - If you're unable to find an open issue addressing the problem, [open a new one](https://github.com/menloresearch/jan/issues/new).
- Include your system specs and error logs - it helps a ton
### Suggesting Enhancements ### Suggesting Enhancements
- Open a new issue with a clear title and description - Open a new issue with a clear title and description.
- Explain why this enhancement would be useful
- Include mockups or examples if you can
### Your First Code Contribution ### Your First Code Contribution
**Choose Your Adventure:** - Fork the repo.
- **Frontend UI and logic**`web-app/` - Create a new branch (`git checkout -b feature-name`).
- **Shared API declarations**`core/` - Commit your changes (`git commit -am 'Add some feature'`).
- **Backend system integration**`src-tauri/` - Push to the branch (`git push origin feature-name`).
- **Business logic features**`extensions/` - Open a new Pull Request.
- **Dedicated backend handler**`src-tauri/plugins/`
**The Process:** ## Styleguides
1. Fork the repo
2. Create a new branch (`git checkout -b feature-name`)
3. Make your changes (and write tests!)
4. Commit your changes (`git commit -am 'Add some feature'`)
5. Push to the branch (`git push origin feature-name`)
6. Open a new Pull Request against `dev` branch
## Testing ### Git Commit Messages
```bash - Use the present tense ("Add feature" not "Added feature").
yarn test # All tests
cd src-tauri && cargo test # Rust tests
cd autoqa && python main.py # End-to-end tests
```
## Code Standards
### TypeScript/JavaScript
- TypeScript required (we're not animals)
- ESLint + Prettier
- Functional React components
- Proper typing (no `any` - seriously!)
### Rust
- `cargo fmt` + `cargo clippy`
- `Result<T, E>` for error handling
- Document public APIs
## Git Conventions
### Branches
- `main` - stable releases
- `dev` - development (target this for PRs)
- `feature/*` - new features
- `fix/*` - bug fixes
### Commit Messages
- Use the present tense ("Add feature" not "Added feature")
- Be descriptive but concise
- Reference issues when applicable
Examples:
```
feat: add support for Qwen models
fix: resolve memory leak in model loading
docs: update installation instructions
```
## Troubleshooting
If things go sideways:
1. **Check our [troubleshooting docs](https://jan.ai/docs/troubleshooting)**
2. **Clear everything and start fresh:** `make clean` then `make dev`
3. **Copy your error logs and system specs**
4. **Ask for help in our [Discord](https://discord.gg/FTk2MvZwJH)** `#🆘|jan-help` channel
Common issues:
- **Build failures**: Check Node.js and Rust versions
- **Extension not loading**: Verify it's properly registered
- **Model not working**: Check hardware requirements and GPU drivers
## Getting Help
- [Documentation](https://jan.ai/docs) - The manual you should read
- [Discord Community](https://discord.gg/jan) - Where the community lives
- [GitHub Issues](https://github.com/janhq/jan/issues) - Report bugs here
- [GitHub Discussions](https://github.com/janhq/jan/discussions) - Ask questions
## License
Apache 2.0 - Because sharing is caring. See [LICENSE](./LICENSE) for the legal stuff.
## Additional Notes ## Additional Notes
We're building something pretty cool here - an AI assistant that respects your privacy and runs entirely on your machine. Every contribution, no matter how small, helps make AI more accessible to everyone. Thank you for contributing to jan!
Thanks for being part of the journey. Let's build the future of local AI together! 🚀

View File

@ -1,50 +0,0 @@
# Stage 1: Build stage with Node.js and Yarn v4
FROM node:20-alpine AS builder
ARG MENLO_PLATFORM_BASE_URL=https://api-dev.menlo.ai/v1
ENV MENLO_PLATFORM_BASE_URL=$MENLO_PLATFORM_BASE_URL
# Install build dependencies
RUN apk add --no-cache \
make \
g++ \
python3 \
py3-pip \
git
# Enable corepack and install Yarn 4
RUN corepack enable && corepack prepare yarn@4.5.3 --activate
# Verify Yarn version
RUN yarn --version
# Set working directory
WORKDIR /app
# Copy source code
COPY ./extensions-web ./extensions-web
COPY ./web-app ./web-app
COPY ./Makefile ./Makefile
COPY ./.* /
COPY ./package.json ./package.json
COPY ./yarn.lock ./yarn.lock
COPY ./pre-install ./pre-install
COPY ./core ./core
# Build web application
RUN yarn install && yarn build:core && make build-web-app
# Stage 2: Production stage with Nginx
FROM nginx:alpine
# Copy static files from build stage
COPY --from=builder /app/web-app/dist-web /usr/share/nginx/html
# Copy custom nginx config
COPY nginx.conf /etc/nginx/conf.d/default.conf
# Expose port 80
EXPOSE 80
# Start nginx
CMD ["nginx", "-g", "daemon off;"]

196
LICENSE
View File

@ -1,19 +1,201 @@
Jan Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Copyright 2025 Menlo Research TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
This product includes software developed by Menlo Research (https://menlo.ai). 1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2025 Menlo Research Pte. Ltd.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
Attribution is requested in user-facing documentation and materials, where appropriate.

134
Makefile
View File

@ -22,112 +22,37 @@ config-yarn:
install-and-build: config-yarn install-and-build: config-yarn
ifeq ($(OS),Windows_NT) ifeq ($(OS),Windows_NT)
echo "skip" echo "skip"
else ifeq ($(shell uname -s),Linux)
chmod +x src-tauri/build-utils/*
endif endif
yarn install yarn install
yarn build:tauri:plugin:api
yarn build:core yarn build:core
yarn build:extensions && yarn build:extensions-web yarn build:extensions
# Install required Rust targets for macOS universal builds
install-rust-targets:
ifeq ($(shell uname -s),Darwin)
@echo "Detected macOS, installing universal build targets..."
rustup target add x86_64-apple-darwin
rustup target add aarch64-apple-darwin
@echo "Rust targets installed successfully!"
else
@echo "Not macOS; skipping Rust target installation."
endif
# Install required Rust targets for Android builds
install-android-rust-targets:
@echo "Checking and installing Android Rust targets..."
@rustup target list --installed | grep -q "aarch64-linux-android" || rustup target add aarch64-linux-android
@rustup target list --installed | grep -q "armv7-linux-androideabi" || rustup target add armv7-linux-androideabi
@rustup target list --installed | grep -q "i686-linux-android" || rustup target add i686-linux-android
@rustup target list --installed | grep -q "x86_64-linux-android" || rustup target add x86_64-linux-android
@echo "Android Rust targets ready!"
# Install required Rust targets for iOS builds
install-ios-rust-targets:
@echo "Checking and installing iOS Rust targets..."
@rustup target list --installed | grep -q "aarch64-apple-ios" || rustup target add aarch64-apple-ios
@rustup target list --installed | grep -q "aarch64-apple-ios-sim" || rustup target add aarch64-apple-ios-sim
@rustup target list --installed | grep -q "x86_64-apple-ios" || rustup target add x86_64-apple-ios
@echo "iOS Rust targets ready!"
dev: install-and-build dev: install-and-build
yarn install:cortex
yarn download:bin yarn download:bin
yarn copy:lib
yarn dev yarn dev
# Web application targets
install-web-app: config-yarn
yarn install
dev-web-app: install-web-app
yarn build:core
yarn dev:web-app
build-web-app: install-web-app
yarn build:core
yarn build:web-app
serve-web-app:
yarn serve:web-app
build-serve-web-app: build-web-app
yarn serve:web-app
# Mobile
dev-android: install-and-build install-android-rust-targets
@echo "Setting up Android development environment..."
@if [ ! -d "src-tauri/gen/android" ]; then \
echo "Android app not initialized. Initializing..."; \
yarn tauri android init; \
fi
@echo "Sourcing Android environment setup..."
@bash autoqa/scripts/setup-android-env.sh echo "Android environment ready"
@echo "Starting Android development server..."
yarn dev:android
dev-ios: install-and-build install-ios-rust-targets
@echo "Setting up iOS development environment..."
ifeq ($(shell uname -s),Darwin)
@if [ ! -d "src-tauri/gen/ios" ]; then \
echo "iOS app not initialized. Initializing..."; \
yarn tauri ios init; \
fi
@echo "Checking iOS development requirements..."
@xcrun --version > /dev/null 2>&1 || (echo "❌ Xcode command line tools not found. Install with: xcode-select --install" && exit 1)
@xcrun simctl list devices available | grep -q "iPhone\|iPad" || (echo "❌ No iOS simulators found. Install simulators through Xcode." && exit 1)
@echo "Starting iOS development server..."
yarn dev:ios
else
@echo "❌ iOS development is only supported on macOS"
@exit 1
endif
# Linting # Linting
lint: install-and-build lint: install-and-build
yarn lint yarn lint
# Testing # Testing
test: lint test: lint
yarn download:bin
ifeq ($(OS),Windows_NT)
endif
yarn test yarn test
yarn copy:assets:tauri yarn test:e2e
yarn build:icon
cargo test --manifest-path src-tauri/Cargo.toml --no-default-features --features test-tauri -- --test-threads=1 # Builds and publishes the app
cargo test --manifest-path src-tauri/plugins/tauri-plugin-hardware/Cargo.toml build-and-publish: install-and-build
cargo test --manifest-path src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml yarn build
cargo test --manifest-path src-tauri/utils/Cargo.toml
# Build # Build
build: install-and-build install-rust-targets build: install-and-build
yarn build
# Deprecated soon
build-tauri: install-and-build
yarn copy:lib
yarn build yarn build
clean: clean:
@ -157,21 +82,20 @@ else ifeq ($(shell uname -s),Linux)
rm -rf ./src-tauri/target rm -rf ./src-tauri/target
rm -rf "~/jan/extensions" rm -rf "~/jan/extensions"
rm -rf "~/.cache/jan*" rm -rf "~/.cache/jan*"
rm -rf "./.cache"
else else
find . -name "node_modules" -type d -prune -exec rm -rfv '{}' + find . -name "node_modules" -type d -prune -exec rm -rf '{}' +
find . -name ".next" -type d -exec rm -rfv '{}' + find . -name ".next" -type d -exec rm -rf '{}' +
find . -name "dist" -type d -exec rm -rfv '{}' + find . -name "dist" -type d -exec rm -rf '{}' +
find . -name "build" -type d -exec rm -rfv '{}' + find . -name "build" -type d -exec rm -rf '{}' +
find . -name "out" -type d -exec rm -rfv '{}' + find . -name "out" -type d -exec rm -rf '{}' +
find . -name ".turbo" -type d -exec rm -rfv '{}' + find . -name ".turbo" -type d -exec rm -rf '{}' +
find . -name ".yarn" -type d -exec rm -rfv '{}' + find . -name ".yarn" -type d -exec rm -rf '{}' +
find . -name "package-lock.json" -type f -exec rm -rfv '{}' + find . -name "package-lock.json" -type f -exec rm -rf '{}' +
rm -rfv ./pre-install/*.tgz rm -rf ./pre-install/*.tgz
rm -rfv ./extensions/*/*.tgz rm -rf ./extensions/*/*.tgz
rm -rfv ./electron/pre-install/*.tgz rm -rf ./electron/pre-install/*.tgz
rm -rfv ./src-tauri/resources rm -rf ./src-tauri/resources
rm -rfv ./src-tauri/target rm -rf ./src-tauri/target
rm -rfv ~/jan/extensions rm -rf ~/jan/extensions
rm -rfv ~/Library/Caches/jan* rm -rf ~/Library/Caches/jan*
endif endif

View File

@ -1,62 +1,81 @@
# Jan - Open-source ChatGPT replacement # Jan - Local AI Assistant
<img width="2048" height="280" alt="github jan banner" src="https://github.com/user-attachments/assets/f3f87889-c133-433b-b250-236218150d3f" /> ![Jan banner](./JanBanner.png)
<p align="center"> <p align="center">
<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section --> <!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
<img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/janhq/jan"/> <img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/menloresearch/jan"/>
<img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/janhq/jan"/> <img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/menloresearch/jan"/>
<img alt="Github Contributors" src="https://img.shields.io/github/contributors/janhq/jan"/> <img alt="Github Contributors" src="https://img.shields.io/github/contributors/menloresearch/jan"/>
<img alt="GitHub closed issues" src="https://img.shields.io/github/issues-closed/janhq/jan"/> <img alt="GitHub closed issues" src="https://img.shields.io/github/issues-closed/menloresearch/jan"/>
<img alt="Discord" src="https://img.shields.io/discord/1107178041848909847?label=discord"/> <img alt="Discord" src="https://img.shields.io/discord/1107178041848909847?label=discord"/>
</p> </p>
<p align="center"> <p align="center">
<a href="https://www.jan.ai/docs/desktop">Getting Started</a> <a href="https://jan.ai/docs/quickstart">Getting Started</a>
- <a href="https://discord.gg/Exe46xPMbK">Community</a> - <a href="https://jan.ai/docs">Docs</a>
- <a href="https://jan.ai/changelog">Changelog</a> - <a href="https://jan.ai/changelog">Changelog</a>
- <a href="https://github.com/janhq/jan/issues">Bug reports</a> - <a href="https://github.com/menloresearch/jan/issues">Bug reports</a>
- <a href="https://discord.gg/AsJ8krTT3N">Discord</a>
</p> </p>
Jan is bringing the best of open-source AI in an easy-to-use product. Download and run LLMs with **full control** and **privacy**. Jan is a ChatGPT-alternative that runs 100% offline on your device. Our goal is to make it easy for a layperson to download and run LLMs and use AI with **full control** and **privacy**.
**⚠️ Jan is in active development.**
## Installation ## Installation
The easiest way to get started is by downloading one of the following versions for your respective operating system: Because clicking a button is still the easiest way to get started:
<table> <table>
<tr> <tr>
<td><b>Platform</b></td> <td><b>Platform</b></td>
<td><b>Download</b></td> <td><b>Stable</b></td>
<td><b>Beta</b></td>
<td><b>Nightly</b></td>
</tr> </tr>
<tr> <tr>
<td><b>Windows</b></td> <td><b>Windows</b></td>
<td><a href='https://app.jan.ai/download/latest/win-x64'>jan.exe</a></td> <td><a href='https://app.jan.ai/download/latest/win-x64'>jan.exe</a></td>
<td><a href='https://app.jan.ai/download/beta/win-x64'>jan.exe</a></td>
<td><a href='https://app.jan.ai/download/nightly/win-x64'>jan.exe</a></td>
</tr> </tr>
<tr> <tr>
<td><b>macOS</b></td> <td><b>macOS</b></td>
<td><a href='https://app.jan.ai/download/latest/mac-universal'>jan.dmg</a></td> <td><a href='https://app.jan.ai/download/latest/mac-universal'>jan.dmg</a></td>
<td><a href='https://app.jan.ai/download/beta/mac-universal'>jan.dmg</a></td>
<td><a href='https://app.jan.ai/download/nightly/mac-universal'>jan.dmg</a></td>
</tr> </tr>
<tr> <tr>
<td><b>Linux (deb)</b></td> <td><b>Linux (deb)</b></td>
<td><a href='https://app.jan.ai/download/latest/linux-amd64-deb'>jan.deb</a></td> <td><a href='https://app.jan.ai/download/latest/linux-amd64-deb'>jan.deb</a></td>
<td><a href='https://app.jan.ai/download/beta/linux-amd64-deb'>jan.deb</a></td>
<td><a href='https://app.jan.ai/download/nightly/linux-amd64-deb'>jan.deb</a></td>
</tr> </tr>
<tr> <tr>
<td><b>Linux (AppImage)</b></td> <td><b>Linux (AppImage)</b></td>
<td><a href='https://app.jan.ai/download/latest/linux-amd64-appimage'>jan.AppImage</a></td> <td><a href='https://app.jan.ai/download/latest/linux-amd64-appimage'>jan.AppImage</a></td>
<td><a href='https://app.jan.ai/download/beta/linux-amd64-appimage'>jan.AppImage</a></td>
<td><a href='https://app.jan.ai/download/nightly/linux-amd64-appimage'>jan.AppImage</a></td>
</tr> </tr>
</table> </table>
Download from [jan.ai](https://jan.ai/) or [GitHub Releases](https://github.com/menloresearch/jan/releases).
Download from [jan.ai](https://jan.ai/) or [GitHub Releases](https://github.com/janhq/jan/releases). ## Demo
<video width="100%" controls>
<source src="./docs/public/assets/videos/enable-tool-call-for-models.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
## Features ## Features
- **Local AI Models**: Download and run LLMs (Llama, Gemma, Qwen, GPT-oss etc.) from HuggingFace - **Local AI Models**: Download and run LLMs (Llama, Gemma, Qwen, etc.) from HuggingFace
- **Cloud Integration**: Connect to GPT models via OpenAI, Claude models via Anthropic, Mistral, Groq, and others - **Cloud Integration**: Connect to OpenAI, Anthropic, Mistral, Groq, and others
- **Custom Assistants**: Create specialized AI assistants for your tasks - **Custom Assistants**: Create specialized AI assistants for your tasks
- **OpenAI-Compatible API**: Local server at `localhost:1337` for other applications - **OpenAI-Compatible API**: Local server at `localhost:1337` for other applications
- **Model Context Protocol**: MCP integration for agentic capabilities - **Model Context Protocol**: MCP integration for enhanced capabilities
- **Privacy First**: Everything runs locally when you want it to - **Privacy First**: Everything runs locally when you want it to
## Build from Source ## Build from Source
@ -73,7 +92,7 @@ For those who enjoy the scenic route:
### Run with Make ### Run with Make
```bash ```bash
git clone https://github.com/janhq/jan git clone https://github.com/menloresearch/jan
cd jan cd jan
make dev make dev
``` ```
@ -86,11 +105,33 @@ This handles everything: installs dependencies, builds core components, and laun
- `make test` - Run tests and linting - `make test` - Run tests and linting
- `make clean` - Delete everything and start fresh - `make clean` - Delete everything and start fresh
### Run with Mise (easier)
You can also run with [mise](https://mise.jdx.dev/), which is a bit easier as it ensures Node.js, Rust, and other dependency versions are automatically managed:
```bash
git clone https://github.com/menloresearch/jan
cd jan
# Install mise (if not already installed)
curl https://mise.run | sh
# Install tools and start development
mise install # installs Node.js, Rust, and other tools
mise dev # runs the full development setup
```
**Available mise commands:**
- `mise dev` - Full development setup and launch
- `mise build` - Production build
- `mise test` - Run tests and linting
- `mise clean` - Delete everything and start fresh
- `mise tasks` - List all available tasks
### Manual Commands ### Manual Commands
```bash ```bash
yarn install yarn install
yarn build:tauri:plugin:api
yarn build:core yarn build:core
yarn build:extensions yarn build:extensions
yarn dev yarn dev
@ -108,12 +149,13 @@ For detailed compatibility, check our [installation guides](https://jan.ai/docs/
## Troubleshooting ## Troubleshooting
If things go sideways: When things go sideways (they will):
1. Check our [troubleshooting docs](https://jan.ai/docs/troubleshooting) 1. Check our [troubleshooting docs](https://jan.ai/docs/troubleshooting)
2. Copy your error logs and system specs 2. Copy your error logs and system specs
3. Ask for help in our [Discord](https://discord.gg/FTk2MvZwJH) `#🆘|jan-help` channel 3. Ask for help in our [Discord](https://discord.gg/FTk2MvZwJH) `#🆘|jan-help` channel
We keep logs for 24 hours, so don't procrastinate on reporting issues.
## Contributing ## Contributing
@ -128,11 +170,20 @@ Contributions welcome. See [CONTRIBUTING.md](CONTRIBUTING.md) for the full spiel
## Contact ## Contact
- **Bugs**: [GitHub Issues](https://github.com/janhq/jan/issues) - **Bugs**: [GitHub Issues](https://github.com/menloresearch/jan/issues)
- **Business**: hello@jan.ai - **Business**: hello@jan.ai
- **Jobs**: hr@jan.ai - **Jobs**: hr@jan.ai
- **General Discussion**: [Discord](https://discord.gg/FTk2MvZwJH) - **General Discussion**: [Discord](https://discord.gg/FTk2MvZwJH)
## Trust & Safety
**Friendly reminder**: We're not trying to scam you.
- We won't ask for personal information
- Jan is completely free (no premium version exists)
- We don't have a cryptocurrency or ICO
- We're bootstrapped and not seeking your investment (yet)
## License ## License
Apache 2.0 - Because sharing is caring. Apache 2.0 - Because sharing is caring.

9
ai.menlo.jan.desktop Normal file
View File

@ -0,0 +1,9 @@
[Desktop Entry]
Name=Jan
Comment=Local AI Assistant that runs 100% offline
Exec=run.sh
Icon=ai.menlo.jan
Type=Application
Categories=Development;
Keywords=AI;Assistant;LLM;ChatGPT;Local;Offline;
StartupNotify=true

42
ai.menlo.jan.metainfo.xml Normal file
View File

@ -0,0 +1,42 @@
<?xml version="1.0" encoding="UTF-8"?>
<component type="desktop-application">
<id>ai.menlo.jan</id>
<metadata_license>FSFAP</metadata_license>
<project_license>AGPL-3.0-only</project_license>
<name>Jan</name>
<summary>Local AI Assistant that runs 100% offline on your device</summary>
<description>
<p>
Jan is a ChatGPT-alternative that runs 100% offline on your device. Our goal is to make it easy for anyone to download and run LLMs and use AI with full control and privacy.
</p>
<p>Features:</p>
<ul>
<li>Model Library with popular LLMs like Llama, Gemma, Mistral, or Qwen</li>
<li>Connect to Remote AI APIs like Groq and OpenRouter</li>
<li>Local API Server with OpenAI-equivalent API</li>
<li>Extensions for customizing Jan</li>
</ul>
</description>
<launchable type="desktop-id">ai.menlo.jan.desktop</launchable>
<screenshots>
<screenshot type="default">
<image>https://catalog.jan.ai/flatpak/demo.gif</image>
</screenshot>
</screenshots>
<url type="homepage">https://jan.ai/</url>
<url type="bugtracker">https://github.com/menloresearch/jan/issues</url>
<content_rating type="oars-1.1" />
<releases>
<release version="0.5.12" date="2024-01-02">
<description>
<p>Latest stable release of Jan AI</p>
</description>
</release>
</releases>
</component>

View File

@ -1,319 +0,0 @@
# E2E Test Runner with ReportPortal Integration
🚀 An automated end-to-end test runner for Jan application with ReportPortal integration, screen recording, and comprehensive test monitoring.
## Features
- ✅ **Automated Jan App Testing**: Automatically starts/stops Jan application
- 🖥️ **Auto Computer Server**: Automatically starts computer server in background
- 📹 **Screen Recording**: Records test execution for debugging
- 📊 **ReportPortal Integration**: Optional test results upload to ReportPortal
- 🔄 **Turn Monitoring**: Prevents infinite loops with configurable turn limits
- 🎯 **Flexible Configuration**: Command-line arguments and environment variables
- 🌐 **Cross-platform**: Windows, macOS, and Linux support
- 📁 **Test Discovery**: Automatically scans test files from directory
## Prerequisites
- Python 3.8+
- Jan application installed
- Windows Sandbox (for computer provider)
- Computer server package installed
- Required Python packages (see requirements.txt)
## Installation
1. Clone the repository:
```bash
git clone <repository-url>
cd autoqa
```
2. Install dependencies:
```bash
## For Windows and Linux
pip install -r requirements.txt
```
3. Ensure Jan application is installed in one of the default locations:
- Windows: `%LOCALAPPDATA%\Programs\jan\Jan.exe`
- macOS: `~/Applications/Jan.app/Contents/MacOS/Jan`
- Linux: `jan` (in PATH)
## Quick Start
### Local Development (No ReportPortal)
```bash
# Run all tests in ./tests directory (auto-starts computer server)
python main.py
# Run with custom test directory
python main.py --tests-dir "my_tests"
# Run with custom Jan app path
python main.py --jan-app-path "C:/Custom/Path/Jan.exe"
# Skip auto computer server start (if already running)
python main.py --skip-server-start
```
### With ReportPortal Integration
```bash
# Enable ReportPortal with token
python main.py --enable-reportportal --rp-token "YOUR_API_TOKEN"
# Full ReportPortal configuration
python main.py \
--enable-reportportal \
--rp-endpoint "https://reportportal.example.com" \
--rp-project "my_project" \
--rp-token "YOUR_API_TOKEN"
```
## Configuration
### Command Line Arguments
| Argument | Environment Variable | Default | Description |
| ----------------------- | --------------------- | ------------------------------- | ------------------------------------------------- |
| **Computer Server** |
| `--skip-server-start` | `SKIP_SERVER_START` | `false` | Skip automatic computer server startup |
| **ReportPortal** |
| `--enable-reportportal` | `ENABLE_REPORTPORTAL` | `false` | Enable ReportPortal integration |
| `--rp-endpoint` | `RP_ENDPOINT` | `https://reportportal.menlo.ai` | ReportPortal endpoint URL |
| `--rp-project` | `RP_PROJECT` | `default_personal` | ReportPortal project name |
| `--rp-token` | `RP_TOKEN` | - | ReportPortal API token (required when RP enabled) |
| **Jan Application** |
| `--jan-app-path` | `JAN_APP_PATH` | _auto-detected_ | Path to Jan application executable |
| `--jan-process-name` | `JAN_PROCESS_NAME` | `Jan.exe` | Jan process name for monitoring |
| **Model Configuration** |
| `--model-name` | `MODEL_NAME` | `ByteDance-Seed/UI-TARS-1.5-7B` | AI model name |
| `--model-base-url` | `MODEL_BASE_URL` | `http://10.200.108.58:1234/v1` | Model API endpoint |
| `--model-provider` | `MODEL_PROVIDER` | `oaicompat` | Model provider type |
| `--model-loop` | `MODEL_LOOP` | `uitars` | Agent loop type |
| **Test Execution** |
| `--max-turns` | `MAX_TURNS` | `30` | Maximum turns per test |
| `--tests-dir` | `TESTS_DIR` | `tests` | Directory containing test files |
| `--delay-between-tests` | `DELAY_BETWEEN_TESTS` | `3` | Delay between tests (seconds) |
### Environment Variables
Create a `.env` file or set environment variables:
```bash
# Computer Server
SKIP_SERVER_START=false
# ReportPortal Configuration
ENABLE_REPORTPORTAL=true
RP_ENDPOINT=https://reportportal.example.com
RP_PROJECT=my_project
RP_TOKEN=your_secret_token
# Jan Application
JAN_APP_PATH=C:\Custom\Path\Jan.exe
JAN_PROCESS_NAME=Jan.exe
# Model Configuration
MODEL_NAME=gpt-4
MODEL_BASE_URL=https://api.openai.com/v1
MODEL_PROVIDER=openai
MODEL_LOOP=uitars
# Test Settings
MAX_TURNS=50
TESTS_DIR=e2e_tests
DELAY_BETWEEN_TESTS=5
```
## Test Structure
### Test Files
- Test files should be `.txt` files containing test prompts
- Place test files in the `tests/` directory (or custom directory)
- Support nested directories for organization
Example test file (`tests/basic/login_test.txt`):
```
Test the login functionality of Jan application.
Navigate to login screen, enter valid credentials, and verify successful login.
```
### Directory Structure
```
autoqa/
├── main.py # Main test runner
├── utils.py # Jan app utilities
├── test_runner.py # Test execution logic
├── screen_recorder.py # Screen recording functionality
├── reportportal_handler.py # ReportPortal integration
├── tests/ # Test files directory
│ ├── basic/
│ │ ├── login_test.txt
│ │ └── navigation_test.txt
│ └── advanced/
│ └── complex_workflow.txt
├── recordings/ # Screen recordings (auto-created)
├── trajectories/ # Agent trajectories (auto-created)
└── README.md
```
## Usage Examples
### Basic Usage
```bash
# Run all tests locally (auto-starts computer server)
python main.py
# Get help
python main.py --help
# Run without auto-starting computer server
python main.py --skip-server-start
```
### Advanced Usage
```bash
# Custom configuration
python main.py \
--tests-dir "integration_tests" \
--max-turns 40 \
--delay-between-tests 10 \
--model-name "gpt-4"
# Environment + Arguments
ENABLE_REPORTPORTAL=true RP_TOKEN=secret python main.py --max-turns 50
# Different model provider
python main.py \
--model-provider "openai" \
--model-name "gpt-4" \
--model-base-url "https://api.openai.com/v1"
# External computer server (skip auto-start)
SKIP_SERVER_START=true python main.py
```
### CI/CD Usage
```bash
# GitHub Actions / CI environment
ENABLE_REPORTPORTAL=true \
RP_TOKEN=${{ secrets.RP_TOKEN }} \
MODEL_NAME=production-model \
MAX_TURNS=40 \
SKIP_SERVER_START=false \
python main.py
```
## Computer Server Management
The test runner automatically manages the computer server:
### Automatic Server Management (Default)
- **Auto-start**: Computer server starts automatically in background thread
- **Auto-cleanup**: Server stops when main program exits (daemon thread)
- **Error handling**: Graceful fallback if server fails to start
### Manual Server Management
```bash
# If you prefer to manage computer server manually:
python -m computer_server # In separate terminal
# Then run tests without auto-start:
python main.py --skip-server-start
```
### Server Logs
```
2025-07-15 15:30:45 - INFO - Starting computer server in background...
2025-07-15 15:30:45 - INFO - Calling computer_server.run_cli()...
2025-07-15 15:30:45 - INFO - Computer server thread started
2025-07-15 15:30:50 - INFO - Computer server is running successfully
```
## Output
### Local Development
- **Console logs**: Detailed execution information
- **Screen recordings**: Saved to `recordings/` directory as MP4 files
- **Trajectories**: Agent interaction data in `trajectories/` directory
- **Local results**: Test results logged to console
### ReportPortal Integration
When enabled, results are uploaded to ReportPortal including:
- Test execution status (PASSED/FAILED)
- Screen recordings as attachments
- Detailed turn-by-turn interaction logs
- Error messages and debugging information
## Troubleshooting
### Common Issues
1. **Computer server startup failed**:
```bash
# Install required dependencies
pip install computer_server
# Check if computer_server is available
python -c "import computer_server; print('OK')"
# Use manual server if auto-start fails
python main.py --skip-server-start
```
2. **Jan app not found**:
```bash
# Specify custom path
python main.py --jan-app-path "D:/Apps/Jan/Jan.exe"
```
3. **Windows dependencies missing**:
```bash
# Install Windows-specific packages
pip install pywin32 psutil
```
4. **ReportPortal connection failed**:
- Verify endpoint URL and token
- Check network connectivity
- Ensure project exists
5. **Screen recording issues**:
- Check disk space in `recordings/` directory
- Verify screen recording permissions
6. **Test timeouts**:
```bash
# Increase turn limit
python main.py --max-turns 50
```
### Debug Mode
Enable detailed logging by modifying the logging level in `main.py`:
```python
logging.basicConfig(level=logging.DEBUG)
```

View File

@ -1,264 +0,0 @@
# I. Before release
## A. Initial update / migration Data check
Before testing, set-up the following in the old version to make sure that we can see the data is properly migrated:
- [ ] Changing appearance / theme to something that is obviously different from default set-up
- [ ] Ensure there are a few chat threads
- [ ] Ensure there are a few favourites / star threads
- [ ] Ensure there are 2 model downloaded
- [ ] Ensure there are 2 import on local provider (llama.cpp)
- [ ] Modify MCP servers list and add some ENV value to MCP servers
- [ ] Modify Local API Server
- [ ] HTTPS proxy config value
- [ ] Add 2 custom assistants to Jan
- [ ] Create a new chat with the custom assistant
- [ ] Change the `App Data` to some other folder
- [ ] Create a Custom Provider
- [ ] Disabled some model providers
- [ ] [NEW] Change the llama.cpp settings of 2 models
#### Validate that the update does not corrupt existing user data or settings (before and after update show the same information):
- [ ] Threads
- [ ] Previously used model and assistants is shown correctly
- [ ] Can resume chat in threads with the previous context
- [ ] Assistants
- Settings:
- [ ] Appearance
- [ ] MCP Servers
- [ ] Local API Server
- [ ] HTTPS Proxy
- [ ] Custom Provider Set-up
#### In `Hub`:
- [ ] Can see model from HF listed properly
- [ ] Downloaded model will show `Use` instead of `Download`
- [ ] Toggling on `Downloaded` on the right corner show the correct list of downloaded models
#### In `Settings -> General`:
- [ ] Ensure the `App Data` path is the same
- [ ] Click Open Logs, App Log will show
#### In `Settings -> Model Providers`:
- [ ] Llama.cpp still listed downloaded models and user can chat with the models
- [ ] Llama.cpp still listed imported models and user can chat with the models
- [ ] Remote model still retain previously set up API keys and user can chat with model from the provider without having to re-enter API keys
- [ ] Enabled and Disabled Model Providers stay the same as before update
#### In `Settings -> Extensions`, check that following exists:
- [ ] Conversational
- [ ] Jan Assistant
- [ ] Download Manager
- [ ] llama.cpp Inference Engine
## B. `Settings`
#### In `General`:
- [ ] Ensure `Community` links work and point to the correct website
- [ ] Ensure the `Check for Updates` function detect the correct latest version
- [ ] [ENG] Create a folder with non-standard characters in its title (e.g. Chinese characters) => change the `App data` location to that folder => test that models can still load and run properly.
#### In `Appearance`:
- [ ] Toggle between different `Theme` options to check that they change accordingly and that all elements of the UI are legible with the right contrast:
- [ ] Light
- [ ] Dark
- [ ] System (should follow your OS system settings)
- [ ] Change the following values => close the application => re-open the application => ensure that the change is persisted across session:
- [ ] Theme
- [ ] Font Size
- [ ] Window Background
- [ ] App Main View
- [ ] Primary
- [ ] Accent
- [ ] Destructive
- [ ] Chat Width
- [ ] Ensure that when this value is changed, there is no broken UI caused by it
- [ ] Code Block
- [ ] Show Line Numbers
- [ENG] Ensure that when click on `Reset` in the `Appearance` section, it reset back to the default values
- [ENG] Ensure that when click on `Reset` in the `Code Block` section, it reset back to the default values
#### In `Model Providers`:
In `Llama.cpp`:
- [ ] After downloading a model from hub, the model is listed with the correct name under `Models`
- [ ] Can import `gguf` model with no error
- [ ] Imported model will be listed with correct name under the `Models`
- [ ] Check that when click `delete` the model will be removed from the list
- [ ] Deleted model doesn't appear in the selectable models section in chat input (even in old threads that use the model previously)
- [ ] Ensure that user can re-import deleted imported models
- [ ] Enable `Auto-Unload Old Models`, and ensure that only one model can run / start at a time. If there are two model running at the time of enable, both of them will be stopped.
- [ ] Disable `Auto-Unload Old Models`, and ensure that multiple models can run at the same time.
- [ ] Enable `Context Shift` and ensure that context can run for long without encountering memory error. Use the `banana test` by turn on fetch MCP => ask local model to fetch and summarize the history of banana (banana has a very long history on wiki it turns out). It should run out of context memory sufficiently fast if `Context Shift` is not enabled.
- [ ] Ensure that user can change the Jinja chat template of individual model and it doesn't affect the template of other model
- [ ] Ensure that there is a recommended `llama.cpp` for each system and that it works out of the box for users.
- [ ] [0.6.9] Take a `gguf` file and delete the `.gguf` extensions from the file name, import it into Jan and verify that it works.
In Remote Model Providers:
- [ ] Check that the following providers are presence:
- [ ] OpenAI
- [ ] Anthropic
- [ ] Cohere
- [ ] OpenRouter
- [ ] Mistral
- [ ] Groq
- [ ] Gemini
- [ ] Hugging Face
- [ ] Models should appear as available on the selectable dropdown in chat input once some value is input in the API key field. (it could be the wrong API key)
- [ ] Once a valid API key is used, user can select a model from that provider and chat without any error.
- [ ] Delete a model and ensure that it doesn't show up in the `Models` list view or in the selectable dropdown in chat input.
- [ ] Ensure that a deleted model also not selectable or appear in old threads that used it.
- [ ] Adding of new model manually works and user can chat with the newly added model without error (you can add back the model you just delete for testing)
- [ ] [0.6.9] Make sure that Ollama set-up as a custom provider work with Jan
In Custom Providers:
- [ ] Ensure that user can create a new custom providers with the right baseURL and API key.
- [ ] Click `Refresh` should retrieve a list of available models from the Custom Providers.
- [ ] User can chat with the custom providers
- [ ] Ensure that Custom Providers can be deleted and won't reappear in a new session
In general:
- [ ] Disabled Model Provider should not show up as selectable in chat input of new thread and old thread alike (old threads' chat input should show `Select Model` instead of disabled model)
#### In `Shortcuts`:
Make sure the following shortcut key combo is visible and works:
- [ ] New chat
- [ ] Toggle Sidebar
- [ ] Zoom In
- [ ] Zoom Out
- [ ] Send Message
- [ ] New Line
- [ ] Navigation
#### In `Hardware`:
Ensure that the following section information show up for hardware
- [ ] Operating System
- [ ] CPU
- [ ] Memory
- [ ] GPU (If the machine has one)
- [ ] Enabling and Disabling GPUs and ensure that model still run correctly in both mode
- [ ] Enabling or Disabling GPU should not affect the UI of the application
#### In `MCP Servers`:
- [ ] Ensure that a user can create an MCP server successfully when entering the correct information
- [ ] Ensure that `Env` value is masked by `*` in the quick view.
- [ ] If an `Env` value is missing, an error pop-up should appear.
- [ ] Ensure that deleted MCP server disappear from the `MCP Server` list without any error
- [ ] Ensure that before an MCP server is deleted, it will disable itself first and won't appear in the tool list after deletion.
- [ ] Ensure that when the content of a MCP server is edited, it will be updated and reflected accordingly in the UI and when running it.
- [ ] Toggling enable and disabled of a MCP server work properly
- [ ] A disabled MCP should not appear in the available tool list in chat input
- [ ] A disabled MCP should not be callable even when the model is explicitly prompted to use it (ensure there is no ghost MCP server)
- [ ] Ensure that enabled MCP server start automatically upon starting of the application
- [ ] An enabled MCP should show functions in the available tool list
- [ ] User can use a model and call different tool from multiple enabled MCP servers in the same thread
- [ ] If `Allow All MCP Tool Permissions` is disabled, in every new thread, before a tool is called, there should be a confirmation dialog pop up to confirm the action.
- [ ] When the user click `Deny`, the tool call will not be executed and return a message indicate so in the tool call result.
- [ ] When the user click `Allow Once` on the pop up, a confirmation dialog will appear again when the tool is called next time.
- [ ] When the user click `Always Allow` on the pop up, the tool will retain permission and won't ask for confirmation again. (this applied at an individual tool level, not at the MCP server level)
- [ ] If `Allow All MCP Tool Permissions` is enabled, in every new thread, there should not be any confirmation dialog pop up when a tool is called.
- [ ] When the pop-up appears, make sure that the `Tool Parameters` are also shown in detail in the pop-up.
- [ ] [0.6.9] Go to Enter JSON configuration when creating a new MCP => paste the JSON config inside => click `Save` => server works
- [ ] [0.6.9] If individual JSON config format is failed, the MCP server should not be activated
- [ ] [0.6.9] Make sure that MCP server can be used with streamable-http transport => connect to Smithery and test MCP server
#### In `Local API Server`:
- [ ] User can `Start Server` and chat with the default endpoint
- [ ] User should see the correct model name at `v1/models`
- [ ] User should be able to chat with it at `v1/chat/completions`
- [ ] `Open Logs` show the correct query log send to the server and return from the server
- [ ] Make sure that changing all the parameter in `Server Configuration` is reflected when `Start Server`
- [ ] [0.6.9] On server startup, the last used model is also started automatically (users do not have to manually start a model before starting the server)
- [ ] [0.6.9] Make sure that you can send an image to a Local API Server and it also works (can set up Local API Server as a Custom Provider in Jan to test)
#### In `HTTPS Proxy`:
- [ ] Model download request goes through proxy endpoint
## C. Hub
- [ ] User can click `Download` to download a model
- [ ] User can cancel a model in the middle of downloading
- [ ] User can add a Hugging Face model detail to the list by pasting a model name / model url into the search bar and press enter
- [ ] Clicking on a listing will open up the model card information within Jan and render the HTML properly
- [ ] Clicking download work on the `Show variants` section
- [ ] Clicking download work inside the Model card HTML
- [ ] [0.6.9] Check that the model recommendation base on user hardware work as expected in the Model Hub
## D. Threads
#### In the left bar:
- [ ] User can delete an old thread, and it won't reappear even when app restart
- [ ] Change the title of the thread should update its last modification date and re-organise its position in the correct chronological order on the left bar.
- [ ] The title of a new thread is the first message from the user.
- [ ] Users can starred / un-starred threads accordingly
- [ ] Starred threads should move to `Favourite` section and other threads should stay in `Recent`
- [ ] Ensure that the search thread feature return accurate result based on thread titles and contents (including from both `Favourite` and `Recent`)
- [ ] `Delete All` should delete only threads in the `Recents` section
- [ ] `Unstar All` should un-star all of the `Favourites` threads and return them to `Recent`
#### In a thread:
- [ ] When `New Chat` is clicked, the assistant is set as the last selected assistant, the model selected is set as the last used model, and the user can immediately chat with the model.
- [ ] User can conduct multi-turn conversation in a single thread without lost of data (given that `Context Shift` is not enabled)
- [ ] User can change to a different model in the middle of a conversation in a thread and the model work.
- [ ] User can click on `Regenerate` button on a returned message from the model to get a new response base on the previous context.
- [ ] User can change `Assistant` in the middle of a conversation in a thread and the new assistant setting will be applied instead.
- [ ] The chat windows can render and show all the content of a selected threads (including scroll up and down on long threads)
- [ ] Old thread retained their setting as of the last update / usage
- [ ] Assistant option
- [ ] Model option (except if the model / model provider has been deleted or disabled)
- [ ] User can send message with different type of text content (e.g text, emoji, ...)
- [ ] When request model to generate a markdown table, the table is correctly formatted as returned from the model.
- [ ] When model generate code, ensure that the code snippets is properly formatted according to the `Appearance -> Code Block` setting.
- [ ] Users can edit their old message and regenerate the answer based on the new message
- [ ] User can click `Copy` to copy the model response
- [ ] User can click `Delete` to delete either the user message or the model response.
- [ ] The token speed appear when a response from model is being generated and the final value is show under the response.
- [ ] Make sure that user when using IME keyboard to type Chinese and Japanese character and they press `Enter`, the `Send` button doesn't trigger automatically after each words.
- [ ] [0.6.9] Attach an image to the chat input and see if you can chat with it using a remote model
- [ ] [0.6.9] Attach an image to the chat input and see if you can chat with it using a local model
- [ ] [0.6.9] Check that you can paste an image to text box from your system clipboard (Copy - Paste)
- [ ] [0.6.9] Make sure that user can favourite a model in the model selection in chat input
## E. Assistants
- [ ] There is always at least one default Assistant which is Jan
- [ ] The default Jan assistant has `stream = True` by default
- [ ] User can create / edit a new assistant with different parameters and instructions choice.
- [ ] When user delete the default Assistant, the next Assistant in line will become the default Assistant and apply their setting to new chat accordingly.
- [ ] User can create / edit assistant from within a Chat windows (on the top left)
## F. After checking everything else
In `Settings -> General`:
- [ ] Change the location of the `App Data` to some other path that is not the default path
- [ ] Click on `Reset` button in `Other` to factory reset the app:
- [ ] All threads deleted
- [ ] All Assistant deleted except for default Jan Assistant
- [ ] `App Data` location is reset back to default path
- [ ] Appearance reset
- [ ] Model Providers information all reset
- [ ] Llama.cpp setting reset
- [ ] API keys cleared
- [ ] All Custom Providers deleted
- [ ] MCP Servers reset
- [ ] Local API Server reset
- [ ] HTTPS Proxy reset
- [ ] After closing the app, all models are unloaded properly
- [ ] Locate to the data folder using the `App Data` path information => delete the folder => reopen the app to check that all the folder is re-created with all the necessary data.
- [ ] Ensure that the uninstallation process removes the app successfully from the system.
## G. New App Installation
- [ ] Clean up by deleting all the left over folder created by Jan
- [ ] On MacOS
- [ ] `~/Library/Application Support/Jan`
- [ ] `~/Library/Caches/jan.ai.app`
- [ ] On Windows
- [ ] `C:\Users<Username>\AppData\Roaming\Jan\`
- [ ] `C:\Users<Username>\AppData\Local\jan.ai.app`
- [ ] On Linux
- [ ] `~/.cache/Jan`
- [ ] `~/.cache/jan.ai.app`
- [ ] `~/.local/share/Jan`
- [ ] `~/.local/share/jan.ai.app`
- [ ] Ensure that the fresh install of Jan launch
- [ ] Do some basic check to see that all function still behaved as expected. To be extra careful, you can go through the whole list again. However, it is more advisable to just check to make sure that all the core functionality like `Thread` and `Model Providers` work as intended.
# II. After release
- [ ] Check that the App Updater works and user can update to the latest release without any problem
- [ ] App restarts after the user finished an update
- [ ] Repeat section `A. Initial update / migration Data check` above to verify that update is done correctly on live version

View File

@ -1,514 +0,0 @@
import asyncio
import logging
import os
import argparse
import threading
import time
import platform
from datetime import datetime
from computer import Computer
from reportportal_client import RPClient
from reportportal_client.helpers import timestamp
from utils import scan_test_files
from test_runner import run_single_test_with_timeout
# Configure root logging for the whole test runner: INFO level,
# timestamped records, emitted to the console (stderr) only.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)  # module-level logger used throughout this script
# Platform detection
IS_WINDOWS = platform.system() == "Windows"
IS_LINUX = platform.system() == "Linux"
IS_MACOS = platform.system() == "Darwin"


def get_computer_config():
    """Return the Computer configuration dict for the current platform.

    Returns:
        dict: ``{"os_type": "windows" | "linux" | "macos"}``. Unknown
        platforms fall back to the Linux configuration with a warning.
    """
    if IS_WINDOWS:
        os_type = "windows"
    elif IS_LINUX:
        os_type = "linux"
    elif IS_MACOS:
        os_type = "macos"
    else:
        # Default fallback for platforms we do not explicitly support.
        logger.warning(f"Unknown platform {platform.system()}, using Linux config as fallback")
        os_type = "linux"
    return {"os_type": os_type}
def get_default_jan_path():
    """Return the most plausible path to the Jan executable for this OS.

    Probes a list of conventional install locations and returns the first
    one that actually exists on disk. If none exist, the first (most
    likely) candidate is returned anyway so callers always receive a
    usable default. Unknown platforms get the bare name ``"jan"`` in the
    hope that it resolves on PATH.
    """
    system = platform.system()
    if system == "Windows":
        candidates = [
            os.path.expanduser(r"~\AppData\Local\Programs\jan\Jan.exe"),
            os.path.join(os.environ.get('LOCALAPPDATA', ''), 'Programs', 'jan', 'Jan.exe'),
            os.path.join(os.environ.get('APPDATA', ''), 'jan', 'Jan.exe'),
            r"C:\Program Files\jan\Jan.exe",
            r"C:\Program Files (x86)\jan\Jan.exe",
        ]
    elif system == "Linux":
        candidates = [
            "/usr/bin/Jan",
            "/usr/local/bin/Jan",
            os.path.expanduser("~/Applications/Jan/Jan"),
            "/opt/Jan/Jan",
        ]
    elif system == "Darwin":
        candidates = [
            "/Applications/Jan.app/Contents/MacOS/Jan",
            os.path.expanduser("~/Applications/Jan.app/Contents/MacOS/Jan"),
        ]
    else:
        # Unknown platform: no conventional location to probe.
        return "jan"

    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    # Nothing found on disk — fall back to the most likely location.
    return candidates[0]
def start_computer_server():
    """Start the computer server in a background daemon thread.

    Lazily imports the ``computer_server`` package, runs its CLI entry
    point in a daemon thread, waits 5 seconds, and treats the thread
    still being alive as startup success.

    Returns:
        threading.Thread: the running server thread on success, or
        None when the package cannot be imported or the server thread
        died during startup.
    """
    try:
        logger.info("Starting computer server in background...")
        # Import computer_server module
        import computer_server
        import sys

        # Start server in a separate thread
        def run_server():
            try:
                # Save original sys.argv to avoid argument conflicts
                original_argv = sys.argv.copy()
                # Override sys.argv for computer_server to use default args.
                # NOTE(review): sys.argv is process-global; mutating it from
                # this thread could race with code in the main thread that
                # still reads argv — confirm argument parsing has finished
                # before this thread starts.
                sys.argv = ['computer_server']  # Reset to minimal args
                # Use the proper entry point
                logger.info("Calling computer_server.run_cli()...")
                computer_server.run_cli()
                logger.info("Computer server.run_cli() completed")
            except KeyboardInterrupt:
                logger.info("Computer server interrupted")
            except Exception as e:
                logger.error(f"Computer server error: {e}")
                import traceback
                logger.error(f"Traceback: {traceback.format_exc()}")
            finally:
                # Restore original sys.argv
                try:
                    sys.argv = original_argv
                except:
                    pass

        # Daemon thread: terminates automatically when the main program exits.
        server_thread = threading.Thread(target=run_server, daemon=True)
        server_thread.start()
        logger.info("Computer server thread started")
        # Give server more time to start up
        time.sleep(5)
        # Check if thread is still alive (server is running)
        if server_thread.is_alive():
            logger.info("Computer server is running successfully")
            return server_thread
        else:
            logger.error("Computer server thread died unexpectedly")
            return None
    except ImportError as e:
        logger.error(f"Cannot import computer_server module: {e}")
        logger.error("Please install computer_server package")
        return None
    except Exception as e:
        logger.error(f"Error starting computer server: {e}")
        import traceback
        logger.error(f"Traceback: {traceback.format_exc()}")
        return None
def parse_arguments():
    """Parse command-line arguments for the E2E test runner.

    Every option can also be supplied via an environment variable (named
    in each help string); the env var only seeds the argparse default, so
    an explicit CLI flag always wins.

    Returns:
        argparse.Namespace: the validated configuration.

    Raises:
        SystemExit: via ``parser.error()`` when ``--enable-reportportal``
        is set without a token.
    """
    parser = argparse.ArgumentParser(
        description="E2E Test Runner with ReportPortal integration",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Run locally without ReportPortal
python main.py
# Run with ReportPortal integration
python main.py --enable-reportportal --rp-token YOUR_TOKEN
# Run with custom Jan app path
python main.py --jan-app-path "C:/Custom/Path/Jan.exe"
# Run with different model
python main.py --model-name "gpt-4" --model-base-url "https://api.openai.com/v1"
# Using environment variables
ENABLE_REPORTPORTAL=true RP_TOKEN=xxx MODEL_NAME=gpt-4 python main.py
"""
    )
    # Get default Jan path (auto-detected per OS).
    default_jan_path = get_default_jan_path()

    # Computer server arguments
    server_group = parser.add_argument_group('Computer Server Configuration')
    server_group.add_argument(
        '--skip-server-start',
        action='store_true',
        default=os.getenv('SKIP_SERVER_START', 'false').lower() == 'true',
        help='Skip automatic computer server startup (env: SKIP_SERVER_START, default: false)'
    )

    # ReportPortal arguments
    rp_group = parser.add_argument_group('ReportPortal Configuration')
    rp_group.add_argument(
        '--enable-reportportal',
        action='store_true',
        default=os.getenv('ENABLE_REPORTPORTAL', 'false').lower() == 'true',
        help='Enable ReportPortal integration (env: ENABLE_REPORTPORTAL, default: false)'
    )
    rp_group.add_argument(
        '--rp-endpoint',
        default=os.getenv('RP_ENDPOINT', 'https://reportportal.menlo.ai'),
        help='ReportPortal endpoint URL (env: RP_ENDPOINT, default: %(default)s)'
    )
    rp_group.add_argument(
        '--rp-project',
        default=os.getenv('RP_PROJECT', 'default_personal'),
        help='ReportPortal project name (env: RP_PROJECT, default: %(default)s)'
    )
    rp_group.add_argument(
        '--rp-token',
        default=os.getenv('RP_TOKEN'),
        help='ReportPortal API token (env: RP_TOKEN, required when --enable-reportportal is used)'
    )
    rp_group.add_argument(
        '--launch-name',
        default=os.getenv('LAUNCH_NAME'),
        help='Custom launch name for ReportPortal (env: LAUNCH_NAME, default: auto-generated with timestamp)'
    )

    # Jan app arguments
    jan_group = parser.add_argument_group('Jan Application Configuration')
    jan_group.add_argument(
        '--jan-app-path',
        default=os.getenv('JAN_APP_PATH', default_jan_path),
        help=f'Path to Jan application executable (env: JAN_APP_PATH, default: auto-detected or {default_jan_path})'
    )
    jan_group.add_argument(
        '--jan-process-name',
        default=os.getenv('JAN_PROCESS_NAME', 'Jan.exe' if IS_WINDOWS else ('Jan' if IS_MACOS else 'Jan-nightly')),
        help='Jan process name for monitoring (env: JAN_PROCESS_NAME, default: platform-specific)'
    )

    # Model/Agent arguments
    model_group = parser.add_argument_group('Model Configuration')
    model_group.add_argument(
        '--model-loop',
        default=os.getenv('MODEL_LOOP', 'uitars'),
        help='Agent loop type (env: MODEL_LOOP, default: %(default)s)'
    )
    model_group.add_argument(
        '--model-provider',
        default=os.getenv('MODEL_PROVIDER', 'oaicompat'),
        help='Model provider (env: MODEL_PROVIDER, default: %(default)s)'
    )
    model_group.add_argument(
        '--model-name',
        default=os.getenv('MODEL_NAME', 'ByteDance-Seed/UI-TARS-1.5-7B'),
        help='Model name (env: MODEL_NAME, default: %(default)s)'
    )
    model_group.add_argument(
        '--model-base-url',
        default=os.getenv('MODEL_BASE_URL', 'http://10.200.108.58:1234/v1'),
        help='Model base URL (env: MODEL_BASE_URL, default: %(default)s)'
    )

    # Test execution arguments
    test_group = parser.add_argument_group('Test Execution Configuration')
    test_group.add_argument(
        '--max-turns',
        type=int,
        default=int(os.getenv('MAX_TURNS', '30')),
        help='Maximum number of turns per test (env: MAX_TURNS, default: %(default)s)'
    )
    test_group.add_argument(
        '--tests-dir',
        default=os.getenv('TESTS_DIR', 'tests'),
        help='Directory containing test files (env: TESTS_DIR, default: %(default)s)'
    )
    test_group.add_argument(
        '--delay-between-tests',
        type=int,
        default=int(os.getenv('DELAY_BETWEEN_TESTS', '3')),
        help='Delay in seconds between tests (env: DELAY_BETWEEN_TESTS, default: %(default)s)'
    )

    args = parser.parse_args()
    # Validate ReportPortal token if ReportPortal is enabled
    if args.enable_reportportal and not args.rp_token:
        parser.error("--rp-token (or RP_TOKEN env var) is required when --enable-reportportal is used")
    return args
async def main():
"""
Main function to scan and run all test files with optional ReportPortal integration
"""
# Parse command line arguments
args = parse_arguments()
# Initialize final exit code
final_exit_code = 0
# Start computer server if not skipped
server_thread = None
if not args.skip_server_start:
server_thread = start_computer_server()
if server_thread is None:
logger.error("Failed to start computer server. Exiting...")
return
else:
logger.info("Skipping computer server startup (assuming it's already running)")
try:
# Build agent config from arguments
agent_config = {
"loop": args.model_loop,
"model_provider": args.model_provider,
"model_name": args.model_name,
"model_base_url": args.model_base_url
}
# Log configuration
logger.info("=== Configuration ===")
logger.info(f"Computer server: {'STARTED' if server_thread else 'EXTERNAL'}")
logger.info(f"Tests directory: {args.tests_dir}")
logger.info(f"Max turns per test: {args.max_turns}")
logger.info(f"Delay between tests: {args.delay_between_tests}s")
logger.info(f"Jan app path: {args.jan_app_path}")
logger.info(f"Jan app exists: {os.path.exists(args.jan_app_path)}")
logger.info(f"Jan process name: {args.jan_process_name}")
logger.info(f"Model: {args.model_name}")
logger.info(f"Model URL: {args.model_base_url}")
logger.info(f"Model provider: {args.model_provider}")
logger.info(f"ReportPortal integration: {'ENABLED' if args.enable_reportportal else 'DISABLED'}")
if args.enable_reportportal:
logger.info(f"ReportPortal endpoint: {args.rp_endpoint}")
logger.info(f"ReportPortal project: {args.rp_project}")
logger.info(f"ReportPortal token: {'SET' if args.rp_token else 'NOT SET'}")
logger.info(f"Launch name: {args.launch_name if args.launch_name else 'AUTO-GENERATED'}")
logger.info("======================")
# Scan all test files
test_files = scan_test_files(args.tests_dir)
if not test_files:
logger.warning(f"No test files found in directory: {args.tests_dir}")
return
logger.info(f"Found {len(test_files)} test files")
# Track test results for final exit code
test_results = {"passed": 0, "failed": 0, "total": len(test_files)}
# Initialize ReportPortal client only if enabled
rp_client = None
launch_id = None
if args.enable_reportportal:
try:
rp_client = RPClient(
endpoint=args.rp_endpoint,
project=args.rp_project,
api_key=args.rp_token
)
# Start ReportPortal launch
current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
# Use custom launch name if provided, otherwise generate default
if args.launch_name:
launch_name = args.launch_name
logger.info(f"Using custom launch name: {launch_name}")
else:
launch_name = f"E2E Test Run - {current_time}"
logger.info(f"Using auto-generated launch name: {launch_name}")
launch_id = rp_client.start_launch(
name=launch_name,
start_time=timestamp(),
description=f"Automated E2E test run with {len(test_files)} test cases\n"
f"Model: {args.model_name}\n"
f"Max turns: {args.max_turns}"
)
logger.info(f"Started ReportPortal launch: {launch_name}")
except Exception as e:
logger.error(f"Failed to initialize ReportPortal: {e}")
logger.warning("Continuing without ReportPortal integration...")
rp_client = None
launch_id = None
else:
logger.info("Running in local development mode - results will not be uploaded to ReportPortal")
# Start computer environment
logger.info("Initializing computer environment...")
# Get platform-specific computer configuration
computer_config = get_computer_config()
logger.info(f"Using computer config: {computer_config}")
computer = Computer(
os_type=computer_config["os_type"],
use_host_computer_server=True
)
await computer.run()
logger.info("Computer environment ready")
# Run each test sequentially with turn monitoring
for i, test_data in enumerate(test_files, 1):
logger.info(f"Running test {i}/{len(test_files)}: {test_data['path']}")
try:
# Pass all configs to test runner
test_result = await run_single_test_with_timeout(
computer=computer,
test_data=test_data,
rp_client=rp_client, # Can be None
launch_id=launch_id, # Can be None
max_turns=args.max_turns,
jan_app_path=args.jan_app_path,
jan_process_name=args.jan_process_name,
agent_config=agent_config,
enable_reportportal=args.enable_reportportal
)
# Track test result - properly handle different return formats
test_passed = False
if test_result:
# Check different possible return formats
if isinstance(test_result, dict):
# Dictionary format: check 'success' key
test_passed = test_result.get('success', False)
elif isinstance(test_result, bool):
# Boolean format: direct boolean value
test_passed = test_result
elif hasattr(test_result, 'success'):
# Object format: check success attribute
test_passed = getattr(test_result, 'success', False)
else:
# Any truthy value is considered success
test_passed = bool(test_result)
else:
test_passed = False
# Update counters and log result
if test_passed:
test_results["passed"] += 1
logger.info(f"[SUCCESS] Test {i} PASSED: {test_data['path']}")
else:
test_results["failed"] += 1
logger.error(f"[FAILED] Test {i} FAILED: {test_data['path']}")
# Debug log for troubleshooting
logger.info(f"[INFO] Debug - Test result: type={type(test_result)}, value={test_result}, success_field={test_result.get('success', 'N/A') if isinstance(test_result, dict) else 'N/A'}, final_passed={test_passed}")
except Exception as e:
test_results["failed"] += 1
logger.error(f"[FAILED] Test {i} FAILED with exception: {test_data['path']} - {e}")
# Add delay between tests
if i < len(test_files):
logger.info(f"Waiting {args.delay_between_tests} seconds before next test...")
await asyncio.sleep(args.delay_between_tests)
# Log final test results summary
logger.info("=" * 50)
logger.info("TEST EXECUTION SUMMARY")
logger.info("=" * 50)
logger.info(f"Total tests: {test_results['total']}")
logger.info(f"Passed: {test_results['passed']}")
logger.info(f"Failed: {test_results['failed']}")
logger.info(f"Success rate: {(test_results['passed']/test_results['total']*100):.1f}%")
logger.info("=" * 50)
if test_results["failed"] > 0:
logger.error(f"[FAILED] Test execution completed with {test_results['failed']} failures!")
final_exit_code = 1
else:
logger.info("[SUCCESS] All tests completed successfully!")
final_exit_code = 0
except KeyboardInterrupt:
logger.info("Test execution interrupted by user")
final_exit_code = 1
except Exception as e:
logger.error(f"Error in main execution: {e}")
final_exit_code = 1
finally:
# Finish ReportPortal launch only if it was started
if args.enable_reportportal and rp_client and launch_id:
try:
rp_client.finish_launch(
launch_id=launch_id,
end_time=timestamp()
)
rp_client.session.close()
logger.info("ReportPortal launch finished and session closed")
except Exception as e:
logger.error(f"Error finishing ReportPortal launch: {e}")
# Note: daemon thread will automatically terminate when main program ends
if server_thread:
logger.info("Computer server will stop when main program exits (daemon thread)")
# Exit with appropriate code based on test results
logger.info(f"Exiting with code: {final_exit_code}")
exit(final_exit_code)
if __name__ == "__main__":
asyncio.run(main())

View File

@ -1,439 +0,0 @@
import os
import json
import mimetypes
import re
import logging
import glob
import platform
from reportportal_client.helpers import timestamp
logger = logging.getLogger(__name__)  # module-level logger for these ReportPortal helpers
def upload_turn_folder(client, test_item_id, turn_path, turn_name, force_fail=False):
    """
    Upload one turn folder's contents to ReportPortal as a STEP item.

    Each ``.json`` file in ``turn_path`` is logged pretty-printed; each
    ``.png`` is attached as a screenshot. The step finishes FAILED when
    ``force_fail`` is set or any file failed to parse/attach, otherwise
    PASSED. An empty folder produces a single WARNING entry.
    """
    step_item_id = client.start_test_item(
        parent_item_id=test_item_id,
        name=turn_name,
        start_time=timestamp(),
        item_type="STEP"
    )

    uploaded_anything = False
    had_errors = False  # any parse/attach failure marks the step FAILED

    for entry in sorted(os.listdir(turn_path)):
        full_path = os.path.join(turn_path, entry)
        if entry.endswith(".json"):
            try:
                with open(full_path, "r", encoding="utf-8") as fh:
                    payload = json.load(fh)
                client.log(
                    time=timestamp(),
                    level="INFO",
                    message=f"[{entry}]\n{json.dumps(payload, indent=2)}",
                    item_id=step_item_id
                )
                uploaded_anything = True
            except Exception as exc:
                client.log(
                    time=timestamp(),
                    level="ERROR",
                    message=f"[ERROR parsing {entry}] {str(exc)}",
                    item_id=step_item_id
                )
                had_errors = True
        elif entry.endswith(".png"):
            try:
                with open(full_path, "rb") as fh:
                    client.log(
                        time=timestamp(),
                        level="INFO",
                        message=f"Screenshot: {entry}",
                        item_id=step_item_id,
                        attachment={
                            "name": entry,
                            "data": fh.read(),
                            "mime": mimetypes.guess_type(entry)[0] or "image/png"
                        }
                    )
                uploaded_anything = True
            except Exception as exc:
                client.log(
                    time=timestamp(),
                    level="ERROR",
                    message=f"[ERROR attaching {entry}] {str(exc)}",
                    item_id=step_item_id
                )
                had_errors = True

    if not uploaded_anything:
        client.log(
            time=timestamp(),
            level="WARNING",
            message="No data found in this turn.",
            item_id=step_item_id
        )

    # A forced failure (e.g. turn-limit hit) overrides the per-file outcome.
    step_status = "FAILED" if (force_fail or had_errors) else "PASSED"
    client.finish_test_item(
        item_id=step_item_id,
        end_time=timestamp(),
        status=step_status
    )
def extract_test_result_from_trajectory(trajectory_dir):
    """
    Extract the test verdict from the last turn's API response.

    Finds the numerically-last ``turn_*`` sub-folder of ``trajectory_dir``,
    opens its numerically-last ``api_call_*_response.json``, and searches
    the final assistant message for a ``{"result": True}`` /
    ``{"result": False}`` marker (Python-literal capitalization, as the
    agent emits it).

    Returns:
        bool: True only when ``{"result": True}`` is found; False in every
        other case (missing dir, no turns, no responses,
        ``{"result": False}``, no marker, or any I/O/parse error).
    """
    logger = logging.getLogger(__name__)

    def _numeric_order(name):
        # BUGFIX: plain lexicographic sorted(...)[-1] ordered "turn_9" after
        # "turn_10" (and "api_call_9" after "api_call_10"); with up to 30
        # turns per test that picked the wrong "last" item. Order by the
        # first embedded integer, falling back to the name itself.
        match = re.search(r'(\d+)', name)
        return (int(match.group(1)) if match else -1, name)

    if not trajectory_dir or not os.path.exists(trajectory_dir):
        logger.warning(f"Trajectory directory not found: {trajectory_dir}")
        return False
    try:
        # Get all turn folders and find the last one
        turn_folders = [f for f in os.listdir(trajectory_dir)
                        if os.path.isdir(os.path.join(trajectory_dir, f)) and f.startswith("turn_")]
        if not turn_folders:
            logger.warning("No turn folders found")
            return False
        last_turn = max(turn_folders, key=_numeric_order)
        last_turn_path = os.path.join(trajectory_dir, last_turn)
        logger.info(f"Checking result in last turn: {last_turn}")

        # Look for API call response files
        response_files = [f for f in os.listdir(last_turn_path)
                          if f.startswith("api_call_") and f.endswith("_response.json")]
        if not response_files:
            logger.warning("No API response files found in last turn")
            return False
        last_response_file = max(response_files, key=_numeric_order)
        response_file_path = os.path.join(last_turn_path, last_response_file)
        logger.info(f"Checking response file: {last_response_file}")

        with open(response_file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        # Extract content from the OpenAI-style response structure.
        if 'response' in data and 'choices' in data['response'] and data['response']['choices']:
            last_choice = data['response']['choices'][-1]
            if 'message' in last_choice and 'content' in last_choice['message']:
                content = last_choice['message']['content']
                logger.info(f"Last response content: {content}")
                # Look for result patterns - need to check both True and False
                if re.search(r'\{\s*"result"\s*:\s*True\s*\}', content):
                    logger.info("Found test result: True - PASSED")
                    return True
                if re.search(r'\{\s*"result"\s*:\s*False\s*\}', content):
                    logger.info("Found test result: False - FAILED")
                    return False
                logger.warning("No valid result pattern found in response content - marking as FAILED")
                return False
        logger.warning("Could not extract content from response structure")
        return False
    except Exception as e:
        logger.error(f"Error extracting test result: {e}")
        return False
def get_jan_log_paths(is_nightly=False):
    """
    Build glob patterns for Jan application log files.

    Selects the conventional per-OS log directory for either the regular
    "Jan" build or the "Jan-nightly" build.

    Returns:
        list[str]: glob patterns matching ``*.log`` files; empty on an
        unsupported OS (with a warning).
    """
    app_name = "Jan-nightly" if is_nightly else "Jan"
    system = platform.system().lower()

    if system == "windows":
        # Windows: %APPDATA%\Jan(-nightly)\data\logs\*.log
        base = os.path.expandvars("%APPDATA%")
        return [f"{base}\\{app_name}\\data\\logs\\*.log"]
    if system == "darwin":
        # macOS: ~/Library/Application Support/Jan(-nightly)/data/logs/*.log
        base = os.path.expanduser("~")
        return [f"{base}/Library/Application Support/{app_name}/data/logs/*.log"]
    if system == "linux":
        # Linux: ~/.local/share/Jan(-nightly)/data/logs/*.log
        base = os.path.expanduser("~")
        return [f"{base}/.local/share/{app_name}/data/logs/*.log"]

    logger.warning(f"Unsupported OS: {system}")
    return []
def upload_jan_logs(client, test_item_id, is_nightly=False, max_log_files=5):
    """
    Upload Jan application log files to ReportPortal.

    Collects log files matching the per-OS glob patterns from
    get_jan_log_paths(), then uploads up to ``max_log_files`` of the most
    recently modified ones as text attachments on ``test_item_id``. Files
    larger than 50MB are skipped with a WARNING entry; all failures are
    reported to ReportPortal rather than raised.
    """
    log_patterns = get_jan_log_paths(is_nightly)
    app_type = "nightly" if is_nightly else "regular"
    logger.info(f"Looking for Jan {app_type} logs...")

    all_log_files = []
    for pattern in log_patterns:
        try:
            log_files = glob.glob(pattern)
            all_log_files.extend(log_files)
            logger.info(f"Found {len(log_files)} log files matching pattern: {pattern}")
        except Exception as e:
            logger.error(f"Error searching for logs with pattern {pattern}: {e}")

    if not all_log_files:
        logger.warning(f"No Jan {app_type} log files found")
        client.log(
            time=timestamp(),
            level="WARNING",
            message=f"[INFO] No Jan {app_type} application logs found",
            item_id=test_item_id
        )
        return

    # Sort by modification time (newest first) and limit to max_log_files
    try:
        all_log_files.sort(key=lambda x: os.path.getmtime(x), reverse=True)
        log_files_to_upload = all_log_files[:max_log_files]
        logger.info(f"Uploading {len(log_files_to_upload)} most recent Jan {app_type} log files")
        for i, log_file in enumerate(log_files_to_upload, 1):
            try:
                file_size = os.path.getsize(log_file)
                file_name = os.path.basename(log_file)
                # Check file size limit (50MB = 50 * 1024 * 1024 bytes)
                max_file_size = 50 * 1024 * 1024  # 50MB
                if file_size > max_file_size:
                    logger.warning(f"Log file {file_name} is too large ({file_size} bytes > {max_file_size} bytes), skipping upload")
                    client.log(
                        time=timestamp(),
                        level="WARNING",
                        message=f"[INFO] Log file {file_name} skipped (size: {file_size} bytes > 50MB limit)",
                        item_id=test_item_id
                    )
                    continue
                logger.info(f"Uploading log file {i}/{len(log_files_to_upload)}: {file_name} ({file_size} bytes)")
                # Read log file content (safe to read since we checked size)
                with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
                    log_content = f.read()
                # Upload as text attachment
                client.log(
                    time=timestamp(),
                    level="INFO",
                    message=f"[INFO] Jan {app_type} application log: {file_name}",
                    item_id=test_item_id,
                    attachment={
                        "name": f"jan_{app_type}_log_{i}_{file_name}",
                        "data": log_content.encode('utf-8'),
                        "mime": "text/plain"
                    }
                )
                logger.info(f"Successfully uploaded log: {file_name}")
            except Exception as e:
                # Per-file failures are reported and do not stop the loop.
                logger.error(f"Error uploading log file {log_file}: {e}")
                client.log(
                    time=timestamp(),
                    level="ERROR",
                    message=f"Failed to upload log file {os.path.basename(log_file)}: {str(e)}",
                    item_id=test_item_id
                )
        # Add summary log
        client.log(
            time=timestamp(),
            level="INFO",
            message=f"[INFO] Uploaded {len(log_files_to_upload)} Jan {app_type} log files (total available: {len(all_log_files)})",
            item_id=test_item_id
        )
    except Exception as e:
        logger.error(f"Error processing Jan logs: {e}")
        client.log(
            time=timestamp(),
            level="ERROR",
            message=f"Error processing Jan {app_type} logs: {str(e)}",
            item_id=test_item_id
        )
def upload_test_results_to_rp(client, launch_id, test_path, trajectory_dir, force_stopped=False, video_path=None, is_nightly=False):
    """
    Upload test results to ReportPortal with proper status based on test result.

    Creates a ReportPortal test item named after ``test_path``, attaches the
    screen recording and Jan application logs, uploads each turn folder from
    ``trajectory_dir``, and finishes the item as PASSED or FAILED.

    Args:
        client: ReportPortal client used to start/log/finish test items.
        launch_id: ID of the ReportPortal launch the item belongs to.
        test_path: Test case file path; used to derive the item name.
        trajectory_dir: Directory holding the per-turn trajectory folders.
        force_stopped: True when the run was aborted at the maximum turn
            limit; forces FAILED regardless of trajectory contents.
        video_path: Optional path to the screen-recording MP4 to attach.
        is_nightly: Whether a nightly Jan build is under test (forwarded to
            upload_jan_logs to locate the correct log directory).
    """
    # Missing trajectory directory: report a FAILED item with whatever
    # evidence (video) exists, then return early.
    if not trajectory_dir or not os.path.exists(trajectory_dir):
        logger.warning(f"Trajectory directory not found: {trajectory_dir}")
        formatted_test_path = test_path.replace('\\', '/').replace('.txt', '').replace('/', '__')
        test_item_id = client.start_test_item(
            launch_id=launch_id,
            name=formatted_test_path,
            start_time=timestamp(),
            item_type="TEST",
            description=f"Test case from: {test_path}"
        )
        client.log(
            time=timestamp(),
            level="ERROR",
            message="[FAILED] TEST FAILED [FAILED]\nNo trajectory directory found",
            item_id=test_item_id
        )
        # Upload video if available
        if video_path and os.path.exists(video_path):
            try:
                with open(video_path, "rb") as video_file:
                    client.log(
                        time=timestamp(),
                        level="INFO",
                        message="Screen recording of test execution",
                        item_id=test_item_id,
                        attachment={
                            "name": f"test_recording_{formatted_test_path}.mp4",
                            "data": video_file.read(),
                            # Bug fix: the recording is an MP4 file; the previous
                            # "video/x-msvideo" MIME type is for AVI.
                            "mime": "video/mp4"
                        }
                    )
                logger.info(f"Uploaded video for failed test: {video_path}")
            except Exception as e:
                logger.error(f"Error uploading video: {e}")
        client.finish_test_item(
            item_id=test_item_id,
            end_time=timestamp(),
            status="FAILED"
        )
        return
    formatted_test_path = test_path.replace('\\', '/').replace('.txt', '').replace('/', '__')
    # Determine final status: a force-stop always fails; otherwise trust the
    # result extracted from the trajectory files.
    if force_stopped:
        final_status = "FAILED"
        status_message = "exceeded maximum turn limit (30 turns)"
    else:
        test_result = extract_test_result_from_trajectory(trajectory_dir)
        if test_result is True:
            final_status = "PASSED"
            status_message = "completed successfully with positive result"
        else:
            final_status = "FAILED"
            status_message = "no valid success result found"
    # Create test item
    test_item_id = client.start_test_item(
        launch_id=launch_id,
        name=formatted_test_path,
        start_time=timestamp(),
        item_type="TEST",
        description=f"Test case from: {test_path}"
    )
    try:
        turn_folders = [f for f in os.listdir(trajectory_dir)
                        if os.path.isdir(os.path.join(trajectory_dir, f)) and f.startswith("turn_")]
        # Add clear status log
        status_emoji = "[SUCCESS]" if final_status == "PASSED" else "[FAILED]"
        client.log(
            time=timestamp(),
            level="INFO" if final_status == "PASSED" else "ERROR",
            message=f"{status_emoji} TEST {final_status} {status_emoji}\nReason: {status_message}\nTotal turns: {len(turn_folders)}",
            item_id=test_item_id
        )
        # Upload screen recording video first
        if video_path and os.path.exists(video_path):
            logger.info(f"Attempting to upload video: {video_path}")
            logger.info(f"Video file size: {os.path.getsize(video_path)} bytes")
            try:
                with open(video_path, "rb") as video_file:
                    video_data = video_file.read()
                    logger.info(f"Read video data: {len(video_data)} bytes")
                    client.log(
                        time=timestamp(),
                        level="INFO",
                        message="[INFO] Screen recording of test execution",
                        item_id=test_item_id,
                        attachment={
                            "name": f"test_recording_{formatted_test_path}.mp4",
                            "data": video_data,
                            # Bug fix: MP4 attachment, not AVI ("video/x-msvideo").
                            "mime": "video/mp4"
                        }
                    )
                logger.info(f"Successfully uploaded screen recording: {video_path}")
            except Exception as e:
                logger.error(f"Error uploading screen recording: {e}")
                client.log(
                    time=timestamp(),
                    level="WARNING",
                    message=f"Failed to upload screen recording: {str(e)}",
                    item_id=test_item_id
                )
        else:
            logger.warning(f"Video upload skipped - video_path: {video_path}, exists: {os.path.exists(video_path) if video_path else 'N/A'}")
            client.log(
                time=timestamp(),
                level="WARNING",
                message="No screen recording available for this test",
                item_id=test_item_id
            )
        # Upload Jan application logs
        logger.info("Uploading Jan application logs...")
        upload_jan_logs(client, test_item_id, is_nightly=is_nightly, max_log_files=5)
        # Upload all turn data with appropriate status.
        # If the test failed, mark all turns as failed.
        force_fail_turns = (final_status == "FAILED")
        for turn_folder in sorted(turn_folders):
            turn_path = os.path.join(trajectory_dir, turn_folder)
            upload_turn_folder(client, test_item_id, turn_path, turn_folder, force_fail=force_fail_turns)
        # Finish with correct status
        client.finish_test_item(
            item_id=test_item_id,
            end_time=timestamp(),
            status=final_status
        )
        logger.info(f"Uploaded test results for {formatted_test_path}: {final_status}")
    except Exception as e:
        logger.error(f"Error uploading test results: {e}")
        client.finish_test_item(
            item_id=test_item_id,
            end_time=timestamp(),
            status="FAILED"
        )

View File

@ -1,18 +0,0 @@
# Core dependencies
cua-computer[all]~=0.3.5
cua-agent[all]~=0.3.0
cua-agent @ git+https://github.com/janhq/cua.git@compute-agent-0.3.0-patch#subdirectory=libs/python/agent
# ReportPortal integration
reportportal-client~=5.6.5
# Screen recording and automation
opencv-python~=4.10.0
numpy~=2.2.6
PyAutoGUI~=0.9.54
# System utilities
psutil~=7.0.0
# Server component
cua-computer-server~=0.1.19

View File

@ -1,84 +0,0 @@
import cv2
import numpy as np
import pyautogui
import threading
import time
import logging
logger = logging.getLogger(__name__)
class ScreenRecorder:
    """Record the screen to a video file on a background daemon thread."""

    def __init__(self, output_path, fps=10):
        # Destination video file path (MP4 container, mp4v codec).
        self.output_path = output_path
        # Target capture rate in frames per second.
        self.fps = fps
        self.recording = False
        self.writer = None
        self.thread = None

    def start_recording(self):
        """Start screen recording (no-op if a recording is already running)."""
        if self.recording:
            logger.warning("Recording already in progress")
            return
        self.recording = True
        self.thread = threading.Thread(target=self._record_screen, daemon=True)
        self.thread.start()
        logger.info(f"Started screen recording: {self.output_path}")

    def stop_recording(self):
        """Stop recording and finalize the video file (no-op if not running)."""
        if not self.recording:
            logger.warning("No recording in progress")
            return
        self.recording = False
        if self.thread:
            self.thread.join(timeout=5)
        # Release only if the capture thread's finally block has not already
        # done so (it sets self.writer to None); this fixes the previous
        # double release and avoids releasing while the thread still writes.
        if self.writer is not None:
            self.writer.release()
            self.writer = None
        logger.info(f"Stopped screen recording: {self.output_path}")

    def _record_screen(self):
        """Capture loop run on the background thread until recording stops."""
        try:
            # Logical screen dimensions; the writer is opened at this size.
            screen_size = pyautogui.size()
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # MP4 container
            self.writer = cv2.VideoWriter(
                self.output_path,
                fourcc,
                self.fps,
                screen_size
            )
            frame_interval = 1.0 / self.fps
            while self.recording:
                try:
                    screenshot = pyautogui.screenshot()
                    # PIL image -> numpy array, then RGB -> BGR for OpenCV.
                    frame = cv2.cvtColor(np.array(screenshot), cv2.COLOR_RGB2BGR)
                    # On HiDPI/Retina displays the screenshot can be larger than
                    # the logical screen size; VideoWriter silently drops frames
                    # whose size differs from the declared one, so resize.
                    if (frame.shape[1], frame.shape[0]) != tuple(screen_size):
                        frame = cv2.resize(frame, tuple(screen_size))
                    self.writer.write(frame)
                    # Crude FPS throttle (does not subtract capture time).
                    time.sleep(frame_interval)
                except Exception as e:
                    logger.error(f"Error capturing frame: {e}")
                    break
        except Exception as e:
            logger.error(f"Error in screen recording: {e}")
        finally:
            # Finalize the file from the thread that owns the writer; clear the
            # reference so stop_recording() does not release a second time.
            if self.writer is not None:
                self.writer.release()
                self.writer = None

View File

@ -1,116 +0,0 @@
# AutoQA Scripts
This directory contains platform-specific scripts used by the AutoQA GitHub Actions workflow. These scripts help maintain a cleaner and more maintainable workflow file by extracting complex inline scripts into separate files.
## Directory Structure
```text
autoqa/scripts/
├── setup_permissions.sh # Setup executable permissions for all scripts
├── windows_cleanup.ps1 # Windows: Clean existing Jan installations
├── windows_download.ps1 # Windows: Download Jan app installer
├── windows_install.ps1 # Windows: Install Jan app
├── windows_post_cleanup.ps1 # Windows: Post-test cleanup
├── run_tests.ps1 # Windows: Run AutoQA tests
├── ubuntu_cleanup.sh # Ubuntu: Clean existing Jan installations
├── ubuntu_download.sh # Ubuntu: Download Jan app (.deb)
├── ubuntu_install.sh # Ubuntu: Install Jan app
├── ubuntu_post_cleanup.sh # Ubuntu: Post-test cleanup
├── macos_cleanup.sh # macOS: Clean existing Jan installations
├── macos_download.sh # macOS: Download Jan app (.dmg)
├── macos_install.sh # macOS: Install Jan app
├── macos_post_cleanup.sh # macOS: Post-test cleanup
├── run_tests.sh # Unix: Run AutoQA tests (Ubuntu/macOS)
├── README.md # This file
└── PERMISSIONS.md # Permission setup documentation
```
## Script Functions
### Windows Scripts (.ps1)
- **windows_cleanup.ps1**: Removes existing Jan installations and kills running processes
- **windows_download.ps1**: Downloads Jan installer with priority-based URL selection
- **windows_install.ps1**: Installs Jan app and sets environment variables
- **windows_post_cleanup.ps1**: Comprehensive cleanup after tests including uninstallation
- **run_tests.ps1**: Runs the AutoQA Python tests with proper arguments
### Ubuntu Scripts (.sh)
- **ubuntu_cleanup.sh**: Removes existing Jan installations and kills running processes
- **ubuntu_download.sh**: Downloads Jan .deb package with priority-based URL selection
- **ubuntu_install.sh**: Installs Jan .deb package and sets environment variables
- **ubuntu_post_cleanup.sh**: Comprehensive cleanup after tests including package removal
### macOS Scripts (.sh)
- **macos_cleanup.sh**: Removes existing Jan installations and kills running processes
- **macos_download.sh**: Downloads Jan .dmg package with priority-based URL selection
- **macos_install.sh**: Mounts DMG, extracts .app, and installs to Applications
- **macos_post_cleanup.sh**: Comprehensive cleanup after tests
### Common Scripts
- **setup_permissions.sh**: Automatically sets executable permissions for all shell scripts
- **run_tests.sh**: Platform-agnostic test runner for Unix-based systems (Ubuntu/macOS)
## Usage in GitHub Actions
These scripts are called from the `.github/workflows/autoqa.yml` workflow file:
```yaml
# Setup permissions first (Ubuntu/macOS)
- name: Setup script permissions
run: |
chmod +x autoqa/scripts/setup_permissions.sh
./autoqa/scripts/setup_permissions.sh
# Then use scripts without chmod
- name: Clean existing Jan installations
run: |
./autoqa/scripts/ubuntu_cleanup.sh
# Windows example (no chmod needed)
- name: Clean existing Jan installations
shell: powershell
run: |
.\autoqa\scripts\windows_cleanup.ps1
```
## Benefits
1. **Maintainability**: Complex scripts are in separate files, easier to read and modify
2. **Reusability**: Scripts can be reused across different workflows or locally
3. **Testing**: Scripts can be tested independently
4. **Version Control**: Better diff tracking for script changes
5. **Platform Consistency**: Similar functionality across platforms in separate files
## Development
When modifying these scripts:
1. Test them locally on the respective platforms
2. Ensure proper error handling and exit codes
3. Follow platform-specific best practices
4. Update this README if new scripts are added
## Script Parameters
### Windows Scripts
- Most scripts accept `-IsNightly` parameter to handle nightly vs stable builds
- Download script accepts multiple URL sources with priority ordering
### Unix Scripts
- Most scripts accept positional parameters for nightly flag and URLs
- Scripts use `$1`, `$2`, etc. for parameter access
## Environment Variables
Scripts set these environment variables for subsequent workflow steps:
- `JAN_APP_URL`: The selected Jan app download URL
- `IS_NIGHTLY`: Boolean flag indicating if it's a nightly build
- `JAN_APP_PATH`: Path to the installed Jan executable
- `JAN_PROCESS_NAME`: Name of the Jan process for monitoring

View File

@ -1,34 +0,0 @@
#!/bin/bash
# macOS cleanup script for Jan app: removes any installed Jan (stable and
# nightly) and all of its per-user data before a test run.
echo "Cleaning existing Jan installations..."

# Terminate running Jan processes; ignore pkill's non-zero exit when
# nothing matches.
for pattern in Jan jan Jan-nightly jan-nightly; do
    pkill -f "$pattern" || true
done

# Delete installed app bundles from system and user Applications folders.
rm -rf /Applications/Jan.app /Applications/Jan-nightly.app
rm -rf ~/Applications/Jan.app ~/Applications/Jan-nightly.app

# Delete per-user application-support data (stable and nightly variants).
rm -rf ~/Library/Application\ Support/Jan ~/Library/Application\ Support/Jan-nightly
rm -rf ~/Library/Application\ Support/jan.ai.app ~/Library/Application\ Support/jan-nightly.ai.app

# Delete preferences, caches, WebKit data and saved application state.
rm -rf ~/Library/Preferences/jan.* ~/Library/Preferences/jan-nightly.*
rm -rf ~/Library/Caches/jan.* ~/Library/Caches/jan-nightly.*
rm -rf ~/Library/Caches/jan.ai.app ~/Library/Caches/jan-nightly.ai.app
rm -rf ~/Library/WebKit/jan.ai.app ~/Library/WebKit/jan-nightly.ai.app
rm -rf ~/Library/Saved\ Application\ State/jan.ai.app ~/Library/Saved\ Application\ State/jan-nightly.ai.app

echo "Jan cleanup completed"

View File

@ -1,49 +0,0 @@
#!/bin/bash
# macOS download script for Jan app.
#
# Positional parameters (URL sources in priority order):
#   $1 workflow-dispatch URL      $2 workflow-dispatch nightly flag
#   $3 repository-variable URL    $4 repository-variable nightly flag
#   $5 default URL                $6 default nightly flag
#
# Exports JAN_APP_URL and IS_NIGHTLY for later workflow steps and downloads
# the DMG to /tmp/jan-installer.dmg.
WORKFLOW_INPUT_URL="$1"
WORKFLOW_INPUT_IS_NIGHTLY="$2"
REPO_VARIABLE_URL="$3"
REPO_VARIABLE_IS_NIGHTLY="$4"
DEFAULT_URL="$5"
DEFAULT_IS_NIGHTLY="$6"
# Determine Jan app URL and nightly flag from multiple sources (priority order):
# 1. Workflow dispatch input (manual trigger)
# 2. Repository variable JAN_APP_URL
# 3. Default URL from env
JAN_APP_URL=""
IS_NIGHTLY="false"
if [ -n "$WORKFLOW_INPUT_URL" ]; then
    JAN_APP_URL="$WORKFLOW_INPUT_URL"
    IS_NIGHTLY="$WORKFLOW_INPUT_IS_NIGHTLY"
    echo "Using Jan app URL from workflow input: $JAN_APP_URL"
    echo "Is nightly build: $IS_NIGHTLY"
elif [ -n "$REPO_VARIABLE_URL" ]; then
    JAN_APP_URL="$REPO_VARIABLE_URL"
    IS_NIGHTLY="$REPO_VARIABLE_IS_NIGHTLY"
    echo "Using Jan app URL from repository variable: $JAN_APP_URL"
    echo "Is nightly build: $IS_NIGHTLY"
else
    JAN_APP_URL="$DEFAULT_URL"
    IS_NIGHTLY="$DEFAULT_IS_NIGHTLY"
    echo "Using default Jan app URL: $JAN_APP_URL"
    echo "Is nightly build: $IS_NIGHTLY"
fi
# Export for later steps
echo "JAN_APP_URL=$JAN_APP_URL" >> "$GITHUB_ENV"
echo "IS_NIGHTLY=$IS_NIGHTLY" >> "$GITHUB_ENV"
echo "Downloading Jan app from: $JAN_APP_URL"
# Bug fix: without --fail (-f), curl exits 0 on HTTP errors (e.g. 404) and
# writes the server's error page to disk, so the file-existence check below
# would pass with a corrupt installer. -f makes HTTP errors fail the download.
if ! curl -fL -o "/tmp/jan-installer.dmg" "$JAN_APP_URL"; then
    echo "[FAILED] Failed to download Jan app"
    exit 1
fi
if [ ! -f "/tmp/jan-installer.dmg" ]; then
    echo "[FAILED] Failed to download Jan app"
    exit 1
fi
echo "[SUCCESS] Successfully downloaded Jan app"
ls -la "/tmp/jan-installer.dmg"

View File

@ -1,91 +0,0 @@
#!/bin/bash
# macOS install script for Jan app
#
# Mounts /tmp/jan-installer.dmg, copies the contained .app bundle into
# /Applications, locates the main executable inside Contents/MacOS, waits
# for Jan's first-run initialization, and exports JAN_APP_PATH and
# PROCESS_NAME for the test step via $GITHUB_ENV.
echo "Installing Jan app from DMG..."
# Mount the DMG
hdiutil attach "/tmp/jan-installer.dmg" -mountpoint "/tmp/jan-mount"
# Find the .app file in the mounted DMG (first match only)
APP_FILE=$(find "/tmp/jan-mount" -name "*.app" -type d | head -1)
if [ -z "$APP_FILE" ]; then
    echo "[Failed] No .app file found in DMG"
    # Best-effort unmount before bailing out
    hdiutil detach "/tmp/jan-mount" || true
    exit 1
fi
echo "Found app file: $APP_FILE"
# Copy to Applications directory
cp -R "$APP_FILE" /Applications/
# Unmount the DMG
hdiutil detach "/tmp/jan-mount"
# Determine app name and executable path
APP_NAME=$(basename "$APP_FILE")
echo "App name: $APP_NAME"
# First, check what's actually in the MacOS folder
echo "Contents of MacOS folder:"
ls -la "/Applications/$APP_NAME/Contents/MacOS/"
# Find all executable files in MacOS folder
# NOTE(review): '-perm +111' is BSD find syntax (any execute bit set); it is
# valid on macOS but was removed from GNU find — fine here, not portable.
echo "Looking for executable files..."
find "/Applications/$APP_NAME/Contents/MacOS/" -type f -perm +111 -ls
# Try to find the main executable - it's usually the one with the same name as the app (without .app)
APP_BASE_NAME=$(basename "$APP_NAME" .app)
# Candidate executables, most likely first
POTENTIAL_EXECUTABLES=(
    "/Applications/$APP_NAME/Contents/MacOS/$APP_BASE_NAME"
    "/Applications/$APP_NAME/Contents/MacOS/Jan"
    "/Applications/$APP_NAME/Contents/MacOS/Jan-nightly"
)
APP_PATH=""
for potential_exec in "${POTENTIAL_EXECUTABLES[@]}"; do
    echo "Checking: $potential_exec"
    if [ -f "$potential_exec" ] && [ -x "$potential_exec" ]; then
        APP_PATH="$potential_exec"
        echo "Found executable: $APP_PATH"
        break
    fi
done
# If still not found, get any executable file
if [ -z "$APP_PATH" ]; then
    echo "No predefined executable found, searching for any executable..."
    APP_PATH=$(find "/Applications/$APP_NAME/Contents/MacOS/" -type f -perm +111 | head -1)
fi
if [ -z "$APP_PATH" ]; then
    echo "[FAILED] No executable found in MacOS folder"
    ls -la "/Applications/$APP_NAME/Contents/MacOS/"
    exit 1
fi
PROCESS_NAME=$(basename "$APP_PATH")
echo "App installed at: /Applications/$APP_NAME"
echo "Executable path: $APP_PATH"
echo "Process name: $PROCESS_NAME"
# Export for test step
echo "JAN_APP_PATH=$APP_PATH" >> $GITHUB_ENV
echo "PROCESS_NAME=$PROCESS_NAME" >> $GITHUB_ENV
# Fixed wait for Jan's first-run setup; no readiness probe is available here.
echo "[INFO] Waiting for Jan app first initialization (120 seconds)..."
echo "This allows Jan to complete its initial setup and configuration"
sleep 120
echo "[SUCCESS] Initialization wait completed"
# Verify installation
if [ -f "$APP_PATH" ]; then
    echo "[SUCCESS] Jan app installed successfully"
    ls -la "/Applications/$APP_NAME"
else
    echo "[FAILED] Jan app installation failed - executable not found"
    exit 1
fi

View File

@ -1,38 +0,0 @@
#!/bin/bash
# macOS post-test cleanup script: removes the installed Jan app (stable and
# nightly), its per-user data, and the downloaded installer artifacts.
echo "Cleaning up after tests..."

# Terminate running Jan processes; ignore pkill's non-zero exit when
# nothing matches.
for pattern in Jan jan Jan-nightly jan-nightly; do
    pkill -f "$pattern" || true
done

# Delete installed app bundles from system and user Applications folders.
rm -rf /Applications/Jan.app /Applications/Jan-nightly.app
rm -rf ~/Applications/Jan.app ~/Applications/Jan-nightly.app

# Delete per-user application-support data (stable and nightly variants).
rm -rf ~/Library/Application\ Support/Jan ~/Library/Application\ Support/Jan-nightly
rm -rf ~/Library/Application\ Support/jan.ai.app ~/Library/Application\ Support/jan-nightly.ai.app

# Delete preferences, caches, WebKit data and saved application state.
rm -rf ~/Library/Preferences/jan.* ~/Library/Preferences/jan-nightly.*
rm -rf ~/Library/Caches/jan.* ~/Library/Caches/jan-nightly.*
rm -rf ~/Library/Caches/jan.ai.app ~/Library/Caches/jan-nightly.ai.app
rm -rf ~/Library/WebKit/jan.ai.app ~/Library/WebKit/jan-nightly.ai.app
rm -rf ~/Library/Saved\ Application\ State/jan.ai.app ~/Library/Saved\ Application\ State/jan-nightly.ai.app

# Remove the downloaded installer and its mount point.
rm -f "/tmp/jan-installer.dmg"
rm -rf "/tmp/jan-mount"

echo "Cleanup completed"

View File

@ -1,31 +0,0 @@
#!/usr/bin/env pwsh
# Windows test runner script.
#
# Runs main.py with ReportPortal enabled, passing the Jan app path and
# process name when provided, and propagates Python's exit code so the CI
# step fails when the test run fails.
param(
    [string]$JanAppPath,
    [string]$ProcessName,
    [string]$RpToken
)
Write-Host "Starting Auto QA Tests..."
Write-Host "Jan app path: $JanAppPath"
Write-Host "Process name: $ProcessName"
Write-Host "Current working directory: $(Get-Location)"
Write-Host "Contents of current directory:"
Get-ChildItem
Write-Host "Contents of trajectories directory (if exists):"
if (Test-Path "trajectories") {
    Get-ChildItem "trajectories"
} else {
    Write-Host "trajectories directory not found"
}
# Run the main test with proper arguments
if ($JanAppPath -and $ProcessName) {
    python main.py --enable-reportportal --rp-token "$RpToken" --jan-app-path "$JanAppPath" --jan-process-name "$ProcessName"
} elseif ($JanAppPath) {
    python main.py --enable-reportportal --rp-token "$RpToken" --jan-app-path "$JanAppPath"
} else {
    python main.py --enable-reportportal --rp-token "$RpToken"
}
# Bug fix: without an explicit exit, the script's process exit code may be 0
# even when the Python test run failed, so the workflow step would pass.
exit $LASTEXITCODE

View File

@ -1,69 +0,0 @@
#!/bin/bash
# Common test runner script
#
# Usage: run_tests.sh <jan_app_path> <process_name> <rp_token> <platform>
# Performs platform-specific setup (display selection on Ubuntu, app-path
# verification on macOS), then runs main.py with ReportPortal enabled.
# The script's exit status is that of the final python invocation.
JAN_APP_PATH="$1"
PROCESS_NAME="$2"
RP_TOKEN="$3"
PLATFORM="$4"
echo "Starting Auto QA Tests..."
echo "Platform: $PLATFORM"
echo "Jan app path: $JAN_APP_PATH"
echo "Process name: $PROCESS_NAME"
# Platform-specific setup
if [ "$PLATFORM" = "ubuntu" ]; then
    # Get the current display session (first logged-in session reported by w)
    export DISPLAY=$(w -h | awk 'NR==1 {print $2}')
    echo "Display ID: $DISPLAY"
    # Verify display is working
    if [ -z "$DISPLAY" ]; then
        echo "No display session found, falling back to :0"
        export DISPLAY=:0
    fi
    echo "Using display: $DISPLAY"
    # Test display connection; abort if the X server is unreachable
    xdpyinfo -display $DISPLAY >/dev/null 2>&1 || {
        echo "Display $DISPLAY is not available"
        exit 1
    }
    # Make Jan executable if needed
    if [ -f "/usr/bin/Jan-nightly" ]; then
        sudo chmod +x /usr/bin/Jan-nightly
    fi
    if [ -f "/usr/bin/Jan" ]; then
        sudo chmod +x /usr/bin/Jan
    fi
fi
# macOS specific setup
if [ "$PLATFORM" = "macos" ]; then
    # Verify Jan app path
    if [ ! -f "$JAN_APP_PATH" ]; then
        echo "❌ Jan app not found at: $JAN_APP_PATH"
        echo "Available files in /Applications:"
        ls -la /Applications/ | grep -i jan || echo "No Jan apps found"
        exit 1
    fi
fi
# Change to autoqa directory to ensure correct working directory
# (this script lives in autoqa/scripts/, so .. is the autoqa root)
cd "$(dirname "$0")/.."
echo "Current working directory: $(pwd)"
echo "Contents of current directory:"
ls -la
echo "Contents of trajectories directory (if exists):"
ls -la trajectories/ 2>/dev/null || echo "trajectories directory not found"
# Run the main test with proper arguments
if [ -n "$JAN_APP_PATH" ] && [ -n "$PROCESS_NAME" ]; then
    python main.py --enable-reportportal --rp-token "$RP_TOKEN" --jan-app-path "$JAN_APP_PATH" --jan-process-name "$PROCESS_NAME"
elif [ -n "$JAN_APP_PATH" ]; then
    python main.py --enable-reportportal --rp-token "$RP_TOKEN" --jan-app-path "$JAN_APP_PATH"
else
    python main.py --enable-reportportal --rp-token "$RP_TOKEN"
fi

View File

@ -1,80 +0,0 @@
#!/bin/bash
# Android Development Environment Setup for Jan
#
# Configures the Android SDK/NDK toolchain (paths, cross-compilers, symlinks)
# on a macOS host, prints a tool-availability report, and then — if arguments
# were given — execs them as a command inside this environment.
# NOTE(review): NDK version and Homebrew OpenJDK paths are hard-coded for the
# build machines this script targets; confirm before reusing elsewhere.
# Ensure rustup's Rust toolchain is used instead of Homebrew's
export PATH="$HOME/.cargo/bin:$PATH"
# Set JAVA_HOME for Android builds
export JAVA_HOME=/opt/homebrew/opt/openjdk@17/libexec/openjdk.jdk/Contents/Home
export PATH="/opt/homebrew/opt/openjdk@17/bin:$PATH"
export ANDROID_HOME="$HOME/Library/Android/sdk"
export ANDROID_NDK_ROOT="$HOME/Library/Android/sdk/ndk/29.0.14033849"
export NDK_HOME="$HOME/Library/Android/sdk/ndk/29.0.14033849"
# Add Android tools to PATH
export PATH=$PATH:$ANDROID_HOME/platform-tools:$ANDROID_HOME/tools:$ANDROID_HOME/cmdline-tools/latest/bin:$ANDROID_HOME/emulator:$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin
# Set up CC and CXX for Android compilation (aarch64, API level 21)
export CC_aarch64_linux_android="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang"
export CXX_aarch64_linux_android="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang++"
export AR_aarch64_linux_android="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/llvm-ar"
export RANLIB_aarch64_linux_android="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/llvm-ranlib"
# Additional environment variables for Rust cross-compilation
export CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang"
# Only set global CC and AR for Android builds (when IS_ANDROID is set),
# so host builds keep the default compilers
if [ "$IS_ANDROID" = "true" ]; then
    export CC="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang"
    export AR="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/llvm-ar"
    echo "Global CC and AR set for Android build"
fi
# Create symlinks for Android tools if they don't exist
# (some build systems expect the traditional <triple>-<tool> names)
mkdir -p ~/.local/bin
if [ ! -f ~/.local/bin/aarch64-linux-android-ranlib ]; then
    ln -sf $NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/llvm-ranlib ~/.local/bin/aarch64-linux-android-ranlib
fi
if [ ! -f ~/.local/bin/aarch64-linux-android-clang ]; then
    ln -sf $NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang ~/.local/bin/aarch64-linux-android-clang
fi
if [ ! -f ~/.local/bin/aarch64-linux-android-clang++ ]; then
    ln -sf $NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang++ ~/.local/bin/aarch64-linux-android-clang++
fi
# Fix the broken clang symlinks by ensuring base clang is available
if [ ! -f ~/.local/bin/clang ]; then
    ln -sf $NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/clang ~/.local/bin/clang
fi
if [ ! -f ~/.local/bin/clang++ ]; then
    ln -sf $NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/clang++ ~/.local/bin/clang++
fi
# Create symlinks for target-specific ar tools
if [ ! -f ~/.local/bin/aarch64-linux-android-ar ]; then
    ln -sf $NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64/bin/llvm-ar ~/.local/bin/aarch64-linux-android-ar
fi
export PATH="$HOME/.local/bin:$PATH"
echo "Android environment configured:"
echo "ANDROID_HOME: $ANDROID_HOME"
echo "ANDROID_NDK_ROOT: $ANDROID_NDK_ROOT"
echo "PATH includes NDK toolchain: $(echo $PATH | grep -o "ndk.*bin" || echo "NOT FOUND")"
# Verify required tools
echo -e "\nChecking required tools:"
which adb && echo "✅ adb found" || echo "❌ adb not found"
which emulator && echo "✅ emulator found" || echo "❌ emulator not found"
which $CC_aarch64_linux_android && echo "✅ Android clang found" || echo "❌ Android clang not found"
# Show available AVDs
echo -e "\nAvailable Android Virtual Devices:"
emulator -list-avds 2>/dev/null || echo "No AVDs found"
# Execute the provided command (if any) within this environment;
# exec replaces this shell, so nothing after this runs
if [ "$1" ]; then
    echo -e "\nExecuting: $@"
    exec "$@"
fi

View File

@ -1,15 +0,0 @@
#!/bin/bash
# Setup script permissions for AutoQA scripts: marks every shell script in
# this directory executable so the workflow can run them without chmod.
echo "Setting up permissions for AutoQA scripts..."

# Directory containing this script (and the other AutoQA shell scripts),
# resolved independently of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Make each shell script in the directory executable.
for script in "$SCRIPT_DIR"/*.sh; do
    chmod +x "$script"
done

echo "[SUCCESS] All shell scripts are now executable:"
ls -la "$SCRIPT_DIR"/*.sh
echo "[SUCCESS] Permission setup completed"

View File

@ -1,22 +0,0 @@
#!/bin/bash
# Ubuntu cleanup script for Jan app: removes per-user Jan data (stable and
# nightly) and stops any running Jan processes before a test run.
echo "Cleaning existing Jan installations..."

# Delete per-user Jan data directories (stable and nightly variants).
for dir in \
    ~/.config/Jan ~/.config/Jan-nightly \
    ~/.local/share/Jan ~/.local/share/Jan-nightly \
    ~/.cache/jan ~/.cache/jan-nightly \
    ~/.local/share/jan-nightly.ai.app ~/.local/share/jan.ai.app; do
    rm -rf "$dir"
done

# Terminate running Jan processes; ignore pkill's non-zero exit when
# nothing matches.
for pattern in Jan jan Jan-nightly jan-nightly; do
    pkill -f "$pattern" || true
done

echo "Jan cleanup completed"

View File

@ -1,57 +0,0 @@
#!/bin/bash
# Ubuntu download script for Jan app.
#
# Positional parameters (URL sources in priority order):
#   $1 workflow-dispatch URL      $2 workflow-dispatch nightly flag
#   $3 repository-variable URL    $4 repository-variable nightly flag
#   $5 default URL                $6 default nightly flag
WORKFLOW_INPUT_URL="$1"
WORKFLOW_INPUT_IS_NIGHTLY="$2"
REPO_VARIABLE_URL="$3"
REPO_VARIABLE_IS_NIGHTLY="$4"
DEFAULT_URL="$5"
DEFAULT_IS_NIGHTLY="$6"

# Resolve the Jan app URL and nightly flag. Sources in priority order:
#   1. Workflow dispatch input (manual trigger)
#   2. Repository variable JAN_APP_URL_LINUX
#   3. Default URL from env
if [ -n "$WORKFLOW_INPUT_URL" ]; then
    JAN_APP_URL="$WORKFLOW_INPUT_URL"
    IS_NIGHTLY="$WORKFLOW_INPUT_IS_NIGHTLY"
    echo "Using Jan app URL from workflow input: $JAN_APP_URL"
elif [ -n "$REPO_VARIABLE_URL" ]; then
    JAN_APP_URL="$REPO_VARIABLE_URL"
    IS_NIGHTLY="$REPO_VARIABLE_IS_NIGHTLY"
    echo "Using Jan app URL from repository variable: $JAN_APP_URL"
else
    JAN_APP_URL="$DEFAULT_URL"
    IS_NIGHTLY="$DEFAULT_IS_NIGHTLY"
    echo "Using default Jan app URL: $JAN_APP_URL"
fi
echo "Is nightly build: $IS_NIGHTLY"

# Expose the selection to later workflow steps.
echo "JAN_APP_URL=$JAN_APP_URL" >> $GITHUB_ENV
echo "IS_NIGHTLY=$IS_NIGHTLY" >> $GITHUB_ENV

echo "Downloading Jan app from: $JAN_APP_URL"
DOWNLOAD_PATH="/tmp/jan-installer.deb"

# wget exits non-zero on HTTP errors, so a failed download is caught here.
if ! wget "$JAN_APP_URL" -O "$DOWNLOAD_PATH"; then
    echo "Failed to download Jan app"
    exit 1
fi

if [ -f "$DOWNLOAD_PATH" ]; then
    FILE_SIZE=$(stat -c%s "$DOWNLOAD_PATH")
    echo "Downloaded Jan app successfully. Size: $FILE_SIZE bytes"
    echo "File saved to: $DOWNLOAD_PATH"
else
    echo "Downloaded file not found"
    exit 1
fi

View File

@ -1,39 +0,0 @@
#!/bin/bash
# Ubuntu install script for Jan app
#
# Usage: ubuntu_install.sh <is_nightly>
# Installs /tmp/jan-installer.deb via apt, waits for Jan's first-run
# initialization, then exports JAN_APP_PATH and JAN_PROCESS_NAME for the
# test step if the binary is found at the expected location.
IS_NIGHTLY="$1"
INSTALLER_PATH="/tmp/jan-installer.deb"
echo "Installing Jan app..."
echo "Is nightly build: $IS_NIGHTLY"
# Install the .deb package (apt accepts a local path to a .deb file);
# the follow-up 'install -f' resolves any missing dependencies
sudo apt install "$INSTALLER_PATH" -y
sudo apt-get install -f -y
# Wait for installation to complete
sleep 10
# Fixed wait for Jan's first-run setup; no readiness probe is available here
echo "[INFO] Waiting for Jan app first initialization (120 seconds)..."
echo "This allows Jan to complete its initial setup and configuration"
sleep 120
echo "[SUCCESS] Initialization wait completed"
# Verify installation based on nightly flag
if [ "$IS_NIGHTLY" = "true" ]; then
    DEFAULT_JAN_PATH="/usr/bin/Jan-nightly"
    PROCESS_NAME="Jan-nightly"
else
    DEFAULT_JAN_PATH="/usr/bin/Jan"
    PROCESS_NAME="Jan"
fi
if [ -f "$DEFAULT_JAN_PATH" ]; then
    echo "Jan app installed successfully at: $DEFAULT_JAN_PATH"
    echo "JAN_APP_PATH=$DEFAULT_JAN_PATH" >> $GITHUB_ENV
    echo "JAN_PROCESS_NAME=$PROCESS_NAME" >> $GITHUB_ENV
else
    # Not fatal: the test runner will attempt auto-detection instead
    echo "Jan app not found at expected location: $DEFAULT_JAN_PATH"
    echo "Will auto-detect during test run"
fi

View File

@ -1,44 +0,0 @@
#!/bin/bash
# Ubuntu post-test cleanup script: stops Jan, removes its per-user data,
# uninstalls the package matching the tested build flavour, and deletes the
# downloaded installer.
IS_NIGHTLY="$1"
echo "Cleaning up after tests..."

# Terminate running Jan processes; ignore pkill's non-zero exit when
# nothing matches.
for pattern in Jan jan Jan-nightly jan-nightly; do
    pkill -f "$pattern" || true
done

# Delete per-user Jan data directories (stable and nightly variants).
for dir in \
    ~/.config/Jan ~/.config/Jan-nightly \
    ~/.local/share/Jan ~/.local/share/Jan-nightly \
    ~/.cache/jan ~/.cache/jan-nightly \
    ~/.local/share/jan-nightly.ai.app ~/.local/share/jan.ai.app; do
    rm -rf "$dir"
done

# Pick the package name that matches the build flavour under test.
if [ "$IS_NIGHTLY" = "true" ]; then
    PACKAGE_NAME="jan-nightly"
else
    PACKAGE_NAME="jan"
fi

echo "Attempting to uninstall package: $PACKAGE_NAME"
if dpkg -l | grep -q "$PACKAGE_NAME"; then
    echo "Found package $PACKAGE_NAME, uninstalling..."
    sudo dpkg -r "$PACKAGE_NAME" || true
    sudo apt-get autoremove -y || true
else
    echo "Package $PACKAGE_NAME not found in dpkg list"
fi

# Remove the downloaded installer.
rm -f "/tmp/jan-installer.deb"

echo "Cleanup completed"

View File

@ -1,50 +0,0 @@
#!/usr/bin/env pwsh
# Windows cleanup script for Jan app: removes per-user Jan data (stable and
# nightly), stops any running Jan processes, and deletes the extensions folder.
param(
    [string]$IsNightly = "false"
)
Write-Host "Cleaning existing Jan installations..."

# Per-user data folders for both the stable and nightly builds.
$dataFolders = @(
    "$env:APPDATA\Jan",
    "$env:APPDATA\Jan-nightly",
    "$env:LOCALAPPDATA\jan.ai.app",
    "$env:LOCALAPPDATA\jan-nightly.ai.app"
)
foreach ($folder in $dataFolders) {
    if (Test-Path $folder) {
        Write-Host "Removing $folder"
        Remove-Item -Path $folder -Recurse -Force -ErrorAction SilentlyContinue
    }
}

# Terminate any running Jan processes (stable and nightly).
foreach ($name in @("Jan", "jan", "Jan-nightly", "jan-nightly")) {
    Get-Process -Name $name -ErrorAction SilentlyContinue | Stop-Process -Force -ErrorAction SilentlyContinue
}

# Remove the Jan extensions folder if present.
$janExtensionsPath = "$env:USERPROFILE\jan\extensions"
if (Test-Path $janExtensionsPath) {
    Write-Host "Removing $janExtensionsPath"
    Remove-Item -Path $janExtensionsPath -Recurse -Force -ErrorAction SilentlyContinue
}

Write-Host "Jan cleanup completed"

View File

@ -1,63 +0,0 @@
#!/usr/bin/env pwsh
# Windows download script for Jan app.
#
# Resolves the download URL and nightly flag from three sources in priority
# order, exports them via GITHUB_ENV, and downloads the installer to
# $env:TEMP\jan-installer.exe.
param(
    [string]$WorkflowInputUrl = "",
    [string]$WorkflowInputIsNightly = "",
    [string]$RepoVariableUrl = "",
    [string]$RepoVariableIsNightly = "",
    [string]$DefaultUrl = "",
    [string]$DefaultIsNightly = ""
)

# Bug fix: [System.Convert]::ToBoolean("") throws a FormatException, so an
# empty/missing nightly flag alongside a non-empty URL crashed the script.
# Treat missing values as "not a nightly build".
function ConvertTo-BoolSafe {
    param([string]$Value)
    if ([string]::IsNullOrWhiteSpace($Value)) { return $false }
    return [System.Convert]::ToBoolean($Value)
}

# Determine Jan app URL and nightly flag from multiple sources (priority order):
# 1. Workflow dispatch input (manual trigger)
# 2. Repository variable JAN_APP_URL
# 3. Default URL from env
$janAppUrl = ""
$isNightly = $false
if ($WorkflowInputUrl -ne "") {
    $janAppUrl = $WorkflowInputUrl
    $isNightly = ConvertTo-BoolSafe $WorkflowInputIsNightly
    Write-Host "Using Jan app URL from workflow input: $janAppUrl"
    Write-Host "Is nightly build: $isNightly"
}
elseif ($RepoVariableUrl -ne "") {
    $janAppUrl = $RepoVariableUrl
    $isNightly = ConvertTo-BoolSafe $RepoVariableIsNightly
    Write-Host "Using Jan app URL from repository variable: $janAppUrl"
    Write-Host "Is nightly build: $isNightly"
}
else {
    $janAppUrl = $DefaultUrl
    $isNightly = ConvertTo-BoolSafe $DefaultIsNightly
    Write-Host "Using default Jan app URL: $janAppUrl"
    Write-Host "Is nightly build: $isNightly"
}

# Set environment variables for later steps
Write-Output "JAN_APP_URL=$janAppUrl" >> $env:GITHUB_ENV
Write-Output "IS_NIGHTLY=$isNightly" >> $env:GITHUB_ENV

Write-Host "Downloading Jan app from: $janAppUrl"
$downloadPath = "$env:TEMP\jan-installer.exe"
try {
    # Use wget for better performance
    wget.exe "$janAppUrl" -O "$downloadPath"
    if (Test-Path $downloadPath) {
        $fileSize = (Get-Item $downloadPath).Length
        Write-Host "Downloaded Jan app successfully. Size: $fileSize bytes"
        Write-Host "File saved to: $downloadPath"
    } else {
        throw "Downloaded file not found"
    }
}
catch {
    Write-Error "Failed to download Jan app: $_"
    exit 1
}

View File

@ -1,48 +0,0 @@
#!/usr/bin/env pwsh
# Windows install script for Jan app
#
# Installs the previously downloaded installer ($env:TEMP\jan-installer.exe),
# waits for Jan's first-run initialization, then verifies the install location
# and exports JAN_APP_PATH / JAN_PROCESS_NAME for later workflow steps.
param(
    [string]$IsNightly = "false"
)
$installerPath = "$env:TEMP\jan-installer.exe"
$isNightly = [System.Convert]::ToBoolean($IsNightly)
Write-Host "Installing Jan app..."
Write-Host "Is nightly build: $isNightly"
# Try silent installation first; fall back to an interactive run if /S fails.
try {
    Start-Process -FilePath $installerPath -ArgumentList "/S" -Wait -NoNewWindow
    Write-Host "Jan app installed silently"
}
catch {
    Write-Host "Silent installation failed, trying normal installation..."
    Start-Process -FilePath $installerPath -Wait -NoNewWindow
}
# Wait a bit for installation to complete
Start-Sleep -Seconds 10
Write-Host "[INFO] Waiting for Jan app first initialization (120 seconds)..."
Write-Host "This allows Jan to complete its initial setup and configuration"
Start-Sleep -Seconds 120
Write-Host "[SUCCESS] Initialization wait completed"
# Verify installation based on nightly flag (nightly uses a separate folder/exe name).
if ($isNightly) {
    $defaultJanPath = "$env:LOCALAPPDATA\Programs\jan-nightly\Jan-nightly.exe"
    $processName = "Jan-nightly.exe"
} else {
    $defaultJanPath = "$env:LOCALAPPDATA\Programs\jan\Jan.exe"
    $processName = "Jan.exe"
}
if (Test-Path $defaultJanPath) {
    Write-Host "Jan app installed successfully at: $defaultJanPath"
    # Export for later steps via the GitHub Actions env file.
    Write-Output "JAN_APP_PATH=$defaultJanPath" >> $env:GITHUB_ENV
    Write-Output "JAN_PROCESS_NAME=$processName" >> $env:GITHUB_ENV
} else {
    # Non-fatal: the test harness can auto-detect the executable later.
    Write-Warning "Jan app not found at expected location: $defaultJanPath"
    Write-Host "Will auto-detect during test run"
}

View File

@ -1,102 +0,0 @@
#!/usr/bin/env pwsh
# Windows post-test cleanup script
#
# Stops any running Jan processes, deletes Jan data/install folders (regular and
# nightly variants), runs the uninstaller if present, and removes the downloaded
# installer. All removals are best-effort (-ErrorAction SilentlyContinue).
param(
    [string]$IsNightly = "false"
)
Write-Host "Cleaning up after tests..."
# Kill any running Jan processes (both regular and nightly)
foreach ($name in @("Jan", "jan", "Jan-nightly", "jan-nightly")) {
    Get-Process -Name $name -ErrorAction SilentlyContinue | Stop-Process -Force -ErrorAction SilentlyContinue
}
# Remove Jan data folders (both regular and nightly), then the extensions folder.
# Order matches the historical one-by-one removals.
$foldersToRemove = @(
    "$env:APPDATA\Jan",
    "$env:APPDATA\Jan-nightly",
    "$env:LOCALAPPDATA\jan.ai.app",
    "$env:LOCALAPPDATA\jan-nightly.ai.app",
    "$env:LOCALAPPDATA\Programs\Jan",
    "$env:LOCALAPPDATA\Programs\Jan-nightly",
    "$env:USERPROFILE\jan\extensions"
)
foreach ($folder in $foldersToRemove) {
    if (Test-Path $folder) {
        Write-Host "Removing $folder"
        Remove-Item -Path $folder -Recurse -Force -ErrorAction SilentlyContinue
    }
}
# Try to uninstall Jan app silently
try {
    $isNightly = [System.Convert]::ToBoolean($IsNightly)
    # Determine uninstaller path based on nightly flag
    if ($isNightly) {
        $uninstallerPath = "$env:LOCALAPPDATA\Programs\jan-nightly\uninstall.exe"
        $installPath = "$env:LOCALAPPDATA\Programs\jan-nightly"
    } else {
        $uninstallerPath = "$env:LOCALAPPDATA\Programs\jan\uninstall.exe"
        $installPath = "$env:LOCALAPPDATA\Programs\jan"
    }
    Write-Host "Looking for uninstaller at: $uninstallerPath"
    if (Test-Path $uninstallerPath) {
        Write-Host "Found uninstaller, attempting silent uninstall..."
        Start-Process -FilePath $uninstallerPath -ArgumentList "/S" -Wait -NoNewWindow -ErrorAction SilentlyContinue
        Write-Host "Uninstall completed"
    } else {
        Write-Host "No uninstaller found, attempting manual cleanup..."
        if (Test-Path $installPath) {
            Write-Host "Removing installation directory: $installPath"
            Remove-Item -Path $installPath -Recurse -Force -ErrorAction SilentlyContinue
        }
    }
    Write-Host "Jan app cleanup completed"
}
catch {
    Write-Warning "Failed to uninstall Jan app cleanly: $_"
    Write-Host "Manual cleanup may be required"
}
# Clean up downloaded installer
$installerPath = "$env:TEMP\jan-installer.exe"
if (Test-Path $installerPath) {
    Remove-Item -Path $installerPath -Force -ErrorAction SilentlyContinue
}
Write-Host "Cleanup completed"

View File

@ -1,322 +0,0 @@
import os
import asyncio
import threading
import time
import logging
from datetime import datetime
from pathlib import Path
# from computer import Computer
from agent import ComputerAgent, LLM
from utils import is_jan_running, force_close_jan, start_jan_app, get_latest_trajectory_folder
from screen_recorder import ScreenRecorder
from reportportal_handler import upload_test_results_to_rp
from reportportal_client.helpers import timestamp
logger = logging.getLogger(__name__)
async def run_single_test_with_timeout(computer, test_data, rp_client, launch_id, max_turns=30,
                                       jan_app_path=None, jan_process_name="Jan.exe", agent_config=None,
                                       enable_reportportal=False):
    """
    Run a single UI test case with turn-count monitoring, forced stop, and screen recording.

    Lifecycle: restart Jan, record the screen, drive the agent with the test prompt,
    enforce a max-turn budget via a watcher thread, then (optionally) upload the
    trajectory + recording to ReportPortal and always close Jan afterwards.

    Args:
        computer: target computer abstraction passed to ComputerAgent.
        test_data: dict with 'path' (relative .txt path) and 'prompt' (test instructions).
        rp_client / launch_id: ReportPortal client and launch (used only when
            enable_reportportal is True).
        max_turns: hard cap on agent turns before the test is force-failed.
        jan_app_path: Jan executable path; None uses the platform default.
        jan_process_name: process name to kill/restart; "nightly" in the name
            switches nightly-specific handling.
        agent_config: optional dict (loop/model_provider/model_name/model_base_url).
        enable_reportportal: when False, results are only logged locally.

    Returns:
        dict: {"success": bool, "status": str, "message": str,
               "trajectory_dir": str|None, "video_path": str}
    """
    path = test_data['path']
    prompt = test_data['prompt']
    # Detect if using nightly version based on process name
    is_nightly = "nightly" in jan_process_name.lower() if jan_process_name else False
    # Default agent config if not provided
    if agent_config is None:
        agent_config = {
            "loop": "uitars",
            "model_provider": "oaicompat",
            "model_name": "ByteDance-Seed/UI-TARS-1.5-7B",
            "model_base_url": "http://10.200.108.58:1234/v1"
        }
    # Create trajectory_dir from path (remove .txt extension)
    trajectory_name = str(Path(path).with_suffix(''))
    trajectory_base_dir = os.path.abspath(f"trajectories/{trajectory_name.replace(os.sep, '/')}")
    # Ensure trajectories directory exists
    os.makedirs(os.path.dirname(trajectory_base_dir), exist_ok=True)
    # Create recordings directory
    recordings_dir = "recordings"
    os.makedirs(recordings_dir, exist_ok=True)
    # Create video filename (timestamped, path separators flattened to underscores)
    current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
    safe_test_name = trajectory_name.replace('/', '_').replace('\\', '_')
    video_filename = f"{safe_test_name}_{current_time}.mp4"
    video_path = os.path.abspath(os.path.join(recordings_dir, video_filename))
    # Initialize result tracking (pessimistic defaults; updated as the test progresses)
    test_result_data = {
        "success": False,
        "status": "UNKNOWN",
        "message": "Test execution incomplete",
        "trajectory_dir": None,
        "video_path": video_path
    }
    logger.info(f"Starting test: {path}")
    logger.info(f"Current working directory: {os.getcwd()}")
    logger.info(f"Trajectory base directory: {trajectory_base_dir}")
    logger.info(f"Screen recording will be saved to: {video_path}")
    logger.info(f"Using model: {agent_config['model_name']} from {agent_config['model_base_url']}")
    logger.info(f"ReportPortal upload: {'ENABLED' if enable_reportportal else 'DISABLED'}")
    trajectory_dir = None
    agent_task = None
    monitor_stop_event = threading.Event()
    force_stopped_due_to_turns = False  # Track if test was force stopped
    # Initialize screen recorder
    recorder = ScreenRecorder(video_path, fps=10)
    try:
        # Step 1: Check and force close Jan app if running
        if is_jan_running(jan_process_name):
            logger.info("Jan application is running, force closing...")
            force_close_jan(jan_process_name)
        # Step 2: Start Jan app in maximized mode
        if jan_app_path:
            start_jan_app(jan_app_path)
        else:
            start_jan_app()  # Use default path
        # Step 3: Start screen recording
        recorder.start_recording()
        # Step 4: Create agent for this test using config
        agent = ComputerAgent(
            computer=computer,
            loop=agent_config["loop"],
            model=LLM(
                provider=agent_config["model_provider"],
                name=agent_config["model_name"],
                provider_base_url=agent_config["model_base_url"]
            ),
            trajectory_dir=trajectory_base_dir
        )
        # Step 5: Start monitoring thread — counts turn_* folders in the newest
        # trajectory run and cancels the agent task once max_turns is reached.
        def monitor_thread():
            nonlocal force_stopped_due_to_turns
            while not monitor_stop_event.is_set():
                try:
                    if os.path.exists(trajectory_base_dir):
                        folders = [f for f in os.listdir(trajectory_base_dir)
                                   if os.path.isdir(os.path.join(trajectory_base_dir, f))]
                        if folders:
                            # Newest run folder (timestamp-named, so lexical sort works)
                            latest_folder = sorted(folders)[-1]
                            latest_folder_path = os.path.join(trajectory_base_dir, latest_folder)
                            if os.path.exists(latest_folder_path):
                                turn_folders = [f for f in os.listdir(latest_folder_path)
                                                if os.path.isdir(os.path.join(latest_folder_path, f)) and f.startswith("turn_")]
                                turn_count = len(turn_folders)
                                logger.info(f"Current turn count: {turn_count}")
                                if turn_count >= max_turns:
                                    logger.warning(f"Turn count exceeded {max_turns} for test {path}, forcing stop")
                                    force_stopped_due_to_turns = True  # Mark as force stopped
                                    # Cancel the agent task
                                    if agent_task and not agent_task.done():
                                        agent_task.cancel()
                                    monitor_stop_event.set()
                                    return
                    # Check every 5 seconds (wait() doubles as a stop signal)
                    if not monitor_stop_event.wait(5):
                        continue
                    else:
                        break
                except Exception as e:
                    logger.error(f"Error in monitor thread: {e}")
                    time.sleep(5)
        # Start monitoring in background thread
        monitor_thread_obj = threading.Thread(target=monitor_thread, daemon=True)
        monitor_thread_obj.start()
        # Step 6: Run the test with prompt
        logger.info(f"Running test case: {path}")
        try:
            # Create the agent task
            async def run_agent():
                async for result in agent.run(prompt):
                    if monitor_stop_event.is_set():
                        logger.warning(f"Test {path} stopped due to turn limit")
                        break
                    logger.info(f"Test result for {path}: {result}")
                    print(result)
            agent_task = asyncio.create_task(run_agent())
            # Wait for agent task to complete or timeout
            try:
                await asyncio.wait_for(agent_task, timeout=600)  # 10 minute timeout as backup
                if not monitor_stop_event.is_set():
                    logger.info(f"Successfully completed test execution: {path}")
                else:
                    logger.warning(f"Test {path} was stopped due to turn limit")
            except asyncio.TimeoutError:
                logger.warning(f"Test {path} timed out after 10 minutes")
                agent_task.cancel()
            except asyncio.CancelledError:
                logger.warning(f"Test {path} was cancelled due to turn limit")
            finally:
                # Stop monitoring
                monitor_stop_event.set()
        except Exception as e:
            logger.error(f"Error running test {path}: {e}")
            monitor_stop_event.set()
            # Update result data for exception case
            test_result_data.update({
                "success": False,
                "status": "ERROR",
                "message": f"Test execution failed with exception: {str(e)}",
                "trajectory_dir": None
            })
    finally:
        # Step 7: Stop screen recording
        try:
            recorder.stop_recording()
            logger.info(f"Screen recording saved to: {video_path}")
        except Exception as e:
            logger.error(f"Error stopping screen recording: {e}")
        # Step 8: Upload results to ReportPortal only if enabled
        if enable_reportportal and rp_client and launch_id:
            # Get trajectory folder first
            trajectory_dir = get_latest_trajectory_folder(trajectory_base_dir)
            try:
                if trajectory_dir:
                    logger.info(f"Uploading results to ReportPortal for: {path}")
                    logger.info(f"Video path for upload: {video_path}")
                    logger.info(f"Video exists: {os.path.exists(video_path)}")
                    if os.path.exists(video_path):
                        logger.info(f"Video file size: {os.path.getsize(video_path)} bytes")
                    upload_test_results_to_rp(rp_client, launch_id, path, trajectory_dir, force_stopped_due_to_turns, video_path, is_nightly)
                else:
                    logger.warning(f"Test completed but no trajectory found for: {path}")
                    # Handle case where test completed but no trajectory found
                    formatted_test_path = path.replace('\\', '/').replace('.txt', '').replace('/', '__')
                    test_item_id = rp_client.start_test_item(
                        launch_id=launch_id,
                        name=formatted_test_path,
                        start_time=timestamp(),
                        item_type="TEST"
                    )
                    rp_client.log(
                        time=timestamp(),
                        level="ERROR",
                        message="Test execution completed but no trajectory data found",
                        item_id=test_item_id
                    )
                    # Still upload video for failed test
                    if video_path and os.path.exists(video_path):
                        try:
                            with open(video_path, "rb") as video_file:
                                rp_client.log(
                                    time=timestamp(),
                                    level="INFO",
                                    message="[INFO] Screen recording of failed test",
                                    item_id=test_item_id,
                                    attachment={
                                        "name": f"failed_test_recording_{formatted_test_path}.mp4",
                                        # FIX: the recording is MP4; video/x-msvideo is the AVI
                                        # MIME type and made players/ReportPortal misrender it.
                                        "data": video_file.read(),
                                        "mime": "video/mp4"
                                    }
                                )
                        except Exception as e:
                            logger.error(f"Error uploading video for failed test: {e}")
                    rp_client.finish_test_item(
                        item_id=test_item_id,
                        end_time=timestamp(),
                        status="FAILED"
                    )
            except Exception as upload_error:
                logger.error(f"Error uploading results for {path}: {upload_error}")
        else:
            # For non-ReportPortal mode, still get trajectory for final results
            trajectory_dir = get_latest_trajectory_folder(trajectory_base_dir)
        # Always process results for consistency (both RP and local mode)
        # trajectory_dir is already set above, no need to call get_latest_trajectory_folder again
        if trajectory_dir:
            # Extract test result for processing
            from reportportal_handler import extract_test_result_from_trajectory
            if force_stopped_due_to_turns:
                final_status = "FAILED"
                status_message = "exceeded maximum turn limit ({} turns)".format(max_turns)
                test_result_data.update({
                    "success": False,
                    "status": final_status,
                    "message": status_message,
                    "trajectory_dir": trajectory_dir
                })
            else:
                test_result = extract_test_result_from_trajectory(trajectory_dir)
                if test_result is True:
                    final_status = "PASSED"
                    status_message = "completed successfully with positive result"
                    test_result_data.update({
                        "success": True,
                        "status": final_status,
                        "message": status_message,
                        "trajectory_dir": trajectory_dir
                    })
                else:
                    final_status = "FAILED"
                    status_message = "no valid success result found"
                    test_result_data.update({
                        "success": False,
                        "status": final_status,
                        "message": status_message,
                        "trajectory_dir": trajectory_dir
                    })
            if not enable_reportportal:
                # Local development mode - log results
                logger.info(f"[INFO] LOCAL RESULT: {path} - {final_status} ({status_message})")
                logger.info(f"[INFO] Video saved: {video_path}")
                logger.info(f"[INFO] Trajectory: {trajectory_dir}")
        else:
            final_status = "FAILED"
            status_message = "no trajectory found"
            test_result_data.update({
                "success": False,
                "status": final_status,
                "message": status_message,
                "trajectory_dir": None
            })
            if not enable_reportportal:
                logger.warning(f"[INFO] LOCAL RESULT: {path} - {final_status} ({status_message})")
        # Step 9: Always force close Jan app after test completion
        logger.info(f"Cleaning up after test: {path}")
        force_close_jan(jan_process_name)
    # Return test result
    return test_result_data

View File

@ -1,17 +0,0 @@
prompt = """
You are going to test the Jan application by downloading and chatting with a model (qwen3-0.6B).
Step-by-step instructions:
1. Given the Jan application is already opened.
2. In the **bottom-left corner**, click the **Hub** menu item.
3. Scroll through the model list or use the search bar to find **qwen3-0.6B**.
4. Click **Use** on the qwen3-0.6B model.
5. Wait for the model to finish downloading and become ready.
6. Once redirected to the chat screen, type any message into the input box (e.g. `Hello World`).
7. Press **Enter** to send the message.
8. Wait for the model's response.
If the model responds correctly, return: {"result": True}, otherwise return: {"result": False}.
In all your responses, use only plain ASCII characters. Do NOT use Unicode symbols
"""

View File

@ -1,343 +0,0 @@
import os
import logging
import subprocess
import psutil
import time
import pyautogui
import platform
from pathlib import Path
logger = logging.getLogger(__name__)
# Cross-platform window management
IS_LINUX = platform.system() == "Linux"
IS_WINDOWS = platform.system() == "Windows"
IS_MACOS = platform.system() == "Darwin"
if IS_WINDOWS:
try:
import pygetwindow as gw
except ImportError:
gw = None
logger.warning("pygetwindow not available on this system")
def is_jan_running(jan_process_name="Jan.exe"):
    """Return True if any running process name contains ``jan_process_name`` (case-insensitive)."""
    target = jan_process_name.lower()
    for proc in psutil.process_iter(['pid', 'name']):
        try:
            name = proc.info['name']
            if name and target in name.lower():
                return True
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Process vanished or is inaccessible — ignore and keep scanning.
            continue
    return False
def force_close_jan(jan_process_name="Jan.exe"):
    """Kill every process whose name contains ``jan_process_name``; wait 3s if any were killed."""
    logger.info("Checking for running Jan processes...")
    target = jan_process_name.lower()
    killed_any = False
    for proc in psutil.process_iter(['pid', 'name']):
        try:
            name = proc.info['name']
            if name and target in name.lower():
                logger.info(f"Force closing Jan process (PID: {proc.info['pid']})")
                proc.kill()
                killed_any = True
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Process already gone or protected — skip it.
            continue
    if killed_any:
        logger.info("Waiting for Jan processes to terminate...")
        time.sleep(3)  # Wait for processes to fully terminate
    else:
        logger.info("No Jan processes found running")
def find_jan_window_linux():
    """Locate the Jan window on Linux via ``wmctrl -l``; return its window id or None."""
    try:
        listing = subprocess.run(['wmctrl', '-l'], capture_output=True, text=True, timeout=10)
        if listing.returncode == 0:
            for entry in listing.stdout.split('\n'):
                if 'jan' in entry.lower() or 'Jan' in entry:
                    # Window ID is the first whitespace-separated column.
                    window_id = entry.split()[0]
                    logger.info(f"Found Jan window with ID: {window_id}")
                    return window_id
    except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError) as e:
        # wmctrl missing or hung — treated as "window not found".
        logger.warning(f"wmctrl command failed: {e}")
    return None
def maximize_jan_window_linux():
    """Maximize the Jan window on Linux: wmctrl first, xdotool as fallback. Returns bool."""
    wid = find_jan_window_linux()
    if wid:
        try:
            # Ask the window manager to maximize both dimensions.
            subprocess.run(['wmctrl', '-i', '-r', wid, '-b', 'add,maximized_vert,maximized_horz'],
                           timeout=5)
            logger.info("Jan window maximized using wmctrl")
            return True
        except (subprocess.TimeoutExpired, subprocess.SubprocessError) as e:
            logger.warning(f"Failed to maximize with wmctrl: {e}")
    # Fallback: Try xdotool
    try:
        search = subprocess.run(['xdotool', 'search', '--name', 'Jan'],
                                capture_output=True, text=True, timeout=5)
        if search.returncode == 0 and search.stdout.strip():
            wid = search.stdout.strip().split('\n')[0]
            subprocess.run(['xdotool', 'windowactivate', wid], timeout=5)
            subprocess.run(['xdotool', 'key', 'alt+F10'], timeout=5)  # Maximize shortcut
            logger.info("Jan window maximized using xdotool")
            return True
    except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError) as e:
        logger.warning(f"xdotool command failed: {e}")
    return False
def find_jan_window_macos():
    """Return the name of a running Jan process on macOS (via AppleScript), or None."""
    # AppleScript to find Jan window
    script = '''
    tell application "System Events"
        set janApps to (every process whose name contains "Jan")
        if length of janApps > 0 then
            return name of first item of janApps
        else
            return ""
        end if
    end tell
    '''
    try:
        probe = subprocess.run(['osascript', '-e', script],
                               capture_output=True, text=True, timeout=10)
        if probe.returncode == 0 and probe.stdout.strip():
            app_name = probe.stdout.strip()
            logger.info(f"Found Jan app: {app_name}")
            return app_name
    except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError) as e:
        # osascript unavailable or hung — treated as "not found".
        logger.warning(f"AppleScript command failed: {e}")
    return None
def maximize_jan_window_macos():
    """Maximize (fullscreen) the Jan window on macOS: AppleScript first, hotkey fallback."""
    app_name = find_jan_window_macos()
    if app_name:
        try:
            # AppleScript: bring the app frontmost and toggle its first window fullscreen.
            script = f'''
            tell application "System Events"
                tell process "{app_name}"
                    set frontmost to true
                    tell window 1
                        set value of attribute "AXFullScreen" to true
                    end tell
                end tell
            end tell
            '''
            outcome = subprocess.run(['osascript', '-e', script], timeout=10)
            if outcome.returncode == 0:
                logger.info("Jan window maximized using AppleScript")
                return True
        except (subprocess.TimeoutExpired, subprocess.SubprocessError) as e:
            logger.warning(f"Failed to maximize with AppleScript: {e}")
    # Fallback: the Cmd+Ctrl+F fullscreen hotkey on macOS.
    try:
        logger.info("Trying Cmd+Ctrl+F hotkey to maximize")
        pyautogui.hotkey('cmd', 'ctrl', 'f')
        time.sleep(1)
        logger.info("Attempted to maximize using Cmd+Ctrl+F")
        return True
    except Exception as e:
        logger.warning(f"Hotkey maximize failed: {e}")
    return False
def maximize_jan_window():
    """
    Find and maximize Jan window (cross-platform).

    Dispatches to the Linux/macOS helpers; on Windows tries pygetwindow first.
    Note: the Alt+Space+X fallback below is only reached on Windows when
    pygetwindow is unavailable or finds no window (Linux/macOS return earlier).
    Returns True if a maximize attempt was made, False when every method failed.
    """
    try:
        # Wait a bit for window to appear
        time.sleep(2)
        if IS_LINUX:
            return maximize_jan_window_linux()
        elif IS_MACOS:
            return maximize_jan_window_macos()
        elif IS_WINDOWS and gw:
            # Method 1: Try to find window by title containing "Jan"
            windows = gw.getWindowsWithTitle("Jan")
            if windows:
                jan_window = windows[0]
                logger.info(f"Found Jan window: {jan_window.title}")
                jan_window.maximize()
                logger.info("Jan window maximized using pygetwindow")
                return True
        # Fallback methods for both platforms
        # Method 2: Try Alt+Space then X (maximize hotkey) - works on both platforms
        logger.info("Trying Alt+Space+X hotkey to maximize")
        pyautogui.hotkey('alt', 'space')
        time.sleep(0.5)
        pyautogui.press('x')
        logger.info("Attempted to maximize using Alt+Space+X")
        return True
    except Exception as e:
        logger.warning(f"Could not maximize Jan window: {e}")
        # Method 3: Platform-specific fallback (best-effort; returns True even
        # though success is not verified)
        try:
            if IS_WINDOWS:
                logger.info("Trying Windows+Up arrow to maximize")
                pyautogui.hotkey('win', 'up')
            elif IS_LINUX:
                logger.info("Trying Alt+F10 to maximize")
                pyautogui.hotkey('alt', 'F10')
            elif IS_MACOS:
                logger.info("Trying macOS specific maximize")
                pyautogui.hotkey('cmd', 'tab')  # Switch to Jan if it's running
                time.sleep(0.5)
            return True
        except Exception as e2:
            logger.warning(f"All maximize methods failed: {e2}")
            return False
def start_jan_app(jan_app_path=None):
    """
    Start Jan application in maximized window (cross-platform).

    Launches the Jan executable (platform default path when ``jan_app_path`` is
    None), waits for it to initialize, then attempts to maximize its window.
    Raises FileNotFoundError when the executable is missing and
    NotImplementedError on unsupported platforms.
    """
    # Set default path based on platform
    if jan_app_path is None:
        if IS_WINDOWS:
            jan_app_path = os.path.expanduser(r"~\AppData\Local\Programs\jan\Jan.exe")
        elif IS_LINUX:
            jan_app_path = "/usr/bin/Jan"  # default install path — TODO confirm nightly location differs
        elif IS_MACOS:
            jan_app_path = "/Applications/Jan.app/Contents/MacOS/Jan"  # Default macOS path
        else:
            raise NotImplementedError(f"Platform {platform.system()} not supported")
    logger.info(f"Starting Jan application from: {jan_app_path}")
    if not os.path.exists(jan_app_path):
        logger.error(f"Jan executable not found at: {jan_app_path}")
        raise FileNotFoundError(f"Jan app not found at {jan_app_path}")
    try:
        # Start the Jan application
        if IS_WINDOWS:
            subprocess.Popen([jan_app_path], shell=True)
        elif IS_LINUX:
            # On Linux, start with DISPLAY environment variable
            env = os.environ.copy()
            subprocess.Popen([jan_app_path], env=env)
        elif IS_MACOS:
            # On macOS, use 'open' command to launch .app bundle properly
            if jan_app_path.endswith('.app/Contents/MacOS/Jan'):
                # Use the .app bundle path instead
                app_bundle = jan_app_path.replace('/Contents/MacOS/Jan', '')
                subprocess.Popen(['open', app_bundle])
            elif jan_app_path.endswith('.app'):
                # Direct .app bundle
                subprocess.Popen(['open', jan_app_path])
            elif '/Contents/MacOS/' in jan_app_path:
                # Extract app bundle from full executable path
                app_bundle = jan_app_path.split('/Contents/MacOS/')[0]
                subprocess.Popen(['open', app_bundle])
            else:
                # Fallback: try to execute directly
                subprocess.Popen([jan_app_path])
        else:
            raise NotImplementedError(f"Platform {platform.system()} not supported")
        logger.info("Jan application started")
        # Wait for app to fully load
        logger.info("Waiting for Jan application to initialize...")
        time.sleep(5)
        # Try to maximize the window
        if maximize_jan_window():
            logger.info("Jan application maximized successfully")
        else:
            logger.warning("Could not maximize Jan application window")
        # Wait a bit more after maximizing
        time.sleep(10)
        logger.info("Jan application should be ready, waiting for additional setup...")
        time.sleep(10)  # Additional wait to ensure everything is ready
    except Exception as e:
        logger.error(f"Error starting Jan application: {e}")
        raise
def scan_test_files(tests_dir="tests"):
    """
    Recursively collect every .txt test file under ``tests_dir``.

    Returns a list of dicts: [{'path': <path relative to tests_dir>,
    'prompt': <stripped file content>}]. Unreadable files are logged and
    skipped; a missing directory yields an empty list.
    """
    tests_path = Path(tests_dir)
    if not tests_path.exists():
        logger.error(f"Tests directory {tests_dir} does not exist!")
        return []
    collected = []
    # Scan all .txt files in folder and subfolders
    for txt_file in tests_path.rglob("*.txt"):
        try:
            prompt_text = txt_file.read_text(encoding='utf-8').strip()
            rel = txt_file.relative_to(tests_path)
            collected.append({'path': str(rel), 'prompt': prompt_text})
            logger.info(f"Found test file: {rel}")
        except Exception as e:
            logger.error(f"Error reading file {txt_file}: {e}")
    return collected
def get_latest_trajectory_folder(trajectory_base_path):
    """
    Return the full path of the newest trajectory subfolder, or None.

    "Newest" means last in a descending lexical sort of folder names, which
    matches the timestamp naming scheme (e.g. 20250715_100443). Returns None
    when the base path is missing or contains no subfolders.
    """
    if not os.path.exists(trajectory_base_path):
        logger.warning(f"Trajectory base path not found: {trajectory_base_path}")
        return None
    subfolders = sorted(
        (entry for entry in os.listdir(trajectory_base_path)
         if os.path.isdir(os.path.join(trajectory_base_path, entry))),
        reverse=True,
    )
    if not subfolders:
        logger.warning(f"No trajectory folders found in: {trajectory_base_path}")
        return None
    full_path = os.path.join(trajectory_base_path, subfolders[0])
    logger.info(f"Found latest trajectory folder: {full_path}")
    return full_path

View File

@ -1,71 +0,0 @@
# Contributing to Jan Core
[← Back to Main Contributing Guide](../CONTRIBUTING.md)
TypeScript SDK providing extension system, APIs, and type definitions for all Jan components.
## Key Directories
- **`/src/browser`** - Core APIs (events, extensions, file system)
- **`/src/browser/extensions`** - Built-in extensions (assistant, inference, conversational)
- **`/src/types`** - TypeScript type definitions
- **`/src/test`** - Testing utilities
## Development
### Key Principles
1. **Platform Agnostic** - Works everywhere (browser, Node.js)
2. **Extension-Based** - New features = new extensions
3. **Type Everything** - TypeScript required
4. **Event-Driven** - Components communicate via events
### Building & Testing
```bash
# Build the SDK
yarn build
# Run tests
yarn test
# Watch mode
yarn test:watch
```
### Event System
```typescript
// Emit events
events.emit('model:loaded', { modelId: 'llama-3' })
// Listen for events
events.on('model:loaded', (data) => {
console.log('Model loaded:', data.modelId)
})
```
## Testing
```typescript
describe('MyFeature', () => {
it('should do something', () => {
const result = doSomething()
expect(result).toBe('expected')
})
})
```
## Best Practices
- Keep it simple
- Use TypeScript fully (no `any`)
- Write tests for critical features
- Follow existing patterns
- Export new modules in index files
## Dependencies
- **TypeScript** - Type safety
- **Rolldown** - Bundling
- **Vitest** - Testing

View File

@ -9,11 +9,14 @@
```js ```js
// Web / extension runtime // Web / extension runtime
import * as core from '@janhq/core' import * as core from '@janhq/core'
// Node runtime
import * as node from '@janhq/core/node'
``` ```
## Build an Extension ## Build an Extension
1. Download an extension template, for example, [https://github.com/janhq/extension-template](https://github.com/janhq/extension-template). 1. Download an extension template, for example, [https://github.com/menloresearch/extension-template](https://github.com/menloresearch/extension-template).
2. Update the source code: 2. Update the source code:

17
core/jest.config.js Normal file
View File

@ -0,0 +1,17 @@
// Jest configuration for the core package (TypeScript via ts-jest).
module.exports = {
  preset: 'ts-jest',
  testEnvironment: 'node',
  // Coverage is collected from all TS/TSX sources.
  collectCoverageFrom: ['src/**/*.{ts,tsx}'],
  // Mirror the tsconfig "@/..." path alias so imports resolve in tests.
  moduleNameMapper: {
    '@/(.*)': '<rootDir>/src/$1',
  },
  // Custom runner — see ./testRunner.js for what it changes over the default.
  runner: './testRunner.js',
  transform: {
    "^.+\\.tsx?$": [
      "ts-jest",
      {
        // Skip type-checking during tests for speed; tsc covers it in build.
        diagnostics: false,
      },
    ],
  },
}

View File

@ -17,37 +17,34 @@
"author": "Jan <service@jan.ai>", "author": "Jan <service@jan.ai>",
"scripts": { "scripts": {
"lint": "tslint --project tsconfig.json -t codeFrame 'src/**/*.ts' 'test/**/*.ts'", "lint": "tslint --project tsconfig.json -t codeFrame 'src/**/*.ts' 'test/**/*.ts'",
"test": "vitest run", "test": "jest",
"test:watch": "vitest",
"test:ui": "vitest --ui",
"test:coverage": "vitest run --coverage",
"prebuild": "rimraf dist", "prebuild": "rimraf dist",
"build": "tsc -p . && rolldown -c rolldown.config.mjs" "build": "tsc -p . && rolldown -c rolldown.config.mjs"
}, },
"devDependencies": { "devDependencies": {
"@npmcli/arborist": "^7.1.0", "@npmcli/arborist": "^7.1.0",
"@types/jest": "^30.0.0",
"@types/node": "^22.10.0", "@types/node": "^22.10.0",
"@types/react": "19.1.2", "@types/pacote": "^11.1.7",
"@vitest/coverage-v8": "^2.1.8", "@types/request": "^2.48.12",
"@vitest/ui": "^2.1.8", "electron": "33.2.1",
"eslint": "8.57.0", "eslint": "8.57.0",
"happy-dom": "^20.0.0", "eslint-plugin-jest": "^27.9.0",
"jest": "^30.0.3",
"jest-junit": "^16.0.0",
"jest-runner": "^29.7.0",
"pacote": "^21.0.0", "pacote": "^21.0.0",
"react": "19.0.0",
"request": "^2.88.2", "request": "^2.88.2",
"request-progress": "^3.0.0", "request-progress": "^3.0.0",
"rimraf": "^6.0.1", "rimraf": "^6.0.1",
"rolldown": "1.0.0-beta.1", "rolldown": "1.0.0-beta.1",
"ts-jest": "^29.2.5",
"tslib": "^2.6.2", "tslib": "^2.6.2",
"typescript": "^5.8.3", "typescript": "^5.8.3"
"vitest": "^2.1.8"
}, },
"dependencies": { "dependencies": {
"rxjs": "^7.8.1", "rxjs": "^7.8.1",
"ulidx": "^2.3.0" "ulidx": "^2.3.0"
}, },
"peerDependencies": {
"react": "19.0.0"
},
"packageManager": "yarn@4.5.3" "packageManager": "yarn@4.5.3"
} }

View File

@ -10,10 +10,41 @@ export default defineConfig([
sourcemap: true, sourcemap: true,
}, },
platform: 'browser', platform: 'browser',
external: ['path', 'react', 'react-dom', 'react/jsx-runtime'], external: ['path'],
define: { define: {
NODE: JSON.stringify(`${pkgJson.name}/${pkgJson.node}`), NODE: JSON.stringify(`${pkgJson.name}/${pkgJson.node}`),
VERSION: JSON.stringify(pkgJson.version), VERSION: JSON.stringify(pkgJson.version),
}, },
} },
{
input: 'src/node/index.ts',
external: [
'fs/promises',
'path',
'pacote',
'@types/pacote',
'@npmcli/arborist',
'ulidx',
'fs',
'request',
'crypto',
'url',
'http',
'os',
'util',
'child_process',
'electron',
'request-progress',
],
output: {
format: 'cjs',
file: 'dist/node/index.cjs.js',
sourcemap: true,
inlineDynamicImports: true,
},
resolve: {
extensions: ['.js', '.ts'],
},
platform: 'node',
},
]) ])

View File

@ -1,4 +1,6 @@
import { describe, it, expect, vi } from 'vitest' /**
* @jest-environment jsdom
*/
import { openExternalUrl } from './core' import { openExternalUrl } from './core'
import { joinPath } from './core' import { joinPath } from './core'
import { openFileExplorer } from './core' import { openFileExplorer } from './core'
@ -10,7 +12,7 @@ describe('test core apis', () => {
const url = 'http://example.com' const url = 'http://example.com'
globalThis.core = { globalThis.core = {
api: { api: {
openExternalUrl: vi.fn().mockResolvedValue('opened'), openExternalUrl: jest.fn().mockResolvedValue('opened'),
}, },
} }
const result = await openExternalUrl(url) const result = await openExternalUrl(url)
@ -22,7 +24,7 @@ describe('test core apis', () => {
const paths = ['/path/one', '/path/two'] const paths = ['/path/one', '/path/two']
globalThis.core = { globalThis.core = {
api: { api: {
joinPath: vi.fn().mockResolvedValue('/path/one/path/two'), joinPath: jest.fn().mockResolvedValue('/path/one/path/two'),
}, },
} }
const result = await joinPath(paths) const result = await joinPath(paths)
@ -34,7 +36,7 @@ describe('test core apis', () => {
const path = '/path/to/open' const path = '/path/to/open'
globalThis.core = { globalThis.core = {
api: { api: {
openFileExplorer: vi.fn().mockResolvedValue('opened'), openFileExplorer: jest.fn().mockResolvedValue('opened'),
}, },
} }
const result = await openFileExplorer(path) const result = await openFileExplorer(path)
@ -45,7 +47,7 @@ describe('test core apis', () => {
it('should get jan data folder path', async () => { it('should get jan data folder path', async () => {
globalThis.core = { globalThis.core = {
api: { api: {
getJanDataFolderPath: vi.fn().mockResolvedValue('/path/to/jan/data'), getJanDataFolderPath: jest.fn().mockResolvedValue('/path/to/jan/data'),
}, },
} }
const result = await getJanDataFolderPath() const result = await getJanDataFolderPath()
@ -56,7 +58,7 @@ describe('test core apis', () => {
describe('dirName - just a pass thru api', () => { describe('dirName - just a pass thru api', () => {
it('should retrieve the directory name from a file path', async () => { it('should retrieve the directory name from a file path', async () => {
const mockDirName = vi.fn() const mockDirName = jest.fn()
globalThis.core = { globalThis.core = {
api: { api: {
dirName: mockDirName.mockResolvedValue('/path/to'), dirName: mockDirName.mockResolvedValue('/path/to'),

View File

@ -1,5 +1,24 @@
import { SystemInformation } from '../types' import { SystemInformation } from '../types'
/**
* Execute a extension module function in main process
*
* @param extension extension name to import
* @param method function name to execute
* @param args arguments to pass to the function
* @returns Promise<any>
*
*/
const executeOnMain: (extension: string, method: string, ...args: any[]) => Promise<any> = (
extension,
method,
...args
) => {
if ('electronAPI' in window && window.electronAPI)
return globalThis.core?.api?.invokeExtensionFunc(extension, method, ...args)
return () => {}
}
/** /**
* Gets Jan's data folder path. * Gets Jan's data folder path.
* *
@ -78,6 +97,13 @@ const log: (message: string, fileName?: string) => void = (message, fileName) =>
const isSubdirectory: (from: string, to: string) => Promise<boolean> = (from: string, to: string) => const isSubdirectory: (from: string, to: string) => Promise<boolean> = (from: string, to: string) =>
globalThis.core.api?.isSubdirectory(from, to) globalThis.core.api?.isSubdirectory(from, to)
/**
* Get system information
* @returns {Promise<any>} - A promise that resolves with the system information.
*/
const systemInformation: () => Promise<SystemInformation> = () =>
globalThis.core.api?.systemInformation()
/** /**
* Show toast message from browser processes. * Show toast message from browser processes.
* @param title * @param title
@ -101,6 +127,7 @@ export type RegisterExtensionPoint = (
* Functions exports * Functions exports
*/ */
export { export {
executeOnMain,
getJanDataFolderPath, getJanDataFolderPath,
openFileExplorer, openFileExplorer,
getResourcePath, getResourcePath,
@ -110,6 +137,7 @@ export {
log, log,
isSubdirectory, isSubdirectory,
getUserHomePath, getUserHomePath,
systemInformation,
showToast, showToast,
dirName, dirName,
} }

View File

@ -1,11 +1,11 @@
import { it, expect, vi } from 'vitest'
import { events } from './events'; import { events } from './events';
import { jest } from '@jest/globals';
it('should emit an event', () => { it('should emit an event', () => {
const mockObject = { key: 'value' }; const mockObject = { key: 'value' };
globalThis.core = { globalThis.core = {
events: { events: {
emit: vi.fn() emit: jest.fn()
} }
}; };
events.emit('testEvent', mockObject); events.emit('testEvent', mockObject);
@ -14,10 +14,10 @@ it('should emit an event', () => {
it('should remove an observer for an event', () => { it('should remove an observer for an event', () => {
const mockHandler = vi.fn(); const mockHandler = jest.fn();
globalThis.core = { globalThis.core = {
events: { events: {
off: vi.fn() off: jest.fn()
} }
}; };
events.off('testEvent', mockHandler); events.off('testEvent', mockHandler);
@ -26,10 +26,10 @@ it('should remove an observer for an event', () => {
it('should add an observer for an event', () => { it('should add an observer for an event', () => {
const mockHandler = vi.fn(); const mockHandler = jest.fn();
globalThis.core = { globalThis.core = {
events: { events: {
on: vi.fn() on: jest.fn()
} }
}; };
events.on('testEvent', mockHandler); events.on('testEvent', mockHandler);

View File

@ -1,8 +1,7 @@
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'
import { BaseExtension } from './extension' import { BaseExtension } from './extension'
import { SettingComponentProps } from '../types' import { SettingComponentProps } from '../types'
vi.mock('./core') jest.mock('./core')
vi.mock('./fs') jest.mock('./fs')
class TestBaseExtension extends BaseExtension { class TestBaseExtension extends BaseExtension {
onLoad(): void {} onLoad(): void {}
@ -17,7 +16,7 @@ describe('BaseExtension', () => {
}) })
afterEach(() => { afterEach(() => {
vi.clearAllMocks() jest.resetAllMocks()
}) })
it('should have the correct properties', () => { it('should have the correct properties', () => {
@ -57,7 +56,7 @@ describe('BaseExtension', () => {
}) })
afterEach(() => { afterEach(() => {
vi.clearAllMocks() jest.resetAllMocks()
}) })
it('should have the correct properties', () => { it('should have the correct properties', () => {
@ -109,7 +108,7 @@ describe('BaseExtension', () => {
Object.defineProperty(global, 'localStorage', { Object.defineProperty(global, 'localStorage', {
value: localStorageMock, value: localStorageMock,
}) })
const mock = vi.spyOn(localStorage, 'setItem') const mock = jest.spyOn(localStorage, 'setItem')
await baseExtension.registerSettings(settings) await baseExtension.registerSettings(settings)
expect(mock).toHaveBeenCalledWith( expect(mock).toHaveBeenCalledWith(
@ -123,7 +122,7 @@ describe('BaseExtension', () => {
{ key: 'setting1', controllerProps: { value: 'value1' } } as any, { key: 'setting1', controllerProps: { value: 'value1' } } as any,
] ]
vi.spyOn(baseExtension, 'getSettings').mockResolvedValue(settings) jest.spyOn(baseExtension, 'getSettings').mockResolvedValue(settings)
const value = await baseExtension.getSetting('setting1', 'defaultValue') const value = await baseExtension.getSetting('setting1', 'defaultValue')
expect(value).toBe('value1') expect(value).toBe('value1')
@ -137,8 +136,8 @@ describe('BaseExtension', () => {
{ key: 'setting1', controllerProps: { value: 'value1' } } as any, { key: 'setting1', controllerProps: { value: 'value1' } } as any,
] ]
vi.spyOn(baseExtension, 'getSettings').mockResolvedValue(settings) jest.spyOn(baseExtension, 'getSettings').mockResolvedValue(settings)
const mockSetItem = vi.spyOn(localStorage, 'setItem') const mockSetItem = jest.spyOn(localStorage, 'setItem')
await baseExtension.updateSettings([ await baseExtension.updateSettings([
{ key: 'setting1', controllerProps: { value: 'newValue' } } as any, { key: 'setting1', controllerProps: { value: 'newValue' } } as any,

View File

@ -7,12 +7,9 @@ export enum ExtensionTypeEnum {
Inference = 'inference', Inference = 'inference',
Model = 'model', Model = 'model',
SystemMonitoring = 'systemMonitoring', SystemMonitoring = 'systemMonitoring',
MCP = 'mcp',
HuggingFace = 'huggingFace', HuggingFace = 'huggingFace',
Engine = 'engine', Engine = 'engine',
Hardware = 'hardware', Hardware = 'hardware',
RAG = 'rag',
VectorDB = 'vectorDB',
} }
export interface ExtensionType { export interface ExtensionType {
@ -128,20 +125,9 @@ export abstract class BaseExtension implements ExtensionType {
settings.forEach((setting) => { settings.forEach((setting) => {
// Keep setting value // Keep setting value
if (setting.controllerProps && Array.isArray(oldSettings)) if (setting.controllerProps && Array.isArray(oldSettings))
setting.controllerProps.value = setting.controllerProps.value = oldSettings.find(
oldSettings.find((e: any) => e.key === setting.key)?.controllerProps?.value ?? (e: any) => e.key === setting.key
setting.controllerProps.value )?.controllerProps?.value
if ('options' in setting.controllerProps)
setting.controllerProps.options = setting.controllerProps.options?.length
? setting.controllerProps.options
: oldSettings.find((e: any) => e.key === setting.key)?.controllerProps?.options
if ('recommended' in setting.controllerProps) {
const oldRecommended = oldSettings.find((e: any) => e.key === setting.key)
?.controllerProps?.recommended
if (oldRecommended !== undefined && oldRecommended !== '') {
setting.controllerProps.recommended = oldRecommended
}
}
}) })
} }
localStorage.setItem(this.name, JSON.stringify(settings)) localStorage.setItem(this.name, JSON.stringify(settings))

View File

@ -1,5 +1,4 @@
import { it, expect } from 'vitest'
import { AssistantExtension } from './assistant'; import { AssistantExtension } from './assistant';
import { ExtensionTypeEnum } from '../extension'; import { ExtensionTypeEnum } from '../extension';

View File

@ -1,4 +1,3 @@
import { describe, it, test, expect, beforeEach } from 'vitest'
import { ConversationalExtension } from './conversational' import { ConversationalExtension } from './conversational'
import { ExtensionTypeEnum } from '../extension' import { ExtensionTypeEnum } from '../extension'
import { Thread, ThreadAssistantInfo, ThreadMessage } from '../../types' import { Thread, ThreadAssistantInfo, ThreadMessage } from '../../types'
@ -250,4 +249,4 @@ describe('ConversationalExtension', () => {
expect(retrievedAssistant.modelId).toBe('') expect(retrievedAssistant.modelId).toBe('')
}) })
}) })

View File

@ -1,11 +1,10 @@
import { describe, it, expect, beforeEach, vi } from 'vitest'
import { AIEngine } from './AIEngine' import { AIEngine } from './AIEngine'
import { events } from '../../events' import { events } from '../../events'
import { ModelEvent, Model } from '../../../types' import { ModelEvent, Model } from '../../../types'
vi.mock('../../events') jest.mock('../../events')
vi.mock('./EngineManager') jest.mock('./EngineManager')
vi.mock('../../fs') jest.mock('../../fs')
class TestAIEngine extends AIEngine { class TestAIEngine extends AIEngine {
onUnload(): void {} onUnload(): void {}
@ -14,38 +13,6 @@ class TestAIEngine extends AIEngine {
inference(data: any) {} inference(data: any) {}
stopInference() {} stopInference() {}
async list(): Promise<any[]> {
return []
}
async load(modelId: string): Promise<any> {
return { pid: 1, port: 8080, model_id: modelId, model_path: '', api_key: '' }
}
async unload(sessionId: string): Promise<any> {
return { success: true }
}
async chat(opts: any): Promise<any> {
return { id: 'test', object: 'chat.completion', created: Date.now(), model: 'test', choices: [] }
}
async delete(modelId: string): Promise<void> {
return
}
async import(modelId: string, opts: any): Promise<void> {
return
}
async abortImport(modelId: string): Promise<void> {
return
}
async getLoadedModels(): Promise<string[]> {
return []
}
} }
describe('AIEngine', () => { describe('AIEngine', () => {
@ -53,34 +20,38 @@ describe('AIEngine', () => {
beforeEach(() => { beforeEach(() => {
engine = new TestAIEngine('', '') engine = new TestAIEngine('', '')
vi.clearAllMocks() jest.clearAllMocks()
}) })
it('should load model successfully', async () => { it('should load model if provider matches', async () => {
const modelId = 'model1' const model: any = { id: 'model1', engine: 'test-provider' } as any
const result = await engine.load(modelId) await engine.loadModel(model)
expect(result).toEqual({ pid: 1, port: 8080, model_id: modelId, model_path: '', api_key: '' }) expect(events.emit).toHaveBeenCalledWith(ModelEvent.OnModelReady, model)
}) })
it('should unload model successfully', async () => { it('should not load model if provider does not match', async () => {
const sessionId = 'session1' const model: any = { id: 'model1', engine: 'other-provider' } as any
const result = await engine.unload(sessionId) await engine.loadModel(model)
expect(result).toEqual({ success: true }) expect(events.emit).not.toHaveBeenCalledWith(ModelEvent.OnModelReady, model)
}) })
it('should list models', async () => { it('should unload model if provider matches', async () => {
const result = await engine.list() const model: Model = { id: 'model1', version: '1.0', engine: 'test-provider' } as any
expect(result).toEqual([]) await engine.unloadModel(model)
expect(events.emit).toHaveBeenCalledWith(ModelEvent.OnModelStopped, model)
}) })
it('should get loaded models', async () => { it('should not unload model if provider does not match', async () => {
const result = await engine.getLoadedModels() const model: Model = { id: 'model1', version: '1.0', engine: 'other-provider' } as any
expect(result).toEqual([]) await engine.unloadModel(model)
expect(events.emit).not.toHaveBeenCalledWith(ModelEvent.OnModelStopped, model)
}) })
}) })

View File

@ -1,237 +1,24 @@
import { events } from '../../events'
import { BaseExtension } from '../../extension' import { BaseExtension } from '../../extension'
import { MessageRequest, Model, ModelEvent } from '../../../types'
import { EngineManager } from './EngineManager' import { EngineManager } from './EngineManager'
/* AIEngine class types */
export interface chatCompletionRequestMessage {
role: 'system' | 'user' | 'assistant' | 'tool'
content: string | null | Content[] // Content can be a string OR an array of content parts
reasoning?: string | null // Some models return reasoning in completed responses
reasoning_content?: string | null // Some models return reasoning in completed responses
name?: string
tool_calls?: any[] // Simplified tool_call_id?: string
}
export interface Content {
type: 'text' | 'image_url' | 'input_audio'
text?: string
image_url?: string
input_audio?: InputAudio
}
export interface InputAudio {
data: string // Base64 encoded audio data
format: 'mp3' | 'wav' | 'ogg' | 'flac' // Add more formats as needed/llama-server seems to support mp3
}
export interface ToolFunction {
name: string // Required: a-z, A-Z, 0-9, _, -, max length 64
description?: string
parameters?: Record<string, unknown> // JSON Schema object
strict?: boolean | null // Defaults to false
}
export interface Tool {
type: 'function' // Currently, only 'function' is supported
function: ToolFunction
}
export interface ToolCallOptions {
tools?: Tool[]
}
// A specific tool choice to force the model to call
export interface ToolCallSpec {
type: 'function'
function: {
name: string
}
}
// tool_choice may be one of several modes or a specific call
export type ToolChoice = 'none' | 'auto' | 'required' | ToolCallSpec
export interface chatCompletionRequest {
model: string // Model ID, though for local it might be implicit via sessionInfo
messages: chatCompletionRequestMessage[]
thread_id?: string // Thread/conversation ID for context tracking
return_progress?: boolean
tools?: Tool[]
tool_choice?: ToolChoice
// Core sampling parameters
temperature?: number | null
dynatemp_range?: number | null
dynatemp_exponent?: number | null
top_k?: number | null
top_p?: number | null
min_p?: number | null
typical_p?: number | null
repeat_penalty?: number | null
repeat_last_n?: number | null
presence_penalty?: number | null
frequency_penalty?: number | null
dry_multiplier?: number | null
dry_base?: number | null
dry_allowed_length?: number | null
dry_penalty_last_n?: number | null
dry_sequence_breakers?: string[] | null
xtc_probability?: number | null
xtc_threshold?: number | null
mirostat?: number | null // 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0
mirostat_tau?: number | null
mirostat_eta?: number | null
n_predict?: number | null
n_indent?: number | null
n_keep?: number | null
stream?: boolean | null
stop?: string | string[] | null
seed?: number | null // RNG seed
// Advanced sampling
logit_bias?: { [key: string]: number } | null
n_probs?: number | null
min_keep?: number | null
t_max_predict_ms?: number | null
image_data?: Array<{ data: string; id: number }> | null
// Internal/optimization parameters
id_slot?: number | null
cache_prompt?: boolean | null
return_tokens?: boolean | null
samplers?: string[] | null
timings_per_token?: boolean | null
post_sampling_probs?: boolean | null
chat_template_kwargs?: chat_template_kdict | null
}
export interface chat_template_kdict {
enable_thinking: false
}
export interface chatCompletionChunkChoiceDelta {
content?: string | null
role?: 'system' | 'user' | 'assistant' | 'tool'
tool_calls?: any[] // Simplified
}
export interface chatCompletionChunkChoice {
index: number
delta: chatCompletionChunkChoiceDelta
finish_reason?: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | null
}
export interface chatCompletionPromptProgress {
cache: number
processed: number
time_ms: number
total: number
}
export interface chatCompletionChunk {
id: string
object: 'chat.completion.chunk'
created: number
model: string
choices: chatCompletionChunkChoice[]
system_fingerprint?: string
prompt_progress?: chatCompletionPromptProgress
}
export interface chatCompletionChoice {
index: number
message: chatCompletionRequestMessage // Response message
finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call'
logprobs?: any // Simplified
}
export interface chatCompletion {
id: string
object: 'chat.completion'
created: number
model: string // Model ID used
choices: chatCompletionChoice[]
usage?: {
prompt_tokens: number
completion_tokens: number
total_tokens: number
}
system_fingerprint?: string
}
// --- End OpenAI types ---
// Shared model metadata
export interface modelInfo {
id: string // e.g. "qwen3-4B" or "org/model/quant"
name: string // humanreadable, e.g., "Qwen3 4B Q4_0"
quant_type?: string // q4_0 (optional as it might be part of ID or name)
providerId: string // e.g. "llama.cpp"
port: number
sizeBytes: number
tags?: string[]
path?: string // Absolute path to the model file, if applicable
// Additional provider-specific metadata can be added here
[key: string]: any
}
// 1. /list
export type listResult = modelInfo[]
export interface SessionInfo {
pid: number // opaque handle for unload/chat
port: number // llama-server output port (corrected from portid)
model_id: string //name of the model
model_path: string // path of the loaded model
is_embedding: boolean
api_key: string
mmproj_path?: string
}
export interface UnloadResult {
success: boolean
error?: string
}
// 5. /chat
export interface chatOptions {
providerId: string
sessionId: string
/** Full OpenAI ChatCompletionRequest payload */
payload: chatCompletionRequest
}
// Output for /chat will be Promise<ChatCompletion> for non-streaming
// or Promise<AsyncIterable<ChatCompletionChunk>> for streaming
// 7. /import
export interface ImportOptions {
modelPath: string
mmprojPath?: string
modelSha256?: string
modelSize?: number
mmprojSha256?: string
mmprojSize?: number
}
export interface importResult {
success: boolean
modelInfo?: modelInfo
error?: string
}
/** /**
* Base AIEngine * Base AIEngine
* Applicable to all AI Engines * Applicable to all AI Engines
*/ */
export abstract class AIEngine extends BaseExtension { export abstract class AIEngine extends BaseExtension {
// The inference engine ID, implementing the readonly providerId from interface // The inference engine
abstract readonly provider: string abstract provider: string
/** /**
* On extension load, subscribe to events. * On extension load, subscribe to events.
*/ */
override onLoad() { override onLoad() {
this.registerEngine() this.registerEngine()
events.on(ModelEvent.OnModelInit, (model: Model) => this.loadModel(model))
events.on(ModelEvent.OnModelStop, (model: Model) => this.unloadModel(model))
} }
/** /**
@ -242,61 +29,29 @@ export abstract class AIEngine extends BaseExtension {
} }
/** /**
* Gets model info * Loads the model.
* @param modelId
*/ */
abstract get(modelId: string): Promise<modelInfo | undefined> async loadModel(model: Partial<Model>, abortController?: AbortController): Promise<any> {
if (model?.engine?.toString() !== this.provider) return Promise.resolve()
events.emit(ModelEvent.OnModelReady, model)
return Promise.resolve()
}
/**
* Stops the model.
*/
async unloadModel(model?: Partial<Model>): Promise<any> {
if (model?.engine && model.engine.toString() !== this.provider) return Promise.resolve()
events.emit(ModelEvent.OnModelStopped, model ?? {})
return Promise.resolve()
}
/** /**
* Lists available models * Inference request
*/ */
abstract list(): Promise<modelInfo[]> inference(data: MessageRequest) {}
/** /**
* Loads a model into memory * Stop inference
*/ */
abstract load(modelId: string, settings?: any): Promise<SessionInfo> stopInference() {}
/**
* Unloads a model from memory
*/
abstract unload(sessionId: string): Promise<UnloadResult>
/**
* Sends a chat request to the model
*/
abstract chat(
opts: chatCompletionRequest,
abortController?: AbortController
): Promise<chatCompletion | AsyncIterable<chatCompletionChunk>>
/**
* Deletes a model
*/
abstract delete(modelId: string): Promise<void>
/**
* Updates a model
*/
abstract update(modelId: string, model: Partial<modelInfo>): Promise<void>
/**
* Imports a model
*/
abstract import(modelId: string, opts: ImportOptions): Promise<void>
/**
* Aborts an ongoing model import
*/
abstract abortImport(modelId: string): Promise<void>
/**
* Get currently loaded models
*/
abstract getLoadedModels(): Promise<string[]>
/**
* Check if a tool is supported by the model
* @param modelId
*/
abstract isToolSupported(modelId: string): Promise<boolean>
} }

View File

@ -1,4 +1,6 @@
import { describe, it, test, expect, beforeEach } from 'vitest' /**
* @jest-environment jsdom
*/
import { EngineManager } from './EngineManager' import { EngineManager } from './EngineManager'
import { AIEngine } from './AIEngine' import { AIEngine } from './AIEngine'
import { InferenceEngine } from '../../../types' import { InferenceEngine } from '../../../types'
@ -41,41 +43,41 @@ describe('EngineManager', () => {
}) })
describe('cortex engine migration', () => { describe('cortex engine migration', () => {
test.skip('should map nitro to cortex engine', () => { test('should map nitro to cortex engine', () => {
const cortexEngine = new MockAIEngine(InferenceEngine.cortex) const cortexEngine = new MockAIEngine(InferenceEngine.cortex)
// @ts-ignore // @ts-ignore
engineManager.register(cortexEngine) engineManager.register(cortexEngine)
// @ts-ignore // @ts-ignore
const retrievedEngine = engineManager.get<MockAIEngine>(InferenceEngine.nitro) const retrievedEngine = engineManager.get<MockAIEngine>(InferenceEngine.nitro)
expect(retrievedEngine).toBe(cortexEngine) expect(retrievedEngine).toBe(cortexEngine)
}) })
test.skip('should map cortex_llamacpp to cortex engine', () => { test('should map cortex_llamacpp to cortex engine', () => {
const cortexEngine = new MockAIEngine(InferenceEngine.cortex) const cortexEngine = new MockAIEngine(InferenceEngine.cortex)
// @ts-ignore // @ts-ignore
engineManager.register(cortexEngine) engineManager.register(cortexEngine)
// @ts-ignore // @ts-ignore
const retrievedEngine = engineManager.get<MockAIEngine>(InferenceEngine.cortex_llamacpp) const retrievedEngine = engineManager.get<MockAIEngine>(InferenceEngine.cortex_llamacpp)
expect(retrievedEngine).toBe(cortexEngine) expect(retrievedEngine).toBe(cortexEngine)
}) })
test.skip('should map cortex_onnx to cortex engine', () => { test('should map cortex_onnx to cortex engine', () => {
const cortexEngine = new MockAIEngine(InferenceEngine.cortex) const cortexEngine = new MockAIEngine(InferenceEngine.cortex)
// @ts-ignore // @ts-ignore
engineManager.register(cortexEngine) engineManager.register(cortexEngine)
// @ts-ignore // @ts-ignore
const retrievedEngine = engineManager.get<MockAIEngine>(InferenceEngine.cortex_onnx) const retrievedEngine = engineManager.get<MockAIEngine>(InferenceEngine.cortex_onnx)
expect(retrievedEngine).toBe(cortexEngine) expect(retrievedEngine).toBe(cortexEngine)
}) })
test.skip('should map cortex_tensorrtllm to cortex engine', () => { test('should map cortex_tensorrtllm to cortex engine', () => {
const cortexEngine = new MockAIEngine(InferenceEngine.cortex) const cortexEngine = new MockAIEngine(InferenceEngine.cortex)
// @ts-ignore // @ts-ignore
engineManager.register(cortexEngine) engineManager.register(cortexEngine)
// @ts-ignore // @ts-ignore
const retrievedEngine = engineManager.get<MockAIEngine>(InferenceEngine.cortex_tensorrtllm) const retrievedEngine = engineManager.get<MockAIEngine>(InferenceEngine.cortex_tensorrtllm)
expect(retrievedEngine).toBe(cortexEngine) expect(retrievedEngine).toBe(cortexEngine)
@ -87,19 +89,19 @@ describe('EngineManager', () => {
const mockEngineManager = new EngineManager() const mockEngineManager = new EngineManager()
// @ts-ignore // @ts-ignore
window.core = { engineManager: mockEngineManager } window.core = { engineManager: mockEngineManager }
const instance = EngineManager.instance() const instance = EngineManager.instance()
expect(instance).toBe(mockEngineManager) expect(instance).toBe(mockEngineManager)
// Clean up // Clean up
// @ts-ignore // @ts-ignore
delete window.core delete window.core
}) })
test('should create a new instance if window.core.engineManager is not available', () => { test('should create a new instance if window.core.engineManager is not available', () => {
// @ts-ignore // @ts-ignore
delete window.core delete window.core
const instance = EngineManager.instance() const instance = EngineManager.instance()
expect(instance).toBeInstanceOf(EngineManager) expect(instance).toBeInstanceOf(EngineManager)
}) })

View File

@ -1,134 +1,98 @@
import { describe, it, expect, beforeEach, vi, type Mock } from 'vitest' /**
* @jest-environment jsdom
*/
import { LocalOAIEngine } from './LocalOAIEngine' import { LocalOAIEngine } from './LocalOAIEngine'
import { events } from '../../events' import { events } from '../../events'
import { Model, ModelEvent } from '../../../types' import { ModelEvent, Model } from '../../../types'
import { executeOnMain, systemInformation, dirName } from '../../core'
vi.mock('../../events') jest.mock('../../core', () => ({
executeOnMain: jest.fn(),
systemInformation: jest.fn(),
dirName: jest.fn(),
}))
jest.mock('../../events', () => ({
events: {
on: jest.fn(),
emit: jest.fn(),
},
}))
class TestLocalOAIEngine extends LocalOAIEngine { class TestLocalOAIEngine extends LocalOAIEngine {
inferenceUrl = 'http://test-local-inference-url' inferenceUrl = ''
provider = 'test-local-provider' nodeModule = 'testNodeModule'
nodeModule = 'test-node-module' provider = 'testProvider'
async headers() {
return { Authorization: 'Bearer test-token' }
}
async loadModel(model: Model & { file_path?: string }): Promise<void> {
this.loadedModel = model
}
async unloadModel(model?: Model) {
this.loadedModel = undefined
}
} }
describe('LocalOAIEngine', () => { describe('LocalOAIEngine', () => {
let engine: TestLocalOAIEngine let engine: TestLocalOAIEngine
const mockModel: Model & { file_path?: string } = {
object: 'model',
version: '1.0.0',
format: 'gguf',
sources: [],
id: 'test-model',
name: 'Test Model',
description: 'A test model',
settings: {},
parameters: {},
metadata: {},
file_path: '/path/to/model.gguf'
}
beforeEach(() => { beforeEach(() => {
engine = new TestLocalOAIEngine('', '') engine = new TestLocalOAIEngine('', '')
vi.clearAllMocks()
}) })
describe('onLoad', () => { afterEach(() => {
it('should call super.onLoad and subscribe to model events', () => { jest.clearAllMocks()
const superOnLoadSpy = vi.spyOn(Object.getPrototypeOf(Object.getPrototypeOf(engine)), 'onLoad')
engine.onLoad()
expect(superOnLoadSpy).toHaveBeenCalled()
expect(events.on).toHaveBeenCalledWith(
ModelEvent.OnModelInit,
expect.any(Function)
)
expect(events.on).toHaveBeenCalledWith(
ModelEvent.OnModelStop,
expect.any(Function)
)
})
it('should load model when OnModelInit event is triggered', () => {
const loadModelSpy = vi.spyOn(engine, 'loadModel')
engine.onLoad()
// Get the event handler for OnModelInit
const onModelInitCall = (events.on as Mock).mock.calls.find(
call => call[0] === ModelEvent.OnModelInit
)
const onModelInitHandler = onModelInitCall[1]
// Trigger the event handler
onModelInitHandler(mockModel)
expect(loadModelSpy).toHaveBeenCalledWith(mockModel)
})
it('should unload model when OnModelStop event is triggered', () => {
const unloadModelSpy = vi.spyOn(engine, 'unloadModel')
engine.onLoad()
// Get the event handler for OnModelStop
const onModelStopCall = (events.on as Mock).mock.calls.find(
call => call[0] === ModelEvent.OnModelStop
)
const onModelStopHandler = onModelStopCall[1]
// Trigger the event handler
onModelStopHandler(mockModel)
expect(unloadModelSpy).toHaveBeenCalledWith(mockModel)
})
}) })
describe('properties', () => { it('should subscribe to events on load', () => {
it('should have correct default function names', () => { engine.onLoad()
expect(engine.loadModelFunctionName).toBe('loadModel') expect(events.on).toHaveBeenCalledWith(ModelEvent.OnModelInit, expect.any(Function))
expect(engine.unloadModelFunctionName).toBe('unloadModel') expect(events.on).toHaveBeenCalledWith(ModelEvent.OnModelStop, expect.any(Function))
})
it('should have abstract nodeModule property implemented', () => {
expect(engine.nodeModule).toBe('test-node-module')
})
}) })
describe('loadModel', () => { it('should load model correctly', async () => {
it('should load the model and set loadedModel', async () => { const model: any = { engine: 'testProvider', file_path: 'path/to/model' } as any
await engine.loadModel(mockModel) const modelFolder = 'path/to'
expect(engine.loadedModel).toBe(mockModel) const systemInfo = { os: 'testOS' }
}) const res = { error: null }
it('should handle model with file_path', async () => { ;(dirName as jest.Mock).mockResolvedValue(modelFolder)
const modelWithPath = { ...mockModel, file_path: '/custom/path/model.gguf' } ;(systemInformation as jest.Mock).mockResolvedValue(systemInfo)
await engine.loadModel(modelWithPath) ;(executeOnMain as jest.Mock).mockResolvedValue(res)
expect(engine.loadedModel).toBe(modelWithPath)
}) await engine.loadModel(model)
expect(dirName).toHaveBeenCalledWith(model.file_path)
expect(systemInformation).toHaveBeenCalled()
expect(executeOnMain).toHaveBeenCalledWith(
engine.nodeModule,
engine.loadModelFunctionName,
{ modelFolder, model },
systemInfo
)
expect(events.emit).toHaveBeenCalledWith(ModelEvent.OnModelReady, model)
}) })
describe('unloadModel', () => { it('should handle load model error', async () => {
it('should unload the model and clear loadedModel', async () => { const model: any = { engine: 'testProvider', file_path: 'path/to/model' } as any
engine.loadedModel = mockModel const modelFolder = 'path/to'
await engine.unloadModel(mockModel) const systemInfo = { os: 'testOS' }
expect(engine.loadedModel).toBeUndefined() const res = { error: 'load error' }
})
it('should handle unload without passing a model', async () => { ;(dirName as jest.Mock).mockResolvedValue(modelFolder)
engine.loadedModel = mockModel ;(systemInformation as jest.Mock).mockResolvedValue(systemInfo)
await engine.unloadModel() ;(executeOnMain as jest.Mock).mockResolvedValue(res)
expect(engine.loadedModel).toBeUndefined()
}) await expect(engine.loadModel(model)).rejects.toEqual('load error')
expect(events.emit).toHaveBeenCalledWith(ModelEvent.OnModelFail, { error: res.error })
})
it('should unload model correctly', async () => {
const model: Model = { engine: 'testProvider' } as any
await engine.unloadModel(model)
expect(executeOnMain).toHaveBeenCalledWith(engine.nodeModule, engine.unloadModelFunctionName)
expect(events.emit).toHaveBeenCalledWith(ModelEvent.OnModelStopped, {})
})
it('should not unload model if engine does not match', async () => {
const model: Model = { engine: 'otherProvider' } as any
await engine.unloadModel(model)
expect(executeOnMain).not.toHaveBeenCalled()
expect(events.emit).not.toHaveBeenCalledWith(ModelEvent.OnModelStopped, {})
}) })
}) })

View File

@ -1,3 +1,4 @@
import { executeOnMain, systemInformation, dirName, joinPath, getJanDataFolderPath } from '../../core'
import { events } from '../../events' import { events } from '../../events'
import { Model, ModelEvent } from '../../../types' import { Model, ModelEvent } from '../../../types'
import { OAIEngine } from './OAIEngine' import { OAIEngine } from './OAIEngine'
@ -28,14 +29,46 @@ export abstract class LocalOAIEngine extends OAIEngine {
/** /**
* Load the model. * Load the model.
*/ */
async loadModel(model: Model & { file_path?: string }): Promise<void> { override async loadModel(model: Model & { file_path?: string }, abortController?: AbortController): Promise<void> {
// Implementation of loading the model if (model.engine.toString() !== this.provider) return
} const modelFolder = 'file_path' in model && model.file_path ? await dirName(model.file_path) : await this.getModelFilePath(model.id)
const systemInfo = await systemInformation()
const res = await executeOnMain(
this.nodeModule,
this.loadModelFunctionName,
{
modelFolder,
model,
},
systemInfo
)
if (res?.error) {
events.emit(ModelEvent.OnModelFail, { error: res.error })
return Promise.reject(res.error)
} else {
this.loadedModel = model
events.emit(ModelEvent.OnModelReady, model)
return Promise.resolve()
}
}
/** /**
* Stops the model. * Stops the model.
*/ */
async unloadModel(model?: Model) { override async unloadModel(model?: Model) {
// Implementation of unloading the model if (model?.engine && model.engine?.toString() !== this.provider) return Promise.resolve()
this.loadedModel = undefined
await executeOnMain(this.nodeModule, this.unloadModelFunctionName).then(() => {
events.emit(ModelEvent.OnModelStopped, {})
})
} }
/// Legacy
private getModelFilePath = async (
id: string,
): Promise<string> => {
return joinPath([await getJanDataFolderPath(), 'models', id])
}
///
} }

View File

@ -1,4 +1,6 @@
import { describe, it, expect, beforeEach, vi } from 'vitest' /**
* @jest-environment jsdom
*/
import { OAIEngine } from './OAIEngine' import { OAIEngine } from './OAIEngine'
import { events } from '../../events' import { events } from '../../events'
import { import {
@ -11,7 +13,7 @@ import {
ContentType, ContentType,
} from '../../../types' } from '../../../types'
vi.mock('../../events') jest.mock('../../events')
class TestOAIEngine extends OAIEngine { class TestOAIEngine extends OAIEngine {
inferenceUrl = 'http://test-inference-url' inferenceUrl = 'http://test-inference-url'
@ -27,7 +29,7 @@ describe('OAIEngine', () => {
beforeEach(() => { beforeEach(() => {
engine = new TestOAIEngine('', '') engine = new TestOAIEngine('', '')
vi.clearAllMocks() jest.clearAllMocks()
}) })
it('should subscribe to events on load', () => { it('should subscribe to events on load', () => {

View File

@ -44,12 +44,10 @@ export abstract class OAIEngine extends AIEngine {
*/ */
override onUnload(): void {} override onUnload(): void {}
inference(data: MessageRequest) {}
/** /**
* Stops the inference. * Stops the inference.
*/ */
stopInference() { override stopInference() {
this.isCancelled = true this.isCancelled = true
this.controller?.abort() this.controller?.abort()
} }

View File

@ -1,4 +1,6 @@
import { describe, test, expect, beforeEach, vi } from 'vitest' /**
* @jest-environment jsdom
*/
import { RemoteOAIEngine } from './' import { RemoteOAIEngine } from './'
class TestRemoteOAIEngine extends RemoteOAIEngine { class TestRemoteOAIEngine extends RemoteOAIEngine {
@ -14,8 +16,8 @@ describe('RemoteOAIEngine', () => {
}) })
test('should call onLoad and super.onLoad', () => { test('should call onLoad and super.onLoad', () => {
const onLoadSpy = vi.spyOn(engine, 'onLoad') const onLoadSpy = jest.spyOn(engine, 'onLoad')
const superOnLoadSpy = vi.spyOn(Object.getPrototypeOf(RemoteOAIEngine.prototype), 'onLoad') const superOnLoadSpy = jest.spyOn(Object.getPrototypeOf(RemoteOAIEngine.prototype), 'onLoad')
engine.onLoad() engine.onLoad()
expect(onLoadSpy).toHaveBeenCalled() expect(onLoadSpy).toHaveBeenCalled()

View File

@ -1,6 +1,6 @@
import { it, expect } from 'vitest'
import * as engines from './index' import { expect } from '@jest/globals';
it('should re-export all exports from ./AIEngine', () => { it('should re-export all exports from ./AIEngine', () => {
expect(engines).toHaveProperty('AIEngine') expect(require('./index')).toHaveProperty('AIEngine');
}) });

View File

@ -0,0 +1,566 @@
import { EngineManagementExtension } from './enginesManagement'
import { ExtensionTypeEnum } from '../extension'
import {
EngineConfig,
EngineReleased,
EngineVariant,
Engines,
InferenceEngine,
DefaultEngineVariant,
Model
} from '../../types'
// Mock implementation of EngineManagementExtension
// In-memory mock used to exercise the abstract EngineManagementExtension
// contract without touching the filesystem or network. All state lives in the
// three private maps below and is mutated by the install/uninstall/update
// methods, so every test starts from the seed data defined here.
class MockEngineManagementExtension extends EngineManagementExtension {
  // Seed data: 'llama' has an installed cpu variant and a not-yet-installed
  // cuda variant; 'gpt4all' has a single installed cpu variant.
  private mockEngines: Engines = {
    llama: {
      name: 'llama',
      variants: [
        {
          variant: 'cpu',
          version: '1.0.0',
          path: '/engines/llama/cpu/1.0.0',
          installed: true
        },
        {
          variant: 'cuda',
          version: '1.0.0',
          path: '/engines/llama/cuda/1.0.0',
          installed: false
        }
      ],
      default: {
        variant: 'cpu',
        version: '1.0.0'
      }
    },
    gpt4all: {
      name: 'gpt4all',
      variants: [
        {
          variant: 'cpu',
          version: '2.0.0',
          path: '/engines/gpt4all/cpu/2.0.0',
          installed: true
        }
      ],
      default: {
        variant: 'cpu',
        version: '2.0.0'
      }
    }
  }

  // Release catalog keyed by '<engine>-<version>'; each entry lists the
  // variants published for that version and the operating systems they support.
  private mockReleases: { [key: string]: EngineReleased[] } = {
    'llama-1.0.0': [
      {
        variant: 'cpu',
        version: '1.0.0',
        os: ['macos', 'linux', 'windows'],
        url: 'https://example.com/llama/1.0.0/cpu'
      },
      {
        variant: 'cuda',
        version: '1.0.0',
        os: ['linux', 'windows'],
        url: 'https://example.com/llama/1.0.0/cuda'
      }
    ],
    'llama-1.1.0': [
      {
        variant: 'cpu',
        version: '1.1.0',
        os: ['macos', 'linux', 'windows'],
        url: 'https://example.com/llama/1.1.0/cpu'
      },
      {
        variant: 'cuda',
        version: '1.1.0',
        os: ['linux', 'windows'],
        url: 'https://example.com/llama/1.1.0/cuda'
      }
    ],
    'gpt4all-2.0.0': [
      {
        variant: 'cpu',
        version: '2.0.0',
        os: ['macos', 'linux', 'windows'],
        url: 'https://example.com/gpt4all/2.0.0/cpu'
      }
    ]
  }

  // Remote models registered per engine name via addRemoteModel().
  private remoteModels: { [engine: string]: Model[] } = {
    'llama': [],
    'gpt4all': []
  }

  constructor() {
    // Positional arguments are forwarded to the BaseExtension constructor
    // (presumably url, name, display name, active flag, description, version
    // — confirm against BaseExtension's signature).
    super('http://mock-url.com', 'mock-engine-extension', 'Mock Engine Extension', true, 'A mock engine extension', '1.0.0')
  }

  onLoad(): void {
    // Mock implementation
  }

  onUnload(): void {
    // Mock implementation
  }

  // Returns a deep copy (JSON round-trip) so callers cannot mutate the
  // internal mock state through the returned object.
  async getEngines(): Promise<Engines> {
    return JSON.parse(JSON.stringify(this.mockEngines))
  }

  // Only variants whose `installed` flag is true; empty array for unknown engines.
  async getInstalledEngines(name: InferenceEngine): Promise<EngineVariant[]> {
    if (!this.mockEngines[name]) {
      return []
    }
    return this.mockEngines[name].variants.filter(variant => variant.installed)
  }

  // Looks up the '<name>-<version>' release list, optionally filtered to
  // releases whose `os` array contains `platform`.
  async getReleasedEnginesByVersion(
    name: InferenceEngine,
    version: string,
    platform?: string
  ): Promise<EngineReleased[]> {
    const key = `${name}-${version}`
    let releases = this.mockReleases[key] || []

    if (platform) {
      releases = releases.filter(release => release.os.includes(platform))
    }

    return releases
  }

  // Resolves a hard-coded "latest" version per engine, then delegates to
  // getReleasedEnginesByVersion for the actual lookup/filtering.
  async getLatestReleasedEngine(
    name: InferenceEngine,
    platform?: string
  ): Promise<EngineReleased[]> {
    // For mock, let's assume latest versions are 1.1.0 for llama and 2.0.0 for gpt4all
    const latestVersions = {
      'llama': '1.1.0',
      'gpt4all': '2.0.0'
    }

    if (!latestVersions[name]) {
      return []
    }

    return this.getReleasedEnginesByVersion(name, latestVersions[name], platform)
  }

  // Marks an existing variant as installed, or registers the engine and/or
  // variant first if not known yet. The first install of an unknown engine
  // also becomes its default variant.
  async installEngine(
    name: string,
    engineConfig: EngineConfig
  ): Promise<{ messages: string }> {
    if (!this.mockEngines[name]) {
      this.mockEngines[name] = {
        name,
        variants: [],
        default: {
          variant: engineConfig.variant,
          version: engineConfig.version
        }
      }
    }

    // Check if variant already exists
    const existingVariantIndex = this.mockEngines[name].variants.findIndex(
      v => v.variant === engineConfig.variant && v.version === engineConfig.version
    )

    if (existingVariantIndex >= 0) {
      this.mockEngines[name].variants[existingVariantIndex].installed = true
    } else {
      this.mockEngines[name].variants.push({
        variant: engineConfig.variant,
        version: engineConfig.version,
        path: `/engines/${name}/${engineConfig.variant}/${engineConfig.version}`,
        installed: true
      })
    }

    return { messages: `Successfully installed ${name} ${engineConfig.variant} ${engineConfig.version}` }
  }

  // Registers a remote engine variant (always marked installed); falls back to
  // the name 'remote-engine' when engineConfig.name is missing.
  async addRemoteEngine(
    engineConfig: EngineConfig
  ): Promise<{ messages: string }> {
    const name = engineConfig.name || 'remote-engine'

    if (!this.mockEngines[name]) {
      this.mockEngines[name] = {
        name,
        variants: [],
        default: {
          variant: engineConfig.variant,
          version: engineConfig.version
        }
      }
    }

    this.mockEngines[name].variants.push({
      variant: engineConfig.variant,
      version: engineConfig.version,
      path: engineConfig.path || `/engines/${name}/${engineConfig.variant}/${engineConfig.version}`,
      installed: true,
      url: engineConfig.url
    })

    return { messages: `Successfully added remote engine ${name}` }
  }

  // Flags the matching variant as not installed. If it was the default, the
  // default is moved to another installed variant, or cleared when none remain.
  async uninstallEngine(
    name: InferenceEngine,
    engineConfig: EngineConfig
  ): Promise<{ messages: string }> {
    if (!this.mockEngines[name]) {
      return { messages: `Engine ${name} not found` }
    }

    const variantIndex = this.mockEngines[name].variants.findIndex(
      v => v.variant === engineConfig.variant && v.version === engineConfig.version
    )

    if (variantIndex >= 0) {
      this.mockEngines[name].variants[variantIndex].installed = false

      // If this was the default variant, reset default
      if (
        this.mockEngines[name].default.variant === engineConfig.variant &&
        this.mockEngines[name].default.version === engineConfig.version
      ) {
        // Find another installed variant to set as default
        const installedVariant = this.mockEngines[name].variants.find(v => v.installed)
        if (installedVariant) {
          this.mockEngines[name].default = {
            variant: installedVariant.variant,
            version: installedVariant.version
          }
        } else {
          // No installed variants remain, clear default
          this.mockEngines[name].default = { variant: '', version: '' }
        }
      }

      return { messages: `Successfully uninstalled ${name} ${engineConfig.variant} ${engineConfig.version}` }
    } else {
      return { messages: `Variant ${engineConfig.variant} ${engineConfig.version} not found for engine ${name}` }
    }
  }

  // Empty variant/version signals "no default" (also returned for unknown engines).
  async getDefaultEngineVariant(
    name: InferenceEngine
  ): Promise<DefaultEngineVariant> {
    if (!this.mockEngines[name]) {
      return { variant: '', version: '' }
    }

    return this.mockEngines[name].default
  }

  // Only an existing, installed variant may become the default.
  async setDefaultEngineVariant(
    name: InferenceEngine,
    engineConfig: EngineConfig
  ): Promise<{ messages: string }> {
    if (!this.mockEngines[name]) {
      return { messages: `Engine ${name} not found` }
    }

    const variantExists = this.mockEngines[name].variants.some(
      v => v.variant === engineConfig.variant && v.version === engineConfig.version && v.installed
    )

    if (!variantExists) {
      return { messages: `Variant ${engineConfig.variant} ${engineConfig.version} not found or not installed` }
    }

    this.mockEngines[name].default = {
      variant: engineConfig.variant,
      version: engineConfig.version
    }

    return { messages: `Successfully set ${engineConfig.variant} ${engineConfig.version} as default for ${name}` }
  }

  // Bumps an installed variant to engineConfig.version (and the default's
  // version when that variant is the default). Without a config it only
  // reports success, mimicking an "update to latest" call.
  async updateEngine(
    name: InferenceEngine,
    engineConfig?: EngineConfig
  ): Promise<{ messages: string }> {
    if (!this.mockEngines[name]) {
      return { messages: `Engine ${name} not found` }
    }

    if (!engineConfig) {
      // Assume we're updating to the latest version
      return { messages: `Successfully updated ${name} to the latest version` }
    }

    const variantIndex = this.mockEngines[name].variants.findIndex(
      v => v.variant === engineConfig.variant && v.installed
    )

    if (variantIndex >= 0) {
      // Update the version
      this.mockEngines[name].variants[variantIndex].version = engineConfig.version

      // If this was the default variant, update default version too
      if (this.mockEngines[name].default.variant === engineConfig.variant) {
        this.mockEngines[name].default.version = engineConfig.version
      }

      return { messages: `Successfully updated ${name} ${engineConfig.variant} to version ${engineConfig.version}` }
    } else {
      return { messages: `Installed variant ${engineConfig.variant} not found for engine ${name}` }
    }
  }

  // Appends the model under its engine name, creating the bucket on first use.
  async addRemoteModel(model: Model): Promise<void> {
    const engine = model.engine as string
    if (!this.remoteModels[engine]) {
      this.remoteModels[engine] = []
    }
    this.remoteModels[engine].push(model)
  }

  async getRemoteModels(name: InferenceEngine | string): Promise<Model[]> {
    return this.remoteModels[name] || []
  }
}
// Exercises MockEngineManagementExtension against the EngineManagementExtension
// contract. A fresh mock is built per test, so every test starts from the seed
// data: llama (cpu installed, cuda not installed, cpu default) and gpt4all
// (cpu installed, cpu default).
describe('EngineManagementExtension', () => {
  let extension: MockEngineManagementExtension

  beforeEach(() => {
    extension = new MockEngineManagementExtension()
  })

  test('should return the correct extension type', () => {
    expect(extension.type()).toBe(ExtensionTypeEnum.Engine)
  })

  test('should get all engines', async () => {
    const engines = await extension.getEngines()
    expect(engines).toBeDefined()
    expect(engines.llama).toBeDefined()
    expect(engines.gpt4all).toBeDefined()
    expect(engines.llama.variants).toHaveLength(2)
    expect(engines.gpt4all.variants).toHaveLength(1)
  })

  test('should get installed engines', async () => {
    // Only variants flagged installed are returned (llama's cuda seed is not).
    const llamaEngines = await extension.getInstalledEngines('llama')
    expect(llamaEngines).toHaveLength(1)
    expect(llamaEngines[0].variant).toBe('cpu')
    expect(llamaEngines[0].installed).toBe(true)

    const gpt4allEngines = await extension.getInstalledEngines('gpt4all')
    expect(gpt4allEngines).toHaveLength(1)
    expect(gpt4allEngines[0].variant).toBe('cpu')
    expect(gpt4allEngines[0].installed).toBe(true)

    // Test non-existent engine
    const nonExistentEngines = await extension.getInstalledEngines('non-existent' as InferenceEngine)
    expect(nonExistentEngines).toHaveLength(0)
  })

  test('should get released engines by version', async () => {
    const llamaReleases = await extension.getReleasedEnginesByVersion('llama', '1.0.0')
    expect(llamaReleases).toHaveLength(2)
    expect(llamaReleases[0].variant).toBe('cpu')
    expect(llamaReleases[1].variant).toBe('cuda')

    // Test with platform filter
    const llamaLinuxReleases = await extension.getReleasedEnginesByVersion('llama', '1.0.0', 'linux')
    expect(llamaLinuxReleases).toHaveLength(2)

    // cuda releases do not list macos, so only cpu remains.
    const llamaMacReleases = await extension.getReleasedEnginesByVersion('llama', '1.0.0', 'macos')
    expect(llamaMacReleases).toHaveLength(1)
    expect(llamaMacReleases[0].variant).toBe('cpu')

    // Test non-existent version
    const nonExistentReleases = await extension.getReleasedEnginesByVersion('llama', '9.9.9')
    expect(nonExistentReleases).toHaveLength(0)
  })

  test('should get latest released engines', async () => {
    // The mock hard-codes llama's latest version as 1.1.0.
    const latestLlamaReleases = await extension.getLatestReleasedEngine('llama')
    expect(latestLlamaReleases).toHaveLength(2)
    expect(latestLlamaReleases[0].version).toBe('1.1.0')

    // Test with platform filter
    const latestLlamaMacReleases = await extension.getLatestReleasedEngine('llama', 'macos')
    expect(latestLlamaMacReleases).toHaveLength(1)
    expect(latestLlamaMacReleases[0].variant).toBe('cpu')
    expect(latestLlamaMacReleases[0].version).toBe('1.1.0')

    // Test non-existent engine
    const nonExistentReleases = await extension.getLatestReleasedEngine('non-existent' as InferenceEngine)
    expect(nonExistentReleases).toHaveLength(0)
  })

  test('should install engine', async () => {
    // Install existing engine variant that is not installed
    const result = await extension.installEngine('llama', { variant: 'cuda', version: '1.0.0' })
    expect(result.messages).toContain('Successfully installed')

    const installedEngines = await extension.getInstalledEngines('llama')
    expect(installedEngines).toHaveLength(2)
    expect(installedEngines.some(e => e.variant === 'cuda')).toBe(true)

    // Install non-existent engine
    const newEngineResult = await extension.installEngine('new-engine', { variant: 'cpu', version: '1.0.0' })
    expect(newEngineResult.messages).toContain('Successfully installed')

    const engines = await extension.getEngines()
    expect(engines['new-engine']).toBeDefined()
    expect(engines['new-engine'].variants).toHaveLength(1)
    expect(engines['new-engine'].variants[0].installed).toBe(true)
  })

  test('should add remote engine', async () => {
    const result = await extension.addRemoteEngine({
      name: 'remote-llm',
      variant: 'remote',
      version: '1.0.0',
      url: 'https://example.com/remote-llm-api'
    })
    expect(result.messages).toContain('Successfully added remote engine')

    const engines = await extension.getEngines()
    expect(engines['remote-llm']).toBeDefined()
    expect(engines['remote-llm'].variants).toHaveLength(1)
    expect(engines['remote-llm'].variants[0].url).toBe('https://example.com/remote-llm-api')
  })

  test('should uninstall engine', async () => {
    const result = await extension.uninstallEngine('llama', { variant: 'cpu', version: '1.0.0' })
    expect(result.messages).toContain('Successfully uninstalled')

    const installedEngines = await extension.getInstalledEngines('llama')
    expect(installedEngines).toHaveLength(0)

    // Test uninstalling non-existent variant
    const nonExistentResult = await extension.uninstallEngine('llama', { variant: 'non-existent', version: '1.0.0' })
    expect(nonExistentResult.messages).toContain('not found')
  })

  test('should handle default variant when uninstalling', async () => {
    // First install cuda variant
    await extension.installEngine('llama', { variant: 'cuda', version: '1.0.0' })

    // Set cuda as default
    await extension.setDefaultEngineVariant('llama', { variant: 'cuda', version: '1.0.0' })

    // Check that cuda is now default
    let defaultVariant = await extension.getDefaultEngineVariant('llama')
    expect(defaultVariant.variant).toBe('cuda')

    // Uninstall cuda
    await extension.uninstallEngine('llama', { variant: 'cuda', version: '1.0.0' })

    // Check that default has changed to another installed variant
    defaultVariant = await extension.getDefaultEngineVariant('llama')
    expect(defaultVariant.variant).toBe('cpu')

    // Uninstall all variants
    await extension.uninstallEngine('llama', { variant: 'cpu', version: '1.0.0' })

    // Check that default is now empty
    defaultVariant = await extension.getDefaultEngineVariant('llama')
    expect(defaultVariant.variant).toBe('')
    expect(defaultVariant.version).toBe('')
  })

  test('should get default engine variant', async () => {
    const llamaDefault = await extension.getDefaultEngineVariant('llama')
    expect(llamaDefault.variant).toBe('cpu')
    expect(llamaDefault.version).toBe('1.0.0')

    // Test non-existent engine
    const nonExistentDefault = await extension.getDefaultEngineVariant('non-existent' as InferenceEngine)
    expect(nonExistentDefault.variant).toBe('')
    expect(nonExistentDefault.version).toBe('')
  })

  test('should set default engine variant', async () => {
    // Install cuda variant
    await extension.installEngine('llama', { variant: 'cuda', version: '1.0.0' })

    const result = await extension.setDefaultEngineVariant('llama', { variant: 'cuda', version: '1.0.0' })
    expect(result.messages).toContain('Successfully set')

    const defaultVariant = await extension.getDefaultEngineVariant('llama')
    expect(defaultVariant.variant).toBe('cuda')
    expect(defaultVariant.version).toBe('1.0.0')

    // Test setting non-existent variant as default
    const nonExistentResult = await extension.setDefaultEngineVariant('llama', { variant: 'non-existent', version: '1.0.0' })
    expect(nonExistentResult.messages).toContain('not found')
  })

  test('should update engine', async () => {
    const result = await extension.updateEngine('llama', { variant: 'cpu', version: '1.1.0' })
    expect(result.messages).toContain('Successfully updated')

    const engines = await extension.getEngines()
    const cpuVariant = engines.llama.variants.find(v => v.variant === 'cpu')
    expect(cpuVariant).toBeDefined()
    expect(cpuVariant?.version).toBe('1.1.0')

    // Default should also be updated since cpu was default
    expect(engines.llama.default.version).toBe('1.1.0')

    // Test updating non-existent variant
    const nonExistentResult = await extension.updateEngine('llama', { variant: 'non-existent', version: '1.1.0' })
    expect(nonExistentResult.messages).toContain('not found')
  })

  test('should add and get remote models', async () => {
    const model: Model = {
      id: 'remote-model-1',
      name: 'Remote Model 1',
      path: '/path/to/remote-model',
      engine: 'llama',
      format: 'gguf',
      modelFormat: 'gguf',
      source: 'remote',
      status: 'ready',
      contextLength: 4096,
      sizeInGB: 4,
      created: new Date().toISOString()
    }

    await extension.addRemoteModel(model)

    const llamaModels = await extension.getRemoteModels('llama')
    expect(llamaModels).toHaveLength(1)
    expect(llamaModels[0].id).toBe('remote-model-1')

    // Test non-existent engine
    const nonExistentModels = await extension.getRemoteModels('non-existent')
    expect(nonExistentModels).toHaveLength(0)
  })
})

View File

@ -0,0 +1,115 @@
import {
Engines,
EngineVariant,
EngineReleased,
EngineConfig,
DefaultEngineVariant,
Model,
} from '../../types'
import { BaseExtension, ExtensionTypeEnum } from '../extension'
/**
* Engine management extension. Persists and retrieves engine management.
* @abstract
* @extends BaseExtension
*/
/**
 * Engine management extension. Persists and retrieves engine management.
 *
 * Concrete subclasses implement the engine catalog operations (list, install,
 * uninstall, update, default-variant selection) and remote-model registration.
 * @abstract
 * @extends BaseExtension
 */
export abstract class EngineManagementExtension extends BaseExtension {
  /**
   * Identifies this extension to the extension registry.
   * @returns The engine extension type.
   */
  type(): ExtensionTypeEnum | undefined {
    return ExtensionTypeEnum.Engine
  }

  /**
   * @returns A Promise that resolves to an object of list engines.
   */
  abstract getEngines(): Promise<Engines>

  /**
   * @param name - Inference engine name.
   * @returns A Promise that resolves to an array of installed engine variants.
   */
  abstract getInstalledEngines(name: string): Promise<EngineVariant[]>

  /**
   * @param name - Inference engine name.
   * @param version - Version of the engine.
   * @param platform - Optional to sort by operating system. macOS, linux, windows.
   * @returns A Promise that resolves to an array of released engine variants for that version.
   */
  abstract getReleasedEnginesByVersion(
    name: string,
    version: string,
    platform?: string
  ): Promise<EngineReleased[]>

  /**
   * @param name - Inference engine name.
   * @param platform - Optional to sort by operating system. macOS, linux, windows.
   * @returns A Promise that resolves to an array of the latest released engine variants.
   */
  abstract getLatestReleasedEngine(
    name: string,
    platform?: string
  ): Promise<EngineReleased[]>

  /**
   * Install an engine variant.
   * @param name - Inference engine name.
   * @param engineConfig - Variant/version to install.
   * @returns A Promise that resolves to the install result message.
   */
  abstract installEngine(
    name: string,
    engineConfig: EngineConfig
  ): Promise<{ messages: string }>

  /**
   * Add a new remote engine.
   * @param engineConfig - Configuration of the remote engine to register.
   * @returns A Promise that resolves to the install result message.
   */
  abstract addRemoteEngine(
    engineConfig: EngineConfig
  ): Promise<{ messages: string }>

  /**
   * Uninstall an engine variant.
   * @param name - Inference engine name.
   * @param engineConfig - Variant/version to uninstall.
   * @returns A Promise that resolves to the uninstall result message.
   */
  abstract uninstallEngine(
    name: string,
    engineConfig: EngineConfig
  ): Promise<{ messages: string }>

  /**
   * @param name - Inference engine name.
   * @returns A Promise that resolves to an object of the default engine variant.
   */
  abstract getDefaultEngineVariant(
    name: string
  ): Promise<DefaultEngineVariant>

  /**
   * @param name - Inference engine name.
   * @param engineConfig - Variant/version to set as default.
   * @returns A Promise that resolves to the set-default result message.
   */
  abstract setDefaultEngineVariant(
    name: string,
    engineConfig: EngineConfig
  ): Promise<{ messages: string }>

  /**
   * Update an engine. When engineConfig is omitted the implementation decides
   * the target version (e.g. latest).
   * @returns A Promise that resolves to the update result message.
   */
  abstract updateEngine(
    name: string,
    engineConfig?: EngineConfig
  ): Promise<{ messages: string }>

  /**
   * Add a new remote model for a specific engine.
   */
  abstract addRemoteModel(model: Model): Promise<void>

  /**
   * @param name - Inference engine name.
   * @returns A Promise that resolves to the list of remote models for that engine.
   */
  abstract getRemoteModels(name: string): Promise<Model[]>
}

Some files were not shown because too many files have changed in this diff Show More