diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index a4372656c..2d49f0d6e 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -3,5 +3,5 @@ blank_issues_enabled: true contact_links: - name: "\1F4AC Jan Discussions" - url: "https://github.com/orgs/janhq/discussions/categories/q-a" + url: "https://github.com/orgs/menloresearch/discussions/categories/q-a" about: "Get help, discuss features & roadmap, and share your projects" \ No newline at end of file diff --git a/.github/workflows/jan-electron-build-beta.yml b/.github/workflows/jan-electron-build-beta.yml index 9cae31d67..61ff717ac 100644 --- a/.github/workflows/jan-electron-build-beta.yml +++ b/.github/workflows/jan-electron-build-beta.yml @@ -9,31 +9,6 @@ jobs: get-update-version: uses: ./.github/workflows/template-get-update-version.yml - create-draft-release: - runs-on: ubuntu-latest - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') - outputs: - upload_url: ${{ steps.create_release.outputs.upload_url }} - version: ${{ steps.get_version.outputs.version }} - permissions: - contents: write - steps: - - name: Extract tag name without v prefix - id: get_version - run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV && echo "::set-output name=version::${GITHUB_REF#refs/tags/v}" - env: - GITHUB_REF: ${{ github.ref }} - - name: Create Draft Release - id: create_release - uses: softprops/action-gh-release@v2 - with: - tag_name: ${{ github.ref_name }} - token: ${{ secrets.GITHUB_TOKEN }} - name: "${{ env.VERSION }}" - draft: true - prerelease: false - generate_release_notes: true - build-macos: uses: ./.github/workflows/template-build-macos.yml secrets: inherit @@ -43,6 +18,8 @@ jobs: public_provider: github new_version: ${{ needs.get-update-version.outputs.new_version }} beta: true + nightly: false + cortex_api_port: "39271" build-windows-x64: uses: ./.github/workflows/template-build-windows-x64.yml @@ -53,6 +30,8 @@ 
jobs: public_provider: github new_version: ${{ needs.get-update-version.outputs.new_version }} beta: true + nightly: false + cortex_api_port: "39271" build-linux-x64: uses: ./.github/workflows/template-build-linux-x64.yml @@ -63,9 +42,11 @@ jobs: public_provider: github new_version: ${{ needs.get-update-version.outputs.new_version }} beta: true + nightly: false + cortex_api_port: "39271" sync-temp-to-latest: - needs: [build-macos, create-draft-release, build-windows-x64, build-linux-x64] + needs: [build-macos, build-windows-x64, build-linux-x64] runs-on: ubuntu-latest permissions: contents: write @@ -82,19 +63,15 @@ jobs: AWS_DEFAULT_REGION: ${{ secrets.DELTA_AWS_REGION }} AWS_EC2_METADATA_DISABLED: "true" - - name: set release to prerelease - run: | - gh release edit v${{ needs.create-draft-release.outputs.version }} --draft=false --prerelease - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - noti-discord-and-update-url-readme: - needs: [build-macos, create-draft-release, build-windows-x64, build-linux-x64, sync-temp-to-latest] + needs: [build-macos, get-update-version, build-windows-x64, build-linux-x64, sync-temp-to-latest] runs-on: ubuntu-latest steps: - name: Set version to environment variable run: | - echo "VERSION=${{ needs.create-draft-release.outputs.version }}" >> $GITHUB_ENV + VERSION=${{ needs.get-update-version.outputs.new_version }} + VERSION="${VERSION#v}" + echo "VERSION=$VERSION" >> $GITHUB_ENV - name: Notify Discord uses: Ilshidur/action-discord@master @@ -105,6 +82,5 @@ jobs: - macOS Universal: https://delta.jan.ai/beta/jan-beta-mac-universal-{{ VERSION }}.dmg - Linux Deb: https://delta.jan.ai/beta/jan-beta-linux-amd64-{{ VERSION }}.deb - Linux AppImage: https://delta.jan.ai/beta/jan-beta-linux-x86_64-{{ VERSION }}.AppImage - - Github Release URL: https://github.com/janhq/jan/releases/tag/v{{ VERSION }} env: DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK_JAN_BETA }} \ No newline at end of file diff --git 
a/.github/workflows/jan-electron-build-nightly.yml b/.github/workflows/jan-electron-build-nightly.yml index e08a35169..af5bab195 100644 --- a/.github/workflows/jan-electron-build-nightly.yml +++ b/.github/workflows/jan-electron-build-nightly.yml @@ -55,6 +55,9 @@ jobs: ref: ${{ needs.set-public-provider.outputs.ref }} public_provider: ${{ needs.set-public-provider.outputs.public_provider }} new_version: ${{ needs.get-update-version.outputs.new_version }} + nightly: true + beta: false + cortex_api_port: "39261" build-windows-x64: uses: ./.github/workflows/template-build-windows-x64.yml @@ -64,8 +67,9 @@ jobs: ref: ${{ needs.set-public-provider.outputs.ref }} public_provider: ${{ needs.set-public-provider.outputs.public_provider }} new_version: ${{ needs.get-update-version.outputs.new_version }} - - + nightly: true + beta: false + cortex_api_port: "39261" build-linux-x64: uses: ./.github/workflows/template-build-linux-x64.yml secrets: inherit @@ -74,6 +78,9 @@ jobs: ref: ${{ needs.set-public-provider.outputs.ref }} public_provider: ${{ needs.set-public-provider.outputs.public_provider }} new_version: ${{ needs.get-update-version.outputs.new_version }} + nightly: true + beta: false + cortex_api_port: "39261" sync-temp-to-latest: needs: [set-public-provider, build-windows-x64, build-linux-x64, build-macos] @@ -141,4 +148,3 @@ jobs: RUN_ID=${{ github.run_id }} COMMENT="This is the build for this pull request. You can download it from the Artifacts section here: [Build URL](https://github.com/${{ github.repository }}/actions/runs/${RUN_ID})." 
gh pr comment $PR_URL --body "$COMMENT" - \ No newline at end of file diff --git a/.github/workflows/jan-electron-build.yml b/.github/workflows/jan-electron-build.yml index 3ca4a1fe2..7d69a5c12 100644 --- a/.github/workflows/jan-electron-build.yml +++ b/.github/workflows/jan-electron-build.yml @@ -40,6 +40,8 @@ jobs: with: ref: ${{ github.ref }} public_provider: github + beta: false + nightly: false new_version: ${{ needs.get-update-version.outputs.new_version }} build-windows-x64: @@ -49,6 +51,8 @@ jobs: with: ref: ${{ github.ref }} public_provider: github + beta: false + nightly: false new_version: ${{ needs.get-update-version.outputs.new_version }} build-linux-x64: @@ -58,6 +62,8 @@ jobs: with: ref: ${{ github.ref }} public_provider: github + beta: false + nightly: false new_version: ${{ needs.get-update-version.outputs.new_version }} update_release_draft: @@ -82,4 +88,4 @@ jobs: # config-name: my-config.yml # disable-autolabeler: true env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/nightly-integrate-cortex-cpp.yml b/.github/workflows/nightly-integrate-cortex-cpp.yml index 8ddc40a11..066fbd28e 100644 --- a/.github/workflows/nightly-integrate-cortex-cpp.yml +++ b/.github/workflows/nightly-integrate-cortex-cpp.yml @@ -36,7 +36,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.PAT_SERVICE_ACCOUNT }} run: | - curl -s https://api.github.com/repos/janhq/cortex/releases > /tmp/github_api_releases.json + curl -s https://api.github.com/repos/menloresearch/cortex/releases > /tmp/github_api_releases.json latest_prerelease_name=$(cat /tmp/github_api_releases.json | jq -r '.[] | select(.prerelease) | .name' | head -n 1) get_asset_count() { @@ -89,39 +89,39 @@ jobs: pull-requests: write steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - submodules: recursive - fetch-depth: 0 - token: ${{ secrets.PAT_SERVICE_ACCOUNT }} + - name: Checkout repository + uses: 
actions/checkout@v3 + with: + submodules: recursive + fetch-depth: 0 + token: ${{ secrets.PAT_SERVICE_ACCOUNT }} - - name: Wait for CI to pass - env: + - name: Wait for CI to pass + env: GITHUB_TOKEN: ${{ secrets.PAT_SERVICE_ACCOUNT }} - run: | - pr_number=${{ needs.update-submodule.outputs.pr_number }} - while true; do - ci_completed=$(gh pr checks $pr_number --json completedAt --jq '.[].completedAt') - if echo "$ci_completed" | grep -q "0001-01-01T00:00:00Z"; then - echo "CI is still running, waiting..." - sleep 60 - else - echo "CI has completed, checking states..." - ci_states=$(gh pr checks $pr_number --json state --jq '.[].state') - if echo "$ci_states" | grep -vqE "SUCCESS|SKIPPED"; then - echo "CI failed, exiting..." - exit 1 + run: | + pr_number=${{ needs.update-submodule.outputs.pr_number }} + while true; do + ci_completed=$(gh pr checks $pr_number --json completedAt --jq '.[].completedAt') + if echo "$ci_completed" | grep -q "0001-01-01T00:00:00Z"; then + echo "CI is still running, waiting..." + sleep 60 else - echo "CI passed, merging PR..." - break + echo "CI has completed, checking states..." + ci_states=$(gh pr checks $pr_number --json state --jq '.[].state') + if echo "$ci_states" | grep -vqE "SUCCESS|SKIPPED"; then + echo "CI failed, exiting..." + exit 1 + else + echo "CI passed, merging PR..." 
+ break + fi fi - fi - done + done - - name: Merge the PR - env: + - name: Merge the PR + env: GITHUB_TOKEN: ${{ secrets.PAT_SERVICE_ACCOUNT }} - run: | - pr_number=${{ needs.update-submodule.outputs.pr_number }} - gh pr merge $pr_number --merge --admin + run: | + pr_number=${{ needs.update-submodule.outputs.pr_number }} + gh pr merge $pr_number --merge --admin diff --git a/.github/workflows/template-build-jan-server.yml b/.github/workflows/template-build-jan-server.yml deleted file mode 100644 index 9bb772605..000000000 --- a/.github/workflows/template-build-jan-server.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: build-jan-server -on: - workflow_call: - inputs: - dockerfile_path: - required: false - type: string - default: './Dockerfile' - docker_image_tag: - required: true - type: string - default: 'ghcr.io/janhq/jan-server:dev-latest' - -jobs: - build: - runs-on: ubuntu-latest - env: - REGISTRY: ghcr.io - IMAGE_NAME: janhq/jan-server - permissions: - packages: write - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Log in to the Container registry - uses: docker/login-action@v3 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build and push Docker image - uses: docker/build-push-action@v3 - with: - context: . 
- file: ${{ inputs.dockerfile_path }} - push: true - tags: ${{ inputs.docker_image_tag }} \ No newline at end of file diff --git a/.github/workflows/template-build-linux-x64.yml b/.github/workflows/template-build-linux-x64.yml index 85b050e62..58b566931 100644 --- a/.github/workflows/template-build-linux-x64.yml +++ b/.github/workflows/template-build-linux-x64.yml @@ -23,6 +23,14 @@ on: required: false type: boolean default: false + nightly: + required: false + type: boolean + default: false + cortex_api_port: + required: false + type: string + default: null secrets: DELTA_AWS_S3_BUCKET_NAME: required: false @@ -43,6 +51,31 @@ jobs: with: ref: ${{ inputs.ref }} + - name: Replace Icons for Beta Build + if: inputs.beta == true && inputs.nightly != true + shell: bash + run: | + rm -rf electron/icons/* + + cp electron/icons_dev/jan-beta-512x512.png electron/icons/512x512.png + cp electron/icons_dev/jan-beta.ico electron/icons/icon.ico + cp electron/icons_dev/jan-beta.png electron/icons/icon.png + cp electron/icons_dev/jan-beta-tray@2x.png electron/icons/icon-tray@2x.png + cp electron/icons_dev/jan-beta-tray.png electron/icons/icon-tray.png + + - name: Replace Icons for Nightly Build + if: inputs.nightly == true && inputs.beta != true + shell: bash + run: | + rm -rf electron/icons/* + + cp electron/icons_dev/jan-nightly-512x512.png electron/icons/512x512.png + cp electron/icons_dev/jan-nightly.ico electron/icons/icon.ico + cp electron/icons_dev/jan-nightly.png electron/icons/icon.png + cp electron/icons_dev/jan-nightly-tray@2x.png electron/icons/icon-tray@2x.png + cp electron/icons_dev/jan-nightly-tray.png electron/icons/icon-tray.png + + - name: Installing node uses: actions/setup-node@v1 with: @@ -83,7 +116,7 @@ jobs: cat ./electron/package.json echo "------------------------" cat ./package.json - jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/beta", "channel": "beta"}, {"provider": "github", "owner": "janhq", "repo": "jan", "channel": 
"beta"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-beta", "channel": "beta"}]' electron/package.json > /tmp/package.json + jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/beta", "channel": "beta"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-beta", "channel": "beta"}]' electron/package.json > /tmp/package.json mv /tmp/package.json electron/package.json cat electron/package.json @@ -115,6 +148,7 @@ jobs: AWS_MAX_ATTEMPTS: '5' POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }} POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }} + CORTEX_API_PORT: ${{ inputs.cortex_api_port }} - name: Build and publish app to github if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == false @@ -150,4 +184,4 @@ jobs: uses: actions/upload-artifact@v4 with: name: jan-linux-amd64-${{ inputs.new_version }}-AppImage - path: ./electron/dist/*.AppImage + path: ./electron/dist/*.AppImage \ No newline at end of file diff --git a/.github/workflows/template-build-macos.yml b/.github/workflows/template-build-macos.yml index 2eabd9ce2..a5e5cc724 100644 --- a/.github/workflows/template-build-macos.yml +++ b/.github/workflows/template-build-macos.yml @@ -23,6 +23,14 @@ on: required: false type: boolean default: false + nightly: + required: false + type: boolean + default: false + cortex_api_port: + required: false + type: string + default: null secrets: DELTA_AWS_S3_BUCKET_NAME: required: false @@ -52,6 +60,30 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.ref }} + + - name: Replace Icons for Beta Build + if: inputs.beta == true && inputs.nightly != true + shell: bash + run: | + rm -rf electron/icons/* + + cp electron/icons_dev/jan-beta-512x512.png electron/icons/512x512.png + cp electron/icons_dev/jan-beta.ico 
electron/icons/icon.ico + cp electron/icons_dev/jan-beta.png electron/icons/icon.png + cp electron/icons_dev/jan-beta-tray@2x.png electron/icons/icon-tray@2x.png + cp electron/icons_dev/jan-beta-tray.png electron/icons/icon-tray.png + + - name: Replace Icons for Nightly Build + if: inputs.nightly == true && inputs.beta != true + shell: bash + run: | + rm -rf electron/icons/* + + cp electron/icons_dev/jan-nightly-512x512.png electron/icons/512x512.png + cp electron/icons_dev/jan-nightly.ico electron/icons/icon.ico + cp electron/icons_dev/jan-nightly.png electron/icons/icon.png + cp electron/icons_dev/jan-nightly-tray@2x.png electron/icons/icon-tray@2x.png + cp electron/icons_dev/jan-nightly-tray.png electron/icons/icon-tray.png - name: Installing node uses: actions/setup-node@v1 @@ -99,7 +131,7 @@ jobs: cat ./electron/package.json echo "------------------------" cat ./package.json - jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/beta", "channel": "beta"}, {"provider": "github", "owner": "janhq", "repo": "jan", "channel": "beta"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-beta", "channel": "beta"}]' electron/package.json > /tmp/package.json + jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/beta", "channel": "beta"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-beta", "channel": "beta"}]' electron/package.json > /tmp/package.json mv /tmp/package.json electron/package.json cat electron/package.json @@ -154,6 +186,7 @@ jobs: AWS_MAX_ATTEMPTS: '5' POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }} POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }} + CORTEX_API_PORT: ${{ inputs.cortex_api_port }} - name: Build and publish app to github if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 
'github' && inputs.beta == false @@ -197,4 +230,4 @@ jobs: uses: actions/upload-artifact@v4 with: name: jan-mac-universal-${{ inputs.new_version }} - path: ./electron/dist/*.dmg + path: ./electron/dist/*.dmg \ No newline at end of file diff --git a/.github/workflows/template-build-windows-x64.yml b/.github/workflows/template-build-windows-x64.yml index a317b4960..9be028e15 100644 --- a/.github/workflows/template-build-windows-x64.yml +++ b/.github/workflows/template-build-windows-x64.yml @@ -23,6 +23,14 @@ on: required: false type: boolean default: false + nightly: + required: false + type: boolean + default: false + cortex_api_port: + required: false + type: string + default: null secrets: DELTA_AWS_S3_BUCKET_NAME: required: false @@ -52,6 +60,30 @@ jobs: with: ref: ${{ inputs.ref }} + - name: Replace Icons for Beta Build + if: inputs.beta == true && inputs.nightly != true + shell: bash + run: | + rm -rf electron/icons/* + + cp electron/icons_dev/jan-beta-512x512.png electron/icons/512x512.png + cp electron/icons_dev/jan-beta.ico electron/icons/icon.ico + cp electron/icons_dev/jan-beta.png electron/icons/icon.png + cp electron/icons_dev/jan-beta-tray@2x.png electron/icons/icon-tray@2x.png + cp electron/icons_dev/jan-beta-tray.png electron/icons/icon-tray.png + + - name: Replace Icons for Nightly Build + if: inputs.nightly == true && inputs.beta != true + shell: bash + run: | + rm -rf electron/icons/* + + cp electron/icons_dev/jan-nightly-512x512.png electron/icons/512x512.png + cp electron/icons_dev/jan-nightly.ico electron/icons/icon.ico + cp electron/icons_dev/jan-nightly.png electron/icons/icon.png + cp electron/icons_dev/jan-nightly-tray@2x.png electron/icons/icon-tray@2x.png + cp electron/icons_dev/jan-nightly-tray.png electron/icons/icon-tray.png + - name: Installing node uses: actions/setup-node@v1 with: @@ -108,7 +140,7 @@ jobs: cat ./package.json echo "------------------------" cat ./electron/scripts/uninstaller.nsh - jq '.build.publish = [{"provider": 
"generic", "url": "https://delta.jan.ai/beta", "channel": "beta"}, {"provider": "github", "owner": "janhq", "repo": "jan", "channel": "beta"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-beta", "channel": "beta"}]' electron/package.json > /tmp/package.json + jq '.build.publish = [{"provider": "generic", "url": "https://delta.jan.ai/beta", "channel": "beta"}, {"provider": "s3", "acl": null, "bucket": "${{ secrets.DELTA_AWS_S3_BUCKET_NAME }}", "region": "${{ secrets.DELTA_AWS_REGION}}", "path": "temp-beta", "channel": "beta"}]' electron/package.json > /tmp/package.json mv /tmp/package.json electron/package.json cat electron/package.json @@ -153,6 +185,7 @@ jobs: AWS_MAX_ATTEMPTS: '5' POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }} POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }} + CORTEX_API_PORT: ${{ inputs.cortex_api_port }} - name: Build app and publish app to github if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == false @@ -193,4 +226,4 @@ jobs: uses: actions/upload-artifact@v4 with: name: jan-win-x64-${{ inputs.new_version }} - path: ./electron/dist/*.exe + path: ./electron/dist/*.exe \ No newline at end of file diff --git a/.github/workflows/template-get-update-version.yml b/.github/workflows/template-get-update-version.yml index 24cfe4a03..97340be81 100644 --- a/.github/workflows/template-get-update-version.yml +++ b/.github/workflows/template-get-update-version.yml @@ -13,46 +13,46 @@ jobs: outputs: new_version: ${{ steps.version_update.outputs.new_version }} steps: - - name: Install jq - uses: dcarbone/install-jq-action@v2.0.1 + - name: Install jq + uses: dcarbone/install-jq-action@v2.0.1 - - name: Get tag - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') - id: tag - uses: dawidd6/action-get-tag@v1 + - name: Get tag + if: github.event_name == 'push' && startsWith(github.ref, 
'refs/tags/') + id: tag + uses: dawidd6/action-get-tag@v1 - - name: Update app version based on latest release tag with build number - id: version_update - run: | - # Function to get the latest release tag - get_latest_tag() { - local retries=0 - local max_retries=3 - local tag - while [ $retries -lt $max_retries ]; do - tag=$(curl -s https://api.github.com/repos/janhq/jan/releases/latest | jq -r .tag_name) - if [ -n "$tag" ] && [ "$tag" != "null" ]; then - echo $tag - return - else - let retries++ - echo "Retrying... ($retries/$max_retries)" - sleep 2 - fi - done - echo "Failed to fetch latest tag after $max_retries attempts." - exit 1 - } + - name: Update app version based on latest release tag with build number + id: version_update + run: | + # Function to get the latest release tag + get_latest_tag() { + local retries=0 + local max_retries=3 + local tag + while [ $retries -lt $max_retries ]; do + tag=$(curl -s https://api.github.com/repos/menloresearch/jan/releases/latest | jq -r .tag_name) + if [ -n "$tag" ] && [ "$tag" != "null" ]; then + echo $tag + return + else + let retries++ + echo "Retrying... ($retries/$max_retries)" + sleep 2 + fi + done + echo "Failed to fetch latest tag after $max_retries attempts." 
+ exit 1 + } - if ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') }}; then - echo "Tag detected, set output follow tag" - echo "::set-output name=new_version::${{ steps.tag.outputs.tag }}" - else - # Get the latest release tag from GitHub API - LATEST_TAG=$(get_latest_tag) - - # Remove the 'v' and append the build number to the version - new_version="${LATEST_TAG#v}-${GITHUB_RUN_NUMBER}" - echo "New version: $new_version" - echo "::set-output name=new_version::$new_version" - fi \ No newline at end of file + if ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') }}; then + echo "Tag detected, set output follow tag" + echo "::set-output name=new_version::${{ steps.tag.outputs.tag }}" + else + # Get the latest release tag from GitHub API + LATEST_TAG=$(get_latest_tag) + + # Remove the 'v' and append the build number to the version + new_version="${LATEST_TAG#v}-${GITHUB_RUN_NUMBER}" + echo "New version: $new_version" + echo "::set-output name=new_version::$new_version" + fi diff --git a/.github/workflows/template-noti-discord-and-update-url-readme.yml b/.github/workflows/template-noti-discord-and-update-url-readme.yml index 59ad3c7be..282e0aa76 100644 --- a/.github/workflows/template-noti-discord-and-update-url-readme.yml +++ b/.github/workflows/template-noti-discord-and-update-url-readme.yml @@ -34,7 +34,7 @@ jobs: - name: Checkout code uses: actions/checkout@v3 with: - fetch-depth: "0" + fetch-depth: '0' token: ${{ secrets.PAT_SERVICE_ACCOUNT }} ref: ${{ inputs.ref }} @@ -51,6 +51,6 @@ jobs: - macOS Universal: https://delta.jan.ai/nightly/jan-nightly-mac-universal-{{ VERSION }}.dmg - Linux Deb: https://delta.jan.ai/nightly/jan-nightly-linux-amd64-{{ VERSION }}.deb - Linux AppImage: https://delta.jan.ai/nightly/jan-nightly-linux-x86_64-{{ VERSION }}.AppImage - - Github action run: https://github.com/janhq/jan/actions/runs/{{ GITHUB_RUN_ID }} + - Github action run: https://github.com/menloresearch/jan/actions/runs/{{ 
GITHUB_RUN_ID }} env: - DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }} \ No newline at end of file + DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }} diff --git a/.husky/pre-commit b/.husky/pre-commit index 53c4e577e..94c03b512 100644 --- a/.husky/pre-commit +++ b/.husky/pre-commit @@ -1 +1 @@ -npx oxlint@latest --fix \ No newline at end of file +yarn lint --fix --quiet \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6ea90e69c..2a254fb49 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,8 +6,8 @@ First off, thank you for considering contributing to jan. It's people like you t ### Reporting Bugs -- **Ensure the bug was not already reported** by searching on GitHub under [Issues](https://github.com/janhq/jan/issues). -- If you're unable to find an open issue addressing the problem, [open a new one](https://github.com/janhq/jan/issues/new). +- **Ensure the bug was not already reported** by searching on GitHub under [Issues](https://github.com/menloresearch/jan/issues). +- If you're unable to find an open issue addressing the problem, [open a new one](https://github.com/menloresearch/jan/issues/new). ### Suggesting Enhancements @@ -29,4 +29,4 @@ First off, thank you for considering contributing to jan. It's people like you t ## Additional Notes -Thank you for contributing to jan! \ No newline at end of file +Thank you for contributing to jan! diff --git a/README.md b/README.md index 8052a34dc..fb820ddf9 100644 --- a/README.md +++ b/README.md @@ -4,18 +4,18 @@

- GitHub commit activity - Github Last Commit - Github Contributors - GitHub closed issues + GitHub commit activity + Github Last Commit + Github Contributors + GitHub closed issues Discord

Getting Started - Docs - - Changelog - - Bug reports + - Changelog + - Bug reports - Discord

@@ -23,10 +23,9 @@ ⚠️ Jan is currently in Development: Expect breaking changes and bugs!

- Jan is a ChatGPT-alternative that runs 100% offline on your device. Our goal is to make it easy for a layperson to download and run LLMs and use AI with **full control** and **privacy**. -Jan is powered by [Cortex](https://github.com/janhq/cortex.cpp), our embeddable local AI engine that runs on any hardware. +Jan is powered by [Cortex](https://github.com/menloresearch/cortex.cpp), our embeddable local AI engine that runs on any hardware. From PCs to multi-GPU clusters, Jan & Cortex supports universal architectures: - [x] NVIDIA GPUs (fast) @@ -36,7 +35,8 @@ From PCs to multi-GPU clusters, Jan & Cortex supports universal architectures: - [x] Windows x64 #### Features: -- [Model Library](https://jan.ai/docs/models/manage-models#add-models) with popular LLMs like Llama, Gemma, Mistral, or Qwen + +- [Model Library](https://jan.ai/docs/models/manage-models#add-models) with popular LLMs like Llama, Gemma, Mistral, or Qwen - Connect to [Remote AI APIs](https://jan.ai/docs/remote-models/openai) like Groq and OpenRouter - Local API Server with OpenAI-equivalent API - [Extensions](https://jan.ai/docs/extensions) for customizing Jan @@ -54,25 +54,25 @@ From PCs to multi-GPU clusters, Jan & Cortex supports universal architectures: Stable (Recommended) - + jan.exe - + jan.dmg - + jan.deb - + jan.AppImage @@ -81,25 +81,25 @@ From PCs to multi-GPU clusters, Jan & Cortex supports universal architectures: Beta (Preview) - + jan.exe - + jan.dmg - + jan.deb - + jan.AppImage @@ -108,59 +108,59 @@ From PCs to multi-GPU clusters, Jan & Cortex supports universal architectures: Nightly Build (Experimental) - + jan.exe - + jan.dmg - + jan.deb - + jan.AppImage -Download the latest version of Jan at https://jan.ai/ or visit the [GitHub Releases](https://github.com/janhq/jan/releases) to download any previous release. +Download the latest version of Jan at https://jan.ai/ or visit the [GitHub Releases](https://github.com/menloresearch/jan/releases) to download any previous release. 
## Demo https://github.com/user-attachments/assets/c3592fa2-c504-4d9d-a885-7e00122a50f3 -*Real-time Video: Jan v0.5.7 on a Mac M2, 16GB Sonoma 14.2* +_Real-time Video: Jan v0.5.7 on a Mac M2, 16GB Sonoma 14.2_ ## Quicklinks ### Jan - [Jan Website](https://jan.ai/) -- [Jan GitHub](https://github.com/janhq/jan) +- [Jan GitHub](https://github.com/menloresearch/jan) - [Documentation](https://jan.ai/docs) - [Jan Changelog](https://jan.ai/changelog) - [Jan Blog](https://jan.ai/blog) ### Cortex.cpp + Jan is powered by **Cortex.cpp**. It is a C++ command-line interface (CLI) designed as an alternative to [Ollama](https://ollama.com/). By default, it runs on the llama.cpp engine but also supports other engines, including ONNX and TensorRT-LLM, making it a multi-engine platform. - - [Cortex Website](https://cortex.so/) -- [Cortex GitHub](https://github.com/janhq/cortex.cpp) +- [Cortex GitHub](https://github.com/menloresearch/cortex.cpp) - [Documentation](https://cortex.so/docs/) - [Models Library](https://cortex.so/models) -- API Reference: *Under development* - +- API Reference: _Under development_ + ## Requirements for running Jan - **MacOS**: 13 or higher @@ -179,17 +179,17 @@ Jan is powered by **Cortex.cpp**. It is a C++ command-line interface (CLI) desig ## Troubleshooting As Jan is in development mode, you might get stuck on a some common issues: + - [Troubleshooting a broken build](https://jan.ai/docs/troubleshooting#broken-build) - [Troubleshooting NVIDIA GPU](https://jan.ai/docs/troubleshooting#troubleshooting-nvidia-gpu) - [Troubleshooting Something's Amiss](https://jan.ai/docs/troubleshooting#somethings-amiss) - If you can't find what you need in our troubleshooting guide, feel free reach out to us for extra help: + 1. Copy your [error logs & device specifications](https://jan.ai/docs/troubleshooting#how-to-get-error-logs). 2. Go to our [Discord](https://discord.com/invite/FTk2MvZwJH) & send it to **#🆘|get-help** channel for further support. 
-*Check the logs to ensure the information is what you intend to send. Note that we retain your logs for only 24 hours, so report any issues promptly.* - +_Check the logs to ensure the information is what you intend to send. Note that we retain your logs for only 24 hours, so report any issues promptly._ ## Contributing @@ -206,7 +206,7 @@ Contributions are welcome! Please read the [CONTRIBUTING.md](CONTRIBUTING.md) fi 1. **Clone the repository and prepare:** ```bash - git clone https://github.com/janhq/jan + git clone https://github.com/menloresearch/jan cd jan git checkout -b DESIRED_BRANCH ``` @@ -219,8 +219,6 @@ Contributions are welcome! Please read the [CONTRIBUTING.md](CONTRIBUTING.md) fi This will start the development server and open the desktop app. - - ### For production build ```bash @@ -244,7 +242,7 @@ Jan builds on top of other open-source projects: - Bugs & requests: file a GitHub ticket - For discussion: join our Discord [here](https://discord.gg/FTk2MvZwJH) -- For business inquiries: email hello@jan.ai +- For business inquiries: email hello@jan.ai - For jobs: please email hr@jan.ai ## Trust & Safety @@ -254,7 +252,7 @@ Beware of scams! - We will never request your personal information. - Our product is completely free; no paid version exists. - We do not have a token or ICO. -- We are a [bootstrapped company](https://en.wikipedia.org/wiki/Bootstrapping), and don't have any external investors (*yet*). We're open to exploring opportunities with strategic partners want to tackle [our mission](https://jan.ai/about#mission) together. +- We are a [bootstrapped company](https://en.wikipedia.org/wiki/Bootstrapping), and don't have any external investors (_yet_). We're open to exploring opportunities with strategic partners want to tackle [our mission](https://jan.ai/about#mission) together. 
## License diff --git a/ai.menlo.jan.metainfo.xml b/ai.menlo.jan.metainfo.xml index 713471d26..ba17914e9 100644 --- a/ai.menlo.jan.metainfo.xml +++ b/ai.menlo.jan.metainfo.xml @@ -28,7 +28,7 @@ https://jan.ai/ - https://github.com/janhq/jan/issues + https://github.com/menloresearch/jan/issues diff --git a/core/README.md b/core/README.md index 925ffaf7b..e22bed42d 100644 --- a/core/README.md +++ b/core/README.md @@ -8,37 +8,38 @@ ```js // Web / extension runtime -import * as core from "@janhq/core"; +import * as core from '@janhq/core' // Node runtime -import * as node from "@janhq/core/node"; +import * as node from '@janhq/core/node' ``` ## Build an Extension -1. Download an extension template, for example, [https://github.com/janhq/extension-template](https://github.com/janhq/extension-template). +1. Download an extension template, for example, [https://github.com/menloresearch/extension-template](https://github.com/menloresearch/extension-template). 2. Update the source code: + 1. Open `index.ts` in your code editor. 2. Rename the extension class from `SampleExtension` to your preferred extension name. 3. Import modules from the core package. ```ts - import * as core from "@janhq/core"; + import * as core from '@janhq/core' ``` 4. 
In the `onLoad()` method, add your code: + ```ts // Example of listening to app events and providing customized inference logic: - import * as core from "@janhq/core"; + import * as core from '@janhq/core' export default class MyExtension extends BaseExtension { // On extension load onLoad() { - core.events.on(MessageEvent.OnMessageSent, (data) => MyExtension.inference(data, this)); + core.events.on(MessageEvent.OnMessageSent, (data) => MyExtension.inference(data, this)) } // Customized inference logic private static inference(incomingMessage: MessageRequestData) { - // Prepare customized message content const content: ThreadContent = { type: ContentType.Text, @@ -46,16 +47,17 @@ import * as node from "@janhq/core/node"; value: "I'm Jan Assistant!", annotations: [], }, - }; + } // Modify message and send out const outGoingMessage: ThreadMessage = { ...incomingMessage, - content - }; + content, + } } } ``` + 3. Build the extension: 1. Navigate to the extension directory. 2. Install dependencies. @@ -66,4 +68,4 @@ import * as node from "@janhq/core/node"; ```bash yarn build ``` - 4. Select the generated .tgz from Jan > Settings > Extension > Manual Installation. \ No newline at end of file + 4. Select the generated .tgz from Jan > Settings > Extension > Manual Installation. 
diff --git a/core/rolldown.config.mjs b/core/rolldown.config.mjs index d95f8de8e..ea488df33 100644 --- a/core/rolldown.config.mjs +++ b/core/rolldown.config.mjs @@ -25,7 +25,6 @@ export default defineConfig([ '@types/pacote', '@npmcli/arborist', 'ulidx', - 'node-fetch', 'fs', 'request', 'crypto', diff --git a/core/src/browser/core.test.ts b/core/src/browser/core.test.ts index 720ea9dcf..117298eb6 100644 --- a/core/src/browser/core.test.ts +++ b/core/src/browser/core.test.ts @@ -2,7 +2,6 @@ import { openExternalUrl } from './core' import { joinPath } from './core' import { openFileExplorer } from './core' import { getJanDataFolderPath } from './core' -import { abortDownload } from './core' import { executeOnMain } from './core' describe('test core apis', () => { @@ -53,18 +52,6 @@ describe('test core apis', () => { expect(result).toBe('/path/to/jan/data') }) - it('should abort download', async () => { - const fileName = 'testFile' - globalThis.core = { - api: { - abortDownload: jest.fn().mockResolvedValue('aborted'), - }, - } - const result = await abortDownload(fileName) - expect(globalThis.core.api.abortDownload).toHaveBeenCalledWith(fileName) - expect(result).toBe('aborted') - }) - it('should execute function on main process', async () => { const extension = 'testExtension' const method = 'testMethod' diff --git a/core/src/browser/core.ts b/core/src/browser/core.ts index a0abbb43e..43b5f9d48 100644 --- a/core/src/browser/core.ts +++ b/core/src/browser/core.ts @@ -1,9 +1,4 @@ -import { - DownloadRequest, - FileStat, - NetworkConfig, - SystemInformation, -} from '../types' +import { SystemInformation } from '../types' /** * Execute a extension module function in main process @@ -14,42 +9,19 @@ import { * @returns Promise * */ -const executeOnMain: ( - extension: string, - method: string, - ...args: any[] -) => Promise = (extension, method, ...args) => - globalThis.core?.api?.invokeExtensionFunc(extension, method, ...args) +const executeOnMain: (extension: string, 
method: string, ...args: any[]) => Promise = ( + extension, + method, + ...args +) => globalThis.core?.api?.invokeExtensionFunc(extension, method, ...args) -/** - * Downloads a file from a URL and saves it to the local file system. - * - * @param {DownloadRequest} downloadRequest - The request to download the file. - * @param {NetworkConfig} network - Optional object to specify proxy/whether to ignore SSL certificates. - * - * @returns {Promise} A promise that resolves when the file is downloaded. - */ -const downloadFile: ( - downloadRequest: DownloadRequest, - network?: NetworkConfig -) => Promise = (downloadRequest, network) => - globalThis.core?.api?.downloadFile(downloadRequest, network) - -/** - * Aborts the download of a specific file. - * @param {string} fileName - The name of the file whose download is to be aborted. - * @returns {Promise} A promise that resolves when the download has been aborted. - */ -const abortDownload: (fileName: string) => Promise = (fileName) => - globalThis.core.api?.abortDownload(fileName) /** * Gets Jan's data folder path. * * @returns {Promise} A Promise that resolves with Jan's data folder path. */ -const getJanDataFolderPath = (): Promise => - globalThis.core.api?.getJanDataFolderPath() +const getJanDataFolderPath = (): Promise => globalThis.core.api?.getJanDataFolderPath() /** * Opens the file explorer at a specific path. @@ -72,16 +44,14 @@ const joinPath: (paths: string[]) => Promise = (paths) => * @param path - The file path to retrieve dirname. * @returns {Promise} A promise that resolves the dirname. */ -const dirName: (path: string) => Promise = (path) => - globalThis.core.api?.dirName(path) +const dirName: (path: string) => Promise = (path) => globalThis.core.api?.dirName(path) /** * Retrieve the basename from an url. * @param path - The path to retrieve. * @returns {Promise} A promise that resolves with the basename. 
*/ -const baseName: (paths: string) => Promise = (path) => - globalThis.core.api?.baseName(path) +const baseName: (paths: string) => Promise = (path) => globalThis.core.api?.baseName(path) /** * Opens an external URL in the default web browser. @@ -97,15 +67,13 @@ const openExternalUrl: (url: string) => Promise = (url) => * * @returns {Promise} - A promise that resolves with the resource path. */ -const getResourcePath: () => Promise = () => - globalThis.core.api?.getResourcePath() +const getResourcePath: () => Promise = () => globalThis.core.api?.getResourcePath() /** * Gets the user's home path. * @returns return user's home path */ -const getUserHomePath = (): Promise => - globalThis.core.api?.getUserHomePath() +const getUserHomePath = (): Promise => globalThis.core.api?.getUserHomePath() /** * Log to file from browser processes. @@ -123,10 +91,8 @@ const log: (message: string, fileName?: string) => void = (message, fileName) => * * @returns {Promise} - A promise that resolves with a boolean indicating whether the path is a subdirectory. 
*/ -const isSubdirectory: (from: string, to: string) => Promise = ( - from: string, - to: string -) => globalThis.core.api?.isSubdirectory(from, to) +const isSubdirectory: (from: string, to: string) => Promise = (from: string, to: string) => + globalThis.core.api?.isSubdirectory(from, to) /** * Get system information @@ -159,8 +125,6 @@ export type RegisterExtensionPoint = ( */ export { executeOnMain, - downloadFile, - abortDownload, getJanDataFolderPath, openFileExplorer, getResourcePath, diff --git a/core/src/browser/extension.test.ts b/core/src/browser/extension.test.ts index 2db14a24e..879258876 100644 --- a/core/src/browser/extension.test.ts +++ b/core/src/browser/extension.test.ts @@ -39,11 +39,6 @@ describe('BaseExtension', () => { expect(baseExtension.onUnload).toBeDefined() }) - it('should have installationState() return "NotRequired"', async () => { - const installationState = await baseExtension.installationState() - expect(installationState).toBe('NotRequired') - }) - it('should install the extension', async () => { await baseExtension.install() // Add your assertions here @@ -84,11 +79,6 @@ describe('BaseExtension', () => { expect(baseExtension.onUnload).toBeDefined() }) - it('should have installationState() return "NotRequired"', async () => { - const installationState = await baseExtension.installationState() - expect(installationState).toBe('NotRequired') - }) - it('should install the extension', async () => { await baseExtension.install() // Add your assertions here diff --git a/core/src/browser/extension.ts b/core/src/browser/extension.ts index 1d641980b..a050b9d59 100644 --- a/core/src/browser/extension.ts +++ b/core/src/browser/extension.ts @@ -12,6 +12,7 @@ export enum ExtensionTypeEnum { SystemMonitoring = 'systemMonitoring', HuggingFace = 'huggingFace', Engine = 'engine', + Hardware = 'hardware', } export interface ExtensionType { @@ -23,17 +24,6 @@ export interface Compatibility { version: string } -const ALL_INSTALLATION_STATE = [ - 
'NotRequired', // not required. - 'Installed', // require and installed. Good to go. - 'NotInstalled', // require to be installed. - 'Corrupted', // require but corrupted. Need to redownload. - 'NotCompatible', // require but not compatible. -] as const - -export type InstallationStateTuple = typeof ALL_INSTALLATION_STATE -export type InstallationState = InstallationStateTuple[number] - /** * Represents a base extension. * This class should be extended by any class that represents an extension. @@ -174,15 +164,6 @@ export abstract class BaseExtension implements ExtensionType { return } - /** - * Determine if the prerequisites for the extension are installed. - * - * @returns {boolean} true if the prerequisites are installed, false otherwise. - */ - async installationState(): Promise { - return 'NotRequired' - } - /** * Install the prerequisites for the extension. * @@ -227,7 +208,7 @@ export abstract class BaseExtension implements ExtensionType { const settings = await this.getSettings() - const updatedSettings = settings.map((setting) => { + let updatedSettings = settings.map((setting) => { const updatedSetting = componentProps.find( (componentProp) => componentProp.key === setting.key ) @@ -237,13 +218,20 @@ export abstract class BaseExtension implements ExtensionType { return setting }) - const settingPath = await joinPath([ + if (!updatedSettings.length) updatedSettings = componentProps as SettingComponentProps[] + + const settingFolder = await joinPath([ await getJanDataFolderPath(), this.settingFolderName, this.name, - this.settingFileName, ]) + if (!(await fs.existsSync(settingFolder))) { + await fs.mkdir(settingFolder) + } + + const settingPath = await joinPath([settingFolder, this.settingFileName]) + await fs.writeFileSync(settingPath, JSON.stringify(updatedSettings, null, 2)) updatedSettings.forEach((setting) => { diff --git a/core/src/browser/extensions/conversational.test.ts b/core/src/browser/extensions/conversational.test.ts new file mode 100644 index 
000000000..8046383c9 --- /dev/null +++ b/core/src/browser/extensions/conversational.test.ts @@ -0,0 +1,252 @@ +import { ConversationalExtension } from './conversational' +import { ExtensionTypeEnum } from '../extension' +import { Thread, ThreadAssistantInfo, ThreadMessage } from '../../types' + +// Mock implementation of ConversationalExtension +class MockConversationalExtension extends ConversationalExtension { + private threads: Thread[] = [] + private messages: { [threadId: string]: ThreadMessage[] } = {} + private assistants: { [threadId: string]: ThreadAssistantInfo } = {} + + constructor() { + super('http://mock-url.com', 'mock-extension', 'Mock Extension', true, 'A mock extension', '1.0.0') + } + + onLoad(): void { + // Mock implementation + } + + onUnload(): void { + // Mock implementation + } + + async listThreads(): Promise { + return this.threads + } + + async createThread(thread: Partial): Promise { + const newThread: Thread = { + id: thread.id || `thread-${Date.now()}`, + name: thread.name || 'New Thread', + createdAt: thread.createdAt || new Date().toISOString(), + updatedAt: thread.updatedAt || new Date().toISOString(), + } + this.threads.push(newThread) + this.messages[newThread.id] = [] + return newThread + } + + async modifyThread(thread: Thread): Promise { + const index = this.threads.findIndex(t => t.id === thread.id) + if (index !== -1) { + this.threads[index] = thread + } + } + + async deleteThread(threadId: string): Promise { + this.threads = this.threads.filter(t => t.id !== threadId) + delete this.messages[threadId] + delete this.assistants[threadId] + } + + async createMessage(message: Partial): Promise { + if (!message.threadId) throw new Error('Thread ID is required') + + const newMessage: ThreadMessage = { + id: message.id || `message-${Date.now()}`, + threadId: message.threadId, + content: message.content || '', + role: message.role || 'user', + createdAt: message.createdAt || new Date().toISOString(), + } + + if 
(!this.messages[message.threadId]) { + this.messages[message.threadId] = [] + } + + this.messages[message.threadId].push(newMessage) + return newMessage + } + + async deleteMessage(threadId: string, messageId: string): Promise { + if (this.messages[threadId]) { + this.messages[threadId] = this.messages[threadId].filter(m => m.id !== messageId) + } + } + + async listMessages(threadId: string): Promise { + return this.messages[threadId] || [] + } + + async getThreadAssistant(threadId: string): Promise { + return this.assistants[threadId] || { modelId: '', threadId } + } + + async createThreadAssistant( + threadId: string, + assistant: ThreadAssistantInfo + ): Promise { + this.assistants[threadId] = assistant + return assistant + } + + async modifyThreadAssistant( + threadId: string, + assistant: ThreadAssistantInfo + ): Promise { + this.assistants[threadId] = assistant + return assistant + } + + async modifyMessage(message: ThreadMessage): Promise { + if (!this.messages[message.threadId]) return message + + const index = this.messages[message.threadId].findIndex(m => m.id === message.id) + if (index !== -1) { + this.messages[message.threadId][index] = message + } + + return message + } +} + +describe('ConversationalExtension', () => { + let extension: MockConversationalExtension + + beforeEach(() => { + extension = new MockConversationalExtension() + }) + + test('should return the correct extension type', () => { + expect(extension.type()).toBe(ExtensionTypeEnum.Conversational) + }) + + test('should create and list threads', async () => { + const thread = await extension.createThread({ name: 'Test Thread' }) + expect(thread.name).toBe('Test Thread') + + const threads = await extension.listThreads() + expect(threads).toHaveLength(1) + expect(threads[0].id).toBe(thread.id) + }) + + test('should modify thread', async () => { + const thread = await extension.createThread({ name: 'Test Thread' }) + const modifiedThread = { ...thread, name: 'Modified Thread' } + + await 
extension.modifyThread(modifiedThread) + + const threads = await extension.listThreads() + expect(threads[0].name).toBe('Modified Thread') + }) + + test('should delete thread', async () => { + const thread = await extension.createThread({ name: 'Test Thread' }) + + await extension.deleteThread(thread.id) + + const threads = await extension.listThreads() + expect(threads).toHaveLength(0) + }) + + test('should create and list messages', async () => { + const thread = await extension.createThread({ name: 'Test Thread' }) + + const message = await extension.createMessage({ + threadId: thread.id, + content: 'Test message', + role: 'user' + }) + + expect(message.content).toBe('Test message') + + const messages = await extension.listMessages(thread.id) + expect(messages).toHaveLength(1) + expect(messages[0].id).toBe(message.id) + }) + + test('should modify message', async () => { + const thread = await extension.createThread({ name: 'Test Thread' }) + + const message = await extension.createMessage({ + threadId: thread.id, + content: 'Test message', + role: 'user' + }) + + const modifiedMessage = { ...message, content: 'Modified message' } + + await extension.modifyMessage(modifiedMessage) + + const messages = await extension.listMessages(thread.id) + expect(messages[0].content).toBe('Modified message') + }) + + test('should delete message', async () => { + const thread = await extension.createThread({ name: 'Test Thread' }) + + const message = await extension.createMessage({ + threadId: thread.id, + content: 'Test message', + role: 'user' + }) + + await extension.deleteMessage(thread.id, message.id) + + const messages = await extension.listMessages(thread.id) + expect(messages).toHaveLength(0) + }) + + test('should create and get thread assistant', async () => { + const thread = await extension.createThread({ name: 'Test Thread' }) + + const assistant: ThreadAssistantInfo = { + threadId: thread.id, + modelId: 'test-model' + } + + await 
extension.createThreadAssistant(thread.id, assistant) + + const retrievedAssistant = await extension.getThreadAssistant(thread.id) + expect(retrievedAssistant.modelId).toBe('test-model') + }) + + test('should modify thread assistant', async () => { + const thread = await extension.createThread({ name: 'Test Thread' }) + + const assistant: ThreadAssistantInfo = { + threadId: thread.id, + modelId: 'test-model' + } + + await extension.createThreadAssistant(thread.id, assistant) + + const modifiedAssistant: ThreadAssistantInfo = { + threadId: thread.id, + modelId: 'modified-model' + } + + await extension.modifyThreadAssistant(thread.id, modifiedAssistant) + + const retrievedAssistant = await extension.getThreadAssistant(thread.id) + expect(retrievedAssistant.modelId).toBe('modified-model') + }) + + test('should delete thread assistant when thread is deleted', async () => { + const thread = await extension.createThread({ name: 'Test Thread' }) + + const assistant: ThreadAssistantInfo = { + threadId: thread.id, + modelId: 'test-model' + } + + await extension.createThreadAssistant(thread.id, assistant) + await extension.deleteThread(thread.id) + + // Creating a new thread with the same ID to test if assistant was deleted + const newThread = await extension.createThread({ id: thread.id, name: 'New Thread' }) + const retrievedAssistant = await extension.getThreadAssistant(newThread.id) + + expect(retrievedAssistant.modelId).toBe('') + }) +}) \ No newline at end of file diff --git a/core/src/browser/extensions/engines/EngineManager.test.ts b/core/src/browser/extensions/engines/EngineManager.test.ts index c1f1fcb71..319dc792a 100644 --- a/core/src/browser/extensions/engines/EngineManager.test.ts +++ b/core/src/browser/extensions/engines/EngineManager.test.ts @@ -3,6 +3,7 @@ */ import { EngineManager } from './EngineManager' import { AIEngine } from './AIEngine' +import { InferenceEngine } from '../../../types' // @ts-ignore class MockAIEngine implements AIEngine { @@ -40,4 
+41,69 @@ describe('EngineManager', () => { const retrievedEngine = engineManager.get('nonExistentProvider') expect(retrievedEngine).toBeUndefined() }) + + describe('cortex engine migration', () => { + test('should map nitro to cortex engine', () => { + const cortexEngine = new MockAIEngine(InferenceEngine.cortex) + // @ts-ignore + engineManager.register(cortexEngine) + + // @ts-ignore + const retrievedEngine = engineManager.get(InferenceEngine.nitro) + expect(retrievedEngine).toBe(cortexEngine) + }) + + test('should map cortex_llamacpp to cortex engine', () => { + const cortexEngine = new MockAIEngine(InferenceEngine.cortex) + // @ts-ignore + engineManager.register(cortexEngine) + + // @ts-ignore + const retrievedEngine = engineManager.get(InferenceEngine.cortex_llamacpp) + expect(retrievedEngine).toBe(cortexEngine) + }) + + test('should map cortex_onnx to cortex engine', () => { + const cortexEngine = new MockAIEngine(InferenceEngine.cortex) + // @ts-ignore + engineManager.register(cortexEngine) + + // @ts-ignore + const retrievedEngine = engineManager.get(InferenceEngine.cortex_onnx) + expect(retrievedEngine).toBe(cortexEngine) + }) + + test('should map cortex_tensorrtllm to cortex engine', () => { + const cortexEngine = new MockAIEngine(InferenceEngine.cortex) + // @ts-ignore + engineManager.register(cortexEngine) + + // @ts-ignore + const retrievedEngine = engineManager.get(InferenceEngine.cortex_tensorrtllm) + expect(retrievedEngine).toBe(cortexEngine) + }) + }) + + describe('singleton instance', () => { + test('should return the window.core.engineManager if available', () => { + const mockEngineManager = new EngineManager() + // @ts-ignore + window.core = { engineManager: mockEngineManager } + + const instance = EngineManager.instance() + expect(instance).toBe(mockEngineManager) + + // Clean up + // @ts-ignore + delete window.core + }) + + test('should create a new instance if window.core.engineManager is not available', () => { + // @ts-ignore + delete 
window.core + + const instance = EngineManager.instance() + expect(instance).toBeInstanceOf(EngineManager) + }) + }) }) diff --git a/core/src/browser/extensions/engines/OAIEngine.test.ts b/core/src/browser/extensions/engines/OAIEngine.test.ts index 81348786c..66537d0be 100644 --- a/core/src/browser/extensions/engines/OAIEngine.test.ts +++ b/core/src/browser/extensions/engines/OAIEngine.test.ts @@ -38,8 +38,14 @@ describe('OAIEngine', () => { it('should subscribe to events on load', () => { engine.onLoad() - expect(events.on).toHaveBeenCalledWith(MessageEvent.OnMessageSent, expect.any(Function)) - expect(events.on).toHaveBeenCalledWith(InferenceEvent.OnInferenceStopped, expect.any(Function)) + expect(events.on).toHaveBeenCalledWith( + MessageEvent.OnMessageSent, + expect.any(Function) + ) + expect(events.on).toHaveBeenCalledWith( + InferenceEvent.OnInferenceStopped, + expect.any(Function) + ) }) it('should handle inference request', async () => { @@ -77,7 +83,12 @@ describe('OAIEngine', () => { expect(events.emit).toHaveBeenCalledWith( MessageEvent.OnMessageUpdate, expect.objectContaining({ - content: [{ type: ContentType.Text, text: { value: 'test response', annotations: [] } }], + content: [ + { + type: ContentType.Text, + text: { value: 'test response', annotations: [] }, + }, + ], status: MessageStatus.Ready, }) ) @@ -101,11 +112,10 @@ describe('OAIEngine', () => { await engine.inference(data) - expect(events.emit).toHaveBeenCalledWith( + expect(events.emit).toHaveBeenLastCalledWith( MessageEvent.OnMessageUpdate, expect.objectContaining({ - content: [{ type: ContentType.Text, text: { value: 'test error', annotations: [] } }], - status: MessageStatus.Error, + status: 'error', error_code: 500, }) ) diff --git a/core/src/browser/extensions/engines/OAIEngine.ts b/core/src/browser/extensions/engines/OAIEngine.ts index 6b4c20a19..61032357c 100644 --- a/core/src/browser/extensions/engines/OAIEngine.ts +++ b/core/src/browser/extensions/engines/OAIEngine.ts @@ -42,7 
+42,9 @@ export abstract class OAIEngine extends AIEngine { */ override onLoad() { super.onLoad() - events.on(MessageEvent.OnMessageSent, (data: MessageRequest) => this.inference(data)) + events.on(MessageEvent.OnMessageSent, (data: MessageRequest) => + this.inference(data) + ) events.on(InferenceEvent.OnInferenceStopped, () => this.stopInference()) } @@ -128,7 +130,9 @@ export abstract class OAIEngine extends AIEngine { events.emit(MessageEvent.OnMessageUpdate, message) }, complete: async () => { - message.status = message.content.length ? MessageStatus.Ready : MessageStatus.Error + message.status = message.content.length + ? MessageStatus.Ready + : MessageStatus.Error events.emit(MessageEvent.OnMessageUpdate, message) }, error: async (err: any) => { @@ -141,7 +145,10 @@ export abstract class OAIEngine extends AIEngine { message.content[0] = { type: ContentType.Text, text: { - value: err.message, + value: + typeof message === 'string' + ? err.message + : (JSON.stringify(err.message) ?? 
err.detail), annotations: [], }, } diff --git a/core/src/browser/extensions/engines/helpers/sse.test.ts b/core/src/browser/extensions/engines/helpers/sse.test.ts index 0b78aa9b5..f8c2ac6b4 100644 --- a/core/src/browser/extensions/engines/helpers/sse.test.ts +++ b/core/src/browser/extensions/engines/helpers/sse.test.ts @@ -1,14 +1,17 @@ import { lastValueFrom, Observable } from 'rxjs' import { requestInference } from './sse' -import { ReadableStream } from 'stream/web'; +import { ReadableStream } from 'stream/web' describe('requestInference', () => { it('should send a request to the inference server and return an Observable', () => { // Mock the fetch function const mockFetch: any = jest.fn(() => Promise.resolve({ ok: true, - json: () => Promise.resolve({ choices: [{ message: { content: 'Generated response' } }] }), + json: () => + Promise.resolve({ + choices: [{ message: { content: 'Generated response' } }], + }), headers: new Headers(), redirected: false, status: 200, @@ -36,7 +39,10 @@ describe('requestInference', () => { const mockFetch: any = jest.fn(() => Promise.resolve({ ok: false, - json: () => Promise.resolve({ error: { message: 'Wrong API Key', code: 'invalid_api_key' } }), + json: () => + Promise.resolve({ + error: { message: 'Invalid API Key.', code: 'invalid_api_key' }, + }), headers: new Headers(), redirected: false, status: 401, @@ -56,69 +62,85 @@ describe('requestInference', () => { // Assert the expected behavior expect(result).toBeInstanceOf(Observable) - expect(lastValueFrom(result)).rejects.toEqual({ message: 'Wrong API Key', code: 'invalid_api_key' }) + expect(lastValueFrom(result)).rejects.toEqual({ + message: 'Invalid API Key.', + code: 'invalid_api_key', + }) }) }) - it('should handle a successful response with a transformResponse function', () => { - // Mock the fetch function - const mockFetch: any = jest.fn(() => - Promise.resolve({ - ok: true, - json: () => Promise.resolve({ choices: [{ message: { content: 'Generated response' } }] }), 
- headers: new Headers(), - redirected: false, - status: 200, - statusText: 'OK', - }) - ) - jest.spyOn(global, 'fetch').mockImplementation(mockFetch) - - // Define the test inputs - const inferenceUrl = 'https://inference-server.com' - const requestBody = { message: 'Hello' } - const model = { id: 'model-id', parameters: { stream: false } } - const transformResponse = (data: any) => data.choices[0].message.content.toUpperCase() - - // Call the function - const result = requestInference(inferenceUrl, requestBody, model, undefined, undefined, transformResponse) - - // Assert the expected behavior - expect(result).toBeInstanceOf(Observable) - expect(lastValueFrom(result)).resolves.toEqual('GENERATED RESPONSE') - }) - - - it('should handle a successful response with streaming enabled', () => { - // Mock the fetch function - const mockFetch: any = jest.fn(() => - Promise.resolve({ - ok: true, - body: new ReadableStream({ - start(controller) { - controller.enqueue(new TextEncoder().encode('data: {"choices": [{"delta": {"content": "Streamed"}}]}')); - controller.enqueue(new TextEncoder().encode('data: [DONE]')); - controller.close(); - } +it('should handle a successful response with a transformResponse function', () => { + // Mock the fetch function + const mockFetch: any = jest.fn(() => + Promise.resolve({ + ok: true, + json: () => + Promise.resolve({ + choices: [{ message: { content: 'Generated response' } }], }), - headers: new Headers(), - redirected: false, - status: 200, - statusText: 'OK', - }) - ); - jest.spyOn(global, 'fetch').mockImplementation(mockFetch); - - // Define the test inputs - const inferenceUrl = 'https://inference-server.com'; - const requestBody = { message: 'Hello' }; - const model = { id: 'model-id', parameters: { stream: true } }; - - // Call the function - const result = requestInference(inferenceUrl, requestBody, model); - - // Assert the expected behavior - expect(result).toBeInstanceOf(Observable); - 
expect(lastValueFrom(result)).resolves.toEqual('Streamed'); - }); + headers: new Headers(), + redirected: false, + status: 200, + statusText: 'OK', + }) + ) + jest.spyOn(global, 'fetch').mockImplementation(mockFetch) + // Define the test inputs + const inferenceUrl = 'https://inference-server.com' + const requestBody = { message: 'Hello' } + const model = { id: 'model-id', parameters: { stream: false } } + const transformResponse = (data: any) => + data.choices[0].message.content.toUpperCase() + + // Call the function + const result = requestInference( + inferenceUrl, + requestBody, + model, + undefined, + undefined, + transformResponse + ) + + // Assert the expected behavior + expect(result).toBeInstanceOf(Observable) + expect(lastValueFrom(result)).resolves.toEqual('GENERATED RESPONSE') +}) + +it('should handle a successful response with streaming enabled', () => { + // Mock the fetch function + const mockFetch: any = jest.fn(() => + Promise.resolve({ + ok: true, + body: new ReadableStream({ + start(controller) { + controller.enqueue( + new TextEncoder().encode( + 'data: {"choices": [{"delta": {"content": "Streamed"}}]}' + ) + ) + controller.enqueue(new TextEncoder().encode('data: [DONE]')) + controller.close() + }, + }), + headers: new Headers(), + redirected: false, + status: 200, + statusText: 'OK', + }) + ) + jest.spyOn(global, 'fetch').mockImplementation(mockFetch) + + // Define the test inputs + const inferenceUrl = 'https://inference-server.com' + const requestBody = { message: 'Hello' } + const model = { id: 'model-id', parameters: { stream: true } } + + // Call the function + const result = requestInference(inferenceUrl, requestBody, model) + + // Assert the expected behavior + expect(result).toBeInstanceOf(Observable) + expect(lastValueFrom(result)).resolves.toEqual('Streamed') +}) diff --git a/core/src/browser/extensions/engines/helpers/sse.ts b/core/src/browser/extensions/engines/helpers/sse.ts index 55cde56b4..5c63008ff 100644 --- 
a/core/src/browser/extensions/engines/helpers/sse.ts +++ b/core/src/browser/extensions/engines/helpers/sse.ts @@ -32,20 +32,19 @@ export function requestInference( }) .then(async (response) => { if (!response.ok) { - const data = await response.json() - let errorCode = ErrorCode.Unknown - if (data.error) { - errorCode = data.error.code ?? data.error.type ?? ErrorCode.Unknown - } else if (response.status === 401) { - errorCode = ErrorCode.InvalidApiKey + if (response.status === 401) { + throw { + code: ErrorCode.InvalidApiKey, + message: 'Invalid API Key.', + } } - const error = { - message: data.error?.message ?? data.message ?? 'Error occurred.', - code: errorCode, + let data = await response.json() + try { + handleError(data) + } catch (err) { + subscriber.error(err) + return } - subscriber.error(error) - subscriber.complete() - return } // There could be overriden stream parameter in the model // that is set in request body (transformed payload) @@ -54,9 +53,10 @@ export function requestInference( model.parameters?.stream === false ) { const data = await response.json() - if (data.error || data.message) { - subscriber.error(data.error ?? data) - subscriber.complete() + try { + handleError(data) + } catch (err) { + subscriber.error(err) return } if (transformResponse) { @@ -91,13 +91,10 @@ export function requestInference( const toParse = cachedLines + line if (!line.includes('data: [DONE]')) { const data = JSON.parse(toParse.replace('data: ', '')) - if ( - 'error' in data || - 'message' in data || - 'detail' in data - ) { - subscriber.error(data.error ?? data) - subscriber.complete() + try { + handleError(data) + } catch (err) { + subscriber.error(err) return } content += data.choices[0]?.delta?.content ?? '' @@ -118,3 +115,18 @@ export function requestInference( .catch((err) => subscriber.error(err)) }) } + +/** + * Handle error and normalize it to a common format. 
+ * @param data + */ +const handleError = (data: any) => { + if ( + data.error || + data.message || + data.detail || + (Array.isArray(data) && data.length && data[0].error) + ) { + throw data.error ?? data[0]?.error ?? data + } +} diff --git a/core/src/browser/extensions/enginesManagement.test.ts b/core/src/browser/extensions/enginesManagement.test.ts new file mode 100644 index 000000000..2a7880992 --- /dev/null +++ b/core/src/browser/extensions/enginesManagement.test.ts @@ -0,0 +1,566 @@ +import { EngineManagementExtension } from './enginesManagement' +import { ExtensionTypeEnum } from '../extension' +import { + EngineConfig, + EngineReleased, + EngineVariant, + Engines, + InferenceEngine, + DefaultEngineVariant, + Model +} from '../../types' + +// Mock implementation of EngineManagementExtension +class MockEngineManagementExtension extends EngineManagementExtension { + private mockEngines: Engines = { + llama: { + name: 'llama', + variants: [ + { + variant: 'cpu', + version: '1.0.0', + path: '/engines/llama/cpu/1.0.0', + installed: true + }, + { + variant: 'cuda', + version: '1.0.0', + path: '/engines/llama/cuda/1.0.0', + installed: false + } + ], + default: { + variant: 'cpu', + version: '1.0.0' + } + }, + gpt4all: { + name: 'gpt4all', + variants: [ + { + variant: 'cpu', + version: '2.0.0', + path: '/engines/gpt4all/cpu/2.0.0', + installed: true + } + ], + default: { + variant: 'cpu', + version: '2.0.0' + } + } + } + + private mockReleases: { [key: string]: EngineReleased[] } = { + 'llama-1.0.0': [ + { + variant: 'cpu', + version: '1.0.0', + os: ['macos', 'linux', 'windows'], + url: 'https://example.com/llama/1.0.0/cpu' + }, + { + variant: 'cuda', + version: '1.0.0', + os: ['linux', 'windows'], + url: 'https://example.com/llama/1.0.0/cuda' + } + ], + 'llama-1.1.0': [ + { + variant: 'cpu', + version: '1.1.0', + os: ['macos', 'linux', 'windows'], + url: 'https://example.com/llama/1.1.0/cpu' + }, + { + variant: 'cuda', + version: '1.1.0', + os: ['linux', 
'windows'], + url: 'https://example.com/llama/1.1.0/cuda' + } + ], + 'gpt4all-2.0.0': [ + { + variant: 'cpu', + version: '2.0.0', + os: ['macos', 'linux', 'windows'], + url: 'https://example.com/gpt4all/2.0.0/cpu' + } + ] + } + + private remoteModels: { [engine: string]: Model[] } = { + 'llama': [], + 'gpt4all': [] + } + + constructor() { + super('http://mock-url.com', 'mock-engine-extension', 'Mock Engine Extension', true, 'A mock engine extension', '1.0.0') + } + + onLoad(): void { + // Mock implementation + } + + onUnload(): void { + // Mock implementation + } + + async getEngines(): Promise { + return JSON.parse(JSON.stringify(this.mockEngines)) + } + + async getInstalledEngines(name: InferenceEngine): Promise { + if (!this.mockEngines[name]) { + return [] + } + + return this.mockEngines[name].variants.filter(variant => variant.installed) + } + + async getReleasedEnginesByVersion( + name: InferenceEngine, + version: string, + platform?: string + ): Promise { + const key = `${name}-${version}` + let releases = this.mockReleases[key] || [] + + if (platform) { + releases = releases.filter(release => release.os.includes(platform)) + } + + return releases + } + + async getLatestReleasedEngine( + name: InferenceEngine, + platform?: string + ): Promise { + // For mock, let's assume latest versions are 1.1.0 for llama and 2.0.0 for gpt4all + const latestVersions = { + 'llama': '1.1.0', + 'gpt4all': '2.0.0' + } + + if (!latestVersions[name]) { + return [] + } + + return this.getReleasedEnginesByVersion(name, latestVersions[name], platform) + } + + async installEngine( + name: string, + engineConfig: EngineConfig + ): Promise<{ messages: string }> { + if (!this.mockEngines[name]) { + this.mockEngines[name] = { + name, + variants: [], + default: { + variant: engineConfig.variant, + version: engineConfig.version + } + } + } + + // Check if variant already exists + const existingVariantIndex = this.mockEngines[name].variants.findIndex( + v => v.variant === 
engineConfig.variant && v.version === engineConfig.version + ) + + if (existingVariantIndex >= 0) { + this.mockEngines[name].variants[existingVariantIndex].installed = true + } else { + this.mockEngines[name].variants.push({ + variant: engineConfig.variant, + version: engineConfig.version, + path: `/engines/${name}/${engineConfig.variant}/${engineConfig.version}`, + installed: true + }) + } + + return { messages: `Successfully installed ${name} ${engineConfig.variant} ${engineConfig.version}` } + } + + async addRemoteEngine( + engineConfig: EngineConfig + ): Promise<{ messages: string }> { + const name = engineConfig.name || 'remote-engine' + + if (!this.mockEngines[name]) { + this.mockEngines[name] = { + name, + variants: [], + default: { + variant: engineConfig.variant, + version: engineConfig.version + } + } + } + + this.mockEngines[name].variants.push({ + variant: engineConfig.variant, + version: engineConfig.version, + path: engineConfig.path || `/engines/${name}/${engineConfig.variant}/${engineConfig.version}`, + installed: true, + url: engineConfig.url + }) + + return { messages: `Successfully added remote engine ${name}` } + } + + async uninstallEngine( + name: InferenceEngine, + engineConfig: EngineConfig + ): Promise<{ messages: string }> { + if (!this.mockEngines[name]) { + return { messages: `Engine ${name} not found` } + } + + const variantIndex = this.mockEngines[name].variants.findIndex( + v => v.variant === engineConfig.variant && v.version === engineConfig.version + ) + + if (variantIndex >= 0) { + this.mockEngines[name].variants[variantIndex].installed = false + + // If this was the default variant, reset default + if ( + this.mockEngines[name].default.variant === engineConfig.variant && + this.mockEngines[name].default.version === engineConfig.version + ) { + // Find another installed variant to set as default + const installedVariant = this.mockEngines[name].variants.find(v => v.installed) + if (installedVariant) { + 
this.mockEngines[name].default = { + variant: installedVariant.variant, + version: installedVariant.version + } + } else { + // No installed variants remain, clear default + this.mockEngines[name].default = { variant: '', version: '' } + } + } + + return { messages: `Successfully uninstalled ${name} ${engineConfig.variant} ${engineConfig.version}` } + } else { + return { messages: `Variant ${engineConfig.variant} ${engineConfig.version} not found for engine ${name}` } + } + } + + async getDefaultEngineVariant( + name: InferenceEngine + ): Promise { + if (!this.mockEngines[name]) { + return { variant: '', version: '' } + } + + return this.mockEngines[name].default + } + + async setDefaultEngineVariant( + name: InferenceEngine, + engineConfig: EngineConfig + ): Promise<{ messages: string }> { + if (!this.mockEngines[name]) { + return { messages: `Engine ${name} not found` } + } + + const variantExists = this.mockEngines[name].variants.some( + v => v.variant === engineConfig.variant && v.version === engineConfig.version && v.installed + ) + + if (!variantExists) { + return { messages: `Variant ${engineConfig.variant} ${engineConfig.version} not found or not installed` } + } + + this.mockEngines[name].default = { + variant: engineConfig.variant, + version: engineConfig.version + } + + return { messages: `Successfully set ${engineConfig.variant} ${engineConfig.version} as default for ${name}` } + } + + async updateEngine( + name: InferenceEngine, + engineConfig?: EngineConfig + ): Promise<{ messages: string }> { + if (!this.mockEngines[name]) { + return { messages: `Engine ${name} not found` } + } + + if (!engineConfig) { + // Assume we're updating to the latest version + return { messages: `Successfully updated ${name} to the latest version` } + } + + const variantIndex = this.mockEngines[name].variants.findIndex( + v => v.variant === engineConfig.variant && v.installed + ) + + if (variantIndex >= 0) { + // Update the version + 
this.mockEngines[name].variants[variantIndex].version = engineConfig.version + + // If this was the default variant, update default version too + if (this.mockEngines[name].default.variant === engineConfig.variant) { + this.mockEngines[name].default.version = engineConfig.version + } + + return { messages: `Successfully updated ${name} ${engineConfig.variant} to version ${engineConfig.version}` } + } else { + return { messages: `Installed variant ${engineConfig.variant} not found for engine ${name}` } + } + } + + async addRemoteModel(model: Model): Promise { + const engine = model.engine as string + + if (!this.remoteModels[engine]) { + this.remoteModels[engine] = [] + } + + this.remoteModels[engine].push(model) + } + + async getRemoteModels(name: InferenceEngine | string): Promise { + return this.remoteModels[name] || [] + } +} + +describe('EngineManagementExtension', () => { + let extension: MockEngineManagementExtension + + beforeEach(() => { + extension = new MockEngineManagementExtension() + }) + + test('should return the correct extension type', () => { + expect(extension.type()).toBe(ExtensionTypeEnum.Engine) + }) + + test('should get all engines', async () => { + const engines = await extension.getEngines() + + expect(engines).toBeDefined() + expect(engines.llama).toBeDefined() + expect(engines.gpt4all).toBeDefined() + expect(engines.llama.variants).toHaveLength(2) + expect(engines.gpt4all.variants).toHaveLength(1) + }) + + test('should get installed engines', async () => { + const llamaEngines = await extension.getInstalledEngines('llama') + + expect(llamaEngines).toHaveLength(1) + expect(llamaEngines[0].variant).toBe('cpu') + expect(llamaEngines[0].installed).toBe(true) + + const gpt4allEngines = await extension.getInstalledEngines('gpt4all') + + expect(gpt4allEngines).toHaveLength(1) + expect(gpt4allEngines[0].variant).toBe('cpu') + expect(gpt4allEngines[0].installed).toBe(true) + + // Test non-existent engine + const nonExistentEngines = await 
extension.getInstalledEngines('non-existent' as InferenceEngine) + expect(nonExistentEngines).toHaveLength(0) + }) + + test('should get released engines by version', async () => { + const llamaReleases = await extension.getReleasedEnginesByVersion('llama', '1.0.0') + + expect(llamaReleases).toHaveLength(2) + expect(llamaReleases[0].variant).toBe('cpu') + expect(llamaReleases[1].variant).toBe('cuda') + + // Test with platform filter + const llamaLinuxReleases = await extension.getReleasedEnginesByVersion('llama', '1.0.0', 'linux') + + expect(llamaLinuxReleases).toHaveLength(2) + + const llamaMacReleases = await extension.getReleasedEnginesByVersion('llama', '1.0.0', 'macos') + + expect(llamaMacReleases).toHaveLength(1) + expect(llamaMacReleases[0].variant).toBe('cpu') + + // Test non-existent version + const nonExistentReleases = await extension.getReleasedEnginesByVersion('llama', '9.9.9') + expect(nonExistentReleases).toHaveLength(0) + }) + + test('should get latest released engines', async () => { + const latestLlamaReleases = await extension.getLatestReleasedEngine('llama') + + expect(latestLlamaReleases).toHaveLength(2) + expect(latestLlamaReleases[0].version).toBe('1.1.0') + + // Test with platform filter + const latestLlamaMacReleases = await extension.getLatestReleasedEngine('llama', 'macos') + + expect(latestLlamaMacReleases).toHaveLength(1) + expect(latestLlamaMacReleases[0].variant).toBe('cpu') + expect(latestLlamaMacReleases[0].version).toBe('1.1.0') + + // Test non-existent engine + const nonExistentReleases = await extension.getLatestReleasedEngine('non-existent' as InferenceEngine) + expect(nonExistentReleases).toHaveLength(0) + }) + + test('should install engine', async () => { + // Install existing engine variant that is not installed + const result = await extension.installEngine('llama', { variant: 'cuda', version: '1.0.0' }) + + expect(result.messages).toContain('Successfully installed') + + const installedEngines = await 
extension.getInstalledEngines('llama') + expect(installedEngines).toHaveLength(2) + expect(installedEngines.some(e => e.variant === 'cuda')).toBe(true) + + // Install non-existent engine + const newEngineResult = await extension.installEngine('new-engine', { variant: 'cpu', version: '1.0.0' }) + + expect(newEngineResult.messages).toContain('Successfully installed') + + const engines = await extension.getEngines() + expect(engines['new-engine']).toBeDefined() + expect(engines['new-engine'].variants).toHaveLength(1) + expect(engines['new-engine'].variants[0].installed).toBe(true) + }) + + test('should add remote engine', async () => { + const result = await extension.addRemoteEngine({ + name: 'remote-llm', + variant: 'remote', + version: '1.0.0', + url: 'https://example.com/remote-llm-api' + }) + + expect(result.messages).toContain('Successfully added remote engine') + + const engines = await extension.getEngines() + expect(engines['remote-llm']).toBeDefined() + expect(engines['remote-llm'].variants).toHaveLength(1) + expect(engines['remote-llm'].variants[0].url).toBe('https://example.com/remote-llm-api') + }) + + test('should uninstall engine', async () => { + const result = await extension.uninstallEngine('llama', { variant: 'cpu', version: '1.0.0' }) + + expect(result.messages).toContain('Successfully uninstalled') + + const installedEngines = await extension.getInstalledEngines('llama') + expect(installedEngines).toHaveLength(0) + + // Test uninstalling non-existent variant + const nonExistentResult = await extension.uninstallEngine('llama', { variant: 'non-existent', version: '1.0.0' }) + + expect(nonExistentResult.messages).toContain('not found') + }) + + test('should handle default variant when uninstalling', async () => { + // First install cuda variant + await extension.installEngine('llama', { variant: 'cuda', version: '1.0.0' }) + + // Set cuda as default + await extension.setDefaultEngineVariant('llama', { variant: 'cuda', version: '1.0.0' }) + + // Check 
that cuda is now default + let defaultVariant = await extension.getDefaultEngineVariant('llama') + expect(defaultVariant.variant).toBe('cuda') + + // Uninstall cuda + await extension.uninstallEngine('llama', { variant: 'cuda', version: '1.0.0' }) + + // Check that default has changed to another installed variant + defaultVariant = await extension.getDefaultEngineVariant('llama') + expect(defaultVariant.variant).toBe('cpu') + + // Uninstall all variants + await extension.uninstallEngine('llama', { variant: 'cpu', version: '1.0.0' }) + + // Check that default is now empty + defaultVariant = await extension.getDefaultEngineVariant('llama') + expect(defaultVariant.variant).toBe('') + expect(defaultVariant.version).toBe('') + }) + + test('should get default engine variant', async () => { + const llamaDefault = await extension.getDefaultEngineVariant('llama') + + expect(llamaDefault.variant).toBe('cpu') + expect(llamaDefault.version).toBe('1.0.0') + + // Test non-existent engine + const nonExistentDefault = await extension.getDefaultEngineVariant('non-existent' as InferenceEngine) + expect(nonExistentDefault.variant).toBe('') + expect(nonExistentDefault.version).toBe('') + }) + + test('should set default engine variant', async () => { + // Install cuda variant + await extension.installEngine('llama', { variant: 'cuda', version: '1.0.0' }) + + const result = await extension.setDefaultEngineVariant('llama', { variant: 'cuda', version: '1.0.0' }) + + expect(result.messages).toContain('Successfully set') + + const defaultVariant = await extension.getDefaultEngineVariant('llama') + expect(defaultVariant.variant).toBe('cuda') + expect(defaultVariant.version).toBe('1.0.0') + + // Test setting non-existent variant as default + const nonExistentResult = await extension.setDefaultEngineVariant('llama', { variant: 'non-existent', version: '1.0.0' }) + + expect(nonExistentResult.messages).toContain('not found') + }) + + test('should update engine', async () => { + const result = 
await extension.updateEngine('llama', { variant: 'cpu', version: '1.1.0' }) + + expect(result.messages).toContain('Successfully updated') + + const engines = await extension.getEngines() + const cpuVariant = engines.llama.variants.find(v => v.variant === 'cpu') + expect(cpuVariant).toBeDefined() + expect(cpuVariant?.version).toBe('1.1.0') + + // Default should also be updated since cpu was default + expect(engines.llama.default.version).toBe('1.1.0') + + // Test updating non-existent variant + const nonExistentResult = await extension.updateEngine('llama', { variant: 'non-existent', version: '1.1.0' }) + + expect(nonExistentResult.messages).toContain('not found') + }) + + test('should add and get remote models', async () => { + const model: Model = { + id: 'remote-model-1', + name: 'Remote Model 1', + path: '/path/to/remote-model', + engine: 'llama', + format: 'gguf', + modelFormat: 'gguf', + source: 'remote', + status: 'ready', + contextLength: 4096, + sizeInGB: 4, + created: new Date().toISOString() + } + + await extension.addRemoteModel(model) + + const llamaModels = await extension.getRemoteModels('llama') + expect(llamaModels).toHaveLength(1) + expect(llamaModels[0].id).toBe('remote-model-1') + + // Test non-existent engine + const nonExistentModels = await extension.getRemoteModels('non-existent') + expect(nonExistentModels).toHaveLength(0) + }) +}) \ No newline at end of file diff --git a/core/src/browser/extensions/hardwareManagement.test.ts b/core/src/browser/extensions/hardwareManagement.test.ts new file mode 100644 index 000000000..6ada06862 --- /dev/null +++ b/core/src/browser/extensions/hardwareManagement.test.ts @@ -0,0 +1,146 @@ +import { HardwareManagementExtension } from './hardwareManagement' +import { ExtensionTypeEnum } from '../extension' +import { HardwareInformation } from '../../types' + +// Mock implementation of HardwareManagementExtension +class MockHardwareManagementExtension extends HardwareManagementExtension { + private activeGpus: 
number[] = [0] + private mockHardwareInfo: HardwareInformation = { + cpu: { + manufacturer: 'Mock CPU Manufacturer', + brand: 'Mock CPU', + cores: 8, + physicalCores: 4, + speed: 3.5, + }, + memory: { + total: 16 * 1024 * 1024 * 1024, // 16GB in bytes + free: 8 * 1024 * 1024 * 1024, // 8GB in bytes + }, + gpus: [ + { + id: 0, + vendor: 'Mock GPU Vendor', + model: 'Mock GPU Model 1', + memory: 8 * 1024 * 1024 * 1024, // 8GB in bytes + }, + { + id: 1, + vendor: 'Mock GPU Vendor', + model: 'Mock GPU Model 2', + memory: 4 * 1024 * 1024 * 1024, // 4GB in bytes + } + ], + active_gpus: [0], + } + + constructor() { + super('http://mock-url.com', 'mock-hardware-extension', 'Mock Hardware Extension', true, 'A mock hardware extension', '1.0.0') + } + + onLoad(): void { + // Mock implementation + } + + onUnload(): void { + // Mock implementation + } + + async getHardware(): Promise { + // Return a copy to prevent test side effects + return JSON.parse(JSON.stringify(this.mockHardwareInfo)) + } + + async setAvtiveGpu(data: { gpus: number[] }): Promise<{ + message: string + activated_gpus: number[] + }> { + // Validate GPUs exist + const validGpus = data.gpus.filter(gpuId => + this.mockHardwareInfo.gpus.some(gpu => gpu.id === gpuId) + ) + + if (validGpus.length === 0) { + throw new Error('No valid GPUs selected') + } + + // Update active GPUs + this.activeGpus = validGpus + this.mockHardwareInfo.active_gpus = validGpus + + return { + message: 'GPU activation successful', + activated_gpus: validGpus + } + } +} + +describe('HardwareManagementExtension', () => { + let extension: MockHardwareManagementExtension + + beforeEach(() => { + extension = new MockHardwareManagementExtension() + }) + + test('should return the correct extension type', () => { + expect(extension.type()).toBe(ExtensionTypeEnum.Hardware) + }) + + test('should get hardware information', async () => { + const hardwareInfo = await extension.getHardware() + + // Check CPU info + expect(hardwareInfo.cpu).toBeDefined() 
+ expect(hardwareInfo.cpu.manufacturer).toBe('Mock CPU Manufacturer') + expect(hardwareInfo.cpu.cores).toBe(8) + + // Check memory info + expect(hardwareInfo.memory).toBeDefined() + expect(hardwareInfo.memory.total).toBe(16 * 1024 * 1024 * 1024) + + // Check GPU info + expect(hardwareInfo.gpus).toHaveLength(2) + expect(hardwareInfo.gpus[0].model).toBe('Mock GPU Model 1') + expect(hardwareInfo.gpus[1].model).toBe('Mock GPU Model 2') + + // Check active GPUs + expect(hardwareInfo.active_gpus).toEqual([0]) + }) + + test('should set active GPUs', async () => { + const result = await extension.setAvtiveGpu({ gpus: [1] }) + + expect(result.message).toBe('GPU activation successful') + expect(result.activated_gpus).toEqual([1]) + + // Verify the change in hardware info + const hardwareInfo = await extension.getHardware() + expect(hardwareInfo.active_gpus).toEqual([1]) + }) + + test('should set multiple active GPUs', async () => { + const result = await extension.setAvtiveGpu({ gpus: [0, 1] }) + + expect(result.message).toBe('GPU activation successful') + expect(result.activated_gpus).toEqual([0, 1]) + + // Verify the change in hardware info + const hardwareInfo = await extension.getHardware() + expect(hardwareInfo.active_gpus).toEqual([0, 1]) + }) + + test('should throw error for invalid GPU ids', async () => { + await expect(extension.setAvtiveGpu({ gpus: [999] })).rejects.toThrow('No valid GPUs selected') + }) + + test('should handle mix of valid and invalid GPU ids', async () => { + const result = await extension.setAvtiveGpu({ gpus: [0, 999] }) + + // Should only activate valid GPUs + expect(result.activated_gpus).toEqual([0]) + + // Verify the change in hardware info + const hardwareInfo = await extension.getHardware() + expect(hardwareInfo.active_gpus).toEqual([0]) + }) +}) \ No newline at end of file diff --git a/core/src/browser/extensions/hardwareManagement.ts b/core/src/browser/extensions/hardwareManagement.ts new file mode 100644 index 000000000..1f7c36287 --- 
/dev/null +++ b/core/src/browser/extensions/hardwareManagement.ts @@ -0,0 +1,26 @@ +import { HardwareInformation } from '../../types' +import { BaseExtension, ExtensionTypeEnum } from '../extension' + +/** + * Hardware management extension. Retrieves hardware information and sets active GPUs. + * @abstract + * @extends BaseExtension + */ +export abstract class HardwareManagementExtension extends BaseExtension { + type(): ExtensionTypeEnum | undefined { + return ExtensionTypeEnum.Hardware + } + + /** + * @returns A Promise that resolves to the system's hardware information. + */ + abstract getHardware(): Promise + + /** + * @returns A Promise that resolves to the result of setting the active GPUs. + */ + abstract setAvtiveGpu(data: { gpus: number[] }): Promise<{ + message: string + activated_gpus: number[] + }> +} diff --git a/core/src/browser/extensions/index.test.ts b/core/src/browser/extensions/index.test.ts index 26cbda8c5..bc5a7c358 100644 --- a/core/src/browser/extensions/index.test.ts +++ b/core/src/browser/extensions/index.test.ts @@ -1,6 +1,5 @@ import { ConversationalExtension } from './index'; import { InferenceExtension } from './index'; -import { MonitoringExtension } from './index'; import { AssistantExtension } from './index'; import { ModelExtension } from './index'; import * as Engines from './index'; @@ -14,10 +13,6 @@ describe('index.ts exports', () => { expect(InferenceExtension).toBeDefined(); }); - test('should export MonitoringExtension', () => { - expect(MonitoringExtension).toBeDefined(); - }); - test('should export AssistantExtension', () => { expect(AssistantExtension).toBeDefined(); }); @@ -29,4 +24,4 @@ test('should export Engines', () => { expect(Engines).toBeDefined(); }); -}); \ No newline at end of file +}); diff --git a/core/src/browser/extensions/index.ts b/core/src/browser/extensions/index.ts index 9dbfe1afe..f11c7b09f 100644 --- a/core/src/browser/extensions/index.ts +++ b/core/src/browser/extensions/index.ts @@ -9,10
+9,7 @@ export { ConversationalExtension } from './conversational' */ export { InferenceExtension } from './inference' -/** - * Monitoring extension for system monitoring. - */ -export { MonitoringExtension } from './monitoring' + /** * Assistant extension for managing assistants. @@ -33,3 +30,8 @@ export * from './engines' * Engines Management */ export * from './enginesManagement' + +/** + * Hardware Management + */ +export * from './hardwareManagement' diff --git a/core/src/browser/extensions/model.test.ts b/core/src/browser/extensions/model.test.ts new file mode 100644 index 000000000..bc045419d --- /dev/null +++ b/core/src/browser/extensions/model.test.ts @@ -0,0 +1,286 @@ +import { ModelExtension } from './model' +import { ExtensionTypeEnum } from '../extension' +import { Model, OptionType, ModelSource } from '../../types' + +// Mock implementation of ModelExtension +class MockModelExtension extends ModelExtension { + private models: Model[] = [] + private sources: ModelSource[] = [] + private loadedModels: Set = new Set() + private modelsPulling: Set = new Set() + + constructor() { + super('http://mock-url.com', 'mock-model-extension', 'Mock Model Extension', true, 'A mock model extension', '1.0.0') + } + + onLoad(): void { + // Mock implementation + } + + onUnload(): void { + // Mock implementation + } + + async configurePullOptions(configs: { [key: string]: any }): Promise { + return configs + } + + async getModels(): Promise { + return this.models + } + + async pullModel(model: string, id?: string, name?: string): Promise { + const modelId = id || `model-${Date.now()}` + this.modelsPulling.add(modelId) + + // Simulate model pull by adding it to the model list + const newModel: Model = { + id: modelId, + path: `/models/${model}`, + name: name || model, + source: 'mock-source', + modelFormat: 'mock-format', + engine: 'mock-engine', + format: 'mock-format', + status: 'ready', + contextLength: 2048, + sizeInGB: 2, + created: new Date().toISOString(), + 
pullProgress: { + percent: 100, + transferred: 0, + total: 0 + } + } + + this.models.push(newModel) + this.loadedModels.add(modelId) + this.modelsPulling.delete(modelId) + } + + async cancelModelPull(modelId: string): Promise { + this.modelsPulling.delete(modelId) + // Remove the model if it's in the pulling state + this.models = this.models.filter(m => m.id !== modelId) + } + + async importModel( + model: string, + modelPath: string, + name?: string, + optionType?: OptionType + ): Promise { + const newModel: Model = { + id: `model-${Date.now()}`, + path: modelPath, + name: name || model, + source: 'local', + modelFormat: optionType?.format || 'mock-format', + engine: optionType?.engine || 'mock-engine', + format: optionType?.format || 'mock-format', + status: 'ready', + contextLength: optionType?.contextLength || 2048, + sizeInGB: 2, + created: new Date().toISOString(), + } + + this.models.push(newModel) + this.loadedModels.add(newModel.id) + } + + async updateModel(modelInfo: Partial): Promise { + if (!modelInfo.id) throw new Error('Model ID is required') + + const index = this.models.findIndex(m => m.id === modelInfo.id) + if (index === -1) throw new Error('Model not found') + + this.models[index] = { ...this.models[index], ...modelInfo } + return this.models[index] + } + + async deleteModel(modelId: string): Promise { + this.models = this.models.filter(m => m.id !== modelId) + this.loadedModels.delete(modelId) + } + + async isModelLoaded(modelId: string): Promise { + return this.loadedModels.has(modelId) + } + + async getSources(): Promise { + return this.sources + } + + async addSource(source: string): Promise { + const newSource: ModelSource = { + id: `source-${Date.now()}`, + url: source, + name: `Source ${this.sources.length + 1}`, + type: 'mock-type' + } + + this.sources.push(newSource) + } + + async deleteSource(sourceId: string): Promise { + this.sources = this.sources.filter(s => s.id !== sourceId) + } +} + +describe('ModelExtension', () => { + let 
extension: MockModelExtension + + beforeEach(() => { + extension = new MockModelExtension() + }) + + test('should return the correct extension type', () => { + expect(extension.type()).toBe(ExtensionTypeEnum.Model) + }) + + test('should configure pull options', async () => { + const configs = { apiKey: 'test-key', baseUrl: 'https://test-url.com' } + const result = await extension.configurePullOptions(configs) + expect(result).toEqual(configs) + }) + + test('should add and get models', async () => { + await extension.pullModel('test-model', 'test-id', 'Test Model') + + const models = await extension.getModels() + expect(models).toHaveLength(1) + expect(models[0].id).toBe('test-id') + expect(models[0].name).toBe('Test Model') + }) + + test('should pull model with default id and name', async () => { + await extension.pullModel('test-model') + + const models = await extension.getModels() + expect(models).toHaveLength(1) + expect(models[0].name).toBe('test-model') + }) + + test('should cancel model pull', async () => { + await extension.pullModel('test-model', 'test-id') + + // Verify model exists + let models = await extension.getModels() + expect(models).toHaveLength(1) + + // Cancel the pull + await extension.cancelModelPull('test-id') + + // Verify model was removed + models = await extension.getModels() + expect(models).toHaveLength(0) + }) + + test('should import model', async () => { + const optionType: OptionType = { + engine: 'test-engine', + format: 'test-format', + contextLength: 4096 + } + + await extension.importModel('test-model', '/path/to/model', 'Imported Model', optionType) + + const models = await extension.getModels() + expect(models).toHaveLength(1) + expect(models[0].name).toBe('Imported Model') + expect(models[0].engine).toBe('test-engine') + expect(models[0].format).toBe('test-format') + expect(models[0].contextLength).toBe(4096) + }) + + test('should import model with default values', async () => { + await extension.importModel('test-model', 
'/path/to/model') + + const models = await extension.getModels() + expect(models).toHaveLength(1) + expect(models[0].name).toBe('test-model') + expect(models[0].engine).toBe('mock-engine') + expect(models[0].format).toBe('mock-format') + }) + + test('should update model', async () => { + await extension.pullModel('test-model', 'test-id', 'Test Model') + + const updatedModel = await extension.updateModel({ + id: 'test-id', + name: 'Updated Model', + contextLength: 8192 + }) + + expect(updatedModel.name).toBe('Updated Model') + expect(updatedModel.contextLength).toBe(8192) + + // Verify changes persisted + const models = await extension.getModels() + expect(models[0].name).toBe('Updated Model') + expect(models[0].contextLength).toBe(8192) + }) + + test('should throw error when updating non-existent model', async () => { + await expect(extension.updateModel({ + id: 'non-existent', + name: 'Updated Model' + })).rejects.toThrow('Model not found') + }) + + test('should throw error when updating model without ID', async () => { + await expect(extension.updateModel({ + name: 'Updated Model' + })).rejects.toThrow('Model ID is required') + }) + + test('should delete model', async () => { + await extension.pullModel('test-model', 'test-id') + + // Verify model exists + let models = await extension.getModels() + expect(models).toHaveLength(1) + + // Delete the model + await extension.deleteModel('test-id') + + // Verify model was removed + models = await extension.getModels() + expect(models).toHaveLength(0) + }) + + test('should check if model is loaded', async () => { + await extension.pullModel('test-model', 'test-id') + + // Check if model is loaded + const isLoaded = await extension.isModelLoaded('test-id') + expect(isLoaded).toBe(true) + + // Check if non-existent model is loaded + const nonExistentLoaded = await extension.isModelLoaded('non-existent') + expect(nonExistentLoaded).toBe(false) + }) + + test('should add and get sources', async () => { + await 
extension.addSource('https://test-source.com') + + const sources = await extension.getSources() + expect(sources).toHaveLength(1) + expect(sources[0].url).toBe('https://test-source.com') + }) + + test('should delete source', async () => { + await extension.addSource('https://test-source.com') + + // Get the source ID + const sources = await extension.getSources() + const sourceId = sources[0].id + + // Delete the source + await extension.deleteSource(sourceId) + + // Verify source was removed + const updatedSources = await extension.getSources() + expect(updatedSources).toHaveLength(0) + }) +}) \ No newline at end of file diff --git a/core/src/browser/extensions/monitoring.test.ts b/core/src/browser/extensions/monitoring.test.ts deleted file mode 100644 index 9bba89a8c..000000000 --- a/core/src/browser/extensions/monitoring.test.ts +++ /dev/null @@ -1,42 +0,0 @@ - -import { ExtensionTypeEnum } from '../extension'; -import { MonitoringExtension } from './monitoring'; - -it('should have the correct type', () => { - class TestMonitoringExtension extends MonitoringExtension { - getGpuSetting(): Promise { - throw new Error('Method not implemented.'); - } - getResourcesInfo(): Promise { - throw new Error('Method not implemented.'); - } - getCurrentLoad(): Promise { - throw new Error('Method not implemented.'); - } - getOsInfo(): Promise { - throw new Error('Method not implemented.'); - } - } - const monitoringExtension = new TestMonitoringExtension(); - expect(monitoringExtension.type()).toBe(ExtensionTypeEnum.SystemMonitoring); -}); - - -it('should create an instance of MonitoringExtension', () => { - class TestMonitoringExtension extends MonitoringExtension { - getGpuSetting(): Promise { - throw new Error('Method not implemented.'); - } - getResourcesInfo(): Promise { - throw new Error('Method not implemented.'); - } - getCurrentLoad(): Promise { - throw new Error('Method not implemented.'); - } - getOsInfo(): Promise { - throw new Error('Method not implemented.'); - } 
- } - const monitoringExtension = new TestMonitoringExtension(); - expect(monitoringExtension).toBeInstanceOf(MonitoringExtension); -}); diff --git a/core/src/browser/extensions/monitoring.ts b/core/src/browser/extensions/monitoring.ts deleted file mode 100644 index cb544b6b7..000000000 --- a/core/src/browser/extensions/monitoring.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { BaseExtension, ExtensionTypeEnum } from '../extension' -import { GpuSetting, MonitoringInterface, OperatingSystemInfo } from '../../types' - -/** - * Monitoring extension for system monitoring. - * @extends BaseExtension - */ -export abstract class MonitoringExtension extends BaseExtension implements MonitoringInterface { - /** - * Monitoring extension type. - */ - type(): ExtensionTypeEnum | undefined { - return ExtensionTypeEnum.SystemMonitoring - } - - abstract getGpuSetting(): Promise - abstract getResourcesInfo(): Promise - abstract getCurrentLoad(): Promise - abstract getOsInfo(): Promise -} diff --git a/core/src/browser/fs.ts b/core/src/browser/fs.ts index 9240b3876..7aa5f4d92 100644 --- a/core/src/browser/fs.ts +++ b/core/src/browser/fs.ts @@ -55,17 +55,23 @@ const unlinkSync = (...args: any[]) => globalThis.core.api?.unlinkSync(...args) */ const appendFileSync = (...args: any[]) => globalThis.core.api?.appendFileSync(...args) +/** + * Copies a file from the source path to the destination path. + * @param src + * @param dest + * @returns + */ const copyFile: (src: string, dest: string) => Promise = (src, dest) => globalThis.core.api?.copyFile(src, dest) /** * Gets the list of gguf files in a directory - * + * * @param path - The paths to the file. * @returns {Promise<{any}>} - A promise that resolves with the list of gguf and non-gguf files */ -const getGgufFiles: (paths: string[]) => Promise = ( - paths) => globalThis.core.api?.getGgufFiles(paths) +const getGgufFiles: (paths: string[]) => Promise = (paths) => + globalThis.core.api?.getGgufFiles(paths) /** * Gets the file's stats. 
diff --git a/core/src/node/api/common/adapter.ts b/core/src/node/api/common/adapter.ts index 2beacf325..b0c8173a9 100644 --- a/core/src/node/api/common/adapter.ts +++ b/core/src/node/api/common/adapter.ts @@ -1,25 +1,21 @@ import { AppRoute, - DownloadRoute, ExtensionRoute, FileManagerRoute, FileSystemRoute, } from '../../../types/api' -import { Downloader } from '../processors/download' import { FileSystem } from '../processors/fs' import { Extension } from '../processors/extension' import { FSExt } from '../processors/fsExt' import { App } from '../processors/app' export class RequestAdapter { - downloader: Downloader fileSystem: FileSystem extension: Extension fsExt: FSExt app: App constructor(observer?: Function) { - this.downloader = new Downloader(observer) this.fileSystem = new FileSystem() this.extension = new Extension() this.fsExt = new FSExt() @@ -28,9 +24,7 @@ export class RequestAdapter { // TODO: Clearer Factory pattern here process(route: string, ...args: any) { - if (route in DownloadRoute) { - return this.downloader.process(route, ...args) - } else if (route in FileSystemRoute) { + if (route in FileSystemRoute) { return this.fileSystem.process(route, ...args) } else if (route in ExtensionRoute) { return this.extension.process(route, ...args) diff --git a/core/src/node/api/processors/download.test.ts b/core/src/node/api/processors/download.test.ts deleted file mode 100644 index c4b171a7d..000000000 --- a/core/src/node/api/processors/download.test.ts +++ /dev/null @@ -1,125 +0,0 @@ -import { Downloader } from './download' -import { DownloadEvent } from '../../../types/api' -import { DownloadManager } from '../../helper/download' - -jest.mock('../../helper', () => ({ - getJanDataFolderPath: jest.fn().mockReturnValue('path/to/folder'), -})) - -jest.mock('../../helper/path', () => ({ - validatePath: jest.fn().mockReturnValue('path/to/folder'), - normalizeFilePath: () => - process.platform === 'win32' ? 
'C:\\Users\\path\\to\\file.gguf' : '/Users/path/to/file.gguf', -})) - -jest.mock( - 'request', - jest.fn().mockReturnValue(() => ({ - on: jest.fn(), - })) -) - -jest.mock('fs', () => ({ - createWriteStream: jest.fn(), -})) - -const requestMock = jest.fn((options, callback) => { - callback(new Error('Test error'), null) -}) -jest.mock('request', () => requestMock) - -jest.mock('request-progress', () => { - return jest.fn().mockImplementation(() => { - return { - on: jest.fn().mockImplementation((event, callback) => { - if (event === 'error') { - callback(new Error('Download failed')) - } - return { - on: jest.fn().mockImplementation((event, callback) => { - if (event === 'error') { - callback(new Error('Download failed')) - } - return { - on: jest.fn().mockImplementation((event, callback) => { - if (event === 'error') { - callback(new Error('Download failed')) - } - return { pipe: jest.fn() } - }), - } - }), - } - }), - } - }) -}) - -describe('Downloader', () => { - beforeEach(() => { - jest.resetAllMocks() - }) - - it('should pause download correctly', () => { - const observer = jest.fn() - const fileName = process.platform === 'win32' ? 'C:\\path\\to\\file' : 'path/to/file' - - const downloader = new Downloader(observer) - const pauseMock = jest.fn() - DownloadManager.instance.networkRequests[fileName] = { pause: pauseMock } - - downloader.pauseDownload(observer, fileName) - - expect(pauseMock).toHaveBeenCalled() - }) - - it('should resume download correctly', () => { - const observer = jest.fn() - const fileName = process.platform === 'win32' ? 
'C:\\path\\to\\file' : 'path/to/file' - - const downloader = new Downloader(observer) - const resumeMock = jest.fn() - DownloadManager.instance.networkRequests[fileName] = { resume: resumeMock } - - downloader.resumeDownload(observer, fileName) - - expect(resumeMock).toHaveBeenCalled() - }) - - it('should handle aborting a download correctly', () => { - const observer = jest.fn() - const fileName = process.platform === 'win32' ? 'C:\\path\\to\\file' : 'path/to/file' - - const downloader = new Downloader(observer) - const abortMock = jest.fn() - DownloadManager.instance.networkRequests[fileName] = { abort: abortMock } - - downloader.abortDownload(observer, fileName) - - expect(abortMock).toHaveBeenCalled() - expect(observer).toHaveBeenCalledWith( - DownloadEvent.onFileDownloadError, - expect.objectContaining({ - error: 'aborted', - }) - ) - }) - - it('should handle download fail correctly', () => { - const observer = jest.fn() - const fileName = process.platform === 'win32' ? 'C:\\path\\to\\file' : 'path/to/file.gguf' - - const downloader = new Downloader(observer) - - downloader.downloadFile(observer, { - localPath: fileName, - url: 'http://127.0.0.1', - }) - expect(observer).toHaveBeenCalledWith( - DownloadEvent.onFileDownloadError, - expect.objectContaining({ - error: expect.anything(), - }) - ) - }) -}) diff --git a/core/src/node/api/processors/download.ts b/core/src/node/api/processors/download.ts deleted file mode 100644 index 709ad9687..000000000 --- a/core/src/node/api/processors/download.ts +++ /dev/null @@ -1,138 +0,0 @@ -import { resolve, sep } from 'path' -import { DownloadEvent } from '../../../types/api' -import { normalizeFilePath } from '../../helper/path' -import { getJanDataFolderPath } from '../../helper' -import { DownloadManager } from '../../helper/download' -import { createWriteStream, renameSync } from 'fs' -import { Processor } from './Processor' -import { DownloadRequest, DownloadState, NetworkConfig } from '../../../types' - -export class 
Downloader implements Processor { - observer?: Function - - constructor(observer?: Function) { - this.observer = observer - } - - process(key: string, ...args: any[]): any { - const instance = this as any - const func = instance[key] - return func(this.observer, ...args) - } - - downloadFile(observer: any, downloadRequest: DownloadRequest, network?: NetworkConfig) { - const request = require('request') - const progress = require('request-progress') - - const strictSSL = !network?.ignoreSSL - const proxy = network?.proxy?.startsWith('http') ? network.proxy : undefined - - const { localPath, url } = downloadRequest - let normalizedPath = localPath - if (typeof localPath === 'string') { - normalizedPath = normalizeFilePath(localPath) - } - const array = normalizedPath.split(sep) - const fileName = array.pop() ?? '' - const modelId = downloadRequest.modelId ?? array.pop() ?? '' - - const destination = resolve(getJanDataFolderPath(), normalizedPath) - const rq = request({ url, strictSSL, proxy }) - - // Put request to download manager instance - DownloadManager.instance.setRequest(normalizedPath, rq) - - // Downloading file to a temp file first - const downloadingTempFile = `${destination}.download` - - // adding initial download state - const initialDownloadState: DownloadState = { - modelId, - fileName, - percent: 0, - size: { - total: 0, - transferred: 0, - }, - children: [], - downloadState: 'downloading', - extensionId: downloadRequest.extensionId, - downloadType: downloadRequest.downloadType, - localPath: normalizedPath, - } - DownloadManager.instance.downloadProgressMap[modelId] = initialDownloadState - DownloadManager.instance.downloadInfo[normalizedPath] = initialDownloadState - - if (downloadRequest.downloadType === 'extension') { - observer?.(DownloadEvent.onFileDownloadUpdate, initialDownloadState) - } - - progress(rq, {}) - .on('progress', (state: any) => { - const currentDownloadState = DownloadManager.instance.downloadProgressMap[modelId] - const 
downloadState: DownloadState = { - ...currentDownloadState, - ...state, - fileName: fileName, - downloadState: 'downloading', - } - console.debug('progress: ', downloadState) - observer?.(DownloadEvent.onFileDownloadUpdate, downloadState) - DownloadManager.instance.downloadProgressMap[modelId] = downloadState - }) - .on('error', (error: Error) => { - const currentDownloadState = DownloadManager.instance.downloadProgressMap[modelId] - const downloadState: DownloadState = { - ...currentDownloadState, - fileName: fileName, - error: error.message, - downloadState: 'error', - } - - observer?.(DownloadEvent.onFileDownloadError, downloadState) - DownloadManager.instance.downloadProgressMap[modelId] = downloadState - }) - .on('end', () => { - const currentDownloadState = DownloadManager.instance.downloadProgressMap[modelId] - if ( - currentDownloadState && - DownloadManager.instance.networkRequests[normalizedPath] && - DownloadManager.instance.downloadProgressMap[modelId]?.downloadState !== 'error' - ) { - // Finished downloading, rename temp file to actual file - renameSync(downloadingTempFile, destination) - const downloadState: DownloadState = { - ...currentDownloadState, - fileName: fileName, - downloadState: 'end', - } - observer?.(DownloadEvent.onFileDownloadSuccess, downloadState) - DownloadManager.instance.downloadProgressMap[modelId] = downloadState - } - }) - .pipe(createWriteStream(downloadingTempFile)) - } - - abortDownload(observer: any, fileName: string) { - const rq = DownloadManager.instance.networkRequests[fileName] - if (rq) { - DownloadManager.instance.networkRequests[fileName] = undefined - rq?.abort() - } - - const downloadInfo = DownloadManager.instance.downloadInfo[fileName] - observer?.(DownloadEvent.onFileDownloadError, { - ...downloadInfo, - fileName, - error: 'aborted', - }) - } - - resumeDownload(_observer: any, fileName: any) { - DownloadManager.instance.networkRequests[fileName]?.resume() - } - - pauseDownload(_observer: any, fileName: any) { 
- DownloadManager.instance.networkRequests[fileName]?.pause() - } -} diff --git a/core/src/node/helper/download.test.ts b/core/src/node/helper/download.test.ts deleted file mode 100644 index 95cc553b5..000000000 --- a/core/src/node/helper/download.test.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { DownloadManager } from './download'; - -it('should set a network request for a specific file', () => { - const downloadManager = new DownloadManager(); - const fileName = 'testFile'; - const request = { url: 'http://example.com' }; - - downloadManager.setRequest(fileName, request); - - expect(downloadManager.networkRequests[fileName]).toEqual(request); -}); diff --git a/core/src/node/helper/download.ts b/core/src/node/helper/download.ts deleted file mode 100644 index 51a0b0a8f..000000000 --- a/core/src/node/helper/download.ts +++ /dev/null @@ -1,30 +0,0 @@ -import { DownloadState } from '../../types' - -/** - * Manages file downloads and network requests. - */ -export class DownloadManager { - public networkRequests: Record = {} - - public static instance: DownloadManager = new DownloadManager() - - // store the download information with key is model id - public downloadProgressMap: Record = {} - - // store the download information with key is normalized file path - public downloadInfo: Record = {} - - constructor() { - if (DownloadManager.instance) { - return DownloadManager.instance - } - } - /** - * Sets a network request for a specific file. - * @param {string} fileName - The name of the file. - * @param {Request | undefined} request - The network request to set, or undefined to clear the request. 
- */ - setRequest(fileName: string, request: any | undefined) { - this.networkRequests[fileName] = request - } -} diff --git a/core/src/node/helper/index.ts b/core/src/node/helper/index.ts index 51030023f..6464fbce2 100644 --- a/core/src/node/helper/index.ts +++ b/core/src/node/helper/index.ts @@ -1,5 +1,4 @@ export * from './config' -export * from './download' export * from './logger' export * from './module' export * from './path' diff --git a/core/src/types/api/index.ts b/core/src/types/api/index.ts index 63b0eb10e..2f33b72e4 100644 --- a/core/src/types/api/index.ts +++ b/core/src/types/api/index.ts @@ -31,6 +31,10 @@ export enum NativeRoute { startServer = 'startServer', stopServer = 'stopServer', + + appUpdateDownload = 'appUpdateDownload', + + appToken = 'appToken', } /** @@ -50,6 +54,8 @@ export enum AppRoute { } export enum AppEvent { + onAppUpdateNotAvailable = 'onAppUpdateNotAvailable', + onAppUpdateAvailable = 'onAppUpdateAvailable', onAppUpdateDownloadUpdate = 'onAppUpdateDownloadUpdate', onAppUpdateDownloadError = 'onAppUpdateDownloadError', onAppUpdateDownloadSuccess = 'onAppUpdateDownloadSuccess', @@ -61,30 +67,13 @@ export enum AppEvent { onMainViewStateChange = 'onMainViewStateChange', } -export enum DownloadRoute { - abortDownload = 'abortDownload', - downloadFile = 'downloadFile', - pauseDownload = 'pauseDownload', - resumeDownload = 'resumeDownload', - getDownloadProgress = 'getDownloadProgress', -} - export enum DownloadEvent { onFileDownloadUpdate = 'onFileDownloadUpdate', onFileDownloadError = 'onFileDownloadError', onFileDownloadSuccess = 'onFileDownloadSuccess', onFileDownloadStopped = 'onFileDownloadStopped', onFileDownloadStarted = 'onFileDownloadStarted', - onFileUnzipSuccess = 'onFileUnzipSuccess', } - -export enum LocalImportModelEvent { - onLocalImportModelUpdate = 'onLocalImportModelUpdate', - onLocalImportModelFailed = 'onLocalImportModelFailed', - onLocalImportModelSuccess = 'onLocalImportModelSuccess', - onLocalImportModelFinished 
= 'onLocalImportModelFinished', -} - export enum ExtensionRoute { baseExtensions = 'baseExtensions', getActiveExtensions = 'getActiveExtensions', @@ -127,10 +116,6 @@ export type AppEventFunctions = { [K in AppEvent]: ApiFunction } -export type DownloadRouteFunctions = { - [K in DownloadRoute]: ApiFunction -} - export type DownloadEventFunctions = { [K in DownloadEvent]: ApiFunction } @@ -150,7 +135,6 @@ export type FileManagerRouteFunctions = { export type APIFunctions = NativeRouteFunctions & AppRouteFunctions & AppEventFunctions & - DownloadRouteFunctions & DownloadEventFunctions & ExtensionRouteFunctions & FileSystemRouteFunctions & @@ -158,7 +142,6 @@ export type APIFunctions = NativeRouteFunctions & export const CoreRoutes = [ ...Object.values(AppRoute), - ...Object.values(DownloadRoute), ...Object.values(ExtensionRoute), ...Object.values(FileSystemRoute), ...Object.values(FileManagerRoute), @@ -168,7 +151,6 @@ export const APIRoutes = [...CoreRoutes, ...Object.values(NativeRoute)] export const APIEvents = [ ...Object.values(AppEvent), ...Object.values(DownloadEvent), - ...Object.values(LocalImportModelEvent), ] export type PayloadType = { messages: ChatCompletionMessage[] diff --git a/core/src/types/config/appConfigEntity.ts b/core/src/types/config/appConfigEntity.ts index 1402aeca1..bd352d22f 100644 --- a/core/src/types/config/appConfigEntity.ts +++ b/core/src/types/config/appConfigEntity.ts @@ -1,4 +1,5 @@ export type AppConfiguration = { data_folder: string quick_ask: boolean + distinct_id?: string } diff --git a/core/src/types/engine/index.ts b/core/src/types/engine/index.ts index 7c848a279..9a6beeeff 100644 --- a/core/src/types/engine/index.ts +++ b/core/src/types/engine/index.ts @@ -18,6 +18,7 @@ export type EngineMetadata = { template?: string } } + explore_models_url?: string } export type EngineVariant = { diff --git a/core/src/types/file/index.ts b/core/src/types/file/index.ts index 87d83c51d..3535dc6da 100644 --- a/core/src/types/file/index.ts +++ 
b/core/src/types/file/index.ts @@ -16,41 +16,9 @@ export type DownloadState = { error?: string extensionId?: string - downloadType?: DownloadType | string localPath?: string } -export type DownloadType = 'model' | 'extension' - -export type DownloadRequest = { - /** - * The URL to download the file from. - */ - url: string - - /** - * The local path to save the file to. - */ - localPath: string - - /** - * The extension ID of the extension that initiated the download. - * - * Can be extension name. - */ - extensionId?: string - - /** - * The model ID of the model that initiated the download. - */ - modelId?: string - - /** - * The download type. - */ - downloadType?: DownloadType | string -} - type DownloadTime = { elapsed: number remaining: number @@ -60,7 +28,6 @@ type DownloadSize = { total: number transferred: number } - /** * The file metadata */ diff --git a/core/src/types/hardware/index.ts b/core/src/types/hardware/index.ts new file mode 100644 index 000000000..d154a4417 --- /dev/null +++ b/core/src/types/hardware/index.ts @@ -0,0 +1,55 @@ +export type Cpu = { + arch: string + cores: number + instructions: string[] + model: string + usage: number +} + +export type GpuAdditionalInformation = { + compute_cap: string + driver_version: string +} + +export type Gpu = { + activated: boolean + additional_information?: GpuAdditionalInformation + free_vram: number + id: string + name: string + total_vram: number + uuid: string + version: string +} + +export type Os = { + name: string + version: string +} + +export type Power = { + battery_life: number + charging_status: string + is_power_saving: boolean +} + +export type Ram = { + available: number + total: number + type: string +} + +export type Storage = { + available: number + total: number + type: string +} + +export type HardwareInformation = { + cpu: Cpu + gpus: Gpu[] + os: Os + power: Power + ram: Ram + storage: Storage +} diff --git a/core/src/types/index.test.ts b/core/src/types/index.test.ts index 
9dc001c4d..d938feee9 100644 --- a/core/src/types/index.test.ts +++ b/core/src/types/index.test.ts @@ -4,7 +4,6 @@ import * as model from './model'; import * as thread from './thread'; import * as message from './message'; import * as inference from './inference'; -import * as monitoring from './monitoring'; import * as file from './file'; import * as config from './config'; import * as huggingface from './huggingface'; @@ -18,7 +17,6 @@ import * as setting from './setting'; expect(thread).toBeDefined(); expect(message).toBeDefined(); expect(inference).toBeDefined(); - expect(monitoring).toBeDefined(); expect(file).toBeDefined(); expect(config).toBeDefined(); expect(huggingface).toBeDefined(); diff --git a/core/src/types/index.ts b/core/src/types/index.ts index e30dd18c3..3d262a6b7 100644 --- a/core/src/types/index.ts +++ b/core/src/types/index.ts @@ -3,7 +3,6 @@ export * from './model' export * from './thread' export * from './message' export * from './inference' -export * from './monitoring' export * from './file' export * from './config' export * from './huggingface' @@ -11,3 +10,4 @@ export * from './miscellaneous' export * from './api' export * from './setting' export * from './engine' +export * from './hardware' diff --git a/core/src/types/miscellaneous/fileDownloadRequest.ts b/core/src/types/miscellaneous/fileDownloadRequest.ts deleted file mode 100644 index 83131aa71..000000000 --- a/core/src/types/miscellaneous/fileDownloadRequest.ts +++ /dev/null @@ -1,8 +0,0 @@ -export type FileDownloadRequest = { - downloadId: string - url: string - localPath: string - fileName: string - displayName: string - metadata: Record -} diff --git a/core/src/types/miscellaneous/index.ts b/core/src/types/miscellaneous/index.ts index 2693ffd8b..6e533259d 100644 --- a/core/src/types/miscellaneous/index.ts +++ b/core/src/types/miscellaneous/index.ts @@ -1,6 +1,4 @@ export * from './systemResourceInfo' export * from './promptTemplate' export * from './appUpdate' -export * from 
'./fileDownloadRequest' -export * from './networkConfig' export * from './selectFiles' diff --git a/core/src/types/miscellaneous/networkConfig.ts b/core/src/types/miscellaneous/networkConfig.ts deleted file mode 100644 index 2d27f4223..000000000 --- a/core/src/types/miscellaneous/networkConfig.ts +++ /dev/null @@ -1,4 +0,0 @@ -export type NetworkConfig = { - proxy?: string - ignoreSSL?: boolean -} diff --git a/core/src/types/miscellaneous/systemResourceInfo.ts b/core/src/types/miscellaneous/systemResourceInfo.ts index 82db5d941..9361b79b6 100644 --- a/core/src/types/miscellaneous/systemResourceInfo.ts +++ b/core/src/types/miscellaneous/systemResourceInfo.ts @@ -1,33 +1,25 @@ +import { GpuAdditionalInformation } from '../hardware' + export type SystemResourceInfo = { memAvailable: number } -export type RunMode = 'cpu' | 'gpu' - export type GpuSetting = { - notify: boolean - run_mode: RunMode - nvidia_driver: { - exist: boolean - version: string - } - cuda: { - exist: boolean - version: string - } gpus: GpuSettingInfo[] - gpu_highest_vram: string - gpus_in_use: string[] - is_initial: boolean // TODO: This needs to be set based on user toggle in settings vulkan: boolean + cpu?: any } export type GpuSettingInfo = { + activated: boolean + free_vram: number id: string - vram: string name: string - arch?: string + total_vram: number + uuid: string + version: string + additional_information?: GpuAdditionalInformation } export type SystemInformation = { @@ -42,9 +34,6 @@ export type SupportedPlatform = SupportedPlatformTuple[number] export type OperatingSystemInfo = { platform: SupportedPlatform | 'unknown' arch: string - release: string - machine: string - version: string totalMem: number freeMem: number } diff --git a/core/src/types/model/modelEntity.ts b/core/src/types/model/modelEntity.ts index 482dfa1ac..6e47c9ae4 100644 --- a/core/src/types/model/modelEntity.ts +++ b/core/src/types/model/modelEntity.ts @@ -71,7 +71,7 @@ export type Model = { /** * The model 
identifier, modern version of id. */ - mode?: string + model?: string /** * Human-readable name that is used for UI. @@ -150,6 +150,7 @@ export type ModelSettingParams = { */ export type ModelRuntimeParams = { temperature?: number + max_temperature?: number token_limit?: number top_k?: number top_p?: number diff --git a/core/src/types/model/modelSource.ts b/core/src/types/model/modelSource.ts index af1e75d72..ecf3e8aee 100644 --- a/core/src/types/model/modelSource.ts +++ b/core/src/types/model/modelSource.ts @@ -61,6 +61,7 @@ export interface ModelSibling { */ export interface ModelSource { id: string + author?: string metadata: Metadata models: ModelSibling[] type?: string diff --git a/core/src/types/monitoring/index.test.ts b/core/src/types/monitoring/index.test.ts deleted file mode 100644 index 56c5879e4..000000000 --- a/core/src/types/monitoring/index.test.ts +++ /dev/null @@ -1,13 +0,0 @@ -import * as monitoringInterface from './monitoringInterface' -import * as resourceInfo from './resourceInfo' - -import * as index from './index' - -it('should re-export all symbols from monitoringInterface and resourceInfo', () => { - for (const key in monitoringInterface) { - expect(index[key]).toBe(monitoringInterface[key]) - } - for (const key in resourceInfo) { - expect(index[key]).toBe(resourceInfo[key]) - } -}) diff --git a/core/src/types/monitoring/index.ts b/core/src/types/monitoring/index.ts deleted file mode 100644 index b96c518fd..000000000 --- a/core/src/types/monitoring/index.ts +++ /dev/null @@ -1,2 +0,0 @@ -export * from './monitoringInterface' -export * from './resourceInfo' diff --git a/core/src/types/monitoring/monitoringInterface.ts b/core/src/types/monitoring/monitoringInterface.ts deleted file mode 100644 index 5ab1394a1..000000000 --- a/core/src/types/monitoring/monitoringInterface.ts +++ /dev/null @@ -1,29 +0,0 @@ -import { GpuSetting, OperatingSystemInfo } from '../miscellaneous' - -/** - * Monitoring extension for system monitoring. 
- * @extends BaseExtension - */ -export interface MonitoringInterface { - /** - * Returns information about the system resources. - * @returns {Promise} A promise that resolves with the system resources information. - */ - getResourcesInfo(): Promise - - /** - * Returns the current system load. - * @returns {Promise} A promise that resolves with the current system load. - */ - getCurrentLoad(): Promise - - /** - * Returns the GPU configuration. - */ - getGpuSetting(): Promise - - /** - * Returns information about the operating system. - */ - getOsInfo(): Promise -} diff --git a/core/src/types/monitoring/resourceInfo.ts b/core/src/types/monitoring/resourceInfo.ts deleted file mode 100644 index b19da5462..000000000 --- a/core/src/types/monitoring/resourceInfo.ts +++ /dev/null @@ -1,6 +0,0 @@ -export type ResourceInfo = { - mem: { - totalMemory: number - usedMemory: number - } -} diff --git a/core/src/types/thread/index.ts b/core/src/types/thread/index.ts index 32155e1cd..c6ff6204a 100644 --- a/core/src/types/thread/index.ts +++ b/core/src/types/thread/index.ts @@ -1,3 +1,2 @@ export * from './threadEntity' export * from './threadInterface' -export * from './threadEvent' diff --git a/core/src/types/thread/threadEvent.test.ts b/core/src/types/thread/threadEvent.test.ts deleted file mode 100644 index f892f1050..000000000 --- a/core/src/types/thread/threadEvent.test.ts +++ /dev/null @@ -1,6 +0,0 @@ - -import { ThreadEvent } from './threadEvent'; - -it('should have the correct values', () => { - expect(ThreadEvent.OnThreadStarted).toBe('OnThreadStarted'); -}); diff --git a/core/src/types/thread/threadEvent.ts b/core/src/types/thread/threadEvent.ts deleted file mode 100644 index 4b19b09c1..000000000 --- a/core/src/types/thread/threadEvent.ts +++ /dev/null @@ -1,4 +0,0 @@ -export enum ThreadEvent { - /** The `OnThreadStarted` event is emitted when a thread is started. 
*/ - OnThreadStarted = 'OnThreadStarted', -} diff --git a/docs/.env.example b/docs/.env.example index 4048b604c..eff7b6923 100644 --- a/docs/.env.example +++ b/docs/.env.example @@ -1 +1,3 @@ -GTM_ID=xxxx \ No newline at end of file +GTM_ID=xxxx +POSTHOG_KEY=xxxx +POSTHOG_HOST=xxxx \ No newline at end of file diff --git a/docs/README.md b/docs/README.md index 11b68f26b..4a5e45f4e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -18,7 +18,7 @@ We try to **keep routes consistent** to maintain SEO. ## How to Contribute -Refer to the [Contributing Guide](https://github.com/janhq/jan/blob/main/CONTRIBUTING.md) for more comprehensive information on how to contribute to the Jan project. +Refer to the [Contributing Guide](https://github.com/menloresearch/jan/blob/main/CONTRIBUTING.md) for more comprehensive information on how to contribute to the Jan project. ### Pre-requisites and Installation diff --git a/docs/next.config.mjs b/docs/next.config.mjs index 92b8fb461..660410c06 100644 --- a/docs/next.config.mjs +++ b/docs/next.config.mjs @@ -27,6 +27,8 @@ const nextConfig = { output: 'export', env: { GTM_ID: process.env.GTM_ID, + POSTHOG_KEY: process.env.POSTHOG_KEY, + POSTHOG_HOST: process.env.POSTHOG_HOST, }, transpilePackages: ['@scalar', 'react-tweet'], images: { diff --git a/docs/package.json b/docs/package.json index ac2fab87b..fd691a09a 100644 --- a/docs/package.json +++ b/docs/package.json @@ -36,6 +36,7 @@ "path": "^0.12.7", "plop": "^4.0.1", "plop-helper-date": "^1.0.0", + "posthog-js": "^1.194.6", "react": "^18", "react-dom": "^18", "react-hook-form": "^7.51.1", diff --git a/docs/public/assets/images/changelog/jan-v0-5-13.gif b/docs/public/assets/images/changelog/jan-v0-5-13.gif new file mode 100644 index 000000000..5466692e8 Binary files /dev/null and b/docs/public/assets/images/changelog/jan-v0-5-13.gif differ diff --git a/docs/public/assets/images/changelog/jan-v0-5-14-deepseek-r1.gif b/docs/public/assets/images/changelog/jan-v0-5-14-deepseek-r1.gif new 
file mode 100644 index 000000000..c1732ec60 Binary files /dev/null and b/docs/public/assets/images/changelog/jan-v0-5-14-deepseek-r1.gif differ diff --git a/docs/public/assets/images/general/menlo.svg b/docs/public/assets/images/general/menlo.svg new file mode 100644 index 000000000..582a498bd --- /dev/null +++ b/docs/public/assets/images/general/menlo.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/docs/public/openapi/jan.json b/docs/public/openapi/jan.json index c3abaf760..787105ea8 100644 --- a/docs/public/openapi/jan.json +++ b/docs/public/openapi/jan.json @@ -1,2131 +1,1972 @@ { - "openapi": "3.0.0", - "paths": { - "/messages": { - "post": { - "operationId": "MessagesController_create", - "summary": "Create message", - "description": "Creates a message in a thread.", - "parameters": [], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateMessageDto" - } - } - } - }, - "responses": { - "201": { - "description": "The message has been successfully created.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateMessageDto" - } - } - } - } - }, - "tags": [ - "Messages" - ] - }, - "get": { - "operationId": "MessagesController_findAll", - "summary": "List messages", - "description": "Retrieves all the messages in a thread.", - "parameters": [], - "responses": { - "200": { - "description": "Ok", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListMessagesResponseDto" - } - } - } - } - }, - "tags": [ - "Messages" - ] + "openapi": "3.0.0", + "paths": { + "/messages": { + "post": { + "operationId": "MessagesController_create", + "summary": "Create message", + "description": "Creates a message in a thread.", + "parameters": [], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateMessageDto" + } } + } }, - "/messages/{id}": { - "get": { - 
"operationId": "MessagesController_findOne", - "summary": "Retrieve message", - "description": "Retrieves a specific message defined by a message's `id`.", - "parameters": [ - { - "name": "id", - "required": true, - "in": "path", - "description": "The unique identifier of the message.", - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Ok", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetMessageResponseDto" - } - } - } - } - }, - "tags": [ - "Messages" - ] - }, - "patch": { - "operationId": "MessagesController_update", - "summary": "Update message", - "description": "Updates a specific message defined by a message's `id`.", - "parameters": [ - { - "name": "id", - "required": true, - "in": "path", - "description": "The unique identifier of the message.", - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateMessageDto" - } - } - } - }, - "responses": { - "200": { - "description": "The message has been successfully updated.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateMessageDto" - } - } - } - } - }, - "tags": [ - "Messages" - ] - }, - "delete": { - "operationId": "MessagesController_remove", - "summary": "Delete message", - "description": "Deletes a specific message defined by a message's `id`.", - "parameters": [ - { - "name": "id", - "required": true, - "in": "path", - "description": "The unique identifier of the message.", - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Successfully deleted the message.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DeleteMessageResponseDto" - } - } - } - } - }, - "tags": [ - "Messages" - ] - } - }, - "/threads": { - "post": { - "operationId": "ThreadsController_create", - "summary": "Create thread", 
- "description": "Creates a new thread.", - "parameters": [], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateThreadDto" - } - } - } - }, - "responses": { - "201": { - "description": "", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ThreadEntity" - } - } - } - } - }, - "tags": [ - "Threads" - ] - }, - "get": { - "operationId": "ThreadsController_findAll", - "summary": "List threads", - "description": "Lists all the available threads along with its configurations.", - "parameters": [], - "responses": { - "200": { - "description": "", - "content": { - "application/json": { - "schema": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ThreadEntity" - } - } - } - } - } - }, - "tags": [ - "Threads" - ] - } - }, - "/threads/{id}": { - "get": { - "operationId": "ThreadsController_findOne", - "summary": "Get thread", - "description": "Retrieves a thread along with its configurations.", - "parameters": [ - { - "name": "id", - "required": true, - "in": "path", - "description": "The unique identifier of the thread.", - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Ok", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetThreadResponseDto" - } - } - } - } - }, - "tags": [ - "Threads" - ] - }, - "patch": { - "operationId": "ThreadsController_update", - "summary": "Update thread", - "description": "Updates a thread's configurations.", - "parameters": [ - { - "name": "id", - "required": true, - "in": "path", - "description": "The unique identifier of the thread.", - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateThreadDto" - } - } - } - }, - "responses": { - "200": { - "description": "The thread has been successfully updated.", - "content": { 
- "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateThreadDto" - } - } - } - } - }, - "tags": [ - "Threads" - ] - }, - "delete": { - "operationId": "ThreadsController_remove", - "summary": "Delete thread", - "description": "Deletes a specific thread defined by a thread `id` .", - "parameters": [ - { - "name": "id", - "required": true, - "in": "path", - "description": "The unique identifier of the thread.", - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "The thread has been successfully deleted.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DeleteThreadResponseDto" - } - } - } - } - }, - "tags": [ - "Threads" - ] - } - }, - "/models": { - "post": { - "operationId": "ModelsController_create", - "summary": "Create model", - "description": "Creates a model `.json` instance file manually.", - "parameters": [], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateModelDto" - } - } - } - }, - "responses": { - "201": { - "description": "The model has been successfully created.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/StartModelSuccessDto" - } - } - } - } - }, - "tags": [ - "Models" - ] - }, - "get": { - "operationId": "ModelsController_findAll", - "summary": "List models", - "description": "Lists the currently available models, and provides basic information about each one such as the owner and availability. 
[Equivalent to OpenAI's list model](https://platform.openai.com/docs/api-reference/models/list).", - "parameters": [], - "responses": { - "200": { - "description": "Ok", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListModelsResponseDto" - } - } - } - } - }, - "tags": [ - "Models" - ] - } - }, - "/models/{modelId}/start": { - "post": { - "operationId": "ModelsController_startModel", - "summary": "Start model", - "description": "Starts a model operation defined by a model `id`.", - "parameters": [ - { - "name": "modelId", - "required": true, - "in": "path", - "description": "The unique identifier of the model.", - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ModelSettingParamsDto" - } - } - } - }, - "responses": { - "200": { - "description": "The model has been successfully started.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/StartModelSuccessDto" - } - } - } - } - }, - "tags": [ - "Models" - ] - } - }, - "/models/{modelId}/stop": { - "post": { - "operationId": "ModelsController_stopModel", - "summary": "Stop model", - "description": "Stops a model operation defined by a model `id`.", - "parameters": [ - { - "name": "modelId", - "required": true, - "in": "path", - "description": "The unique identifier of the model.", - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "The model has been successfully stopped.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/StartModelSuccessDto" - } - } - } - } - }, - "tags": [ - "Models" - ] - } - }, - "/models/download/{modelId}": { - "get": { - "operationId": "ModelsController_downloadModel", - "summary": "Download model", - "description": "Downloads a specific model instance.", - "parameters": [ - { - "name": "modelId", - "required": true, - "in": 
"path", - "description": "The unique identifier of the model.", - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Ok", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DownloadModelResponseDto" - } - } - } - } - }, - "tags": [ - "Models" - ] - } - }, - "/models/{id}": { - "get": { - "operationId": "ModelsController_findOne", - "summary": "Get model", - "description": "Retrieves a model instance, providing basic information about the model such as the owner and permissions. [Equivalent to OpenAI's list model](https://platform.openai.com/docs/api-reference/models/retrieve).", - "parameters": [ - { - "name": "id", - "required": true, - "in": "path", - "description": "The unique identifier of the model.", - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Ok", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ModelDto" - } - } - } - } - }, - "tags": [ - "Models" - ] - }, - "patch": { - "operationId": "ModelsController_update", - "summary": "Update model", - "description": "Updates a model instance defined by a model's `id`.", - "parameters": [ - { - "name": "id", - "required": true, - "in": "path", - "description": "The unique identifier of the model.", - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateModelDto" - } - } - } - }, - "responses": { - "200": { - "description": "The model has been successfully updated.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateModelDto" - } - } - } - } - }, - "tags": [ - "Models" - ] - }, - "delete": { - "operationId": "ModelsController_remove", - "summary": "Delete model", - "description": "Deletes a model. 
[Equivalent to OpenAI's delete model](https://platform.openai.com/docs/api-reference/models/delete).", - "parameters": [ - { - "name": "id", - "required": true, - "in": "path", - "description": "The unique identifier of the model.", - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "The model has been successfully deleted.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DeleteModelResponseDto" - } - } - } - } - }, - "tags": [ - "Models" - ] - } - }, - "/chat/completions": { - "post": { - "operationId": "ChatController_create", - "summary": "Create chat completion", - "description": "Creates a model response for the given conversation.", - "parameters": [], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateChatCompletionDto" - } - } - } - }, - "responses": { - "200": { - "description": "Ok", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ChatCompletionResponseDto" - } - } - } - } - }, - "tags": [ - "Inference" - ] - } - }, - "/assistants": { - "post": { - "operationId": "AssistantsController_create", - "summary": "Create assistant", - "description": "Creates a new assistant.", - "parameters": [], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateAssistantDto" - } - } - } - }, - "responses": { - "201": { - "description": "The assistant has been successfully created." 
- } - }, - "tags": [ - "Assistants" - ] - }, - "get": { - "operationId": "AssistantsController_findAll", - "summary": "List assistants", - "description": "Retrieves all the available assistants along with their settings.", - "parameters": [], - "responses": { - "200": { - "description": "Ok", - "content": { - "application/json": { - "schema": { - "type": "array", - "items": { - "$ref": "#/components/schemas/AssistantEntity" - } - } - } - } - } - }, - "tags": [ - "Assistants" - ] - } - }, - "/assistants/{id}": { - "get": { - "operationId": "AssistantsController_findOne", - "summary": "Get assistant", - "description": "Retrieves a specific assistant defined by an assistant's `id`.", - "parameters": [ - { - "name": "id", - "required": true, - "in": "path", - "description": "The unique identifier of the assistant.", - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Ok", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/AssistantEntity" - } - } - } - } - }, - "tags": [ - "Assistants" - ] - }, - "delete": { - "operationId": "AssistantsController_remove", - "summary": "Delete assistant", - "description": "Deletes a specific assistant defined by an assistant's `id`.", - "parameters": [ - { - "name": "id", - "required": true, - "in": "path", - "description": "The unique identifier of the assistant.", - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "The assistant has been successfully deleted.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DeleteAssistantResponseDto" - } - } - } - } - }, - "tags": [ - "Assistants" - ] - } - } - }, - "info": { - "title": "Cortex API", - "description": "Cortex API provides a command-line interface (CLI) for seamless interaction with large language models (LLMs). 
Fully compatible with the [OpenAI API](https://platform.openai.com/docs/api-reference), it enables straightforward command execution and management of LLM interactions.", - "version": "1.0", - "contact": {} - }, - "tags": [ - { - "name": "Inference", - "description": "This endpoint initiates interaction with a Language Learning Model (LLM)." - }, - { - "name": "Assistants", - "description": "These endpoints manage the lifecycle of an Assistant within a conversation thread." - }, - { - "name": "Models", - "description": "These endpoints provide a list and descriptions of all available models within the Cortex framework." - }, - { - "name": "Messages", - "description": "These endpoints manage the retrieval and storage of conversation content, including responses from LLMs and other metadata related to chat interactions." - }, - { - "name": "Threads", - "description": "These endpoints handle the creation, retrieval, updating, and deletion of conversation threads." - } - ], - "servers": [ - { - "url": "http://localhost:1337" - }, - { - "url": "http://localhost:1337/v1" - } - ], - "components": { - "schemas": { - "ContentValueDto": { - "type": "object", - "properties": { - "value": { - "type": "string", - "description": "The text's value." - }, - "annotations": { - "description": "The text's annotation that categorize the text.", - "type": "array", - "items": { - "type": "string" - } - }, - "name": { - "type": "string", - "description": "The name or title of the text." - }, - "size": { - "type": "number", - "description": "The text's size in bytes." - } - }, - "required": [ - "value", - "annotations" - ] - }, - "ThreadContentDto": { - "type": "object", - "properties": { - "type": { - "enum": [ - "text", - "image", - "pdf" - ], - "type": "string", - "description": "The type of content." 
- }, - "text": { - "description": "The content details.", - "allOf": [ - { - "$ref": "#/components/schemas/ContentValueDto" - } - ] - } - }, - "required": [ - "type", - "text" - ] - }, - "CreateMessageDto": { - "type": "object", - "properties": { - "thread_id": { - "type": "string", - "description": "The ID of the thread to which the message will be posted." - }, - "assistant_id": { - "type": "string", - "description": "The assistant's unique identifier." - }, - "role": { - "enum": [ - "system", - "assistant", - "user" - ], - "type": "string", - "description": "The sources of the messages." - }, - "content": { - "description": "The content of the messages.", - "type": "array", - "items": { - "$ref": "#/components/schemas/ThreadContentDto" - } - }, - "status": { - "enum": [ - "ready", - "pending", - "error", - "stopped" - ], - "type": "string", - "description": "Current status of the message." - }, - "metadata": { - "type": "object", - "description": "Optional dictionary for additional unstructured message information." - }, - "type": { - "type": "string", - "description": "Type of the message." - }, - "error_code": { - "enum": [ - "invalid_api_key", - "insufficient_quota", - "invalid_request_error", - "unknown" - ], - "type": "string", - "description": "Specifies the cause of any error." - } - }, - "required": [ - "thread_id", - "role", - "content", - "status" - ] - }, - "ListMessageObjectDto": { - "type": "object", - "properties": { - "id": { - "type": "string", - "example": "msg_abc123", - "description": "The identifier of the message." - }, - "object": { - "type": "string", - "example": "thread.message", - "description": "Type of the object, indicating it's a thread message." - }, - "created_at": { - "type": "integer", - "example": 1699017614, - "description": "Unix timestamp representing the creation time of the message." 
- }, - "thread_id": { - "type": "string", - "example": "thread_abc123", - "description": "Identifier of the thread to which this message belongs." - }, - "role": { - "type": "string", - "example": "user", - "description": "Role of the sender, either 'user' or 'assistant'." - }, - "file_ids": { - "description": "Array of file IDs associated with the message, if any.", - "example": [], - "type": "array", - "items": { - "type": "string" - } - }, - "assistant_id": { - "type": "string", - "nullable": true, - "description": "Identifier of the assistant involved in the message, if applicable.", - "example": null - }, - "run_id": { - "type": "string", - "nullable": true, - "description": "Run ID associated with the message, if applicable.", - "example": null - }, - "metadata": { - "type": "object", - "example": {}, - "description": "Metadata associated with the message." - } - }, - "required": [ - "id", - "object", - "created_at", - "thread_id", - "role", - "file_ids", - "assistant_id", - "run_id", - "metadata" - ] - }, - "ListMessagesResponseDto": { - "type": "object", - "properties": { - "object": { - "type": "string", - "example": "list", - "description": "Type of the object, indicating it's a list." - }, - "data": { - "description": "Array of message objects.", - "type": "array", - "items": { - "$ref": "#/components/schemas/ListMessageObjectDto" - } - }, - "first_id": { - "type": "string", - "example": "msg_abc123", - "description": "Identifier of the first message in the list." - }, - "last_id": { - "type": "string", - "example": "msg_abc456", - "description": "Identifier of the last message in the list." - }, - "has_more": { - "type": "boolean", - "example": false, - "description": "Indicates whether there are more messages to retrieve." 
- } - }, - "required": [ - "object", - "data", - "first_id", - "last_id", - "has_more" - ] - }, - "ContentDto": { - "type": "object", - "properties": { - "type": { - "type": "string", - "example": "text", - "description": "Type of content, e.g., \"text\"." - }, - "text": { - "type": "object", - "example": { - "value": "How does AI work? Explain it in simple terms.", - "annotations": [] - }, - "description": "Text content of the message along with any annotations." - } - }, - "required": [ - "type", - "text" - ] - }, - "GetMessageResponseDto": { - "type": "object", - "properties": { - "id": { - "type": "string", - "example": "msg_abc123", - "description": "The identifier of the message." - }, - "object": { - "type": "string", - "example": "thread.message", - "description": "Type of the object, indicating it's a thread message.", - "default": "thread.message" - }, - "created_at": { - "type": "integer", - "example": 1699017614, - "description": "Unix timestamp representing the creation time of the message." - }, - "thread_id": { - "type": "string", - "example": "thread_abc123", - "description": "Identifier of the thread to which this message belongs." - }, - "role": { - "type": "string", - "example": "user", - "description": "Role of the sender, either 'user' or 'assistant'." - }, - "content": { - "description": "Array of content objects detailing the message content.", - "type": "array", - "items": { - "$ref": "#/components/schemas/ContentDto" - } - }, - "file_ids": { - "example": [], - "description": "Array of file IDs associated with the message, if any.", - "type": "array", - "items": { - "type": "string" - } - }, - "assistant_id": { - "type": "string", - "nullable": true, - "example": null, - "description": "Identifier of the assistant involved in the message, if applicable." - }, - "run_id": { - "type": "string", - "nullable": true, - "example": null, - "description": "Run ID associated with the message, if applicable." 
- }, - "metadata": { - "type": "object", - "example": {}, - "description": "Metadata associated with the message." - } - }, - "required": [ - "id", - "object", - "created_at", - "thread_id", - "role", - "content", - "file_ids", - "assistant_id", - "run_id", - "metadata" - ] - }, - "UpdateMessageDto": { - "type": "object", - "properties": {} - }, - "DeleteMessageResponseDto": { - "type": "object", - "properties": { - "id": { - "type": "string", - "example": "message_123", - "description": "The identifier of the message that was deleted." - }, - "object": { - "type": "string", - "example": "message", - "description": "Type of the object, indicating it's a message.", - "default": "message" - }, - "deleted": { - "type": "boolean", - "example": true, - "description": "Indicates whether the message was successfully deleted." - } - }, - "required": [ - "id", - "object", - "deleted" - ] - }, - "ModelSettingParamsDto": { - "type": "object", - "properties": { - "ctx_len": { - "type": "number", - "description": "Sets the maximum input the model can use to generate a response, it varies with the model used." - }, - "ngl": { - "type": "number", - "description": "Determines GPU layer usage." - }, - "embedding": { - "type": "boolean", - "description": "Enables embedding utilization for tasks like document-enhanced chat in RAG-based applications." - }, - "n_parallel": { - "type": "number", - "description": "Number of parallel processing units to use." - }, - "cpu_threads": { - "type": "number", - "description": "Determines CPU inference threads, limited by hardware and OS. " - }, - "prompt_template": { - "type": "string", - "description": "A predefined text or framework that guides the AI model's response generation." - }, - "system_prompt": { - "type": "string", - "description": "Specific prompt used by the system for generating model outputs." 
- }, - "ai_prompt": { - "type": "string", - "description": "The prompt fed into the AI, typically to guide or specify the nature of the content it should generate." - }, - "user_prompt": { - "type": "string", - "description": "Customizable prompt input by the user to direct the model’s output generation." - }, - "llama_model_path": { - "type": "string", - "description": "File path to a specific llama model." - }, - "mmproj": { - "type": "string", - "description": "The mmproj is a projection matrix that is used to project the embeddings from CLIP into tokens usable by llama/mistral." - }, - "cont_batching": { - "type": "boolean", - "description": "Controls continuous batching, enhancing throughput for LLM inference." - }, - "vision_model": { - "type": "boolean", - "description": "Specifies if a vision-based model (for image processing) should be used." - }, - "text_model": { - "type": "boolean", - "description": "Specifies if a text-based model is to be utilized, for tasks like text generation or analysis." - } + "responses": { + "201": { + "description": "The message has been successfully created.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateMessageDto" } - }, - "ModelRuntimeParamsDto": { - "type": "object", - "properties": { - "temperature": { - "type": "number", - "description": "Influences the randomness of the model's output." - }, - "token_limit": { - "type": "number", - "description": "Sets the maximum number of pieces (like words or characters) the model will produce at one time." - }, - "top_k": { - "type": "number", - "description": "Limits the model's choices when it's deciding what to write next." - }, - "top_p": { - "type": "number", - "description": "Sets probability threshold for more relevant outputs." - }, - "stream": { - "type": "boolean", - "description": "Determines the format for output generation. 
If set to `true`, the output is generated continuously, allowing for real-time streaming of responses. If set to `false`, the output is delivered in a single JSON file." - }, - "max_tokens": { - "type": "number", - "description": "Sets the upper limit on the number of tokens the model can generate in a single output." - }, - "stop": { - "description": "Defines specific tokens or phrases that signal the model to stop producing further output.", - "type": "array", - "items": { - "type": "string" - } - }, - "frequency_penalty": { - "type": "number", - "description": "Modifies the likelihood of the model repeating the same words or phrases within a single output." - }, - "presence_penalty": { - "type": "number", - "description": "Reduces the likelihood of repeating tokens, promoting novelty in the output." - }, - "engine": { - "type": "string", - "description": "The engine used to run the model." - } - } - }, - "CreateThreadModelInfoDto": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "The unique identifier of the thread." - }, - "settings": { - "description": "The settings of the thread.", - "allOf": [ - { - "$ref": "#/components/schemas/ModelSettingParamsDto" - } - ] - }, - "parameters": { - "description": "The parameters of the thread.", - "allOf": [ - { - "$ref": "#/components/schemas/ModelRuntimeParamsDto" - } - ] - }, - "engine": { - "type": "string", - "description": "The engine used in the thread to operate the model." - } - }, - "required": [ - "id", - "settings", - "parameters" - ] - }, - "AssistantToolDto": { - "type": "object", - "properties": { - "type": { - "type": "string", - "description": "The type of the assistant's tool." - }, - "enabled": { - "type": "boolean", - "description": "Enable or disable the assistant's tool." - }, - "settings": { - "type": "object", - "description": "The setting of the assistant's tool." 
- } - }, - "required": [ - "type", - "enabled", - "settings" - ] - }, - "CreateThreadAssistantDto": { - "type": "object", - "properties": { - "assistant_id": { - "type": "string", - "description": "The unique identifier of the assistant." - }, - "assistant_name": { - "type": "string", - "description": "The name of the assistant." - }, - "model": { - "description": "The model's unique identifier and settings.", - "allOf": [ - { - "$ref": "#/components/schemas/CreateThreadModelInfoDto" - } - ] - }, - "instructions": { - "type": "string", - "description": "The assistant's specific instructions." - }, - "tools": { - "description": "The thread's tool(Knowledge Retrieval) configurations.", - "type": "array", - "items": { - "$ref": "#/components/schemas/AssistantToolDto" - } - } - }, - "required": [ - "assistant_id", - "assistant_name", - "model" - ] - }, - "CreateThreadDto": { - "type": "object", - "properties": { - "title": { - "type": "string", - "description": "The title of the thread." - }, - "assistants": { - "description": "The details of the thread's settings.", - "type": "array", - "items": { - "$ref": "#/components/schemas/CreateThreadAssistantDto" - } - } - }, - "required": [ - "title", - "assistants" - ] - }, - "ThreadEntity": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "object": { - "type": "string" - }, - "title": { - "type": "string" - }, - "assistants": { - "type": "array", - "items": { - "type": "object" - } - }, - "createdAt": { - "type": "number" - }, - "updatedAt": { - "type": "number" - }, - "metadata": { - "type": "object" - } - }, - "required": [ - "id", - "object", - "title", - "assistants", - "createdAt" - ] - }, - "GetThreadResponseDto": { - "type": "object", - "properties": { - "id": { - "type": "string", - "example": "thread_abc123", - "description": "The identifier of the thread." 
- }, - "object": { - "type": "string", - "example": "thread", - "description": "Type of the object" - }, - "created_at": { - "type": "integer", - "example": 1699014083, - "description": "Unix timestamp representing the creation time of the thread." - }, - "assistants": { - "example": [ - "assistant-001" - ], - "description": "List of assistants involved in the thread.", - "type": "array", - "items": { - "type": "string" - } - }, - "metadata": { - "type": "object", - "example": {}, - "description": "Metadata associated with the thread." - }, - "messages": { - "example": [], - "description": "List of messages within the thread.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "required": [ - "id", - "object", - "created_at", - "assistants", - "metadata", - "messages" - ] - }, - "UpdateThreadDto": { - "type": "object", - "properties": {} - }, - "DeleteThreadResponseDto": { - "type": "object", - "properties": { - "id": { - "type": "string", - "example": "thread_123", - "description": "The identifier of the thread that was deleted." - }, - "object": { - "type": "string", - "example": "thread", - "description": "Type of the object, indicating it's a thread.", - "default": "thread" - }, - "deleted": { - "type": "boolean", - "example": true, - "description": "Indicates whether the thread was successfully deleted." - } - }, - "required": [ - "id", - "object", - "deleted" - ] - }, - "ModelArtifactDto": { - "type": "object", - "properties": { - "url": { - "type": "string", - "description": "The URL source of the model." - } - }, - "required": [ - "url" - ] - }, - "ModelMetadataDto": { - "type": "object", - "properties": { - "author": { - "type": "string", - "description": "The author of the model." - }, - "tags": { - "description": "The model's tags.", - "type": "array", - "items": { - "type": "string" - } - }, - "size": { - "type": "number", - "description": "The model's size." - }, - "cover": { - "type": "string", - "description": "The model's cover." 
- } - }, - "required": [ - "author", - "tags", - "size" - ] - }, - "CreateModelDto": { - "type": "object", - "properties": { - "version": { - "type": "string", - "description": "The version of the model." - }, - "format": { - "enum": [ - "gguf", - "api" - ], - "type": "string", - "description": "The state format of the model." - }, - "sources": { - "description": "The URL sources from which the model downloaded or accessed.", - "type": "array", - "items": { - "$ref": "#/components/schemas/ModelArtifactDto" - } - }, - "id": { - "type": "string", - "description": "The unique identifier of the model." - }, - "name": { - "type": "string", - "description": "The name of the model." - }, - "description": { - "type": "string", - "description": "A brief description of the model." - }, - "settings": { - "description": "The settings parameters of the model.", - "allOf": [ - { - "$ref": "#/components/schemas/ModelSettingParamsDto" - } - ] - }, - "parameters": { - "description": "The parameters configuration of the model.", - "allOf": [ - { - "$ref": "#/components/schemas/ModelRuntimeParamsDto" - } - ] - }, - "metadata": { - "description": "The metadata of the model.", - "allOf": [ - { - "$ref": "#/components/schemas/ModelMetadataDto" - } - ] - }, - "engine": { - "type": "string", - "description": "The engine used to run the model." - } - }, - "required": [ - "version", - "format", - "sources", - "id", - "name", - "description", - "settings", - "parameters", - "metadata", - "engine" - ] - }, - "StartModelSuccessDto": { - "type": "object", - "properties": { - "message": { - "type": "string", - "description": "The success or error message displayed when a model is successfully loaded or fails to load." - }, - "modelId": { - "type": "string", - "description": "The unique identifier of the model." 
- } - }, - "required": [ - "message", - "modelId" - ] - }, - "DownloadModelResponseDto": { - "type": "object", - "properties": { - "message": { - "type": "string", - "example": "Starting download mistral-ins-7b-q4", - "description": "Message indicates Jan starting download corresponding model." - } - }, - "required": [ - "message" - ] - }, - "ModelDto": { - "type": "object", - "properties": { - "source_url": { - "type": "string", - "example": "https://huggingface.co/janhq/trinity-v1.2-GGUF/resolve/main/trinity-v1.2.Q4_K_M.gguf", - "description": "URL to the source of the model." - }, - "id": { - "type": "string", - "example": "trinity-v1.2-7b", - "description": "Unique identifier used in chat-completions model_name, matches folder name." - }, - "object": { - "type": "string", - "example": "model" - }, - "name": { - "type": "string", - "example": "Trinity-v1.2 7B Q4", - "description": "Name of the model." - }, - "version": { - "type": "string", - "default": "1.0", - "description": "The version number of the model." - }, - "description": { - "type": "string", - "example": "Trinity is an experimental model merge using the Slerp method. Recommended for daily assistance purposes.", - "description": "Description of the model." - }, - "format": { - "type": "string", - "example": "gguf", - "description": "State format of the model, distinct from the engine." 
- }, - "ctx_len": { - "type": "number", - "description": "Context length.", - "example": 4096 - }, - "prompt_template": { - "type": "string", - "example": "system\n{system_message}\nuser\n{prompt}\nassistant" - }, - "temperature": { - "type": "number", - "example": 0.7 - }, - "top_p": { - "type": "number", - "example": 0.95 - }, - "stream": { - "type": "boolean", - "example": true - }, - "max_tokens": { - "type": "number", - "example": 4096 - }, - "stop": { - "example": [], - "type": "array", - "items": { - "type": "string" - } - }, - "frequency_penalty": { - "type": "number", - "example": 0 - }, - "presence_penalty": { - "type": "number", - "example": 0 - }, - "author": { - "type": "string", - "example": "Jan" - }, - "tags": { - "example": [ - "7B", - "Merged", - "Featured" - ], - "type": "array", - "items": { - "type": "string" - } - }, - "size": { - "type": "number", - "example": 4370000000 - }, - "cover": { - "type": "string", - "example": "https://raw.githubusercontent.com/janhq/jan/main/models/trinity-v1.2-7b/cover.png" - }, - "engine": { - "type": "string", - "example": "cortex" - } - }, - "required": [ - "source_url", - "id", - "object", - "name", - "version", - "description", - "format", - "ctx_len", - "prompt_template", - "temperature", - "top_p", - "stream", - "max_tokens", - "stop", - "frequency_penalty", - "presence_penalty", - "author", - "tags", - "size", - "cover", - "engine" - ] - }, - "ListModelsResponseDto": { - "type": "object", - "properties": { - "object": { - "type": "string", - "example": "list", - "enum": [ - "list" - ] - }, - "data": { - "description": "List of models", - "type": "array", - "items": { - "$ref": "#/components/schemas/ModelDto" - } - } - }, - "required": [ - "object", - "data" - ] - }, - "UpdateModelDto": { - "type": "object", - "properties": {} - }, - "DeleteModelResponseDto": { - "type": "object", - "properties": { - "id": { - "type": "string", - "example": "mistral-ins-7b-q4", - "description": "The identifier of the model 
that was deleted." - }, - "object": { - "type": "string", - "example": "model", - "description": "Type of the object, indicating it's a model.", - "default": "model" - }, - "deleted": { - "type": "boolean", - "example": true, - "description": "Indicates whether the model was successfully deleted." - } - }, - "required": [ - "id", - "object", - "deleted" - ] - }, - "ChatCompletionMessage": { - "type": "object", - "properties": { - "content": { - "type": "string", - "description": "The Content of the chat message." - }, - "role": { - "enum": [ - "system", - "assistant", - "user" - ], - "type": "string", - "description": "The role of the entity in the chat completion." - } - }, - "required": [ - "content", - "role" - ] - }, - "CreateChatCompletionDto": { - "type": "object", - "properties": { - "messages": { - "description": "Array of chat messages to be used for generating the chat completion.", - "type": "array", - "items": { - "$ref": "#/components/schemas/ChatCompletionMessage" - } - }, - "model": { - "type": "string", - "description": "The unique identifier of the model." - }, - "stream": { - "type": "boolean", - "description": "Determines the format for output generation. If set to `true`, the output is generated continuously, allowing for real-time streaming of responses. If set to `false`, the output is delivered in a single JSON file." - }, - "max_tokens": { - "type": "number", - "description": "Sets the upper limit on the number of tokens the model can generate in a single output." - }, - "stop": { - "description": "Defines specific tokens or phrases that signal the model to stop producing further output.", - "type": "array", - "items": { - "type": "string" - } - }, - "frequency_penalty": { - "type": "number", - "description": "Modifies the likelihood of the model repeating the same words or phrases within a single output." 
- }, - "presence_penalty": { - "type": "number", - "description": "Reduces the likelihood of repeating tokens, promoting novelty in the output." - }, - "temperature": { - "type": "number", - "description": "Influences the randomness of the model's output." - }, - "top_p": { - "type": "number", - "description": "Sets probability threshold for more relevant outputs." - } - }, - "required": [ - "messages", - "model", - "stream", - "max_tokens", - "stop", - "frequency_penalty", - "presence_penalty", - "temperature", - "top_p" - ] - }, - "MessageDto": { - "type": "object", - "properties": { - "content": { - "type": "string", - "description": "The textual content of the chat message or completion generated by the model." - }, - "role": { - "type": "string", - "description": "The role of the participant in the chat, such as 'user' or 'system', indicating who is the sender of the message." - } - }, - "required": [ - "content", - "role" - ] - }, - "ChoiceDto": { - "type": "object", - "properties": { - "finish_reason": { - "type": "string", - "description": "The reason the chat completion ended, typically indicating whether the model completed the text naturally or was cut off." - }, - "index": { - "type": "number", - "description": "The index of the completion relative to other generated completions, useful for identifying its order in a batch request." - }, - "message": { - "description": "An object representing the message details involved in the chat completion, encapsulated within a MessageDto.", - "allOf": [ - { - "$ref": "#/components/schemas/MessageDto" - } - ] - } - }, - "required": [ - "finish_reason", - "index", - "message" - ] - }, - "UsageDto": { - "type": "object", - "properties": { - "completion_tokens": { - "type": "number", - "description": "The number of tokens used in the completion part of the response generated by the model." 
- }, - "prompt_tokens": { - "type": "number", - "description": "The number of tokens used in the prompt part of the chat input, which is provided to the model." - }, - "total_tokens": { - "type": "number", - "description": "The total number of tokens used in both the prompt and the completion, summarizing the entire token count of the chat operation." - } - }, - "required": [ - "completion_tokens", - "prompt_tokens", - "total_tokens" - ] - }, - "ChatCompletionResponseDto": { - "type": "object", - "properties": { - "choices": { - "description": "A list of choices generated by the chat model.", - "type": "array", - "items": { - "$ref": "#/components/schemas/ChoiceDto" - } - }, - "created": { - "type": "number", - "description": "The timestamp of when the chat completion was created, expressed as a Unix timestamp." - }, - "id": { - "type": "string", - "description": "The unique identifier for the chat completion." - }, - "model": { - "type": "string", - "description": "The identifier of the model used to generate the chat completion." - }, - "object": { - "type": "string", - "description": "The type of object, typically set to 'chat_completion' to denote the nature of the API response." - }, - "system_fingerprint": { - "type": "string", - "description": "A unique fingerprint that identifies the system configuration used during the chat completion." - }, - "usage": { - "description": "An object representing the usage statistics of the model for the current completion.", - "allOf": [ - { - "$ref": "#/components/schemas/UsageDto" - } - ] - } - }, - "required": [ - "choices", - "created", - "id", - "model", - "object", - "system_fingerprint", - "usage" - ] - }, - "CreateAssistantDto": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "The unique identifier of the assistant." - }, - "avatar": { - "type": "string", - "description": "The avatar of the assistant." 
- }, - "name": { - "type": "string", - "description": "The name of the assistant." - }, - "description": { - "type": "string", - "description": "The description of the assistant." - }, - "model": { - "type": "string", - "description": "The model of the assistant." - }, - "instructions": { - "type": "string", - "description": "The instructions for the assistant." - }, - "tools": { - "description": "The tools associated with the assistant.", - "type": "array", - "items": { - "$ref": "#/components/schemas/AssistantToolDto" - } - }, - "file_ids": { - "description": "The identifiers of the files that have been uploaded to the thread.", - "type": "array", - "items": { - "type": "string" - } - }, - "metadata": { - "type": "object", - "description": "The metadata of the assistant." - } - }, - "required": [ - "id", - "avatar", - "name", - "description", - "model", - "instructions", - "tools", - "file_ids" - ] - }, - "AssistantEntity": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "avatar": { - "type": "string" - }, - "thread_location": { - "type": "string" - }, - "object": { - "type": "string" - }, - "created_at": { - "type": "number" - }, - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "model": { - "type": "string" - }, - "instructions": { - "type": "string" - }, - "tools": { - "type": "array", - "items": { - "type": "object" - } - }, - "file_ids": { - "type": "array", - "items": { - "type": "string" - } - }, - "metadata": { - "type": "object" - } - }, - "required": [ - "id", - "avatar", - "object", - "created_at", - "name", - "model", - "file_ids" - ] - }, - "DeleteAssistantResponseDto": { - "type": "object", - "properties": { - "id": { - "type": "string", - "example": "assistant_123", - "description": "The identifier of the assistant that was deleted." 
- }, - "object": { - "type": "string", - "example": "assistant", - "description": "Type of the object, indicating it's a assistant.", - "default": "assistant" - }, - "deleted": { - "type": "boolean", - "example": true, - "description": "Indicates whether the assistant was successfully deleted." - } - }, - "required": [ - "id", - "object", - "deleted" - ] + } } - } + } + }, + "tags": ["Messages"] + }, + "get": { + "operationId": "MessagesController_findAll", + "summary": "List messages", + "description": "Retrieves all the messages in a thread.", + "parameters": [], + "responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListMessagesResponseDto" + } + } + } + } + }, + "tags": ["Messages"] + } + }, + "/messages/{id}": { + "get": { + "operationId": "MessagesController_findOne", + "summary": "Retrieve message", + "description": "Retrieves a specific message defined by a message's `id`.", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the message.", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetMessageResponseDto" + } + } + } + } + }, + "tags": ["Messages"] + }, + "patch": { + "operationId": "MessagesController_update", + "summary": "Update message", + "description": "Updates a specific message defined by a message's `id`.", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the message.", + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateMessageDto" + } + } + } + }, + "responses": { + "200": { + "description": "The message has been successfully updated.", + "content": { + "application/json": { + 
"schema": { + "$ref": "#/components/schemas/UpdateMessageDto" + } + } + } + }, + "tags": ["Messages"] + }, + "delete": { + "operationId": "MessagesController_remove", + "summary": "Delete message", + "description": "Deletes a specific message defined by a message's `id`.", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the message.", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successfully deleted the message.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeleteMessageResponseDto" + } + } + } + } + }, + "tags": ["Messages"] + } + }, + "/threads": { + "post": { + "operationId": "ThreadsController_create", + "summary": "Create thread", + "description": "Creates a new thread.", + "parameters": [], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateThreadDto" + } + } + } + }, + "responses": { + "201": { + "description": "", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ThreadEntity" + } + } + } + } + }, + "tags": ["Threads"] + }, + "get": { + "operationId": "ThreadsController_findAll", + "summary": "List threads", + "description": "Lists all the available threads along with their configurations.", + "parameters": [], + "responses": { + "200": { + "description": "", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ThreadEntity" + } + } + } + } + } + }, + "tags": ["Threads"] + } + }, + "/threads/{id}": { + "get": { + "operationId": "ThreadsController_findOne", + "summary": "Get thread", + "description": "Retrieves a thread along with its configurations.", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the thread.", + "schema": { + "type": "string" + } + } + ], + 
"responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetThreadResponseDto" + } + } + } + } + }, + "tags": ["Threads"] + }, + "patch": { + "operationId": "ThreadsController_update", + "summary": "Update thread", + "description": "Updates a thread's configurations.", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the thread.", + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateThreadDto" + } + } + } + }, + "responses": { + "200": { + "description": "The thread has been successfully updated.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateThreadDto" + } + } + } + } + }, + "tags": ["Threads"] + }, + "delete": { + "operationId": "ThreadsController_remove", + "summary": "Delete thread", + "description": "Deletes a specific thread defined by a thread `id`.", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the thread.", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "The thread has been successfully deleted.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeleteThreadResponseDto" + } + } + } + } + }, + "tags": ["Threads"] + } + }, + "/models": { + "post": { + "operationId": "ModelsController_create", + "summary": "Create model", + "description": "Creates a model `.json` instance file manually.", + "parameters": [], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateModelDto" + } + } + } + }, + "responses": { + "201": { + "description": "The model has been successfully created.", + "content": { + "application/json": { + "schema": { 
+ "$ref": "#/components/schemas/StartModelSuccessDto" + } + } + } + } + }, + "tags": ["Models"] + }, + "get": { + "operationId": "ModelsController_findAll", + "summary": "List models", + "description": "Lists the currently available models, and provides basic information about each one such as the owner and availability. [Equivalent to OpenAI's list model](https://platform.openai.com/docs/api-reference/models/list).", + "parameters": [], + "responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListModelsResponseDto" + } + } + } + } + }, + "tags": ["Models"] + } + }, + "/models/{modelId}/start": { + "post": { + "operationId": "ModelsController_startModel", + "summary": "Start model", + "description": "Starts a model operation defined by a model `id`.", + "parameters": [ + { + "name": "modelId", + "required": true, + "in": "path", + "description": "The unique identifier of the model.", + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ModelSettingParamsDto" + } + } + } + }, + "responses": { + "200": { + "description": "The model has been successfully started.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StartModelSuccessDto" + } + } + } + } + }, + "tags": ["Models"] + } + }, + "/models/{modelId}/stop": { + "post": { + "operationId": "ModelsController_stopModel", + "summary": "Stop model", + "description": "Stops a model operation defined by a model `id`.", + "parameters": [ + { + "name": "modelId", + "required": true, + "in": "path", + "description": "The unique identifier of the model.", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "The model has been successfully stopped.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StartModelSuccessDto" + } 
+ } + } + } + }, + "tags": ["Models"] + } + }, + "/models/download/{modelId}": { + "get": { + "operationId": "ModelsController_downloadModel", + "summary": "Download model", + "description": "Downloads a specific model instance.", + "parameters": [ + { + "name": "modelId", + "required": true, + "in": "path", + "description": "The unique identifier of the model.", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DownloadModelResponseDto" + } + } + } + } + }, + "tags": ["Models"] + } + }, + "/models/{id}": { + "get": { + "operationId": "ModelsController_findOne", + "summary": "Get model", + "description": "Retrieves a model instance, providing basic information about the model such as the owner and permissions. [Equivalent to OpenAI's list model](https://platform.openai.com/docs/api-reference/models/retrieve).", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the model.", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ModelDto" + } + } + } + } + }, + "tags": ["Models"] + }, + "patch": { + "operationId": "ModelsController_update", + "summary": "Update model", + "description": "Updates a model instance defined by a model's `id`.", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the model.", + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateModelDto" + } + } + } + }, + "responses": { + "200": { + "description": "The model has been successfully updated.", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/UpdateModelDto" + } + } + } + } + }, + "tags": ["Models"] + }, + "delete": { + "operationId": "ModelsController_remove", + "summary": "Delete model", + "description": "Deletes a model. [Equivalent to OpenAI's delete model](https://platform.openai.com/docs/api-reference/models/delete).", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the model.", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "The model has been successfully deleted.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeleteModelResponseDto" + } + } + } + } + }, + "tags": ["Models"] + } + }, + "/chat/completions": { + "post": { + "operationId": "ChatController_create", + "summary": "Create chat completion", + "description": "Creates a model response for the given conversation.", + "parameters": [], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateChatCompletionDto" + } + } + } + }, + "responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatCompletionResponseDto" + } + } + } + } + }, + "tags": ["Inference"] + } + }, + "/assistants": { + "post": { + "operationId": "AssistantsController_create", + "summary": "Create assistant", + "description": "Creates a new assistant.", + "parameters": [], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateAssistantDto" + } + } + } + }, + "responses": { + "201": { + "description": "The assistant has been successfully created." 
+ } + }, + "tags": ["Assistants"] + }, + "get": { + "operationId": "AssistantsController_findAll", + "summary": "List assistants", + "description": "Retrieves all the available assistants along with their settings.", + "parameters": [], + "responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AssistantEntity" + } + } + } + } + } + }, + "tags": ["Assistants"] + } + }, + "/assistants/{id}": { + "get": { + "operationId": "AssistantsController_findOne", + "summary": "Get assistant", + "description": "Retrieves a specific assistant defined by an assistant's `id`.", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the assistant.", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AssistantEntity" + } + } + } + } + }, + "tags": ["Assistants"] + }, + "delete": { + "operationId": "AssistantsController_remove", + "summary": "Delete assistant", + "description": "Deletes a specific assistant defined by an assistant's `id`.", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the assistant.", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "The assistant has been successfully deleted.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeleteAssistantResponseDto" + } + } + } + } + }, + "tags": ["Assistants"] + } } -} \ No newline at end of file + }, + "info": { + "title": "Cortex API", + "description": "Cortex API provides a command-line interface (CLI) for seamless interaction with large language models (LLMs). 
Fully compatible with the [OpenAI API](https://platform.openai.com/docs/api-reference), it enables straightforward command execution and management of LLM interactions.", + "version": "1.0", + "contact": {} + }, + "tags": [ + { + "name": "Inference", + "description": "This endpoint initiates interaction with a Large Language Model (LLM)." + }, + { + "name": "Assistants", + "description": "These endpoints manage the lifecycle of an Assistant within a conversation thread." + }, + { + "name": "Models", + "description": "These endpoints provide a list and descriptions of all available models within the Cortex framework." + }, + { + "name": "Messages", + "description": "These endpoints manage the retrieval and storage of conversation content, including responses from LLMs and other metadata related to chat interactions." + }, + { + "name": "Threads", + "description": "These endpoints handle the creation, retrieval, updating, and deletion of conversation threads." + } + ], + "servers": [ + { + "url": "http://localhost:1337" + }, + { + "url": "http://localhost:1337/v1" + } + ], + "components": { + "schemas": { + "ContentValueDto": { + "type": "object", + "properties": { + "value": { + "type": "string", + "description": "The text's value." + }, + "annotations": { + "description": "The text's annotations that categorize the text.", + "type": "array", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "The name or title of the text." + }, + "size": { + "type": "number", + "description": "The text's size in bytes." + } + }, + "required": ["value", "annotations"] + }, + "ThreadContentDto": { + "type": "object", + "properties": { + "type": { + "enum": ["text", "image", "pdf"], + "type": "string", + "description": "The type of content." 
+ }, + "text": { + "description": "The content details.", + "allOf": [ + { + "$ref": "#/components/schemas/ContentValueDto" + } + ] + } + }, + "required": ["type", "text"] + }, + "CreateMessageDto": { + "type": "object", + "properties": { + "thread_id": { + "type": "string", + "description": "The ID of the thread to which the message will be posted." + }, + "assistant_id": { + "type": "string", + "description": "The assistant's unique identifier." + }, + "role": { + "enum": ["system", "assistant", "user"], + "type": "string", + "description": "The sources of the messages." + }, + "content": { + "description": "The content of the messages.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ThreadContentDto" + } + }, + "status": { + "enum": ["ready", "pending", "error", "stopped"], + "type": "string", + "description": "Current status of the message." + }, + "metadata": { + "type": "object", + "description": "Optional dictionary for additional unstructured message information." + }, + "type": { + "type": "string", + "description": "Type of the message." + }, + "error_code": { + "enum": [ + "invalid_api_key", + "insufficient_quota", + "invalid_request_error", + "unknown" + ], + "type": "string", + "description": "Specifies the cause of any error." + } + }, + "required": ["thread_id", "role", "content", "status"] + }, + "ListMessageObjectDto": { + "type": "object", + "properties": { + "id": { + "type": "string", + "example": "msg_abc123", + "description": "The identifier of the message." + }, + "object": { + "type": "string", + "example": "thread.message", + "description": "Type of the object, indicating it's a thread message." + }, + "created_at": { + "type": "integer", + "example": 1699017614, + "description": "Unix timestamp representing the creation time of the message." + }, + "thread_id": { + "type": "string", + "example": "thread_abc123", + "description": "Identifier of the thread to which this message belongs." 
+ }, + "role": { + "type": "string", + "example": "user", + "description": "Role of the sender, either 'user' or 'assistant'." + }, + "file_ids": { + "description": "Array of file IDs associated with the message, if any.", + "example": [], + "type": "array", + "items": { + "type": "string" + } + }, + "assistant_id": { + "type": "string", + "nullable": true, + "description": "Identifier of the assistant involved in the message, if applicable.", + "example": null + }, + "run_id": { + "type": "string", + "nullable": true, + "description": "Run ID associated with the message, if applicable.", + "example": null + }, + "metadata": { + "type": "object", + "example": {}, + "description": "Metadata associated with the message." + } + }, + "required": [ + "id", + "object", + "created_at", + "thread_id", + "role", + "file_ids", + "assistant_id", + "run_id", + "metadata" + ] + }, + "ListMessagesResponseDto": { + "type": "object", + "properties": { + "object": { + "type": "string", + "example": "list", + "description": "Type of the object, indicating it's a list." + }, + "data": { + "description": "Array of message objects.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ListMessageObjectDto" + } + }, + "first_id": { + "type": "string", + "example": "msg_abc123", + "description": "Identifier of the first message in the list." + }, + "last_id": { + "type": "string", + "example": "msg_abc456", + "description": "Identifier of the last message in the list." + }, + "has_more": { + "type": "boolean", + "example": false, + "description": "Indicates whether there are more messages to retrieve." + } + }, + "required": ["object", "data", "first_id", "last_id", "has_more"] + }, + "ContentDto": { + "type": "object", + "properties": { + "type": { + "type": "string", + "example": "text", + "description": "Type of content, e.g., \"text\"." + }, + "text": { + "type": "object", + "example": { + "value": "How does AI work? 
Explain it in simple terms.", + "annotations": [] + }, + "description": "Text content of the message along with any annotations." + } + }, + "required": ["type", "text"] + }, + "GetMessageResponseDto": { + "type": "object", + "properties": { + "id": { + "type": "string", + "example": "msg_abc123", + "description": "The identifier of the message." + }, + "object": { + "type": "string", + "example": "thread.message", + "description": "Type of the object, indicating it's a thread message.", + "default": "thread.message" + }, + "created_at": { + "type": "integer", + "example": 1699017614, + "description": "Unix timestamp representing the creation time of the message." + }, + "thread_id": { + "type": "string", + "example": "thread_abc123", + "description": "Identifier of the thread to which this message belongs." + }, + "role": { + "type": "string", + "example": "user", + "description": "Role of the sender, either 'user' or 'assistant'." + }, + "content": { + "description": "Array of content objects detailing the message content.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ContentDto" + } + }, + "file_ids": { + "example": [], + "description": "Array of file IDs associated with the message, if any.", + "type": "array", + "items": { + "type": "string" + } + }, + "assistant_id": { + "type": "string", + "nullable": true, + "example": null, + "description": "Identifier of the assistant involved in the message, if applicable." + }, + "run_id": { + "type": "string", + "nullable": true, + "example": null, + "description": "Run ID associated with the message, if applicable." + }, + "metadata": { + "type": "object", + "example": {}, + "description": "Metadata associated with the message." 
+ } + }, + "required": [ + "id", + "object", + "created_at", + "thread_id", + "role", + "content", + "file_ids", + "assistant_id", + "run_id", + "metadata" + ] + }, + "UpdateMessageDto": { + "type": "object", + "properties": {} + }, + "DeleteMessageResponseDto": { + "type": "object", + "properties": { + "id": { + "type": "string", + "example": "message_123", + "description": "The identifier of the message that was deleted." + }, + "object": { + "type": "string", + "example": "message", + "description": "Type of the object, indicating it's a message.", + "default": "message" + }, + "deleted": { + "type": "boolean", + "example": true, + "description": "Indicates whether the message was successfully deleted." + } + }, + "required": ["id", "object", "deleted"] + }, + "ModelSettingParamsDto": { + "type": "object", + "properties": { + "ctx_len": { + "type": "number", + "description": "Sets the maximum input the model can use to generate a response, it varies with the model used." + }, + "ngl": { + "type": "number", + "description": "Determines GPU layer usage." + }, + "embedding": { + "type": "boolean", + "description": "Enables embedding utilization for tasks like document-enhanced chat in RAG-based applications." + }, + "n_parallel": { + "type": "number", + "description": "Number of parallel processing units to use." + }, + "cpu_threads": { + "type": "number", + "description": "Determines CPU inference threads, limited by hardware and OS. " + }, + "prompt_template": { + "type": "string", + "description": "A predefined text or framework that guides the AI model's response generation." + }, + "system_prompt": { + "type": "string", + "description": "Specific prompt used by the system for generating model outputs." + }, + "ai_prompt": { + "type": "string", + "description": "The prompt fed into the AI, typically to guide or specify the nature of the content it should generate." 
+ }, + "user_prompt": { + "type": "string", + "description": "Customizable prompt input by the user to direct the model’s output generation." + }, + "llama_model_path": { + "type": "string", + "description": "File path to a specific llama model." + }, + "mmproj": { + "type": "string", + "description": "The mmproj is a projection matrix that is used to project the embeddings from CLIP into tokens usable by llama/mistral." + }, + "cont_batching": { + "type": "boolean", + "description": "Controls continuous batching, enhancing throughput for LLM inference." + }, + "vision_model": { + "type": "boolean", + "description": "Specifies if a vision-based model (for image processing) should be used." + }, + "text_model": { + "type": "boolean", + "description": "Specifies if a text-based model is to be utilized, for tasks like text generation or analysis." + } + } + }, + "ModelRuntimeParamsDto": { + "type": "object", + "properties": { + "temperature": { + "type": "number", + "description": "Influences the randomness of the model's output." + }, + "token_limit": { + "type": "number", + "description": "Sets the maximum number of pieces (like words or characters) the model will produce at one time." + }, + "top_k": { + "type": "number", + "description": "Limits the model's choices when it's deciding what to write next." + }, + "top_p": { + "type": "number", + "description": "Sets probability threshold for more relevant outputs." + }, + "stream": { + "type": "boolean", + "description": "Determines the format for output generation. If set to `true`, the output is generated continuously, allowing for real-time streaming of responses. If set to `false`, the output is delivered in a single JSON file." + }, + "max_tokens": { + "type": "number", + "description": "Sets the upper limit on the number of tokens the model can generate in a single output." 
+ }, + "stop": { + "description": "Defines specific tokens or phrases that signal the model to stop producing further output.", + "type": "array", + "items": { + "type": "string" + } + }, + "frequency_penalty": { + "type": "number", + "description": "Modifies the likelihood of the model repeating the same words or phrases within a single output." + }, + "presence_penalty": { + "type": "number", + "description": "Reduces the likelihood of repeating tokens, promoting novelty in the output." + }, + "engine": { + "type": "string", + "description": "The engine used to run the model." + } + } + }, + "CreateThreadModelInfoDto": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The unique identifier of the thread." + }, + "settings": { + "description": "The settings of the thread.", + "allOf": [ + { + "$ref": "#/components/schemas/ModelSettingParamsDto" + } + ] + }, + "parameters": { + "description": "The parameters of the thread.", + "allOf": [ + { + "$ref": "#/components/schemas/ModelRuntimeParamsDto" + } + ] + }, + "engine": { + "type": "string", + "description": "The engine used in the thread to operate the model." + } + }, + "required": ["id", "settings", "parameters"] + }, + "AssistantToolDto": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "The type of the assistant's tool." + }, + "enabled": { + "type": "boolean", + "description": "Enable or disable the assistant's tool." + }, + "settings": { + "type": "object", + "description": "The setting of the assistant's tool." + } + }, + "required": ["type", "enabled", "settings"] + }, + "CreateThreadAssistantDto": { + "type": "object", + "properties": { + "assistant_id": { + "type": "string", + "description": "The unique identifier of the assistant." + }, + "assistant_name": { + "type": "string", + "description": "The name of the assistant." 
+ }, + "model": { + "description": "The model's unique identifier and settings.", + "allOf": [ + { + "$ref": "#/components/schemas/CreateThreadModelInfoDto" + } + ] + }, + "instructions": { + "type": "string", + "description": "The assistant's specific instructions." + }, + "tools": { + "description": "The thread's tool(Knowledge Retrieval) configurations.", + "type": "array", + "items": { + "$ref": "#/components/schemas/AssistantToolDto" + } + } + }, + "required": ["assistant_id", "assistant_name", "model"] + }, + "CreateThreadDto": { + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "The title of the thread." + }, + "assistants": { + "description": "The details of the thread's settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/CreateThreadAssistantDto" + } + } + }, + "required": ["title", "assistants"] + }, + "ThreadEntity": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "object": { + "type": "string" + }, + "title": { + "type": "string" + }, + "assistants": { + "type": "array", + "items": { + "type": "object" + } + }, + "createdAt": { + "type": "number" + }, + "updatedAt": { + "type": "number" + }, + "metadata": { + "type": "object" + } + }, + "required": ["id", "object", "title", "assistants", "createdAt"] + }, + "GetThreadResponseDto": { + "type": "object", + "properties": { + "id": { + "type": "string", + "example": "thread_abc123", + "description": "The identifier of the thread." + }, + "object": { + "type": "string", + "example": "thread", + "description": "Type of the object" + }, + "created_at": { + "type": "integer", + "example": 1699014083, + "description": "Unix timestamp representing the creation time of the thread." 
+ }, + "assistants": { + "example": ["assistant-001"], + "description": "List of assistants involved in the thread.", + "type": "array", + "items": { + "type": "string" + } + }, + "metadata": { + "type": "object", + "example": {}, + "description": "Metadata associated with the thread." + }, + "messages": { + "example": [], + "description": "List of messages within the thread.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "id", + "object", + "created_at", + "assistants", + "metadata", + "messages" + ] + }, + "UpdateThreadDto": { + "type": "object", + "properties": {} + }, + "DeleteThreadResponseDto": { + "type": "object", + "properties": { + "id": { + "type": "string", + "example": "thread_123", + "description": "The identifier of the thread that was deleted." + }, + "object": { + "type": "string", + "example": "thread", + "description": "Type of the object, indicating it's a thread.", + "default": "thread" + }, + "deleted": { + "type": "boolean", + "example": true, + "description": "Indicates whether the thread was successfully deleted." + } + }, + "required": ["id", "object", "deleted"] + }, + "ModelArtifactDto": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "The URL source of the model." + } + }, + "required": ["url"] + }, + "ModelMetadataDto": { + "type": "object", + "properties": { + "author": { + "type": "string", + "description": "The author of the model." + }, + "tags": { + "description": "The model's tags.", + "type": "array", + "items": { + "type": "string" + } + }, + "size": { + "type": "number", + "description": "The model's size." + }, + "cover": { + "type": "string", + "description": "The model's cover." + } + }, + "required": ["author", "tags", "size"] + }, + "CreateModelDto": { + "type": "object", + "properties": { + "version": { + "type": "string", + "description": "The version of the model." 
+ }, + "format": { + "enum": ["gguf", "api"], + "type": "string", + "description": "The state format of the model." + }, + "sources": { + "description": "The URL sources from which the model downloaded or accessed.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ModelArtifactDto" + } + }, + "id": { + "type": "string", + "description": "The unique identifier of the model." + }, + "name": { + "type": "string", + "description": "The name of the model." + }, + "description": { + "type": "string", + "description": "A brief description of the model." + }, + "settings": { + "description": "The settings parameters of the model.", + "allOf": [ + { + "$ref": "#/components/schemas/ModelSettingParamsDto" + } + ] + }, + "parameters": { + "description": "The parameters configuration of the model.", + "allOf": [ + { + "$ref": "#/components/schemas/ModelRuntimeParamsDto" + } + ] + }, + "metadata": { + "description": "The metadata of the model.", + "allOf": [ + { + "$ref": "#/components/schemas/ModelMetadataDto" + } + ] + }, + "engine": { + "type": "string", + "description": "The engine used to run the model." + } + }, + "required": [ + "version", + "format", + "sources", + "id", + "name", + "description", + "settings", + "parameters", + "metadata", + "engine" + ] + }, + "StartModelSuccessDto": { + "type": "object", + "properties": { + "message": { + "type": "string", + "description": "The success or error message displayed when a model is successfully loaded or fails to load." + }, + "modelId": { + "type": "string", + "description": "The unique identifier of the model." + } + }, + "required": ["message", "modelId"] + }, + "DownloadModelResponseDto": { + "type": "object", + "properties": { + "message": { + "type": "string", + "example": "Starting download mistral-ins-7b-q4", + "description": "Message indicates Jan starting download corresponding model." 
+ } + }, + "required": ["message"] + }, + "ModelDto": { + "type": "object", + "properties": { + "source_url": { + "type": "string", + "example": "https://huggingface.co/janhq/trinity-v1.2-GGUF/resolve/main/trinity-v1.2.Q4_K_M.gguf", + "description": "URL to the source of the model." + }, + "id": { + "type": "string", + "example": "trinity-v1.2-7b", + "description": "Unique identifier used in chat-completions model_name, matches folder name." + }, + "object": { + "type": "string", + "example": "model" + }, + "name": { + "type": "string", + "example": "Trinity-v1.2 7B Q4", + "description": "Name of the model." + }, + "version": { + "type": "string", + "default": "1.0", + "description": "The version number of the model." + }, + "description": { + "type": "string", + "example": "Trinity is an experimental model merge using the Slerp method. Recommended for daily assistance purposes.", + "description": "Description of the model." + }, + "format": { + "type": "string", + "example": "gguf", + "description": "State format of the model, distinct from the engine." 
+ }, + "ctx_len": { + "type": "number", + "description": "Context length.", + "example": 4096 + }, + "prompt_template": { + "type": "string", + "example": "system\n{system_message}\nuser\n{prompt}\nassistant" + }, + "temperature": { + "type": "number", + "example": 0.7 + }, + "top_p": { + "type": "number", + "example": 0.95 + }, + "stream": { + "type": "boolean", + "example": true + }, + "max_tokens": { + "type": "number", + "example": 4096 + }, + "stop": { + "example": [], + "type": "array", + "items": { + "type": "string" + } + }, + "frequency_penalty": { + "type": "number", + "example": 0 + }, + "presence_penalty": { + "type": "number", + "example": 0 + }, + "author": { + "type": "string", + "example": "Jan" + }, + "tags": { + "example": ["7B", "Merged", "Featured"], + "type": "array", + "items": { + "type": "string" + } + }, + "size": { + "type": "number", + "example": 4370000000 + }, + "cover": { + "type": "string", + "example": "https://raw.githubusercontent.com/menloresearch/jan/main/models/trinity-v1.2-7b/cover.png" + }, + "engine": { + "type": "string", + "example": "cortex" + } + }, + "required": [ + "source_url", + "id", + "object", + "name", + "version", + "description", + "format", + "ctx_len", + "prompt_template", + "temperature", + "top_p", + "stream", + "max_tokens", + "stop", + "frequency_penalty", + "presence_penalty", + "author", + "tags", + "size", + "cover", + "engine" + ] + }, + "ListModelsResponseDto": { + "type": "object", + "properties": { + "object": { + "type": "string", + "example": "list", + "enum": ["list"] + }, + "data": { + "description": "List of models", + "type": "array", + "items": { + "$ref": "#/components/schemas/ModelDto" + } + } + }, + "required": ["object", "data"] + }, + "UpdateModelDto": { + "type": "object", + "properties": {} + }, + "DeleteModelResponseDto": { + "type": "object", + "properties": { + "id": { + "type": "string", + "example": "mistral-ins-7b-q4", + "description": "The identifier of the model that was 
deleted." + }, + "object": { + "type": "string", + "example": "model", + "description": "Type of the object, indicating it's a model.", + "default": "model" + }, + "deleted": { + "type": "boolean", + "example": true, + "description": "Indicates whether the model was successfully deleted." + } + }, + "required": ["id", "object", "deleted"] + }, + "ChatCompletionMessage": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "The Content of the chat message." + }, + "role": { + "enum": ["system", "assistant", "user"], + "type": "string", + "description": "The role of the entity in the chat completion." + } + }, + "required": ["content", "role"] + }, + "CreateChatCompletionDto": { + "type": "object", + "properties": { + "messages": { + "description": "Array of chat messages to be used for generating the chat completion.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ChatCompletionMessage" + } + }, + "model": { + "type": "string", + "description": "The unique identifier of the model." + }, + "stream": { + "type": "boolean", + "description": "Determines the format for output generation. If set to `true`, the output is generated continuously, allowing for real-time streaming of responses. If set to `false`, the output is delivered in a single JSON file." + }, + "max_tokens": { + "type": "number", + "description": "Sets the upper limit on the number of tokens the model can generate in a single output." + }, + "stop": { + "description": "Defines specific tokens or phrases that signal the model to stop producing further output.", + "type": "array", + "items": { + "type": "string" + } + }, + "frequency_penalty": { + "type": "number", + "description": "Modifies the likelihood of the model repeating the same words or phrases within a single output." + }, + "presence_penalty": { + "type": "number", + "description": "Reduces the likelihood of repeating tokens, promoting novelty in the output." 
+ }, + "temperature": { + "type": "number", + "description": "Influences the randomness of the model's output." + }, + "top_p": { + "type": "number", + "description": "Sets probability threshold for more relevant outputs." + } + }, + "required": [ + "messages", + "model", + "stream", + "max_tokens", + "stop", + "frequency_penalty", + "presence_penalty", + "temperature", + "top_p" + ] + }, + "MessageDto": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "The textual content of the chat message or completion generated by the model." + }, + "role": { + "type": "string", + "description": "The role of the participant in the chat, such as 'user' or 'system', indicating who is the sender of the message." + } + }, + "required": ["content", "role"] + }, + "ChoiceDto": { + "type": "object", + "properties": { + "finish_reason": { + "type": "string", + "description": "The reason the chat completion ended, typically indicating whether the model completed the text naturally or was cut off." + }, + "index": { + "type": "number", + "description": "The index of the completion relative to other generated completions, useful for identifying its order in a batch request." + }, + "message": { + "description": "An object representing the message details involved in the chat completion, encapsulated within a MessageDto.", + "allOf": [ + { + "$ref": "#/components/schemas/MessageDto" + } + ] + } + }, + "required": ["finish_reason", "index", "message"] + }, + "UsageDto": { + "type": "object", + "properties": { + "completion_tokens": { + "type": "number", + "description": "The number of tokens used in the completion part of the response generated by the model." + }, + "prompt_tokens": { + "type": "number", + "description": "The number of tokens used in the prompt part of the chat input, which is provided to the model." 
+ }, + "total_tokens": { + "type": "number", + "description": "The total number of tokens used in both the prompt and the completion, summarizing the entire token count of the chat operation." + } + }, + "required": ["completion_tokens", "prompt_tokens", "total_tokens"] + }, + "ChatCompletionResponseDto": { + "type": "object", + "properties": { + "choices": { + "description": "A list of choices generated by the chat model.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ChoiceDto" + } + }, + "created": { + "type": "number", + "description": "The timestamp of when the chat completion was created, expressed as a Unix timestamp." + }, + "id": { + "type": "string", + "description": "The unique identifier for the chat completion." + }, + "model": { + "type": "string", + "description": "The identifier of the model used to generate the chat completion." + }, + "object": { + "type": "string", + "description": "The type of object, typically set to 'chat_completion' to denote the nature of the API response." + }, + "system_fingerprint": { + "type": "string", + "description": "A unique fingerprint that identifies the system configuration used during the chat completion." + }, + "usage": { + "description": "An object representing the usage statistics of the model for the current completion.", + "allOf": [ + { + "$ref": "#/components/schemas/UsageDto" + } + ] + } + }, + "required": [ + "choices", + "created", + "id", + "model", + "object", + "system_fingerprint", + "usage" + ] + }, + "CreateAssistantDto": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The unique identifier of the assistant." + }, + "avatar": { + "type": "string", + "description": "The avatar of the assistant." + }, + "name": { + "type": "string", + "description": "The name of the assistant." + }, + "description": { + "type": "string", + "description": "The description of the assistant." 
+ }, + "model": { + "type": "string", + "description": "The model of the assistant." + }, + "instructions": { + "type": "string", + "description": "The instructions for the assistant." + }, + "tools": { + "description": "The tools associated with the assistant.", + "type": "array", + "items": { + "$ref": "#/components/schemas/AssistantToolDto" + } + }, + "file_ids": { + "description": "The identifiers of the files that have been uploaded to the thread.", + "type": "array", + "items": { + "type": "string" + } + }, + "metadata": { + "type": "object", + "description": "The metadata of the assistant." + } + }, + "required": [ + "id", + "avatar", + "name", + "description", + "model", + "instructions", + "tools", + "file_ids" + ] + }, + "AssistantEntity": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "avatar": { + "type": "string" + }, + "thread_location": { + "type": "string" + }, + "object": { + "type": "string" + }, + "created_at": { + "type": "number" + }, + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "model": { + "type": "string" + }, + "instructions": { + "type": "string" + }, + "tools": { + "type": "array", + "items": { + "type": "object" + } + }, + "file_ids": { + "type": "array", + "items": { + "type": "string" + } + }, + "metadata": { + "type": "object" + } + }, + "required": [ + "id", + "avatar", + "object", + "created_at", + "name", + "model", + "file_ids" + ] + }, + "DeleteAssistantResponseDto": { + "type": "object", + "properties": { + "id": { + "type": "string", + "example": "assistant_123", + "description": "The identifier of the assistant that was deleted." + }, + "object": { + "type": "string", + "example": "assistant", + "description": "Type of the object, indicating it's a assistant.", + "default": "assistant" + }, + "deleted": { + "type": "boolean", + "example": true, + "description": "Indicates whether the assistant was successfully deleted." 
+ } + }, + "required": ["id", "object", "deleted"] + } + } + } +} diff --git a/docs/public/sitemap-0.xml b/docs/public/sitemap-0.xml index 2b4a04975..517d84329 100644 --- a/docs/public/sitemap-0.xml +++ b/docs/public/sitemap-0.xml @@ -1,129 +1,148 @@ -https://jan.ai2024-09-09T08:19:45.721Zdaily1 -https://jan.ai/about2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/handbook2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/handbook/analytics2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/handbook/engineering2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/handbook/engineering/ci-cd2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/handbook/engineering/qa2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/handbook/product-design2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/handbook/project-management2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/handbook/strategy2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/handbook/website-docs2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/investors2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/team2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/vision2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/about/wall-of-love2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/blog2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2023-12-21-faster-inference-across-platform2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-01-16-settings-options-right-panel2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-01-29-local-api-server2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-02-05-jan-data-folder2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-02-26-home-servers-with-helm2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-03-06-ui-revamp-settings2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-03-11-import-models2024-09-09T08:19:45.722Zdaily1 
-https://jan.ai/changelog/2024-03-19-nitro-tensorrt-llm-extension2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-04-02-groq-api-integration2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-04-15-new-mistral-extension2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-04-25-llama3-command-r-hugginface2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-05-20-llamacpp-upgrade-new-remote-models2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-05-28-cohere-aya-23-8b-35b-phi-3-medium2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-06-21-nvidia-nim-support2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-07-15-claude-3-5-support2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/changelog/2024-09-01-llama3-1-gemma2-support2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/architecture2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/assistants2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/build-extension2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/chat2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/init2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/kill2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/models2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/models/download2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/models/get2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/models/list2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/models/remove2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/models/start2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/models/stop2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/models/update2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/ps2024-09-09T08:19:45.722Zdaily1 
-https://jan.ai/cortex/cli/pull2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/run2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cli/serve2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/command-line2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cortex-cpp2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cortex-llamacpp2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cortex-openvino2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cortex-python2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/cortex-tensorrt-llm2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/embeddings2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/embeddings/overview2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/error-codes2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/ext-architecture2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/fine-tuning2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/fine-tuning/overview2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/function-calling2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/hardware2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/installation2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/installation/linux2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/installation/mac2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/installation/windows2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/model-operations2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/model-operations/overview2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/py-library2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/quickstart2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/rag2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/rag/overview2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/server2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/text-generation2024-09-09T08:19:45.722Zdaily1 
-https://jan.ai/cortex/ts-library2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/vision2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/cortex/vision/overview2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/assistants2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/built-in/llama-cpp2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/built-in/tensorrt-llm2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/data-folder2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/desktop2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/desktop/linux2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/desktop/mac2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/desktop/windows2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/error-codes2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/extensions2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/install-extensions2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/models2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/models/manage-models2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/models/model-parameters2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/quickstart2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/remote-models/anthropic2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/remote-models/azure2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/remote-models/cohere2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/remote-models/generic-openai2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/remote-models/groq2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/remote-models/martian2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/remote-models/mistralai2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/remote-models/nvidia-nim2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/remote-models/openai2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/remote-models/openrouter2024-09-09T08:19:45.722Zdaily1 
-https://jan.ai/docs/remote-models/triton2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/settings2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/shortcuts2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/threads2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/tools/retrieval2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/docs/troubleshooting2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/download2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/integrations2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/integrations/coding/continue-dev2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/integrations/function-calling/interpreter2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/integrations/messaging/llmcord2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/integrations/workflow-automation/raycast2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/post/benchmarking-nvidia-tensorrt-llm2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/post/bitdefender2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/post/data-is-moat2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/post/rag-is-not-enough2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/privacy2024-09-09T08:19:45.722Zdaily1 -https://jan.ai/support2024-09-09T08:19:45.722Zdaily1 +https://jan.ai2025-03-10T05:06:47.876Zdaily1 +https://jan.ai/about2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/about/handbook2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/about/handbook/analytics2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/about/handbook/engineering2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/about/handbook/engineering/ci-cd2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/about/handbook/engineering/qa2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/about/handbook/product-design2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/about/handbook/project-management2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/about/handbook/strategy2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/about/handbook/website-docs2025-03-10T05:06:47.877Zdaily1 
+https://jan.ai/about/investors2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/about/team2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/about/vision2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/about/wall-of-love2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/blog2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2023-12-21-faster-inference-across-platform2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-01-16-settings-options-right-panel2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-01-29-local-api-server2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-02-05-jan-data-folder2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-02-10-jan-is-more-stable2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-02-26-home-servers-with-helm2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-03-06-ui-revamp-settings2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-03-11-import-models2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-03-19-nitro-tensorrt-llm-extension2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-04-02-groq-api-integration2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-04-15-new-mistral-extension2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-04-25-llama3-command-r-hugginface2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-05-20-llamacpp-upgrade-new-remote-models2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-05-28-cohere-aya-23-8b-35b-phi-3-medium2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-06-21-nvidia-nim-support2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-07-15-claude-3-5-support2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-09-01-llama3-1-gemma2-support2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-09-17-improved-cpu-performance2025-03-10T05:06:47.877Zdaily1 
+https://jan.ai/changelog/2024-10-24-jan-stable2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-11-22-jan-bugs2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-11.14-jan-supports-qwen-coder2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-12-03-jan-is-faster2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-12-05-jan-hot-fix-mac2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2024-12-30-jan-new-privacy2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2025-01-06-key-issues-resolved2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/changelog/2025-01-23-deepseek-r1-jan2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/architecture2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/assistants2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/build-extension2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/chat2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/init2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/kill2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/models2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/models/download2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/models/get2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/models/list2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/models/remove2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/models/start2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/models/stop2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/models/update2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/ps2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/pull2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/run2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cli/serve2025-03-10T05:06:47.877Zdaily1 
+https://jan.ai/cortex/command-line2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cortex-cpp2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cortex-llamacpp2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cortex-openvino2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cortex-python2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/cortex-tensorrt-llm2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/embeddings2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/embeddings/overview2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/error-codes2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/ext-architecture2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/fine-tuning2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/fine-tuning/overview2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/function-calling2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/hardware2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/installation2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/installation/linux2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/installation/mac2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/installation/windows2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/model-operations2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/model-operations/overview2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/py-library2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/quickstart2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/rag2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/rag/overview2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/server2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/text-generation2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/ts-library2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/vision2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/cortex/vision/overview2025-03-10T05:06:47.877Zdaily1 
+https://jan.ai/docs2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/api-server2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/assistants2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/configure-extensions2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/data-folder2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/desktop2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/desktop/linux2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/desktop/mac2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/desktop/windows2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/error-codes2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/extensions2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/extensions-settings/model-management2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/extensions-settings/system-monitoring2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/install-engines2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/install-extensions2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/local-engines/llama-cpp2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/models2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/models/manage-models2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/models/model-parameters2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/privacy2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/privacy-policy2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/quickstart2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/remote-models/anthropic2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/remote-models/cohere2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/remote-models/deepseek2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/remote-models/google2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/remote-models/groq2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/remote-models/martian2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/remote-models/mistralai2025-03-10T05:06:47.877Zdaily1 
+https://jan.ai/docs/remote-models/nvidia-nim2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/remote-models/openai2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/remote-models/openrouter2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/remote-models/triton2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/settings2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/threads2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/tools/retrieval2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/docs/troubleshooting2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/download2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/integrations2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/integrations/coding/continue-dev2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/integrations/coding/tabby2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/integrations/function-calling/interpreter2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/integrations/messaging/llmcord2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/integrations/workflow-automation/n8n2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/post/benchmarking-nvidia-tensorrt-llm2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/post/bitdefender2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/post/data-is-moat2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/post/deepseek-r1-locally2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/post/offline-chatgpt-alternative2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/post/rag-is-not-enough2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/post/run-ai-models-locally2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/privacy2025-03-10T05:06:47.877Zdaily1 +https://jan.ai/support2025-03-10T05:06:47.877Zdaily1 \ No newline at end of file diff --git a/docs/src/components/APIReference/index.tsx b/docs/src/components/APIReference/index.tsx index c18e522c4..1fe029e7a 100644 --- a/docs/src/components/APIReference/index.tsx +++ b/docs/src/components/APIReference/index.tsx @@ -27,7 +27,7 @@ export const APIReference = () => { {

Latest release updates from the Jan team. Check out our  Roadmap @@ -150,7 +150,7 @@ const Changelog = () => {

diff --git a/docs/src/components/Download/CardDownload.tsx b/docs/src/components/Download/CardDownload.tsx index f61f9f462..d8730593c 100644 --- a/docs/src/components/Download/CardDownload.tsx +++ b/docs/src/components/Download/CardDownload.tsx @@ -86,7 +86,7 @@ export default function CardDownload({ lastRelease }: Props) { .replace('{tag}', tag) return { ...system, - href: `https://github.com/janhq/jan/releases/download/${lastRelease.tag_name}/${downloadUrl}`, + href: `https://github.com/menloresearch/jan/releases/download/${lastRelease.tag_name}/${downloadUrl}`, } }) diff --git a/docs/src/components/DropdownDownload/index.tsx b/docs/src/components/DropdownDownload/index.tsx index c0cdfb73b..5eb66e151 100644 --- a/docs/src/components/DropdownDownload/index.tsx +++ b/docs/src/components/DropdownDownload/index.tsx @@ -149,7 +149,7 @@ const DropdownDownload = ({ lastRelease }: Props) => { .replace('{tag}', tag) return { ...system, - href: `https://github.com/janhq/jan/releases/download/${lastRelease.tag_name}/${downloadUrl}`, + href: `https://github.com/menloresearch/jan/releases/download/${lastRelease.tag_name}/${downloadUrl}`, } }) setSystems(updatedSystems) diff --git a/docs/src/components/FooterMenu/index.tsx b/docs/src/components/FooterMenu/index.tsx index 7b144a956..22b6f9e8a 100644 --- a/docs/src/components/FooterMenu/index.tsx +++ b/docs/src/components/FooterMenu/index.tsx @@ -1,4 +1,4 @@ -import React, { useState } from 'react' +import React, { useEffect, useState } from 'react' import ThemeImage from '@/components/ThemeImage' import { AiOutlineGithub } from 'react-icons/ai' import { RiTwitterXFill } from 'react-icons/ri' @@ -7,6 +7,7 @@ import { BiLogoDiscordAlt } from 'react-icons/bi' import { useForm } from 'react-hook-form' import LogoMark from '@/components/LogoMark' import { FaLinkedin } from 'react-icons/fa' +import posthog from 'posthog-js' const socials = [ { @@ -25,7 +26,7 @@ const socials = [ icon: ( ), - href: 'https://github.com/janhq/jan', + 
href: 'https://github.com/menloresearch/jan', }, { icon: , @@ -61,7 +62,7 @@ const menus = [ child: [ { menu: 'Github', - path: 'https://github.com/janhq/jan', + path: 'https://github.com/menloresearch/jan', external: true, }, { @@ -94,7 +95,7 @@ const menus = [ }, { menu: 'Careers', - path: 'https://homebrew.bamboohr.com/careers', + path: 'https://menlo.bamboohr.com/careers', external: true, }, ], @@ -104,6 +105,19 @@ const menus = [ const getCurrentYear = new Date().getFullYear() export default function Footer() { + useEffect(() => { + if (typeof window !== 'undefined') { + posthog.init(process.env.POSTHOG_KEY as string, { + api_host: process.env.POSTHOG_HOST, + disable_session_recording: true, + person_profiles: 'always', + persistence: 'localStorage', + }) + + posthog.capture('web_page_view', { timestamp: new Date() }) + } + }, []) + const { register, handleSubmit, reset } = useForm({ defaultValues: { email: '', @@ -237,14 +251,14 @@ export default function Footer() { ) })}
- ©{getCurrentYear} Homebrew Computer Company + ©{getCurrentYear} Menlo Research diff --git a/docs/src/components/Home/BuiltWithLove/index.tsx b/docs/src/components/Home/BuiltWithLove/index.tsx index b84fb3634..5624e82e1 100644 --- a/docs/src/components/Home/BuiltWithLove/index.tsx +++ b/docs/src/components/Home/BuiltWithLove/index.tsx @@ -23,7 +23,7 @@ const BuiltWithLove = () => {
diff --git a/docs/src/components/Home/Hero/index.tsx b/docs/src/components/Home/Hero/index.tsx index 99d8c6995..009681197 100644 --- a/docs/src/components/Home/Hero/index.tsx +++ b/docs/src/components/Home/Hero/index.tsx @@ -44,7 +44,7 @@ const Hero = () => {
diff --git a/docs/src/pages/_app.mdx b/docs/src/pages/_app.mdx index 8fa4b4875..08c72e32b 100644 --- a/docs/src/pages/_app.mdx +++ b/docs/src/pages/_app.mdx @@ -3,7 +3,6 @@ import '@code-hike/mdx/styles.css' import { Fragment } from "react" import Script from "next/script" - export default function App({ Component, pageProps }) { return ( diff --git a/docs/src/pages/about/handbook.mdx b/docs/src/pages/about/handbook.mdx index fc2775364..264d6d36d 100644 --- a/docs/src/pages/about/handbook.mdx +++ b/docs/src/pages/about/handbook.mdx @@ -26,19 +26,19 @@ Jan operates on open-source principles, giving everyone the freedom to adjust, p We embrace open development, showcasing our progress and upcoming features on GitHub, and we encourage your input and contributions: -- [Jan Framework](https://github.com/janhq/jan) (AGPLv3) +- [Jan Framework](https://github.com/menloresearch/jan) (AGPLv3) - [Jan Desktop Client & Local server](https://jan.ai) (AGPLv3, built on Jan Framework) -- [Nitro: run Local AI](https://github.com/janhq/nitro) (AGPLv3) +- [Nitro: run Local AI](https://github.com/menloresearch/nitro) (AGPLv3) ## Build in Public We use GitHub to build in public and welcome anyone to join in. -- [Jan's Kanban](https://github.com/orgs/janhq/projects/5) -- [Jan's Roadmap](https://github.com/orgs/janhq/projects/5/views/29) +- [Jan's Kanban](https://github.com/orgs/menloresearch/projects/5) +- [Jan's Roadmap](https://github.com/orgs/menloresearch/projects/5/views/29) ## Collaboration Our team spans the globe, working remotely to bring Jan to life. We coordinate through Discord and GitHub, valuing asynchronous communication and minimal, purposeful meetings. For collaboration and brainstorming, we utilize tools like [Excalidraw](https://excalidraw.com/) and [Miro](https://miro.com/), ensuring alignment and shared vision through visual storytelling and detailed documentation on [HackMD](https://hackmd.io/). 
-Check out the [Jan Framework](https://github.com/janhq/jan) and our desktop client & local server at [jan.ai](https://jan.ai), both licensed under AGPLv3 for maximum openness and user freedom. +Check out the [Jan Framework](https://github.com/menloresearch/jan) and our desktop client & local server at [jan.ai](https://jan.ai), both licensed under AGPLv3 for maximum openness and user freedom. diff --git a/docs/src/pages/about/handbook/engineering.mdx b/docs/src/pages/about/handbook/engineering.mdx index dcfa2a32e..3038ead76 100644 --- a/docs/src/pages/about/handbook/engineering.mdx +++ b/docs/src/pages/about/handbook/engineering.mdx @@ -19,5 +19,5 @@ keywords: ## Prerequisites -- [Requirements](https://github.com/janhq/jan?tab=readme-ov-file#requirements-for-running-jan) -- [Setting up local env](https://github.com/janhq/jan?tab=readme-ov-file#contributing) +- [Requirements](https://github.com/menloresearch/jan?tab=readme-ov-file#requirements-for-running-jan) +- [Setting up local env](https://github.com/menloresearch/jan?tab=readme-ov-file#contributing) diff --git a/docs/src/pages/about/handbook/project-management.mdx b/docs/src/pages/about/handbook/project-management.mdx index aa43b1ddd..d6c64318d 100644 --- a/docs/src/pages/about/handbook/project-management.mdx +++ b/docs/src/pages/about/handbook/project-management.mdx @@ -20,7 +20,7 @@ import { Callout } from 'nextra/components' # Project Management -We use the [Jan Monorepo Project](https://github.com/orgs/janhq/projects/5) in Github to manage our roadmap and sprint Kanbans. +We use the [Jan Monorepo Project](https://github.com/orgs/menloresearch/projects/5) in Github to manage our roadmap and sprint Kanbans. As much as possible, everyone owns their respective `epics` and `tasks`. @@ -30,37 +30,37 @@ As much as possible, everyone owns their respective `epics` and `tasks`. 
## Quicklinks -- [High-level roadmap](https://github.com/orgs/janhq/projects/5/views/16): view used at at strategic level, for team wide alignment. Start & end dates reflect engineering implementation cycles. Typically product & design work preceeds these timelines. -- [Standup Kanban](https://github.com/orgs/janhq/projects/5/views/25): view used during daily standup. Sprints should be up to date. +- [High-level roadmap](https://github.com/orgs/menloresearch/projects/5/views/16): view used at at strategic level, for team wide alignment. Start & end dates reflect engineering implementation cycles. Typically product & design work preceeds these timelines. +- [Standup Kanban](https://github.com/orgs/menloresearch/projects/5/views/25): view used during daily standup. Sprints should be up to date. ## Organization -[`Roadmap Labels`](https://github.com/janhq/jan/labels?q=roadmap) +[`Roadmap Labels`](https://github.com/menloresearch/jan/labels?q=roadmap) - `Roadmap Labels` tag large, long-term, & strategic projects that can span multiple teams and multiple sprints - Example label: `roadmap: Jan has Mobile` - `Roadmaps` contain `epics` -[`Epics`](https://github.com/janhq/jan/issues?q=is%3Aissue+is%3Aopen+label%3A%22type%3A+epic%22) +[`Epics`](https://github.com/menloresearch/jan/issues?q=is%3Aissue+is%3Aopen+label%3A%22type%3A+epic%22) - `Epics` track large stories that span 1-2 weeks, and it outlines specs, architecture decisions, designs - `Epics` contain `tasks` - `Epics` should always have 1 owner -[`Milestones`](https://github.com/janhq/jan/milestones) +[`Milestones`](https://github.com/menloresearch/jan/milestones) - `Milestones` track release versions. 
We use [semantic versioning](https://semver.org/) - `Milestones` span ~2 weeks and have deadlines - `Milestones` usually fit within 2-week sprint cycles -[`Tasks`](https://github.com/janhq/jan/issues) +[`Tasks`](https://github.com/menloresearch/jan/issues) - Tasks are individual issues (feats, bugs, chores) that can be completed within a few days - Tasks, except for critical bugs, should always belong to an `epic` (and thus fit into our roadmap) - Tasks are usually named per [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/#summary) - Tasks should always have 1 owner -We aim to always sprint on `tasks` that are a part of the [current roadmap](https://github.com/orgs/janhq/projects/5/views/16). +We aim to always sprint on `tasks` that are a part of the [current roadmap](https://github.com/orgs/menloresearch/projects/5/views/16). ## Kanban @@ -80,4 +80,4 @@ We aim to always sprint on `tasks` that are a part of the [current roadmap](http As a result, our feature prioritization can feel a bit black box at times. -We'd appreciate high quality insights and volunteers for user interviews through [Discord](https://discord.gg/af6SaTdzpx) and [Github](https://github.com/janhq). +We'd appreciate high quality insights and volunteers for user interviews through [Discord](https://discord.gg/af6SaTdzpx) and [Github](https://github.com/menloresearch). diff --git a/docs/src/pages/about/handbook/website-docs.mdx b/docs/src/pages/about/handbook/website-docs.mdx index 9ab2c2e10..773fcceea 100644 --- a/docs/src/pages/about/handbook/website-docs.mdx +++ b/docs/src/pages/about/handbook/website-docs.mdx @@ -37,7 +37,7 @@ We try to **keep routes consistent** to maintain SEO. ## How to Contribute -Refer to the [Contributing Guide](https://github.com/janhq/jan/blob/dev/CONTRIBUTING.md) for more comprehensive information on how to contribute to the Jan project. 
+Refer to the [Contributing Guide](https://github.com/menloresearch/jan/blob/dev/CONTRIBUTING.md) for more comprehensive information on how to contribute to the Jan project. ## Pre-requisites and Installation diff --git a/docs/src/pages/about/index.mdx b/docs/src/pages/about/index.mdx index c50093e53..81a1b3d27 100644 --- a/docs/src/pages/about/index.mdx +++ b/docs/src/pages/about/index.mdx @@ -1,9 +1,9 @@ --- -title: Homebrew Computer Company -description: We are Homebrew Computer Company, the creators and maintainers of Jan, Cortex and other tools. +title: Menlo Research +description: We are Menlo Research, the creators and maintainers of Jan, Cortex and other tools. keywords: [ - Homebrew Computer Company, + Menlo Research, Jan, local AI, open-source alternative to chatgpt, @@ -22,7 +22,7 @@ keywords: import { Callout } from 'nextra/components' -# Homebrew Computer Company +# Menlo Research ![Eniac](./_assets/eniac.jpeg) _[Eniac](https://www.computerhistory.org/revolution/birth-of-the-computer/4/78), the World's First Computer (Photo courtesy of US Army)_ @@ -36,7 +36,7 @@ We're a team of AI researchers and engineers. We are the creators and lead maint - More to come! -The [Homebrew Computer Club](https://en.wikipedia.org/wiki/Homebrew_Computer_Club) was an early computer hobbyist group from 1975 to 1986 that led to Apple and the personal computer revolution. +The [Menlo Research](https://en.wikipedia.org/wiki/Homebrew_Computer_Club) was an early computer hobbyist group from 1975 to 1986 that led to Apple and the personal computer revolution. ### Mission @@ -81,7 +81,7 @@ Our products are designed with [Extension APIs](/docs/extensions), and we do our We are part of a larger open-source community and are committed to being a good jigsaw puzzle piece. We credit and actively contribute to upstream projects. 
-We adopt a public-by-default approach to [Project Management](https://github.com/orgs/janhq/projects/5), [Roadmaps](https://github.com/orgs/janhq/projects/5/views/31), and Helpdesk for our products. +We adopt a public-by-default approach to [Project Management](https://github.com/orgs/menloresearch/projects/5), [Roadmaps](https://github.com/orgs/menloresearch/projects/5/views/31), and Helpdesk for our products. ## Inspirations @@ -93,7 +93,7 @@ We are inspired by and actively try to emulate the paths of companies we admire - [Obsidian](https://obsidian.md/) - [Discourse](https://www.discourse.org/about) - [Gitlab](https://handbook.gitlab.com/handbook/company/history/#2017-gitlab-storytime) -- [Redhat](https://www.redhat.com/en/about/development-model) +- [Red Hat](https://www.redhat.com/en/about/development-model) - [Ghost](https://ghost.org/docs/contributing/) - [Lago](https://www.getlago.com/blog/open-source-licensing-and-why-lago-chose-agplv3) - [Twenty](https://twenty.com/story) diff --git a/docs/src/pages/about/team.mdx b/docs/src/pages/about/team.mdx index 205ac106e..d9c8e6b27 100644 --- a/docs/src/pages/about/team.mdx +++ b/docs/src/pages/about/team.mdx @@ -20,7 +20,7 @@ import { Cards, Card } from 'nextra/components' We're a small, fully-remote team, mostly based in Southeast Asia. -We are committed to become a global company. You can check our [Careers page](https://homebrew.bamboohr.com/careers) if you'd like to join us on our adventure. +We are committed to become a global company. You can check our [Careers page](https://menlo.bamboohr.com/careers) if you'd like to join us on our adventure. Ping us in [Discord](https://discord.gg/AAGQNpJQtH) if you're keen to talk to us! 
diff --git a/docs/src/pages/changelog/2024-02-10-jan-is-more-stable.mdx b/docs/src/pages/changelog/2024-02-10-jan-is-more-stable.mdx index 59e78a0a7..5410d86a9 100644 --- a/docs/src/pages/changelog/2024-02-10-jan-is-more-stable.mdx +++ b/docs/src/pages/changelog/2024-02-10-jan-is-more-stable.mdx @@ -24,4 +24,4 @@ Fixes 💫 Update your product or download the latest: https://jan.ai -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.5). \ No newline at end of file +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.5). \ No newline at end of file diff --git a/docs/src/pages/changelog/2024-03-06-ui-revamp-settings.mdx b/docs/src/pages/changelog/2024-03-06-ui-revamp-settings.mdx index e2ba0c6be..037d21a8f 100644 --- a/docs/src/pages/changelog/2024-03-06-ui-revamp-settings.mdx +++ b/docs/src/pages/changelog/2024-03-06-ui-revamp-settings.mdx @@ -24,4 +24,4 @@ Jan now supports Mistral's new model Codestral. Thanks [Bartowski](https://huggi More GGUF models can run in Jan - we rebased to llama.cpp b3012.Big thanks to [ggerganov](https://github.com/ggerganov) -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.0). +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.0). diff --git a/docs/src/pages/changelog/2024-04-25-llama3-command-r-hugginface.mdx b/docs/src/pages/changelog/2024-04-25-llama3-command-r-hugginface.mdx index 8998b9a8a..7c6b01a3e 100644 --- a/docs/src/pages/changelog/2024-04-25-llama3-command-r-hugginface.mdx +++ b/docs/src/pages/changelog/2024-04-25-llama3-command-r-hugginface.mdx @@ -28,4 +28,4 @@ Jan now understands LaTeX, allowing users to process and understand complex math ![Latex](/assets/images/changelog/jan_update_latex.gif) -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.4.12). 
+For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.4.12). diff --git a/docs/src/pages/changelog/2024-05-20-llamacpp-upgrade-new-remote-models.mdx b/docs/src/pages/changelog/2024-05-20-llamacpp-upgrade-new-remote-models.mdx index 11e8013ac..c3ecb905d 100644 --- a/docs/src/pages/changelog/2024-05-20-llamacpp-upgrade-new-remote-models.mdx +++ b/docs/src/pages/changelog/2024-05-20-llamacpp-upgrade-new-remote-models.mdx @@ -28,4 +28,4 @@ Users can now connect to OpenAI's new model GPT-4o. ![GPT4o](/assets/images/changelog/jan_v0_4_13_openai_gpt4o.gif) -For more details, see the [GitHub release notes.](https://github.com/janhq/jan/releases/tag/v0.4.13) +For more details, see the [GitHub release notes.](https://github.com/menloresearch/jan/releases/tag/v0.4.13) diff --git a/docs/src/pages/changelog/2024-05-28-cohere-aya-23-8b-35b-phi-3-medium.mdx b/docs/src/pages/changelog/2024-05-28-cohere-aya-23-8b-35b-phi-3-medium.mdx index 3d628afd7..a6823050e 100644 --- a/docs/src/pages/changelog/2024-05-28-cohere-aya-23-8b-35b-phi-3-medium.mdx +++ b/docs/src/pages/changelog/2024-05-28-cohere-aya-23-8b-35b-phi-3-medium.mdx @@ -16,4 +16,4 @@ More GGUF models can run in Jan - we rebased to llama.cpp b2961. Huge shoutouts to [ggerganov](https://github.com/ggerganov) and contributors for llama.cpp, and [Bartowski](https://huggingface.co/bartowski) for GGUF models. -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.4.14). +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.4.14). 
diff --git a/docs/src/pages/changelog/2024-06-21-nvidia-nim-support.mdx b/docs/src/pages/changelog/2024-06-21-nvidia-nim-support.mdx index ee7e7282f..27ead7ff2 100644 --- a/docs/src/pages/changelog/2024-06-21-nvidia-nim-support.mdx +++ b/docs/src/pages/changelog/2024-06-21-nvidia-nim-support.mdx @@ -26,4 +26,4 @@ We've updated to llama.cpp b3088 for better performance - thanks to [GG](https:/ - Reduced chat font weight (back to normal!) - Restored the maximize button -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.1). +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.1). diff --git a/docs/src/pages/changelog/2024-07-15-claude-3-5-support.mdx b/docs/src/pages/changelog/2024-07-15-claude-3-5-support.mdx index b3d8854e3..b80c31cfe 100644 --- a/docs/src/pages/changelog/2024-07-15-claude-3-5-support.mdx +++ b/docs/src/pages/changelog/2024-07-15-claude-3-5-support.mdx @@ -32,4 +32,4 @@ We've restored the tooltip hover functionality, which makes it easier to access The right-click options for thread settings are now fully operational again. You can now manage your threads with this fix. -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.2). +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.2). diff --git a/docs/src/pages/changelog/2024-09-01-llama3-1-gemma2-support.mdx b/docs/src/pages/changelog/2024-09-01-llama3-1-gemma2-support.mdx index 4f869cd0c..413a7380a 100644 --- a/docs/src/pages/changelog/2024-09-01-llama3-1-gemma2-support.mdx +++ b/docs/src/pages/changelog/2024-09-01-llama3-1-gemma2-support.mdx @@ -23,4 +23,4 @@ We've been working on stability issues over the last few weeks. Jan is now more - Fixed the GPU memory utilization bar - Some UX and copy improvements -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.3). 
+For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.3). diff --git a/docs/src/pages/changelog/2024-09-17-improved-cpu-performance.mdx b/docs/src/pages/changelog/2024-09-17-improved-cpu-performance.mdx index 9bf0def6e..da2d31e2a 100644 --- a/docs/src/pages/changelog/2024-09-17-improved-cpu-performance.mdx +++ b/docs/src/pages/changelog/2024-09-17-improved-cpu-performance.mdx @@ -32,4 +32,4 @@ Switching between threads used to reset your instruction settings. That’s fixe ### Minor UI Tweaks & Bug Fixes We’ve also resolved issues with the input slider on the right panel and tackled several smaller bugs to keep everything running smoothly. -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.4). +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.4). diff --git a/docs/src/pages/changelog/2024-10-24-jan-stable.mdx b/docs/src/pages/changelog/2024-10-24-jan-stable.mdx index 7c24d51ee..3abdd568e 100644 --- a/docs/src/pages/changelog/2024-10-24-jan-stable.mdx +++ b/docs/src/pages/changelog/2024-10-24-jan-stable.mdx @@ -23,4 +23,4 @@ Fixes 💫 Update your product or download the latest: https://jan.ai -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.7). \ No newline at end of file +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.7). 
\ No newline at end of file diff --git a/docs/src/pages/changelog/2024-11-22-jan-bugs.mdx b/docs/src/pages/changelog/2024-11-22-jan-bugs.mdx index 2b7462d1d..413d32876 100644 --- a/docs/src/pages/changelog/2024-11-22-jan-bugs.mdx +++ b/docs/src/pages/changelog/2024-11-22-jan-bugs.mdx @@ -22,4 +22,4 @@ Jan v0.5.9 is here: fixing what needed fixing Update your product or download the latest: https://jan.ai -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.9). \ No newline at end of file +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.9). \ No newline at end of file diff --git a/docs/src/pages/changelog/2024-11.14-jan-supports-qwen-coder.mdx b/docs/src/pages/changelog/2024-11.14-jan-supports-qwen-coder.mdx index b4589e8d6..72f650021 100644 --- a/docs/src/pages/changelog/2024-11.14-jan-supports-qwen-coder.mdx +++ b/docs/src/pages/changelog/2024-11.14-jan-supports-qwen-coder.mdx @@ -22,4 +22,4 @@ and various UI/UX enhancements 💫 Update your product or download the latest: https://jan.ai -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.8). \ No newline at end of file +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.8). \ No newline at end of file diff --git a/docs/src/pages/changelog/2024-12-03-jan-is-faster.mdx b/docs/src/pages/changelog/2024-12-03-jan-is-faster.mdx index 119758234..9eca1ed86 100644 --- a/docs/src/pages/changelog/2024-12-03-jan-is-faster.mdx +++ b/docs/src/pages/changelog/2024-12-03-jan-is-faster.mdx @@ -19,4 +19,4 @@ Jan v0.5.10 is live: Jan is faster, smoother, and more reliable. Update your product or download the latest: https://jan.ai -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.10). 
\ No newline at end of file +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.10). \ No newline at end of file diff --git a/docs/src/pages/changelog/2024-12-05-jan-hot-fix-mac.mdx b/docs/src/pages/changelog/2024-12-05-jan-hot-fix-mac.mdx index b38b506ec..25283118e 100644 --- a/docs/src/pages/changelog/2024-12-05-jan-hot-fix-mac.mdx +++ b/docs/src/pages/changelog/2024-12-05-jan-hot-fix-mac.mdx @@ -23,4 +23,4 @@ Jan v0.5.11 is here - critical issues fixed, Mac installation updated. Update your product or download the latest: https://jan.ai -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.11). \ No newline at end of file +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.11). \ No newline at end of file diff --git a/docs/src/pages/changelog/2024-12-30-jan-new-privacy.mdx b/docs/src/pages/changelog/2024-12-30-jan-new-privacy.mdx index 792a72b3c..370ef3efe 100644 --- a/docs/src/pages/changelog/2024-12-30-jan-new-privacy.mdx +++ b/docs/src/pages/changelog/2024-12-30-jan-new-privacy.mdx @@ -25,4 +25,4 @@ Jan v0.5.11 is here - critical issues fixed, Mac installation updated. Update your product or download the latest: https://jan.ai -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.12). \ No newline at end of file +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.12). 
\ No newline at end of file diff --git a/docs/src/pages/changelog/2025-01-06-key-issues-resolved.mdx b/docs/src/pages/changelog/2025-01-06-key-issues-resolved.mdx index 86f63a336..8c3d87250 100644 --- a/docs/src/pages/changelog/2025-01-06-key-issues-resolved.mdx +++ b/docs/src/pages/changelog/2025-01-06-key-issues-resolved.mdx @@ -20,4 +20,4 @@ import ChangelogHeader from "@/components/Changelog/ChangelogHeader" Update your product or download the latest: https://jan.ai -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.13). \ No newline at end of file +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.13). diff --git a/docs/src/pages/changelog/2025-01-23-deepseek-r1-jan.mdx b/docs/src/pages/changelog/2025-01-23-deepseek-r1-jan.mdx index 0cb7a9f8c..16c99d80e 100644 --- a/docs/src/pages/changelog/2025-01-23-deepseek-r1-jan.mdx +++ b/docs/src/pages/changelog/2025-01-23-deepseek-r1-jan.mdx @@ -33,4 +33,4 @@ Llama Update your Jan or [download the latest](https://jan.ai/). -For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.14). \ No newline at end of file +For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.14). diff --git a/docs/src/pages/cortex/cortex-llamacpp.mdx b/docs/src/pages/cortex/cortex-llamacpp.mdx index e4d5faa90..b8e121a35 100644 --- a/docs/src/pages/cortex/cortex-llamacpp.mdx +++ b/docs/src/pages/cortex/cortex-llamacpp.mdx @@ -40,7 +40,7 @@ If you already use [Jan](/docs) or [Cortex](/cortex), cortex.llamacpp is bundled ## Usage -To include cortex.llamacpp in your own server implementation, follow this [server example](https://github.com/janhq/cortex.llamacpp/tree/main/examples/server). +To include cortex.llamacpp in your own server implementation, follow this [server example](https://github.com/menloresearch/cortex.llamacpp/tree/main/examples/server). 
## Interface @@ -140,4 +140,4 @@ The future plans for Cortex.llamacpp are focused on enhancing performance and ex - Performance Enhancements: Optimizing speed and reducing memory usage to ensure efficient processing of tasks. - Multimodal Model Compatibility: Expanding support to include a variety of multimodal models, enabling a broader range of applications and use cases. -To follow the latest developments, see the [cortex.llamacpp GitHub](https://github.com/janhq/cortex.llamacpp) \ No newline at end of file +To follow the latest developments, see the [cortex.llamacpp GitHub](https://github.com/menloresearch/cortex.llamacpp) \ No newline at end of file diff --git a/docs/src/pages/cortex/installation/linux.mdx b/docs/src/pages/cortex/installation/linux.mdx index 2d49f2714..90f396811 100644 --- a/docs/src/pages/cortex/installation/linux.mdx +++ b/docs/src/pages/cortex/installation/linux.mdx @@ -152,7 +152,7 @@ cortex -h To install Cortex from the source, follow the steps below: -1. Clone the Cortex repository [here](https://github.com/janhq/cortex/tree/dev). +1. Clone the Cortex repository [here](https://github.com/menloresearch/cortex/tree/dev). 2. Navigate to the `cortex-js` folder. 3. Open the terminal and run the following command to build the Cortex project: diff --git a/docs/src/pages/cortex/installation/mac.mdx b/docs/src/pages/cortex/installation/mac.mdx index 9e5bd7e72..e3afaaf5d 100644 --- a/docs/src/pages/cortex/installation/mac.mdx +++ b/docs/src/pages/cortex/installation/mac.mdx @@ -118,7 +118,7 @@ cortex -h To install Cortex from the source, follow the steps below: -1. Clone the Cortex repository [here](https://github.com/janhq/cortex/tree/dev). +1. Clone the Cortex repository [here](https://github.com/menloresearch/cortex/tree/dev). 2. Navigate to the `cortex-js` folder. 3. 
Open the terminal and run the following command to build the Cortex project: diff --git a/docs/src/pages/cortex/installation/windows.mdx b/docs/src/pages/cortex/installation/windows.mdx index 600ae0b07..326288bfd 100644 --- a/docs/src/pages/cortex/installation/windows.mdx +++ b/docs/src/pages/cortex/installation/windows.mdx @@ -168,7 +168,7 @@ cortex -h To install Cortex from the source, follow the steps below: -1. Clone the Cortex repository [here](https://github.com/janhq/cortex/tree/dev). +1. Clone the Cortex repository [here](https://github.com/menloresearch/cortex/tree/dev). 2. Navigate to the `cortex-js` folder. 3. Open the terminal and run the following command to build the Cortex project: diff --git a/docs/src/pages/docs/_assets/anthropic.png b/docs/src/pages/docs/_assets/anthropic.png index 108576903..d82ba291d 100644 Binary files a/docs/src/pages/docs/_assets/anthropic.png and b/docs/src/pages/docs/_assets/anthropic.png differ diff --git a/docs/src/pages/docs/_assets/api-server.png b/docs/src/pages/docs/_assets/api-server.png index 65469a3b0..2d62edbe5 100644 Binary files a/docs/src/pages/docs/_assets/api-server.png and b/docs/src/pages/docs/_assets/api-server.png differ diff --git a/docs/src/pages/docs/_assets/assistant-01.png b/docs/src/pages/docs/_assets/assistant-01.png index eabf2bba2..ebbb134ed 100644 Binary files a/docs/src/pages/docs/_assets/assistant-01.png and b/docs/src/pages/docs/_assets/assistant-01.png differ diff --git a/docs/src/pages/docs/_assets/cohere.png b/docs/src/pages/docs/_assets/cohere.png index 87d3d0fa4..0d8f7f010 100644 Binary files a/docs/src/pages/docs/_assets/cohere.png and b/docs/src/pages/docs/_assets/cohere.png differ diff --git a/docs/src/pages/docs/_assets/deepseek.png b/docs/src/pages/docs/_assets/deepseek.png new file mode 100644 index 000000000..9e1084aee Binary files /dev/null and b/docs/src/pages/docs/_assets/deepseek.png differ diff --git a/docs/src/pages/docs/_assets/extensions-01.png 
b/docs/src/pages/docs/_assets/extensions-01.png index 9ac2d9a98..98fe85480 100644 Binary files a/docs/src/pages/docs/_assets/extensions-01.png and b/docs/src/pages/docs/_assets/extensions-01.png differ diff --git a/docs/src/pages/docs/_assets/extensions-02.png b/docs/src/pages/docs/_assets/extensions-02.png index e2e582b37..5761b6b7f 100644 Binary files a/docs/src/pages/docs/_assets/extensions-02.png and b/docs/src/pages/docs/_assets/extensions-02.png differ diff --git a/docs/src/pages/docs/_assets/extensions-03.png b/docs/src/pages/docs/_assets/extensions-03.png index bbdd2c371..a74230a0a 100644 Binary files a/docs/src/pages/docs/_assets/extensions-03.png and b/docs/src/pages/docs/_assets/extensions-03.png differ diff --git a/docs/src/pages/docs/_assets/extensions-04.png b/docs/src/pages/docs/_assets/extensions-04.png index e643f435b..20b48bcf5 100644 Binary files a/docs/src/pages/docs/_assets/extensions-04.png and b/docs/src/pages/docs/_assets/extensions-04.png differ diff --git a/docs/src/pages/docs/_assets/extensions-05.png b/docs/src/pages/docs/_assets/extensions-05.png new file mode 100644 index 000000000..3967d90a8 Binary files /dev/null and b/docs/src/pages/docs/_assets/extensions-05.png differ diff --git a/docs/src/pages/docs/_assets/extensions-06.png b/docs/src/pages/docs/_assets/extensions-06.png new file mode 100644 index 000000000..b169e3926 Binary files /dev/null and b/docs/src/pages/docs/_assets/extensions-06.png differ diff --git a/docs/src/pages/docs/_assets/extensions-07.png b/docs/src/pages/docs/_assets/extensions-07.png new file mode 100644 index 000000000..3d39f56d9 Binary files /dev/null and b/docs/src/pages/docs/_assets/extensions-07.png differ diff --git a/docs/src/pages/docs/_assets/extensions-08.png b/docs/src/pages/docs/_assets/extensions-08.png new file mode 100644 index 000000000..3d124e367 Binary files /dev/null and b/docs/src/pages/docs/_assets/extensions-08.png differ diff --git a/docs/src/pages/docs/_assets/extensions-09.png 
b/docs/src/pages/docs/_assets/extensions-09.png new file mode 100644 index 000000000..7d7cd6193 Binary files /dev/null and b/docs/src/pages/docs/_assets/extensions-09.png differ diff --git a/docs/src/pages/docs/_assets/extensions-10.png b/docs/src/pages/docs/_assets/extensions-10.png new file mode 100644 index 000000000..ecadd8475 Binary files /dev/null and b/docs/src/pages/docs/_assets/extensions-10.png differ diff --git a/docs/src/pages/docs/_assets/google.png b/docs/src/pages/docs/_assets/google.png new file mode 100644 index 000000000..8a99146b7 Binary files /dev/null and b/docs/src/pages/docs/_assets/google.png differ diff --git a/docs/src/pages/docs/_assets/groq.png b/docs/src/pages/docs/_assets/groq.png index d9927fe7a..cef77e7de 100644 Binary files a/docs/src/pages/docs/_assets/groq.png and b/docs/src/pages/docs/_assets/groq.png differ diff --git a/docs/src/pages/docs/_assets/install-engines-01.png b/docs/src/pages/docs/_assets/install-engines-01.png index 85db02b98..95e824a7f 100644 Binary files a/docs/src/pages/docs/_assets/install-engines-01.png and b/docs/src/pages/docs/_assets/install-engines-01.png differ diff --git a/docs/src/pages/docs/_assets/install-engines-02.png b/docs/src/pages/docs/_assets/install-engines-02.png index dcdd7e4df..b6b6a6a58 100644 Binary files a/docs/src/pages/docs/_assets/install-engines-02.png and b/docs/src/pages/docs/_assets/install-engines-02.png differ diff --git a/docs/src/pages/docs/_assets/install-engines-03.png b/docs/src/pages/docs/_assets/install-engines-03.png index cb2ecde56..a65145a4c 100644 Binary files a/docs/src/pages/docs/_assets/install-engines-03.png and b/docs/src/pages/docs/_assets/install-engines-03.png differ diff --git a/docs/src/pages/docs/_assets/jan-app.png b/docs/src/pages/docs/_assets/jan-app.png index 2490d925a..8b3acf3b3 100644 Binary files a/docs/src/pages/docs/_assets/jan-app.png and b/docs/src/pages/docs/_assets/jan-app.png differ diff --git a/docs/src/pages/docs/_assets/llama.cpp-01.png 
b/docs/src/pages/docs/_assets/llama.cpp-01.png index b794fe1df..0e04856d9 100644 Binary files a/docs/src/pages/docs/_assets/llama.cpp-01.png and b/docs/src/pages/docs/_assets/llama.cpp-01.png differ diff --git a/docs/src/pages/docs/_assets/martian.png b/docs/src/pages/docs/_assets/martian.png index e00349b01..840fd083d 100644 Binary files a/docs/src/pages/docs/_assets/martian.png and b/docs/src/pages/docs/_assets/martian.png differ diff --git a/docs/src/pages/docs/_assets/mistralai.png b/docs/src/pages/docs/_assets/mistralai.png index 70139c6ae..98c550680 100644 Binary files a/docs/src/pages/docs/_assets/mistralai.png and b/docs/src/pages/docs/_assets/mistralai.png differ diff --git a/docs/src/pages/docs/_assets/model-management-01.png b/docs/src/pages/docs/_assets/model-management-01.png index 9020c66b2..6598eda21 100644 Binary files a/docs/src/pages/docs/_assets/model-management-01.png and b/docs/src/pages/docs/_assets/model-management-01.png differ diff --git a/docs/src/pages/docs/_assets/model-management-02.png b/docs/src/pages/docs/_assets/model-management-02.png index bb446d056..3fc2e7b37 100644 Binary files a/docs/src/pages/docs/_assets/model-management-02.png and b/docs/src/pages/docs/_assets/model-management-02.png differ diff --git a/docs/src/pages/docs/_assets/model-management-04.png b/docs/src/pages/docs/_assets/model-management-04.png index 71d72dff2..696217cb7 100644 Binary files a/docs/src/pages/docs/_assets/model-management-04.png and b/docs/src/pages/docs/_assets/model-management-04.png differ diff --git a/docs/src/pages/docs/_assets/model-management-05.png b/docs/src/pages/docs/_assets/model-management-05.png index 891fa943a..85676ccd2 100644 Binary files a/docs/src/pages/docs/_assets/model-management-05.png and b/docs/src/pages/docs/_assets/model-management-05.png differ diff --git a/docs/src/pages/docs/_assets/model-management-06.png b/docs/src/pages/docs/_assets/model-management-06.png index ebd2de3d1..eced32503 100644 Binary files 
a/docs/src/pages/docs/_assets/model-management-06.png and b/docs/src/pages/docs/_assets/model-management-06.png differ diff --git a/docs/src/pages/docs/_assets/model-management-07.png b/docs/src/pages/docs/_assets/model-management-07.png index 612904ac7..ca9880ac0 100644 Binary files a/docs/src/pages/docs/_assets/model-management-07.png and b/docs/src/pages/docs/_assets/model-management-07.png differ diff --git a/docs/src/pages/docs/_assets/model-management-08.png b/docs/src/pages/docs/_assets/model-management-08.png new file mode 100644 index 000000000..98c02a19d Binary files /dev/null and b/docs/src/pages/docs/_assets/model-management-08.png differ diff --git a/docs/src/pages/docs/_assets/model-management-09.png b/docs/src/pages/docs/_assets/model-management-09.png new file mode 100644 index 000000000..990b53710 Binary files /dev/null and b/docs/src/pages/docs/_assets/model-management-09.png differ diff --git a/docs/src/pages/docs/_assets/model-parameters.png b/docs/src/pages/docs/_assets/model-parameters.png index a99deb75e..2d4c12ec2 100644 Binary files a/docs/src/pages/docs/_assets/model-parameters.png and b/docs/src/pages/docs/_assets/model-parameters.png differ diff --git a/docs/src/pages/docs/_assets/nvidia-nim.png b/docs/src/pages/docs/_assets/nvidia-nim.png index 8eb637c12..e748756f7 100644 Binary files a/docs/src/pages/docs/_assets/nvidia-nim.png and b/docs/src/pages/docs/_assets/nvidia-nim.png differ diff --git a/docs/src/pages/docs/_assets/openai.png b/docs/src/pages/docs/_assets/openai.png index e6407d9ed..6c489c4dd 100644 Binary files a/docs/src/pages/docs/_assets/openai.png and b/docs/src/pages/docs/_assets/openai.png differ diff --git a/docs/src/pages/docs/_assets/openrouter.png b/docs/src/pages/docs/_assets/openrouter.png index d9e032700..3e4c6b472 100644 Binary files a/docs/src/pages/docs/_assets/openrouter.png and b/docs/src/pages/docs/_assets/openrouter.png differ diff --git a/docs/src/pages/docs/_assets/quick-start-01.png 
b/docs/src/pages/docs/_assets/quick-start-01.png index 430b01fb0..03b101aa2 100644 Binary files a/docs/src/pages/docs/_assets/quick-start-01.png and b/docs/src/pages/docs/_assets/quick-start-01.png differ diff --git a/docs/src/pages/docs/_assets/quick-start-02.png b/docs/src/pages/docs/_assets/quick-start-02.png index 998e04dc8..977d8ebdb 100644 Binary files a/docs/src/pages/docs/_assets/quick-start-02.png and b/docs/src/pages/docs/_assets/quick-start-02.png differ diff --git a/docs/src/pages/docs/_assets/quick-start-03.png b/docs/src/pages/docs/_assets/quick-start-03.png index 51d6de0d2..49cf3064d 100644 Binary files a/docs/src/pages/docs/_assets/quick-start-03.png and b/docs/src/pages/docs/_assets/quick-start-03.png differ diff --git a/docs/src/pages/docs/_assets/retrieval-01.png b/docs/src/pages/docs/_assets/retrieval-01.png index b29b964b5..1d120e745 100644 Binary files a/docs/src/pages/docs/_assets/retrieval-01.png and b/docs/src/pages/docs/_assets/retrieval-01.png differ diff --git a/docs/src/pages/docs/_assets/retrieval-02.png b/docs/src/pages/docs/_assets/retrieval-02.png index 37fc8bf99..2ec4ba029 100644 Binary files a/docs/src/pages/docs/_assets/retrieval-02.png and b/docs/src/pages/docs/_assets/retrieval-02.png differ diff --git a/docs/src/pages/docs/_assets/settings-01.png b/docs/src/pages/docs/_assets/settings-01.png index 7a8539d65..e2e5aead5 100644 Binary files a/docs/src/pages/docs/_assets/settings-01.png and b/docs/src/pages/docs/_assets/settings-01.png differ diff --git a/docs/src/pages/docs/_assets/settings-02.png b/docs/src/pages/docs/_assets/settings-02.png index b00924d5a..6c1699a1c 100644 Binary files a/docs/src/pages/docs/_assets/settings-02.png and b/docs/src/pages/docs/_assets/settings-02.png differ diff --git a/docs/src/pages/docs/_assets/settings-03.png b/docs/src/pages/docs/_assets/settings-03.png index aced464be..4e32c390b 100644 Binary files a/docs/src/pages/docs/_assets/settings-03.png and 
b/docs/src/pages/docs/_assets/settings-03.png differ diff --git a/docs/src/pages/docs/_assets/settings-04.png b/docs/src/pages/docs/_assets/settings-04.png index 36dc64fce..72198ced5 100644 Binary files a/docs/src/pages/docs/_assets/settings-04.png and b/docs/src/pages/docs/_assets/settings-04.png differ diff --git a/docs/src/pages/docs/_assets/settings-05.png b/docs/src/pages/docs/_assets/settings-05.png index 8fb76e0fc..489d6cd50 100644 Binary files a/docs/src/pages/docs/_assets/settings-05.png and b/docs/src/pages/docs/_assets/settings-05.png differ diff --git a/docs/src/pages/docs/_assets/settings-06.png b/docs/src/pages/docs/_assets/settings-06.png index 3a582b42a..a90a10ad2 100644 Binary files a/docs/src/pages/docs/_assets/settings-06.png and b/docs/src/pages/docs/_assets/settings-06.png differ diff --git a/docs/src/pages/docs/_assets/settings-07.png b/docs/src/pages/docs/_assets/settings-07.png index 878c4ea66..4ecadd0dc 100644 Binary files a/docs/src/pages/docs/_assets/settings-07.png and b/docs/src/pages/docs/_assets/settings-07.png differ diff --git a/docs/src/pages/docs/_assets/settings-08.png b/docs/src/pages/docs/_assets/settings-08.png index a1c7d7dd1..dc289c558 100644 Binary files a/docs/src/pages/docs/_assets/settings-08.png and b/docs/src/pages/docs/_assets/settings-08.png differ diff --git a/docs/src/pages/docs/_assets/settings-09.png b/docs/src/pages/docs/_assets/settings-09.png index 8d5e577e1..db79947f2 100644 Binary files a/docs/src/pages/docs/_assets/settings-09.png and b/docs/src/pages/docs/_assets/settings-09.png differ diff --git a/docs/src/pages/docs/_assets/settings-10.png b/docs/src/pages/docs/_assets/settings-10.png index b89c9dace..30c4dd4d4 100644 Binary files a/docs/src/pages/docs/_assets/settings-10.png and b/docs/src/pages/docs/_assets/settings-10.png differ diff --git a/docs/src/pages/docs/_assets/settings-11.png b/docs/src/pages/docs/_assets/settings-11.png index b304f9f3b..8f44e83ec 100644 Binary files 
a/docs/src/pages/docs/_assets/settings-11.png and b/docs/src/pages/docs/_assets/settings-11.png differ diff --git a/docs/src/pages/docs/_assets/settings-12.png b/docs/src/pages/docs/_assets/settings-12.png index a4d45dedd..15bbc6c96 100644 Binary files a/docs/src/pages/docs/_assets/settings-12.png and b/docs/src/pages/docs/_assets/settings-12.png differ diff --git a/docs/src/pages/docs/_assets/settings-13.png b/docs/src/pages/docs/_assets/settings-13.png index f2b41ba3f..851976d4e 100644 Binary files a/docs/src/pages/docs/_assets/settings-13.png and b/docs/src/pages/docs/_assets/settings-13.png differ diff --git a/docs/src/pages/docs/_assets/settings-14.png b/docs/src/pages/docs/_assets/settings-14.png index b253cb0dc..7845fb593 100644 Binary files a/docs/src/pages/docs/_assets/settings-14.png and b/docs/src/pages/docs/_assets/settings-14.png differ diff --git a/docs/src/pages/docs/_assets/settings-15.png b/docs/src/pages/docs/_assets/settings-15.png index f739f8513..ccc52f497 100644 Binary files a/docs/src/pages/docs/_assets/settings-15.png and b/docs/src/pages/docs/_assets/settings-15.png differ diff --git a/docs/src/pages/docs/_assets/settings-17.png b/docs/src/pages/docs/_assets/settings-17.png index 83c10fa95..d48996f9b 100644 Binary files a/docs/src/pages/docs/_assets/settings-17.png and b/docs/src/pages/docs/_assets/settings-17.png differ diff --git a/docs/src/pages/docs/_assets/settings-18.png b/docs/src/pages/docs/_assets/settings-18.png index 63718d0ef..629d546fb 100644 Binary files a/docs/src/pages/docs/_assets/settings-18.png and b/docs/src/pages/docs/_assets/settings-18.png differ diff --git a/docs/src/pages/docs/_assets/tensorrt-llm-01.png b/docs/src/pages/docs/_assets/tensorrt-llm-01.png new file mode 100644 index 000000000..2f839f7a5 Binary files /dev/null and b/docs/src/pages/docs/_assets/tensorrt-llm-01.png differ diff --git a/docs/src/pages/docs/_assets/tensorrt-llm-02.png b/docs/src/pages/docs/_assets/tensorrt-llm-02.png new file mode 100644 
index 000000000..de9841874 Binary files /dev/null and b/docs/src/pages/docs/_assets/tensorrt-llm-02.png differ diff --git a/docs/src/pages/docs/_assets/threads-01.png b/docs/src/pages/docs/_assets/threads-01.png index 564c753c2..55d45d7ce 100644 Binary files a/docs/src/pages/docs/_assets/threads-01.png and b/docs/src/pages/docs/_assets/threads-01.png differ diff --git a/docs/src/pages/docs/_assets/threads-02.png b/docs/src/pages/docs/_assets/threads-02.png index f51916571..67005c575 100644 Binary files a/docs/src/pages/docs/_assets/threads-02.png and b/docs/src/pages/docs/_assets/threads-02.png differ diff --git a/docs/src/pages/docs/_assets/threads-03.png b/docs/src/pages/docs/_assets/threads-03.png index 5a5fd3238..2b975dbd6 100644 Binary files a/docs/src/pages/docs/_assets/threads-03.png and b/docs/src/pages/docs/_assets/threads-03.png differ diff --git a/docs/src/pages/docs/_assets/threads-04.png b/docs/src/pages/docs/_assets/threads-04.png index 748282868..b0ea7a31e 100644 Binary files a/docs/src/pages/docs/_assets/threads-04.png and b/docs/src/pages/docs/_assets/threads-04.png differ diff --git a/docs/src/pages/docs/_assets/threads-05.png b/docs/src/pages/docs/_assets/threads-05.png index 4c11667d8..1ea34caa9 100644 Binary files a/docs/src/pages/docs/_assets/threads-05.png and b/docs/src/pages/docs/_assets/threads-05.png differ diff --git a/docs/src/pages/docs/_assets/trouble-shooting-01.png b/docs/src/pages/docs/_assets/trouble-shooting-01.png index a12ea97b0..82c9a20b5 100644 Binary files a/docs/src/pages/docs/_assets/trouble-shooting-01.png and b/docs/src/pages/docs/_assets/trouble-shooting-01.png differ diff --git a/docs/src/pages/docs/_assets/trouble-shooting-02.png b/docs/src/pages/docs/_assets/trouble-shooting-02.png index 290c4a8ce..7c1a2d296 100644 Binary files a/docs/src/pages/docs/_assets/trouble-shooting-02.png and b/docs/src/pages/docs/_assets/trouble-shooting-02.png differ diff --git a/docs/src/pages/docs/configure-extensions.mdx 
b/docs/src/pages/docs/configure-extensions.mdx index c72d8c6a5..71d226554 100644 --- a/docs/src/pages/docs/configure-extensions.mdx +++ b/docs/src/pages/docs/configure-extensions.mdx @@ -55,7 +55,7 @@ Extensions in Jan are configured through a JSON file that defines their behavior "productName": "Conversational", "version": "1.0.0", "main": "dist/index.js", - "description": "This extension enables conversations and state persistence via your filesystem", + "description": "This extension enables conversations and state persistence via your filesystem.", "url": "extension://@janhq/conversational-extension/dist/index.js" }, "@janhq/inference-anthropic-extension": { @@ -70,7 +70,7 @@ Extensions in Jan are configured through a JSON file that defines their behavior "productName": "Anthropic Inference Engine", "version": "1.0.2", "main": "dist/index.js", - "description": "This extension enables Anthropic chat completion API calls", + "description": "This extension enables Anthropic chat completion API calls.", "url": "extension://@janhq/inference-anthropic-extension/dist/index.js" }, "@janhq/inference-triton-trt-llm-extension": { @@ -85,7 +85,7 @@ Extensions in Jan are configured through a JSON file that defines their behavior "productName": "Triton-TRT-LLM Inference Engine", "version": "1.0.0", "main": "dist/index.js", - "description": "This extension enables Nvidia's TensorRT-LLM as an inference engine option", + "description": "This extension enables Nvidia's TensorRT-LLM as an inference engine option.", "url": "extension://@janhq/inference-triton-trt-llm-extension/dist/index.js" }, "@janhq/inference-mistral-extension": { @@ -100,7 +100,7 @@ Extensions in Jan are configured through a JSON file that defines their behavior "productName": "MistralAI Inference Engine", "version": "1.0.1", "main": "dist/index.js", - "description": "This extension enables Mistral chat completion API calls", + "description": "This extension enables Mistral chat completion API calls.", "url": 
"extension://@janhq/inference-mistral-extension/dist/index.js" }, "@janhq/inference-martian-extension": { @@ -115,7 +115,7 @@ Extensions in Jan are configured through a JSON file that defines their behavior "productName": "Martian Inference Engine", "version": "1.0.1", "main": "dist/index.js", - "description": "This extension enables Martian chat completion API calls", + "description": "This extension enables Martian chat completion API calls.", "url": "extension://@janhq/inference-martian-extension/dist/index.js" }, "@janhq/inference-openrouter-extension": { @@ -130,7 +130,7 @@ Extensions in Jan are configured through a JSON file that defines their behavior "productName": "OpenRouter Inference Engine", "version": "1.0.0", "main": "dist/index.js", - "description": "This extension enables Open Router chat completion API calls", + "description": "This extension enables Open Router chat completion API calls.", "url": "extension://@janhq/inference-openrouter-extension/dist/index.js" }, "@janhq/inference-nvidia-extension": { @@ -145,7 +145,7 @@ Extensions in Jan are configured through a JSON file that defines their behavior "productName": "NVIDIA NIM Inference Engine", "version": "1.0.1", "main": "dist/index.js", - "description": "This extension enables NVIDIA chat completion API calls", + "description": "This extension enables NVIDIA chat completion API calls.", "url": "extension://@janhq/inference-nvidia-extension/dist/index.js" }, "@janhq/inference-groq-extension": { @@ -160,7 +160,7 @@ Extensions in Jan are configured through a JSON file that defines their behavior "productName": "Groq Inference Engine", "version": "1.0.1", "main": "dist/index.js", - "description": "This extension enables fast Groq chat completion API calls", + "description": "This extension enables fast Groq chat completion API calls.", "url": "extension://@janhq/inference-groq-extension/dist/index.js" }, "@janhq/inference-openai-extension": { @@ -175,7 +175,7 @@ Extensions in Jan are configured 
through a JSON file that defines their behavior "productName": "OpenAI Inference Engine", "version": "1.0.2", "main": "dist/index.js", - "description": "This extension enables OpenAI chat completion API calls", + "description": "This extension enables OpenAI chat completion API calls.", "url": "extension://@janhq/inference-openai-extension/dist/index.js" }, "@janhq/inference-cohere-extension": { @@ -190,7 +190,7 @@ Extensions in Jan are configured through a JSON file that defines their behavior "productName": "Cohere Inference Engine", "version": "1.0.0", "main": "dist/index.js", - "description": "This extension enables Cohere chat completion API calls", + "description": "This extension enables Cohere chat completion API calls.", "url": "extension://@janhq/inference-cohere-extension/dist/index.js" }, "@janhq/model-extension": { @@ -205,7 +205,7 @@ Extensions in Jan are configured through a JSON file that defines their behavior "productName": "Model Management", "version": "1.0.33", "main": "dist/index.js", - "description": "Model Management Extension provides model exploration and seamless downloads", + "description": "Model Management Extension provides model exploration and seamless downloads.", "url": "extension://@janhq/model-extension/dist/index.js" }, "@janhq/monitoring-extension": { @@ -220,7 +220,7 @@ Extensions in Jan are configured through a JSON file that defines their behavior "productName": "System Monitoring", "version": "1.0.10", "main": "dist/index.js", - "description": "This extension provides system health and OS level data", + "description": "This extension provides system health and OS level data.", "url": "extension://@janhq/monitoring-extension/dist/index.js" }, "@janhq/assistant-extension": { @@ -235,7 +235,7 @@ Extensions in Jan are configured through a JSON file that defines their behavior "productName": "Jan Assistant", "version": "1.0.1", "main": "dist/index.js", - "description": "This extension enables assistants, including Jan, a 
default assistant that can call all downloaded models", + "description": "This extension enables assistants, including Jan, a default assistant that can call all downloaded models.", "url": "extension://@janhq/assistant-extension/dist/index.js" }, "@janhq/tensorrt-llm-extension": { diff --git a/docs/src/pages/docs/index.mdx b/docs/src/pages/docs/index.mdx index 23c5e3ae9..f247b76a9 100644 --- a/docs/src/pages/docs/index.mdx +++ b/docs/src/pages/docs/index.mdx @@ -25,7 +25,7 @@ import FAQBox from '@/components/FaqBox' ![Jan's Cover Image](./_assets/jan-app.png) -Jan is a ChatGPT-alternative that runs 100% offline on your desktop & mobile (*comming soon*). Our goal is to make it easy for a layperson[^1] to download and run LLMs and use AI with full control and [privacy](https://www.reuters.com/legal/legalindustry/privacy-paradox-with-ai-2023-10-31/). +Jan is a ChatGPT-alternative that runs 100% offline on your desktop & mobile (*coming soon*). Our goal is to make it easy for a layperson[^1] to download and run LLMs and use AI with full control and [privacy](https://www.reuters.com/legal/legalindustry/privacy-paradox-with-ai-2023-10-31/). Jan is powered by [Cortex](https://cortex.so/), our embeddable local AI engine. 
@@ -46,7 +46,7 @@ You'll be able to use it with [Continue.dev](https://jan.ai/integrations/coding/ ### Philosophy Jan is built to be [user-owned](about#-user-owned): -- Open source via the [AGPLv3 license](https://github.com/janhq/jan/blob/dev/LICENSE) +- Open source via the [AGPLv3 license](https://github.com/menloresearch/jan/blob/dev/LICENSE) - [Local-first](https://www.inkandswitch.com/local-first/), with all data stored locally - Runs 100% offline, with privacy by default - Free choice of AI models, both local and cloud-based @@ -134,7 +134,7 @@ Jan has an extensible architecture like VSCode and Obsidian - you can build cust - Contributions can be made through [GitHub](https://github.com/janhq/jan) and [Discord](https://discord.gg/Exe46xPMbK), where you can also suggest features and make pull requests. No need to ask for permission. We're fully open-source! + Contributions can be made through [GitHub](https://github.com/menloresearch/jan) and [Discord](https://discord.gg/Exe46xPMbK), where you can also suggest features and make pull requests. No need to ask for permission. We're fully open-source! @@ -150,7 +150,7 @@ Jan has an extensible architecture like VSCode and Obsidian - you can build cust Yes! We love the self-hosted movement. You can: - [Download Jan](./download.mdx) and run it directly. - - Fork and build from our [GitHub](https://github.com/janhq/jan) repository. + - Fork and build from our [GitHub](https://github.com/menloresearch/jan) repository. @@ -158,10 +158,10 @@ Jan has an extensible architecture like VSCode and Obsidian - you can build cust - Yes! We love hiring from our community. Check out our open positions at [Careers](https://homebrew.bamboohr.com/careers). + Yes! We love hiring from our community. Check out our open positions at [Careers](https://menlo.bamboohr.com/careers). 
## Footnotes -[^1]: Our definition of "Non-technical" == don't need to know how to use Command Line \ No newline at end of file +[^1]: Our definition of "Non-technical" == don't need to know how to use Command Line diff --git a/docs/src/pages/docs/install-engines.mdx b/docs/src/pages/docs/install-engines.mdx index 4e99d337d..cded71c83 100644 --- a/docs/src/pages/docs/install-engines.mdx +++ b/docs/src/pages/docs/install-engines.mdx @@ -47,8 +47,8 @@ To add a new remote engine: |-------|-------------|----------| | Engine Name | Name for your engine (e.g., "OpenAI", "Claude") | ✓ | | API URL | The base URL of the provider's API | ✓ | -| API Key | Your authentication key from the provider | ✓ | -| Model List URL | URL for fetching available models | | +| API Key | Your authentication key to activate this engine | ✓ | +| Model List URL | The endpoint URL to fetch available models | | API Key Template | Custom authorization header format | | | Request Format Conversion | Function to convert Jan's request format to provider's format | | | Response Format Conversion | Function to convert provider's response format to Jan's format | | diff --git a/docs/src/pages/docs/install-extensions.mdx b/docs/src/pages/docs/install-extensions.mdx index d5b27013c..53d535da4 100644 --- a/docs/src/pages/docs/install-extensions.mdx +++ b/docs/src/pages/docs/install-extensions.mdx @@ -96,7 +96,7 @@ my-extension/ ### Example Extension Template -You can find a template for creating Jan extensions in our [example repository](https://github.com/janhq/extension-template). +You can find a template for creating Jan extensions in our [example repository](https://github.com/menloresearch/extension-template). 
## Install Extensions To install a custom extension in Jan: diff --git a/docs/src/pages/docs/models/manage-models.mdx b/docs/src/pages/docs/models/manage-models.mdx index 01f0fc06f..797d08853 100644 --- a/docs/src/pages/docs/models/manage-models.mdx +++ b/docs/src/pages/docs/models/manage-models.mdx @@ -18,7 +18,7 @@ keywords: ] --- import { Callout, Steps } from 'nextra/components' -import { Settings, EllipsisVertical, Plus, FolderOpen, Pencil } from 'lucide-react' +import { Settings, Trash2, EllipsisVertical, Plus, FolderOpen, Pencil } from 'lucide-react' # Model Management @@ -201,12 +201,12 @@ Modify model parameters under the settings array. Key fields to configure: ## Cloud model -When using cloud models, be aware of any associated costs and rate limits from the providers. +When using cloud models, be aware of any associated costs and rate limits from the providers. See detailed guide for each cloud model provider [here](/docs/remote-models/anthropic). Jan supports connecting to various AI cloud providers that are OpenAI API-compatible, including: OpenAI (GPT-4, o1,...), Anthropic (Claude), Groq, Mistral, and more. 1. Navigate to **Settings** () -2. Under **Remote Engines** section in the left sidebar, choose your preferred engines (OpenAI, Anthropic, etc.) +2. Under **Remote Engine** section in the left sidebar, choose your preferred engines (OpenAI, Anthropic, etc.) 3. Enter your API key 4. The activated cloud models will be available in your model selector in **Threads** @@ -214,7 +214,25 @@ Jan supports connecting to various AI cloud providers that are OpenAI API-compat ![Download Model](../_assets/model-management-06.png)
-Cloud models cannot be deleted, but you can hide them by disabling their respective provider engines in **Settings** > **Engines**. +You can manage & hide respective provider engines in **Settings** > **Engines**:
![Download Model](../_assets/model-management-07.png) +
+ +### Add Models +1. In your respective provider's settings page, click **Add Model** +2. Enter the **model ID**. Check detailed model IDs in the provider's model list, for example, `claude-3-opus-latest`. +3. A success message will appear when the model is added + +
+![Add Model](../_assets/model-management-08.png) +
+ +### Delete Models +1. In your respective provider's settings page, choose the model you want to delete +2. Click **Trash Bin** () icon +3. The model will be removed from your available models list + +
+![Delete Model](../_assets/model-management-09.png)
\ No newline at end of file diff --git a/docs/src/pages/docs/remote-models/_meta.json b/docs/src/pages/docs/remote-models/_meta.json index 529047774..62709526e 100644 --- a/docs/src/pages/docs/remote-models/_meta.json +++ b/docs/src/pages/docs/remote-models/_meta.json @@ -7,6 +7,14 @@ "title": "Cohere", "href": "/docs/remote-models/cohere" }, + "deepseek": { + "title": "DeepSeek", + "href": "/docs/remote-models/deepseek" + }, + "google": { + "title": "Google", + "href": "/docs/remote-models/google" + }, "groq": { "title": "Groq", "href": "/docs/remote-models/groq" @@ -20,7 +28,7 @@ "href": "/docs/remote-models/mistralai" }, "nvidia-nim": { - "title": "Nvidia NIM", + "title": "Nvidia", "href": "/docs/remote-models/nvidia-nim" }, "openai": { diff --git a/docs/src/pages/docs/remote-models/anthropic.mdx b/docs/src/pages/docs/remote-models/anthropic.mdx index 8b7fdbccd..cf83782b9 100644 --- a/docs/src/pages/docs/remote-models/anthropic.mdx +++ b/docs/src/pages/docs/remote-models/anthropic.mdx @@ -60,7 +60,7 @@ There are two ways to add your Anthropic API keys in Jan: ## Available Anthropic Models -Jan automatically includes Anthropic's available models. In case you want to use a specific Anthropic model that you cannot find in **Jan**, follow instructions in [Manual Setup](/docs/models/manage-models#4-manual-setup) to add custom models: +Jan automatically includes Anthropic's available models. In case you want to use a specific Anthropic model that you cannot find in **Jan**, follow instructions in [Add Cloud Models](/docs/models/manage-models#add-models-1): - See list of available models in [Anthropic Models](https://docs.anthropic.com/claude/docs/models-overview). - The `id` property must match the model name in the list. For example, `claude-3-opus-20240229`, `claude-3-sonnet-20240229`, or `claude-2.1`. 
diff --git a/docs/src/pages/docs/remote-models/cohere.mdx b/docs/src/pages/docs/remote-models/cohere.mdx index 41e60431d..c53e7e84b 100644 --- a/docs/src/pages/docs/remote-models/cohere.mdx +++ b/docs/src/pages/docs/remote-models/cohere.mdx @@ -60,7 +60,7 @@ There are two ways to add your Cohere API keys in Jan: ## Available Cohere Models -Jan automatically includes Cohere's available models. In case you want to use a specific Cohere model that you cannot find in **Jan**, follow instructions in [Manual Setup](/docs/models/manage-models#4-manual-setup) to add custom models: +Jan automatically includes Cohere's available models. In case you want to use a specific Cohere model that you cannot find in **Jan**, follow instructions in [Add Cloud Models](/docs/models/manage-models#add-models-1): - See list of available models in [Cohere Documentation](https://docs.cohere.com/v2/docs/models). - The `id` property must match the model name in the list. For example, `command-nightly` or `command-light`. diff --git a/docs/src/pages/docs/remote-models/deepseek.mdx b/docs/src/pages/docs/remote-models/deepseek.mdx new file mode 100644 index 000000000..67b596846 --- /dev/null +++ b/docs/src/pages/docs/remote-models/deepseek.mdx @@ -0,0 +1,86 @@ +--- +title: DeepSeek +description: Learn how to integrate DeepSeek with Jan for enhanced functionality. +keywords: + [ + Anthropic API, + Jan, + Jan AI, + ChatGPT alternative, + conversational AI, + large language model, + integration, + Anthropic integration, + API integration + ] +--- + +import { Callout, Steps } from 'nextra/components' +import { Settings, Plus } from 'lucide-react' + +# DeepSeek + +Jan supports [DeepSeek](https://www.deepseek.com/) API integration, allowing you to use DeepSeek models through Jan's interface. + +## Integrate DeepSeek API with Jan + + +### Step 1: Get Your API Key +1. Visit [DeepSeek Console](https://platform.deepseek.com/) and sign in +2. 
Create & copy a new API key or copy your existing one + + +Ensure your API key has sufficient credits + + +### Step 2: Configure Jan +There are two ways to add your DeepSeek API keys in Jan: + +**Through Threads:** +1. In Threads, click **Model** tab in the **right sidebar** or **model selector** in input field +2. Once the selector is poped up, choose the **Cloud** tab +3. Click **Add** () icon next to **DeepSeek** +4. Once you are directed to DeepSeek settings, insert your **API Key** + +**Through Settings:** +1. Navigate to **Settings** () +2. Under **Remote Engines**, select **DeepSeek** +3. Insert your **API Key** + +
+![DeepSeek](../_assets/deepseek.png) +
+ +### Step 3: Start Using DeepSeek's Models + +1. In any existing **Threads** or create a new one +2. Select an DeepSeek model from **model selector** +3. Start chatting +
+ +## Available DeepSeek Models + +Jan automatically includes DeepSeek's available models. In case you want to use a specific DeepSeek model that you cannot find in **Jan**, follow instructions in [Add Cloud Models](/docs/models/manage-models#add-models-1): + - See list of available models in [DeepSeek Models](https://api-docs.deepseek.com/quick_start/pricing). + - The `id` property must match the model name in the list. For example, `deepseek-chat` or `deepseek-reasoner`. + +## Troubleshooting + +Common issues and solutions: + +**1. API Key Issues** +- Verify your API key is correct and not expired +- Check if you have billing set up on your DeepSeek account +- Ensure you have access to the model you're trying to use + +**2. Connection Problems** +- Check your internet connection +- Verify [DeepSeek's system status](https://status.deepseek.com/) +- Look for error messages in [Jan's logs](/docs/troubleshooting#how-to-get-error-logs) + +**3. Model Unavailable** +- Confirm your API key has access to the model +- Check if you're using the correct model ID +- Verify your DeepSeek account has the necessary permissions + +Need more help? Join our [Discord community](https://discord.gg/FTk2MvZwJH). \ No newline at end of file diff --git a/docs/src/pages/docs/remote-models/google.mdx b/docs/src/pages/docs/remote-models/google.mdx new file mode 100644 index 000000000..e9dd6e716 --- /dev/null +++ b/docs/src/pages/docs/remote-models/google.mdx @@ -0,0 +1,86 @@ +--- +title: Google +description: Learn how to integrate Google with Jan for enhanced functionality. 
+keywords: + [ + Anthropic API, + Jan, + Jan AI, + ChatGPT alternative, + conversational AI, + large language model, + integration, + Anthropic integration, + API integration + ] +--- + +import { Callout, Steps } from 'nextra/components' +import { Settings, Plus } from 'lucide-react' + +# Google + +Jan supports [Google](https://ai.google/get-started/our-models/) API integration, allowing you to use Google models (like Gemini series) through Jan's interface. + +## Integrate Google API with Jan + + +### Step 1: Get Your API Key +1. Visit [Google AI Studio](https://aistudio.google.com/app/apikey) and sign in +2. Create & copy a new API key or copy your existing one + + +Ensure your API key has sufficient credits + + +### Step 2: Configure Jan +There are two ways to add your Google API keys in Jan: + +**Through Threads:** +1. In Threads, click **Model** tab in the **right sidebar** or **model selector** in input field +2. Once the selector is poped up, choose the **Cloud** tab +3. Click **Add** () icon next to **Google** +4. Once you are directed to Google settings, insert your **API Key** + +**Through Settings:** +1. Navigate to **Settings** () +2. Under **Remote Engines**, select **Google** +3. Insert your **API Key** + +
+![Google](../_assets/google.png) +
+ +### Step 3: Start Using Google's Models + +1. In any existing **Threads** or create a new one +2. Select an Google model from **model selector** +3. Start chatting +
+ +## Available Google Models + +Jan automatically includes Google's available models like Gemini series. In case you want to use a specific Google model that you cannot find in **Jan**, follow instructions in [Add Cloud Models](/docs/models/manage-models#add-models-1): + - See list of available models in [Google Models](https://ai.google.dev/gemini-api/docs/models/gemini). + - The `id` property must match the model name in the list. For example, `gemini-1.5-pro` or `gemini-2.0-flash-lite-preview`. + +## Troubleshooting + +Common issues and solutions: + +**1. API Key Issues** +- Verify your API key is correct and not expired +- Check if you have billing set up on your Google account +- Ensure you have access to the model you're trying to use + +**2. Connection Problems** +- Check your internet connection +- Verify [Gemini's system status](https://www.google.com/appsstatus/dashboard/) +- Look for error messages in [Jan's logs](/docs/troubleshooting#how-to-get-error-logs) + +**3. Model Unavailable** +- Confirm your API key has access to the model +- Check if you're using the correct model ID +- Verify your Google account has the necessary permissions + +Need more help? Join our [Discord community](https://discord.gg/FTk2MvZwJH). \ No newline at end of file diff --git a/docs/src/pages/docs/remote-models/groq.mdx b/docs/src/pages/docs/remote-models/groq.mdx index c0ccaf627..327e4a6b7 100644 --- a/docs/src/pages/docs/remote-models/groq.mdx +++ b/docs/src/pages/docs/remote-models/groq.mdx @@ -61,7 +61,7 @@ There are two ways to add your Groq API keys in Jan: ## Available Models Through Groq -Jan automatically includes Groq's available models. In case you want to use a specific Groq model that you cannot find in **Jan**, follow instructions in [Manual Setup](/docs/models/manage-models#4-manual-setup) to add custom models: +Jan automatically includes Groq's available models. 
In case you want to use a specific Groq model that you cannot find in **Jan**, follow instructions in [Add Cloud Models](/docs/models/manage-models#add-models-1): - See list of available models in [Groq Documentation](https://console.groq.com/docs/models). - The `id` property must match the model name in the list. For example, if you want to use Llama3.3 70B, you must set the `id` property to `llama-3.3-70b-versatile`. diff --git a/docs/src/pages/docs/remote-models/mistralai.mdx b/docs/src/pages/docs/remote-models/mistralai.mdx index c1051a703..68efe5073 100644 --- a/docs/src/pages/docs/remote-models/mistralai.mdx +++ b/docs/src/pages/docs/remote-models/mistralai.mdx @@ -61,7 +61,7 @@ There are two ways to add your Mistral AI keys in Jan: ## Available Mistral Models -Jan automatically includes Mistral's available models. In case you want to use a specific Mistral model that you cannot find in **Jan**, follow instructions in [Manual Setup](/docs/models/manage-models#4-manual-setup) to add custom models: +Jan automatically includes Mistral's available models. In case you want to use a specific Mistral model that you cannot find in **Jan**, follow instructions in [Add Cloud Models](/docs/models/manage-models#add-models-1): - See list of available models in [Mistral AI Documentation](https://docs.mistral.ai/platform/endpoints). - The `id` property must match the model name in the list. For example, if you want to use Mistral Large, you must set the `id` property to `mistral-large-latest` diff --git a/docs/src/pages/docs/remote-models/nvidia-nim.mdx b/docs/src/pages/docs/remote-models/nvidia-nim.mdx index f2e3b0d06..fc4e26549 100644 --- a/docs/src/pages/docs/remote-models/nvidia-nim.mdx +++ b/docs/src/pages/docs/remote-models/nvidia-nim.mdx @@ -1,5 +1,5 @@ --- -title: NVIDIA NIM +title: NVIDIA description: Learn how to integrate NVIDIA NIM with Jan for enhanced functionality. 
keywords: [ @@ -20,17 +20,17 @@ import { Settings, Plus } from 'lucide-react' # NVIDIA NIM -Jan supports [NVIDIA NIM](https://www.nvidia.com/en-us/ai/) API integration, allowing you to use NVIDIA's Large Language Models through Jan's interface. +Jan supports [Nvidia](https://www.nvidia.com/en-us/ai/) API integration, allowing you to use Nvidia's Large Language Models through Jan's interface. -NVIDIA NIM extension is only supported on Jan version 0.5.1 or later. +Nvidia engine is only supported on Jan version 0.5.1 or later. ## Integrate Nvidia NIM API with Jan ### Step 1: Get Your API Key -1. Visit [NVIDIA Docs](https://docs.nvidia.com/nim/nemo-retriever/text-reranking/latest/getting-started.html#generate-an-api-key) and generate an API key +1. Visit [Nvidia](https://docs.nvidia.com/nim/nemo-retriever/text-reranking/latest/getting-started.html#generate-an-api-key) and generate an API key 2. Copy your API key @@ -38,17 +38,17 @@ Ensure your API key has sufficient credits ### Step 2: Configure Jan -There are two ways to add your Nvidia NIM API keys in Jan: +There are two ways to add your Nvidia API keys in Jan: **Through Threads:** 1. In Threads, click **Model** tab in the **right sidebar** or **model selector** in input field 2. Once the selector is poped up, choose the **Cloud** tab -3. Click **Add** () icon next to **Nvidia NIM** -4. Once you are directed to Nvidia NIM settings, insert your **API Key** +3. Click **Add** () icon next to **Nvidia** +4. Once you are directed to Nvidia settings, insert your **API Key** **Through Settings:** 1. Navigate to **Settings** () -2. Under **Remote Engines**, select **Nvidia NIM** +2. Under **Remote Engines**, select **Nvidia** 3. Insert your **API Key**
@@ -58,14 +58,14 @@ There are two ways to add your Nvidia NIM API keys in Jan: ### Step 3: Start Using Nvidia NIM Models 1. In any existing **Threads** or create a new one -2. Select a NVIDIA NIM model from **model selector** +2. Select a Nvidia model from **model selector** 3. Start chatting
## Available Nvidia NIM Models -Jan automatically includes NVIDIA NIM's available models. In case you want to use a specific model that you cannot find in **Jan**, follow instructions in [Manual Setup](/docs/models/manage-models#4-manual-setup) to add custom models: - - See list of available models in [Nvidia NIM Documentation](https://build.nvidia.com/models). +Jan automatically includes Nvidia's available models. In case you want to use a specific model that you cannot find in **Jan**, follow instructions in [Add Cloud Models](/docs/models/manage-models#add-models-1): + - See list of available models in [Nvidia Documentation](https://build.nvidia.com/models). - The `id` property must match the model name in the list. ## Troubleshooting @@ -74,18 +74,18 @@ Common issues and solutions: **1. API Key Issues** - Verify your API key is correct and not expired -- Check if you have billing set up on your NVIDIA account +- Check if you have billing set up on your Nvidia account - Ensure you have access to the model you're trying to use **2. Connection Problems** - Check your internet connection -- Verify NVIDIA's system status +- Verify Nvidia's system status - Look for error messages in [Jan's logs](/docs/troubleshooting#how-to-get-error-logs) **3. Model Unavailable** - Confirm your API key has access to the model - Check if you're using the correct model ID -- Verify your NVIDIA account has the necessary permissions +- Verify your Nvidia account has the necessary permissions - Make sure you're using Jan version 0.5.1 or later Need more help? Join our [Discord community](https://discord.gg/FTk2MvZwJH) or check the [Nvidia documentation](https://docs.nvidia.com/nim/large-language-models/latest/getting-started.html). 
\ No newline at end of file diff --git a/docs/src/pages/docs/remote-models/openai.mdx b/docs/src/pages/docs/remote-models/openai.mdx index 502891f3d..f3cb63ef6 100644 --- a/docs/src/pages/docs/remote-models/openai.mdx +++ b/docs/src/pages/docs/remote-models/openai.mdx @@ -62,7 +62,7 @@ Start chatting ## Available OpenAI Models -Jan automatically includes popular OpenAI models. In case you want to use a specific OpenAI model that you cannot find in Jan, follow instructions in [Manual Setup](/docs/models/manage-models#4-manual-setup) to add custom models: +Jan automatically includes popular OpenAI models. In case you want to use a specific OpenAI model that you cannot find in Jan, follow instructions in [Add Cloud Models](/docs/models/manage-models#add-models-1): - See list of available models in [OpenAI Platform](https://platform.openai.com/docs/models/overview). - The id property must match the model name in the list. For example, if you want to use the [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo), you must set the id property to gpt-4-1106-preview. diff --git a/docs/src/pages/docs/troubleshooting.mdx b/docs/src/pages/docs/troubleshooting.mdx index f2fdf3ea8..900bd5918 100644 --- a/docs/src/pages/docs/troubleshooting.mdx +++ b/docs/src/pages/docs/troubleshooting.mdx @@ -393,7 +393,7 @@ The "Unexpected token" error usually relates to OpenAI API authentication or reg ## Need Further Support? If you can't find what you need in our troubleshooting guide, feel free reach out to us for extra help: - **Copy** your [app logs](/docs/troubleshooting#how-to-get-error-logs) -- Go to our [Discord](https://discord.com/invite/FTk2MvZwJH) & send it to **#🆘|get-help** channel for further support. +- Go to our [Discord](https://discord.com/invite/FTk2MvZwJH) & send it to **#🆘|jan-help** channel for further support. 
diff --git a/docs/src/pages/download.mdx b/docs/src/pages/download.mdx index 54f68001f..be5684824 100644 --- a/docs/src/pages/download.mdx +++ b/docs/src/pages/download.mdx @@ -19,7 +19,7 @@ keywords: import Download from "@/components/Download" export const getStaticProps = async() => { - const resRelease = await fetch('https://api.github.com/repos/janhq/jan/releases/latest') + const resRelease = await fetch('https://api.github.com/repos/menloresearch/jan/releases/latest') const release = await resRelease.json() return { diff --git a/docs/src/pages/index.mdx b/docs/src/pages/index.mdx index ef3484297..bb97f123e 100644 --- a/docs/src/pages/index.mdx +++ b/docs/src/pages/index.mdx @@ -19,9 +19,9 @@ keywords: import Home from "@/components/Home" export const getStaticProps = async() => { - const resReleaseLatest = await fetch('https://api.github.com/repos/janhq/jan/releases/latest') - const resRelease = await fetch('https://api.github.com/repos/janhq/jan/releases?per_page=500') - const resRepo = await fetch('https://api.github.com/repos/janhq/jan') + const resReleaseLatest = await fetch('https://api.github.com/repos/menloresearch/jan/releases/latest') + const resRelease = await fetch('https://api.github.com/repos/menloresearch/jan/releases?per_page=500') + const resRepo = await fetch('https://api.github.com/repos/menloresearch/jan') const repo = await resRepo.json() const latestRelease = await resReleaseLatest.json() const release = await resRelease.json() diff --git a/docs/src/pages/post/benchmarking-nvidia-tensorrt-llm.mdx b/docs/src/pages/post/benchmarking-nvidia-tensorrt-llm.mdx index cb384e553..fca8ed526 100644 --- a/docs/src/pages/post/benchmarking-nvidia-tensorrt-llm.mdx +++ b/docs/src/pages/post/benchmarking-nvidia-tensorrt-llm.mdx @@ -14,12 +14,12 @@ import CTABlog from '@/components/Blog/CTA' Jan now supports [NVIDIA TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) in addition to [llama.cpp](https://github.com/ggerganov/llama.cpp), making Jan 
multi-engine and ultra-fast for users with Nvidia GPUs. -We've been excited for TensorRT-LLM for a while, and [had a lot of fun implementing it](https://github.com/janhq/nitro-tensorrt-llm). As part of the process, we've run some benchmarks, to see how TensorRT-LLM fares on consumer hardware (e.g. [4090s](https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/), [3090s](https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/)) we commonly see in the [Jan's hardware community](https://discord.com/channels/1107178041848909847/1201834752206974996). +We've been excited for TensorRT-LLM for a while, and [had a lot of fun implementing it](https://github.com/menloresearch/nitro-tensorrt-llm). As part of the process, we've run some benchmarks, to see how TensorRT-LLM fares on consumer hardware (e.g. [4090s](https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/), [3090s](https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/)) we commonly see in the [Jan's hardware community](https://discord.com/channels/1107178041848909847/1201834752206974996). **Give it a try!** Jan's [TensorRT-LLM extension](/docs/built-in/tensorrt-llm) is available in Jan v0.4.9 and up ([see more](/docs/built-in/tensorrt-llm)). We precompiled some TensorRT-LLM models for you to try: `Mistral 7b`, `TinyLlama-1.1b`, `TinyJensen-1.1b` 😂 - Bugs or feedback? Let us know on [GitHub](https://github.com/janhq/jan) or via [Discord](https://discord.com/channels/1107178041848909847/1201832734704795688). + Bugs or feedback? Let us know on [GitHub](https://github.com/menloresearch/jan) or via [Discord](https://discord.com/channels/1107178041848909847/1201832734704795688). 
diff --git a/docs/src/pages/post/rag-is-not-enough.mdx b/docs/src/pages/post/rag-is-not-enough.mdx index 699954f58..36cf50e2d 100644 --- a/docs/src/pages/post/rag-is-not-enough.mdx +++ b/docs/src/pages/post/rag-is-not-enough.mdx @@ -13,7 +13,7 @@ import CTABlog from '@/components/Blog/CTA' ## Abstract -We present a straightforward approach to customizing small, open-source models using fine-tuning and RAG that outperforms GPT-3.5 for specialized use cases. With it, we achieved superior Q&A results of [technical documentation](https://nitro.jan.ai/docs) for a small codebase [codebase](https://github.com/janhq/nitro). +We present a straightforward approach to customizing small, open-source models using fine-tuning and RAG that outperforms GPT-3.5 for specialized use cases. With it, we achieved superior Q&A results of [technical documentation](https://nitro.jan.ai/docs) for a small codebase [codebase](https://github.com/menloresearch/nitro). In short, (1) extending a general foundation model like [Mistral](https://huggingface.co/mistralai/Mistral-7B-v0.1) with strong math and coding, and (2) training it over a high-quality, synthetic dataset generated from the intended corpus, and (3) adding RAG capabilities, can lead to significant accuracy improvements. @@ -93,11 +93,11 @@ This final model can be found [here on Huggingface](https://huggingface.co/jan-h As an additional step, we also added [Retrieval Augmented Generation (RAG)](https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/) as an experiment parameter. -A simple RAG setup was done using **[Llamaindex](https://www.llamaindex.ai/)** and the **[bge-en-base-v1.5 embedding](https://huggingface.co/BAAI/bge-base-en-v1.5)** model for efficient documentation retrieval and question-answering. You can find the RAG implementation [here](https://github.com/janhq/open-foundry/blob/main/rag-is-not-enough/rag/nitro_rag.ipynb). 
+A simple RAG setup was done using **[Llamaindex](https://www.llamaindex.ai/)** and the **[bge-en-base-v1.5 embedding](https://huggingface.co/BAAI/bge-base-en-v1.5)** model for efficient documentation retrieval and question-answering. You can find the RAG implementation [here](https://github.com/menloresearch/open-foundry/blob/main/rag-is-not-enough/rag/nitro_rag.ipynb). ## Benchmarking the Results -We curated a new set of [50 multiple-choice questions](https://github.com/janhq/open-foundry/blob/main/rag-is-not-enough/rag/mcq_nitro.csv) (MCQ) based on the Nitro docs. The questions had varying levels of difficulty and had trick components that challenged the model's ability to discern misleading information. +We curated a new set of [50 multiple-choice questions](https://github.com/menloresearch/open-foundry/blob/main/rag-is-not-enough/rag/mcq_nitro.csv) (MCQ) based on the Nitro docs. The questions had varying levels of difficulty and had trick components that challenged the model's ability to discern misleading information. ![image](https://hackmd.io/_uploads/By9vaE1Ta.png) @@ -121,7 +121,7 @@ We conclude that this combination of model merging + finetuning + RAG yields pro Anecdotally, we’ve had some success using this model in practice to onboard new team members to the Nitro codebase. -A full research report with more statistics can be found [here](https://github.com/janhq/open-foundry/blob/main/rag-is-not-enough/README.md). +A full research report with more statistics can be found [here](https://github.com/menloresearch/open-foundry/blob/main/rag-is-not-enough/README.md). # References diff --git a/docs/src/pages/post/run-ai-models-locally.mdx b/docs/src/pages/post/run-ai-models-locally.mdx index 7e8398aab..efe8bc594 100644 --- a/docs/src/pages/post/run-ai-models-locally.mdx +++ b/docs/src/pages/post/run-ai-models-locally.mdx @@ -1,6 +1,6 @@ --- title: "How to run AI models locally as a beginner?" 
-description: "A straightforward guide to running AI models locally for enhanced privacy, regardless of your background." +description: "A straightforward guide to running AI models locally on your computer, regardless of your background." tags: AI, local models, Jan, GGUF, privacy, local AI categories: guides date: 2025-01-31 diff --git a/docs/src/pages/privacy.mdx b/docs/src/pages/privacy.mdx index 44127a37a..2700953e0 100644 --- a/docs/src/pages/privacy.mdx +++ b/docs/src/pages/privacy.mdx @@ -6,11 +6,11 @@ import { Callout } from 'nextra/components' # Privacy -Homebrew Computer Company is committed to protecting your privacy and ensuring that your personal information is handled safely and responsibly. This policy outlines how we collect, store, and use your personal information when you use any of our products. +Menlo Research is committed to protecting your privacy and ensuring that your personal information is handled safely and responsibly. This policy outlines how we collect, store, and use your personal information when you use any of our products. ## Data Collection -Jan, Cortex, and all Homebrew Computer Company products do not collect personally identifying information. You can read about [our philosophy](/about#philosophy) here and audit our open-source codebases. +Jan, Cortex, and all Menlo Research products do not collect personally identifying information. You can read about [our philosophy](/about#philosophy) here and audit our open-source codebases. 
### When you voluntarily provide data diff --git a/docs/src/pages/support.mdx b/docs/src/pages/support.mdx index ef3c90e7f..13f6d9f85 100644 --- a/docs/src/pages/support.mdx +++ b/docs/src/pages/support.mdx @@ -4,7 +4,7 @@ title: Support - Jan # Support -- Bugs & requests: file a GitHub ticket [here](https://github.com/janhq/jan/issues) +- Bugs & requests: file a GitHub ticket [here](https://github.com/menloresearch/jan/issues) - For discussion: join our Discord [here](https://discord.gg/FTk2MvZwJH) - For business inquiries: email hello@jan.ai - For jobs: please email hr@jan.ai \ No newline at end of file diff --git a/docs/src/utils/format.ts b/docs/src/utils/format.ts index 7580961d0..1ce858d59 100644 --- a/docs/src/utils/format.ts +++ b/docs/src/utils/format.ts @@ -1,5 +1,5 @@ export function formatCompactNumber(count: number) { - const formatter = Intl.NumberFormat('en', { notation: 'compact' }) + const formatter = Intl.NumberFormat('en', { notation: 'compact', maximumFractionDigits: 1 }) return formatter.format(count) } diff --git a/docs/theme.config.tsx b/docs/theme.config.tsx index d0c15a704..b030fb97c 100644 --- a/docs/theme.config.tsx +++ b/docs/theme.config.tsx @@ -30,7 +30,7 @@ const config: DocsThemeConfig = {
), - docsRepositoryBase: 'https://github.com/janhq/jan/tree/dev/docs', + docsRepositoryBase: 'https://github.com/menloresearch/jan/tree/dev/docs', feedback: { content: 'Question? Give us feedback →', labels: 'feedback', @@ -59,7 +59,7 @@ const config: DocsThemeConfig = {
- +
diff --git a/docs/yarn.lock b/docs/yarn.lock index 6c7e314f2..ac691682b 100644 --- a/docs/yarn.lock +++ b/docs/yarn.lock @@ -3,9 +3,7 @@ "@adobe/css-tools@^4.4.0": - version "4.4.1" - resolved "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.1.tgz" - integrity sha512-12WGKBQzjUAI4ayyF4IAtfw2QR/IDoqk6jTddXDhtYTJF9ASmoE1zst7cVtP0aL/F1jUJL5r+JxKXKEgHNbEUQ== + version "4.4.2" "@alloc/quick-lru@^5.2.0": version "5.2.0" @@ -32,23 +30,17 @@ integrity sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ== "@babel/parser@^7.25.3": - version "7.26.7" - resolved "https://registry.npmjs.org/@babel/parser/-/parser-7.26.7.tgz" - integrity sha512-kEvgGGgEjRUutvdVvZhbn/BxVt+5VSpwXz1j3WYXQbXDo8KzFOPNG2GQbdAiNq8g6wn1yKk7C/qrke03a84V+w== + version "7.26.10" dependencies: - "@babel/types" "^7.26.7" + "@babel/types" "^7.26.10" "@babel/runtime@^7.12.5", "@babel/runtime@^7.21.0", "@babel/runtime@^7.23.8": - version "7.26.0" - resolved "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.0.tgz" - integrity sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw== + version "7.26.10" dependencies: regenerator-runtime "^0.14.0" -"@babel/types@^7.26.7": - version "7.26.7" - resolved "https://registry.npmjs.org/@babel/types/-/types-7.26.7.tgz" - integrity sha512-t8kDRGrKXyp6+tjUh7hw2RLyclsW4TRoRvRHtSyAX9Bb5ldlFh+90YAYY6awRXrlB4G5G2izNeGySpATlFzmOg== +"@babel/types@^7.26.10": + version "7.26.10" dependencies: "@babel/helper-string-parser" "^7.25.9" "@babel/helper-validator-identifier" "^7.25.9" @@ -72,9 +64,7 @@ node-fetch "^2.0.0" "@codemirror/autocomplete@^6.0.0", "@codemirror/autocomplete@^6.12.0": - version "6.18.4" - resolved "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.4.tgz" - integrity sha512-sFAphGQIqyQZfP2ZBsSHV7xQvo9Py0rV0dW7W3IMRdS+zDuNb2l3no78CvUaWKGfzFjI4FTrLdUSj86IGb2hRA== + version "6.18.6" dependencies: "@codemirror/language" "^6.0.0" "@codemirror/state" 
"^6.0.0" @@ -82,9 +72,7 @@ "@lezer/common" "^1.0.0" "@codemirror/commands@^6.0.0", "@codemirror/commands@^6.3.3": - version "6.7.1" - resolved "https://registry.npmjs.org/@codemirror/commands/-/commands-6.7.1.tgz" - integrity sha512-llTrboQYw5H4THfhN4U3qCnSZ1SOJ60ohhz+SzU0ADGtwlc533DtklQP0vSFaQuCPDn3BPpOd1GbbnUtwNjsrw== + version "6.8.0" dependencies: "@codemirror/language" "^6.0.0" "@codemirror/state" "^6.4.0" @@ -118,9 +106,7 @@ "@lezer/html" "^1.3.0" "@codemirror/lang-javascript@^6.0.0": - version "6.2.2" - resolved "https://registry.npmjs.org/@codemirror/lang-javascript/-/lang-javascript-6.2.2.tgz" - integrity sha512-VGQfY+FCc285AhWuwjYxQyUQcYurWlxdKYT4bqwr3Twnd5wP5WSeu52t4tvvuWmljT4EmgEgZCqSieokhtY8hg== + version "6.2.3" dependencies: "@codemirror/autocomplete" "^6.0.0" "@codemirror/language" "^6.6.0" @@ -173,25 +159,19 @@ crelt "^1.0.5" "@codemirror/search@^6.0.0": - version "6.5.8" - resolved "https://registry.npmjs.org/@codemirror/search/-/search-6.5.8.tgz" - integrity sha512-PoWtZvo7c1XFeZWmmyaOp2G0XVbOnm+fJzvghqGAktBW3cufwJUWvSCcNG0ppXiBEM05mZu6RhMtXPv2hpllig== + version "6.5.10" dependencies: "@codemirror/state" "^6.0.0" "@codemirror/view" "^6.0.0" crelt "^1.0.5" "@codemirror/state@^6.0.0", "@codemirror/state@^6.4.0", "@codemirror/state@^6.5.0": - version "6.5.0" - resolved "https://registry.npmjs.org/@codemirror/state/-/state-6.5.0.tgz" - integrity sha512-MwBHVK60IiIHDcoMet78lxt6iw5gJOGSbNbOIVBHWVXIH4/Nq1+GQgLLGgI1KlnN86WDXsPudVaqYHKBIx7Eyw== + version "6.5.2" dependencies: "@marijn/find-cluster-break" "^1.0.0" "@codemirror/view@^6.0.0", "@codemirror/view@^6.17.0", "@codemirror/view@^6.23.0", "@codemirror/view@^6.23.1", "@codemirror/view@^6.27.0", "@codemirror/view@^6.35.0": - version "6.36.1" - resolved "https://registry.npmjs.org/@codemirror/view/-/view-6.36.1.tgz" - integrity sha512-miD1nyT4m4uopZaDdO2uXU/LLHliKNYL9kB1C1wJHrunHLm/rpkb5QVSokqgw9hFqEZakrdlb/VGWX8aYZTslQ== + version "6.36.4" dependencies: "@codemirror/state" "^6.5.0" style-mod "^4.1.0" 
@@ -207,15 +187,8 @@ resolved "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz" integrity sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ== -"@esbuild/darwin-arm64@0.24.2": - version "0.24.2" - resolved "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.24.2.tgz" - integrity sha512-kj3AnYWc+CekmZnS5IPu9D+HWtUI49hbnyqk0FLEJDbzCIQt7hg7ucF1SQAilhtYpIujfaHr6O0UHlzzSPdOeA== - "@eslint-community/eslint-utils@^4.2.0": - version "4.4.1" - resolved "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.1.tgz" - integrity sha512-s3O3waFUrMV8P/XaF/+ZTp1X9XBZW1a4B97ZnjQF2KYWaFD2A8KyFBsrsfSjEmjn3RGWAIuvlneuZm3CUK3jbA== + version "4.5.0" dependencies: eslint-visitor-keys "^3.4.3" @@ -245,19 +218,15 @@ integrity sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q== "@floating-ui/core@^1.6.0": - version "1.6.8" - resolved "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.8.tgz" - integrity sha512-7XJ9cPU+yI2QeLS+FCSlqNFZJq8arvswefkZrYI1yQBbftw6FyrZOxYSh+9S7z7TpeWlRt9zJ5IhM1WIL334jA== + version "1.6.9" dependencies: - "@floating-ui/utils" "^0.2.8" + "@floating-ui/utils" "^0.2.9" "@floating-ui/dom@^1.0.0": - version "1.6.12" - resolved "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.12.tgz" - integrity sha512-NP83c0HjokcGVEMeoStg317VD9W7eDlGK7457dMBANbKA6GJZdc7rjujdgqzTaz93jkGgc5P/jeWbaCHnMNc+w== + version "1.6.13" dependencies: "@floating-ui/core" "^1.6.0" - "@floating-ui/utils" "^0.2.8" + "@floating-ui/utils" "^0.2.9" "@floating-ui/react-dom@^2.0.0": version "2.1.2" @@ -266,18 +235,14 @@ dependencies: "@floating-ui/dom" "^1.0.0" -"@floating-ui/utils@^0.2.2", "@floating-ui/utils@^0.2.8": - version "0.2.8" - resolved "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.8.tgz" - integrity sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig== +"@floating-ui/utils@^0.2.2", 
"@floating-ui/utils@^0.2.9": + version "0.2.9" "@floating-ui/vue@^1.0.2": - version "1.1.5" - resolved "https://registry.npmjs.org/@floating-ui/vue/-/vue-1.1.5.tgz" - integrity sha512-ynL1p5Z+woPVSwgMGqeDrx6HrJfGIDzFyESFkyqJKilGW1+h/8yVY29Khn0LaU6wHBRwZ13ntG6reiHWK6jyzw== + version "1.1.6" dependencies: "@floating-ui/dom" "^1.0.0" - "@floating-ui/utils" "^0.2.8" + "@floating-ui/utils" "^0.2.9" vue-demi ">=0.13.0" "@headlessui/react@^1.7.17": @@ -310,9 +275,7 @@ integrity sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA== "@humanwhocodes/momoa@^3.0.1": - version "3.3.5" - resolved "https://registry.npmjs.org/@humanwhocodes/momoa/-/momoa-3.3.5.tgz" - integrity sha512-NI9codbQNjw9g4SS/cOizi8JDZ93B3oGVko8M3y0XF3gITaGDSQqea35V8fswWehnRQBLxPfZY5TJnuNhNCEzA== + version "3.3.8" "@humanwhocodes/object-schema@^2.0.3": version "2.0.3" @@ -332,9 +295,7 @@ integrity sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg== "@inquirer/figures@^1.0.3": - version "1.0.9" - resolved "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.9.tgz" - integrity sha512-BXvGj0ehzrngHTPTDqUoDT3NXL8U0RxUk2zJm2A66RhCEIWdtU1v6GuUqNAgArW4PQ9CinqIWyHdQgdwOj06zQ== + version "1.0.11" "@isaacs/cliui@^8.0.2": version "8.0.2" @@ -386,9 +347,7 @@ integrity sha512-w7ojc8ejBqr2REPsWxJjrMFsA/ysDCFICn8zEOR9mrqzOu2amhITYuLD8ag6XZf0CFXDrhKqw7+tW8cX66NaDA== "@lezer/css@^1.1.0", "@lezer/css@^1.1.7": - version "1.1.9" - resolved "https://registry.npmjs.org/@lezer/css/-/css-1.1.9.tgz" - integrity sha512-TYwgljcDv+YrV0MZFFvYFQHCfGgbPMR6nuqLabBdmZoFH3EP1gvw8t0vae326Ne3PszQkbXfVBjCnf3ZVCr0bA== + version "1.1.10" dependencies: "@lezer/common" "^1.2.0" "@lezer/highlight" "^1.0.0" @@ -510,10 +469,8 @@ resolved "https://registry.npmjs.org/@next/env/-/env-13.5.8.tgz" integrity sha512-YmiG58BqyZ2FjrF2+5uZExL2BrLr8RTQzLXNDJ8pJr0O+rPlOeDPXp1p1/4OrR3avDidzZo3D8QO2cuDv1KCkw== -"@next/env@14.2.22": - version "14.2.22" - resolved 
"https://registry.npmjs.org/@next/env/-/env-14.2.22.tgz" - integrity sha512-EQ6y1QeNQglNmNIXvwP/Bb+lf7n9WtgcWvtoFsHquVLCJUuxRs+6SfZ5EK0/EqkkLex4RrDySvKgKNN7PXip7Q== +"@next/env@14.2.24": + version "14.2.24" "@next/eslint-plugin-next@14.1.4": version "14.1.4" @@ -522,15 +479,11 @@ dependencies: glob "10.3.10" -"@next/swc-darwin-arm64@14.2.22": - version "14.2.22" - resolved "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.22.tgz" - integrity sha512-HUaLiehovgnqY4TMBZJ3pDaOsTE1spIXeR10pWgdQVPYqDGQmHJBj3h3V6yC0uuo/RoY2GC0YBFRkOX3dI9WVQ== +"@next/swc-darwin-arm64@14.2.24": + version "14.2.24" "@next/third-parties@^14.1.4": - version "14.2.22" - resolved "https://registry.npmjs.org/@next/third-parties/-/third-parties-14.2.22.tgz" - integrity sha512-DDa1uhvLE+bz0OUUK1Xk4m4yduo3yVKvXyjywiLeApX8EpwyF4dfKrM7DLKIgYJyN2At5Eg+lJR68MY4sa/rgw== + version "14.2.24" dependencies: third-party-capital "1.0.20" @@ -560,34 +513,30 @@ resolved "https://registry.npmjs.org/@nolyfill/is-core-module/-/is-core-module-1.0.39.tgz" integrity sha512-nn5ozdjYQpUCZlWGuxcJY/KpxkWQs4DcbMCmKojjyrYDEAGy4Ce19NN4v5MduafTwJlbKc99UA8YhSVqq9yPZA== -"@parcel/watcher-darwin-arm64@2.5.0": - version "2.5.0" - resolved "https://registry.npmjs.org/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.0.tgz" - integrity sha512-hyZ3TANnzGfLpRA2s/4U1kbw2ZI4qGxaRJbBH2DCSREFfubMswheh8TeiC1sGZ3z2jUf3s37P0BBlrD3sjVTUw== +"@parcel/watcher-darwin-arm64@2.5.1": + version "2.5.1" "@parcel/watcher@^2.4.1": - version "2.5.0" - resolved "https://registry.npmjs.org/@parcel/watcher/-/watcher-2.5.0.tgz" - integrity sha512-i0GV1yJnm2n3Yq1qw6QrUrd/LI9bE8WEBOTtOkpCXHHdyN3TAGgqAK/DAT05z4fq2x04cARXt2pDmjWjL92iTQ== + version "2.5.1" dependencies: detect-libc "^1.0.3" is-glob "^4.0.3" micromatch "^4.0.5" node-addon-api "^7.0.0" optionalDependencies: - "@parcel/watcher-android-arm64" "2.5.0" - "@parcel/watcher-darwin-arm64" "2.5.0" - "@parcel/watcher-darwin-x64" "2.5.0" - "@parcel/watcher-freebsd-x64" 
"2.5.0" - "@parcel/watcher-linux-arm-glibc" "2.5.0" - "@parcel/watcher-linux-arm-musl" "2.5.0" - "@parcel/watcher-linux-arm64-glibc" "2.5.0" - "@parcel/watcher-linux-arm64-musl" "2.5.0" - "@parcel/watcher-linux-x64-glibc" "2.5.0" - "@parcel/watcher-linux-x64-musl" "2.5.0" - "@parcel/watcher-win32-arm64" "2.5.0" - "@parcel/watcher-win32-ia32" "2.5.0" - "@parcel/watcher-win32-x64" "2.5.0" + "@parcel/watcher-android-arm64" "2.5.1" + "@parcel/watcher-darwin-arm64" "2.5.1" + "@parcel/watcher-darwin-x64" "2.5.1" + "@parcel/watcher-freebsd-x64" "2.5.1" + "@parcel/watcher-linux-arm-glibc" "2.5.1" + "@parcel/watcher-linux-arm-musl" "2.5.1" + "@parcel/watcher-linux-arm64-glibc" "2.5.1" + "@parcel/watcher-linux-arm64-musl" "2.5.1" + "@parcel/watcher-linux-x64-glibc" "2.5.1" + "@parcel/watcher-linux-x64-musl" "2.5.1" + "@parcel/watcher-win32-arm64" "2.5.1" + "@parcel/watcher-win32-ia32" "2.5.1" + "@parcel/watcher-win32-x64" "2.5.1" "@pkgjs/parseargs@^0.11.0": version "0.11.0" @@ -604,12 +553,10 @@ resolved "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.1.tgz" integrity sha512-SJ31y+Q/zAyShtXJc8x83i9TYdbAfHZ++tUZnvjJJqFjzsdUnKsxPL6IEtBlxKkU7yzer//GQtZSV4GbldL3YA== -"@radix-ui/react-arrow@1.1.1": - version "1.1.1" - resolved "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.1.tgz" - integrity sha512-NaVpZfmv8SKeZbn4ijN2V3jlHA9ngBG16VnIIm22nUR0Yk8KUALyBxT3KYEUnNuch9sTE8UTsS3whzBgKOL30w== +"@radix-ui/react-arrow@1.1.2": + version "1.1.2" dependencies: - "@radix-ui/react-primitive" "2.0.1" + "@radix-ui/react-primitive" "2.0.2" "@radix-ui/react-compose-refs@1.1.1": version "1.1.1" @@ -622,33 +569,29 @@ integrity sha512-UASk9zi+crv9WteK/NU4PLvOoL3OuE6BWVKNF6hPRBtYBDXQ2u5iu3O59zUlJiTVvkyuycnqrztsHVJwcK9K+Q== "@radix-ui/react-dialog@^1.0.5": - version "1.1.4" - resolved "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.4.tgz" - integrity sha512-Ur7EV1IwQGCyaAuyDRiOLA5JIUZxELJljF+MbM/2NC0BYwfuRrbpS30BiQBJrVruscgUkieKkqXYDOoByaxIoA== 
+ version "1.1.6" dependencies: "@radix-ui/primitive" "1.1.1" "@radix-ui/react-compose-refs" "1.1.1" "@radix-ui/react-context" "1.1.1" - "@radix-ui/react-dismissable-layer" "1.1.3" + "@radix-ui/react-dismissable-layer" "1.1.5" "@radix-ui/react-focus-guards" "1.1.1" - "@radix-ui/react-focus-scope" "1.1.1" + "@radix-ui/react-focus-scope" "1.1.2" "@radix-ui/react-id" "1.1.0" - "@radix-ui/react-portal" "1.1.3" + "@radix-ui/react-portal" "1.1.4" "@radix-ui/react-presence" "1.1.2" - "@radix-ui/react-primitive" "2.0.1" - "@radix-ui/react-slot" "1.1.1" + "@radix-ui/react-primitive" "2.0.2" + "@radix-ui/react-slot" "1.1.2" "@radix-ui/react-use-controllable-state" "1.1.0" - aria-hidden "^1.1.1" - react-remove-scroll "^2.6.1" + aria-hidden "^1.2.4" + react-remove-scroll "^2.6.3" -"@radix-ui/react-dismissable-layer@1.1.3": - version "1.1.3" - resolved "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.3.tgz" - integrity sha512-onrWn/72lQoEucDmJnr8uczSNTujT0vJnA/X5+3AkChVPowr8n1yvIKIabhWyMQeMvvmdpsvcyDqx3X1LEXCPg== +"@radix-ui/react-dismissable-layer@1.1.5": + version "1.1.5" dependencies: "@radix-ui/primitive" "1.1.1" "@radix-ui/react-compose-refs" "1.1.1" - "@radix-ui/react-primitive" "2.0.1" + "@radix-ui/react-primitive" "2.0.2" "@radix-ui/react-use-callback-ref" "1.1.0" "@radix-ui/react-use-escape-keydown" "1.1.0" @@ -657,13 +600,11 @@ resolved "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.1.tgz" integrity sha512-pSIwfrT1a6sIoDASCSpFwOasEwKTZWDw/iBdtnqKO7v6FeOzYJ7U53cPzYFVR3geGGXgVHaH+CdngrrAzqUGxg== -"@radix-ui/react-focus-scope@1.1.1": - version "1.1.1" - resolved "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.1.tgz" - integrity sha512-01omzJAYRxXdG2/he/+xy+c8a8gCydoQ1yOxnWNcRhrrBW5W+RQJ22EK1SaO8tb3WoUsuEw7mJjBozPzihDFjA== +"@radix-ui/react-focus-scope@1.1.2": + version "1.1.2" dependencies: "@radix-ui/react-compose-refs" "1.1.1" - "@radix-ui/react-primitive" 
"2.0.1" + "@radix-ui/react-primitive" "2.0.2" "@radix-ui/react-use-callback-ref" "1.1.0" "@radix-ui/react-icons@^1.3.0": @@ -678,28 +619,24 @@ dependencies: "@radix-ui/react-use-layout-effect" "1.1.0" -"@radix-ui/react-popper@1.2.1": - version "1.2.1" - resolved "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.1.tgz" - integrity sha512-3kn5Me69L+jv82EKRuQCXdYyf1DqHwD2U/sxoNgBGCB7K9TRc3bQamQ+5EPM9EvyPdli0W41sROd+ZU1dTCztw== +"@radix-ui/react-popper@1.2.2": + version "1.2.2" dependencies: "@floating-ui/react-dom" "^2.0.0" - "@radix-ui/react-arrow" "1.1.1" + "@radix-ui/react-arrow" "1.1.2" "@radix-ui/react-compose-refs" "1.1.1" "@radix-ui/react-context" "1.1.1" - "@radix-ui/react-primitive" "2.0.1" + "@radix-ui/react-primitive" "2.0.2" "@radix-ui/react-use-callback-ref" "1.1.0" "@radix-ui/react-use-layout-effect" "1.1.0" "@radix-ui/react-use-rect" "1.1.0" "@radix-ui/react-use-size" "1.1.0" "@radix-ui/rect" "1.1.0" -"@radix-ui/react-portal@1.1.3": - version "1.1.3" - resolved "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.3.tgz" - integrity sha512-NciRqhXnGojhT93RPyDaMPfLH3ZSl4jjIFbZQ1b/vxvZEdHsBZ49wP9w8L3HzUQwep01LcWtkUvm0OVB5JAHTw== +"@radix-ui/react-portal@1.1.4": + version "1.1.4" dependencies: - "@radix-ui/react-primitive" "2.0.1" + "@radix-ui/react-primitive" "2.0.2" "@radix-ui/react-use-layout-effect" "1.1.0" "@radix-ui/react-presence@1.1.2": @@ -710,37 +647,31 @@ "@radix-ui/react-compose-refs" "1.1.1" "@radix-ui/react-use-layout-effect" "1.1.0" -"@radix-ui/react-primitive@2.0.1": - version "2.0.1" - resolved "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.0.1.tgz" - integrity sha512-sHCWTtxwNn3L3fH8qAfnF3WbUZycW93SM1j3NFDzXBiz8D6F5UTTy8G1+WFEaiCdvCVRJWj6N2R4Xq6HdiHmDg== +"@radix-ui/react-primitive@2.0.2": + version "2.0.2" dependencies: - "@radix-ui/react-slot" "1.1.1" + "@radix-ui/react-slot" "1.1.2" -"@radix-ui/react-slot@1.1.1": - version "1.1.1" - resolved 
"https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.1.tgz" - integrity sha512-RApLLOcINYJA+dMVbOju7MYv1Mb2EBp2nH4HdDzXTSyaR5optlm6Otrz1euW3HbdOR8UmmFK06TD+A9frYWv+g== +"@radix-ui/react-slot@1.1.2": + version "1.1.2" dependencies: "@radix-ui/react-compose-refs" "1.1.1" "@radix-ui/react-tooltip@^1.0.7": - version "1.1.6" - resolved "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.1.6.tgz" - integrity sha512-TLB5D8QLExS1uDn7+wH/bjEmRurNMTzNrtq7IjaS4kjion9NtzsTGkvR5+i7yc9q01Pi2KMM2cN3f8UG4IvvXA== + version "1.1.8" dependencies: "@radix-ui/primitive" "1.1.1" "@radix-ui/react-compose-refs" "1.1.1" "@radix-ui/react-context" "1.1.1" - "@radix-ui/react-dismissable-layer" "1.1.3" + "@radix-ui/react-dismissable-layer" "1.1.5" "@radix-ui/react-id" "1.1.0" - "@radix-ui/react-popper" "1.2.1" - "@radix-ui/react-portal" "1.1.3" + "@radix-ui/react-popper" "1.2.2" + "@radix-ui/react-portal" "1.1.4" "@radix-ui/react-presence" "1.1.2" - "@radix-ui/react-primitive" "2.0.1" - "@radix-ui/react-slot" "1.1.1" + "@radix-ui/react-primitive" "2.0.2" + "@radix-ui/react-slot" "1.1.2" "@radix-ui/react-use-controllable-state" "1.1.0" - "@radix-ui/react-visually-hidden" "1.1.1" + "@radix-ui/react-visually-hidden" "1.1.2" "@radix-ui/react-use-callback-ref@1.1.0": version "1.1.0" @@ -780,12 +711,10 @@ dependencies: "@radix-ui/react-use-layout-effect" "1.1.0" -"@radix-ui/react-visually-hidden@1.1.1": - version "1.1.1" - resolved "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.1.1.tgz" - integrity sha512-vVfA2IZ9q/J+gEamvj761Oq1FpWgCDaNOOIfbPVp2MVPLEomUr5+Vf7kJGwQ24YxZSlQVar7Bes8kyTo5Dshpg== +"@radix-ui/react-visually-hidden@1.1.2": + version "1.1.2" dependencies: - "@radix-ui/react-primitive" "2.0.1" + "@radix-ui/react-primitive" "2.0.2" "@radix-ui/rect@1.1.0": version "1.1.0" @@ -797,10 +726,8 @@ resolved "https://registry.npmjs.org/@replit/codemirror-css-color-picker/-/codemirror-css-color-picker-6.3.0.tgz" integrity 
sha512-19biDANghUm7Fz7L1SNMIhK48tagaWuCOHj4oPPxc7hxPGkTVY2lU/jVZ8tsbTKQPVG7BO2CBDzs7CBwb20t4A== -"@rollup/rollup-darwin-arm64@4.29.1": - version "4.29.1" - resolved "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.29.1.tgz" - integrity sha512-2ORr7T31Y0Mnk6qNuwtyNmy14MunTAMx06VAPI6/Ju52W10zk1i7i5U3vlDRWjhOI5quBcrvhkCHyF76bI7kEw== +"@rollup/rollup-darwin-arm64@4.35.0": + version "4.35.0" "@rtsao/scc@^1.1.0": version "1.1.0" @@ -808,9 +735,7 @@ integrity sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g== "@rushstack/eslint-patch@^1.3.3": - version "1.10.4" - resolved "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.10.4.tgz" - integrity sha512-WJgX9nzTqknM393q1QJDJmoW28kUfEnybeTfVNcNAPnIx210RXm2DiXiHzfNPJNIUUb1tJnz/l4QGtJ30PgWmA== + version "1.11.0" "@scalar/api-client@1.2.39": version "1.2.39" @@ -1014,15 +939,15 @@ resolved "https://registry.npmjs.org/@scalar/use-tooltip/-/use-tooltip-0.6.2.tgz" integrity sha512-ntiHkA1A/4DHS7ISqIsE4az0AvG3LovwwJpX6LcnsiezwGfIswe6DSSwX2T0OIOO1n1Amg2/VhGFg+xOyWGOKQ== -"@storybook/core@8.5.3": - version "8.5.3" - resolved "https://registry.npmjs.org/@storybook/core/-/core-8.5.3.tgz" - integrity sha512-ZLlr2pltbj/hmC54lggJTnh09FCAJR62lIdiXNwa+V+/eJz0CfD8tfGmZGKPSmaQeZBpMwAOeRM97k2oLPF+0w== +"@storybook/core@8.6.4": + version "8.6.4" + resolved "https://registry.npmjs.org/@storybook/core/-/core-8.6.4.tgz" + integrity sha512-glDbjEBi3wokw1T+KQtl93irHO9N0LCwgylWfWVXYDdQjUJ7pGRQGnw73gPX7Ds9tg3myXFC83GjmY94UYSMbA== dependencies: - "@storybook/csf" "0.1.12" + "@storybook/theming" "8.6.4" better-opn "^3.0.2" browser-assert "^1.2.1" - esbuild "^0.18.0 || ^0.19.0 || ^0.20.0 || ^0.21.0 || ^0.22.0 || ^0.23.0 || ^0.24.0" + esbuild "^0.18.0 || ^0.19.0 || ^0.20.0 || ^0.21.0 || ^0.22.0 || ^0.23.0 || ^0.24.0 || ^0.25.0" esbuild-register "^3.5.0" jsdoc-type-pratt-parser "^4.0.0" process "^0.11.10" @@ -1031,53 +956,44 @@ util "^0.12.5" ws "^8.2.3" 
-"@storybook/csf@^0.1.11": - version "0.1.13" - resolved "https://registry.npmjs.org/@storybook/csf/-/csf-0.1.13.tgz" - integrity sha512-7xOOwCLGB3ebM87eemep89MYRFTko+D8qE7EdAAq74lgdqRR5cOUtYWJLjO2dLtP94nqoOdHJo6MdLLKzg412Q== - dependencies: - type-fest "^2.19.0" - -"@storybook/csf@0.1.12": - version "0.1.12" - resolved "https://registry.npmjs.org/@storybook/csf/-/csf-0.1.12.tgz" - integrity sha512-9/exVhabisyIVL0VxTCxo01Tdm8wefIXKXfltAPTSr8cbLn5JAxGQ6QV3mjdecLGEOucfoVhAKtJfVHxEK1iqw== - dependencies: - type-fest "^2.19.0" - "@storybook/global@^5.0.0": version "5.0.0" resolved "https://registry.npmjs.org/@storybook/global/-/global-5.0.0.tgz" integrity sha512-FcOqPAXACP0I3oJ/ws6/rrPT9WGhu915Cg8D02a9YxLo0DE9zI+a9A5gRGvmQ09fiWPukqI8ZAEoQEdWUKMQdQ== -"@storybook/instrumenter@8.4.7": - version "8.4.7" - resolved "https://registry.npmjs.org/@storybook/instrumenter/-/instrumenter-8.4.7.tgz" - integrity sha512-k6NSD3jaRCCHAFtqXZ7tw8jAzD/yTEWXGya+REgZqq5RCkmJ+9S4Ytp/6OhQMPtPFX23gAuJJzTQVLcCr+gjRg== +"@storybook/instrumenter@8.6.4": + version "8.6.4" dependencies: "@storybook/global" "^5.0.0" "@vitest/utils" "^2.1.1" "@storybook/test@^8.0.8": - version "8.4.7" - resolved "https://registry.npmjs.org/@storybook/test/-/test-8.4.7.tgz" - integrity sha512-AhvJsu5zl3uG40itSQVuSy5WByp3UVhS6xAnme4FWRwgSxhvZjATJ3AZkkHWOYjnnk+P2/sbz/XuPli1FVCWoQ== + version "8.6.4" dependencies: - "@storybook/csf" "^0.1.11" "@storybook/global" "^5.0.0" - "@storybook/instrumenter" "8.4.7" + "@storybook/instrumenter" "8.6.4" "@testing-library/dom" "10.4.0" "@testing-library/jest-dom" "6.5.0" "@testing-library/user-event" "14.5.2" "@vitest/expect" "2.0.5" "@vitest/spy" "2.0.5" +"@storybook/theming@8.6.4": + version "8.6.4" + resolved "https://registry.npmjs.org/@storybook/theming/-/theming-8.6.4.tgz" + integrity sha512-g9Ns4uenC9oAWETaJ/tEKEIPMdS+CqjNWZz5Wbw1bLNhXwADZgKrVqawzZi64+bYYtQ+i8VCTjPoFa6s2eHiDQ== + "@swc/counter@^0.1.3": version "0.1.3" resolved 
"https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz" integrity sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ== -"@swc/helpers@^0.5.3", "@swc/helpers@0.5.5": +"@swc/helpers@^0.5.3": + version "0.5.15" + dependencies: + tslib "^2.8.0" + +"@swc/helpers@0.5.5": version "0.5.5" resolved "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz" integrity sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A== @@ -1086,23 +1002,17 @@ tslib "^2.4.0" "@tanstack/react-virtual@^3.0.0-beta.60": - version "3.11.2" - resolved "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.11.2.tgz" - integrity sha512-OuFzMXPF4+xZgx8UzJha0AieuMihhhaWG0tCqpp6tDzlFwOmNBPYMuLOtMJ1Tr4pXLHmgjcWhG6RlknY2oNTdQ== + version "3.13.2" dependencies: - "@tanstack/virtual-core" "3.11.2" + "@tanstack/virtual-core" "3.13.2" -"@tanstack/virtual-core@3.11.2": - version "3.11.2" - resolved "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.11.2.tgz" - integrity sha512-vTtpNt7mKCiZ1pwU9hfKPhpdVO2sVzFQsxoVBGtOSHxlrRRzYr8iQ2TlwbAcRYCcEiZ9ECAM8kBzH0v2+VzfKw== +"@tanstack/virtual-core@3.13.2": + version "3.13.2" "@tanstack/vue-virtual@^3.0.0-beta.60": - version "3.11.2" - resolved "https://registry.npmjs.org/@tanstack/vue-virtual/-/vue-virtual-3.11.2.tgz" - integrity sha512-y0b1p1FTlzxcSt/ZdGWY1AZ52ddwSU69pvFRYAELUSdLLxV8QOPe9dyT/KATO43UCb3DAwiyzi96h2IoYstBOQ== + version "3.13.2" dependencies: - "@tanstack/virtual-core" "3.11.2" + "@tanstack/virtual-core" "3.13.2" "@testing-library/dom@>=7.21.4", "@testing-library/dom@10.4.0": version "10.4.0" @@ -1178,9 +1088,7 @@ integrity sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ== "@types/d3-scale@^4.0.3": - version "4.0.8" - resolved "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.8.tgz" - integrity 
sha512-gkK1VVTr5iNiYJ7vWDI+yUFFlszhNMtVeneJ6lUTKPjprsvLLI9/tgEGiXJOnlINJA8FyA88gfnQsHbybVZrYQ== + version "4.0.9" dependencies: "@types/d3-time" "*" @@ -1283,14 +1191,10 @@ integrity sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw== "@types/ms@*": - version "0.7.34" - resolved "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz" - integrity sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g== + version "2.1.0" "@types/node@*", "@types/node@^18.0.0 || >=20.0.0", "@types/node@^20", "@types/node@^20.11.26": - version "20.17.11" - resolved "https://registry.npmjs.org/@types/node/-/node-20.17.11.tgz" - integrity sha512-Ept5glCK35R8yeyIeYlRIZtX6SLRyqMhOFTgj5SOkMpLTdw3SEHI9fHx60xaUZ+V1aJxQJODE+7/j5ocZydYTg== + version "20.17.24" dependencies: undici-types "~6.19.2" @@ -1319,16 +1223,16 @@ dependencies: "@types/node" "*" -"@types/unist@*", "@types/unist@^2", "@types/unist@^2.0.0": - version "2.0.11" - resolved "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz" - integrity sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA== - -"@types/unist@^3.0.0": +"@types/unist@*", "@types/unist@^3.0.0": version "3.0.3" resolved "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz" integrity sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q== +"@types/unist@^2", "@types/unist@^2.0.0": + version "2.0.11" + resolved "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz" + integrity sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA== + "@types/web-bluetooth@^0.0.20": version "0.0.20" resolved "https://registry.npmjs.org/@types/web-bluetooth/-/web-bluetooth-0.0.20.tgz" @@ -1381,41 +1285,32 @@ eslint-visitor-keys "^3.4.1" "@uiw/codemirror-themes@^4.21.21": - version "4.23.7" - resolved 
"https://registry.npmjs.org/@uiw/codemirror-themes/-/codemirror-themes-4.23.7.tgz" - integrity sha512-UNf1XOx1hG9OmJnrtT86PxKcdcwhaNhbrcD+nsk8WxRJ3n5c8nH6euDvgVPdVLPwbizsaQcZTILACgA/FjRpVg== + version "4.23.10" dependencies: "@codemirror/language" "^6.0.0" "@codemirror/state" "^6.0.0" "@codemirror/view" "^6.0.0" "@ungap/structured-clone@^1.0.0", "@ungap/structured-clone@^1.2.0": - version "1.2.1" - resolved "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.1.tgz" - integrity sha512-fEzPV3hSkSMltkw152tJKNARhOupqbH96MZWyRjNaYZOMIzbrTeQDG+MTc6Mr2pgzFQzFxAfmhGDNP5QK++2ZA== + version "1.3.0" -"@unhead/dom@1.11.14": - version "1.11.14" - resolved "https://registry.npmjs.org/@unhead/dom/-/dom-1.11.14.tgz" - integrity sha512-FaHCWo9JR4h7PCpSRaXuMC6ifXOuBzlI0PD1MmUcxND2ayDl1d6DauIbN8TUf9TDRxNkrK1Ehb0OCXjC1ZJtrg== +"@unhead/dom@1.11.20": + version "1.11.20" dependencies: - "@unhead/schema" "1.11.14" - "@unhead/shared" "1.11.14" + "@unhead/schema" "1.11.20" + "@unhead/shared" "1.11.20" -"@unhead/schema@^1.9.5", "@unhead/schema@1.11.14": - version "1.11.14" - resolved "https://registry.npmjs.org/@unhead/schema/-/schema-1.11.14.tgz" - integrity sha512-V9W9u5tF1/+TiLqxu+Qvh1ShoMDkPEwHoEo4DKdDG6ko7YlbzFfDxV6el9JwCren45U/4Vy/4Xi7j8OH02wsiA== +"@unhead/schema@^1.9.5", "@unhead/schema@1.11.20": + version "1.11.20" dependencies: hookable "^5.5.3" zhead "^2.2.4" -"@unhead/shared@1.11.14": - version "1.11.14" - resolved "https://registry.npmjs.org/@unhead/shared/-/shared-1.11.14.tgz" - integrity sha512-41Qt4PJKYVrEGOTXgBJLRYrEu3S7n5stoB4TFC6312CIBVedXqg7voHQurn32LVDjpfJftjLa2ggCjpqdqoRDw== +"@unhead/shared@1.11.20": + version "1.11.20" dependencies: - "@unhead/schema" "1.11.14" + "@unhead/schema" "1.11.20" + packrup "^0.1.2" "@vcarl/remark-headings@^0.1.0": version "0.1.0" @@ -1442,10 +1337,8 @@ dependencies: tinyrainbow "^1.2.0" -"@vitest/pretty-format@2.1.8": - version "2.1.8" - resolved 
"https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.8.tgz" - integrity sha512-9HiSZ9zpqNLKlbIDRWOnAWqgcA7xu+8YxXSekhr0Ykab7PAYFkhkwoqVArPOtJhPmYeE2YHgKZlj3CP36z2AJQ== +"@vitest/pretty-format@2.1.9": + version "2.1.9" dependencies: tinyrainbow "^1.2.0" @@ -1457,11 +1350,9 @@ tinyspy "^3.0.0" "@vitest/utils@^2.1.1": - version "2.1.8" - resolved "https://registry.npmjs.org/@vitest/utils/-/utils-2.1.8.tgz" - integrity sha512-dwSoui6djdwbfFmIgbIjX2ZhIoG7Ex/+xpxyiEgIGzjliY8xGkcpITKTlp6B4MgtGkF2ilvm97cPM96XZaAgcA== + version "2.1.9" dependencies: - "@vitest/pretty-format" "2.1.8" + "@vitest/pretty-format" "2.1.9" loupe "^3.1.2" tinyrainbow "^1.2.0" @@ -1583,9 +1474,7 @@ acorn-jsx@^5.0.0, acorn-jsx@^5.3.2: integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== "acorn@^6.0.0 || ^7.0.0 || ^8.0.0", acorn@^8.0.0, acorn@^8.9.0: - version "8.14.0" - resolved "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz" - integrity sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA== + version "8.14.1" aggregate-error@^4.0.0: version "4.0.1" @@ -1645,9 +1534,7 @@ ansi-regex@^6.0.1: integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA== ansi-sequence-parser@^1.1.0: - version "1.1.1" - resolved "https://registry.npmjs.org/ansi-sequence-parser/-/ansi-sequence-parser-1.1.1.tgz" - integrity sha512-vJXt3yiaUL4UU546s3rPXlsry/RnM730G1+HkpKE012AN0sx1eOrxSu95oKDIonskeLTijMgqWZ3uDEe3NFvyg== + version "1.1.3" ansi-styles@^3.1.0: version "3.2.1" @@ -1713,25 +1600,28 @@ argparse@^2.0.1: resolved "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz" integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== -aria-hidden@^1.1.1: +aria-hidden@^1.2.4: version "1.2.4" - resolved "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.4.tgz" - integrity 
sha512-y+CcFFwelSXpLZk/7fMB2mUbGtX9lKycf1MWJ7CaTIERyitVlyQx6C+sxcROU2BAJ24OiZyK+8wj2i8AlBoS3A== dependencies: tslib "^2.0.0" -aria-query@^5.0.0, aria-query@5.3.0: - version "5.3.0" - resolved "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz" - integrity sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A== - dependencies: - dequal "^2.0.3" +aria-query@^5.0.0: + version "5.3.2" + resolved "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz" + integrity sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw== aria-query@^5.3.2: version "5.3.2" resolved "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz" integrity sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw== +aria-query@5.3.0: + version "5.3.0" + resolved "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz" + integrity sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A== + dependencies: + dequal "^2.0.3" + array-buffer-byte-length@^1.0.1, array-buffer-byte-length@^1.0.2: version "1.0.2" resolved "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz" @@ -1857,21 +1747,22 @@ astring@^1.8.0: resolved "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz" integrity sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg== +async-function@^1.0.0: + version "1.0.0" + asynckit@^0.4.0: version "0.4.0" resolved "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz" integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q== autoprefixer@^10.0.1: - version "10.4.20" - resolved "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz" - integrity sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g== + version "10.4.21" dependencies: - 
browserslist "^4.23.3" - caniuse-lite "^1.0.30001646" + browserslist "^4.24.4" + caniuse-lite "^1.0.30001702" fraction.js "^4.3.7" normalize-range "^0.1.2" - picocolors "^1.0.1" + picocolors "^1.1.1" postcss-value-parser "^4.2.0" available-typed-arrays@^1.0.7: @@ -1882,14 +1773,10 @@ available-typed-arrays@^1.0.7: possible-typed-array-names "^1.0.0" axe-core@^4.10.0: - version "4.10.2" - resolved "https://registry.npmjs.org/axe-core/-/axe-core-4.10.2.tgz" - integrity sha512-RE3mdQ7P3FRSe7eqCWoeQ/Z9QXrtniSjp1wUjt5nRC3WIpz5rSCve6o3fsZ2aCpJtrZjSZgjwXAoTO5k4tEI0w== + version "4.10.3" axios@^1.5, axios@^1.6.8: - version "1.7.9" - resolved "https://registry.npmjs.org/axios/-/axios-1.7.9.tgz" - integrity sha512-LhLcE7Hbiryz8oMDdDptSrWowmB4Bl6RCt6sIJKpRB4XtVf0iEgewX3au/pJqm+Py1kCASkb/FFKjxQaLtxJvw== + version "1.8.2" dependencies: follow-redirects "^1.15.6" form-data "^4.0.0" @@ -1963,10 +1850,8 @@ browser-assert@^1.2.1: resolved "https://registry.npmjs.org/browser-assert/-/browser-assert-1.2.1.tgz" integrity sha512-nfulgvOR6S4gt9UKCeGJOuSGBPGiFT6oQ/2UBnvTY/5aQ1PnksW72fhZkM30DzoRRv2WpwZf1vHHEr3mtuXIWQ== -browserslist@^4.23.3, "browserslist@>= 4.21.0": - version "4.24.3" - resolved "https://registry.npmjs.org/browserslist/-/browserslist-4.24.3.tgz" - integrity sha512-1CPmv8iobE2fyRMV97dAcMVegvvWKxmq94hkLiAkUGwKVTyDLw33K+ZxiFrREKmmps4rIw6grcCFCnTMSZ/YiA== +browserslist@^4.24.4, "browserslist@>= 4.21.0": + version "4.24.4" dependencies: caniuse-lite "^1.0.30001688" electron-to-chromium "^1.5.73" @@ -1988,10 +1873,8 @@ busboy@1.6.0: dependencies: streamsearch "^1.1.0" -call-bind-apply-helpers@^1.0.0, call-bind-apply-helpers@^1.0.1: - version "1.0.1" - resolved "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.1.tgz" - integrity sha512-BhYE+WDaywFg2TBWYNXAE+8B1ATnThNBqXHP5nQu0jWJdVvY2hvkpyB3qOmtmDePiS5/BDQ8wASEWGMWRG148g== +call-bind-apply-helpers@^1.0.0, call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2: + version "1.0.2" 
dependencies: es-errors "^1.3.0" function-bind "^1.1.2" @@ -2006,13 +1889,11 @@ call-bind@^1.0.7, call-bind@^1.0.8: get-intrinsic "^1.2.4" set-function-length "^1.2.2" -call-bound@^1.0.2, call-bound@^1.0.3: - version "1.0.3" - resolved "https://registry.npmjs.org/call-bound/-/call-bound-1.0.3.tgz" - integrity sha512-YTd+6wGlNlPxSuri7Y6X8tY2dmm12UMH66RpKMhiX6rsk5wXXnYgbUcOt8kiS31/AjfoTOvCsE+w8nZQLQnzHA== +call-bound@^1.0.2, call-bound@^1.0.3, call-bound@^1.0.4: + version "1.0.4" dependencies: - call-bind-apply-helpers "^1.0.1" - get-intrinsic "^1.2.6" + call-bind-apply-helpers "^1.0.2" + get-intrinsic "^1.3.0" callsites@^3.0.0: version "3.1.0" @@ -2032,10 +1913,8 @@ camelcase-css@^2.0.1: resolved "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz" integrity sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA== -caniuse-lite@^1.0.30001579, caniuse-lite@^1.0.30001646, caniuse-lite@^1.0.30001688: - version "1.0.30001690" - resolved "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001690.tgz" - integrity sha512-5ExiE3qQN6oF8Clf8ifIDcMRCRE/dMGcETG/XGMD8/XiXm6HXQgQTh1yZYLXXpSOsEUlJm1Xr7kGULZTuGtP/w== +caniuse-lite@^1.0.30001579, caniuse-lite@^1.0.30001688, caniuse-lite@^1.0.30001702: + version "1.0.30001703" capital-case@^1.0.4: version "1.0.4" @@ -2052,9 +1931,7 @@ ccount@^2.0.0: integrity sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg== chai@^5.1.1: - version "5.1.2" - resolved "https://registry.npmjs.org/chai/-/chai-5.1.2.tgz" - integrity sha512-aGtmf24DW6MLHHG5gCx4zaI3uBq3KRtxeVs0DjFH6Z0rDNbsvTxFASFvdj79pxjxZ8/5u3PIiN3IwEIQkiiuPw== + version "5.2.0" dependencies: assertion-error "^2.0.1" check-error "^2.1.1" @@ -2216,7 +2093,10 @@ clone@^1.0.2: resolved "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz" integrity sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg== -clsx@^2.0.0, clsx@2.0.0: +clsx@^2.0.0: + 
version "2.1.1" + +clsx@2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz" integrity sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q== @@ -2302,9 +2182,7 @@ commander@7: integrity sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw== compute-scroll-into-view@^3.0.2: - version "3.1.0" - resolved "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-3.1.0.tgz" - integrity sha512-rj8l8pD4bJ1nx+dAkMhV1xB5RuZEyVysfxJqB1pRchh1KVvwOv9b7CGB8ZfjTImVv2oF+sYMUkMZq6Na5Ftmbg== + version "3.1.1" concat-map@0.0.1: version "0.0.1" @@ -2320,6 +2198,11 @@ constant-case@^3.0.4: tslib "^2.0.3" upper-case "^2.0.2" +core-js@^3.38.1: + version "3.41.0" + resolved "https://registry.npmjs.org/core-js/-/core-js-3.41.0.tgz" + integrity sha512-SJ4/EHwS36QMJd6h/Rg+GyR4A5xE0FSI3eZ+iBVpfqf1x0eTSg1smWLHrA+2jQThZSh97fmSgFSU8B61nxosxA== + cose-base@^1.0.0: version "1.0.3" resolved "https://registry.npmjs.org/cose-base/-/cose-base-1.0.3.tgz" @@ -2341,7 +2224,7 @@ cross-spawn@^5.0.1: shebang-command "^1.2.0" which "^1.2.9" -cross-spawn@^7.0.0, cross-spawn@^7.0.2: +cross-spawn@^7.0.2, cross-spawn@^7.0.6: version "7.0.6" resolved "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz" integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== @@ -2380,9 +2263,7 @@ cytoscape-cose-bilkent@^4.1.0: cose-base "^1.0.0" cytoscape@^3.2.0, cytoscape@^3.28.1: - version "3.30.4" - resolved "https://registry.npmjs.org/cytoscape/-/cytoscape-3.30.4.tgz" - integrity sha512-OxtlZwQl1WbwMmLiyPSEBuzeTIQnwZhJYYWFzZ2PhEHVFwpeaqNIkUzSiso00D98qk60l8Gwon2RP304d3BJ1A== + version "3.31.1" d3-array@^3.2.0, "d3-array@2 - 3", "d3-array@2.10.0 - 3", "d3-array@2.5.0 - 3", d3-array@3: version "3.2.4" @@ -2734,9 +2615,7 @@ debug@^4.0.0, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4, debug@^4.3.7: ms "^2.1.3" 
decode-named-character-reference@^1.0.0: - version "1.0.2" - resolved "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz" - integrity sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg== + version "1.1.0" dependencies: character-entities "^2.0.0" @@ -2912,9 +2791,7 @@ eastasianwidth@^0.2.0: integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA== electron-to-chromium@^1.5.73: - version "1.5.76" - resolved "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.76.tgz" - integrity sha512-CjVQyG7n7Sr+eBXE86HIulnL5N8xZY1sgmOPGuq/F0Rr0FJq63lg0kEtOIDfZBk44FnDLf6FUJ+dsJcuiUDdDQ== + version "1.5.114" elkjs@^0.9.0: version "0.9.3" @@ -2922,37 +2799,25 @@ elkjs@^0.9.0: integrity sha512-f/ZeWvW/BCXbhGEf1Ujp29EASo/lk1FDnETgNKwJrsVvGZhUWCZyg3xLJjAsxfOmt8KjswHmI5EwCQcPMpOYhQ== embla-carousel-auto-height@^8.0.0: - version "8.5.1" - resolved "https://registry.npmjs.org/embla-carousel-auto-height/-/embla-carousel-auto-height-8.5.1.tgz" - integrity sha512-pH0LlCEX6D2uNf0zuEHPL14YCnlJK+xIlhjcWNy53TG+9qDPgUUwBLBoAdbWro+8/MzqzVf+kHDgsy25jkzu4g== + version "8.5.2" embla-carousel-auto-scroll@^8.0.0: - version "8.5.1" - resolved "https://registry.npmjs.org/embla-carousel-auto-scroll/-/embla-carousel-auto-scroll-8.5.1.tgz" - integrity sha512-fbkZ5+kPHJnJ0aVhRClodnBuaWp8RvV/AW4ex+YhXtvkTld9ApAxmyKQsZzycQc24uz15kzyRjSTNfEvzXPYuQ== + version "8.5.2" embla-carousel-autoplay@^8.0.0: - version "8.5.1" - resolved "https://registry.npmjs.org/embla-carousel-autoplay/-/embla-carousel-autoplay-8.5.1.tgz" - integrity sha512-FnZklFpePfp8wbj177UwVaGFehgs+ASVcJvYLWTtHuYKURynCc3IdDn2qrn0E5Qpa3g9yeGwCS4p8QkrZmO8xg== + version "8.5.2" embla-carousel-react@^8.0.0: - version "8.5.1" - resolved "https://registry.npmjs.org/embla-carousel-react/-/embla-carousel-react-8.5.1.tgz" - integrity 
sha512-z9Y0K84BJvhChXgqn2CFYbfEi6AwEr+FFVVKm/MqbTQ2zIzO1VQri6w67LcfpVF0AjbhwVMywDZqY4alYkjW5w== + version "8.5.2" dependencies: - embla-carousel "8.5.1" - embla-carousel-reactive-utils "8.5.1" + embla-carousel "8.5.2" + embla-carousel-reactive-utils "8.5.2" -embla-carousel-reactive-utils@8.5.1: - version "8.5.1" - resolved "https://registry.npmjs.org/embla-carousel-reactive-utils/-/embla-carousel-reactive-utils-8.5.1.tgz" - integrity sha512-n7VSoGIiiDIc4MfXF3ZRTO59KDp820QDuyBDGlt5/65+lumPHxX2JLz0EZ23hZ4eg4vZGUXwMkYv02fw2JVo/A== +embla-carousel-reactive-utils@8.5.2: + version "8.5.2" -embla-carousel@8.5.1: - version "8.5.1" - resolved "https://registry.npmjs.org/embla-carousel/-/embla-carousel-8.5.1.tgz" - integrity sha512-JUb5+FOHobSiWQ2EJNaueCNT/cQU9L6XWBbWmorWPQT9bkbk+fhsuLr8wWrzXKagO3oWszBO7MSx+GfaRk4E6A== +embla-carousel@8.5.2: + version "8.5.2" emoji-regex@^10.3.0: version "10.4.0" @@ -2970,9 +2835,7 @@ emoji-regex@^9.2.2: integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg== enhanced-resolve@^5.15.0: - version "5.18.0" - resolved "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.0.tgz" - integrity sha512-0/r0MySGYG8YqlayBZ6MuCfECmHFdJ5qyPh8s8wa5Hnm6SaFLSK1VYCbj+NKp090Nm1caZhD+QTnmxO7esYGyQ== + version "5.18.1" dependencies: graceful-fs "^4.2.4" tapable "^2.2.0" @@ -2982,10 +2845,8 @@ entities@^4.5.0: resolved "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz" integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw== -es-abstract@^1.17.5, es-abstract@^1.23.2, es-abstract@^1.23.3, es-abstract@^1.23.5, es-abstract@^1.23.6: - version "1.23.8" - resolved "https://registry.npmjs.org/es-abstract/-/es-abstract-1.23.8.tgz" - integrity sha512-lfab8IzDn6EpI1ibZakcgS6WsfEBiB+43cuJo+wgylx1xKXf+Sp+YR3vFuQwC/u3sxYwV8Cxe3B0DpVUu/WiJQ== +es-abstract@^1.17.5, es-abstract@^1.23.2, es-abstract@^1.23.3, es-abstract@^1.23.5, es-abstract@^1.23.6, 
es-abstract@^1.23.9: + version "1.23.9" dependencies: array-buffer-byte-length "^1.0.2" arraybuffer.prototype.slice "^1.0.4" @@ -2998,10 +2859,11 @@ es-abstract@^1.17.5, es-abstract@^1.23.2, es-abstract@^1.23.3, es-abstract@^1.23 es-define-property "^1.0.1" es-errors "^1.3.0" es-object-atoms "^1.0.0" - es-set-tostringtag "^2.0.3" + es-set-tostringtag "^2.1.0" es-to-primitive "^1.3.0" function.prototype.name "^1.1.8" - get-intrinsic "^1.2.6" + get-intrinsic "^1.2.7" + get-proto "^1.0.0" get-symbol-description "^1.1.0" globalthis "^1.0.4" gopd "^1.2.0" @@ -3022,11 +2884,12 @@ es-abstract@^1.17.5, es-abstract@^1.23.2, es-abstract@^1.23.3, es-abstract@^1.23 object-inspect "^1.13.3" object-keys "^1.1.1" object.assign "^4.1.7" - own-keys "^1.0.0" + own-keys "^1.0.1" regexp.prototype.flags "^1.5.3" safe-array-concat "^1.1.3" safe-push-apply "^1.0.0" safe-regex-test "^1.1.0" + set-proto "^1.0.0" string.prototype.trim "^1.2.10" string.prototype.trimend "^1.0.9" string.prototype.trimstart "^1.0.8" @@ -3069,14 +2932,12 @@ es-iterator-helpers@^1.2.1: iterator.prototype "^1.1.4" safe-array-concat "^1.1.3" -es-object-atoms@^1.0.0: - version "1.0.0" - resolved "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.0.0.tgz" - integrity sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw== +es-object-atoms@^1.0.0, es-object-atoms@^1.1.1: + version "1.1.1" dependencies: es-errors "^1.3.0" -es-set-tostringtag@^2.0.3: +es-set-tostringtag@^2.0.3, es-set-tostringtag@^2.1.0: version "2.1.0" resolved "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz" integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA== @@ -3087,11 +2948,9 @@ es-set-tostringtag@^2.0.3: hasown "^2.0.2" es-shim-unscopables@^1.0.2: - version "1.0.2" - resolved "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz" - integrity 
sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw== + version "1.1.0" dependencies: - hasown "^2.0.0" + hasown "^2.0.2" es-to-primitive@^1.3.0: version "1.3.0" @@ -3109,38 +2968,7 @@ esbuild-register@^3.5.0: dependencies: debug "^4.3.4" -"esbuild@^0.18.0 || ^0.19.0 || ^0.20.0 || ^0.21.0 || ^0.22.0 || ^0.23.0 || ^0.24.0", "esbuild@>=0.12 <1": - version "0.24.2" - resolved "https://registry.npmjs.org/esbuild/-/esbuild-0.24.2.tgz" - integrity sha512-+9egpBW8I3CD5XPe0n6BfT5fxLzxrlDzqydF3aviG+9ni1lDC/OvMHcxqEFV0+LANZG5R1bFMWfUrjVsdwxJvA== - optionalDependencies: - "@esbuild/aix-ppc64" "0.24.2" - "@esbuild/android-arm" "0.24.2" - "@esbuild/android-arm64" "0.24.2" - "@esbuild/android-x64" "0.24.2" - "@esbuild/darwin-arm64" "0.24.2" - "@esbuild/darwin-x64" "0.24.2" - "@esbuild/freebsd-arm64" "0.24.2" - "@esbuild/freebsd-x64" "0.24.2" - "@esbuild/linux-arm" "0.24.2" - "@esbuild/linux-arm64" "0.24.2" - "@esbuild/linux-ia32" "0.24.2" - "@esbuild/linux-loong64" "0.24.2" - "@esbuild/linux-mips64el" "0.24.2" - "@esbuild/linux-ppc64" "0.24.2" - "@esbuild/linux-riscv64" "0.24.2" - "@esbuild/linux-s390x" "0.24.2" - "@esbuild/linux-x64" "0.24.2" - "@esbuild/netbsd-arm64" "0.24.2" - "@esbuild/netbsd-x64" "0.24.2" - "@esbuild/openbsd-arm64" "0.24.2" - "@esbuild/openbsd-x64" "0.24.2" - "@esbuild/sunos-x64" "0.24.2" - "@esbuild/win32-arm64" "0.24.2" - "@esbuild/win32-ia32" "0.24.2" - "@esbuild/win32-x64" "0.24.2" - -esbuild@^0.21.3: +"esbuild@^0.18.0 || ^0.19.0 || ^0.20.0 || ^0.21.0 || ^0.22.0 || ^0.23.0 || ^0.24.0 || ^0.25.0", esbuild@^0.21.3, "esbuild@>=0.12 <1": version "0.21.5" resolved "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz" integrity sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw== @@ -3219,18 +3047,15 @@ eslint-import-resolver-node@^0.3.6, eslint-import-resolver-node@^0.3.9: resolve "^1.22.4" eslint-import-resolver-typescript@^3.5.2: - version "3.7.0" - resolved 
"https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.7.0.tgz" - integrity sha512-Vrwyi8HHxY97K5ebydMtffsWAn1SCR9eol49eCd5fJS4O1WV7PaAjbcjmbfJJSMz/t4Mal212Uz/fQZrOB8mow== + version "3.8.5" dependencies: "@nolyfill/is-core-module" "1.0.39" debug "^4.3.7" enhanced-resolve "^5.15.0" - fast-glob "^3.3.2" - get-tsconfig "^4.7.5" + get-tsconfig "^4.10.0" is-bun-module "^1.0.2" - is-glob "^4.0.3" stable-hash "^0.0.4" + tinyglobby "^0.2.12" eslint-module-utils@^2.12.0: version "2.12.0" @@ -3286,14 +3111,10 @@ eslint-plugin-jsx-a11y@^6.7.1: string.prototype.includes "^2.0.1" "eslint-plugin-react-hooks@^4.5.0 || 5.0.0-canary-7118f5dd7-20230705": - version "5.0.0-canary-7118f5dd7-20230705" - resolved "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.0.0-canary-7118f5dd7-20230705.tgz" - integrity sha512-AZYbMo/NW9chdL7vk6HQzQhT+PvTAEVqWk9ziruUoW2kAOcN5qNyelv70e0F1VNQAbvutOC9oc+xfWycI9FxDw== + version "4.6.2" eslint-plugin-react@^7.33.2: - version "7.37.3" - resolved "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.3.tgz" - integrity sha512-DomWuTQPFYZwF/7c9W2fkKkStqZmBd3uugfqBYLdkZ3Hii23WzZuOLUskGxB8qkSKqftxEeGL1TB2kMhrce0jA== + version "7.37.4" dependencies: array-includes "^3.1.8" array.prototype.findlast "^1.2.5" @@ -3518,15 +3339,13 @@ fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3: integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== fast-glob@^3.2.12, fast-glob@^3.2.9, fast-glob@^3.3.0, fast-glob@^3.3.2: - version "3.3.2" - resolved "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz" - integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow== + version "3.3.3" dependencies: "@nodelib/fs.stat" "^2.0.2" "@nodelib/fs.walk" "^1.2.3" glob-parent "^5.1.2" merge2 "^1.3.0" - micromatch "^4.0.4" + micromatch "^4.0.8" fast-json-stable-stringify@^2.0.0: version "2.1.0" 
@@ -3539,17 +3358,21 @@ fast-levenshtein@^2.0.6: integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== fast-uri@^3.0.1: - version "3.0.3" - resolved "https://registry.npmjs.org/fast-uri/-/fast-uri-3.0.3.tgz" - integrity sha512-aLrHthzCjH5He4Z2H9YZ+v6Ujb9ocRuW6ZzkJQOrTxleEijANq4v1TsaPaVG1PZcuurEzrLcWRyYBYXD5cEiaw== + version "3.0.6" fastq@^1.6.0: - version "1.18.0" - resolved "https://registry.npmjs.org/fastq/-/fastq-1.18.0.tgz" - integrity sha512-QKHXPW0hD8g4UET03SdOdunzSouc9N4AuHdsX8XNcTsuz+yYFILVNIX4l9yHABMhiEI9Db0JTTIpu0wB+Y1QQw== + version "1.19.1" dependencies: reusify "^1.0.4" +fdir@^6.4.3: + version "6.4.3" + +fflate@^0.4.8: + version "0.4.8" + resolved "https://registry.npmjs.org/fflate/-/fflate-0.4.8.tgz" + integrity sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA== + file-entry-cache@^6.0.1: version "6.0.1" resolved "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz" @@ -3608,9 +3431,7 @@ flat-cache@^3.0.4: rimraf "^3.0.2" flatted@^3.2.9: - version "3.3.2" - resolved "https://registry.npmjs.org/flatted/-/flatted-3.3.2.tgz" - integrity sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA== + version "3.3.3" flexsearch@^0.7.31: version "0.7.43" @@ -3627,12 +3448,10 @@ follow-redirects@^1.15.6: resolved "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz" integrity sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ== -for-each@^0.3.3: - version "0.3.3" - resolved "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz" - integrity sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw== +for-each@^0.3.3, for-each@^0.3.5: + version "0.3.5" dependencies: - is-callable "^1.1.3" + is-callable "^1.2.7" for-in@^1.0.1: version "1.0.2" @@ -3647,20 +3466,17 @@ for-own@^1.0.0: for-in "^1.0.1" 
foreground-child@^3.1.0: - version "3.3.0" - resolved "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.0.tgz" - integrity sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg== + version "3.3.1" dependencies: - cross-spawn "^7.0.0" + cross-spawn "^7.0.6" signal-exit "^4.0.1" form-data@^4.0.0: - version "4.0.1" - resolved "https://registry.npmjs.org/form-data/-/form-data-4.0.1.tgz" - integrity sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw== + version "4.0.2" dependencies: asynckit "^0.4.0" combined-stream "^1.0.8" + es-set-tostringtag "^2.1.0" mime-types "^2.1.12" formdata-node@^4.4.1: @@ -3686,6 +3502,9 @@ fs@^0.0.1-security: resolved "https://registry.npmjs.org/fs/-/fs-0.0.1-security.tgz" integrity sha512-3XY9e1pP0CVEUCdj5BmfIZxRBTSDycnbqhIOGec9QYtmVH2fbLpj86CFWkrNOkt/Fvty4KZG5lTglL9j/gJ87w== +fsevents@~2.3.2, fsevents@~2.3.3: + version "2.3.3" + function-bind@^1.1.2: version "1.1.2" resolved "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz" @@ -3718,21 +3537,19 @@ get-east-asian-width@^1.0.0: resolved "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.3.0.tgz" integrity sha512-vpeMIQKxczTD/0s2CdEWHcb0eeJe6TFjxb+J5xgX7hScxqrGuyjmv4c1D4A/gelKfyox0gJJwIHF+fLjeaM8kQ== -get-intrinsic@^1.2.4, get-intrinsic@^1.2.5, get-intrinsic@^1.2.6: - version "1.2.6" - resolved "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.6.tgz" - integrity sha512-qxsEs+9A+u85HhllWJJFicJfPDhRmjzoYdl64aMWW9yRIJmSyxdn8IEkuIM530/7T+lv0TIHd8L6Q/ra0tEoeA== +get-intrinsic@^1.2.4, get-intrinsic@^1.2.5, get-intrinsic@^1.2.6, get-intrinsic@^1.2.7, get-intrinsic@^1.3.0: + version "1.3.0" dependencies: - call-bind-apply-helpers "^1.0.1" - dunder-proto "^1.0.0" + call-bind-apply-helpers "^1.0.2" es-define-property "^1.0.1" es-errors "^1.3.0" - es-object-atoms "^1.0.0" + es-object-atoms "^1.1.1" function-bind "^1.1.2" + get-proto "^1.0.1" gopd 
"^1.2.0" has-symbols "^1.1.0" hasown "^2.0.2" - math-intrinsics "^1.0.0" + math-intrinsics "^1.1.0" get-nonce@^1.0.0: version "1.0.1" @@ -3744,10 +3561,8 @@ get-own-enumerable-property-symbols@^3.0.0: resolved "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz" integrity sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g== -get-proto@^1.0.0: - version "1.0.0" - resolved "https://registry.npmjs.org/get-proto/-/get-proto-1.0.0.tgz" - integrity sha512-TtLgOcKaF1nMP2ijJnITkE4nRhbpshHhmzKiuhmSniiwWzovoqwqQ8rNuhf0mXJOqIY5iU+QkUe0CkJYrLsG9w== +get-proto@^1.0.0, get-proto@^1.0.1: + version "1.0.1" dependencies: dunder-proto "^1.0.1" es-object-atoms "^1.0.0" @@ -3766,10 +3581,8 @@ get-symbol-description@^1.1.0: es-errors "^1.3.0" get-intrinsic "^1.2.6" -get-tsconfig@^4.7.5: - version "4.8.1" - resolved "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.8.1.tgz" - integrity sha512-k9PN+cFBmaLWtVz29SkUoqU5O0slLuHJXt/2P+tMVFT+phsSGXGkp9t3rQIqdz0e+06EHNGs3oM6ZX1s2zHxRg== +get-tsconfig@^4.10.0: + version "4.10.0" dependencies: resolve-pkg-maps "^1.0.0" @@ -3977,7 +3790,7 @@ has-symbols@^1.0.3, has-symbols@^1.1.0: resolved "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz" integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ== -has-tostringtag@^1.0.0, has-tostringtag@^1.0.2: +has-tostringtag@^1.0.2: version "1.0.2" resolved "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz" integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== @@ -3993,7 +3806,7 @@ hash-obj@^4.0.0: sort-keys "^5.0.0" type-fest "^1.0.2" -hasown@^2.0.0, hasown@^2.0.2: +hasown@^2.0.2: version "2.0.2" resolved "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz" integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== @@ 
-4053,15 +3866,13 @@ hast-util-from-html@^2.0.0: vfile-message "^4.0.0" hast-util-from-parse5@^8.0.0: - version "8.0.2" - resolved "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.2.tgz" - integrity sha512-SfMzfdAi/zAoZ1KkFEyyeXBn7u/ShQrfd675ZEE9M3qj+PMFX05xubzRyF76CCSJu8au9jgVxDV1+okFvgZU4A== + version "8.0.3" dependencies: "@types/hast" "^3.0.0" "@types/unist" "^3.0.0" devlop "^1.0.0" hastscript "^9.0.0" - property-information "^6.0.0" + property-information "^7.0.0" vfile "^6.0.0" vfile-location "^5.0.0" web-namespaces "^2.0.0" @@ -4166,9 +3977,7 @@ hast-util-to-estree@^2.0.0: zwitch "^2.0.0" hast-util-to-html@^9.0.0: - version "9.0.4" - resolved "https://registry.npmjs.org/hast-util-to-html/-/hast-util-to-html-9.0.4.tgz" - integrity sha512-wxQzXtdbhiwGAUKrnQJXlOPmHnEehzphwkK7aluUPQ+lEc1xefC8pblMgpp2w5ldBTEfveRIrADcrhGIWrlTDA== + version "9.0.5" dependencies: "@types/hast" "^3.0.0" "@types/unist" "^3.0.0" @@ -4177,15 +3986,13 @@ hast-util-to-html@^9.0.0: hast-util-whitespace "^3.0.0" html-void-elements "^3.0.0" mdast-util-to-hast "^13.0.0" - property-information "^6.0.0" + property-information "^7.0.0" space-separated-tokens "^2.0.0" stringify-entities "^4.0.0" zwitch "^2.0.4" hast-util-to-jsx-runtime@^2.0.0: - version "2.3.2" - resolved "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.2.tgz" - integrity sha512-1ngXYb+V9UT5h+PxNRa1O1FYguZK/XL+gkeqvp7EdHlB9oHUG0eYRo/vY5inBdcqo3RkPMC58/H94HvkbfGdyg== + version "2.3.6" dependencies: "@types/estree" "^1.0.0" "@types/hast" "^3.0.0" @@ -4197,9 +4004,9 @@ hast-util-to-jsx-runtime@^2.0.0: mdast-util-mdx-expression "^2.0.0" mdast-util-mdx-jsx "^3.0.0" mdast-util-mdxjs-esm "^2.0.0" - property-information "^6.0.0" + property-information "^7.0.0" space-separated-tokens "^2.0.0" - style-to-object "^1.0.0" + style-to-js "^1.0.0" unist-util-position "^5.0.0" vfile-message "^4.0.0" @@ -4239,14 +4046,12 @@ hast-util-whitespace@^3.0.0: "@types/hast" "^3.0.0" 
hastscript@^9.0.0: - version "9.0.0" - resolved "https://registry.npmjs.org/hastscript/-/hastscript-9.0.0.tgz" - integrity sha512-jzaLBGavEDKHrc5EfFImKN7nZKKBdSLIdGvCwDZ9TfzbF2ffXiov8CKE445L2Z1Ek2t/m4SKQ2j6Ipv7NyUolw== + version "9.0.1" dependencies: "@types/hast" "^3.0.0" comma-separated-tokens "^2.0.0" hast-util-parse-selector "^4.0.0" - property-information "^6.0.0" + property-information "^7.0.0" space-separated-tokens "^2.0.0" header-case@^2.0.4: @@ -4328,9 +4133,7 @@ immutable@^5.0.2: integrity sha512-P8IdPQHq3lA1xVeBRi5VPqUm5HDgKnx0Ru51wZz5mjxHr5n3RWhjIpOFU7ybkUxfB+5IToy+OLaHYDBIWsv+uw== import-fresh@^3.2.1: - version "3.3.0" - resolved "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz" - integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw== + version "3.3.1" dependencies: parent-module "^1.0.0" resolve-from "^4.0.0" @@ -4358,16 +4161,22 @@ inflight@^1.0.4: once "^1.3.0" wrappy "1" -inherits@^2.0.3, inherits@2, inherits@2.0.3: - version "2.0.3" - resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz" - integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw== +inherits@^2.0.3: + version "2.0.4" inherits@^2.0.4: version "2.0.4" resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz" integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== +inherits@2: + version "2.0.4" + +inherits@2.0.3: + version "2.0.3" + resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz" + integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw== + ini@^1.3.4: version "1.3.8" resolved "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz" @@ -4416,9 +4225,9 @@ internmap@^1.0.0: integrity sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw== "internmap@1 - 2": - version "2.0.3" - resolved 
"https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz" - integrity sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg== + version "1.0.1" + resolved "https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz" + integrity sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw== interpret@^3.1.1: version "3.1.1" @@ -4479,11 +4288,13 @@ is-arrayish@^0.3.1: integrity sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ== is-async-function@^2.0.0: - version "2.0.0" - resolved "https://registry.npmjs.org/is-async-function/-/is-async-function-2.0.0.tgz" - integrity sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA== + version "2.1.1" dependencies: - has-tostringtag "^1.0.0" + async-function "^1.0.0" + call-bound "^1.0.3" + get-proto "^1.0.1" + has-tostringtag "^1.0.2" + safe-regex-test "^1.1.0" is-bigint@^1.1.0: version "1.1.0" @@ -4500,11 +4311,9 @@ is-binary-path@~2.1.0: binary-extensions "^2.0.0" is-boolean-object@^1.2.1: - version "1.2.1" - resolved "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.1.tgz" - integrity sha512-l9qO6eFlUETHtuihLcYOaLKByJ1f+N4kthcU9YjHy3N+B3hWv0y/2Nd0mu/7lTFnRQHTrSdXF50HQ3bl5fEnng== + version "1.2.2" dependencies: - call-bound "^1.0.2" + call-bound "^1.0.3" has-tostringtag "^1.0.2" is-buffer@^2.0.0: @@ -4519,7 +4328,7 @@ is-bun-module@^1.0.2: dependencies: semver "^7.6.3" -is-callable@^1.1.3, is-callable@^1.2.7: +is-callable@^1.2.7: version "1.2.7" resolved "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz" integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA== @@ -4581,11 +4390,12 @@ is-fullwidth-code-point@^3.0.0: integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== is-generator-function@^1.0.10, is-generator-function@^1.0.7: - 
version "1.0.10" - resolved "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz" - integrity sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A== + version "1.1.0" dependencies: - has-tostringtag "^1.0.0" + call-bound "^1.0.3" + get-proto "^1.0.0" + has-tostringtag "^1.0.2" + safe-regex-test "^1.1.0" is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3, is-glob@~4.0.1: version "4.0.3" @@ -4709,9 +4519,7 @@ is-shared-array-buffer@^1.0.4: call-bound "^1.0.3" is-ssh@^1.4.0: - version "1.4.0" - resolved "https://registry.npmjs.org/is-ssh/-/is-ssh-1.4.0.tgz" - integrity sha512-x7+VxdxOdlV3CYpjvRLBv5Lo9OJerlYanjwFrPR9fuGPjCiNiCzFgAWpiLAohSbsnH4ZAys3SBh+hq5rJosxUQ== + version "1.4.1" dependencies: protocols "^2.0.1" @@ -4772,11 +4580,9 @@ is-weakmap@^2.0.2: integrity sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w== is-weakref@^1.0.2, is-weakref@^1.1.0: - version "1.1.0" - resolved "https://registry.npmjs.org/is-weakref/-/is-weakref-1.1.0.tgz" - integrity sha512-SXM8Nwyys6nT5WP6pltOwKytLV7FqQ4UiibxVmW+EIosHcmCqkkjViTb5SNssDlkCiEYRP1/pdWUKVvZBmsR2Q== + version "1.1.1" dependencies: - call-bound "^1.0.2" + call-bound "^1.0.3" is-weakset@^2.0.3: version "2.0.4" @@ -4929,9 +4735,7 @@ jsonpointer@^5.0.1: object.values "^1.1.6" katex@^0.16.0, katex@^0.16.9: - version "0.16.19" - resolved "https://registry.npmjs.org/katex/-/katex-0.16.19.tgz" - integrity sha512-3IA6DYVhxhBabjSLTNO9S4+OliA3Qvb8pBQXMfC4WxXJgLwZgnfDl0BmB4z6nBMdznBsZ+CGM8DrGZ5hcguDZg== + version "0.16.21" dependencies: commander "^8.3.0" @@ -5074,9 +4878,7 @@ loose-envify@^1.1.0, loose-envify@^1.4.0: js-tokens "^3.0.0 || ^4.0.0" loupe@^3.1.0, loupe@^3.1.1, loupe@^3.1.2: - version "3.1.2" - resolved "https://registry.npmjs.org/loupe/-/loupe-3.1.2.tgz" - integrity sha512-23I4pFZHmAemUnz8WZXbYRSKYj801VDaNv9ETuMh7IrMc7VuVVSo+Z9iLE3ni30+U48iDWfi30d3twAXBYmnCg== + version "3.1.3" lower-case@^2.0.2: version 
"2.0.2" @@ -5154,7 +4956,7 @@ match-sorter@^6.3.1: "@babel/runtime" "^7.23.8" remove-accents "0.5.0" -math-intrinsics@^1.0.0, math-intrinsics@^1.1.0: +math-intrinsics@^1.1.0: version "1.1.0" resolved "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz" integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g== @@ -5179,9 +4981,7 @@ mdast-util-find-and-replace@^2.0.0: unist-util-visit-parents "^5.0.0" mdast-util-find-and-replace@^3.0.0: - version "3.0.1" - resolved "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz" - integrity sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA== + version "3.0.2" dependencies: "@types/mdast" "^4.0.0" escape-string-regexp "^5.0.0" @@ -5255,9 +5055,7 @@ mdast-util-gfm-footnote@^1.0.0: micromark-util-normalize-identifier "^1.0.0" mdast-util-gfm-footnote@^2.0.0: - version "2.0.0" - resolved "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz" - integrity sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ== + version "2.1.0" dependencies: "@types/mdast" "^4.0.0" devlop "^1.1.0" @@ -5335,9 +5133,7 @@ mdast-util-gfm@^2.0.0: mdast-util-to-markdown "^1.0.0" mdast-util-gfm@^3.0.0: - version "3.0.0" - resolved "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz" - integrity sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw== + version "3.1.0" dependencies: mdast-util-from-markdown "^2.0.0" mdast-util-gfm-autolink-literal "^2.0.0" @@ -5398,9 +5194,7 @@ mdast-util-mdx-jsx@^2.0.0: vfile-message "^3.0.0" mdast-util-mdx-jsx@^3.0.0: - version "3.1.3" - resolved "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.3.tgz" - integrity sha512-bfOjvNt+1AcbPLTFMFWY149nJz0OjmewJs3LQQ5pIyVGxP4CdOqNVJL6kTaM5c68p8q82Xv3nCyFfUnuEcH3UQ== + version 
"3.2.0" dependencies: "@types/estree-jsx" "^1.0.0" "@types/hast" "^3.0.0" @@ -5591,9 +5385,7 @@ micromark-core-commonmark@^1.0.0, micromark-core-commonmark@^1.0.1: uvu "^0.5.0" micromark-core-commonmark@^2.0.0: - version "2.0.2" - resolved "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.2.tgz" - integrity sha512-FKjQKbxd1cibWMM1P9N+H8TwlgGgSkWZMmfuVucLCHaYqeSvJ0hFeHsIa65pA2nYbes0f8LDHPMrd9X7Ujxg9w== + version "2.0.3" dependencies: decode-named-character-reference "^1.0.0" devlop "^1.0.0" @@ -5696,9 +5488,7 @@ micromark-extension-gfm-table@^1.0.0: uvu "^0.5.0" micromark-extension-gfm-table@^2.0.0: - version "2.1.0" - resolved "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.0.tgz" - integrity sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g== + version "2.1.1" dependencies: devlop "^1.0.0" micromark-factory-space "^2.0.0" @@ -6117,7 +5907,16 @@ micromark-util-resolve-all@^2.0.0: dependencies: micromark-util-types "^2.0.0" -micromark-util-sanitize-uri@^1.0.0, micromark-util-sanitize-uri@^1.1.0: +micromark-util-sanitize-uri@^1.0.0: + version "1.2.0" + resolved "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.2.0.tgz" + integrity sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A== + dependencies: + micromark-util-character "^1.0.0" + micromark-util-encode "^1.0.0" + micromark-util-symbol "^1.0.0" + +micromark-util-sanitize-uri@^1.1.0: version "1.2.0" resolved "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.2.0.tgz" integrity sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A== @@ -6146,9 +5945,7 @@ micromark-util-subtokenize@^1.0.0: uvu "^0.5.0" micromark-util-subtokenize@^2.0.0: - version "2.0.3" - resolved 
"https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.3.tgz" - integrity sha512-VXJJuNxYWSoYL6AJ6OQECCFGhIU2GGHMw8tahogePBrjkG8aCCas3ibkp7RnVOSTClg2is05/R7maAhF1XyQMg== + version "2.1.0" dependencies: devlop "^1.0.0" micromark-util-chunked "^2.0.0" @@ -6171,9 +5968,7 @@ micromark-util-types@^1.0.0, micromark-util-types@^1.0.1: integrity sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg== micromark-util-types@^2.0.0: - version "2.0.1" - resolved "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.1.tgz" - integrity sha512-534m2WhVTddrcKVepwmVEVnUAmtrx9bfIjNoQHRqfnvdaHQiFytEhJoTgpWJvDEXCO5gLTQh3wYC1PgOJA4NSQ== + version "2.0.2" micromark@^3.0.0: version "3.2.0" @@ -6199,9 +5994,7 @@ micromark@^3.0.0: uvu "^0.5.0" micromark@^4.0.0: - version "4.0.1" - resolved "https://registry.npmjs.org/micromark/-/micromark-4.0.1.tgz" - integrity sha512-eBPdkcoCNvYcxQOAKAlceo5SNdzZWfF+FcSupREAzdAh9rRmE239CEQAiTwIgblwnoM8zzj35sZ5ZwvSEOF6Kw== + version "4.0.2" dependencies: "@types/debug" "^4.0.0" debug "^4.0.0" @@ -6322,19 +6115,19 @@ mz@^2.7.0: thenify-all "^1.0.0" nanoid@^3.3.6: - version "3.3.8" - resolved "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz" - integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== + version "3.3.9" + resolved "https://registry.npmjs.org/nanoid/-/nanoid-3.3.9.tgz" + integrity sha512-SppoicMGpZvbF1l3z4x7No3OlIjP7QJvC9XR7AhZr1kL133KHnKPztkKDc+Ir4aJ/1VhTySrtKhrsycmrMQfvg== -nanoid@^3.3.7: - version "3.3.8" - resolved "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz" - integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== +nanoid@^3.3.8: + version "3.3.9" + resolved "https://registry.npmjs.org/nanoid/-/nanoid-3.3.9.tgz" + integrity sha512-SppoicMGpZvbF1l3z4x7No3OlIjP7QJvC9XR7AhZr1kL133KHnKPztkKDc+Ir4aJ/1VhTySrtKhrsycmrMQfvg== nanoid@^5.0.1, 
"nanoid@4 - 5": - version "5.0.9" - resolved "https://registry.npmjs.org/nanoid/-/nanoid-5.0.9.tgz" - integrity sha512-Aooyr6MXU6HpvvWXKoVoXwKMs/KyVakWwg7xQfv5/S/RIgJMy0Ifa45H9qqYy7pTCszrHzP21Uk4PZq2HpEM8Q== + version "5.1.3" + resolved "https://registry.npmjs.org/nanoid/-/nanoid-5.1.3.tgz" + integrity sha512-zAbEOEr7u2CbxwoMRlz/pNSpRP0FdAU4pRaYunCdEezWohXFs+a0Xw7RfkKaezMsmSM1vttcLthJtwRnVtOfHQ== natural-compare@^1.4.0: version "1.4.0" @@ -6377,11 +6170,9 @@ next-themes@^0.2.1: integrity sha512-B+AKNfYNIzh0vqQQKqQItTS8evEouKD7H5Hj3kmuPERwddR2TxvDSFZuTj6T7Jfn1oyeUyJMydPl1Bkxkh0W7A== next@*, "next@^13.0.0 || ^14.0.0", next@^14.1.4, "next@^8.1.1-canary.54 || >=9.0.0", next@>=9.5.3: - version "14.2.22" - resolved "https://registry.npmjs.org/next/-/next-14.2.22.tgz" - integrity sha512-Ps2caobQ9hlEhscLPiPm3J3SYhfwfpMqzsoCMZGWxt9jBRK9hoBZj2A37i8joKhsyth2EuVKDVJCTF5/H4iEDw== + version "14.2.24" dependencies: - "@next/env" "14.2.22" + "@next/env" "14.2.24" "@swc/helpers" "0.5.5" busboy "1.6.0" caniuse-lite "^1.0.30001579" @@ -6389,15 +6180,15 @@ next@*, "next@^13.0.0 || ^14.0.0", next@^14.1.4, "next@^8.1.1-canary.54 || >=9.0 postcss "8.4.31" styled-jsx "5.1.1" optionalDependencies: - "@next/swc-darwin-arm64" "14.2.22" - "@next/swc-darwin-x64" "14.2.22" - "@next/swc-linux-arm64-gnu" "14.2.22" - "@next/swc-linux-arm64-musl" "14.2.22" - "@next/swc-linux-x64-gnu" "14.2.22" - "@next/swc-linux-x64-musl" "14.2.22" - "@next/swc-win32-arm64-msvc" "14.2.22" - "@next/swc-win32-ia32-msvc" "14.2.22" - "@next/swc-win32-x64-msvc" "14.2.22" + "@next/swc-darwin-arm64" "14.2.24" + "@next/swc-darwin-x64" "14.2.24" + "@next/swc-linux-arm64-gnu" "14.2.24" + "@next/swc-linux-arm64-musl" "14.2.24" + "@next/swc-linux-x64-gnu" "14.2.24" + "@next/swc-linux-x64-musl" "14.2.24" + "@next/swc-win32-arm64-msvc" "14.2.24" + "@next/swc-win32-ia32-msvc" "14.2.24" + "@next/swc-win32-x64-msvc" "14.2.24" nextra-theme-docs@^2.13.4: version "2.13.4" @@ -6537,9 +6328,7 @@ object-hash@^3.0.0: integrity 
sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw== object-inspect@^1.13.3: - version "1.13.3" - resolved "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.3.tgz" - integrity sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA== + version "1.13.4" object-keys@^1.1.1: version "1.1.1" @@ -6684,9 +6473,7 @@ ora@^5.4.1: wcwidth "^1.0.1" ora@^8.0.0: - version "8.1.1" - resolved "https://registry.npmjs.org/ora/-/ora-8.1.1.tgz" - integrity sha512-YWielGi1XzG1UTvOaCFaNgEnuhZVMSHYkW/FQ7UX8O26PtlpdM84c0f7wLPlkvx2RfiQmnzd61d/MGxmpQeJPw== + version "8.2.0" dependencies: chalk "^5.3.0" cli-cursor "^5.0.0" @@ -6703,10 +6490,8 @@ os-tmpdir@~1.0.2: resolved "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz" integrity sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g== -own-keys@^1.0.0: +own-keys@^1.0.1: version "1.0.1" - resolved "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz" - integrity sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg== dependencies: get-intrinsic "^1.2.6" object-keys "^1.1.1" @@ -6738,6 +6523,9 @@ p-map@^5.5.0: dependencies: aggregate-error "^4.0.0" +packrup@^0.1.2: + version "0.1.2" + param-case@^3.0.4: version "3.0.4" resolved "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz" @@ -6791,9 +6579,7 @@ parse-passwd@^1.0.0: integrity sha512-1Y1A//QUXEZK7YKz+rD9WydcE1+EuPr6ZBgKecAB8tmoW6UFv0NREVJe1p+jRxtThkcbbKkfwIbWJe/IeE6m2Q== parse-path@^7.0.0: - version "7.0.0" - resolved "https://registry.npmjs.org/parse-path/-/parse-path-7.0.0.tgz" - integrity sha512-Euf9GG8WT9CdqwuWJGdf3RkUcTBArppHABkO7Lm8IzRQp0e2r/kkFnmhu4TSK30Wcu5rVAZLmfPKSBBi9tWFog== + version "7.0.1" dependencies: protocols "^2.0.0" @@ -6899,16 +6685,31 @@ periscopic@^3.0.0: estree-walker "^3.0.0" is-reference "^3.0.0" -picocolors@^1.0.0, picocolors@^1.0.1, picocolors@^1.1.0, 
picocolors@^1.1.1: +picocolors@^1.0.0, picocolors@^1.1.1: version "1.1.1" resolved "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz" integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== -picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1: +picomatch@^2.0.4: version "2.3.1" resolved "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz" integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== +picomatch@^2.2.1: + version "2.3.1" + resolved "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz" + integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== + +picomatch@^2.3.1: + version "2.3.1" + resolved "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz" + integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== + +"picomatch@^3 || ^4", picomatch@^4.0.2: + version "4.0.2" + resolved "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz" + integrity sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg== + pify@^2.3.0: version "2.3.0" resolved "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz" @@ -6941,9 +6742,7 @@ plop@^4.0.1: v8flags "^4.0.1" possible-typed-array-names@^1.0.0: - version "1.0.0" - resolved "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz" - integrity sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q== + version "1.1.0" postcss-import@^15.1.0: version "15.1.0" @@ -6990,11 +6789,9 @@ postcss-value-parser@^4.0.0, postcss-value-parser@^4.2.0: integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== postcss@^8, postcss@^8.0.0, postcss@^8.1.0, postcss@^8.2.14, postcss@^8.4.21, postcss@^8.4.43, postcss@^8.4.47, postcss@^8.4.48, postcss@>=8.0.9: - 
version "8.4.49" - resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.49.tgz" - integrity sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA== + version "8.5.3" dependencies: - nanoid "^3.3.7" + nanoid "^3.3.8" picocolors "^1.1.1" source-map-js "^1.2.1" @@ -7007,15 +6804,26 @@ postcss@8.4.31: picocolors "^1.0.0" source-map-js "^1.0.2" +posthog-js@^1.194.6: + version "1.230.2" + dependencies: + core-js "^3.38.1" + fflate "^0.4.8" + preact "^10.19.3" + web-vitals "^4.2.0" + +preact@^10.19.3: + version "10.26.4" + resolved "https://registry.npmjs.org/preact/-/preact-10.26.4.tgz" + integrity sha512-KJhO7LBFTjP71d83trW+Ilnjbo+ySsaAgCfXOXUlmGzJ4ygYPWmysm77yg4emwfmoz3b22yvH5IsVFHbhUaH5w== + prelude-ls@^1.2.1: version "1.2.1" resolved "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz" integrity sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g== "prettier@^2 || ^3", prettier@^3.2.5: - version "3.4.2" - resolved "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz" - integrity sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ== + version "3.5.3" pretty-bytes@^6.1.1: version "6.1.1" @@ -7039,9 +6847,7 @@ pretty-ms@^8.0.0: parse-ms "^3.0.0" prismjs@^1.29.0: - version "1.29.0" - resolved "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz" - integrity sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q== + version "1.30.0" process@^0.11.1, process@^0.11.10: version "0.11.10" @@ -7062,10 +6868,11 @@ property-information@^6.0.0: resolved "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz" integrity sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig== +property-information@^7.0.0: + version "7.0.0" + protocols@^2.0.0, protocols@^2.0.1: - version "2.0.1" - resolved 
"https://registry.npmjs.org/protocols/-/protocols-2.0.1.tgz" - integrity sha512-/XJ368cyBJ7fzLMwLKv1e4vLxOju2MNAIokcr7meSaNcVbWz/CPcW22cP04mwxOErdA5mwjA8Q6w/cdAQxVn7Q== + version "2.0.2" proxy-from-env@^1.1.0: version "1.1.0" @@ -7087,7 +6894,7 @@ queue-microtask@^1.2.2: resolved "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz" integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A== -react-dom@*, "react-dom@^16 || ^17 || ^18", "react-dom@^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom@^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", react-dom@^18, react-dom@^18.2.0, "react-dom@>= 18.0.0", react-dom@>=16.0.0, react-dom@>=16.13.1, react-dom@>=16.8.0, "react-dom@>=16.x <=18.x": +react-dom@*, "react-dom@^16 || ^17 || ^18", "react-dom@^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom@^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", react-dom@^18, "react-dom@^18.0.0 || ^19.0.0", react-dom@^18.2.0, react-dom@>=16.0.0, react-dom@>=16.13.1, react-dom@>=16.8.0, "react-dom@>=16.x <=18.x": version "18.3.1" resolved "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz" integrity sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw== @@ -7101,9 +6908,7 @@ react-hook-form@^7.51.1: integrity sha512-eHpAUgUjWbZocoQYUHposymRb4ZP6d0uwUnooL2uOybA9/3tPUvoAKqEWK1WaSiTxxOfTpffNZP7QwlnM3/gEg== react-icons@^5.0.1: - version "5.4.0" - resolved "https://registry.npmjs.org/react-icons/-/react-icons-5.4.0.tgz" - integrity sha512-7eltJxgVt7X64oHh6wSWNwwbKTCtMfK35hcjvJS0yxEAhPM8oUKdS3+kqaW1vicIltw+kR2unHaa12S9pPALoQ== + version "5.5.0" react-is@^16.13.1: version "16.13.1" @@ -7116,11 +6921,10 @@ react-is@^17.0.1: integrity sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w== react-markdown@^9.0.1: - version "9.0.1" - resolved "https://registry.npmjs.org/react-markdown/-/react-markdown-9.0.1.tgz" - integrity 
sha512-186Gw/vF1uRkydbsOIkcGXw7aHq0sZOCRFFjGrr7b9+nVZg4UfA4enXCaxm4fUzecU38sWfrNDitGhshuU7rdg== + version "9.1.0" dependencies: "@types/hast" "^3.0.0" + "@types/mdast" "^4.0.0" devlop "^1.0.0" hast-util-to-jsx-runtime "^2.0.0" html-url-attributes "^3.0.0" @@ -7139,26 +6943,22 @@ react-remove-scroll-bar@^2.3.7: react-style-singleton "^2.2.2" tslib "^2.0.0" -react-remove-scroll@^2.6.1: - version "2.6.2" - resolved "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.6.2.tgz" - integrity sha512-KmONPx5fnlXYJQqC62Q+lwIeAk64ws/cUw6omIumRzMRPqgnYqhSSti99nbj0Ry13bv7dF+BKn7NB+OqkdZGTw== +react-remove-scroll@^2.6.3: + version "2.6.3" dependencies: react-remove-scroll-bar "^2.3.7" - react-style-singleton "^2.2.1" + react-style-singleton "^2.2.3" tslib "^2.1.0" use-callback-ref "^1.3.3" - use-sidecar "^1.1.2" + use-sidecar "^1.1.3" react-share@^5.1.0: - version "5.1.2" - resolved "https://registry.npmjs.org/react-share/-/react-share-5.1.2.tgz" - integrity sha512-qwMT72LrGvDsg3E5lKlvUcXH4YWqjLcqlinxIomGaey2SoS/1/DBkTpan3TyzX9gDr0dcp1bl+z5LbdOOE4bmA== + version "5.2.2" dependencies: classnames "^2.3.2" jsonp "^0.2.1" -react-style-singleton@^2.2.1, react-style-singleton@^2.2.2: +react-style-singleton@^2.2.2, react-style-singleton@^2.2.3: version "2.2.3" resolved "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.3.tgz" integrity sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ== @@ -7167,15 +6967,13 @@ react-style-singleton@^2.2.1, react-style-singleton@^2.2.2: tslib "^2.0.0" react-tweet@^3.2.0: - version "3.2.1" - resolved "https://registry.npmjs.org/react-tweet/-/react-tweet-3.2.1.tgz" - integrity sha512-dktP3RMuwRB4pnSDocKpSsW5Hq1IXRW6fONkHhxT5EBIXsKZzdQuI70qtub1XN2dtZdkJWWxfBm/Q+kN+vRYFA== + version "3.2.2" dependencies: "@swc/helpers" "^0.5.3" clsx "^2.0.0" swr "^2.2.4" -react@*, "react@^16 || ^17 || ^18", "react@^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", "react@^16.5.1 || ^17.0.0 || 
^18.0.0", "react@^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react@^16.8.0 || ^17 || ^18 || ^19", "react@^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", "react@^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc", "react@^16.8.0 || ^17.0.1 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc", "react@^16.8.3 || ^17 || ^18", "react@^16.x || ^17.x || ^18.x || ^19.0.0 || ^19.0.0-rc", "react@^17 || ^18 || ^19", react@^18, react@^18.0.0, react@^18.2.0, react@^18.3.1, "react@>= 16.8.0 || 17.x.x || ^18.0.0-0", "react@>= 18.0.0", react@>=16, react@>=16.0.0, react@>=16.13.1, react@>=16.8.0, "react@>=16.x <=18.x", react@>=18: +react@*, "react@^16 || ^17 || ^18", "react@^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", "react@^16.5.1 || ^17.0.0 || ^18.0.0", "react@^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react@^16.8.0 || ^17 || ^18 || ^19", "react@^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", "react@^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc", "react@^16.8.0 || ^17.0.1 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc", "react@^16.8.3 || ^17 || ^18", "react@^16.x || ^17.x || ^18.x || ^19.0.0 || ^19.0.0-rc", "react@^17 || ^18 || ^19", react@^18, react@^18.0.0, "react@^18.0.0 || ^19.0.0", react@^18.2.0, react@^18.3.1, "react@>= 16.8.0 || 17.x.x || ^18.0.0-0", react@>=16, react@>=16.0.0, react@>=16.13.1, react@>=16.8.0, "react@>=16.x <=18.x", react@>=18: version "18.3.1" resolved "https://registry.npmjs.org/react/-/react-18.3.1.tgz" integrity sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ== @@ -7199,9 +6997,7 @@ readable-stream@^3.4.0: util-deprecate "^1.0.1" readdirp@^4.0.1: - version "4.0.2" - resolved "https://registry.npmjs.org/readdirp/-/readdirp-4.0.2.tgz" - integrity sha512-yDMz9g+VaZkqBYS/ozoBJwaBhTbZo3UNYQHNRw1D3UFQB8oHB4uS/tAODO+ZLjGWmUbKnIlOWO+aaIiAxrUWHA== + version "4.1.2" readdirp@~3.6.0: version "3.6.0" @@ -7216,9 +7012,9 @@ reading-time@^1.3.0: integrity 
sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg== recast@^0.23.5: - version "0.23.9" - resolved "https://registry.npmjs.org/recast/-/recast-0.23.9.tgz" - integrity sha512-Hx/BGIbwj+Des3+xy5uAtAbdCyqK9y9wbBcDFDYanLS9JnMqf7OeF87HQwUimE87OEc72mr6tkKUKMBBL+hF9Q== + version "0.23.11" + resolved "https://registry.npmjs.org/recast/-/recast-0.23.11.tgz" + integrity sha512-YTUo+Flmw4ZXiWfQKGcwwc11KnoRAYgzAE2E7mXKCjSviTKShtxBsN6YUUBB2gtaBzKzeKunxhUwNHQuRryhWA== dependencies: ast-types "^0.16.1" esprima "~4.0.0" @@ -7242,17 +7038,15 @@ redent@^3.0.0: strip-indent "^3.0.0" reflect.getprototypeof@^1.0.6, reflect.getprototypeof@^1.0.9: - version "1.0.9" - resolved "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.9.tgz" - integrity sha512-r0Ay04Snci87djAsI4U+WNRcSw5S4pOH7qFjd/veA5gC7TbqESR3tcj28ia95L/fYUDw11JKP7uqUKUAfVvV5Q== + version "1.0.10" dependencies: call-bind "^1.0.8" define-properties "^1.2.1" - dunder-proto "^1.0.1" - es-abstract "^1.23.6" + es-abstract "^1.23.9" es-errors "^1.3.0" - get-intrinsic "^1.2.6" - gopd "^1.2.0" + es-object-atoms "^1.0.0" + get-intrinsic "^1.2.7" + get-proto "^1.0.1" which-builtin-type "^1.2.1" regenerator-runtime@^0.14.0: @@ -7261,13 +7055,13 @@ regenerator-runtime@^0.14.0: integrity sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw== regexp.prototype.flags@^1.5.3: - version "1.5.3" - resolved "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.3.tgz" - integrity sha512-vqlC04+RQoFalODCbCumG2xIOvapzVMHwsyIGM/SIE8fRhFFsXeH8/QQ+s0T0kDAhKc4k30s73/0ydkHQz6HlQ== + version "1.5.4" dependencies: - call-bind "^1.0.7" + call-bind "^1.0.8" define-properties "^1.2.1" es-errors "^1.3.0" + get-proto "^1.0.1" + gopd "^1.2.0" set-function-name "^2.0.2" rehype-external-links@^3.0.0: @@ -7291,9 +7085,7 @@ rehype-format@^5.0.0: hast-util-format "^1.0.0" rehype-highlight@^7.0.0: - version "7.0.1" - resolved 
"https://registry.npmjs.org/rehype-highlight/-/rehype-highlight-7.0.1.tgz" - integrity sha512-dB/vVGFsbm7xPglqnYbg0ABg6rAuIWKycTvuXaOO27SgLoOFNoTlniTBtAxp3n5ZyMioW1a3KwiNqgjkb6Skjg== + version "7.0.2" dependencies: "@types/hast" "^3.0.0" hast-util-to-text "^4.0.0" @@ -7360,9 +7152,7 @@ remark-gfm@^3.0.1: unified "^10.0.0" remark-gfm@^4.0.0: - version "4.0.0" - resolved "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.0.tgz" - integrity sha512-U92vJgBPkbw4Zfu/IiW2oTZLSL3Zpv+uI7My2eq8JxKgqraFdU8YUGicEJCEgSbeaG+QDFqIcwwfMTOEelPxuA== + version "4.0.1" dependencies: "@types/mdast" "^4.0.0" mdast-util-gfm "^3.0.0" @@ -7511,9 +7301,7 @@ restore-cursor@^5.0.0: signal-exit "^4.1.0" reusify@^1.0.4: - version "1.0.4" - resolved "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz" - integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== + version "1.1.0" rimraf@^3.0.2: version "3.0.2" @@ -7528,31 +7316,29 @@ robust-predicates@^3.0.2: integrity sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg== rollup@^4.20.0: - version "4.29.1" - resolved "https://registry.npmjs.org/rollup/-/rollup-4.29.1.tgz" - integrity sha512-RaJ45M/kmJUzSWDs1Nnd5DdV4eerC98idtUOVr6FfKcgxqvjwHmxc5upLF9qZU9EpsVzzhleFahrT3shLuJzIw== + version "4.35.0" dependencies: "@types/estree" "1.0.6" optionalDependencies: - "@rollup/rollup-android-arm-eabi" "4.29.1" - "@rollup/rollup-android-arm64" "4.29.1" - "@rollup/rollup-darwin-arm64" "4.29.1" - "@rollup/rollup-darwin-x64" "4.29.1" - "@rollup/rollup-freebsd-arm64" "4.29.1" - "@rollup/rollup-freebsd-x64" "4.29.1" - "@rollup/rollup-linux-arm-gnueabihf" "4.29.1" - "@rollup/rollup-linux-arm-musleabihf" "4.29.1" - "@rollup/rollup-linux-arm64-gnu" "4.29.1" - "@rollup/rollup-linux-arm64-musl" "4.29.1" - "@rollup/rollup-linux-loongarch64-gnu" "4.29.1" - "@rollup/rollup-linux-powerpc64le-gnu" "4.29.1" - "@rollup/rollup-linux-riscv64-gnu" "4.29.1" - 
"@rollup/rollup-linux-s390x-gnu" "4.29.1" - "@rollup/rollup-linux-x64-gnu" "4.29.1" - "@rollup/rollup-linux-x64-musl" "4.29.1" - "@rollup/rollup-win32-arm64-msvc" "4.29.1" - "@rollup/rollup-win32-ia32-msvc" "4.29.1" - "@rollup/rollup-win32-x64-msvc" "4.29.1" + "@rollup/rollup-android-arm-eabi" "4.35.0" + "@rollup/rollup-android-arm64" "4.35.0" + "@rollup/rollup-darwin-arm64" "4.35.0" + "@rollup/rollup-darwin-x64" "4.35.0" + "@rollup/rollup-freebsd-arm64" "4.35.0" + "@rollup/rollup-freebsd-x64" "4.35.0" + "@rollup/rollup-linux-arm-gnueabihf" "4.35.0" + "@rollup/rollup-linux-arm-musleabihf" "4.35.0" + "@rollup/rollup-linux-arm64-gnu" "4.35.0" + "@rollup/rollup-linux-arm64-musl" "4.35.0" + "@rollup/rollup-linux-loongarch64-gnu" "4.35.0" + "@rollup/rollup-linux-powerpc64le-gnu" "4.35.0" + "@rollup/rollup-linux-riscv64-gnu" "4.35.0" + "@rollup/rollup-linux-s390x-gnu" "4.35.0" + "@rollup/rollup-linux-x64-gnu" "4.35.0" + "@rollup/rollup-linux-x64-musl" "4.35.0" + "@rollup/rollup-win32-arm64-msvc" "4.35.0" + "@rollup/rollup-win32-ia32-msvc" "4.35.0" + "@rollup/rollup-win32-x64-msvc" "4.35.0" fsevents "~2.3.2" run-async@^3.0.0: @@ -7573,9 +7359,7 @@ rw@1: integrity sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ== rxjs@^7.2.0, rxjs@^7.8.1: - version "7.8.1" - resolved "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz" - integrity sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg== + version "7.8.2" dependencies: tslib "^2.1.0" @@ -7625,9 +7409,7 @@ safe-regex-test@^1.0.3, safe-regex-test@^1.1.0: integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== sass@*, sass@^1.3.0, sass@^1.72.0: - version "1.83.0" - resolved "https://registry.npmjs.org/sass/-/sass-1.83.0.tgz" - integrity sha512-qsSxlayzoOjdvXMVLkzF84DJFc2HZEL/rFyGIKbbilYtAvlCxyuzUeff9LawTn4btVnLKg75Z8MMr1lxU1lfGw== + version "1.85.1" dependencies: chokidar "^4.0.0" immutable 
"^5.0.2" @@ -7663,9 +7445,7 @@ semver@^6.3.1: integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== semver@^7.5.4, semver@^7.6.2, semver@^7.6.3: - version "7.6.3" - resolved "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz" - integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== + version "7.7.1" sentence-case@^3.0.4: version "3.0.4" @@ -7698,6 +7478,13 @@ set-function-name@^2.0.2: functions-have-names "^1.2.3" has-property-descriptors "^1.0.2" +set-proto@^1.0.0: + version "1.0.0" + dependencies: + dunder-proto "^1.0.1" + es-errors "^1.3.0" + es-object-atoms "^1.0.0" + sharp@^0.33.3: version "0.33.5" resolved "https://registry.npmjs.org/sharp/-/sharp-0.33.5.tgz" @@ -7888,12 +7675,12 @@ stdin-discarder@^0.2.2: resolved "https://registry.npmjs.org/stdin-discarder/-/stdin-discarder-0.2.2.tgz" integrity sha512-UhDfHmA92YAlNnCfhmq0VeNL5bDbiZGg7sZ2IvPsXubGkiNa9EC+tUTsjBRsYUAz87btI6/1wf4XoVvQ3uRnmQ== -storybook@^8.4.7: - version "8.5.3" - resolved "https://registry.npmjs.org/storybook/-/storybook-8.5.3.tgz" - integrity sha512-2WtNBZ45u1AhviRU+U+ld588tH8gDa702dNSq5C8UBaE9PlOsazGsyp90dw1s9YRvi+ejrjKAupQAU0GwwUiVg== +"storybook@^8.2.0 || ^8.3.0-0 || ^8.4.0-0 || ^8.5.0-0 || ^8.6.0-0", storybook@^8.6.4: + version "8.6.4" + resolved "https://registry.npmjs.org/storybook/-/storybook-8.6.4.tgz" + integrity sha512-XXh1Acvf1r3BQX0BDLQw6yhZ7yUGvYxIcKOBuMdetnX7iXtczipJTfw0uyFwk0ltkKEE9PpJvivYmARF3u64VQ== dependencies: - "@storybook/core" "8.5.3" + "@storybook/core" "8.6.4" streamsearch@^1.1.0: version "1.1.0" @@ -8088,6 +7875,11 @@ style-mod@^4.0.0, style-mod@^4.1.0: resolved "https://registry.npmjs.org/style-mod/-/style-mod-4.1.2.tgz" integrity sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw== +style-to-js@^1.0.0: + version "1.1.16" + dependencies: + style-to-object "1.0.8" + style-to-object@^0.4.1: version "0.4.4" resolved 
"https://registry.npmjs.org/style-to-object/-/style-to-object-0.4.4.tgz" @@ -8095,10 +7887,8 @@ style-to-object@^0.4.1: dependencies: inline-style-parser "0.1.1" -style-to-object@^1.0.0: +style-to-object@1.0.8: version "1.0.8" - resolved "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.8.tgz" - integrity sha512-xT47I/Eo0rwJmaXC4oilDGDWLohVhR6o/xAQcPQN8q6QBuZVL8qMYL85kLmST5cPjAorwvqIA4qXTRQoYHaL6g== dependencies: inline-style-parser "0.2.4" @@ -8110,9 +7900,7 @@ styled-jsx@5.1.1: client-only "0.0.1" stylis@^4.1.3: - version "4.3.4" - resolved "https://registry.npmjs.org/stylis/-/stylis-4.3.4.tgz" - integrity sha512-osIBl6BGUmSfDkyH2mB7EFvCJntXDrLhKjHTRj/rK6xLH0yuPrHULDRQzKokSOD4VoorhtKpfcfW1GAntu8now== + version "4.3.6" sucrase@^3.35.0: version "3.35.0" @@ -8147,9 +7935,7 @@ supports-preserve-symlinks-flag@^1.0.0: integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== swr@^2.2.4: - version "2.3.0" - resolved "https://registry.npmjs.org/swr/-/swr-2.3.0.tgz" - integrity sha512-NyZ76wA4yElZWBHzSgEJc28a0u6QZvhb6w0azeL2k7+Q1gAzVK+IqQYXhVOC/mzi+HZIozrZvBVeSeOZNR2bqA== + version "2.3.3" dependencies: dequal "^2.0.3" use-sync-external-store "^1.4.0" @@ -8221,6 +8007,12 @@ tiny-invariant@^1.3.3: resolved "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz" integrity sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg== +tinyglobby@^0.2.12: + version "0.2.12" + dependencies: + fdir "^6.4.3" + picomatch "^4.0.2" + tinyrainbow@^1.2.0: version "1.2.0" resolved "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-1.2.0.tgz" @@ -8307,7 +8099,7 @@ tsconfig-paths@^3.15.0: minimist "^1.2.6" strip-bom "^3.0.0" -tslib@^2.0.0, tslib@^2.0.1, tslib@^2.0.3, tslib@^2.1.0, tslib@^2.4.0: +tslib@^2.0.0, tslib@^2.0.1, tslib@^2.0.3, tslib@^2.1.0, tslib@^2.4.0, tslib@^2.8.0: version "2.8.1" resolved "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz" integrity 
sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w== @@ -8334,11 +8126,6 @@ type-fest@^1.0.2: resolved "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz" integrity sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA== -type-fest@^2.19.0: - version "2.19.0" - resolved "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz" - integrity sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA== - typed-array-buffer@^1.0.3: version "1.0.3" resolved "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz" @@ -8385,9 +8172,7 @@ typed-array-length@^1.0.7: reflect.getprototypeof "^1.0.6" typescript@*, typescript@^5, "typescript@>= 4.5.5 < 6", typescript@>=3.3.1, typescript@>=4.2.0: - version "5.7.2" - resolved "https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz" - integrity sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg== + version "5.8.2" uglify-js@^3.1.4: version "3.19.3" @@ -8415,13 +8200,11 @@ undici-types@~6.19.2: integrity sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw== unhead@^1.8.3: - version "1.11.14" - resolved "https://registry.npmjs.org/unhead/-/unhead-1.11.14.tgz" - integrity sha512-XmXW0aZyX9kGk9ejCKCSvv/J4T3Rt4hoAe2EofM+nhG+zwZ7AArUMK/0F/fj6FTkfgY0u0/JryE00qUDULgygA== + version "1.11.20" dependencies: - "@unhead/dom" "1.11.14" - "@unhead/schema" "1.11.14" - "@unhead/shared" "1.11.14" + "@unhead/dom" "1.11.20" + "@unhead/schema" "1.11.20" + "@unhead/shared" "1.11.20" hookable "^5.5.3" unified@^10.0.0: @@ -8545,15 +8328,7 @@ unist-util-visit-parents@^4.0.0: "@types/unist" "^2.0.0" unist-util-is "^5.0.0" -unist-util-visit-parents@^5.0.0: - version "5.1.3" - resolved "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz" - integrity 
sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg== - dependencies: - "@types/unist" "^2.0.0" - unist-util-is "^5.0.0" - -unist-util-visit-parents@^5.1.1: +unist-util-visit-parents@^5.0.0, unist-util-visit-parents@^5.1.1: version "5.1.3" resolved "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz" integrity sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg== @@ -8597,12 +8372,10 @@ unist-util-visit@^5.0.0: unist-util-visit-parents "^6.0.0" update-browserslist-db@^1.1.1: - version "1.1.1" - resolved "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.1.tgz" - integrity sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A== + version "1.1.3" dependencies: escalade "^3.2.0" - picocolors "^1.1.0" + picocolors "^1.1.1" upper-case-first@^2.0.2: version "2.0.2" @@ -8632,10 +8405,8 @@ use-callback-ref@^1.3.3: dependencies: tslib "^2.0.0" -use-sidecar@^1.1.2: +use-sidecar@^1.1.3: version "1.1.3" - resolved "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.3.tgz" - integrity sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ== dependencies: detect-node-es "^1.1.0" tslib "^2.0.0" @@ -8721,7 +8492,17 @@ vfile-message@^4.0.0: "@types/unist" "^3.0.0" unist-util-stringify-position "^4.0.0" -vfile@^5.0.0, vfile@^5.3.0: +vfile@^5.0.0: + version "5.3.7" + resolved "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz" + integrity sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g== + dependencies: + "@types/unist" "^2.0.0" + is-buffer "^2.0.0" + unist-util-stringify-position "^3.0.0" + vfile-message "^3.0.0" + +vfile@^5.3.0: version "5.3.7" resolved "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz" integrity 
sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g== @@ -8740,9 +8521,7 @@ vfile@^6.0.0: vfile-message "^4.0.0" vite@^5.1.6: - version "5.4.11" - resolved "https://registry.npmjs.org/vite/-/vite-5.4.11.tgz" - integrity sha512-c7jFQRklXua0mTzneGW9QVyxFjUgwcihC4bXEtujIo2ouWCe1Ajt/amn2PCxYnhYfd5k09JX3SB7OYWFKYqj8Q== + version "5.4.14" dependencies: esbuild "^0.21.3" postcss "^8.4.43" @@ -8808,10 +8587,13 @@ web-streams-polyfill@4.0.0-beta.3: resolved "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz" integrity sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug== +web-vitals@^4.2.0: + version "4.2.4" + resolved "https://registry.npmjs.org/web-vitals/-/web-vitals-4.2.4.tgz" + integrity sha512-r4DIlprAGwJ7YM11VZp4R884m0Vmgr6EAKe3P+kO0PPj3Unqyvv59rczf6UiGcb9Z8QxZVcqKNwv/g0WNdWwsw== + web-worker@^1.2.0: - version "1.3.0" - resolved "https://registry.npmjs.org/web-worker/-/web-worker-1.3.0.tgz" - integrity sha512-BSR9wyRsy/KOValMgd5kMyr3JzpdeoR9KVId8u5GVlTTAtNChlsE4yTxeY7zMdNSyOmoKBv8NH2qeRY9Tg+IaA== + version "1.5.0" webidl-conversions@^3.0.0: version "3.0.1" @@ -8867,14 +8649,13 @@ which-collection@^1.0.2: is-weakset "^2.0.3" which-typed-array@^1.1.16, which-typed-array@^1.1.18, which-typed-array@^1.1.2: - version "1.1.18" - resolved "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.18.tgz" - integrity sha512-qEcY+KJYlWyLH9vNbsr6/5j59AXk5ni5aakf8ldzBvGde6Iz4sxZGkJyWSAueTG7QhOvNRYb1lDdFmL5Td0QKA== + version "1.1.19" dependencies: available-typed-arrays "^1.0.7" call-bind "^1.0.8" - call-bound "^1.0.3" - for-each "^0.3.3" + call-bound "^1.0.4" + for-each "^0.3.5" + get-proto "^1.0.1" gopd "^1.2.0" has-tostringtag "^1.0.2" @@ -8942,9 +8723,9 @@ wrappy@1: integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== ws@^8.2.3: - version "8.18.0" - resolved 
"https://registry.npmjs.org/ws/-/ws-8.18.0.tgz" - integrity sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw== + version "8.18.1" + resolved "https://registry.npmjs.org/ws/-/ws-8.18.1.tgz" + integrity sha512-RKW2aJZMXeMxVpnZ6bck+RswznaxmzdULiBr6KY7XkTnW8uvt0iT9H5DkHUChXrc+uurzwa0rVI16n/Xzjdz1w== y-codemirror.next@^0.3.2: version "0.3.5" @@ -8964,9 +8745,9 @@ yaml@^2.3.4, yaml@^2.4.1: integrity sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA== yjs@^13.5.6, yjs@^13.6.0: - version "13.6.23" - resolved "https://registry.npmjs.org/yjs/-/yjs-13.6.23.tgz" - integrity sha512-ExtnT5WIOVpkL56bhLeisG/N5c4fmzKn4k0ROVfJa5TY2QHbH7F0Wu2T5ZhR7ErsFWQEFafyrnSI8TPKVF9Few== + version "13.6.24" + resolved "https://registry.npmjs.org/yjs/-/yjs-13.6.24.tgz" + integrity sha512-xn/pYLTZa3uD1uDG8lpxfLRo5SR/rp0frdASOl2a71aYNvUXdWcLtVL91s2y7j+Q8ppmjZ9H3jsGVgoFMbT2VA== dependencies: lib0 "^0.2.99" @@ -8986,9 +8767,7 @@ zhead@^2.2.4: integrity sha512-8F0OI5dpWIA5IGG5NHUg9staDwz/ZPxZtvGVf01j7vHqSyZ0raHY+78atOVxRqb73AotX22uV1pXt3gYSstGag== zod@^3.22.3: - version "3.24.1" - resolved "https://registry.npmjs.org/zod/-/zod-3.24.1.tgz" - integrity sha512-muH7gBL9sI1nciMZV67X5fTKKBLtwpZ5VBp1vsOQzj1MhrBZ4wlVCm3gedKZWLp0Oyel8sIGfeiz54Su+OVT+A== + version "3.24.2" zwitch@^2.0.0, zwitch@^2.0.4: version "2.0.4" diff --git a/electron/.eslintrc.js b/electron/.eslintrc.js index 8b270b52e..20e79804f 100644 --- a/electron/.eslintrc.js +++ b/electron/.eslintrc.js @@ -35,5 +35,12 @@ module.exports = { { name: 'Link', linkAttribute: 'to' }, ], }, - ignorePatterns: ['build', 'renderer', 'node_modules', '@global', 'playwright-report'], + ignorePatterns: [ + 'build', + 'renderer', + 'node_modules', + '@global', + 'playwright-report', + 'test-data', + ], } diff --git a/electron/handlers/native.ts b/electron/handlers/native.ts index 81a2fc7f5..f8f70c302 100644 --- a/electron/handlers/native.ts +++ b/electron/handlers/native.ts @@ 
-1,4 +1,5 @@ import { app, ipcMain, dialog, shell, nativeTheme } from 'electron' +import { autoUpdater } from 'electron-updater' import { join } from 'path' import { windowManager } from '../managers/window' import { @@ -28,6 +29,10 @@ export function handleAppIPCs() { shell.openPath(getJanDataFolderPath()) }) + ipcMain.handle(NativeRoute.appUpdateDownload, async (_event) => { + autoUpdater.downloadUpdate() + }) + /** * Handles the "setNativeThemeLight" IPC message by setting the native theme source to "light". * This will change the appearance of the app to the light theme. @@ -312,4 +317,11 @@ export function handleAppIPCs() { const { stopServer } = require('@janhq/server') return stopServer() }) + + /** + * Handles the "appToken" IPC message to generate a random app ID. + */ + ipcMain.handle(NativeRoute.appToken, async (_event): Promise => { + return process.env.appToken ?? 'cortex.cpp' + }) } diff --git a/electron/handlers/update.ts b/electron/handlers/update.ts index 5e2200e51..5dcbda582 100644 --- a/electron/handlers/update.ts +++ b/electron/handlers/update.ts @@ -16,15 +16,13 @@ export function handleAppUpdates() { if (!app.isPackaged) { return } + /* New Update Available */ autoUpdater.on('update-available', async (_info: UpdateInfo) => { - const action = await dialog.showMessageBox({ - title: 'Update Available', - message: 'Would you like to download and install it now?', - buttons: ['Download', 'Later'], - }) - - if (action.response === 0) await autoUpdater.downloadUpdate() + windowManager.mainWindow?.webContents.send( + AppEvent.onAppUpdateAvailable, + {} + ) }) /* App Update Completion Message */ diff --git a/electron/icons/icon.ico b/electron/icons/icon.ico index 40c76171d..5d18719e8 100644 Binary files a/electron/icons/icon.ico and b/electron/icons/icon.ico differ diff --git a/electron/icons_dev/jan-beta-512x512.png b/electron/icons_dev/jan-beta-512x512.png new file mode 100644 index 000000000..4b715494d Binary files /dev/null and 
b/electron/icons_dev/jan-beta-512x512.png differ diff --git a/electron/icons_dev/jan-beta-tray.png b/electron/icons_dev/jan-beta-tray.png new file mode 100644 index 000000000..eaca9ad9a Binary files /dev/null and b/electron/icons_dev/jan-beta-tray.png differ diff --git a/electron/icons_dev/jan-beta-tray@2x.png b/electron/icons_dev/jan-beta-tray@2x.png new file mode 100644 index 000000000..deb83aace Binary files /dev/null and b/electron/icons_dev/jan-beta-tray@2x.png differ diff --git a/electron/icons_dev/jan-beta.ico b/electron/icons_dev/jan-beta.ico new file mode 100644 index 000000000..85cf0c1b4 Binary files /dev/null and b/electron/icons_dev/jan-beta.ico differ diff --git a/electron/icons_dev/jan-beta.png b/electron/icons_dev/jan-beta.png new file mode 100644 index 000000000..4b715494d Binary files /dev/null and b/electron/icons_dev/jan-beta.png differ diff --git a/electron/icons_dev/jan-nightly-512x512.png b/electron/icons_dev/jan-nightly-512x512.png new file mode 100644 index 000000000..23f532947 Binary files /dev/null and b/electron/icons_dev/jan-nightly-512x512.png differ diff --git a/electron/icons_dev/jan-nightly-tray.png b/electron/icons_dev/jan-nightly-tray.png new file mode 100644 index 000000000..bf164a0a6 Binary files /dev/null and b/electron/icons_dev/jan-nightly-tray.png differ diff --git a/electron/icons_dev/jan-nightly-tray@2x.png b/electron/icons_dev/jan-nightly-tray@2x.png new file mode 100644 index 000000000..3cab5709d Binary files /dev/null and b/electron/icons_dev/jan-nightly-tray@2x.png differ diff --git a/electron/icons_dev/jan-nightly.ico b/electron/icons_dev/jan-nightly.ico new file mode 100644 index 000000000..8e64ba8b1 Binary files /dev/null and b/electron/icons_dev/jan-nightly.ico differ diff --git a/electron/icons_dev/jan-nightly.png b/electron/icons_dev/jan-nightly.png new file mode 100644 index 000000000..23f532947 Binary files /dev/null and b/electron/icons_dev/jan-nightly.png differ diff --git a/electron/main.ts b/electron/main.ts 
index 6ce7f476a..59e72ca24 100644 --- a/electron/main.ts +++ b/electron/main.ts @@ -28,8 +28,11 @@ import { setupReactDevTool } from './utils/dev' import { trayManager } from './managers/tray' import { logSystemInfo } from './utils/system' import { registerGlobalShortcuts } from './utils/shortcut' +import { registerLogger } from './utils/logger' +import { randomBytes } from 'crypto' const preloadPath = join(__dirname, 'preload.js') +const preloadQuickAskPath = join(__dirname, 'preload.quickask.js') const rendererPath = join(__dirname, '..', 'renderer') const quickAskPath = join(rendererPath, 'search.html') const mainPath = join(rendererPath, 'index.html') @@ -54,6 +57,10 @@ const createMainWindow = () => { windowManager.createMainWindow(preloadPath, startUrl) } +// Generate a random token for the app +// This token is used for authentication when making request to cortex.cpp server +process.env.appToken = randomBytes(16).toString('hex') + app .whenReady() .then(() => { @@ -79,13 +86,14 @@ app }) .then(setupCore) .then(createUserSpace) + .then(registerLogger) .then(migrate) .then(setupExtensions) .then(setupMenu) .then(handleIPCs) - .then(handleAppUpdates) .then(() => process.env.CI !== 'e2e' && createQuickAskWindow()) .then(createMainWindow) + .then(handleAppUpdates) .then(registerGlobalShortcuts) .then(() => { if (!app.isPackaged) { @@ -131,7 +139,7 @@ function createQuickAskWindow() { // Feature Toggle for Quick Ask if (!getAppConfigurations().quick_ask) return const startUrl = app.isPackaged ? 
`file://${quickAskPath}` : quickAskUrl - windowManager.createQuickAskWindow(preloadPath, startUrl) + windowManager.createQuickAskWindow(preloadQuickAskPath, startUrl) } /** diff --git a/electron/managers/quickAskWindowConfig.ts b/electron/managers/quickAskWindowConfig.ts index eb30e8ebc..93180dd07 100644 --- a/electron/managers/quickAskWindowConfig.ts +++ b/electron/managers/quickAskWindowConfig.ts @@ -13,10 +13,10 @@ export const quickAskWindowConfig: Electron.BrowserWindowConstructorOptions = { fullscreenable: false, resizable: false, center: true, - movable: false, + movable: true, maximizable: false, focusable: true, - transparent: true, + transparent: false, frame: false, type: 'panel', } diff --git a/electron/managers/window.ts b/electron/managers/window.ts index 918036365..dbb3a5101 100644 --- a/electron/managers/window.ts +++ b/electron/managers/window.ts @@ -141,6 +141,9 @@ class WindowManager { return this._quickAskWindow?.isDestroyed() ?? true } + /** + * Expand the quick ask window + */ expandQuickAskWindow(heightOffset: number): void { const width = quickAskWindowConfig.width! const height = quickAskWindowConfig.height! + heightOffset @@ -148,6 +151,9 @@ class WindowManager { this._quickAskWindow?.setSize(width, height, true) } + /** + * Send the selected text to the quick ask window. + */ sendQuickAskSelectedText(selectedText: string): void { this._quickAskWindow?.webContents.send( AppEvent.onSelectedText, @@ -180,6 +186,9 @@ class WindowManager { } } + /** + * Clean up all windows. 
+ */ cleanUp(): void { if (!this.mainWindow?.isDestroyed()) { this.mainWindow?.close() diff --git a/electron/package.json b/electron/package.json index 700f009a5..8b673114b 100644 --- a/electron/package.json +++ b/electron/package.json @@ -1,11 +1,11 @@ { "name": "jan", - "version": "0.1.4", + "version": "0.1.1740752217", "main": "./build/main.js", "author": "Jan ", "license": "MIT", "productName": "Jan", - "homepage": "https://github.com/janhq/jan/tree/main/electron", + "homepage": "https://github.com/menloresearch/jan/tree/main/electron", "description": "Use offline LLMs with your own data. Run open source models like Llama2 or Falcon on your internal computers/servers.", "build": { "appId": "jan.ai.app", @@ -113,7 +113,6 @@ "electron-store": "^8.1.0", "electron-updater": "^6.1.7", "fs-extra": "^11.2.0", - "node-fetch": "2", "pacote": "^21.0.0", "request": "^2.88.2", "request-progress": "^3.0.0", diff --git a/electron/preload.quickask.ts b/electron/preload.quickask.ts new file mode 100644 index 000000000..7c2cadeb6 --- /dev/null +++ b/electron/preload.quickask.ts @@ -0,0 +1,32 @@ +/** + * Exposes a set of APIs to the renderer process via the contextBridge object. 
+ * @module preload + */ + +import { APIEvents, APIRoutes } from '@janhq/core/node' +import { contextBridge, ipcRenderer } from 'electron' + +const interfaces: { [key: string]: (...args: any[]) => any } = {} + +// Loop over each route in APIRoutes +APIRoutes.forEach((method) => { + // For each method, create a function on the interfaces object + // This function invokes the method on the ipcRenderer with any provided arguments + + interfaces[method] = (...args: any[]) => ipcRenderer.invoke(method, ...args) +}) + +// Loop over each method in APIEvents +APIEvents.forEach((method) => { + // For each method, create a function on the interfaces object + // This function sets up an event listener on the ipcRenderer for the method + // The handler for the event is provided as an argument to the function + interfaces[method] = (handler: any) => ipcRenderer.on(method, handler) +}) + +// Expose the 'interfaces' object in the main world under the name 'electronAPI' +// This allows the renderer process to access these methods directly +contextBridge.exposeInMainWorld('electronAPI', { + ...interfaces, + isQuickAsk: () => true, +}) diff --git a/electron/preload.ts b/electron/preload.ts index 05f48d37a..dbfcd1f1e 100644 --- a/electron/preload.ts +++ b/electron/preload.ts @@ -3,7 +3,7 @@ * @module preload */ -import { APIEvents, APIRoutes, AppConfiguration, getAppConfigurations, updateAppConfiguration } from '@janhq/core/node' +import { APIEvents, APIRoutes, AppConfiguration } from '@janhq/core/node' import { contextBridge, ipcRenderer } from 'electron' import { readdirSync } from 'fs' @@ -13,9 +13,8 @@ const interfaces: { [key: string]: (...args: any[]) => any } = {} APIRoutes.forEach((method) => { // For each method, create a function on the interfaces object // This function invokes the method on the ipcRenderer with any provided arguments - + interfaces[method] = (...args: any[]) => ipcRenderer.invoke(method, ...args) - }) // Loop over each method in APIEvents @@ -26,20 +25,21 
@@ APIEvents.forEach((method) => { interfaces[method] = (handler: any) => ipcRenderer.on(method, handler) }) - -interfaces['changeDataFolder'] = async path => { - const appConfiguration: AppConfiguration = await ipcRenderer.invoke('getAppConfigurations') +interfaces['changeDataFolder'] = async (path) => { + const appConfiguration: AppConfiguration = await ipcRenderer.invoke( + 'getAppConfigurations' + ) const currentJanDataFolder = appConfiguration.data_folder appConfiguration.data_folder = path const reflect = require('@alumna/reflect') const { err } = await reflect({ - src: currentJanDataFolder, - dest: path, - recursive: true, - delete: false, - overwrite: true, - errorOnExist: false, - }) + src: currentJanDataFolder, + dest: path, + recursive: true, + delete: false, + overwrite: true, + errorOnExist: false, + }) if (err) { console.error(err) throw err @@ -47,7 +47,7 @@ interfaces['changeDataFolder'] = async path => { await ipcRenderer.invoke('updateAppConfiguration', appConfiguration) } -interfaces['isDirectoryEmpty'] = async path => { +interfaces['isDirectoryEmpty'] = async (path) => { const dirChildren = await readdirSync(path) return dirChildren.filter((x) => x !== '.DS_Store').length === 0 } @@ -56,4 +56,5 @@ interfaces['isDirectoryEmpty'] = async path => { // This allows the renderer process to access these methods directly contextBridge.exposeInMainWorld('electronAPI', { ...interfaces, + isQuickAsk: () => false, }) diff --git a/electron/tsconfig.json b/electron/tsconfig.json index 11c9d8577..5116f0e88 100644 --- a/electron/tsconfig.json +++ b/electron/tsconfig.json @@ -19,5 +19,5 @@ "esm": true }, "include": ["./**/*.ts"], - "exclude": ["core", "build", "dist", "tests", "node_modules"] + "exclude": ["core", "build", "dist", "tests", "node_modules", "test-data"] } diff --git a/extensions/monitoring-extension/src/node/logger.ts b/electron/utils/logger.ts similarity index 77% rename from extensions/monitoring-extension/src/node/logger.ts rename to 
electron/utils/logger.ts index ca64ea2d9..48af0b93a 100644 --- a/extensions/monitoring-extension/src/node/logger.ts +++ b/electron/utils/logger.ts @@ -1,16 +1,28 @@ -import fs from 'fs' +import { + createWriteStream, + existsSync, + mkdirSync, + readdir, + stat, + unlink, + writeFileSync, +} from 'fs' import util from 'util' import { getAppConfigurations, getJanDataFolderPath, Logger, + LoggerManager, } from '@janhq/core/node' import path, { join } from 'path' -export class FileLogger extends Logger { +/** + * File Logger + */ +export class FileLogger implements Logger { name = 'file' logCleaningInterval: number = 120000 - timeout: NodeJS.Timeout | null = null + timeout: NodeJS.Timeout | undefined appLogPath: string = './' logEnabled: boolean = true @@ -18,14 +30,13 @@ export class FileLogger extends Logger { logEnabled: boolean = true, logCleaningInterval: number = 120000 ) { - super() this.logEnabled = logEnabled if (logCleaningInterval) this.logCleaningInterval = logCleaningInterval const appConfigurations = getAppConfigurations() const logFolderPath = join(appConfigurations.data_folder, 'logs') - if (!fs.existsSync(logFolderPath)) { - fs.mkdirSync(logFolderPath, { recursive: true }) + if (!existsSync(logFolderPath)) { + mkdirSync(logFolderPath, { recursive: true }) } this.appLogPath = join(logFolderPath, 'app.log') @@ -69,8 +80,8 @@ export class FileLogger extends Logger { const logDirectory = path.join(getJanDataFolderPath(), 'logs') // Perform log cleaning const currentDate = new Date() - if (fs.existsSync(logDirectory)) - fs.readdir(logDirectory, (err, files) => { + if (existsSync(logDirectory)) + readdir(logDirectory, (err, files) => { if (err) { console.error('Error reading log directory:', err) return @@ -78,7 +89,7 @@ export class FileLogger extends Logger { files.forEach((file) => { const filePath = path.join(logDirectory, file) - fs.stat(filePath, (err, stats) => { + stat(filePath, (err, stats) => { if (err) { console.error('Error getting file stats:', 
err) return @@ -86,7 +97,7 @@ export class FileLogger extends Logger { // Check size if (stats.size > size) { - fs.unlink(filePath, (err) => { + unlink(filePath, (err) => { if (err) { console.error('Error deleting log file:', err) return @@ -103,7 +114,7 @@ export class FileLogger extends Logger { (1000 * 3600 * 24) ) if (daysDifference > days) { - fs.unlink(filePath, (err) => { + unlink(filePath, (err) => { if (err) { console.error('Error deleting log file:', err) return @@ -124,15 +135,20 @@ export class FileLogger extends Logger { } } +/** + * Write log function implementation + * @param message + * @param logPath + */ const writeLog = (message: string, logPath: string) => { - if (!fs.existsSync(logPath)) { + if (!existsSync(logPath)) { const logDirectory = path.join(getJanDataFolderPath(), 'logs') - if (!fs.existsSync(logDirectory)) { - fs.mkdirSync(logDirectory) + if (!existsSync(logDirectory)) { + mkdirSync(logDirectory) } - fs.writeFileSync(logPath, message) + writeFileSync(logPath, message) } else { - const logFile = fs.createWriteStream(logPath, { + const logFile = createWriteStream(logPath, { flags: 'a', }) logFile.write(util.format(message) + '\n') @@ -140,3 +156,12 @@ const writeLog = (message: string, logPath: string) => { console.debug(message) } } + +/** + * Register logger for global application logging + */ +export const registerLogger = () => { + const logger = new FileLogger() + LoggerManager.instance().register(logger) + logger.cleanLogs() +} diff --git a/electron/utils/menu.ts b/electron/utils/menu.ts index 553412faf..bab70da79 100644 --- a/electron/utils/menu.ts +++ b/electron/utils/menu.ts @@ -28,9 +28,10 @@ const template: (Electron.MenuItemConstructorOptions | Electron.MenuItem)[] = [ !updateCheckResult?.updateInfo || updateCheckResult?.updateInfo.version === app.getVersion() ) { - dialog.showMessageBox({ - message: `No updates available.`, - }) + windowManager.mainWindow?.webContents.send( + AppEvent.onAppUpdateNotAvailable, + {} + ) 
return } }) diff --git a/extensions/assistant-extension/README.md b/extensions/assistant-extension/README.md index f9690da09..b9595b6e1 100644 --- a/extensions/assistant-extension/README.md +++ b/extensions/assistant-extension/README.md @@ -70,6 +70,6 @@ There are a few things to keep in mind when writing your extension code: ``` For more information about the Jan Extension Core module, see the - [documentation](https://github.com/janhq/jan/blob/main/core/README.md). + [documentation](https://github.com/menloresearch/jan/blob/main/core/README.md). So, what are you waiting for? Go ahead and start customizing your extension! diff --git a/extensions/assistant-extension/rolldown.config.mjs b/extensions/assistant-extension/rolldown.config.mjs index c8fdefd7d..e549ea7d9 100644 --- a/extensions/assistant-extension/rolldown.config.mjs +++ b/extensions/assistant-extension/rolldown.config.mjs @@ -26,6 +26,9 @@ export default defineConfig([ resolve: { extensions: ['.js', '.ts'], }, + define: { + CORTEX_API_URL: JSON.stringify(`http://127.0.0.1:${process.env.CORTEX_API_PORT ?? 
"39291"}`), + }, platform: 'node', }, ]) diff --git a/extensions/assistant-extension/src/@types/global.d.ts b/extensions/assistant-extension/src/@types/global.d.ts index 2ca4a4080..b724db8d0 100644 --- a/extensions/assistant-extension/src/@types/global.d.ts +++ b/extensions/assistant-extension/src/@types/global.d.ts @@ -1,2 +1,3 @@ declare const NODE: string declare const VERSION: string +declare const CORTEX_API_URL: string diff --git a/extensions/assistant-extension/src/node/retrieval.ts b/extensions/assistant-extension/src/node/retrieval.ts index 5804ff763..05fa67d54 100644 --- a/extensions/assistant-extension/src/node/retrieval.ts +++ b/extensions/assistant-extension/src/node/retrieval.ts @@ -23,12 +23,17 @@ export class Retrieval { constructor(chunkSize: number = 4000, chunkOverlap: number = 200) { this.updateTextSplitter(chunkSize, chunkOverlap) + this.initialize() + } + + private async initialize() { + const apiKey = await window.core?.api.appToken() ?? 'cortex.cpp' // declare time-weighted retriever and storage this.timeWeightedVectorStore = new MemoryVectorStore( new OpenAIEmbeddings( - { openAIApiKey: 'cortex-embedding' }, - { basePath: 'http://127.0.0.1:39291/v1' } + { openAIApiKey: apiKey }, + { basePath: `${CORTEX_API_URL}/v1` } ) ) this.timeWeightedretriever = new TimeWeightedVectorStoreRetriever({ @@ -47,11 +52,12 @@ export class Retrieval { }) } - public updateEmbeddingEngine(model: string, engine: string): void { + public async updateEmbeddingEngine(model: string, engine: string) { + const apiKey = await window.core?.api.appToken() ?? 
'cortex.cpp' this.embeddingModel = new OpenAIEmbeddings( - { openAIApiKey: 'cortex-embedding', model }, + { openAIApiKey: apiKey, model }, // TODO: Raw settings - { basePath: 'http://127.0.0.1:39291/v1' } + { basePath: `${CORTEX_API_URL}/v1` } ) // update time-weighted embedding model diff --git a/extensions/conversational-extension/rolldown.config.mjs b/extensions/conversational-extension/rolldown.config.mjs index 4645021a8..6d396f611 100644 --- a/extensions/conversational-extension/rolldown.config.mjs +++ b/extensions/conversational-extension/rolldown.config.mjs @@ -8,7 +8,6 @@ export default defineConfig({ }, platform: 'browser', define: { - API_URL: JSON.stringify('http://127.0.0.1:39291'), - SOCKET_URL: JSON.stringify('ws://127.0.0.1:39291'), + API_URL: JSON.stringify(`http://127.0.0.1:${process.env.CORTEX_API_PORT ?? "39291"}`), }, }) diff --git a/extensions/conversational-extension/src/@types/global.d.ts b/extensions/conversational-extension/src/@types/global.d.ts index 813a893f4..abe60d318 100644 --- a/extensions/conversational-extension/src/@types/global.d.ts +++ b/extensions/conversational-extension/src/@types/global.d.ts @@ -1,5 +1,4 @@ declare const API_URL: string -declare const SOCKET_URL: string interface Core { api: APIFunctions diff --git a/extensions/conversational-extension/src/index.ts b/extensions/conversational-extension/src/index.ts index eeb4fcf38..791385fc9 100644 --- a/extensions/conversational-extension/src/index.ts +++ b/extensions/conversational-extension/src/index.ts @@ -4,7 +4,7 @@ import { ThreadAssistantInfo, ThreadMessage, } from '@janhq/core' -import ky from 'ky' +import ky, { KyInstance } from 'ky' import PQueue from 'p-queue' type ThreadList = { @@ -22,6 +22,22 @@ type MessageList = { export default class CortexConversationalExtension extends ConversationalExtension { queue = new PQueue({ concurrency: 1 }) + api?: KyInstance + /** + * Get the API instance + * @returns + */ + async apiInstance(): Promise { + if(this.api) return 
this.api + const apiKey = (await window.core?.api.appToken()) ?? 'cortex.cpp' + this.api = ky.extend({ + prefixUrl: API_URL, + headers: { + Authorization: `Bearer ${apiKey}`, + }, + }) + return this.api + } /** * Called when the extension is loaded. */ @@ -39,10 +55,12 @@ export default class CortexConversationalExtension extends ConversationalExtensi */ async listThreads(): Promise { return this.queue.add(() => - ky - .get(`${API_URL}/v1/threads?limit=-1`) - .json() - .then((e) => e.data) + this.apiInstance().then((api) => + api + .get('v1/threads?limit=-1') + .json() + .then((e) => e.data) + ) ) as Promise } @@ -52,7 +70,9 @@ export default class CortexConversationalExtension extends ConversationalExtensi */ async createThread(thread: Thread): Promise { return this.queue.add(() => - ky.post(`${API_URL}/v1/threads`, { json: thread }).json() + this.apiInstance().then((api) => + api.post('v1/threads', { json: thread }).json() + ) ) as Promise } @@ -63,7 +83,9 @@ export default class CortexConversationalExtension extends ConversationalExtensi async modifyThread(thread: Thread): Promise { return this.queue .add(() => - ky.patch(`${API_URL}/v1/threads/${thread.id}`, { json: thread }) + this.apiInstance().then((api) => + api.patch(`v1/threads/${thread.id}`, { json: thread }) + ) ) .then() } @@ -74,7 +96,9 @@ export default class CortexConversationalExtension extends ConversationalExtensi */ async deleteThread(threadId: string): Promise { return this.queue - .add(() => ky.delete(`${API_URL}/v1/threads/${threadId}`)) + .add(() => + this.apiInstance().then((api) => api.delete(`v1/threads/${threadId}`)) + ) .then() } @@ -85,11 +109,13 @@ export default class CortexConversationalExtension extends ConversationalExtensi */ async createMessage(message: ThreadMessage): Promise { return this.queue.add(() => - ky - .post(`${API_URL}/v1/threads/${message.thread_id}/messages`, { - json: message, - }) - .json() + this.apiInstance().then((api) => + api + 
.post(`v1/threads/${message.thread_id}/messages`, { + json: message, + }) + .json() + ) ) as Promise } @@ -100,14 +126,13 @@ export default class CortexConversationalExtension extends ConversationalExtensi */ async modifyMessage(message: ThreadMessage): Promise { return this.queue.add(() => - ky - .patch( - `${API_URL}/v1/threads/${message.thread_id}/messages/${message.id}`, - { + this.apiInstance().then((api) => + api + .patch(`v1/threads/${message.thread_id}/messages/${message.id}`, { json: message, - } - ) - .json() + }) + .json() + ) ) as Promise } @@ -120,7 +145,9 @@ export default class CortexConversationalExtension extends ConversationalExtensi async deleteMessage(threadId: string, messageId: string): Promise { return this.queue .add(() => - ky.delete(`${API_URL}/v1/threads/${threadId}/messages/${messageId}`) + this.apiInstance().then((api) => + api.delete(`v1/threads/${threadId}/messages/${messageId}`) + ) ) .then() } @@ -132,10 +159,12 @@ export default class CortexConversationalExtension extends ConversationalExtensi */ async listMessages(threadId: string): Promise { return this.queue.add(() => - ky - .get(`${API_URL}/v1/threads/${threadId}/messages?order=asc&limit=-1`) - .json() - .then((e) => e.data) + this.apiInstance().then((api) => + api + .get(`v1/threads/${threadId}/messages?order=asc&limit=-1`) + .json() + .then((e) => e.data) + ) ) as Promise } @@ -147,9 +176,11 @@ export default class CortexConversationalExtension extends ConversationalExtensi */ async getThreadAssistant(threadId: string): Promise { return this.queue.add(() => - ky - .get(`${API_URL}/v1/assistants/${threadId}?limit=-1`) - .json() + this.apiInstance().then((api) => + api + .get(`v1/assistants/${threadId}?limit=-1`) + .json() + ) ) as Promise } /** @@ -163,9 +194,11 @@ export default class CortexConversationalExtension extends ConversationalExtensi assistant: ThreadAssistantInfo ): Promise { return this.queue.add(() => - ky - .post(`${API_URL}/v1/assistants/${threadId}`, { json: 
assistant }) - .json() + this.apiInstance().then((api) => + api + .post(`v1/assistants/${threadId}`, { json: assistant }) + .json() + ) ) as Promise } @@ -180,9 +213,11 @@ export default class CortexConversationalExtension extends ConversationalExtensi assistant: ThreadAssistantInfo ): Promise { return this.queue.add(() => - ky - .patch(`${API_URL}/v1/assistants/${threadId}`, { json: assistant }) - .json() + this.apiInstance().then((api) => + api + .patch(`v1/assistants/${threadId}`, { json: assistant }) + .json() + ) ) as Promise } @@ -191,10 +226,12 @@ export default class CortexConversationalExtension extends ConversationalExtensi * @returns */ async healthz(): Promise { - return ky - .get(`${API_URL}/healthz`, { - retry: { limit: 20, delay: () => 500, methods: ['get'] }, - }) + return this.apiInstance() + .then((api) => + api.get('healthz', { + retry: { limit: 20, delay: () => 500, methods: ['get'] }, + }) + ) .then(() => {}) } } diff --git a/extensions/engine-management-extension/engines.mjs b/extensions/engine-management-extension/engines.mjs index e85035423..eafe8a09c 100644 --- a/extensions/engine-management-extension/engines.mjs +++ b/extensions/engine-management-extension/engines.mjs @@ -6,6 +6,8 @@ import groq from './resources/groq.json' with { type: 'json' } import martian from './resources/martian.json' with { type: 'json' } import mistral from './resources/mistral.json' with { type: 'json' } import nvidia from './resources/nvidia.json' with { type: 'json' } +import deepseek from './resources/deepseek.json' with { type: 'json' } +import googleGemini from './resources/google_gemini.json' with { type: 'json' } import anthropicModels from './models/anthropic.json' with { type: 'json' } import cohereModels from './models/cohere.json' with { type: 'json' } @@ -15,6 +17,8 @@ import groqModels from './models/groq.json' with { type: 'json' } import martianModels from './models/martian.json' with { type: 'json' } import mistralModels from 
'./models/mistral.json' with { type: 'json' } import nvidiaModels from './models/nvidia.json' with { type: 'json' } +import deepseekModels from './models/deepseek.json' with { type: 'json' } +import googleGeminiModels from './models/google_gemini.json' with { type: 'json' } const engines = [ anthropic, @@ -25,6 +29,8 @@ const engines = [ mistral, martian, nvidia, + deepseek, + googleGemini, ] const models = [ ...anthropicModels, @@ -35,5 +41,7 @@ const models = [ ...mistralModels, ...martianModels, ...nvidiaModels, + ...deepseekModels, + ...googleGeminiModels, ] export { engines, models } diff --git a/extensions/engine-management-extension/models/anthropic.json b/extensions/engine-management-extension/models/anthropic.json index 46b5893d1..2b3d7d683 100644 --- a/extensions/engine-management-extension/models/anthropic.json +++ b/extensions/engine-management-extension/models/anthropic.json @@ -8,6 +8,7 @@ "inference_params": { "max_tokens": 4096, "temperature": 0.7, + "max_temperature": 1.0, "stream": true }, "engine": "anthropic" @@ -21,6 +22,7 @@ "inference_params": { "max_tokens": 8192, "temperature": 0.7, + "max_temperature": 1.0, "stream": true }, "engine": "anthropic" @@ -34,6 +36,21 @@ "inference_params": { "max_tokens": 8192, "temperature": 0.7, + "max_temperature": 1.0, + "stream": true + }, + "engine": "anthropic" + }, + { + "model": "claude-3-7-sonnet-latest", + "object": "model", + "name": "Claude 3.7 Sonnet Latest", + "version": "1.0", + "description": "Claude 3.7 Sonnet is the first hybrid reasoning model on the market. It is the most intelligent model yet. 
It is faster, more cost effective, and more capable than any other model in its class.", + "inference_params": { + "max_tokens": 8192, + "temperature": 0.7, + "max_temperature": 1.0, "stream": true }, "engine": "anthropic" diff --git a/extensions/engine-management-extension/models/cohere.json b/extensions/engine-management-extension/models/cohere.json index 458e4278b..f78498b93 100644 --- a/extensions/engine-management-extension/models/cohere.json +++ b/extensions/engine-management-extension/models/cohere.json @@ -8,7 +8,8 @@ "inference_params": { "max_tokens": 4096, "temperature": 0.7, - "stream": false + "max_temperature": 1.0, + "stream": true }, "engine": "cohere" }, @@ -21,7 +22,8 @@ "inference_params": { "max_tokens": 4096, "temperature": 0.7, - "stream": false + "max_temperature": 1.0, + "stream": true }, "engine": "cohere" } diff --git a/extensions/engine-management-extension/models/deepseek.json b/extensions/engine-management-extension/models/deepseek.json new file mode 100644 index 000000000..29d5406bf --- /dev/null +++ b/extensions/engine-management-extension/models/deepseek.json @@ -0,0 +1,28 @@ +[ + { + "model": "deepseek-chat", + "object": "model", + "name": "DeepSeek Chat", + "version": "1.0", + "description": "The deepseek-chat model has been upgraded to DeepSeek-V3. deepseek-reasoner points to the new model DeepSeek-R1", + "inference_params": { + "max_tokens": 8192, + "temperature": 0.6, + "stream": true + }, + "engine": "deepseek" + }, + { + "model": "deepseek-reasoner", + "object": "model", + "name": "DeepSeek R1", + "version": "1.0", + "description": "CoT (Chain of Thought) is the reasoning content deepseek-reasoner gives before output the final answer. 
For details, please refer to Reasoning Model.", + "inference_params": { + "max_tokens": 8192, + "temperature": 0.6, + "stream": true + }, + "engine": "deepseek" + } +] diff --git a/extensions/engine-management-extension/models/google_gemini.json b/extensions/engine-management-extension/models/google_gemini.json new file mode 100644 index 000000000..392754ee6 --- /dev/null +++ b/extensions/engine-management-extension/models/google_gemini.json @@ -0,0 +1,67 @@ +[ + { + "model": "gemini-2.0-flash", + "object": "model", + "name": "Gemini 2.0 Flash", + "version": "1.0", + "description": "Gemini 2.0 Flash delivers next-gen features and improved capabilities, including superior speed, native tool use, multimodal generation, and a 1M token context window.", + "inference_params": { + "max_tokens": 8192, + "temperature": 0.6, + "stream": true + }, + "engine": "google_gemini" + }, + { + "model": "gemini-2.0-flash-lite-preview", + "object": "model", + "name": "Gemini 2.0 Flash-Lite Preview", + "version": "1.0", + "description": "A Gemini 2.0 Flash model optimized for cost efficiency and low latency.", + "inference_params": { + "max_tokens": 8192, + "temperature": 0.6, + "stream": true + }, + "engine": "google_gemini" + }, + { + "model": "gemini-1.5-flash", + "object": "model", + "name": "Gemini 1.5 Flash", + "version": "1.0", + "description": "Gemini 1.5 Flash is a fast and versatile multimodal model for scaling across diverse tasks.", + "inference_params": { + "max_tokens": 8192, + "temperature": 0.6, + "stream": true + }, + "engine": "google_gemini" + }, + { + "model": "gemini-1.5-flash-8b", + "object": "model", + "name": "Gemini 1.5 Flash-8B", + "version": "1.0", + "description": "Gemini 1.5 Flash-8B is a small model designed for lower intelligence tasks.", + "inference_params": { + "max_tokens": 8192, + "temperature": 0.6, + "stream": true + }, + "engine": "google_gemini" + }, + { + "model": "gemini-1.5-pro", + "object": "model", + "name": "Gemini 1.5 Pro", + "version": 
"1.0", + "description": "Gemini 1.5 Pro is a mid-size multimodal model that is optimized for a wide-range of reasoning tasks. 1.5 Pro can process large amounts of data at once, including 2 hours of video, 19 hours of audio, codebases with 60,000 lines of code, or 2,000 pages of text. ", + "inference_params": { + "max_tokens": 8192, + "temperature": 0.6, + "stream": true + }, + "engine": "google_gemini" + } +] diff --git a/extensions/engine-management-extension/models/mistral.json b/extensions/engine-management-extension/models/mistral.json index 12fcf938d..47df5d506 100644 --- a/extensions/engine-management-extension/models/mistral.json +++ b/extensions/engine-management-extension/models/mistral.json @@ -8,6 +8,7 @@ "inference_params": { "max_tokens": 32000, "temperature": 0.7, + "max_temperature": 1.0, "top_p": 0.95, "stream": true }, @@ -22,6 +23,7 @@ "inference_params": { "max_tokens": 32000, "temperature": 0.7, + "max_temperature": 1.0, "top_p": 0.95, "stream": true }, @@ -36,6 +38,7 @@ "inference_params": { "max_tokens": 32000, "temperature": 0.7, + "max_temperature": 1.0, "top_p": 0.95, "stream": true }, diff --git a/extensions/engine-management-extension/models/nvidia.json b/extensions/engine-management-extension/models/nvidia.json index dfce9f8bc..cb6f9dec1 100644 --- a/extensions/engine-management-extension/models/nvidia.json +++ b/extensions/engine-management-extension/models/nvidia.json @@ -8,6 +8,7 @@ "inference_params": { "max_tokens": 1024, "temperature": 0.3, + "max_temperature": 1.0, "top_p": 1, "stream": false, "frequency_penalty": 0, diff --git a/extensions/engine-management-extension/models/openai.json b/extensions/engine-management-extension/models/openai.json index 8f59b42ea..b2314ec0b 100644 --- a/extensions/engine-management-extension/models/openai.json +++ b/extensions/engine-management-extension/models/openai.json @@ -1,4 +1,22 @@ [ + { + "model": "gpt-4.5-preview", + "object": "model", + "name": "OpenAI GPT 4.5 Preview", + "version": 
"1.2", + "description": "OpenAI GPT 4.5 Preview is a research preview of GPT-4.5, our largest and most capable GPT model yet", + "format": "api", + "inference_params": { + "max_tokens": 16384, + "temperature": 0.7, + "top_p": 0.95, + "stream": true, + "stop": [], + "frequency_penalty": 0, + "presence_penalty": 0 + }, + "engine": "openai" + }, { "model": "gpt-4-turbo", "object": "model", @@ -79,12 +97,7 @@ "description": "OpenAI o1 is a new model with complex reasoning", "format": "api", "inference_params": { - "max_tokens": 100000, - "temperature": 1, - "top_p": 1, - "stream": true, - "frequency_penalty": 0, - "presence_penalty": 0 + "max_tokens": 100000 }, "engine": "openai" }, @@ -97,11 +110,7 @@ "format": "api", "inference_params": { "max_tokens": 32768, - "temperature": 1, - "top_p": 1, - "stream": true, - "frequency_penalty": 0, - "presence_penalty": 0 + "stream": true }, "engine": "openai" }, @@ -114,11 +123,20 @@ "format": "api", "inference_params": { "max_tokens": 65536, - "temperature": 1, - "top_p": 1, - "stream": true, - "frequency_penalty": 0, - "presence_penalty": 0 + "stream": true + }, + "engine": "openai" + }, + { + "model": "o3-mini", + "object": "model", + "name": "OpenAI o3-mini", + "version": "1.0", + "description": "OpenAI most recent reasoning model, providing high intelligence at the same cost and latency targets of o1-mini.", + "format": "api", + "inference_params": { + "max_tokens": 100000, + "stream": true }, "engine": "openai" } diff --git a/extensions/engine-management-extension/models/openrouter.json b/extensions/engine-management-extension/models/openrouter.json index b9714bb57..bf132533c 100644 --- a/extensions/engine-management-extension/models/openrouter.json +++ b/extensions/engine-management-extension/models/openrouter.json @@ -1,16 +1,91 @@ [ { - "model": "open-router-auto", + "model": "deepseek/deepseek-r1:free", "object": "model", - "name": "OpenRouter", + "name": "DeepSeek: R1", "version": "1.0", - "description": " OpenRouter 
scouts for the lowest prices and best latencies/throughputs across dozens of providers, and lets you choose how to prioritize them.", + "description": "OpenRouter scouts for the lowest prices and best latencies/throughputs across dozens of providers, and lets you choose how to prioritize them.", "inference_params": { - "max_tokens": 128000, "temperature": 0.7, "top_p": 0.95, "frequency_penalty": 0, - "presence_penalty": 0 + "presence_penalty": 0, + "stream": true + }, + "engine": "openrouter" + }, + { + "model": "deepseek/deepseek-r1-distill-llama-70b:free", + "object": "model", + "name": "DeepSeek: R1 Distill Llama 70B", + "version": "1.0", + "description": " OpenRouter scouts for the lowest prices and best latencies/throughputs across dozens of providers, and lets you choose how to prioritize them.", + "inference_params": { + "temperature": 0.7, + "top_p": 0.95, + "frequency_penalty": 0, + "presence_penalty": 0, + "stream": true + }, + "engine": "openrouter" + }, + { + "model": "deepseek/deepseek-r1-distill-llama-70b:free", + "object": "model", + "name": "DeepSeek: R1 Distill Llama 70B", + "version": "1.0", + "description": "OpenRouter scouts for the lowest prices and best latencies/throughputs across dozens of providers, and lets you choose how to prioritize them.", + "inference_params": { + "temperature": 0.7, + "top_p": 0.95, + "frequency_penalty": 0, + "presence_penalty": 0, + "stream": true + }, + "engine": "openrouter" + }, + { + "model": "meta-llama/llama-3.1-405b-instruct:free", + "object": "model", + "name": "Meta: Llama 3.1 405B Instruct", + "version": "1.0", + "description": "OpenRouter scouts for the lowest prices and best latencies/throughputs across dozens of providers, and lets you choose how to prioritize them.", + "inference_params": { + "temperature": 0.7, + "top_p": 0.95, + "frequency_penalty": 0, + "presence_penalty": 0, + "stream": true + }, + "engine": "openrouter" + }, + { + "model": "qwen/qwen-vl-plus:free", + "object": "model", + "name": 
"Qwen: Qwen VL Plus", + "version": "1.0", + "description": "OpenRouter scouts for the lowest prices and best latencies/throughputs across dozens of providers, and lets you choose how to prioritize them.", + "inference_params": { + "temperature": 0.7, + "top_p": 0.95, + "frequency_penalty": 0, + "presence_penalty": 0, + "stream": true + }, + "engine": "openrouter" + }, + { + "model": "qwen/qwen2.5-vl-72b-instruct:free", + "object": "model", + "name": "Qwen: Qwen2.5 VL 72B Instruct", + "version": "1.0", + "description": "OpenRouter scouts for the lowest prices and best latencies/throughputs across dozens of providers, and lets you choose how to prioritize them.", + "inference_params": { + "temperature": 0.7, + "top_p": 0.95, + "frequency_penalty": 0, + "presence_penalty": 0, + "stream": true }, "engine": "openrouter" } diff --git a/extensions/engine-management-extension/package.json b/extensions/engine-management-extension/package.json index 96f962ffd..d08998ba8 100644 --- a/extensions/engine-management-extension/package.json +++ b/extensions/engine-management-extension/package.json @@ -1,14 +1,14 @@ { "name": "@janhq/engine-management-extension", "productName": "Engine Management", - "version": "1.0.0", + "version": "1.0.3", "description": "Manages AI engines and their configurations.", "main": "dist/index.js", "node": "dist/node/index.cjs.js", "author": "Jan ", "license": "MIT", "scripts": { - "test": "jest", + "test": "vitest run", "build": "rolldown -c rolldown.config.mjs", "codesign:darwin": "../../.github/scripts/auto-sign.sh", "codesign:win32:linux": "echo 'No codesigning required'", @@ -25,16 +25,15 @@ "rolldown": "^1.0.0-beta.1", "run-script-os": "^1.1.6", "ts-loader": "^9.5.0", - "typescript": "^5.3.3" + "typescript": "^5.3.3", + "vitest": "^3.0.6" }, "dependencies": { "@janhq/core": "../../core/package.tgz", - "cpu-instructions": "^0.0.13", "ky": "^1.7.2", "p-queue": "^8.0.1" }, "bundledDependencies": [ - "cpu-instructions", "@janhq/core" ], "engines": { 
diff --git a/extensions/engine-management-extension/resources/anthropic.json b/extensions/engine-management-extension/resources/anthropic.json index 12a3f08b8..f8ba74e2b 100644 --- a/extensions/engine-management-extension/resources/anthropic.json +++ b/extensions/engine-management-extension/resources/anthropic.json @@ -1,5 +1,5 @@ { - "id": "@janhq/inference-anthropic-extension", + "id": "anthropic", "type": "remote", "engine": "anthropic", "url": "https://console.anthropic.com/settings/keys", @@ -10,13 +10,14 @@ "transform_req": { "chat_completions": { "url": "https://api.anthropic.com/v1/messages", - "template": "{ {% for key, value in input_request %} {% if key == \"messages\" %} {% if input_request.messages.0.role == \"system\" %} \"system\": \"{{ input_request.messages.0.content }}\", \"messages\": [{% for message in input_request.messages %} {% if not loop.is_first %} {\"role\": \"{{ message.role }}\", \"content\": \"{{ message.content }}\" } {% if not loop.is_last %},{% endif %} {% endif %} {% endfor %}] {% else %} \"messages\": [{% for message in input_request.messages %} {\"role\": \"{{ message.role}}\", \"content\": \"{{ message.content }}\" } {% if not loop.is_last %},{% endif %} {% endfor %}] {% endif %} {% if not loop.is_last %},{% endif %} {% else if key == \"system\" or key == \"model\" or key == \"temperature\" or key == \"store\" or key == \"max_tokens\" or key == \"stream\" or key == \"presence_penalty\" or key == \"metadata\" or key == \"frequency_penalty\" or key == \"tools\" or key == \"tool_choice\" or key == \"logprobs\" or key == \"top_logprobs\" or key == \"logit_bias\" or key == \"n\" or key == \"modalities\" or key == \"prediction\" or key == \"response_format\" or key == \"service_tier\" or key == \"seed\" or key == \"stop\" or key == \"stream_options\" or key == \"top_p\" or key == \"parallel_tool_calls\" or key == \"user\" %}\"{{ key }}\": {{ tojson(value) }} {% if not loop.is_last %},{% endif %} {% endif %} {% endfor %} }" + 
"template": "{ {% for key, value in input_request %} {% if key == \"messages\" %} {% if input_request.messages.0.role == \"system\" %} \"system\": {{ tojson(input_request.messages.0.content) }}, \"messages\": [{% for message in input_request.messages %} {% if not loop.is_first %} {\"role\": {{ tojson(message.role) }}, \"content\": {% if not message.content or message.content == \"\" %} \".\" {% else %} {{ tojson(message.content) }} {% endif %} } {% if not loop.is_last %},{% endif %} {% endif %} {% endfor %}] {% else %} \"messages\": [{% for message in input_request.messages %} {\"role\": {{ tojson(message.role) }}, \"content\": {% if not message.content or message.content == \"\" %} \".\" {% else %} {{ tojson(message.content) }} {% endif %} } {% if not loop.is_last %},{% endif %} {% endfor %}] {% endif %} {% if not loop.is_last %},{% endif %} {% else if key == \"system\" or key == \"model\" or key == \"temperature\" or key == \"store\" or key == \"max_tokens\" or key == \"stream\" or key == \"metadata\" or key == \"tools\" or key == \"tool_choice\" or key == \"logprobs\" or key == \"top_logprobs\" or key == \"logit_bias\" or key == \"n\" or key == \"modalities\" or key == \"prediction\" or key == \"response_format\" or key == \"service_tier\" or key == \"seed\" or key == \"stop\" or key == \"stream_options\" or key == \"top_p\" or key == \"parallel_tool_calls\" or key == \"user\" %}\"{{ key }}\": {{ tojson(value) }} {% if not loop.is_last %},{% endif %} {% endif %} {% endfor %} }" } }, "transform_resp": { "chat_completions": { - "template": "{% if input_request.stream %} {\"object\": \"chat.completion.chunk\", \"model\": \"{{ input_request.model }}\", \"choices\": [{\"index\": 0, \"delta\": { {% if input_request.type == \"message_start\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"ping\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_delta\" %} \"role\": \"assistant\", \"content\": 
\"{{ input_request.delta.text }}\" {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% endif %} }, {% if input_request.type == \"content_block_stop\" %} \"finish_reason\": \"stop\" {% else %} \"finish_reason\": null {% endif %} }]} {% else %} {\"id\": \"{{ input_request.id }}\", \"created\": null, \"object\": \"chat.completion\", \"model\": \"{{ input_request.model }}\", \"choices\": [{ \"index\": 0, \"message\": { \"role\": \"{{ input_request.role }}\", \"content\": {% if input_request.content and input_request.content.0.type == \"text\" %} \"{{input_request.content.0.text}}\" {% else %} null {% endif %}, \"refusal\": null }, \"logprobs\": null, \"finish_reason\": \"{{ input_request.stop_reason }}\" } ], \"usage\": { \"prompt_tokens\": {{ input_request.usage.input_tokens }}, \"completion_tokens\": {{ input_request.usage.output_tokens }}, \"total_tokens\": {{ input_request.usage.input_tokens + input_request.usage.output_tokens }}, \"prompt_tokens_details\": { \"cached_tokens\": 0 }, \"completion_tokens_details\": { \"reasoning_tokens\": 0, \"accepted_prediction_tokens\": 0, \"rejected_prediction_tokens\": 0 } }, \"system_fingerprint\": \"fp_6b68a8204b\"} {% endif %}" + "template": "{% if input_request.stream %} {\"object\": \"chat.completion.chunk\", \"model\": \"{{ input_request.model }}\", \"choices\": [{\"index\": 0, \"delta\": { {% if input_request.type == \"message_start\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"ping\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_delta\" %} \"role\": \"assistant\", \"content\": {{ tojson(input_request.delta.text) }} {% else if input_request.type == \"content_block_stop\" %} \"role\": \"assistant\", \"content\": null {% else if input_request.type == \"content_block_stop\" %} \"role\": 
\"assistant\", \"content\": null {% endif %} }, {% if input_request.type == \"content_block_stop\" %} \"finish_reason\": \"stop\" {% else %} \"finish_reason\": null {% endif %} }]} {% else %} {{tojson(input_request)}} {% endif %}" } - } + }, + "explore_models_url": "https://docs.anthropic.com/en/docs/about-claude/models" } } diff --git a/extensions/engine-management-extension/resources/cohere.json b/extensions/engine-management-extension/resources/cohere.json index b10e00e5b..02f1cc625 100644 --- a/extensions/engine-management-extension/resources/cohere.json +++ b/extensions/engine-management-extension/resources/cohere.json @@ -1,5 +1,5 @@ { - "id": "@janhq/inference-cohere-extension", + "id": "cohere", "type": "remote", "engine": "cohere", "url": "https://dashboard.cohere.com/api-keys", @@ -10,13 +10,14 @@ "transform_req": { "chat_completions": { "url": "https://api.cohere.ai/v1/chat", - "template": "{ {% for key, value in input_request %} {% if key == \"messages\" %} {% if input_request.messages.0.role == \"system\" %} \"preamble\": \"{{ input_request.messages.0.content }}\", {% if length(input_request.messages) > 2 %} \"chatHistory\": [{% for message in input_request.messages %} {% if not loop.is_first and not loop.is_last %} {\"role\": {% if message.role == \"user\" %} \"USER\" {% else %} \"CHATBOT\" {% endif %}, \"content\": \"{{ message.content }}\" } {% if loop.index < length(input_request.messages) - 2 %},{% endif %} {% endif %} {% endfor %}], {% endif %} \"message\": \"{{ last(input_request.messages).content }}\" {% else %} {% if length(input_request.messages) > 2 %} \"chatHistory\": [{% for message in input_request.messages %} {% if not loop.is_last %} { \"role\": {% if message.role == \"user\" %} \"USER\" {% else %} \"CHATBOT\" {% endif %}, \"content\": \"{{ message.content }}\" } {% if loop.index < length(input_request.messages) - 2 %},{% endif %} {% endif %} {% endfor %}],{% endif %}\"message\": \"{{ last(input_request.messages).content }}\" {% endif 
%}{% if not loop.is_last %},{% endif %} {% else if key == \"system\" or key == \"model\" or key == \"temperature\" or key == \"store\" or key == \"max_tokens\" or key == \"stream\" or key == \"presence_penalty\" or key == \"metadata\" or key == \"frequency_penalty\" or key == \"tools\" or key == \"tool_choice\" or key == \"logprobs\" or key == \"top_logprobs\" or key == \"logit_bias\" or key == \"n\" or key == \"modalities\" or key == \"prediction\" or key == \"response_format\" or key == \"service_tier\" or key == \"seed\" or key == \"stop\" or key == \"stream_options\" or key == \"top_p\" or key == \"parallel_tool_calls\" or key == \"user\" %} \"{{ key }}\": {{ tojson(value) }} {% if not loop.is_last %},{% endif %} {% endif %} {% endfor %} }" + "template": "{ {% for key, value in input_request %} {% if key == \"messages\" %} {% if input_request.messages.0.role == \"system\" %} \"preamble\": {{ tojson(input_request.messages.0.content) }}, {% if length(input_request.messages) > 2 %} \"chatHistory\": [{% for message in input_request.messages %} {% if not loop.is_first and not loop.is_last %} {\"role\": {% if message.role == \"user\" %} \"USER\" {% else %} \"CHATBOT\" {% endif %}, \"content\": {{ tojson(message.content) }} } {% if loop.index < length(input_request.messages) - 2 %},{% endif %} {% endif %} {% endfor %}], {% endif %} \"message\": {{ tojson(last(input_request.messages).content) }} {% else %} {% if length(input_request.messages) > 2 %} \"chatHistory\": [{% for message in input_request.messages %} {% if not loop.is_last %} { \"role\": {% if message.role == \"user\" %} \"USER\" {% else %} \"CHATBOT\" {% endif %}, \"content\": {{ tojson(message.content) }} } {% if loop.index < length(input_request.messages) - 2 %},{% endif %} {% endif %} {% endfor %}],{% endif %}\"message\": {{ tojson(last(input_request.messages).content) }} {% endif %}{% if not loop.is_last %},{% endif %} {% else if key == \"system\" or key == \"model\" or key == \"temperature\" or key == 
\"store\" or key == \"max_tokens\" or key == \"stream\" or key == \"presence_penalty\" or key == \"metadata\" or key == \"frequency_penalty\" or key == \"tools\" or key == \"tool_choice\" or key == \"logprobs\" or key == \"top_logprobs\" or key == \"logit_bias\" or key == \"n\" or key == \"modalities\" or key == \"prediction\" or key == \"response_format\" or key == \"service_tier\" or key == \"seed\" or key == \"stop\" or key == \"stream_options\" or key == \"top_p\" or key == \"parallel_tool_calls\" or key == \"user\" %} \"{{ key }}\": {{ tojson(value) }} {% if not loop.is_last %},{% endif %} {% endif %} {% endfor %} }" } }, "transform_resp": { "chat_completions": { - "template": "{% if input_request.stream %} {\"object\": \"chat.completion.chunk\", \"model\": \"{{ input_request.model }}\", \"choices\": [{\"index\": 0, \"delta\": { {% if input_request.event_type == \"text-generation\" %} \"role\": \"assistant\", \"content\": \"{{ input_request.text }}\" {% else %} \"role\": \"assistant\", \"content\": null {% endif %} }, {% if input_request.event_type == \"stream-end\" %} \"finish_reason\": \"{{ input_request.finish_reason }}\" {% else %} \"finish_reason\": null {% endif %} }]} {% else %} {\"id\": \"{{ input_request.generation_id }}\", \"created\": null, \"object\": \"chat.completion\", \"model\": {% if input_request.model %} \"{{ input_request.model }}\" {% else %} \"command-r-plus-08-2024\" {% endif %}, \"choices\": [{ \"index\": 0, \"message\": { \"role\": \"assistant\", \"content\": {% if not input_request.text %} null {% else %} \"{{ input_request.text }}\" {% endif %}, \"refusal\": null }, \"logprobs\": null, \"finish_reason\": \"{{ input_request.finish_reason }}\" } ], \"usage\": { \"prompt_tokens\": {{ input_request.meta.tokens.input_tokens }}, \"completion_tokens\": {{ input_request.meta.tokens.output_tokens }},\"total_tokens\": {{ input_request.meta.tokens.input_tokens + input_request.meta.tokens.output_tokens }}, \"prompt_tokens_details\": { 
\"cached_tokens\": 0 },\"completion_tokens_details\": { \"reasoning_tokens\": 0, \"accepted_prediction_tokens\": 0, \"rejected_prediction_tokens\": 0 } }, \"system_fingerprint\": \"fp_6b68a8204b\"} {% endif %}" + "template": "{% if input_request.stream %} {\"object\": \"chat.completion.chunk\", \"model\": \"{{ input_request.model }}\", \"choices\": [{\"index\": 0, \"delta\": { {% if input_request.event_type == \"text-generation\" %} \"role\": \"assistant\", \"content\": {{ tojson(input_request.text) }} {% else %} \"role\": \"assistant\", \"content\": null {% endif %} }, {% if input_request.event_type == \"stream-end\" %} \"finish_reason\": \"{{ input_request.finish_reason }}\" {% else %} \"finish_reason\": null {% endif %} }]} {% else %} {\"id\": \"{{ input_request.generation_id }}\", \"created\": null, \"object\": \"chat.completion\", \"model\": {% if input_request.model %} \"{{ input_request.model }}\" {% else %} \"command-r-plus-08-2024\" {% endif %}, \"choices\": [{ \"index\": 0, \"message\": { \"role\": \"assistant\", \"content\": {% if not input_request.text %} null {% else %} {{ tojson(input_request.text) }} {% endif %}, \"refusal\": null }, \"logprobs\": null, \"finish_reason\": \"{{ input_request.finish_reason }}\" } ], \"usage\": { \"prompt_tokens\": {{ input_request.meta.tokens.input_tokens }}, \"completion_tokens\": {{ input_request.meta.tokens.output_tokens }},\"total_tokens\": {{ input_request.meta.tokens.input_tokens + input_request.meta.tokens.output_tokens }}, \"prompt_tokens_details\": { \"cached_tokens\": 0 },\"completion_tokens_details\": { \"reasoning_tokens\": 0, \"accepted_prediction_tokens\": 0, \"rejected_prediction_tokens\": 0 } }, \"system_fingerprint\": \"fp_6b68a8204b\"} {% endif %}" } - } + }, + "explore_models_url": "https://docs.cohere.com/v2/docs/models" } } diff --git a/extensions/engine-management-extension/resources/deepseek.json b/extensions/engine-management-extension/resources/deepseek.json new file mode 100644 index 
000000000..214ec3b23 --- /dev/null +++ b/extensions/engine-management-extension/resources/deepseek.json @@ -0,0 +1,23 @@ +{ + "id": "deepseek", + "type": "remote", + "engine": "deepseek", + "url": "https://platform.deepseek.com/api_keys", + "api_key": "", + "metadata": { + "get_models_url": "https://api.deepseek.com/models", + "header_template": "Authorization: Bearer {{api_key}}", + "transform_req": { + "chat_completions": { + "url": "https://api.deepseek.com/chat/completions", + "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"messages\" or key == \"temperature\" or key == \"store\" or key == \"max_tokens\" or key == \"stream\" or key == \"presence_penalty\" or key == \"metadata\" or key == \"frequency_penalty\" or key == \"tools\" or key == \"tool_choice\" or key == \"logprobs\" or key == \"top_logprobs\" or key == \"logit_bias\" or key == \"n\" or key == \"modalities\" or key == \"prediction\" or key == \"response_format\" or key == \"service_tier\" or key == \"model\" or key == \"seed\" or key == \"stop\" or key == \"stream_options\" or key == \"top_p\" or key == \"parallel_tool_calls\" or key == \"user\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }" + } + }, + "transform_resp": { + "chat_completions": { + "template": "{{tojson(input_request)}}" + } + }, + "explore_models_url": "https://api-docs.deepseek.com/quick_start/pricing" + } +} diff --git a/extensions/engine-management-extension/resources/google_gemini.json b/extensions/engine-management-extension/resources/google_gemini.json new file mode 100644 index 000000000..e0fa809a5 --- /dev/null +++ b/extensions/engine-management-extension/resources/google_gemini.json @@ -0,0 +1,23 @@ +{ + "id": "google_gemini", + "type": "remote", + "engine": "google_gemini", + "url": "https://aistudio.google.com/apikey", + "api_key": "", + "metadata": { + "get_models_url": 
"https://generativelanguage.googleapis.com/v1beta/models", + "header_template": "Authorization: Bearer {{api_key}}", + "transform_req": { + "chat_completions": { + "url": "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions", + "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"messages\" or key == \"temperature\" or key == \"store\" or key == \"max_tokens\" or key == \"stream\" or key == \"presence_penalty\" or key == \"metadata\" or key == \"frequency_penalty\" or key == \"tools\" or key == \"tool_choice\" or key == \"logprobs\" or key == \"top_logprobs\" or key == \"logit_bias\" or key == \"n\" or key == \"modalities\" or key == \"prediction\" or key == \"response_format\" or key == \"service_tier\" or key == \"model\" or key == \"seed\" or key == \"stop\" or key == \"stream_options\" or key == \"top_p\" or key == \"parallel_tool_calls\" or key == \"user\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }" + } + }, + "transform_resp": { + "chat_completions": { + "template": "{{tojson(input_request)}}" + } + }, + "explore_models_url": "https://ai.google.dev/gemini-api/docs/models/gemini" + } +} diff --git a/extensions/engine-management-extension/resources/groq.json b/extensions/engine-management-extension/resources/groq.json index 60d553a92..87d215ab2 100644 --- a/extensions/engine-management-extension/resources/groq.json +++ b/extensions/engine-management-extension/resources/groq.json @@ -1,5 +1,5 @@ { - "id": "@janhq/inference-groq-extension", + "id": "groq", "type": "remote", "engine": "groq", "url": "https://console.groq.com/keys", @@ -15,8 +15,9 @@ }, "transform_resp": { "chat_completions": { - "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"choices\" or key == \"created\" or key == \"model\" or key == \"service_tier\" or key == \"system_fingerprint\" or key == \"stream\" or key == 
\"object\" or key == \"usage\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }" + "template": "{{tojson(input_request)}}" } - } + }, + "explore_models_url": "https://console.groq.com/docs/models" } } diff --git a/extensions/engine-management-extension/resources/martian.json b/extensions/engine-management-extension/resources/martian.json index 3a65f3981..3fd458660 100644 --- a/extensions/engine-management-extension/resources/martian.json +++ b/extensions/engine-management-extension/resources/martian.json @@ -1,5 +1,5 @@ { - "id": "@janhq/inference-martian-extension", + "id": "martian", "type": "remote", "engine": "martian", "url": "https://withmartian.com/dashboard", @@ -15,8 +15,9 @@ }, "transform_resp": { "chat_completions": { - "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"choices\" or key == \"created\" or key == \"model\" or key == \"service_tier\" or key == \"system_fingerprint\" or key == \"stream\" or key == \"object\" or key == \"usage\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }" + "template": "{{tojson(input_request)}}" } - } + }, + "explore_models_url": "https://withmartian.github.io/llm-adapters/" } } diff --git a/extensions/engine-management-extension/resources/mistral.json b/extensions/engine-management-extension/resources/mistral.json index 3f447dc4c..4a24471a2 100644 --- a/extensions/engine-management-extension/resources/mistral.json +++ b/extensions/engine-management-extension/resources/mistral.json @@ -1,5 +1,5 @@ { - "id": "@janhq/inference-mistral-extension", + "id": "mistral", "type": "remote", "engine": "mistral", "url": "https://console.mistral.ai/api-keys/", @@ -17,6 +17,7 @@ "chat_completions": { "template": "{{tojson(input_request)}}" } - } + }, + "explore_models_url": "https://docs.mistral.ai/getting-started/models/models_overview/" } } diff --git 
a/extensions/engine-management-extension/resources/nvidia.json b/extensions/engine-management-extension/resources/nvidia.json index 240130090..573bad4f6 100644 --- a/extensions/engine-management-extension/resources/nvidia.json +++ b/extensions/engine-management-extension/resources/nvidia.json @@ -1,5 +1,5 @@ { - "id": "@janhq/inference-nvidia-extension", + "id": "nvidia", "type": "remote", "engine": "nvidia", "url": "https://org.ngc.nvidia.com/setup/personal-keys", @@ -15,8 +15,9 @@ }, "transform_resp": { "chat_completions": { - "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"choices\" or key == \"created\" or key == \"model\" or key == \"service_tier\" or key == \"system_fingerprint\" or key == \"stream\" or key == \"object\" or key == \"usage\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }" + "template": "{{tojson(input_request)}}" } - } + }, + "explore_models_url": "https://build.nvidia.com/models" } } diff --git a/extensions/engine-management-extension/resources/openai.json b/extensions/engine-management-extension/resources/openai.json index 97effd42a..f178a1a6f 100644 --- a/extensions/engine-management-extension/resources/openai.json +++ b/extensions/engine-management-extension/resources/openai.json @@ -1,5 +1,5 @@ { - "id": "@janhq/inference-openai-extension", + "id": "openai", "type": "remote", "engine": "openai", "url": "https://platform.openai.com/account/api-keys", @@ -10,13 +10,14 @@ "transform_req": { "chat_completions": { "url": "https://api.openai.com/v1/chat/completions", - "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"model\" or key == \"temperature\" or key == \"store\" or key == \"messages\" or key == \"stream\" or key == \"presence_penalty\" or key == \"metadata\" or key == \"frequency_penalty\" or key == \"tools\" or key == \"tool_choice\" or key == \"logprobs\" or key == 
\"top_logprobs\" or key == \"logit_bias\" or key == \"n\" or key == \"modalities\" or key == \"prediction\" or key == \"response_format\" or key == \"service_tier\" or key == \"seed\" or key == \"stream_options\" or key == \"top_p\" or key == \"parallel_tool_calls\" or key == \"user\" or key == \"max_tokens\" or ((input_request.model == \"o1\" or input_request.model == \"o1-preview\" or input_request.model == \"o1-mini\") and (key == \"stop\")) %} {% if not first %} , {% endif %} {% if key == \"messages\" and (input_request.model == \"o1\" or input_request.model == \"o1-preview\" or input_request.model == \"o1-mini\") and input_request.messages.0.role == \"system\" %} \"messages\": [{% for message in input_request.messages %} {% if not loop.is_first %} { \"role\": \"{{ message.role }}\", \"content\": \"{{ message.content }}\" } {% if not loop.is_last %} , {% endif %} {% endif %} {% endfor %}] {% else if key == \"max_tokens\" and (input_request.model == \"o1\" or input_request.model == \"o1-preview\" or input_request.model == \"o1-mini\") %} \"max_completion_tokens\": {{ tojson(value) }} {% else %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endif %} {% endfor %} }" + "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"model\" or key == \"temperature\" or key == \"store\" or key == \"messages\" or key == \"stream\" or key == \"presence_penalty\" or key == \"metadata\" or key == \"frequency_penalty\" or key == \"tools\" or key == \"tool_choice\" or key == \"logprobs\" or key == \"top_logprobs\" or key == \"logit_bias\" or key == \"n\" or key == \"modalities\" or key == \"prediction\" or key == \"response_format\" or key == \"service_tier\" or key == \"seed\" or key == \"stream_options\" or key == \"top_p\" or key == \"parallel_tool_calls\" or key == \"user\" or key == \"max_tokens\" or key == \"stop\" %} {% if not first %}, {% endif %} {% if key == \"messages\" and (input_request.model == \"o1\" or 
input_request.model == \"o1-preview\" or input_request.model == \"o1-mini\") and input_request.messages.0.role == \"system\" %} \"messages\": [ {% for message in input_request.messages %} {% if not loop.is_first %} { \"role\": \"{{ message.role }}\", \"content\": \"{{ message.content }}\" } {% if not loop.is_last %}, {% endif %} {% endif %} {% endfor %} ] {% else if key == \"stop\" and (input_request.model == \"o1\" or input_request.model == \"o1-preview\" or input_request.model == \"o1-mini\" or input_request.model == \"o3\" or input_request.model == \"o3-mini\") %} {% set first = false %} {% else if key == \"max_tokens\" and (input_request.model == \"o1\" or input_request.model == \"o1-preview\" or input_request.model == \"o1-mini\" or input_request.model == \"o3\" or input_request.model == \"o3-mini\") %} \"max_completion_tokens\": {{ tojson(value) }} {% set first = false %} {% else %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endif %} {% endfor %} }" } }, "transform_resp": { "chat_completions": { - "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"choices\" or key == \"created\" or key == \"model\" or key == \"service_tier\" or key == \"stream\" or key == \"object\" or key == \"usage\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }" + "template": "{{tojson(input_request)}}" } - } + }, + "explore_models_url": "https://platform.openai.com/docs/models" } } diff --git a/extensions/engine-management-extension/resources/openrouter.json b/extensions/engine-management-extension/resources/openrouter.json index 45dc48414..798199708 100644 --- a/extensions/engine-management-extension/resources/openrouter.json +++ b/extensions/engine-management-extension/resources/openrouter.json @@ -1,5 +1,5 @@ { - "id": "@janhq/inference-openrouter-extension", + "id": "openrouter", "type": "remote", "engine": "openrouter", "url": 
"https://openrouter.ai/keys", @@ -10,13 +10,14 @@ "transform_req": { "chat_completions": { "url": "https://openrouter.ai/api/v1/chat/completions", - "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"messages\" or key == \"temperature\" or key == \"store\" or key == \"max_tokens\" or key == \"stream\" or key == \"presence_penalty\" or key == \"metadata\" or key == \"frequency_penalty\" or key == \"tools\" or key == \"tool_choice\" or key == \"logprobs\" or key == \"top_logprobs\" or key == \"logit_bias\" or key == \"n\" or key == \"modalities\" or key == \"prediction\" or key == \"response_format\" or key == \"service_tier\" or key == \"seed\" or key == \"stop\" or key == \"stream_options\" or key == \"top_p\" or key == \"parallel_tool_calls\" or key == \"user\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }" + "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"messages\" or key == \"temperature\" or key == \"store\" or key == \"max_tokens\" or key == \"stream\" or key == \"presence_penalty\" or key == \"metadata\" or key == \"frequency_penalty\" or key == \"tools\" or key == \"tool_choice\" or key == \"logprobs\" or key == \"top_logprobs\" or key == \"logit_bias\" or key == \"n\" or key == \"modalities\" or key == \"prediction\" or key == \"response_format\" or key == \"service_tier\" or key == \"model\" or key == \"seed\" or key == \"stop\" or key == \"stream_options\" or key == \"top_p\" or key == \"parallel_tool_calls\" or key == \"user\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }" } }, "transform_resp": { "chat_completions": { - "template": "{ {% set first = true %} {% for key, value in input_request %} {% if key == \"choices\" or key == \"created\" or key == \"model\" or key == \"service_tier\" or key == \"system_fingerprint\" or key 
== \"stream\" or key == \"object\" or key == \"usage\" %} {% if not first %},{% endif %} \"{{ key }}\": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }" + "template": "{{tojson(input_request)}}" } - } + }, + "explore_models_url": "https://openrouter.ai/models" } } diff --git a/extensions/engine-management-extension/rolldown.config.mjs b/extensions/engine-management-extension/rolldown.config.mjs index 1290338db..02b84b363 100644 --- a/extensions/engine-management-extension/rolldown.config.mjs +++ b/extensions/engine-management-extension/rolldown.config.mjs @@ -11,11 +11,23 @@ export default defineConfig([ }, define: { NODE: JSON.stringify(`${pkgJson.name}/${pkgJson.node}`), - API_URL: JSON.stringify('http://127.0.0.1:39291'), - SOCKET_URL: JSON.stringify('ws://127.0.0.1:39291'), - CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.49'), + API_URL: JSON.stringify( + `http://127.0.0.1:${process.env.CORTEX_API_PORT ?? '39291'}` + ), + PLATFORM: JSON.stringify(process.platform), + CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.55'), DEFAULT_REMOTE_ENGINES: JSON.stringify(engines), DEFAULT_REMOTE_MODELS: JSON.stringify(models), + DEFAULT_REQUEST_PAYLOAD_TRANSFORM: JSON.stringify( + `{ {% set first = true %} {% for key, value in input_request %} {% if key == "messages" or key == "model" or key == "temperature" or key == "store" or key == "max_tokens" or key == "stream" or key == "presence_penalty" or key == "metadata" or key == "frequency_penalty" or key == "tools" or key == "tool_choice" or key == "logprobs" or key == "top_logprobs" or key == "logit_bias" or key == "n" or key == "modalities" or key == "prediction" or key == "response_format" or key == "service_tier" or key == "seed" or key == "stop" or key == "stream_options" or key == "top_p" or key == "parallel_tool_calls" or key == "user" %} {% if not first %},{% endif %} "{{ key }}": {{ tojson(value) }} {% set first = false %} {% endif %} {% endfor %} }` + ), + DEFAULT_RESPONSE_BODY_TRANSFORM: 
JSON.stringify( + '{{tojson(input_request)}}' + ), + DEFAULT_REQUEST_HEADERS_TRANSFORM: JSON.stringify( + 'Authorization: Bearer {{api_key}}' + ), + VERSION: JSON.stringify(pkgJson.version ?? '0.0.0'), }, }, { @@ -26,18 +38,7 @@ export default defineConfig([ file: 'dist/node/index.cjs.js', }, define: { - CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.49'), - }, - }, - { - input: 'src/node/cpuInfo.ts', - output: { - format: 'cjs', - file: 'dist/node/cpuInfo.js', - }, - external: ['cpu-instructions'], - resolve: { - extensions: ['.ts', '.js', '.svg'], + CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.55'), }, }, ]) diff --git a/extensions/engine-management-extension/src/@types/global.d.ts b/extensions/engine-management-extension/src/@types/global.d.ts index 2d520d5f9..0dbed3806 100644 --- a/extensions/engine-management-extension/src/@types/global.d.ts +++ b/extensions/engine-management-extension/src/@types/global.d.ts @@ -1,7 +1,11 @@ declare const API_URL: string declare const CORTEX_ENGINE_VERSION: string -declare const SOCKET_URL: string +declare const PLATFORM: string declare const NODE: string +declare const DEFAULT_REQUEST_PAYLOAD_TRANSFORM: string +declare const DEFAULT_RESPONSE_BODY_TRANSFORM: string +declare const DEFAULT_REQUEST_HEADERS_TRANSFORM: string +declare const VERSION: string declare const DEFAULT_REMOTE_ENGINES: ({ id: string diff --git a/extensions/engine-management-extension/src/api.test.ts b/extensions/engine-management-extension/src/api.test.ts new file mode 100644 index 000000000..ab72f8127 --- /dev/null +++ b/extensions/engine-management-extension/src/api.test.ts @@ -0,0 +1,199 @@ +import { describe, beforeEach, it, expect, vi } from 'vitest' +import JanEngineManagementExtension from './index' +import { InferenceEngine } from '@janhq/core' + +describe('API methods', () => { + let extension: JanEngineManagementExtension + + beforeEach(() => { + // @ts-ignore + extension = new JanEngineManagementExtension() + vi.resetAllMocks() + }) + + 
describe('getReleasedEnginesByVersion', () => { + it('should return engines filtered by platform if provided', async () => { + const mockEngines = [ + { + name: 'windows-amd64-avx2', + version: '1.0.0', + }, + { + name: 'linux-amd64-avx2', + version: '1.0.0', + }, + ] + + vi.mock('ky', () => ({ + default: { + get: () => ({ + json: () => Promise.resolve(mockEngines), + }), + }, + })) + + const mock = vi.spyOn(extension, 'getReleasedEnginesByVersion') + mock.mockImplementation(async (name, version, platform) => { + const result = await Promise.resolve(mockEngines) + return platform ? result.filter(r => r.name.includes(platform)) : result + }) + + const result = await extension.getReleasedEnginesByVersion( + InferenceEngine.cortex_llamacpp, + '1.0.0', + 'windows' + ) + + expect(result).toHaveLength(1) + expect(result[0].name).toBe('windows-amd64-avx2') + }) + + it('should return all engines if platform is not provided', async () => { + const mockEngines = [ + { + name: 'windows-amd64-avx2', + version: '1.0.0', + }, + { + name: 'linux-amd64-avx2', + version: '1.0.0', + }, + ] + + vi.mock('ky', () => ({ + default: { + get: () => ({ + json: () => Promise.resolve(mockEngines), + }), + }, + })) + + const mock = vi.spyOn(extension, 'getReleasedEnginesByVersion') + mock.mockImplementation(async (name, version, platform) => { + const result = await Promise.resolve(mockEngines) + return platform ? 
result.filter(r => r.name.includes(platform)) : result + }) + + const result = await extension.getReleasedEnginesByVersion( + InferenceEngine.cortex_llamacpp, + '1.0.0' + ) + + expect(result).toHaveLength(2) + }) + }) + + describe('getLatestReleasedEngine', () => { + it('should return engines filtered by platform if provided', async () => { + const mockEngines = [ + { + name: 'windows-amd64-avx2', + version: '1.0.0', + }, + { + name: 'linux-amd64-avx2', + version: '1.0.0', + }, + ] + + vi.mock('ky', () => ({ + default: { + get: () => ({ + json: () => Promise.resolve(mockEngines), + }), + }, + })) + + const mock = vi.spyOn(extension, 'getLatestReleasedEngine') + mock.mockImplementation(async (name, platform) => { + const result = await Promise.resolve(mockEngines) + return platform ? result.filter(r => r.name.includes(platform)) : result + }) + + const result = await extension.getLatestReleasedEngine( + InferenceEngine.cortex_llamacpp, + 'linux' + ) + + expect(result).toHaveLength(1) + expect(result[0].name).toBe('linux-amd64-avx2') + }) + }) + + describe('installEngine', () => { + it('should send install request with correct parameters', async () => { + const mockEngineConfig = { + variant: 'windows-amd64-avx2', + version: '1.0.0', + } + + vi.mock('ky', () => ({ + default: { + post: (url, options) => { + expect(url).toBe(`${API_URL}/v1/engines/${InferenceEngine.cortex_llamacpp}/install`) + expect(options.json).toEqual(mockEngineConfig) + return Promise.resolve({ messages: 'OK' }) + }, + }, + })) + + const result = await extension.installEngine( + InferenceEngine.cortex_llamacpp, + mockEngineConfig + ) + + expect(result).toEqual({ messages: 'OK' }) + }) + }) + + describe('uninstallEngine', () => { + it('should send uninstall request with correct parameters', async () => { + const mockEngineConfig = { + variant: 'windows-amd64-avx2', + version: '1.0.0', + } + + vi.mock('ky', () => ({ + default: { + delete: (url, options) => { + 
expect(url).toBe(`${API_URL}/v1/engines/${InferenceEngine.cortex_llamacpp}/install`) + expect(options.json).toEqual(mockEngineConfig) + return Promise.resolve({ messages: 'OK' }) + }, + }, + })) + + const result = await extension.uninstallEngine( + InferenceEngine.cortex_llamacpp, + mockEngineConfig + ) + + expect(result).toEqual({ messages: 'OK' }) + }) + }) + + describe('addRemoteModel', () => { + it('should send add model request with correct parameters', async () => { + const mockModel = { + id: 'gpt-4', + name: 'GPT-4', + engine: InferenceEngine.openai, + } + + vi.mock('ky', () => ({ + default: { + post: (url, options) => { + expect(url).toBe(`${API_URL}/v1/models/add`) + expect(options.json).toHaveProperty('id', 'gpt-4') + expect(options.json).toHaveProperty('engine', InferenceEngine.openai) + expect(options.json).toHaveProperty('inference_params') + return Promise.resolve() + }, + }, + })) + + await extension.addRemoteModel(mockModel) + // Success is implied by no thrown exceptions + }) + }) +}) \ No newline at end of file diff --git a/extensions/engine-management-extension/src/error.test.ts b/extensions/engine-management-extension/src/error.test.ts new file mode 100644 index 000000000..87389c50c --- /dev/null +++ b/extensions/engine-management-extension/src/error.test.ts @@ -0,0 +1,19 @@ +import { describe, it, expect } from 'vitest' +import { EngineError } from './error' + +describe('EngineError', () => { + it('should create an error with the correct message', () => { + const errorMessage = 'Test error message' + const error = new EngineError(errorMessage) + + expect(error).toBeInstanceOf(Error) + expect(error.message).toBe(errorMessage) + expect(error.name).toBe('EngineError') + }) + + it('should create an error with default message if none provided', () => { + const error = new EngineError() + + expect(error.message).toBe('Engine error occurred') + }) +}) \ No newline at end of file diff --git a/extensions/engine-management-extension/src/index.test.ts 
b/extensions/engine-management-extension/src/index.test.ts new file mode 100644 index 000000000..174992f3b --- /dev/null +++ b/extensions/engine-management-extension/src/index.test.ts @@ -0,0 +1,449 @@ +import { describe, beforeEach, it, expect, vi } from 'vitest' +import JanEngineManagementExtension from './index' +import { Engines, InferenceEngine } from '@janhq/core' +import { EngineError } from './error' +import { HTTPError } from 'ky' + +vi.stubGlobal('API_URL', 'http://localhost:3000') + +const mockEngines: Engines = [ + { + name: 'variant1', + version: '1.0.0', + type: 'local', + engine: InferenceEngine.cortex_llamacpp, + }, +] + +const mockRemoteEngines: Engines = [ + { + name: 'openai', + version: '1.0.0', + type: 'remote', + engine: InferenceEngine.openai, + }, +] + +const mockRemoteModels = { + data: [ + { + id: 'gpt-4', + name: 'GPT-4', + engine: InferenceEngine.openai, + }, + ], +} + +vi.stubGlobal('DEFAULT_REMOTE_ENGINES', mockEngines) +vi.stubGlobal('DEFAULT_REMOTE_MODELS', mockRemoteModels.data) + +describe('migrate engine settings', () => { + let extension: JanEngineManagementExtension + + beforeEach(() => { + // @ts-ignore + extension = new JanEngineManagementExtension() + vi.resetAllMocks() + }) + + it('engines should be migrated', async () => { + vi.stubGlobal('VERSION', '2.0.0') + + vi.spyOn(extension, 'getEngines').mockResolvedValue([]) + const mockUpdateEngines = vi + .spyOn(extension, 'updateEngine') + .mockReturnThis() + + mockUpdateEngines.mockResolvedValue({ + messages: 'OK', + }) + + await extension.migrate() + + // Assert that the returned value is equal to the mockEngines object + expect(mockUpdateEngines).toBeCalled() + }) + + it('should not migrate when extension version is not updated', async () => { + vi.stubGlobal('VERSION', '0.0.0') + vi.spyOn(extension, 'getEngines').mockResolvedValue([]) + const mockUpdateEngines = vi + .spyOn(extension, 'updateEngine') + .mockReturnThis() + + mockUpdateEngines.mockResolvedValue({ + messages: 
'OK', + }) + + await extension.migrate() + + // Assert that the returned value is equal to the mockEngines object + expect(mockUpdateEngines).not.toBeCalled() + }) +}) + +describe('getEngines', () => { + let extension: JanEngineManagementExtension + + beforeEach(() => { + // @ts-ignore + extension = new JanEngineManagementExtension() + vi.resetAllMocks() + }) + + it('should return a list of engines', async () => { + const mockKyGet = vi.spyOn(extension, 'getEngines') + mockKyGet.mockResolvedValue(mockEngines) + + const engines = await extension.getEngines() + + expect(engines).toEqual(mockEngines) + }) +}) + +describe('getRemoteModels', () => { + let extension: JanEngineManagementExtension + + beforeEach(() => { + // @ts-ignore + extension = new JanEngineManagementExtension() + vi.resetAllMocks() + }) + + it('should return a list of remote models', async () => { + vi.mock('ky', () => ({ + default: { + get: () => ({ + json: () => Promise.resolve(mockRemoteModels), + }), + }, + })) + + const models = await extension.getRemoteModels('openai') + expect(models).toEqual(mockRemoteModels) + }) + + it('should return empty data array when request fails', async () => { + vi.mock('ky', () => ({ + default: { + get: () => ({ + json: () => Promise.reject(new Error('Failed to fetch')), + }), + }, + })) + + const models = await extension.getRemoteModels('openai') + expect(models).toEqual({ data: [] }) + }) +}) + +describe('getInstalledEngines', () => { + let extension: JanEngineManagementExtension + + beforeEach(() => { + // @ts-ignore + extension = new JanEngineManagementExtension() + vi.resetAllMocks() + }) + + it('should return a list of installed engines', async () => { + const mockEngineVariants = [ + { + name: 'windows-amd64-noavx', + version: '1.0.0', + }, + ] + + vi.mock('ky', () => ({ + default: { + get: () => ({ + json: () => Promise.resolve(mockEngineVariants), + }), + }, + })) + + const mock = vi.spyOn(extension, 'getInstalledEngines') + 
mock.mockResolvedValue(mockEngineVariants) + + const engines = await extension.getInstalledEngines(InferenceEngine.cortex_llamacpp) + expect(engines).toEqual(mockEngineVariants) + }) +}) + +describe('healthz', () => { + let extension: JanEngineManagementExtension + + beforeEach(() => { + // @ts-ignore + extension = new JanEngineManagementExtension() + vi.resetAllMocks() + }) + + it('should perform health check successfully', async () => { + vi.mock('ky', () => ({ + default: { + get: () => Promise.resolve(), + }, + })) + + await extension.healthz() + expect(extension.queue.concurrency).toBe(Infinity) + }) +}) + +describe('updateDefaultEngine', () => { + let extension: JanEngineManagementExtension + + beforeEach(() => { + // @ts-ignore + extension = new JanEngineManagementExtension() + vi.resetAllMocks() + }) + + it('should set default engine variant if not installed', async () => { + vi.stubGlobal('PLATFORM', 'win32') + vi.stubGlobal('CORTEX_ENGINE_VERSION', '1.0.0') + + const mockGetDefaultEngineVariant = vi.spyOn( + extension, + 'getDefaultEngineVariant' + ) + mockGetDefaultEngineVariant.mockResolvedValue({ + variant: 'variant1', + version: '1.0.0', + }) + + const mockGetInstalledEngines = vi.spyOn(extension, 'getInstalledEngines') + mockGetInstalledEngines.mockResolvedValue([]) + + const mockSetDefaultEngineVariant = vi.spyOn( + extension, + 'setDefaultEngineVariant' + ) + mockSetDefaultEngineVariant.mockResolvedValue({ messages: 'OK' }) + + vi.mock('@janhq/core', async (importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + systemInformation: vi.fn().mockResolvedValue({ gpuSetting: 'high' }), + } + }) + + vi.mock('./utils', async (importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + engineVariant: vi.fn().mockResolvedValue('windows-amd64-noavx'), + } + }) + + await extension.updateDefaultEngine() + + expect(mockSetDefaultEngineVariant).toHaveBeenCalledWith('llama-cpp', { + 
variant: 'windows-amd64-noavx', + version: '1.0.0', + }) + }) + + it('should not reset default engine variant if installed', async () => { + vi.stubGlobal('PLATFORM', 'win32') + vi.stubGlobal('CORTEX_ENGINE_VERSION', '1.0.0') + + const mockGetDefaultEngineVariant = vi.spyOn( + extension, + 'getDefaultEngineVariant' + ) + mockGetDefaultEngineVariant.mockResolvedValue({ + variant: 'windows-amd64-noavx', + version: '1.0.0', + }) + + const mockGetInstalledEngines = vi.spyOn(extension, 'getInstalledEngines') + mockGetInstalledEngines.mockResolvedValue([ + { + name: 'windows-amd64-noavx', + version: '1.0.0', + type: 'local', + engine: InferenceEngine.cortex_llamacpp, + }, + ]) + + const mockSetDefaultEngineVariant = vi.spyOn( + extension, + 'setDefaultEngineVariant' + ) + mockSetDefaultEngineVariant.mockResolvedValue({ messages: 'OK' }) + + vi.mock('@janhq/core', async (importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + systemInformation: vi.fn().mockResolvedValue({ gpuSetting: 'high' }), + } + }) + + vi.mock('./utils', async (importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + engineVariant: vi.fn().mockResolvedValue('windows-amd64-noavx'), + } + }) + + await extension.updateDefaultEngine() + + expect(mockSetDefaultEngineVariant).not.toBeCalled() + }) + + it('should handle HTTPError when getting default engine variant', async () => { + vi.stubGlobal('PLATFORM', 'win32') + vi.stubGlobal('CORTEX_ENGINE_VERSION', '1.0.0') + + const httpError = new Error('HTTP Error') as HTTPError + httpError.response = { status: 400 } as Response + + const mockGetDefaultEngineVariant = vi.spyOn( + extension, + 'getDefaultEngineVariant' + ) + mockGetDefaultEngineVariant.mockRejectedValue(httpError) + + const mockSetDefaultEngineVariant = vi.spyOn( + extension, + 'setDefaultEngineVariant' + ) + mockSetDefaultEngineVariant.mockResolvedValue({ messages: 'OK' }) + + vi.mock('@janhq/core', async 
(importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + systemInformation: vi.fn().mockResolvedValue({ gpuSetting: 'high' }), + } + }) + + vi.mock('./utils', async (importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + engineVariant: vi.fn().mockResolvedValue('windows-amd64-noavx'), + } + }) + + await extension.updateDefaultEngine() + + expect(mockSetDefaultEngineVariant).toHaveBeenCalledWith('llama-cpp', { + variant: 'windows-amd64-noavx', + version: '1.0.0', + }) + }) + + it('should handle EngineError when getting default engine variant', async () => { + vi.stubGlobal('PLATFORM', 'win32') + vi.stubGlobal('CORTEX_ENGINE_VERSION', '1.0.0') + + const mockGetDefaultEngineVariant = vi.spyOn( + extension, + 'getDefaultEngineVariant' + ) + mockGetDefaultEngineVariant.mockRejectedValue(new EngineError('Test error')) + + const mockSetDefaultEngineVariant = vi.spyOn( + extension, + 'setDefaultEngineVariant' + ) + mockSetDefaultEngineVariant.mockResolvedValue({ messages: 'OK' }) + + vi.mock('@janhq/core', async (importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + systemInformation: vi.fn().mockResolvedValue({ gpuSetting: 'high' }), + } + }) + + vi.mock('./utils', async (importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + engineVariant: vi.fn().mockResolvedValue('windows-amd64-noavx'), + } + }) + + await extension.updateDefaultEngine() + + expect(mockSetDefaultEngineVariant).toHaveBeenCalledWith('llama-cpp', { + variant: 'windows-amd64-noavx', + version: '1.0.0', + }) + }) + + it('should handle unexpected errors gracefully', async () => { + vi.stubGlobal('PLATFORM', 'win32') + + const mockGetDefaultEngineVariant = vi.spyOn( + extension, + 'getDefaultEngineVariant' + ) + mockGetDefaultEngineVariant.mockRejectedValue(new Error('Unexpected error')) + + const consoleSpy = vi.spyOn(console, 
'error').mockImplementation(() => {}) + + await extension.updateDefaultEngine() + + expect(consoleSpy).toHaveBeenCalled() + }) +}) + +describe('populateDefaultRemoteEngines', () => { + let extension: JanEngineManagementExtension + + beforeEach(() => { + // @ts-ignore + extension = new JanEngineManagementExtension() + vi.resetAllMocks() + }) + + it('should not add default remote engines if remote engines already exist', async () => { + const mockGetEngines = vi.spyOn(extension, 'getEngines') + mockGetEngines.mockResolvedValue(mockRemoteEngines) + + const mockAddRemoteEngine = vi.spyOn(extension, 'addRemoteEngine') + + await extension.populateDefaultRemoteEngines() + + expect(mockAddRemoteEngine).not.toBeCalled() + }) + + it('should add default remote engines if no remote engines exist', async () => { + const mockGetEngines = vi.spyOn(extension, 'getEngines') + mockGetEngines.mockResolvedValue([]) + + const mockAddRemoteEngine = vi.spyOn(extension, 'addRemoteEngine') + mockAddRemoteEngine.mockResolvedValue({ messages: 'OK' }) + + const mockAddRemoteModel = vi.spyOn(extension, 'addRemoteModel') + mockAddRemoteModel.mockResolvedValue(undefined) + + vi.mock('@janhq/core', async (importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + events: { + emit: vi.fn(), + }, + joinPath: vi.fn().mockResolvedValue('/path/to/settings.json'), + getJanDataFolderPath: vi.fn().mockResolvedValue('/path/to/data'), + fs: { + existsSync: vi.fn().mockResolvedValue(false), + }, + } + }) + + await extension.populateDefaultRemoteEngines() + + expect(mockAddRemoteEngine).toHaveBeenCalled() + expect(mockAddRemoteModel).toHaveBeenCalled() + }) +}) diff --git a/extensions/engine-management-extension/src/index.ts b/extensions/engine-management-extension/src/index.ts index 0d30bf4ea..7d0c9f9c4 100644 --- a/extensions/engine-management-extension/src/index.ts +++ b/extensions/engine-management-extension/src/index.ts @@ -15,18 +15,38 @@ import { ModelEvent, 
EngineEvent, } from '@janhq/core' -import ky, { HTTPError } from 'ky' +import ky, { HTTPError, KyInstance } from 'ky' import PQueue from 'p-queue' import { EngineError } from './error' import { getJanDataFolderPath } from '@janhq/core' +import { engineVariant } from './utils' +interface ModelList { + data: Model[] +} /** - * JSONEngineManagementExtension is a EngineManagementExtension implementation that provides + * JanEngineManagementExtension is a EngineManagementExtension implementation that provides * functionality for managing engines. */ -export default class JSONEngineManagementExtension extends EngineManagementExtension { +export default class JanEngineManagementExtension extends EngineManagementExtension { queue = new PQueue({ concurrency: 1 }) + api?: KyInstance + /** + * Get the API instance + * @returns + */ + async apiInstance(): Promise { + if(this.api) return this.api + const apiKey = (await window.core?.api.appToken()) ?? 'cortex.cpp' + this.api = ky.extend({ + prefixUrl: API_URL, + headers: { + Authorization: `Bearer ${apiKey}`, + }, + }) + return this.api + } /** * Called when the extension is loaded. */ @@ -40,6 +60,9 @@ export default class JSONEngineManagementExtension extends EngineManagementExten // Populate default remote engines this.populateDefaultRemoteEngines() + + // Migrate + this.migrate() } /** @@ -52,10 +75,12 @@ export default class JSONEngineManagementExtension extends EngineManagementExten */ async getEngines(): Promise { return this.queue.add(() => - ky - .get(`${API_URL}/v1/engines`) - .json() - .then((e) => e) + this.apiInstance().then((api) => + api + .get('v1/engines') + .json() + .then((e) => e) + ) ) as Promise } @@ -63,13 +88,15 @@ export default class JSONEngineManagementExtension extends EngineManagementExten * @returns A Promise that resolves to an object of list engines. 
*/ async getRemoteModels(name: string): Promise { - return this.queue.add(() => - ky - .get(`${API_URL}/v1/models/remote/${name}`) - .json() - .then((e) => e) - .catch(() => []) - ) as Promise + return this.apiInstance().then( + (api) => + api + .get(`v1/models/remote/${name}`) + .json() + .catch(() => ({ + data: [], + })) as Promise + ) } /** @@ -78,10 +105,12 @@ export default class JSONEngineManagementExtension extends EngineManagementExten */ async getInstalledEngines(name: InferenceEngine): Promise { return this.queue.add(() => - ky - .get(`${API_URL}/v1/engines/${name}`) - .json() - .then((e) => e) + this.apiInstance().then((api) => + api + .get(`v1/engines/${name}`) + .json() + .then((e) => e) + ) ) as Promise } @@ -97,12 +126,14 @@ export default class JSONEngineManagementExtension extends EngineManagementExten platform?: string ) { return this.queue.add(() => - ky - .get(`${API_URL}/v1/engines/${name}/releases/${version}`) - .json() - .then((e) => - platform ? e.filter((r) => r.name.includes(platform)) : e - ) + this.apiInstance().then((api) => + api + .get(`v1/engines/${name}/releases/${version}`) + .json() + .then((e) => + platform ? e.filter((r) => r.name.includes(platform)) : e + ) + ) ) as Promise } @@ -113,12 +144,14 @@ export default class JSONEngineManagementExtension extends EngineManagementExten */ async getLatestReleasedEngine(name: InferenceEngine, platform?: string) { return this.queue.add(() => - ky - .get(`${API_URL}/v1/engines/${name}/releases/latest`) - .json() - .then((e) => - platform ? e.filter((r) => r.name.includes(platform)) : e - ) + this.apiInstance().then((api) => + api + .get(`v1/engines/${name}/releases/latest`) + .json() + .then((e) => + platform ? 
e.filter((r) => r.name.includes(platform)) : e + ) + ) ) as Promise } @@ -128,9 +161,11 @@ export default class JSONEngineManagementExtension extends EngineManagementExten */ async installEngine(name: string, engineConfig: EngineConfig) { return this.queue.add(() => - ky - .post(`${API_URL}/v1/engines/${name}/install`, { json: engineConfig }) - .then((e) => e) + this.apiInstance().then((api) => + api + .post(`v1/engines/${name}/install`, { json: engineConfig }) + .then((e) => e) + ) ) as Promise<{ messages: string }> } @@ -138,9 +173,40 @@ export default class JSONEngineManagementExtension extends EngineManagementExten * Add a new remote engine * @returns A Promise that resolves to intall of engine. */ - async addRemoteEngine(engineConfig: EngineConfig) { + async addRemoteEngine( + engineConfig: EngineConfig, + persistModels: boolean = true + ) { + // Populate default settings + if ( + engineConfig.metadata?.transform_req?.chat_completions && + !engineConfig.metadata.transform_req.chat_completions.template + ) + engineConfig.metadata.transform_req.chat_completions.template = + DEFAULT_REQUEST_PAYLOAD_TRANSFORM + + if ( + engineConfig.metadata?.transform_resp?.chat_completions && + !engineConfig.metadata.transform_resp.chat_completions?.template + ) + engineConfig.metadata.transform_resp.chat_completions.template = + DEFAULT_RESPONSE_BODY_TRANSFORM + + if (engineConfig.metadata && !engineConfig.metadata?.header_template) + engineConfig.metadata.header_template = DEFAULT_REQUEST_HEADERS_TRANSFORM + return this.queue.add(() => - ky.post(`${API_URL}/v1/engines`, { json: engineConfig }).then((e) => e) + this.apiInstance().then((api) => + api.post('v1/engines', { json: engineConfig }).then((e) => { + if (persistModels && engineConfig.metadata?.get_models_url) { + // Pull /models from remote models endpoint + return this.populateRemoteModels(engineConfig) + .then(() => e) + .catch(() => e) + } + return e + }) + ) ) as Promise<{ messages: string }> } @@ -150,9 +216,11 @@ 
export default class JSONEngineManagementExtension extends EngineManagementExten */ async uninstallEngine(name: InferenceEngine, engineConfig: EngineConfig) { return this.queue.add(() => - ky - .delete(`${API_URL}/v1/engines/${name}/install`, { json: engineConfig }) - .then((e) => e) + this.apiInstance().then((api) => + api + .delete(`v1/engines/${name}/install`, { json: engineConfig }) + .then((e) => e) + ) ) as Promise<{ messages: string }> } @@ -162,7 +230,25 @@ export default class JSONEngineManagementExtension extends EngineManagementExten */ async addRemoteModel(model: Model) { return this.queue.add(() => - ky.post(`${API_URL}/v1/models/add`, { json: model }).then((e) => e) + this.apiInstance() + .then((api) => + api + .post('v1/models/add', { + json: { + inference_params: { + max_tokens: 4096, + temperature: 0.7, + top_p: 0.95, + stream: true, + frequency_penalty: 0, + presence_penalty: 0, + }, + ...model, + }, + }) + .then((e) => e) + ) + .then(() => {}) ) } @@ -172,10 +258,12 @@ export default class JSONEngineManagementExtension extends EngineManagementExten */ async getDefaultEngineVariant(name: InferenceEngine) { return this.queue.add(() => - ky - .get(`${API_URL}/v1/engines/${name}/default`) - .json<{ messages: string }>() - .then((e) => e) + this.apiInstance().then((api) => + api + .get(`v1/engines/${name}/default`) + .json<{ messages: string }>() + .then((e) => e) + ) ) as Promise } @@ -189,9 +277,11 @@ export default class JSONEngineManagementExtension extends EngineManagementExten engineConfig: EngineConfig ) { return this.queue.add(() => - ky - .post(`${API_URL}/v1/engines/${name}/default`, { json: engineConfig }) - .then((e) => e) + this.apiInstance().then((api) => + api + .post(`v1/engines/${name}/default`, { json: engineConfig }) + .then((e) => e) + ) ) as Promise<{ messages: string }> } @@ -200,9 +290,11 @@ export default class JSONEngineManagementExtension extends EngineManagementExten */ async updateEngine(name: InferenceEngine, 
engineConfig?: EngineConfig) { return this.queue.add(() => - ky - .post(`${API_URL}/v1/engines/${name}/update`, { json: engineConfig }) - .then((e) => e) + this.apiInstance().then((api) => + api + .post(`v1/engines/${name}/update`, { json: engineConfig }) + .then((e) => e) + ) ) as Promise<{ messages: string }> } @@ -211,11 +303,15 @@ export default class JSONEngineManagementExtension extends EngineManagementExten * @returns */ async healthz(): Promise { - return ky - .get(`${API_URL}/healthz`, { - retry: { limit: 20, delay: () => 500, methods: ['get'] }, + return this.apiInstance() + .then((api) => + api.get('healthz', { + retry: { limit: 20, delay: () => 500, methods: ['get'] }, + }) + ) + .then(() => { + this.queue.concurrency = Infinity }) - .then(() => {}) } /** @@ -233,7 +329,8 @@ export default class JSONEngineManagementExtension extends EngineManagementExten if ( !installedEngines.some( (e) => e.name === variant.variant && e.version === variant.version - ) + ) || + variant.version < CORTEX_ENGINE_VERSION ) { throw new EngineError( 'Default engine is not available, use bundled version.' 
@@ -245,11 +342,7 @@ export default class JSONEngineManagementExtension extends EngineManagementExten error instanceof EngineError ) { const systemInfo = await systemInformation() - const variant = await executeOnMain( - NODE, - 'engineVariant', - systemInfo.gpuSetting - ) + const variant = await engineVariant(systemInfo.gpuSetting) await this.setDefaultEngineVariant(InferenceEngine.cortex_llamacpp, { variant: variant, version: `${CORTEX_ENGINE_VERSION}`, @@ -293,14 +386,73 @@ export default class JSONEngineManagementExtension extends EngineManagementExten data.api_key = api_key /// END - Migrate legacy api key settings - await this.addRemoteEngine(data).catch(console.error) + await this.addRemoteEngine(data, false).catch(console.error) }) ) events.emit(EngineEvent.OnEngineUpdate, {}) - DEFAULT_REMOTE_MODELS.forEach(async (data: Model) => { - await this.addRemoteModel(data).catch(() => {}) - }) + await Promise.all( + DEFAULT_REMOTE_MODELS.map((data: Model) => + this.addRemoteModel(data).catch(() => {}) + ) + ) events.emit(ModelEvent.OnModelsUpdate, { fetch: true }) } } + + /** + * Pulls models list from the remote provider and persist + * @param engineConfig + * @returns + */ + private populateRemoteModels = async (engineConfig: EngineConfig) => { + return this.getRemoteModels(engineConfig.engine) + .then((models: ModelList) => { + if (models?.data) + Promise.all( + models.data.map((model) => + this.addRemoteModel({ + ...model, + engine: engineConfig.engine as InferenceEngine, + model: model.model ?? 
model.id, + }).catch(console.info) + ) + ).then(() => { + events.emit(ModelEvent.OnModelsUpdate, { fetch: true }) + }) + }) + .catch(console.info) + } + + /** + * Update engine settings to the latest version + */ + migrate = async () => { + // Ensure health check is done + await this.queue.onEmpty() + + const version = await this.getSetting('version', '0.0.0') + const engines = await this.getEngines() + if (version < VERSION) { + console.log('Migrating engine settings...') + // Migrate engine settings + await Promise.all( + DEFAULT_REMOTE_ENGINES.map((engine) => { + const { id, ...data } = engine + + data.api_key = engines[id]?.api_key + return this.updateEngine(id, { + ...data, + }).catch(console.error) + }) + ) + await this.updateSettings([ + { + key: 'version', + controllerProps: { + value: VERSION, + }, + }, + ]) + } + } } diff --git a/extensions/engine-management-extension/src/node/cpuInfo.ts b/extensions/engine-management-extension/src/node/cpuInfo.ts deleted file mode 100644 index 4366a995b..000000000 --- a/extensions/engine-management-extension/src/node/cpuInfo.ts +++ /dev/null @@ -1,27 +0,0 @@ -import { cpuInfo } from 'cpu-instructions' - -// Check the CPU info and determine the supported instruction set -const info = cpuInfo.cpuInfo().some((e) => e.toUpperCase() === 'AVX512') - ? 'avx512' - : cpuInfo.cpuInfo().some((e) => e.toUpperCase() === 'AVX2') - ? 'avx2' - : cpuInfo.cpuInfo().some((e) => e.toUpperCase() === 'AVX') - ? 
'avx' - : 'noavx' - -// Send the result and wait for confirmation before exiting -new Promise((resolve, reject) => { - // @ts-ignore - process.send(info, (error: Error | null) => { - if (error) { - reject(error) - } else { - resolve() - } - }) -}) - .then(() => process.exit(0)) - .catch((error) => { - console.error('Failed to send info:', error) - process.exit(1) - }) diff --git a/extensions/engine-management-extension/src/node/index.test.ts b/extensions/engine-management-extension/src/node/index.test.ts deleted file mode 100644 index c73feb9c6..000000000 --- a/extensions/engine-management-extension/src/node/index.test.ts +++ /dev/null @@ -1,475 +0,0 @@ -import { describe, expect, it } from '@jest/globals' -import engine from './index' -import { GpuSetting } from '@janhq/core/node' -import { cpuInfo } from 'cpu-instructions' -import { fork } from 'child_process' - -let testSettings: GpuSetting = { - run_mode: 'cpu', - vulkan: false, - cuda: { - exist: false, - version: '11', - }, - gpu_highest_vram: '0', - gpus: [], - gpus_in_use: [], - is_initial: false, - notify: true, - nvidia_driver: { - exist: false, - version: '11', - }, -} -const originalPlatform = process.platform - -jest.mock('cpu-instructions', () => ({ - cpuInfo: { - cpuInfo: jest.fn(), - }, -})) -let mockCpuInfo = cpuInfo.cpuInfo as jest.Mock -mockCpuInfo.mockReturnValue([]) - -jest.mock('@janhq/core/node', () => ({ - appResourcePath: () => '.', - log: jest.fn(), -})) -jest.mock('child_process', () => ({ - fork: jest.fn(), -})) -const mockFork = fork as jest.Mock - -describe('test executable cortex file', () => { - afterAll(function () { - Object.defineProperty(process, 'platform', { - value: originalPlatform, - }) - }) - - it('executes on MacOS', () => { - const mockProcess = { - on: jest.fn((event, callback) => { - if (event === 'message') { - callback('noavx') - } - }), - send: jest.fn(), - } - Object.defineProperty(process, 'platform', { - value: 'darwin', - }) - Object.defineProperty(process, 
'arch', { - value: 'arm64', - }) - - mockFork.mockReturnValue(mockProcess) - expect(engine.engineVariant(testSettings)).resolves.toEqual('mac-arm64') - }) - - it('executes on MacOS', () => { - Object.defineProperty(process, 'platform', { - value: 'darwin', - }) - Object.defineProperty(process, 'arch', { - value: 'arm64', - }) - - const mockProcess = { - on: jest.fn((event, callback) => { - if (event === 'message') { - callback('noavx') - } - }), - send: jest.fn(), - } - mockFork.mockReturnValue(mockProcess) - Object.defineProperty(process, 'arch', { - value: 'x64', - }) - - expect(engine.engineVariant(testSettings)).resolves.toEqual('mac-amd64') - }) - - it('executes on Windows CPU', () => { - Object.defineProperty(process, 'platform', { - value: 'win32', - }) - const settings: GpuSetting = { - ...testSettings, - run_mode: 'cpu', - } - const mockProcess = { - on: jest.fn((event, callback) => { - if (event === 'message') { - callback('avx') - } - }), - send: jest.fn(), - } - mockFork.mockReturnValue(mockProcess) - - expect(engine.engineVariant()).resolves.toEqual('windows-amd64-avx') - }) - - it('executes on Windows Cuda 11', () => { - Object.defineProperty(process, 'platform', { - value: 'win32', - }) - const settings: GpuSetting = { - ...testSettings, - run_mode: 'gpu', - cuda: { - exist: true, - version: '11', - }, - nvidia_driver: { - exist: true, - version: '12', - }, - gpus_in_use: ['0'], - gpus: [ - { - id: '0', - name: 'NVIDIA GeForce GTX 1080', - vram: '80000000', - }, - ], - } - - const mockProcess = { - on: jest.fn((event, callback) => { - if (event === 'message') { - callback('avx2') - } - }), - send: jest.fn(), - } - mockFork.mockReturnValue(mockProcess) - - expect(engine.engineVariant(settings)).resolves.toEqual( - 'windows-amd64-avx2-cuda-11-7' - ) - }) - - it('executes on Windows Cuda 12', () => { - Object.defineProperty(process, 'platform', { - value: 'win32', - }) - const settings: GpuSetting = { - ...testSettings, - run_mode: 'gpu', - cuda: { - 
exist: true, - version: '12', - }, - nvidia_driver: { - exist: true, - version: '12', - }, - gpus_in_use: ['0'], - gpus: [ - { - id: '0', - name: 'NVIDIA GeForce GTX 1080', - vram: '80000000', - }, - ], - } - mockFork.mockReturnValue({ - on: jest.fn((event, callback) => { - if (event === 'message') { - callback('noavx') - } - }), - send: jest.fn(), - }) - - expect(engine.engineVariant(settings)).resolves.toEqual( - 'windows-amd64-noavx-cuda-12-0' - ) - mockFork.mockReturnValue({ - on: jest.fn((event, callback) => { - if (event === 'message') { - callback('avx512') - } - }), - send: jest.fn(), - }) - expect(engine.engineVariant(settings)).resolves.toEqual( - 'windows-amd64-avx2-cuda-12-0' - ) - }) - - it('executes on Linux CPU', () => { - Object.defineProperty(process, 'platform', { - value: 'linux', - }) - const settings: GpuSetting = { - ...testSettings, - run_mode: 'cpu', - } - mockFork.mockReturnValue({ - on: jest.fn((event, callback) => { - if (event === 'message') { - callback('noavx') - } - }), - send: jest.fn(), - }) - - expect(engine.engineVariant()).resolves.toEqual('linux-amd64-noavx') - }) - - it('executes on Linux Cuda 11', () => { - Object.defineProperty(process, 'platform', { - value: 'linux', - }) - const settings: GpuSetting = { - ...testSettings, - run_mode: 'gpu', - cuda: { - exist: true, - version: '11', - }, - nvidia_driver: { - exist: true, - version: '12', - }, - gpus_in_use: ['0'], - gpus: [ - { - id: '0', - name: 'NVIDIA GeForce GTX 1080', - vram: '80000000', - }, - ], - } - - mockFork.mockReturnValue({ - on: jest.fn((event, callback) => { - if (event === 'message') { - callback('avx512') - } - }), - send: jest.fn(), - }) - - expect(engine.engineVariant(settings)).resolves.toBe( - 'linux-amd64-avx2-cuda-11-7' - ) - }) - - it('executes on Linux Cuda 12', () => { - Object.defineProperty(process, 'platform', { - value: 'linux', - }) - const settings: GpuSetting = { - ...testSettings, - run_mode: 'gpu', - cuda: { - exist: true, - version: '12', 
- }, - nvidia_driver: { - exist: true, - version: '12', - }, - gpus_in_use: ['0'], - gpus: [ - { - id: '0', - name: 'NVIDIA GeForce GTX 1080', - vram: '80000000', - }, - ], - } - mockFork.mockReturnValue({ - on: jest.fn((event, callback) => { - if (event === 'message') { - callback('avx2') - } - }), - send: jest.fn(), - }) - - expect(engine.engineVariant(settings)).resolves.toEqual( - 'linux-amd64-avx2-cuda-12-0' - ) - }) - - // Generate test for different cpu instructions on Linux - it(`executes on Linux CPU with different instructions`, () => { - Object.defineProperty(process, 'platform', { - value: 'linux', - }) - const settings: GpuSetting = { - ...testSettings, - run_mode: 'cpu', - } - - const cpuInstructions = ['avx512', 'avx2', 'avx', 'noavx'] - cpuInstructions.forEach((instruction) => { - mockFork.mockReturnValue({ - on: jest.fn((event, callback) => { - if (event === 'message') { - callback(instruction) - } - }), - send: jest.fn(), - }) - - expect(engine.engineVariant(settings)).resolves.toEqual( - `linux-amd64-${instruction}` - ) - }) - }) - // Generate test for different cpu instructions on Windows - it(`executes on Windows CPU with different instructions`, () => { - Object.defineProperty(process, 'platform', { - value: 'win32', - }) - const settings: GpuSetting = { - ...testSettings, - run_mode: 'cpu', - } - const cpuInstructions = ['avx512', 'avx2', 'avx', 'noavx'] - cpuInstructions.forEach((instruction) => { - mockFork.mockReturnValue({ - on: jest.fn((event, callback) => { - if (event === 'message') { - callback(instruction) - } - }), - send: jest.fn(), - }) - expect(engine.engineVariant(settings)).resolves.toEqual( - `windows-amd64-${instruction}` - ) - }) - }) - - // Generate test for different cpu instructions on Windows - it(`executes on Windows GPU with different instructions`, () => { - Object.defineProperty(process, 'platform', { - value: 'win32', - }) - const settings: GpuSetting = { - ...testSettings, - run_mode: 'gpu', - cuda: { - exist: 
true, - version: '12', - }, - nvidia_driver: { - exist: true, - version: '12', - }, - gpus_in_use: ['0'], - gpus: [ - { - id: '0', - name: 'NVIDIA GeForce GTX 1080', - vram: '80000000', - }, - ], - } - const cpuInstructions = ['avx512', 'avx2', 'avx', 'noavx'] - cpuInstructions.forEach((instruction) => { - mockFork.mockReturnValue({ - on: jest.fn((event, callback) => { - if (event === 'message') { - callback(instruction) - } - }), - send: jest.fn(), - }) - expect(engine.engineVariant(settings)).resolves.toEqual( - `windows-amd64-${instruction === 'avx512' || instruction === 'avx2' ? 'avx2' : 'noavx'}-cuda-12-0` - ) - }) - }) - - // Generate test for different cpu instructions on Linux - it(`executes on Linux GPU with different instructions`, () => { - Object.defineProperty(process, 'platform', { - value: 'linux', - }) - const cpuInstructions = ['avx512', 'avx2', 'avx', 'noavx'] - const settings: GpuSetting = { - ...testSettings, - run_mode: 'gpu', - cuda: { - exist: true, - version: '12', - }, - nvidia_driver: { - exist: true, - version: '12', - }, - gpus_in_use: ['0'], - gpus: [ - { - id: '0', - name: 'NVIDIA GeForce GTX 1080', - vram: '80000000', - }, - ], - } - cpuInstructions.forEach((instruction) => { - mockFork.mockReturnValue({ - on: jest.fn((event, callback) => { - if (event === 'message') { - callback(instruction) - } - }), - send: jest.fn(), - }) - expect(engine.engineVariant(settings)).resolves.toEqual( - `linux-amd64-${instruction === 'avx512' || instruction === 'avx2' ? 
'avx2' : 'noavx'}-cuda-12-0` - ) - }) - }) - - // Generate test for different cpu instructions on Linux - it(`executes on Linux Vulkan should not have CPU instructions included`, () => { - Object.defineProperty(process, 'platform', { - value: 'linux', - }) - const cpuInstructions = ['avx512', 'avx2', 'avx', 'noavx'] - const settings: GpuSetting = { - ...testSettings, - run_mode: 'gpu', - vulkan: true, - cuda: { - exist: true, - version: '12', - }, - nvidia_driver: { - exist: true, - version: '12', - }, - gpus_in_use: ['0'], - gpus: [ - { - id: '0', - name: 'NVIDIA GeForce GTX 1080', - vram: '80000000', - }, - ], - } - cpuInstructions.forEach((instruction) => { - mockFork.mockReturnValue({ - on: jest.fn((event, callback) => { - if (event === 'message') { - callback(instruction) - } - }), - send: jest.fn(), - }) - expect(engine.engineVariant(settings)).resolves.toEqual( - `linux-amd64-vulkan` - ) - }) - }) -}) diff --git a/extensions/engine-management-extension/src/node/index.ts b/extensions/engine-management-extension/src/node/index.ts index 31ad90ed2..ae1934b25 100644 --- a/extensions/engine-management-extension/src/node/index.ts +++ b/extensions/engine-management-extension/src/node/index.ts @@ -2,114 +2,14 @@ import * as path from 'path' import { appResourcePath, getJanDataFolderPath, - GpuSetting, log, } from '@janhq/core/node' -import { fork } from 'child_process' -import { mkdir, readdir, symlink } from 'fs/promises' - -/** - * The GPU runMode that will be set - either 'vulkan', 'cuda', or empty for cpu. - * @param settings - * @returns - */ -const gpuRunMode = (settings?: GpuSetting): string => { - if (process.platform === 'darwin') - // MacOS now has universal binaries - return '' - - if (!settings) return '' - - return settings.vulkan === true || settings.run_mode === 'cpu' ? '' : 'cuda' -} - -/** - * The OS & architecture that the current process is running on. 
- * @returns win, mac-x64, mac-arm64, or linux - */ -const os = (): string => { - return process.platform === 'win32' - ? 'windows-amd64' - : process.platform === 'darwin' - ? process.arch === 'arm64' - ? 'mac-arm64' - : 'mac-amd64' - : 'linux-amd64' -} - -/** - * The CUDA version that will be set - either '11-7' or '12-0'. - * @param settings - * @returns - */ -const cudaVersion = (settings?: GpuSetting): '11-7' | '12-0' | undefined => { - const isUsingCuda = - settings?.vulkan !== true && - settings?.run_mode === 'gpu' && - !os().includes('mac') - - if (!isUsingCuda) return undefined - return settings?.cuda?.version === '11' ? '11-7' : '12-0' -} - -/** - * The CPU instructions that will be set - either 'avx512', 'avx2', 'avx', or 'noavx'. - * @returns - */ -const cpuInstructions = async (): Promise => { - if (process.platform === 'darwin') return '' - - const child = fork(path.join(__dirname, './cpuInfo.js')) // Path to the child process file - - return new Promise((resolve, reject) => { - child.on('message', (cpuInfo?: string) => { - resolve(cpuInfo ?? 'noavx') - child.kill() // Kill the child process after receiving the result - }) - - child.on('error', (err) => { - resolve('noavx') - child.kill() - }) - - child.on('exit', (code) => { - if (code !== 0) { - resolve('noavx') - child.kill() - } - }) - }) -} - -/** - * Find which variant to run based on the current platform. - */ -const engineVariant = async (gpuSetting?: GpuSetting): Promise => { - const cpuInstruction = await cpuInstructions() - log(`[CORTEX]: CPU instruction: ${cpuInstruction}`) - let engineVariant = [ - os(), - gpuSetting?.vulkan - ? 'vulkan' - : gpuRunMode(gpuSetting) !== 'cuda' - ? // CPU mode - support all variants - cpuInstruction - : // GPU mode - packaged CUDA variants of avx2 and noavx - cpuInstruction === 'avx2' || cpuInstruction === 'avx512' - ? 
'avx2' - : 'noavx', - gpuRunMode(gpuSetting), - cudaVersion(gpuSetting), - ] - .filter((e) => !!e) - .join('-') - - log(`[CORTEX]: Engine variant: ${engineVariant}`) - return engineVariant -} +import { mkdir, readdir, symlink, cp } from 'fs/promises' +import { existsSync } from 'fs' /** * Create symlink to each variant for the default bundled version + * If running in AppImage environment, copy files instead of creating symlinks */ const symlinkEngines = async () => { const sourceEnginePath = path.join( @@ -124,6 +24,8 @@ const symlinkEngines = async () => { 'cortex.llamacpp' ) const variantFolders = await readdir(sourceEnginePath) + const isStandalone = process.platform === 'linux' + for (const variant of variantFolders) { const targetVariantPath = path.join( sourceEnginePath, @@ -140,14 +42,28 @@ const symlinkEngines = async () => { recursive: true, }).catch((error) => log(JSON.stringify(error))) - await symlink(targetVariantPath, symlinkVariantPath, 'junction').catch( - (error) => log(JSON.stringify(error)) - ) - console.log(`Symlink created: ${targetVariantPath} -> ${symlinkEnginePath}`) + // Skip if already exists + if (existsSync(symlinkVariantPath)) { + console.log(`Target already exists: ${symlinkVariantPath}`) + continue + } + + if (isStandalone) { + // Copy files for AppImage environments instead of symlinking + await cp(targetVariantPath, symlinkVariantPath, { recursive: true }).catch( + (error) => log(JSON.stringify(error)) + ) + console.log(`Files copied: ${targetVariantPath} -> ${symlinkVariantPath}`) + } else { + // Create symlink for other environments + await symlink(targetVariantPath, symlinkVariantPath, 'junction').catch( + (error) => log(JSON.stringify(error)) + ) + console.log(`Symlink created: ${targetVariantPath} -> ${symlinkVariantPath}`) + } } } export default { - engineVariant, symlinkEngines, } diff --git a/extensions/engine-management-extension/src/populateRemoteModels.test.ts 
b/extensions/engine-management-extension/src/populateRemoteModels.test.ts new file mode 100644 index 000000000..225db26cc --- /dev/null +++ b/extensions/engine-management-extension/src/populateRemoteModels.test.ts @@ -0,0 +1,139 @@ +import { describe, beforeEach, it, expect, vi } from 'vitest' +import JanEngineManagementExtension from './index' +import { InferenceEngine } from '@janhq/core' + +describe('populateRemoteModels', () => { + let extension: JanEngineManagementExtension + + beforeEach(() => { + // @ts-ignore + extension = new JanEngineManagementExtension() + vi.resetAllMocks() + }) + + it('should populate remote models successfully', async () => { + const mockEngineConfig = { + engine: InferenceEngine.openai, + } + + const mockRemoteModels = { + data: [ + { + id: 'gpt-4', + name: 'GPT-4', + }, + ], + } + + const mockGetRemoteModels = vi.spyOn(extension, 'getRemoteModels') + mockGetRemoteModels.mockResolvedValue(mockRemoteModels) + + const mockAddRemoteModel = vi.spyOn(extension, 'addRemoteModel') + mockAddRemoteModel.mockResolvedValue(undefined) + + vi.mock('@janhq/core', async (importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + events: { + emit: vi.fn(), + }, + } + }) + + // Use the private method through index.ts + // @ts-ignore - Accessing private method for testing + await extension.populateRemoteModels(mockEngineConfig) + + expect(mockGetRemoteModels).toHaveBeenCalledWith(mockEngineConfig.engine) + expect(mockAddRemoteModel).toHaveBeenCalledWith({ + ...mockRemoteModels.data[0], + engine: mockEngineConfig.engine, + model: 'gpt-4', + }) + }) + + it('should handle empty data from remote models', async () => { + const mockEngineConfig = { + engine: InferenceEngine.openai, + } + + const mockGetRemoteModels = vi.spyOn(extension, 'getRemoteModels') + mockGetRemoteModels.mockResolvedValue({ data: [] }) + + const mockAddRemoteModel = vi.spyOn(extension, 'addRemoteModel') + + vi.mock('@janhq/core', async 
(importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + events: { + emit: vi.fn(), + }, + } + }) + + // @ts-ignore - Accessing private method for testing + await extension.populateRemoteModels(mockEngineConfig) + + expect(mockGetRemoteModels).toHaveBeenCalledWith(mockEngineConfig.engine) + expect(mockAddRemoteModel).not.toHaveBeenCalled() + }) + + it('should handle errors when getting remote models', async () => { + const mockEngineConfig = { + engine: InferenceEngine.openai, + } + + const mockGetRemoteModels = vi.spyOn(extension, 'getRemoteModels') + mockGetRemoteModels.mockRejectedValue(new Error('Failed to fetch models')) + + const consoleSpy = vi.spyOn(console, 'info').mockImplementation(() => {}) + + // @ts-ignore - Accessing private method for testing + await extension.populateRemoteModels(mockEngineConfig) + + expect(mockGetRemoteModels).toHaveBeenCalledWith(mockEngineConfig.engine) + expect(consoleSpy).toHaveBeenCalled() + }) + + it('should handle errors when adding remote models', async () => { + const mockEngineConfig = { + engine: InferenceEngine.openai, + } + + const mockRemoteModels = { + data: [ + { + id: 'gpt-4', + name: 'GPT-4', + }, + ], + } + + const mockGetRemoteModels = vi.spyOn(extension, 'getRemoteModels') + mockGetRemoteModels.mockResolvedValue(mockRemoteModels) + + const mockAddRemoteModel = vi.spyOn(extension, 'addRemoteModel') + mockAddRemoteModel.mockRejectedValue(new Error('Failed to add model')) + + const consoleSpy = vi.spyOn(console, 'info').mockImplementation(() => {}) + + vi.mock('@janhq/core', async (importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + events: { + emit: vi.fn(), + }, + } + }) + + // @ts-ignore - Accessing private method for testing + await extension.populateRemoteModels(mockEngineConfig) + + expect(mockGetRemoteModels).toHaveBeenCalledWith(mockEngineConfig.engine) + expect(mockAddRemoteModel).toHaveBeenCalled() + 
expect(consoleSpy).toHaveBeenCalled() + }) +}) \ No newline at end of file diff --git a/extensions/engine-management-extension/src/utils.test.ts b/extensions/engine-management-extension/src/utils.test.ts new file mode 100644 index 000000000..e453f58cb --- /dev/null +++ b/extensions/engine-management-extension/src/utils.test.ts @@ -0,0 +1,90 @@ +import { describe, it, expect, vi } from 'vitest' +import { engineVariant } from './utils' + +vi.mock('@janhq/core', () => { + return { + log: () => {}, + } +}) + +describe('engineVariant', () => { + it('should return mac-arm64 when platform is darwin and arch is arm64', async () => { + vi.stubGlobal('PLATFORM', 'darwin') + const result = await engineVariant({ + cpu: { arch: 'arm64', instructions: '' }, + gpus: [], + vulkan: false, + }) + expect(result).toBe('mac-arm64') + }) + + it('should return mac-amd64 when platform is darwin and arch is not arm64', async () => { + vi.stubGlobal('PLATFORM', 'darwin') + const result = await engineVariant({ + cpu: { arch: 'x64', instructions: [] }, + gpus: [], + vulkan: false, + }) + expect(result).toBe('mac-amd64') + }) + + it('should return windows-amd64-noavx-cuda-12-0 when platform is win32, cuda is enabled, and cuda version is 12', async () => { + vi.stubGlobal('PLATFORM', 'win32') + const result = await engineVariant({ + cpu: { arch: 'x64', instructions: ['avx2'] }, + gpus: [ + { + activated: true, + version: '12', + additional_information: { driver_version: '1.0' }, + }, + ], + vulkan: false, + }) + expect(result).toBe('windows-amd64-avx2-cuda-12-0') + }) + + it('should return linux-amd64-noavx-cuda-11-7 when platform is linux, cuda is enabled, and cuda version is 11', async () => { + vi.stubGlobal('PLATFORM', 'linux') + const result = await engineVariant({ + cpu: { arch: 'x64', instructions: [] }, + gpus: [ + { + activated: true, + version: '11', + additional_information: { driver_version: '1.0' }, + }, + ], + vulkan: false, + }) + 
expect(result).toBe('linux-amd64-noavx-cuda-11-7') + }) + + it('should return windows-amd64-vulkan when platform is win32 and vulkan is enabled', async () => { + vi.stubGlobal('PLATFORM', 'win32') + const result = await engineVariant({ + cpu: { arch: 'x64', instructions: [] }, + gpus: [{ activated: true, version: '12' }], + vulkan: true, + }) + expect(result).toBe('windows-amd64-vulkan') + }) + + it('should return windows-amd64-avx512 when platform is win32, no gpu detected and avx512 cpu instruction is supported', async () => { + vi.stubGlobal('PLATFORM', 'win32') + const result = await engineVariant({ + cpu: { arch: 'x64', instructions: ['avx512'] }, + gpus: [{ activated: true, version: '12' }], + }) + expect(result).toBe('windows-amd64-avx512') + }) + + it('should return windows-amd64-avx512 when platform is win32, no gpu detected and no accelerated cpu instructions are supported', async () => { + vi.stubGlobal('PLATFORM', 'win32') + const result = await engineVariant({ + cpu: { arch: 'x64', instructions: [''] }, + gpus: [{ activated: true, version: '12' }], + }) + expect(result).toBe('windows-amd64-noavx') + }) +}) diff --git a/extensions/engine-management-extension/src/utils.ts b/extensions/engine-management-extension/src/utils.ts new file mode 100644 index 000000000..5e3f01ef7 --- /dev/null +++ b/extensions/engine-management-extension/src/utils.ts @@ -0,0 +1,101 @@ +import { GpuSetting, log } from '@janhq/core' + +// Supported run modes +enum RunMode { + Cuda = 'cuda', + CPU = 'cpu', +} + +// Supported instruction sets +const instructionBinaryNames = ['noavx', 'avx', 'avx2', 'avx512'] + +/** + * The GPU runMode that will be set - either 'vulkan', 'cuda', or empty for cpu. + * @param settings + * @returns + */ + +const gpuRunMode = (settings?: GpuSetting): RunMode => { + return settings.gpus?.some( + (gpu) => + gpu.activated && + gpu.additional_information && + gpu.additional_information.driver_version + ) + ? 
RunMode.Cuda + : RunMode.CPU +} + +/** + * The OS & architecture that the current process is running on. + * @returns win, mac-x64, mac-arm64, or linux + */ +const os = (settings?: GpuSetting): string => { + return PLATFORM === 'win32' + ? 'windows-amd64' + : PLATFORM === 'darwin' + ? settings?.cpu?.arch === 'arm64' + ? 'mac-arm64' + : 'mac-amd64' + : 'linux-amd64' +} + +/** + * The CUDA version that will be set - either '11-7' or '12-0'. + * @param settings + * @returns + */ +const cudaVersion = (settings?: GpuSetting): '12-0' | '11-7' | undefined => { + return settings.gpus?.some((gpu) => gpu.version.includes('12')) + ? '12-0' + : '11-7' +} + +/** + * The CPU instructions that will be set - either 'avx512', 'avx2', 'avx', or 'noavx'. + * @returns + */ + +/** + * Find which variant to run based on the current platform. + */ +export const engineVariant = async ( + gpuSetting?: GpuSetting +): Promise => { + const platform = os(gpuSetting) + + // There is no need to append the variant extension for mac + if (platform.startsWith('mac')) return platform + + const runMode = gpuRunMode(gpuSetting) + // Only Nvidia GPUs have addition_information set and activated by default + let engineVariant = + !gpuSetting?.vulkan || + !gpuSetting.gpus?.length || + gpuSetting.gpus.some((e) => e.additional_information && e.activated) + ? [ + platform, + ...(runMode === RunMode.Cuda + ? // For cuda we only need to check if the cpu supports avx2 or noavx - since other binaries are not shipped with the extension + [ + gpuSetting.cpu?.instructions.includes('avx2') || + gpuSetting.cpu?.instructions.includes('avx512') + ? 'avx2' + : 'noavx', + runMode, + cudaVersion(gpuSetting), + ] + : // For cpu only we need to check all available supported instructions + [ + (gpuSetting.cpu?.instructions ?? ['noavx']).find((e) => + instructionBinaryNames.includes(e.toLowerCase()) + ) ?? 
'noavx', + ]), + ].filter(Boolean) + : [platform, 'vulkan'] + + let engineVariantString = engineVariant.join('-') + + log(`[CORTEX]: Engine variant: ${engineVariantString}`) + return engineVariantString +} diff --git a/extensions/inference-cortex-extension/jest.config.js b/extensions/hardware-management-extension/jest.config.js similarity index 98% rename from extensions/inference-cortex-extension/jest.config.js rename to extensions/hardware-management-extension/jest.config.js index b413e106d..8bb37208d 100644 --- a/extensions/inference-cortex-extension/jest.config.js +++ b/extensions/hardware-management-extension/jest.config.js @@ -2,4 +2,4 @@ module.exports = { preset: 'ts-jest', testEnvironment: 'node', -}; \ No newline at end of file +} diff --git a/extensions/hardware-management-extension/package.json b/extensions/hardware-management-extension/package.json new file mode 100644 index 000000000..396404df9 --- /dev/null +++ b/extensions/hardware-management-extension/package.json @@ -0,0 +1,46 @@ +{ + "name": "@janhq/hardware-management-extension", + "productName": "Hardware Management", + "version": "1.0.0", + "description": "Manages Better Hardware settings.", + "main": "dist/index.js", + "node": "dist/node/index.cjs.js", + "author": "Jan ", + "license": "MIT", + "scripts": { + "test": "jest", + "build": "rolldown -c rolldown.config.mjs", + "codesign:darwin": "../../.github/scripts/auto-sign.sh", + "codesign:win32:linux": "echo 'No codesigning required'", + "codesign": "run-script-os", + "build:publish": "rimraf *.tgz --glob || true && yarn build && yarn codesign && npm pack && cpx *.tgz ../../pre-install" + }, + "exports": { + ".": "./dist/index.js", + "./main": "./dist/module.js" + }, + "devDependencies": { + "cpx": "^1.5.0", + "rimraf": "^3.0.2", + "rolldown": "^1.0.0-beta.1", + "run-script-os": "^1.1.6", + "ts-loader": "^9.5.0", + "typescript": "^5.3.3" + }, + "dependencies": { + "@janhq/core": "../../core/package.tgz", + "ky": "^1.7.2", + "p-queue": 
"^8.0.1" + }, + "bundledDependencies": [ + "@janhq/core" + ], + "hardwares": { + "node": ">=18.0.0" + }, + "files": [ + "dist/*", + "package.json", + "README.md" + ] +} diff --git a/extensions/hardware-management-extension/rolldown.config.mjs b/extensions/hardware-management-extension/rolldown.config.mjs new file mode 100644 index 000000000..1a9c34ba0 --- /dev/null +++ b/extensions/hardware-management-extension/rolldown.config.mjs @@ -0,0 +1,16 @@ +import { defineConfig } from 'rolldown' +import pkgJson from './package.json' with { type: 'json' } + +export default defineConfig([ + { + input: 'src/index.ts', + output: { + format: 'esm', + file: 'dist/index.js', + }, + define: { + NODE: JSON.stringify(`${pkgJson.name}/${pkgJson.node}`), + API_URL: JSON.stringify(`http://127.0.0.1:${process.env.CORTEX_API_PORT ?? "39291"}`), + }, + }, +]) diff --git a/extensions/hardware-management-extension/src/@types/global.d.ts b/extensions/hardware-management-extension/src/@types/global.d.ts new file mode 100644 index 000000000..a412681e8 --- /dev/null +++ b/extensions/hardware-management-extension/src/@types/global.d.ts @@ -0,0 +1,11 @@ +declare const API_URL: string +declare const NODE: string + +interface Core { + api: APIFunctions + events: EventEmitter +} +interface Window { + core?: Core | undefined + electronAPI?: any | undefined +} diff --git a/extensions/hardware-management-extension/src/index.ts b/extensions/hardware-management-extension/src/index.ts new file mode 100644 index 000000000..edd98a7ae --- /dev/null +++ b/extensions/hardware-management-extension/src/index.ts @@ -0,0 +1,86 @@ +import { HardwareManagementExtension, HardwareInformation } from '@janhq/core' +import ky, { KyInstance } from 'ky' +import PQueue from 'p-queue' + +/** + * JSONHardwareManagementExtension is a HardwareManagementExtension implementation that provides + * functionality for managing engines. 
+ */ +export default class JSONHardwareManagementExtension extends HardwareManagementExtension { + queue = new PQueue({ concurrency: 1 }) + + /** + * Called when the extension is loaded. + */ + async onLoad() { + // Run Healthcheck + this.queue.add(() => this.healthz()) + } + + api?: KyInstance + /** + * Get the API instance + * @returns + */ + async apiInstance(): Promise { + if(this.api) return this.api + const apiKey = (await window.core?.api.appToken()) ?? 'cortex.cpp' + this.api = ky.extend({ + prefixUrl: API_URL, + headers: { + Authorization: `Bearer ${apiKey}`, + }, + }) + return this.api + } + + /** + * Called when the extension is unloaded. + */ + onUnload() {} + + /** + * Do health check on cortex.cpp + * @returns + */ + async healthz(): Promise { + return this.apiInstance().then((api) => + api + .get('healthz', { + retry: { limit: 20, delay: () => 500, methods: ['get'] }, + }) + .then(() => {}) + ) + } + + /** + * @returns A Promise that resolves to an object of hardware. + */ + async getHardware(): Promise { + return this.queue.add(() => + this.apiInstance().then((api) => + api + .get('v1/hardware') + .json() + .then((e) => e) + ) + ) as Promise + } + + /** + * @returns A Promise that resolves to an object of set gpu activate. 
+ */ + async setAvtiveGpu(data: { gpus: number[] }): Promise<{ + message: string + activated_gpus: number[] + }> { + return this.queue.add(() => + this.apiInstance().then((api) => + api.post('v1/hardware/activate', { json: data }).then((e) => e) + ) + ) as Promise<{ + message: string + activated_gpus: number[] + }> + } +} diff --git a/extensions/monitoring-extension/tsconfig.json b/extensions/hardware-management-extension/tsconfig.json similarity index 65% rename from extensions/monitoring-extension/tsconfig.json rename to extensions/hardware-management-extension/tsconfig.json index 2477d58ce..72e1e1895 100644 --- a/extensions/monitoring-extension/tsconfig.json +++ b/extensions/hardware-management-extension/tsconfig.json @@ -8,7 +8,9 @@ "forceConsistentCasingInFileNames": true, "strict": false, "skipLibCheck": true, - "rootDir": "./src" + "rootDir": "./src", + "resolveJsonModule": true }, - "include": ["./src"] + "include": ["./src"], + "exclude": ["src/**/*.test.ts", "rolldown.config.mjs"] } diff --git a/extensions/inference-cortex-extension/README.md b/extensions/inference-cortex-extension/README.md index f9690da09..b9595b6e1 100644 --- a/extensions/inference-cortex-extension/README.md +++ b/extensions/inference-cortex-extension/README.md @@ -70,6 +70,6 @@ There are a few things to keep in mind when writing your extension code: ``` For more information about the Jan Extension Core module, see the - [documentation](https://github.com/janhq/jan/blob/main/core/README.md). + [documentation](https://github.com/menloresearch/jan/blob/main/core/README.md). So, what are you waiting for? Go ahead and start customizing your extension! 
diff --git a/extensions/inference-cortex-extension/bin/version.txt b/extensions/inference-cortex-extension/bin/version.txt index 53bd3ae76..492b167a6 100644 --- a/extensions/inference-cortex-extension/bin/version.txt +++ b/extensions/inference-cortex-extension/bin/version.txt @@ -1 +1 @@ -1.0.9-rc7 +1.0.12 \ No newline at end of file diff --git a/extensions/inference-cortex-extension/download.bat b/extensions/inference-cortex-extension/download.bat index fd20e8c8d..ca2930bdd 100644 --- a/extensions/inference-cortex-extension/download.bat +++ b/extensions/inference-cortex-extension/download.bat @@ -2,14 +2,14 @@ set BIN_PATH=./bin set SHARED_PATH=./../../electron/shared set /p CORTEX_VERSION=<./bin/version.txt -set ENGINE_VERSION=0.1.49 +set ENGINE_VERSION=0.1.55 @REM Download cortex.llamacpp binaries -set DOWNLOAD_URL=https://github.com/janhq/cortex.llamacpp/releases/download/v%ENGINE_VERSION%/cortex.llamacpp-%ENGINE_VERSION%-windows-amd64 -set CUDA_DOWNLOAD_URL=https://github.com/janhq/cortex.llamacpp/releases/download/v%ENGINE_VERSION% +set DOWNLOAD_URL=https://github.com/menloresearch/cortex.llamacpp/releases/download/v%ENGINE_VERSION%/cortex.llamacpp-%ENGINE_VERSION%-windows-amd64 +set CUDA_DOWNLOAD_URL=https://github.com/menloresearch/cortex.llamacpp/releases/download/v%ENGINE_VERSION% set SUBFOLDERS=windows-amd64-noavx-cuda-12-0 windows-amd64-noavx-cuda-11-7 windows-amd64-avx2-cuda-12-0 windows-amd64-avx2-cuda-11-7 windows-amd64-noavx windows-amd64-avx windows-amd64-avx2 windows-amd64-avx512 windows-amd64-vulkan -call .\node_modules\.bin\download -e --strip 1 -o %BIN_PATH% https://github.com/janhq/cortex.cpp/releases/download/v%CORTEX_VERSION%/cortex-%CORTEX_VERSION%-windows-amd64.tar.gz +call .\node_modules\.bin\download -e --strip 1 -o %BIN_PATH% https://github.com/menloresearch/cortex.cpp/releases/download/v%CORTEX_VERSION%/cortex-%CORTEX_VERSION%-windows-amd64.tar.gz call .\node_modules\.bin\download %DOWNLOAD_URL%-avx2-cuda-12-0.tar.gz -e --strip 1 -o 
%SHARED_PATH%/engines/cortex.llamacpp/windows-amd64-avx2-cuda-12-0/v%ENGINE_VERSION% call .\node_modules\.bin\download %DOWNLOAD_URL%-avx2-cuda-11-7.tar.gz -e --strip 1 -o %SHARED_PATH%/engines/cortex.llamacpp/windows-amd64-avx2-cuda-11-7/v%ENGINE_VERSION% call .\node_modules\.bin\download %DOWNLOAD_URL%-noavx-cuda-12-0.tar.gz -e --strip 1 -o %SHARED_PATH%/engines/cortex.llamacpp/windows-amd64-noavx-cuda-12-0/v%ENGINE_VERSION% diff --git a/extensions/inference-cortex-extension/download.sh b/extensions/inference-cortex-extension/download.sh index c32160184..3476708bb 100755 --- a/extensions/inference-cortex-extension/download.sh +++ b/extensions/inference-cortex-extension/download.sh @@ -2,10 +2,10 @@ # Read CORTEX_VERSION CORTEX_VERSION=$(cat ./bin/version.txt) -ENGINE_VERSION=0.1.49 -CORTEX_RELEASE_URL="https://github.com/janhq/cortex.cpp/releases/download" -ENGINE_DOWNLOAD_URL="https://github.com/janhq/cortex.llamacpp/releases/download/v${ENGINE_VERSION}/cortex.llamacpp-${ENGINE_VERSION}" -CUDA_DOWNLOAD_URL="https://github.com/janhq/cortex.llamacpp/releases/download/v${ENGINE_VERSION}" +ENGINE_VERSION=0.1.55 +CORTEX_RELEASE_URL="https://github.com/menloresearch/cortex.cpp/releases/download" +ENGINE_DOWNLOAD_URL="https://github.com/menloresearch/cortex.llamacpp/releases/download/v${ENGINE_VERSION}/cortex.llamacpp-${ENGINE_VERSION}" +CUDA_DOWNLOAD_URL="https://github.com/menloresearch/cortex.llamacpp/releases/download/v${ENGINE_VERSION}" BIN_PATH=./bin SHARED_PATH="../../electron/shared" # Detect platform diff --git a/extensions/inference-cortex-extension/package.json b/extensions/inference-cortex-extension/package.json index f191f3071..00e7c346e 100644 --- a/extensions/inference-cortex-extension/package.json +++ b/extensions/inference-cortex-extension/package.json @@ -1,14 +1,14 @@ { "name": "@janhq/inference-cortex-extension", "productName": "Cortex Inference Engine", - "version": "1.0.24", + "version": "1.0.25", "description": "This extension embeds cortex.cpp, 
a lightweight inference engine written in C++. See https://jan.ai.\nAdditional dependencies could be installed to run without Cuda Toolkit installation.", "main": "dist/index.js", "node": "dist/node/index.cjs.js", "author": "Jan ", "license": "AGPL-3.0", "scripts": { - "test": "jest", + "test": "vitest run", "build": "rolldown -c rolldown.config.mjs", "downloadcortex:linux:darwin": "./download.sh", "downloadcortex:win32": "download.bat", @@ -35,17 +35,15 @@ "rolldown": "1.0.0-beta.1", "run-script-os": "^1.1.6", "ts-jest": "^29.1.2", - "typescript": "^5.3.3" + "typescript": "^5.3.3", + "vitest": "^3.0.8" }, "dependencies": { "@janhq/core": "../../core/package.tgz", - "decompress": "^4.2.1", "fetch-retry": "^5.0.6", "ky": "^1.7.2", "p-queue": "^8.0.1", "rxjs": "^7.8.1", - "tcp-port-used": "^1.0.2", - "terminate": "2.6.1", "ulidx": "^2.3.0" }, "engines": { diff --git a/extensions/inference-cortex-extension/resources/default_settings.json b/extensions/inference-cortex-extension/resources/default_settings.json index a3a93f305..945f32729 100644 --- a/extensions/inference-cortex-extension/resources/default_settings.json +++ b/extensions/inference-cortex-extension/resources/default_settings.json @@ -76,7 +76,7 @@ }, { "key": "use_mmap", - "title": "MMAP", + "title": "mmap", "description": "Loads model files more efficiently by mapping them to memory, reducing RAM usage.", "controllerType": "checkbox", "controllerProps": { diff --git a/extensions/inference-cortex-extension/resources/models/aya-23-35b/model.json b/extensions/inference-cortex-extension/resources/models/aya-23-35b/model.json deleted file mode 100644 index f6e3d08e9..000000000 --- a/extensions/inference-cortex-extension/resources/models/aya-23-35b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "aya-23-35B-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/aya-23-35B-GGUF/resolve/main/aya-23-35B-Q4_K_M.gguf" - } - ], - "id": "aya-23-35b", - "object": "model", - "name": "Aya 
23 35B Q4", - "version": "1.1", - "description": "Aya 23 can talk upto 23 languages fluently.", - "format": "gguf", - "settings": { - "ctx_len": 8192, - "prompt_template": "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{system_prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>", - "llama_model_path": "aya-23-35B-Q4_K_M.gguf", - "ngl": 41 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "frequency_penalty": 0, - "presence_penalty": 0, - "stop": ["<|END_OF_TURN_TOKEN|>"] - }, - "metadata": { - "author": "CohereForAI", - "tags": ["34B", "Finetuned"], - "size": 21556982144 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/aya-23-8b/model.json b/extensions/inference-cortex-extension/resources/models/aya-23-8b/model.json deleted file mode 100644 index 463f7eec7..000000000 --- a/extensions/inference-cortex-extension/resources/models/aya-23-8b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "aya-23-8B-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/aya-23-8B-GGUF/resolve/main/aya-23-8B-Q4_K_M.gguf" - } - ], - "id": "aya-23-8b", - "object": "model", - "name": "Aya 23 8B Q4", - "version": "1.2", - "description": "Aya 23 can talk upto 23 languages fluently.", - "format": "gguf", - "settings": { - "ctx_len": 8192, - "prompt_template": "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{system_prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>", - "llama_model_path": "aya-23-8B-Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "frequency_penalty": 0, - "presence_penalty": 0, - "stop": ["<|END_OF_TURN_TOKEN|>"] - }, - "metadata": { - "author": "CohereForAI", - "tags": ["7B", "Finetuned"], - "size": 5056982144 - 
}, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/bakllava-1/model.json b/extensions/inference-cortex-extension/resources/models/bakllava-1/model.json deleted file mode 100644 index ccc8f693f..000000000 --- a/extensions/inference-cortex-extension/resources/models/bakllava-1/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "ggml-model-q5_k.gguf", - "url": "https://huggingface.co/mys/ggml_bakllava-1/resolve/main/ggml-model-q5_k.gguf" - }, - { - "filename": "mmproj-model-f16.gguf", - "url": "https://huggingface.co/mys/ggml_bakllava-1/resolve/main/mmproj-model-f16.gguf" - } - ], - "id": "bakllava-1", - "object": "model", - "name": "BakLlava 1", - "version": "1.0", - "description": "BakLlava 1 can bring vision understanding to Jan", - "format": "gguf", - "settings": { - "vision_model": true, - "text_model": false, - "ctx_len": 4096, - "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n", - "llama_model_path": "ggml-model-q5_k.gguf", - "mmproj": "mmproj-model-f16.gguf", - "ngl": 33 - }, - "parameters": { - "max_tokens": 4096 - }, - "metadata": { - "author": "Mys", - "tags": ["Vision"], - "size": 5750000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/codeninja-1.0-7b/model.json b/extensions/inference-cortex-extension/resources/models/codeninja-1.0-7b/model.json deleted file mode 100644 index 7bd5bf3a4..000000000 --- a/extensions/inference-cortex-extension/resources/models/codeninja-1.0-7b/model.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "sources": [ - { - "filename": "codeninja-1.0-openchat-7b.Q4_K_M.gguf", - "url": "https://huggingface.co/beowolx/CodeNinja-1.0-OpenChat-7B-GGUF/resolve/main/codeninja-1.0-openchat-7b.Q4_K_M.gguf" - } - ], - "id": "codeninja-1.0-7b", - "object": "model", - "name": "CodeNinja 7B Q4", - "version": "1.2", - "description": "CodeNinja is good for coding tasks and can handle various languages including 
Python, C, C++, Rust, Java, JavaScript, and more.", - "format": "gguf", - "settings": { - "ctx_len": 8192, - "prompt_template": "GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:", - "llama_model_path": "codeninja-1.0-openchat-7b.Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Beowolx", - "tags": ["7B", "Finetuned"], - "size": 4370000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/codestral-22b/model.json b/extensions/inference-cortex-extension/resources/models/codestral-22b/model.json deleted file mode 100644 index 2cce063e6..000000000 --- a/extensions/inference-cortex-extension/resources/models/codestral-22b/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "Codestral-22B-v0.1-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/Codestral-22B-v0.1-GGUF/resolve/main/Codestral-22B-v0.1-Q4_K_M.gguf" - } - ], - "id": "codestral-22b", - "object": "model", - "name": "Codestral 22B Q4", - "version": "1.1", - "description": "Latest model from MistralAI optimized for code generation tasks.", - "format": "gguf", - "settings": { - "ctx_len": 32000, - "prompt_template": "{system_message} [INST] {prompt} [/INST]", - "llama_model_path": "Codestral-22B-v0.1-Q4_K_M.gguf", - "ngl": 57 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32000, - "stop": [", [/INST]"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "MistralAI", - "tags": ["22B", "Finetuned", "Featured"], - "size": 13341237440 - }, - "engine": "llama-cpp" - } - diff --git a/extensions/inference-cortex-extension/resources/models/command-r-34b/model.json b/extensions/inference-cortex-extension/resources/models/command-r-34b/model.json deleted file mode 100644 index 
13518604c..000000000 --- a/extensions/inference-cortex-extension/resources/models/command-r-34b/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "c4ai-command-r-v01-Q4_K_M.gguf", - "url": "https://huggingface.co/andrewcanis/c4ai-command-r-v01-GGUF/resolve/main/c4ai-command-r-v01-Q4_K_M.gguf" - } - ], - "id": "command-r-34b", - "object": "model", - "name": "Command-R v01 34B Q4", - "version": "1.6", - "description": "C4AI Command-R developed by CohereAI is optimized for a variety of use cases including reasoning, summarization, and question answering.", - "format": "gguf", - "settings": { - "ctx_len": 131072, - "prompt_template": "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>", - "llama_model_path": "c4ai-command-r-v01-Q4_K_M.gguf", - "ngl": 41 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 131072, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "CohereAI", - "tags": ["34B", "Finetuned"], - "size": 21500000000 - }, - "engine": "llama-cpp" - } - diff --git a/extensions/inference-cortex-extension/resources/models/deepseek-coder-1.3b/model.json b/extensions/inference-cortex-extension/resources/models/deepseek-coder-1.3b/model.json deleted file mode 100644 index 6722d253d..000000000 --- a/extensions/inference-cortex-extension/resources/models/deepseek-coder-1.3b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "deepseek-coder-1.3b-instruct.Q8_0.gguf", - "url": "https://huggingface.co/TheBloke/deepseek-coder-1.3b-instruct-GGUF/resolve/main/deepseek-coder-1.3b-instruct.Q8_0.gguf" - } - ], - "id": "deepseek-coder-1.3b", - "object": "model", - "name": "Deepseek Coder 1.3B Instruct Q8", - "version": "1.4", - "description": "Deepseek Coder excelled in project-level code completion with advanced capabilities across multiple programming languages.", - "format": 
"gguf", - "settings": { - "ctx_len": 16384, - "prompt_template": "### Instruction:\n{prompt}\n### Response:", - "llama_model_path": "deepseek-coder-1.3b-instruct.Q8_0.gguf", - "ngl": 25 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 16384, - "stop": ["<|EOT|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Deepseek, The Bloke", - "tags": ["Tiny"], - "size": 1430000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/deepseek-coder-34b/model.json b/extensions/inference-cortex-extension/resources/models/deepseek-coder-34b/model.json deleted file mode 100644 index 8a2e271cd..000000000 --- a/extensions/inference-cortex-extension/resources/models/deepseek-coder-34b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "deepseek-coder-33b-instruct.Q4_K_M.gguf", - "url": "https://huggingface.co/mradermacher/deepseek-coder-33b-instruct-GGUF/resolve/main/deepseek-coder-33b-instruct.Q4_K_M.gguf" - } - ], - "id": "deepseek-coder-34b", - "object": "model", - "name": "Deepseek Coder 33B Instruct Q4", - "version": "1.4", - "description": "Deepseek Coder excelled in project-level code completion with advanced capabilities across multiple programming languages.", - "format": "gguf", - "settings": { - "ctx_len": 16384, - "prompt_template": "### Instruction:\n{prompt}\n### Response:", - "llama_model_path": "deepseek-coder-33b-instruct.Q4_K_M.gguf", - "ngl": 63 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 16384, - "stop": ["<|EOT|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Deepseek", - "tags": ["33B"], - "size": 19940000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/gemma-1.1-2b/model.json b/extensions/inference-cortex-extension/resources/models/gemma-1.1-2b/model.json 
deleted file mode 100644 index 3278c9a81..000000000 --- a/extensions/inference-cortex-extension/resources/models/gemma-1.1-2b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "gemma-1.1-2b-it-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/gemma-1.1-2b-it-GGUF/resolve/main/gemma-1.1-2b-it-Q4_K_M.gguf" - } - ], - "id": "gemma-1.1-2b-it", - "object": "model", - "name": "Gemma 1.1 2B Q4", - "version": "1.3", - "description": "Gemma is built from the same technology with Google's Gemini.", - "format": "gguf", - "settings": { - "ctx_len": 8192, - "prompt_template": "user\n{prompt}\nmodel", - "llama_model_path": "gemma-1.1-2b-it-Q4_K_M.gguf", - "ngl": 19 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "stop": [""], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Google", - "tags": ["2B", "Finetuned", "Tiny"], - "size": 1630000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/gemma-1.1-7b/model.json b/extensions/inference-cortex-extension/resources/models/gemma-1.1-7b/model.json deleted file mode 100644 index 9a57f9b37..000000000 --- a/extensions/inference-cortex-extension/resources/models/gemma-1.1-7b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "gemma-1.1-7b-it-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/gemma-1.1-7b-it-GGUF/resolve/main/gemma-1.1-7b-it-Q4_K_M.gguf" - } - ], - "id": "gemma-1.1-7b-it", - "object": "model", - "name": "Gemma 1.1 7B Q4", - "version": "1.3", - "description": "Google's Gemma is built for multilingual purpose", - "format": "gguf", - "settings": { - "ctx_len": 8192, - "prompt_template": "user\n{prompt}\nmodel", - "llama_model_path": "gemma-1.1-7b-it-Q4_K_M.gguf", - "ngl": 29 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "stop": [], - "frequency_penalty": 0, - 
"presence_penalty": 0 - }, - "metadata": { - "author": "Google", - "tags": ["7B", "Finetuned"], - "size": 5330000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/gemma-2-27b/model.json b/extensions/inference-cortex-extension/resources/models/gemma-2-27b/model.json deleted file mode 100644 index 66eaff7c2..000000000 --- a/extensions/inference-cortex-extension/resources/models/gemma-2-27b/model.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "sources": [ - { - "filename": "gemma-2-27b-it-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/gemma-2-27b-it-GGUF/resolve/main/gemma-2-27b-it-Q4_K_M.gguf" - } - ], - "id": "gemma-2-27b-it", - "object": "model", - "name": "Gemma 2 27B Q4", - "version": "1.1", - "description": "Gemma is built from the same technology with Google's Gemini.", - "format": "gguf", - "settings": { - "ctx_len": 8192, - "prompt_template": "user\n{prompt}\nmodel\n\nmodel\n", - "llama_model_path": "gemma-2-27b-it-Q4_K_M.gguf", - "ngl": 47 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "stop": [ - "" - ], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Google", - "tags": [ - "27B", - "Conversational", - "Text-generation" - ], - "size": 16600000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/gemma-2-2b/model.json b/extensions/inference-cortex-extension/resources/models/gemma-2-2b/model.json deleted file mode 100644 index 60be558b8..000000000 --- a/extensions/inference-cortex-extension/resources/models/gemma-2-2b/model.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "sources": [ - { - "filename": "gemma-2-2b-it-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/gemma-2-2b-it-GGUF/resolve/main/gemma-2-2b-it-Q4_K_M.gguf" - } - ], - "id": "gemma-2-2b-it", - "object": "model", - "name": "Gemma 2 2B Q4", - "version": "1.1", - "description": "Gemma is built from 
the same technology with Google's Gemini.", - "format": "gguf", - "settings": { - "ctx_len": 8192, - "prompt_template": "user\n{prompt}\nmodel\n\nmodel\n", - "llama_model_path": "gemma-2-2b-it-Q4_K_M.gguf", - "ngl": 27 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "stop": [ - "" - ], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Google", - "tags": [ - "2B", - "Tiny", - "Conversational", - "Text-generation" - ], - "size": 1710000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/gemma-2-9b/model.json b/extensions/inference-cortex-extension/resources/models/gemma-2-9b/model.json deleted file mode 100644 index 67acaad09..000000000 --- a/extensions/inference-cortex-extension/resources/models/gemma-2-9b/model.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "sources": [ - { - "filename": "gemma-2-9b-it-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/resolve/main/gemma-2-9b-it-Q4_K_M.gguf" - } - ], - "id": "gemma-2-9b-it", - "object": "model", - "name": "Gemma 2 9B Q4", - "version": "1.1", - "description": "Gemma is built from the same technology with Google's Gemini.", - "format": "gguf", - "settings": { - "ctx_len": 8192, - "prompt_template": "user\n{prompt}\nmodel\n\nmodel\n", - "llama_model_path": "gemma-2-9b-it-Q4_K_M.gguf", - "ngl": 43 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "stop": [ - "" - ], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Google", - "tags": [ - "9B", - "Conversational", - "Text-generation" - ], - "size": 5760000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/llama2-chat-70b/model.json b/extensions/inference-cortex-extension/resources/models/llama2-chat-70b/model.json deleted file mode 100644 index c91a0a73b..000000000 --- 
a/extensions/inference-cortex-extension/resources/models/llama2-chat-70b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "llama-2-70b-chat.Q4_K_M.gguf", - "url": "https://huggingface.co/TheBloke/Llama-2-70B-Chat-GGUF/resolve/main/llama-2-70b-chat.Q4_K_M.gguf" - } - ], - "id": "llama2-chat-70b", - "object": "model", - "name": "Llama 2 Chat 70B Q4", - "version": "1.1", - "description": "Llama 2 specifically designed for a comprehensive understanding the world.", - "format": "gguf", - "settings": { - "ctx_len": 4096, - "prompt_template": "[INST] <>\n{system_message}<>\n{prompt}[/INST]", - "llama_model_path": "llama-2-70b-chat.Q4_K_M.gguf", - "ngl": 81 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 4096, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "MetaAI", - "tags": ["70B", "Foundational Model"], - "size": 43920000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/llama2-chat-7b/model.json b/extensions/inference-cortex-extension/resources/models/llama2-chat-7b/model.json deleted file mode 100644 index 4a28f6004..000000000 --- a/extensions/inference-cortex-extension/resources/models/llama2-chat-7b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "llama-2-7b-chat.Q4_K_M.gguf", - "url": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf" - } - ], - "id": "llama2-chat-7b", - "object": "model", - "name": "Llama 2 Chat 7B Q4", - "version": "1.1", - "description": "Llama 2 specifically designed for a comprehensive understanding the world.", - "format": "gguf", - "settings": { - "ctx_len": 4096, - "prompt_template": "[INST] <>\n{system_message}<>\n{prompt}[/INST]", - "llama_model_path": "llama-2-7b-chat.Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 
4096, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "MetaAI", - "tags": ["7B", "Foundational Model"], - "size": 4080000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/llama3-8b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/llama3-8b-instruct/model.json deleted file mode 100644 index 3456a185e..000000000 --- a/extensions/inference-cortex-extension/resources/models/llama3-8b-instruct/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "Meta-Llama-3-8B-Instruct-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf" - } - ], - "id": "llama3-8b-instruct", - "object": "model", - "name": "Llama 3 8B Instruct Q4", - "version": "1.4", - "description": "Meta's Llama 3 excels at general usage situations, including chat, general world knowledge, and coding.", - "format": "gguf", - "settings": { - "ctx_len": 8192, - "prompt_template": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "llama_model_path": "Meta-Llama-3-8B-Instruct-Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "stop": ["<|end_of_text|>","<|eot_id|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "MetaAI", - "tags": ["8B"], - "size": 4920000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/llama3-hermes-8b/model.json b/extensions/inference-cortex-extension/resources/models/llama3-hermes-8b/model.json deleted file mode 100644 index 718629fb0..000000000 --- a/extensions/inference-cortex-extension/resources/models/llama3-hermes-8b/model.json +++ 
/dev/null @@ -1,38 +0,0 @@ -{ - "sources": [ - { - "filename": "Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf", - "url": "https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/resolve/main/Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf" - } - ], - "id": "llama3-hermes-8b", - "object": "model", - "name": "Hermes Pro Llama 3 8B Q4", - "version": "1.2", - "description": "Hermes Pro is well-designed for General chat and JSON output.", - "format": "gguf", - "settings": { - "ctx_len": 8192, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "NousResearch", - "tags": [ - "7B", - "Finetuned" - ], - "size": 4920000000 - }, - "engine": "llama-cpp" - } diff --git a/extensions/inference-cortex-extension/resources/models/llama3.1-70b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/llama3.1-70b-instruct/model.json deleted file mode 100644 index aec73719e..000000000 --- a/extensions/inference-cortex-extension/resources/models/llama3.1-70b-instruct/model.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "sources": [ - { - "filename": "Meta-Llama-3.1-70B-Instruct-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/Meta-Llama-3.1-70B-Instruct-GGUF/resolve/main/Meta-Llama-3.1-70B-Instruct-Q4_K_M.gguf" - } - ], - "id": "llama3.1-70b-instruct", - "object": "model", - "name": "Llama 3.1 70B Instruct Q4", - "version": "1.2", - "description": "Meta's Llama 3.1 excels at general usage situations, including chat, general world knowledge, and coding.", - "format": "gguf", - "settings": { - "ctx_len": 131072, - "prompt_template": 
"<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "llama_model_path": "Meta-Llama-3.1-70B-Instruct-Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "stop": [ - "<|end_of_text|>", - "<|eot_id|>", - "<|eom_id|>" - ], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "MetaAI", - "tags": [ - "70B" - ], - "size": 42500000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/llama3.1-8b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/llama3.1-8b-instruct/model.json deleted file mode 100644 index ec9a0284b..000000000 --- a/extensions/inference-cortex-extension/resources/models/llama3.1-8b-instruct/model.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "sources": [ - { - "filename": "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/Meta-Llama-3.1-8B-Instruct-GGUF/resolve/main/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf" - } - ], - "id": "llama3.1-8b-instruct", - "object": "model", - "name": "Llama 3.1 8B Instruct Q4", - "version": "1.2", - "description": "Meta's Llama 3.1 excels at general usage situations, including chat, general world knowledge, and coding.", - "format": "gguf", - "settings": { - "ctx_len": 131072, - "prompt_template": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "llama_model_path": "Meta-Llama-3.1-8B-Instruct.Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "stop": [ - "<|end_of_text|>", - "<|eot_id|>", - "<|eom_id|>" - ], - "frequency_penalty": 0, - 
"presence_penalty": 0 - }, - "metadata": { - "author": "MetaAI", - "tags": [ - "8B", "Featured" - ], - "size": 4920000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/llama3.2-1b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/llama3.2-1b-instruct/model.json deleted file mode 100644 index 0fe7d3316..000000000 --- a/extensions/inference-cortex-extension/resources/models/llama3.2-1b-instruct/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "Llama-3.2-1B-Instruct-Q8_0.gguf", - "url": "https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q8_0.gguf" - } - ], - "id": "llama3.2-1b-instruct", - "object": "model", - "name": "Llama 3.2 1B Instruct Q8", - "version": "1.0", - "description": "Meta's Llama 3.2 excels at general usage situations, including chat, general world knowledge, and coding.", - "format": "gguf", - "settings": { - "ctx_len": 131072, - "prompt_template": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "llama_model_path": "Llama-3.2-1B-Instruct-Q8_0.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "stop": ["<|end_of_text|>", "<|eot_id|>", "<|eom_id|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "MetaAI", - "tags": ["1B", "Featured"], - "size": 1320000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/llama3.2-3b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/llama3.2-3b-instruct/model.json deleted file mode 100644 index 299362fbf..000000000 --- a/extensions/inference-cortex-extension/resources/models/llama3.2-3b-instruct/model.json +++ /dev/null @@ -1,35 
+0,0 @@ -{ - "sources": [ - { - "filename": "Llama-3.2-3B-Instruct-Q8_0.gguf", - "url": "https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/Llama-3.2-3B-Instruct-Q8_0.gguf" - } - ], - "id": "llama3.2-3b-instruct", - "object": "model", - "name": "Llama 3.2 3B Instruct Q8", - "version": "1.0", - "description": "Meta's Llama 3.2 excels at general usage situations, including chat, general world knowledge, and coding.", - "format": "gguf", - "settings": { - "ctx_len": 131072, - "prompt_template": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "llama_model_path": "Llama-3.2-3B-Instruct-Q8_0.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "stop": ["<|end_of_text|>", "<|eot_id|>", "<|eom_id|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "MetaAI", - "tags": ["3B", "Featured"], - "size": 3420000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/llamacorn-1.1b/model.json b/extensions/inference-cortex-extension/resources/models/llamacorn-1.1b/model.json deleted file mode 100644 index 3230df5b0..000000000 --- a/extensions/inference-cortex-extension/resources/models/llamacorn-1.1b/model.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "sources": [ - { - "url":"https://huggingface.co/janhq/llamacorn-1.1b-chat-GGUF/resolve/main/llamacorn-1.1b-chat.Q8_0.gguf", - "filename": "llamacorn-1.1b-chat.Q8_0.gguf" - } - ], - "id": "llamacorn-1.1b", - "object": "model", - "name": "LlamaCorn 1.1B Q8", - "version": "1.1", - "description": "LlamaCorn is designed to improve chat functionality from TinyLlama.", - "format": "gguf", - "settings": { - "ctx_len": 2048, - "prompt_template": 
"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "llamacorn-1.1b-chat.Q8_0.gguf", - "ngl": 23 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 2048, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Jan", - "tags": [ - "Tiny", - "Finetuned" - ], - "size": 1170000000 - }, - "engine": "llama-cpp" - } \ No newline at end of file diff --git a/extensions/inference-cortex-extension/resources/models/llava-13b/model.json b/extensions/inference-cortex-extension/resources/models/llava-13b/model.json deleted file mode 100644 index fe058e259..000000000 --- a/extensions/inference-cortex-extension/resources/models/llava-13b/model.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "sources": [ - { - "filename": "llava-v1.6-vicuna-13b.Q4_K_M.gguf", - "url": "https://huggingface.co/cjpais/llava-v1.6-vicuna-13b-gguf/resolve/main/llava-v1.6-vicuna-13b.Q4_K_M.gguf" - }, - { - "filename": "mmproj-model-f16.gguf", - "url": "https://huggingface.co/cjpais/llava-v1.6-vicuna-13b-gguf/resolve/main/mmproj-model-f16.gguf" - } - ], - "id": "llava-13b", - "object": "model", - "name": "LlaVa 13B Q4", - "version": "1.2", - "description": "LlaVa can bring vision understanding to Jan", - "format": "gguf", - "settings": { - "vision_model": true, - "text_model": false, - "ctx_len": 4096, - "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n", - "llama_model_path": "llava-v1.6-vicuna-13b.Q4_K_M.gguf", - "mmproj": "mmproj-model-f16.gguf", - "ngl": 33 - }, - "parameters": { - "max_tokens": 4096, - "stop": [""] - }, - "metadata": { - "author": "liuhaotian", - "tags": ["Vision"], - "size": 7870000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/llava-7b/model.json b/extensions/inference-cortex-extension/resources/models/llava-7b/model.json deleted file mode 100644 index 
8e5cdf09f..000000000 --- a/extensions/inference-cortex-extension/resources/models/llava-7b/model.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "sources": [ - { - "filename": "llava-v1.6-mistral-7b.Q4_K_M.gguf", - "url": "https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/llava-v1.6-mistral-7b.Q4_K_M.gguf" - }, - { - "filename": "mmproj-model-f16.gguf", - "url": "https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/mmproj-model-f16.gguf" - } - ], - "id": "llava-7b", - "object": "model", - "name": "LlaVa 7B", - "version": "1.2", - "description": "LlaVa can bring vision understanding to Jan", - "format": "gguf", - "settings": { - "vision_model": true, - "text_model": false, - "ctx_len": 4096, - "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n", - "llama_model_path": "llava-v1.6-mistral-7b.Q4_K_M.gguf", - "mmproj": "mmproj-model-f16.gguf", - "ngl": 33 - }, - "parameters": { - "max_tokens": 4096, - "stop": [""] - }, - "metadata": { - "author": "liuhaotian", - "tags": ["Vision"], - "size": 4370000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/mistral-ins-7b-q4/model.json b/extensions/inference-cortex-extension/resources/models/mistral-ins-7b-q4/model.json deleted file mode 100644 index 9b568e468..000000000 --- a/extensions/inference-cortex-extension/resources/models/mistral-ins-7b-q4/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "Mistral-7B-Instruct-v0.3-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/Mistral-7B-Instruct-v0.3-GGUF/resolve/main/Mistral-7B-Instruct-v0.3-Q4_K_M.gguf" - } - ], - "id": "mistral-ins-7b-q4", - "object": "model", - "name": "Mistral 7B Instruct Q4", - "version": "1.5", - "description": "Mistral 7B Instruct model, specifically designed for a comprehensive understanding of the world.", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "{system_message} [INST] {prompt} [/INST]", - 
"llama_model_path": "Mistral-7B-Instruct-v0.3-Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "stop": ["[/INST]"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "MistralAI", - "tags": ["7B", "Foundational Model"], - "size": 4370000000, - "cover": "https://raw.githubusercontent.com/janhq/jan/dev/models/mistral-ins-7b-q4/cover.png" - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/mixtral-8x7b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/mixtral-8x7b-instruct/model.json deleted file mode 100644 index c711065ff..000000000 --- a/extensions/inference-cortex-extension/resources/models/mixtral-8x7b-instruct/model.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "sources": [ - { - "filename": "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf", - "url": "https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf" - } - ], - "id": "mixtral-8x7b-instruct", - "object": "model", - "name": "Mixtral 8x7B Instruct Q4", - "version": "1.1", - "description": "The Mixtral-8x7B is a pretrained generative Sparse Mixture of Experts. 
The Mixtral-8x7B outperforms 70B models on most benchmarks.", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "[INST] {prompt} [/INST]", - "llama_model_path": "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf", - "ngl": 100 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "MistralAI, TheBloke", - "tags": ["70B", "Foundational Model"], - "size": 26440000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/noromaid-7b/model.json b/extensions/inference-cortex-extension/resources/models/noromaid-7b/model.json deleted file mode 100644 index 1999035aa..000000000 --- a/extensions/inference-cortex-extension/resources/models/noromaid-7b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "Noromaid-7B-0.4-DPO.q4_k_m.gguf", - "url": "https://huggingface.co/NeverSleep/Noromaid-7B-0.4-DPO-GGUF/resolve/main/Noromaid-7B-0.4-DPO.q4_k_m.gguf" - } - ], - "id": "noromaid-7b", - "object": "model", - "name": "Noromaid 7B Q4", - "version": "1.2", - "description": "The Noromaid 7b model is designed for role-playing with human-like behavior.", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "Noromaid-7B-0.4-DPO.q4_k_m.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "NeverSleep", - "tags": ["7B", "Finetuned"], - "size": 4370000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/openchat-3.5-7b/model.json 
b/extensions/inference-cortex-extension/resources/models/openchat-3.5-7b/model.json deleted file mode 100644 index 05371b69e..000000000 --- a/extensions/inference-cortex-extension/resources/models/openchat-3.5-7b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "openchat-3.5-0106.Q4_K_M.gguf", - "url": "https://huggingface.co/TheBloke/openchat-3.5-0106-GGUF/resolve/main/openchat-3.5-0106.Q4_K_M.gguf" - } - ], - "id": "openchat-3.5-7b", - "object": "model", - "name": "Openchat-3.5 7B Q4", - "version": "1.2", - "description": "The performance of Openchat surpasses ChatGPT-3.5 and Grok-1 across various benchmarks.", - "format": "gguf", - "settings": { - "ctx_len": 8192, - "prompt_template": "GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:", - "llama_model_path": "openchat-3.5-0106.Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 8192, - "stop": ["<|end_of_turn|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Openchat", - "tags": ["Recommended", "7B", "Finetuned"], - "size": 4370000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/phi3-3.8b/model.json b/extensions/inference-cortex-extension/resources/models/phi3-3.8b/model.json deleted file mode 100644 index 90aa50117..000000000 --- a/extensions/inference-cortex-extension/resources/models/phi3-3.8b/model.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "sources": [ - { - "url": "https://huggingface.co/bartowski/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q4_K_M.gguf", - "filename": "Phi-3-mini-4k-instruct-Q4_K_M.gguf" - } - ], - "id": "phi3-3.8b", - "object": "model", - "name": "Phi-3 Mini Instruct Q4", - "version": "1.3", - "description": "Phi-3 Mini is Microsoft's newest, compact model designed for mobile use.", - "format": "gguf", - "settings": { - "ctx_len": 4096, - "prompt_template": 
"<|user|>\n{prompt}<|end|>\n<|assistant|>\n", - "llama_model_path": "Phi-3-mini-4k-instruct-Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "max_tokens": 4096, - "stop": ["<|end|>"], - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Microsoft", - "tags": [ - "3B", - "Finetuned" - ], - "size": 2320000000 - }, - "engine": "llama-cpp" -} \ No newline at end of file diff --git a/extensions/inference-cortex-extension/resources/models/phi3-medium/model.json b/extensions/inference-cortex-extension/resources/models/phi3-medium/model.json deleted file mode 100644 index afce04952..000000000 --- a/extensions/inference-cortex-extension/resources/models/phi3-medium/model.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "sources": [ - { - "url": "https://huggingface.co/bartowski/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q4_K_M.gguf", - "filename": "Phi-3-mini-4k-instruct-Q4_K_M.gguf" - } - ], - "id": "phi3-medium", - "object": "model", - "name": "Phi-3 Medium Instruct Q4", - "version": "1.4", - "description": "Phi-3 Medium is Microsoft's latest SOTA model.", - "format": "gguf", - "settings": { - "ctx_len": 128000, - "prompt_template": "<|user|> {prompt}<|end|><|assistant|>", - "llama_model_path": "Phi-3-mini-4k-instruct-Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "max_tokens": 128000, - "stop": ["<|end|>"], - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Microsoft", - "tags": [ - "14B", - "Finetuned" - ], - "size": 8366000000 - }, - "engine": "llama-cpp" - } \ No newline at end of file diff --git a/extensions/inference-cortex-extension/resources/models/phind-34b/model.json b/extensions/inference-cortex-extension/resources/models/phind-34b/model.json deleted file mode 100644 index f6e302173..000000000 --- 
a/extensions/inference-cortex-extension/resources/models/phind-34b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "phind-codellama-34b-v2.Q5_K_M.gguf", - "url": "https://huggingface.co/TheBloke/Phind-CodeLlama-34B-v2-GGUF/resolve/main/phind-codellama-34b-v2.Q5_K_M.gguf" - } - ], - "id": "phind-34b", - "object": "model", - "name": "Phind 34B Q4", - "version": "1.3", - "description": "Phind 34B is the best Open-source coding model.", - "format": "gguf", - "settings": { - "ctx_len": 16384, - "prompt_template": "### System Prompt\n{system_message}\n### User Message\n{prompt}\n### Assistant", - "llama_model_path": "phind-codellama-34b-v2.Q4_K_M.gguf", - "ngl": 49 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 16384, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Phind", - "tags": ["34B", "Finetuned"], - "size": 20220000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/qwen-7b/model.json b/extensions/inference-cortex-extension/resources/models/qwen-7b/model.json deleted file mode 100644 index be37cac0d..000000000 --- a/extensions/inference-cortex-extension/resources/models/qwen-7b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "qwen1_5-7b-chat-q4_k_m.gguf", - "url": "https://huggingface.co/Qwen/Qwen1.5-7B-Chat-GGUF/resolve/main/qwen1_5-7b-chat-q4_k_m.gguf" - } - ], - "id": "qwen-7b", - "object": "model", - "name": "Qwen Chat 7B Q4", - "version": "1.2", - "description": "Qwen is optimized at Chinese, ideal for everyday tasks.", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "qwen1_5-7b-chat-q4_k_m.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - 
"max_tokens": 32768, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Alibaba", - "tags": ["7B", "Finetuned"], - "size": 4770000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/qwen2-7b/model.json b/extensions/inference-cortex-extension/resources/models/qwen2-7b/model.json deleted file mode 100644 index 210848a43..000000000 --- a/extensions/inference-cortex-extension/resources/models/qwen2-7b/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "Qwen2-7B-Instruct-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/Qwen2-7B-Instruct-GGUF/resolve/main/Qwen2-7B-Instruct-Q4_K_M.gguf" - } - ], - "id": "qwen2-7b", - "object": "model", - "name": "Qwen 2 7B Instruct Q4", - "version": "1.2", - "description": "Qwen is optimized at Chinese, ideal for everyday tasks.", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "Qwen2-7B-Instruct-Q4_K_M.gguf", - "ngl": 29 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Alibaba", - "tags": ["7B", "Finetuned"], - "size": 4680000000 - }, - "engine": "llama-cpp" - } - \ No newline at end of file diff --git a/extensions/inference-cortex-extension/resources/models/qwen2.5-14b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/qwen2.5-14b-instruct/model.json deleted file mode 100644 index 96e4d214c..000000000 --- a/extensions/inference-cortex-extension/resources/models/qwen2.5-14b-instruct/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "Qwen2.5-14B-Instruct-Q4_K_M.gguf", - "url": 
"https://huggingface.co/bartowski/Qwen2.5-14B-Instruct-GGUF/resolve/main/Qwen2.5-14B-Instruct-Q4_K_M.gguf" - } - ], - "id": "qwen2.5-14b-instruct", - "object": "model", - "name": "Qwen2.5 14B Instruct Q4", - "version": "1.0", - "description": "The Qwen 2.5 family is specifically designed to excel in math, coding, and other computational tasks", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "Qwen2.5-14B-Instruct-Q4_K_M.gguf", - "ngl": 49 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "stop": ["<|endoftext|>", "<|im_end|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "QwenLM", - "tags": ["14B", "Featured"], - "size": 8990000000 - }, - "engine": "llama-cpp" - } - \ No newline at end of file diff --git a/extensions/inference-cortex-extension/resources/models/qwen2.5-32b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/qwen2.5-32b-instruct/model.json deleted file mode 100644 index 20681dff4..000000000 --- a/extensions/inference-cortex-extension/resources/models/qwen2.5-32b-instruct/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "Qwen2.5-32B-Instruct-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/Qwen2.5-32B-Instruct-GGUF/resolve/main/Qwen2.5-32B-Instruct-Q4_K_M.gguf" - } - ], - "id": "qwen2.5-32b-instruct", - "object": "model", - "name": "Qwen2.5 32B Instruct Q4", - "version": "1.0", - "description": "The Qwen 2.5 family is specifically designed to excel in math, coding, and other computational tasks", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "Qwen2.5-32B-Instruct-Q4_K_M.gguf", - 
"ngl": 65 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "stop": ["<|endoftext|>", "<|im_end|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "QwenLM", - "tags": ["32B"], - "size": 19900000000 - }, - "engine": "llama-cpp" - } - \ No newline at end of file diff --git a/extensions/inference-cortex-extension/resources/models/qwen2.5-72b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/qwen2.5-72b-instruct/model.json deleted file mode 100644 index b741539eb..000000000 --- a/extensions/inference-cortex-extension/resources/models/qwen2.5-72b-instruct/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "Qwen2.5-72B-Instruct-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/Qwen2.5-72B-Instruct-GGUF/resolve/main/Qwen2.5-72B-Instruct-Q4_K_M.gguf" - } - ], - "id": "qwen2.5-72b-instruct", - "object": "model", - "name": "Qwen2.5 72B Instruct Q4", - "version": "1.0", - "description": "The Qwen 2.5 family is specifically designed to excel in math, coding, and other computational tasks", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "Qwen2.5-72B-Instruct-Q4_K_M.gguf", - "ngl": 81 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "stop": ["<|endoftext|>", "<|im_end|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "QwenLM", - "tags": ["72B"], - "size": 47400000000 - }, - "engine": "llama-cpp" - } - \ No newline at end of file diff --git a/extensions/inference-cortex-extension/resources/models/qwen2.5-7b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/qwen2.5-7b-instruct/model.json deleted file mode 100644 index 6741aef64..000000000 --- 
a/extensions/inference-cortex-extension/resources/models/qwen2.5-7b-instruct/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "Qwen2.5-7B-Instruct-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/Qwen2.5-7B-Instruct-GGUF/resolve/main/Qwen2.5-7B-Instruct-Q4_K_M.gguf" - } - ], - "id": "qwen2.5-7b-instruct", - "object": "model", - "name": "Qwen2.5 7B Instruct Q4", - "version": "1.0", - "description": "The Qwen 2.5 family is specifically designed to excel in math, coding, and other computational tasks", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "Qwen2.5-7B-Instruct-Q4_K_M.gguf", - "ngl": 29 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "stop": ["<|endoftext|>", "<|im_end|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "QwenLM", - "tags": ["7B", "Featured"], - "size": 4680000000 - }, - "engine": "llama-cpp" - } - \ No newline at end of file diff --git a/extensions/inference-cortex-extension/resources/models/qwen2.5-coder-14b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/qwen2.5-coder-14b-instruct/model.json deleted file mode 100644 index a445ee2db..000000000 --- a/extensions/inference-cortex-extension/resources/models/qwen2.5-coder-14b-instruct/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "Qwen2.5-Coder-14B-Instruct-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/Qwen2.5-Coder-14B-Instruct-GGUF/resolve/main/Qwen2.5-Coder-14B-Instruct-Q4_K_M.gguf" - } - ], - "id": "qwen2.5-coder-14b-instruct", - "object": "model", - "name": "Qwen2.5 Coder 14B Instruct Q4", - "version": "1.0", - "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models. 
Significantly improvements in code generation, code reasoning and code fixing.", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "Qwen2.5-Coder-14B-Instruct-Q4_K_M.gguf", - "ngl": 29 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "stop": ["<|endoftext|>", "<|im_end|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "QwenLM", - "tags": ["14B", "Featured"], - "size": 8990000000 - }, - "engine": "llama-cpp" - } - \ No newline at end of file diff --git a/extensions/inference-cortex-extension/resources/models/qwen2.5-coder-32b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/qwen2.5-coder-32b-instruct/model.json deleted file mode 100644 index cffdf03df..000000000 --- a/extensions/inference-cortex-extension/resources/models/qwen2.5-coder-32b-instruct/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "Qwen2.5-Coder-32B-Instruct-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/Qwen2.5-Coder-32B-Instruct-GGUF/resolve/main/Qwen2.5-Coder-32B-Instruct-Q4_K_M.gguf" - } - ], - "id": "qwen2.5-coder-32b-instruct", - "object": "model", - "name": "Qwen2.5 Coder 32B Instruct Q4", - "version": "1.0", - "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models. 
Significantly improvements in code generation, code reasoning and code fixing.", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "Qwen2.5-Coder-32B-Instruct-Q4_K_M.gguf", - "ngl": 29 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "stop": ["<|endoftext|>", "<|im_end|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "QwenLM", - "tags": ["32B", "Featured"], - "size": 19900000000 - }, - "engine": "llama-cpp" - } - \ No newline at end of file diff --git a/extensions/inference-cortex-extension/resources/models/qwen2.5-coder-7b-instruct/model.json b/extensions/inference-cortex-extension/resources/models/qwen2.5-coder-7b-instruct/model.json deleted file mode 100644 index 9162c8a43..000000000 --- a/extensions/inference-cortex-extension/resources/models/qwen2.5-coder-7b-instruct/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf", - "url": "https://huggingface.co/bartowski/Qwen2.5-Coder-7B-Instruct-GGUF/resolve/main/Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf" - } - ], - "id": "qwen2.5-coder-7b-instruct", - "object": "model", - "name": "Qwen2.5 Coder 7B Instruct Q4", - "version": "1.0", - "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models. 
Significantly improvements in code generation, code reasoning and code fixing.", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf", - "ngl": 29 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "stop": ["<|endoftext|>", "<|im_end|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "QwenLM", - "tags": ["7B", "Featured"], - "size": 4680000000 - }, - "engine": "llama-cpp" - } - \ No newline at end of file diff --git a/extensions/inference-cortex-extension/resources/models/stable-zephyr-3b/model.json b/extensions/inference-cortex-extension/resources/models/stable-zephyr-3b/model.json deleted file mode 100644 index a6c84bd17..000000000 --- a/extensions/inference-cortex-extension/resources/models/stable-zephyr-3b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "url": "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q8_0.gguf", - "filename": "stablelm-zephyr-3b.Q8_0.gguf" - } - ], - "id": "stable-zephyr-3b", - "object": "model", - "name": "Stable Zephyr 3B Q8", - "version": "1.1", - "description": "StableLM Zephyr 3B is a best model for low-end machine.", - "format": "gguf", - "settings": { - "ctx_len": 4096, - "prompt_template": "<|user|>\n{prompt}<|endoftext|>\n<|assistant|>", - "llama_model_path": "stablelm-zephyr-3b.Q8_0.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 4096, - "stop": ["<|endoftext|>"], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "StabilityAI", - "tags": ["3B", "Finetuned", "Tiny"], - "size": 2970000000 - }, - "engine": "llama-cpp" - } \ No newline at end of file diff --git 
a/extensions/inference-cortex-extension/resources/models/stealth-v1.2-7b/model.json b/extensions/inference-cortex-extension/resources/models/stealth-v1.2-7b/model.json deleted file mode 100644 index ffb32922e..000000000 --- a/extensions/inference-cortex-extension/resources/models/stealth-v1.2-7b/model.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "sources": [ - { - "filename": "stealth-v1.3.Q4_K_M.gguf", - "url": "https://huggingface.co/janhq/stealth-v1.3-GGUF/resolve/main/stealth-v1.3.Q4_K_M.gguf" - } - ], - "id": "stealth-v1.2-7b", - "object": "model", - "name": "Stealth 7B Q4", - "version": "1.2", - "description": "This is a new experimental family designed to enhance Mathematical and Logical abilities.", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "stealth-v1.3.Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Jan", - "tags": ["7B", "Finetuned"], - "size": 4370000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/tinyllama-1.1b/model.json b/extensions/inference-cortex-extension/resources/models/tinyllama-1.1b/model.json deleted file mode 100644 index b6aeea3e3..000000000 --- a/extensions/inference-cortex-extension/resources/models/tinyllama-1.1b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf", - "url": "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf" - } - ], - "id": "tinyllama-1.1b", - "object": "model", - "name": "TinyLlama Chat 1.1B Q4", - "version": "1.1", - "description": "TinyLlama is a tiny model with only 1.1B. 
It's a good model for less powerful computers.", - "format": "gguf", - "settings": { - "ctx_len": 4096, - "prompt_template": "<|system|>\n{system_message}<|user|>\n{prompt}<|assistant|>", - "llama_model_path": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf", - "ngl": 23 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 2048, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "TinyLlama", - "tags": ["Tiny", "Foundation Model"], - "size": 669000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/trinity-v1.2-7b/model.json b/extensions/inference-cortex-extension/resources/models/trinity-v1.2-7b/model.json deleted file mode 100644 index fae5d0ca5..000000000 --- a/extensions/inference-cortex-extension/resources/models/trinity-v1.2-7b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "trinity-v1.2.Q4_K_M.gguf", - "url": "https://huggingface.co/janhq/trinity-v1.2-GGUF/resolve/main/trinity-v1.2.Q4_K_M.gguf" - } - ], - "id": "trinity-v1.2-7b", - "object": "model", - "name": "Trinity-v1.2 7B Q4", - "version": "1.2", - "description": "Trinity is an experimental model merge using the Slerp method. 
Recommended for daily assistance purposes.", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "trinity-v1.2.Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Jan", - "tags": ["7B", "Merged"], - "size": 4370000000, - "cover": "https://raw.githubusercontent.com/janhq/jan/dev/models/trinity-v1.2-7b/cover.png" - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/vistral-7b/model.json b/extensions/inference-cortex-extension/resources/models/vistral-7b/model.json deleted file mode 100644 index 46b6999a6..000000000 --- a/extensions/inference-cortex-extension/resources/models/vistral-7b/model.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sources": [ - { - "filename": "vistral-7b-chat-dpo.Q4_K_M.gguf", - "url": "https://huggingface.co/janhq/vistral-7b-chat-dpo-GGUF/resolve/main/vistral-7b-chat-dpo.Q4_K_M.gguf" - } - ], - "id": "vistral-7b", - "object": "model", - "name": "Vistral 7B Q4", - "version": "1.2", - "description": "Vistral 7B has a deep understanding of Vietnamese.", - "format": "gguf", - "settings": { - "ctx_len": 32768, - "prompt_template": "[INST] <>\n{system_message}\n<>\n{prompt} [/INST]", - "llama_model_path": "vistral-7b-chat-dpo.Q4_K_M.gguf", - "ngl": 33 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 32768, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "Viet Mistral, Jan", - "tags": ["7B", "Finetuned"], - "size": 4410000000 - }, - "engine": "llama-cpp" - } - \ No newline at end of file diff --git a/extensions/inference-cortex-extension/resources/models/wizardcoder-13b/model.json 
b/extensions/inference-cortex-extension/resources/models/wizardcoder-13b/model.json deleted file mode 100644 index cf39ad857..000000000 --- a/extensions/inference-cortex-extension/resources/models/wizardcoder-13b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "wizardcoder-python-13b-v1.0.Q4_K_M.gguf", - "url": "https://huggingface.co/TheBloke/WizardCoder-Python-13B-V1.0-GGUF/resolve/main/wizardcoder-python-13b-v1.0.Q4_K_M.gguf" - } - ], - "id": "wizardcoder-13b", - "object": "model", - "name": "Wizard Coder Python 13B Q4", - "version": "1.2", - "description": "WizardCoder 13B is a Python coding model. This model demonstrate high proficiency in specific domains like coding and mathematics.", - "format": "gguf", - "settings": { - "ctx_len": 16384, - "prompt_template": "### Instruction:\n{prompt}\n### Response:", - "llama_model_path": "wizardcoder-python-13b-v1.0.Q4_K_M.gguf", - "ngl": 41 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 16384, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "WizardLM, The Bloke", - "tags": ["Recommended", "13B", "Finetuned"], - "size": 7870000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/resources/models/yi-34b/model.json b/extensions/inference-cortex-extension/resources/models/yi-34b/model.json deleted file mode 100644 index 4f56650d7..000000000 --- a/extensions/inference-cortex-extension/resources/models/yi-34b/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sources": [ - { - "filename": "yi-34b-chat.Q4_K_M.gguf", - "url": "https://huggingface.co/TheBloke/Yi-34B-Chat-GGUF/resolve/main/yi-34b-chat.Q4_K_M.gguf" - } - ], - "id": "yi-34b", - "object": "model", - "name": "Yi 34B Q4", - "version": "1.1", - "description": "Yi-34B, a specialized chat model, is known for its diverse and creative responses and excels across various NLP tasks and benchmarks.", - "format": 
"gguf", - "settings": { - "ctx_len": 4096, - "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", - "llama_model_path": "yi-34b-chat.Q4_K_M.gguf", - "ngl": 61 - }, - "parameters": { - "temperature": 0.7, - "top_p": 0.95, - "stream": true, - "max_tokens": 4096, - "stop": [], - "frequency_penalty": 0, - "presence_penalty": 0 - }, - "metadata": { - "author": "01-ai, The Bloke", - "tags": ["34B", "Foundational Model"], - "size": 20660000000 - }, - "engine": "llama-cpp" -} diff --git a/extensions/inference-cortex-extension/rolldown.config.mjs b/extensions/inference-cortex-extension/rolldown.config.mjs index 278664d3d..ef4c56c7b 100644 --- a/extensions/inference-cortex-extension/rolldown.config.mjs +++ b/extensions/inference-cortex-extension/rolldown.config.mjs @@ -1,53 +1,6 @@ import { defineConfig } from 'rolldown' import packageJson from './package.json' with { type: 'json' } import defaultSettingJson from './resources/default_settings.json' with { type: 'json' } -import bakllavaJson from './resources/models/bakllava-1/model.json' with { type: 'json' } -import codeninja7bJson from './resources/models/codeninja-1.0-7b/model.json' with { type: 'json' } -import commandr34bJson from './resources/models/command-r-34b/model.json' with { type: 'json' } -import deepseekCoder13bJson from './resources/models/deepseek-coder-1.3b/model.json' with { type: 'json' } -import deepseekCoder34bJson from './resources/models/deepseek-coder-34b/model.json' with { type: 'json' } -import gemma112bJson from './resources/models/gemma-1.1-2b/model.json' with { type: 'json' } -import gemma117bJson from './resources/models/gemma-1.1-7b/model.json' with { type: 'json' } -import llama2Chat70bJson from './resources/models/llama2-chat-70b/model.json' with { type: 'json' } -import llama2Chat7bJson from './resources/models/llama2-chat-7b/model.json' with { type: 'json' } -import llamacorn1bJson from 
'./resources/models/llamacorn-1.1b/model.json' with { type: 'json' } -import llava13bJson from './resources/models/llava-13b/model.json' with { type: 'json' } -import llava7bJson from './resources/models/llava-7b/model.json' with { type: 'json' } -import mistralIns7bq4Json from './resources/models/mistral-ins-7b-q4/model.json' with { type: 'json' } -import mixtral8x7bInstructJson from './resources/models/mixtral-8x7b-instruct/model.json' with { type: 'json' } -import noromaid7bJson from './resources/models/noromaid-7b/model.json' with { type: 'json' } -import openchat357bJson from './resources/models/openchat-3.5-7b/model.json' with { type: 'json' } -import phi3bJson from './resources/models/phi3-3.8b/model.json' with { type: 'json' } -import phind34bJson from './resources/models/phind-34b/model.json' with { type: 'json' } -import qwen7bJson from './resources/models/qwen-7b/model.json' with { type: 'json' } -import stableZephyr3bJson from './resources/models/stable-zephyr-3b/model.json' with { type: 'json' } -import stealthv127bJson from './resources/models/stealth-v1.2-7b/model.json' with { type: 'json' } -import tinyllama11bJson from './resources/models/tinyllama-1.1b/model.json' with { type: 'json' } -import trinityv127bJson from './resources/models/trinity-v1.2-7b/model.json' with { type: 'json' } -import vistral7bJson from './resources/models/vistral-7b/model.json' with { type: 'json' } -import wizardcoder13bJson from './resources/models/wizardcoder-13b/model.json' with { type: 'json' } -import yi34bJson from './resources/models/yi-34b/model.json' with { type: 'json' } -import llama3Json from './resources/models/llama3-8b-instruct/model.json' with { type: 'json' } -import llama3Hermes8bJson from './resources/models/llama3-hermes-8b/model.json' with { type: 'json' } -import aya8bJson from './resources/models/aya-23-8b/model.json' with { type: 'json' } -import aya35bJson from './resources/models/aya-23-35b/model.json' with { type: 'json' } -import phimediumJson 
from './resources/models/phi3-medium/model.json' with { type: 'json' } -import codestralJson from './resources/models/codestral-22b/model.json' with { type: 'json' } -import qwen2Json from './resources/models/qwen2-7b/model.json' with { type: 'json' } -import llama318bJson from './resources/models/llama3.1-8b-instruct/model.json' with { type: 'json' } -import llama3170bJson from './resources/models/llama3.1-70b-instruct/model.json' with { type: 'json' } -import gemma22bJson from './resources/models/gemma-2-2b/model.json' with { type: 'json' } -import gemma29bJson from './resources/models/gemma-2-9b/model.json' with { type: 'json' } -import gemma227bJson from './resources/models/gemma-2-27b/model.json' with { type: 'json' } -import llama321bJson from './resources/models/llama3.2-1b-instruct/model.json' with { type: 'json' } -import llama323bJson from './resources/models/llama3.2-3b-instruct/model.json' with { type: 'json' } -import qwen257bJson from './resources/models/qwen2.5-7b-instruct/model.json' with { type: 'json' } -import qwen25coder7bJson from './resources/models/qwen2.5-coder-7b-instruct/model.json' with { type: 'json' } -import qwen25coder14bJson from './resources/models/qwen2.5-coder-14b-instruct/model.json' with { type: 'json' } -import qwen25coder32bJson from './resources/models/qwen2.5-coder-32b-instruct/model.json' with { type: 'json' } -import qwen2514bJson from './resources/models/qwen2.5-14b-instruct/model.json' with { type: 'json' } -import qwen2532bJson from './resources/models/qwen2.5-32b-instruct/model.json' with { type: 'json' } -import qwen2572bJson from './resources/models/qwen2.5-72b-instruct/model.json' with { type: 'json' } export default defineConfig([ { @@ -58,65 +11,20 @@ export default defineConfig([ }, platform: 'browser', define: { - MODELS: JSON.stringify([ - bakllavaJson, - codeninja7bJson, - commandr34bJson, - deepseekCoder13bJson, - deepseekCoder34bJson, - gemma112bJson, - gemma117bJson, - llama2Chat70bJson, - llama2Chat7bJson, 
- llamacorn1bJson, - llava13bJson, - llava7bJson, - mistralIns7bq4Json, - mixtral8x7bInstructJson, - noromaid7bJson, - openchat357bJson, - phi3bJson, - phind34bJson, - qwen7bJson, - stableZephyr3bJson, - stealthv127bJson, - tinyllama11bJson, - trinityv127bJson, - vistral7bJson, - wizardcoder13bJson, - yi34bJson, - llama3Json, - llama3Hermes8bJson, - phimediumJson, - aya8bJson, - aya35bJson, - codestralJson, - qwen2Json, - llama318bJson, - llama3170bJson, - gemma22bJson, - gemma29bJson, - gemma227bJson, - llama321bJson, - llama323bJson, - qwen257bJson, - qwen25coder7bJson, - qwen25coder14bJson, - qwen25coder32bJson, - qwen2514bJson, - qwen2532bJson, - qwen2572bJson, - ]), NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`), SETTINGS: JSON.stringify(defaultSettingJson), - CORTEX_API_URL: JSON.stringify('http://127.0.0.1:39291'), - CORTEX_SOCKET_URL: JSON.stringify('ws://127.0.0.1:39291'), - CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.49'), + CORTEX_API_URL: JSON.stringify( + `http://127.0.0.1:${process.env.CORTEX_API_PORT ?? '39291'}` + ), + CORTEX_SOCKET_URL: JSON.stringify( + `ws://127.0.0.1:${process.env.CORTEX_API_PORT ?? '39291'}` + ), + CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.55'), }, }, { input: 'src/node/index.ts', - external: ['@janhq/core/node', 'cpu-instructions'], + external: ['@janhq/core/node'], output: { format: 'cjs', file: 'dist/node/index.cjs.js', @@ -126,6 +34,11 @@ export default defineConfig([ resolve: { extensions: ['.js', '.ts', '.json'], }, + define: { + CORTEX_API_URL: JSON.stringify( + `http://127.0.0.1:${process.env.CORTEX_API_PORT ?? 
'39291'}` + ), + }, platform: 'node', }, ]) diff --git a/extensions/inference-cortex-extension/src/@types/global.d.ts b/extensions/inference-cortex-extension/src/@types/global.d.ts index 2de432c29..52f97b9ab 100644 --- a/extensions/inference-cortex-extension/src/@types/global.d.ts +++ b/extensions/inference-cortex-extension/src/@types/global.d.ts @@ -3,4 +3,3 @@ declare const CORTEX_API_URL: string declare const CORTEX_SOCKET_URL: string declare const CORTEX_ENGINE_VERSION: string declare const SETTINGS: any -declare const MODELS: any diff --git a/extensions/inference-cortex-extension/src/index.test.ts b/extensions/inference-cortex-extension/src/index.test.ts new file mode 100644 index 000000000..9726400e7 --- /dev/null +++ b/extensions/inference-cortex-extension/src/index.test.ts @@ -0,0 +1,452 @@ +import { describe, beforeEach, it, expect, vi, afterEach } from 'vitest' + +// Must mock before imports +vi.mock('@janhq/core', () => { + return { + executeOnMain: vi.fn().mockResolvedValue({}), + events: { + emit: vi.fn() + }, + extractModelLoadParams: vi.fn().mockReturnValue({}), + ModelEvent: { + OnModelsUpdate: 'OnModelsUpdate', + OnModelStopped: 'OnModelStopped' + }, + EngineEvent: { + OnEngineUpdate: 'OnEngineUpdate' + }, + InferenceEngine: { + cortex: 'cortex', + nitro: 'nitro', + cortex_llamacpp: 'cortex_llamacpp' + }, + LocalOAIEngine: class LocalOAIEngine { + onLoad() {} + onUnload() {} + } + } +}) + +import JanInferenceCortexExtension, { Settings } from './index' +import { InferenceEngine, ModelEvent, EngineEvent, executeOnMain, events } from '@janhq/core' +import ky from 'ky' + +// Mock global variables +const CORTEX_API_URL = 'http://localhost:3000' +const CORTEX_SOCKET_URL = 'ws://localhost:3000' +const SETTINGS = [ + { id: 'n_parallel', name: 'Parallel Execution', description: 'Number of parallel executions', type: 'number', value: '4' }, + { id: 'cont_batching', name: 'Continuous Batching', description: 'Enable continuous batching', type: 'boolean', 
value: true }, + { id: 'caching_enabled', name: 'Caching', description: 'Enable caching', type: 'boolean', value: true }, + { id: 'flash_attn', name: 'Flash Attention', description: 'Enable flash attention', type: 'boolean', value: true }, + { id: 'cache_type', name: 'Cache Type', description: 'Type of cache to use', type: 'string', value: 'f16' }, + { id: 'use_mmap', name: 'Use Memory Map', description: 'Use memory mapping', type: 'boolean', value: true }, + { id: 'cpu_threads', name: 'CPU Threads', description: 'Number of CPU threads', type: 'number', value: '' } +] +const NODE = 'node' + +// Mock globals +vi.stubGlobal('CORTEX_API_URL', CORTEX_API_URL) +vi.stubGlobal('CORTEX_SOCKET_URL', CORTEX_SOCKET_URL) +vi.stubGlobal('SETTINGS', SETTINGS) +vi.stubGlobal('NODE', NODE) +vi.stubGlobal('window', { + addEventListener: vi.fn() +}) + +// Mock WebSocket +class MockWebSocket { + url :string + listeners: {} + onclose: Function + + constructor(url) { + this.url = url + this.listeners = {} + } + + addEventListener(event, listener) { + this.listeners[event] = listener + } + + emit(event, data) { + if (this.listeners[event]) { + this.listeners[event](data) + } + } + + close() { + if (this.onclose) { + this.onclose({ code: 1000 }) + } + } +} + +// Mock global WebSocket +// @ts-ignore +global.WebSocket = vi.fn().mockImplementation((url) => new MockWebSocket(url)) + +describe('JanInferenceCortexExtension', () => { + let extension + + beforeEach(() => { + // Reset mocks + vi.clearAllMocks() + + // Create a new instance for each test + extension = new JanInferenceCortexExtension() + + // Mock the getSetting method + extension.getSetting = vi.fn().mockImplementation((key, defaultValue) => { + switch(key) { + case Settings.n_parallel: + return '4' + case Settings.cont_batching: + return true + case Settings.caching_enabled: + return true + case Settings.flash_attn: + return true + case Settings.cache_type: + return 'f16' + case Settings.use_mmap: + return true + case 
Settings.cpu_threads: + return '' + default: + return defaultValue + } + }) + + // Mock methods + extension.registerSettings = vi.fn() + extension.onLoad = vi.fn() + extension.clean = vi.fn().mockResolvedValue({}) + extension.healthz = vi.fn().mockResolvedValue({}) + extension.subscribeToEvents = vi.fn() + }) + + describe('onSettingUpdate', () => { + it('should update n_parallel setting correctly', () => { + extension.onSettingUpdate(Settings.n_parallel, '8') + expect(extension.n_parallel).toBe(8) + }) + + it('should update cont_batching setting correctly', () => { + extension.onSettingUpdate(Settings.cont_batching, false) + expect(extension.cont_batching).toBe(false) + }) + + it('should update caching_enabled setting correctly', () => { + extension.onSettingUpdate(Settings.caching_enabled, false) + expect(extension.caching_enabled).toBe(false) + }) + + it('should update flash_attn setting correctly', () => { + extension.onSettingUpdate(Settings.flash_attn, false) + expect(extension.flash_attn).toBe(false) + }) + + it('should update cache_type setting correctly', () => { + extension.onSettingUpdate(Settings.cache_type, 'f32') + expect(extension.cache_type).toBe('f32') + }) + + it('should update use_mmap setting correctly', () => { + extension.onSettingUpdate(Settings.use_mmap, false) + expect(extension.use_mmap).toBe(false) + }) + + it('should update cpu_threads setting correctly', () => { + extension.onSettingUpdate(Settings.cpu_threads, '4') + expect(extension.cpu_threads).toBe(4) + }) + + it('should not update cpu_threads when value is not a number', () => { + extension.cpu_threads = undefined + extension.onSettingUpdate(Settings.cpu_threads, 'not-a-number') + expect(extension.cpu_threads).toBeUndefined() + }) + }) + + describe('onUnload', () => { + it('should clean up resources correctly', async () => { + extension.shouldReconnect = true + + await extension.onUnload() + + expect(extension.shouldReconnect).toBe(false) + expect(extension.clean).toHaveBeenCalled() 
+ expect(executeOnMain).toHaveBeenCalledWith(NODE, 'dispose') + }) + }) + + describe('loadModel', () => { + it('should remove llama_model_path and mmproj from settings', async () => { + // Setup + const model = { + id: 'test-model', + settings: { + llama_model_path: '/path/to/model', + mmproj: '/path/to/mmproj', + some_setting: 'value' + }, + engine: InferenceEngine.cortex_llamacpp + } + + // Mock ky.post + vi.spyOn(ky, 'post').mockImplementation(() => ({ + // @ts-ignore + json: () => Promise.resolve({}), + catch: () => ({ + finally: () => ({ + // @ts-ignore + then: () => Promise.resolve({}) + }) + }) + })) + + // Setup queue for testing + extension.queue = { add: vi.fn(fn => fn()) } + + // Execute + await extension.loadModel(model) + + // Verify settings were filtered + expect(model.settings).not.toHaveProperty('llama_model_path') + expect(model.settings).not.toHaveProperty('mmproj') + expect(model.settings).toHaveProperty('some_setting') + }) + + it('should convert nitro to cortex_llamacpp engine', async () => { + // Setup + const model = { + id: 'test-model', + settings: {}, + engine: InferenceEngine.nitro + } + + // Mock ky.post + const mockKyPost = vi.spyOn(ky, 'post').mockImplementation(() => ({ + // @ts-ignore + json: () => Promise.resolve({}), + catch: () => ({ + finally: () => ({ + // @ts-ignore + then: () => Promise.resolve({}) + }) + }) + })) + + // Setup queue for testing + extension.queue = { add: vi.fn(fn => fn()) } + + // Execute + await extension.loadModel(model) + + // Verify API call + expect(mockKyPost).toHaveBeenCalledWith( + `${CORTEX_API_URL}/v1/models/start`, + expect.objectContaining({ + json: expect.objectContaining({ + engine: InferenceEngine.cortex_llamacpp + }) + }) + ) + }) + }) + + describe('unloadModel', () => { + it('should call the correct API endpoint and abort loading if in progress', async () => { + // Setup + const model = { id: 'test-model' } + const mockAbort = vi.fn() + extension.abortControllers.set(model.id, { abort: 
mockAbort }) + + // Mock ky.post + const mockKyPost = vi.spyOn(ky, 'post').mockImplementation(() => ({ + // @ts-ignore + json: () => Promise.resolve({}), + finally: () => ({ + // @ts-ignore + then: () => Promise.resolve({}) + }) + })) + + // Execute + await extension.unloadModel(model) + + // Verify API call + expect(mockKyPost).toHaveBeenCalledWith( + `${CORTEX_API_URL}/v1/models/stop`, + expect.objectContaining({ + json: { model: model.id } + }) + ) + + // Verify abort controller was called + expect(mockAbort).toHaveBeenCalled() + }) + }) + + describe('clean', () => { + it('should make a DELETE request to destroy process manager', async () => { + // Mock the ky.delete function directly + const mockDelete = vi.fn().mockReturnValue({ + catch: vi.fn().mockReturnValue(Promise.resolve({})) + }) + + // Replace the original implementation + vi.spyOn(ky, 'delete').mockImplementation(mockDelete) + + // Override the clean method to use the real implementation + // @ts-ignore + extension.clean = JanInferenceCortexExtension.prototype.clean + + // Call the method + await extension.clean() + + // Verify the correct API call was made + expect(mockDelete).toHaveBeenCalledWith( + `${CORTEX_API_URL}/processmanager/destroy`, + expect.objectContaining({ + timeout: 2000, + retry: expect.objectContaining({ + limit: 0 + }) + }) + ) + }) + }) + + describe('WebSocket events', () => { + it('should handle WebSocket events correctly', () => { + // Create a mock implementation for subscribeToEvents that stores the socket + let messageHandler; + let closeHandler; + + // Override the private method + extension.subscribeToEvents = function() { + this.socket = new MockWebSocket('ws://localhost:3000/events'); + this.socket.addEventListener('message', (event) => { + const data = JSON.parse(event.data); + + // Store for testing + messageHandler = data; + + const transferred = data.task.items.reduce( + (acc, cur) => acc + cur.downloadedBytes, + 0 + ); + const total = data.task.items.reduce( + (acc, 
cur) => acc + cur.bytes, + 0 + ); + const percent = total > 0 ? transferred / total : 0; + + events.emit( + data.type === 'DownloadUpdated' ? 'onFileDownloadUpdate' : + data.type === 'DownloadSuccess' ? 'onFileDownloadSuccess' : + data.type, + { + modelId: data.task.id, + percent: percent, + size: { + transferred: transferred, + total: total, + }, + downloadType: data.task.type, + } + ); + + if (data.task.type === 'Engine') { + events.emit(EngineEvent.OnEngineUpdate, { + type: data.type, + percent: percent, + id: data.task.id, + }); + } + else if (data.type === 'DownloadSuccess') { + setTimeout(() => { + events.emit(ModelEvent.OnModelsUpdate, { + fetch: true, + }); + }, 500); + } + }); + + this.socket.onclose = (event) => { + closeHandler = event; + // Notify app to update model running state + events.emit(ModelEvent.OnModelStopped, {}); + }; + }; + + // Setup queue + extension.queue = { + add: vi.fn(fn => fn()) + }; + + // Execute the method + extension.subscribeToEvents(); + + // Simulate a message event + extension.socket.listeners.message({ + data: JSON.stringify({ + type: 'DownloadUpdated', + task: { + id: 'test-model', + type: 'Model', + items: [ + { downloadedBytes: 50, bytes: 100 } + ] + } + }) + }); + + // Verify event emission + expect(events.emit).toHaveBeenCalledWith( + 'onFileDownloadUpdate', + expect.objectContaining({ + modelId: 'test-model', + percent: 0.5 + }) + ); + + // Simulate a download success event + vi.useFakeTimers(); + extension.socket.listeners.message({ + data: JSON.stringify({ + type: 'DownloadSuccess', + task: { + id: 'test-model', + type: 'Model', + items: [ + { downloadedBytes: 100, bytes: 100 } + ] + } + }) + }); + + // Fast-forward time to trigger the timeout + vi.advanceTimersByTime(500); + + // Verify the ModelEvent.OnModelsUpdate event was emitted + expect(events.emit).toHaveBeenCalledWith( + ModelEvent.OnModelsUpdate, + { fetch: true } + ); + + vi.useRealTimers(); + + // Trigger websocket close + extension.socket.onclose({ 
code: 1000 }); + + // Verify OnModelStopped event was emitted + expect(events.emit).toHaveBeenCalledWith( + ModelEvent.OnModelStopped, + {} + ); + }); + }) +}) \ No newline at end of file diff --git a/extensions/inference-cortex-extension/src/index.ts b/extensions/inference-cortex-extension/src/index.ts index 84cc49b94..7ed51f9c2 100644 --- a/extensions/inference-cortex-extension/src/index.ts +++ b/extensions/inference-cortex-extension/src/index.ts @@ -10,19 +10,14 @@ import { Model, executeOnMain, EngineEvent, - systemInformation, - joinPath, LocalOAIEngine, InferenceEngine, - getJanDataFolderPath, extractModelLoadParams, - fs, events, ModelEvent, - dirName, } from '@janhq/core' import PQueue from 'p-queue' -import ky from 'ky' +import ky, { KyInstance } from 'ky' /** * Event subscription types of Downloader @@ -80,14 +75,37 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { abortControllers = new Map() + api?: KyInstance /** - * Subscribes to events emitted by the @janhq/core package. + * Get the API instance + * @returns + */ + async apiInstance(): Promise { + if(this.api) return this.api + const apiKey = (await window.core?.api.appToken()) ?? 'cortex.cpp' + this.api = ky.extend({ + prefixUrl: CORTEX_API_URL, + headers: { + Authorization: `Bearer ${apiKey}`, + }, + }) + return this.api + } + + /** + * Authorization headers for the API requests. + * @returns + */ + headers(): Promise { + return window.core?.api.appToken().then((token: string) => ({ + Authorization: `Bearer ${token}`, + })) + } + + /** + * Called when the extension is loaded. 
*/ async onLoad() { - const models = MODELS as Model[] - - this.registerModels(models) - super.onLoad() // Register Settings @@ -112,8 +130,8 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { if (!Number.isNaN(threads_number)) this.cpu_threads = threads_number // Run the process watchdog - const systemInfo = await systemInformation() - this.queue.add(() => executeOnMain(NODE, 'run', systemInfo)) + // const systemInfo = await systemInformation() + this.queue.add(() => executeOnMain(NODE, 'run')) this.queue.add(() => this.healthz()) this.subscribeToEvents() @@ -152,81 +170,59 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { override async loadModel( model: Model & { file_path?: string } ): Promise { - if ( - (model.engine === InferenceEngine.nitro || model.settings.vision_model) && - model.settings.llama_model_path - ) { - // Legacy chat model support - model.settings = { - ...model.settings, - llama_model_path: await getModelFilePath( - model, - model.settings.llama_model_path - ), - } - } else { - const { llama_model_path, ...settings } = model.settings - model.settings = settings - } + // Cortex will handle these settings + const { llama_model_path, mmproj, ...settings } = model.settings + model.settings = settings - if ( - (model.engine === InferenceEngine.nitro || model.settings.vision_model) && - model.settings.mmproj - ) { - // Legacy clip vision model support - model.settings = { - ...model.settings, - mmproj: await getModelFilePath(model, model.settings.mmproj), - } - } else { - const { mmproj, ...settings } = model.settings - model.settings = settings - } const controller = new AbortController() const { signal } = controller this.abortControllers.set(model.id, controller) return await this.queue.add(() => - ky - .post(`${CORTEX_API_URL}/v1/models/start`, { - json: { - ...extractModelLoadParams(model.settings), - model: model.id, - engine: - model.engine === InferenceEngine.nitro // Legacy model cache - 
? InferenceEngine.cortex_llamacpp - : model.engine, - cont_batching: this.cont_batching, - n_parallel: this.n_parallel, - caching_enabled: this.caching_enabled, - flash_attn: this.flash_attn, - cache_type: this.cache_type, - use_mmap: this.use_mmap, - ...(this.cpu_threads ? { cpu_threads: this.cpu_threads } : {}), - }, - timeout: false, - signal, - }) - .json() - .catch(async (e) => { - throw (await e.response?.json()) ?? e - }) - .finally(() => this.abortControllers.delete(model.id)) - .then() + this.apiInstance().then((api) => + api + .post('v1/models/start', { + json: { + ...extractModelLoadParams(model.settings), + model: model.id, + engine: + model.engine === InferenceEngine.nitro // Legacy model cache + ? InferenceEngine.cortex_llamacpp + : model.engine, + cont_batching: this.cont_batching, + n_parallel: this.n_parallel, + caching_enabled: this.caching_enabled, + flash_attn: this.flash_attn, + cache_type: this.cache_type, + use_mmap: this.use_mmap, + ...(this.cpu_threads ? { cpu_threads: this.cpu_threads } : {}), + }, + timeout: false, + signal, + }) + .json() + .catch(async (e) => { + throw (await e.response?.json()) ?? 
e + }) + .finally(() => this.abortControllers.delete(model.id)) + .then() + ) ) } override async unloadModel(model: Model): Promise { - return ky - .post(`${CORTEX_API_URL}/v1/models/stop`, { - json: { model: model.id }, - }) - .json() - .finally(() => { - this.abortControllers.get(model.id)?.abort() - }) - .then() + return this.apiInstance().then((api) => + api + .post('v1/models/stop', { + json: { model: model.id }, + }) + .json() + .finally(() => { + this.abortControllers.get(model.id)?.abort() + }) + .then() + ) } /** @@ -234,15 +230,17 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { * @returns */ private async healthz(): Promise { - return ky - .get(`${CORTEX_API_URL}/healthz`, { - retry: { - limit: 20, - delay: () => 500, - methods: ['get'], - }, - }) - .then(() => {}) + return this.apiInstance().then((api) => + api + .get('healthz', { + retry: { + limit: 20, + delay: () => 500, + methods: ['get'], + }, + }) + .then(() => {}) + ) } /** @@ -250,13 +248,15 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { * @returns */ private async clean(): Promise { - return ky - .delete(`${CORTEX_API_URL}/processmanager/destroy`, { - timeout: 2000, // maximum 2 seconds - retry: { - limit: 0, - }, - }) + return this.apiInstance() + .then((api) => + api.delete('processmanager/destroy', { + timeout: 2000, // maximum 2 seconds + retry: { + limit: 0, + }, + }) + ) .catch(() => { // Do nothing }) @@ -339,22 +339,3 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { ) } } - -/// Legacy -const getModelFilePath = async ( - model: Model & { file_path?: string }, - file: string -): Promise => { - // Symlink to the model file - if ( - !model.sources[0]?.url.startsWith('http') && - (await fs.existsSync(model.sources[0].url)) - ) { - return model.sources[0]?.url - } - if (model.file_path) { - await joinPath([await dirName(model.file_path), file]) - } - return joinPath([await getJanDataFolderPath(), 'models', 
model.id, file]) -} -/// diff --git a/extensions/inference-cortex-extension/src/node/index.test.ts b/extensions/inference-cortex-extension/src/node/index.test.ts index bdfd99d03..6a1e168f3 100644 --- a/extensions/inference-cortex-extension/src/node/index.test.ts +++ b/extensions/inference-cortex-extension/src/node/index.test.ts @@ -1,6 +1,14 @@ -jest.mock('@janhq/core/node', () => ({ - ...jest.requireActual('@janhq/core/node'), +import { describe, it, expect, vi } from 'vitest' +// Mocks + +const CORTEX_API_URL = 'http://localhost:3000' +vi.stubGlobal('CORTEX_API_URL', CORTEX_API_URL) + +vi.mock('@janhq/core/node', (actual) => ({ + ...actual(), getJanDataFolderPath: () => '', + appResourcePath: () => '/mock/path', + log: vi.fn(), getSystemResourceInfo: () => { return { cpu: { @@ -30,25 +38,36 @@ jest.mock('@janhq/core/node', () => ({ }, })) -jest.mock('fs', () => ({ +vi.mock('fs', () => ({ default: { readdirSync: () => [], }, })) -jest.mock('child_process', () => ({ +vi.mock('./watchdog', () => { + return { + ProcessWatchdog: vi.fn().mockImplementation(() => { + return { + start: vi.fn(), + terminate: vi.fn(), + } + }), + } +}) + +vi.mock('child_process', () => ({ exec: () => { return { - stdout: { on: jest.fn() }, - stderr: { on: jest.fn() }, - on: jest.fn(), + stdout: { on: vi.fn() }, + stderr: { on: vi.fn() }, + on: vi.fn(), } }, spawn: () => { return { - stdout: { on: jest.fn() }, - stderr: { on: jest.fn() }, - on: jest.fn(), + stdout: { on: vi.fn() }, + stderr: { on: vi.fn() }, + on: vi.fn(), pid: '111', } }, @@ -56,28 +75,70 @@ jest.mock('child_process', () => ({ import index from './index' -describe('dispose', () => { - it('should dispose a model successfully on Mac', async () => { - Object.defineProperty(process, 'platform', { - value: 'darwin', +describe('Cortex extension node interface', () => { + describe('run', () => { + it('should start the cortex subprocess on macOS', async () => { + Object.defineProperty(process, 'platform', { + value: 'darwin', + }) 
+ + const result = await index.run() + expect(result).toBeUndefined() }) - // Call the dispose function - const result = await index.dispose() + it('should start the cortex subprocess on Windows', async () => { + Object.defineProperty(process, 'platform', { + value: 'win32', + }) - // Assert that the result is as expected - expect(result).toBeUndefined() + const result = await index.run() + expect(result).toBeUndefined() + }) + + it('should set the proper environment variables based on platform', async () => { + // Test for Windows + Object.defineProperty(process, 'platform', { + value: 'win32', + }) + process.env.PATH = '/original/path' + + await index.run() + expect(process.env.PATH).toContain('/original/path') + + // Test for non-Windows (macOS/Linux) + Object.defineProperty(process, 'platform', { + value: 'darwin', + }) + process.env.LD_LIBRARY_PATH = '/original/ld/path' + + await index.run() + expect(process.env.LD_LIBRARY_PATH).toContain('/original/ld/path') + }) }) - it('should kill the subprocess successfully on Windows', async () => { - Object.defineProperty(process, 'platform', { - value: 'win32', + describe('dispose', () => { + it('should dispose a model successfully on Mac', async () => { + Object.defineProperty(process, 'platform', { + value: 'darwin', + }) + + // Call the dispose function + const result = index.dispose() + + // Assert that the result is as expected + expect(result).toBeUndefined() }) - // Call the killSubprocess function - const result = await index.dispose() + it('should kill the subprocess successfully on Windows', async () => { + Object.defineProperty(process, 'platform', { + value: 'win32', + }) - // Assert that the result is as expected - expect(result).toBeUndefined() + // Call the dispose function + const result = index.dispose() + + // Assert that the result is as expected + expect(result).toBeUndefined() + }) }) }) diff --git a/extensions/inference-cortex-extension/src/node/index.ts 
b/extensions/inference-cortex-extension/src/node/index.ts index 420c84b6e..d82225745 100644 --- a/extensions/inference-cortex-extension/src/node/index.ts +++ b/extensions/inference-cortex-extension/src/node/index.ts @@ -1,30 +1,27 @@ import path from 'path' -import { - appResourcePath, - getJanDataFolderPath, - log, - SystemInformation, -} from '@janhq/core/node' +import { appResourcePath, getJanDataFolderPath, log } from '@janhq/core/node' import { ProcessWatchdog } from './watchdog' -import { readdir, symlink } from 'fs/promises' -// The HOST address to use for the Nitro subprocess -const LOCAL_PORT = '39291' let watchdog: ProcessWatchdog | undefined = undefined /** * Spawns a Nitro subprocess. * @returns A promise that resolves when the Nitro subprocess is started. */ -function run(systemInfo?: SystemInformation): Promise { +function run(): Promise { log(`[CORTEX]:: Spawning cortex subprocess...`) return new Promise(async (resolve, reject) => { - let gpuVisibleDevices = systemInfo?.gpuSetting?.gpus_in_use.join(',') ?? '' - let binaryName = `cortex-server${process.platform === 'win32' ? '.exe' : ''}` + // let gpuVisibleDevices = systemInfo?.gpuSetting?.gpus_in_use.join(',') ?? '' + let binaryName = `cortex-server${ + process.platform === 'win32' ? '.exe' : '' + }` const binPath = path.join(__dirname, '..', 'bin') const executablePath = path.join(binPath, binaryName) + + addEnvPaths(binPath) + const sharedPath = path.join(appResourcePath(), 'shared') // Execute the binary log(`[CORTEX]:: Spawn cortex at path: ${executablePath}`) @@ -34,6 +31,9 @@ function run(systemInfo?: SystemInformation): Promise { watchdog.terminate() } + // The HOST address to use for the cortex subprocess + const LOCAL_PORT = CORTEX_API_URL.split(':').pop() ?? 
'39291' + watchdog = new ProcessWatchdog( executablePath, [ @@ -44,15 +44,18 @@ function run(systemInfo?: SystemInformation): Promise { `${path.join(dataFolderPath, '.janrc')}`, '--data_folder_path', dataFolderPath, + 'config', + '--api_keys', + process.env.appToken ?? 'cortex.cpp', ], { env: { ...process.env, - CUDA_VISIBLE_DEVICES: gpuVisibleDevices, - // Vulkan - Support 1 device at a time for now - ...(gpuVisibleDevices?.length > 0 && { - GGML_VK_VISIBLE_DEVICES: gpuVisibleDevices, - }), + // CUDA_VISIBLE_DEVICES: gpuVisibleDevices, + // // Vulkan - Support 1 device at a time for now + // ...(gpuVisibleDevices?.length > 0 && { + // GGML_VK_VISIBLE_DEVICES: gpuVisibleDevices, + // }), }, cwd: sharedPath, } @@ -71,6 +74,22 @@ function dispose() { watchdog?.terminate() } +/** + * Set the environment paths for the cortex subprocess + * @param dest + */ +function addEnvPaths(dest: string) { + // Add engine path to the PATH and LD_LIBRARY_PATH + if (process.platform === 'win32') { + process.env.PATH = (process.env.PATH || '').concat(path.delimiter, dest) + } else { + process.env.LD_LIBRARY_PATH = (process.env.LD_LIBRARY_PATH || '').concat( + path.delimiter, + dest + ) + } +} + /** * Cortex process info */ diff --git a/extensions/model-extension/README.md b/extensions/model-extension/README.md index f9690da09..b9595b6e1 100644 --- a/extensions/model-extension/README.md +++ b/extensions/model-extension/README.md @@ -70,6 +70,6 @@ There are a few things to keep in mind when writing your extension code: ``` For more information about the Jan Extension Core module, see the - [documentation](https://github.com/janhq/jan/blob/main/core/README.md). + [documentation](https://github.com/menloresearch/jan/blob/main/core/README.md). So, what are you waiting for? Go ahead and start customizing your extension! 
diff --git a/extensions/model-extension/jest.config.js b/extensions/model-extension/jest.config.js deleted file mode 100644 index 3e32adceb..000000000 --- a/extensions/model-extension/jest.config.js +++ /dev/null @@ -1,9 +0,0 @@ -/** @type {import('ts-jest').JestConfigWithTsJest} */ -module.exports = { - preset: 'ts-jest', - testEnvironment: 'node', - transform: { - 'node_modules/@janhq/core/.+\\.(j|t)s?$': 'ts-jest', - }, - transformIgnorePatterns: ['node_modules/(?!@janhq/core/.*)'], -} diff --git a/extensions/model-extension/package.json b/extensions/model-extension/package.json index abd0e28a0..32ef2f70c 100644 --- a/extensions/model-extension/package.json +++ b/extensions/model-extension/package.json @@ -1,13 +1,13 @@ { "name": "@janhq/model-extension", "productName": "Model Management", - "version": "1.0.35", + "version": "1.0.36", "description": "Handles model lists, their details, and settings.", "main": "dist/index.js", "author": "Jan ", "license": "AGPL-3.0", "scripts": { - "test": "jest", + "test": "vitest run", "build": "rolldown -c rolldown.config.mjs", "build:publish": "rimraf *.tgz --glob || true && yarn build && npm pack && cpx *.tgz ../../pre-install" }, @@ -16,8 +16,8 @@ "rimraf": "^3.0.2", "rolldown": "1.0.0-beta.1", "run-script-os": "^1.1.6", - "ts-loader": "^9.5.0", - "typescript": "5.3.3" + "typescript": "5.3.3", + "vitest": "^3.0.6" }, "files": [ "dist/*", diff --git a/extensions/model-extension/resources/default.json b/extensions/model-extension/resources/default.json index 7d58c5598..ce17c9616 100644 --- a/extensions/model-extension/resources/default.json +++ b/extensions/model-extension/resources/default.json @@ -1,79 +1,6 @@ [ { - "id": "cortexso/deepseek-r1-distill-llama-70b", - "metadata": { - "_id": "678fe1673b0a6384a4e1f887", - "author": "cortexso", - "cardData": { - "license": "mit" - }, - "createdAt": "2025-01-21T18:03:19.000Z", - "description": "---\nlicense: mit\n---\n\n## Overview\n\n**DeepSeek** developed and released the 
[DeepSeek R1 Distill Llama 70B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B) model, a distilled version of the Llama 70B language model. This model represents the pinnacle of the DeepSeek R1 Distill series, designed for exceptional performance in text generation, dialogue tasks, and advanced reasoning, offering unparalleled capabilities for large-scale AI applications.\n\nThe model is ideal for enterprise-grade applications, research, conversational AI, and large-scale knowledge systems, providing top-tier accuracy, safety, and efficiency.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [gguf](https://huggingface.co/cortexso/deepseek-r1-distill-llama-70b/tree/main) | `cortex run deepseek-r1-distill-llama-70b` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```text\n cortexso/deepseek-r1-distill-llama-70b\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. 
Run the model with command:\n ```bash\n cortex run deepseek-r1-distill-llama-70b\n ```\n\n## Credits\n\n- **Author:** DeepSeek\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B#7-license)\n- **Papers:** [DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning](https://arxiv.org/html/2501.12948v1)\n", - "disabled": false, - "downloads": 6, - "gated": false, - "id": "cortexso/deepseek-r1-distill-llama-70b", - "inference": "library-not-detected", - "lastModified": "2025-01-23T08:58:56.000Z", - "likes": 0, - "model-index": null, - "modelId": "cortexso/deepseek-r1-distill-llama-70b", - "private": false, - "sha": "59faddbe48125c56544917c3faff6c9f688167ee", - "siblings": [ - { - "rfilename": ".gitattributes" - }, - { - "rfilename": "README.md" - }, - { - "rfilename": "metadata.yml" - }, - { - "rfilename": "model.yml" - } - ], - "spaces": [], - "tags": ["license:mit", "region:us"], - "usedStorage": 310170138880 - }, - "models": [ - { - "id": "deepseek-r1-distill-llama-70b:70b-gguf-q2-k", - "size": 26375110432 - }, - { - "id": "deepseek-r1-distill-llama-70b:70b-gguf-q3-ks", - "size": 30912053024 - }, - { - "id": "deepseek-r1-distill-llama-70b:70b-gguf-q4-km", - "size": 42520395552 - }, - { - "id": "deepseek-r1-distill-llama-70b:70b-gguf-q5-ks", - "size": 48657448736 - }, - { - "id": "deepseek-r1-distill-llama-70b:70b-gguf-q3-km", - "size": 34267496224 - }, - { - "id": "deepseek-r1-distill-llama-70b:70b-gguf-q5-km", - "size": 49949818656 - }, - { - "id": "deepseek-r1-distill-llama-70b:70b-gguf-q3-kl", - "size": 37140594464 - }, - { - "id": "deepseek-r1-distill-llama-70b:70b-gguf-q4-ks", - "size": 40347221792 - } - ] - }, - { + "author": "CohereForAI", "id": "cortexso/command-r", "metadata": { "_id": "66751b98585f2bf57092b2ae", @@ -82,12 +9,11 @@ "license": "cc-by-nc-4.0" }, "createdAt": "2024-06-21T06:20:08.000Z", - "description": 
"---\nlicense: cc-by-nc-4.0\n---\n\n## Overview\n\nC4AI Command-R is a research release of a 35 billion parameter highly performant generative model. Command-R is a large language model with open weights optimized for a variety of use cases including reasoning, summarization, and question answering. Command-R has the capability for multilingual generation evaluated in 10 languages and highly performant RAG capabilities.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [35b-gguf](https://huggingface.co/cortexhub/command-r/tree/35b-gguf) | `cortex run command-r:35b-gguf` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```\n cortexhub/command-r\n ```\n \n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. Run the model with command:\n ```\n cortex run command-r\n ```\n \n## Credits\n\n- **Author:** Cohere For AI: [cohere.for.ai](https://cohere.for.ai/)\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [Licence](https://cohere.com/c4ai-cc-by-nc-license)", + "description": "---\nlicense: cc-by-nc-4.0\npipeline_tag: text-generation\ntags:\n- cortex.cpp\n---\n\n## Overview\n\nC4AI Command-R is a research release of a 35 billion parameter highly performant generative model. Command-R is a large language model with open weights optimized for a variety of use cases including reasoning, summarization, and question answering. Command-R has the capability for multilingual generation evaluated in 10 languages and highly performant RAG capabilities.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [Command-r-32b](https://huggingface.co/cortexhub/command-r/tree/32b) | `cortex run command-r:32b` |\n| 1 | [Command-r-35b](https://huggingface.co/cortexhub/command-r/tree/35b) | `cortex run command-r:35b` |\n\n## Use it with Jan (UI)\n\n1. 
Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```bash\n cortexhub/command-r\n ```\n \n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. Run the model with command:\n ```bash\n cortex run command-r\n ```\n \n## Credits\n\n- **Author:** Cohere For AI\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [Licence](https://cohere.com/c4ai-cc-by-nc-license)", "disabled": false, - "downloads": 9, + "downloads": 14, "gated": false, "id": "cortexso/command-r", - "inference": "library-not-detected", "lastModified": "2024-11-12T20:13:19.000Z", "likes": 1, "model-index": null, @@ -114,220 +40,57 @@ }, "models": [ { - "id": "command-r:gguf", - "size": 21527041888 - }, - { - "id": "command-r:32b-gguf-q2-k", - "size": 12810767424 - }, - { - "id": "command-r:32b-gguf-q3-ks", - "size": 14708689984 - }, - { - "id": "command-r:32b-gguf-q3-kl", - "size": 17563438144 - }, - { - "id": "command-r:32b-gguf-q6-k", - "size": 26505169984 - }, - { - "id": "command-r:32b-gguf-q4-ks", - "size": 18849516608 + "id": "command-r:32b-gguf-q8-0", + "size": 34326891584 }, { "id": "command-r:35b-gguf", "size": 21527041888 }, { - "id": "command-r:32b-gguf-q4-km", - "size": 19800837184 + "id": "command-r:32b-gguf-q6-k", + "size": 26505169984 }, { "id": "command-r:32b-gguf-q5-km", "size": 23051422784 }, { - "id": "command-r:32b-gguf-q3-km", - "size": 16231746624 - }, - { - "id": "command-r:32b-gguf-q8-0", - "size": 34326891584 + "id": "command-r:32b-gguf-q4-km", + "size": 19800837184 }, { "id": "command-r:32b-gguf-q5-ks", "size": 22494366784 + }, + { + "id": "command-r:32b-gguf-q2-k", + "size": 12810767424 + }, + { + "id": "command-r:32b-gguf-q3-kl", + "size": 17563438144 + }, + { + "id": "command-r:gguf", + "size": 21527041888 + }, + { + "id": "command-r:32b-gguf-q3-ks", + "size": 14708689984 + }, + { + "id": "command-r:32b-gguf-q3-km", + "size": 16231746624 + }, + 
{ + "id": "command-r:32b-gguf-q4-ks", + "size": 18849516608 } ] }, { - "id": "cortexso/deepseek-r1-distill-qwen-7b", - "metadata": { - "_id": "6790a5b2044aeb2bd5922877", - "author": "cortexso", - "cardData": { - "license": "mit" - }, - "createdAt": "2025-01-22T08:00:50.000Z", - "description": "---\nlicense: mit\n---\n\n## Overview\n\n**DeepSeek** developed and released the [DeepSeek R1 Distill Qwen 7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B) model, a distilled version of the Qwen 7B language model. This version is fine-tuned for high-performance text generation and optimized for dialogue and information-seeking tasks, providing even greater capabilities with its larger size compared to the 7B variant.\n\nThe model is designed for applications in customer support, conversational AI, and research, focusing on delivering accurate, helpful, and safe outputs while maintaining efficiency.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [gguf](https://huggingface.co/cortexso/deepseek-r1-distill-qwen-7b/tree/main) | `cortex run deepseek-r1-distill-qwen-7b` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```text\n cortexso/deepseek-r1-distill-qwen-7b\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. 
Run the model with command:\n ```bash\n cortex run deepseek-r1-distill-qwen-7b\n ```\n\n## Credits\n\n- **Author:** DeepSeek\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B#7-license)\n- **Papers:** [DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning](https://arxiv.org/html/2501.12948v1)\n", - "disabled": false, - "downloads": 0, - "gated": false, - "id": "cortexso/deepseek-r1-distill-qwen-7b", - "inference": "library-not-detected", - "lastModified": "2025-01-23T08:43:37.000Z", - "likes": 0, - "model-index": null, - "modelId": "cortexso/deepseek-r1-distill-qwen-7b", - "private": false, - "sha": "bbe804804125f9ace206eecd2e3040d8034189a6", - "siblings": [ - { - "rfilename": ".gitattributes" - }, - { - "rfilename": "README.md" - }, - { - "rfilename": "metadata.yml" - }, - { - "rfilename": "model.yml" - } - ], - "spaces": [], - "tags": ["license:mit", "region:us"], - "usedStorage": 48658728896 - }, - "models": [ - { - "id": "deepseek-r1-distill-qwen-7b:7b-gguf-q2-k", - "size": 3015939680 - }, - { - "id": "deepseek-r1-distill-qwen-7b:7b-gguf-q3-ks", - "size": 3492367968 - }, - { - "id": "deepseek-r1-distill-qwen-7b:7b-gguf-q4-ks", - "size": 4457768544 - }, - { - "id": "deepseek-r1-distill-qwen-7b:7b-gguf-q4-km", - "size": 4683073120 - }, - { - "id": "deepseek-r1-distill-qwen-7b:7b-gguf-q8-0", - "size": 8098524768 - }, - { - "id": "deepseek-r1-distill-qwen-7b:7b-gguf-q5-ks", - "size": 5315176032 - }, - { - "id": "deepseek-r1-distill-qwen-7b:7b-gguf-q3-kl", - "size": 4088458848 - }, - { - "id": "deepseek-r1-distill-qwen-7b:7b-gguf-q6-k", - "size": 6254198368 - }, - { - "id": "deepseek-r1-distill-qwen-7b:7b-gguf-q5-km", - "size": 5444830816 - }, - { - "id": "deepseek-r1-distill-qwen-7b:7b-gguf-q3-km", - "size": 3808390752 - } - ] - }, - { - "id": "cortexso/deepseek-r1-distill-qwen-14b", - "metadata": { - "_id": 
"678fdf2be186002cc0ba006e", - "author": "cortexso", - "cardData": { - "license": "mit" - }, - "createdAt": "2025-01-21T17:53:47.000Z", - "description": "---\nlicense: mit\n---\n\n## Overview\n\n**DeepSeek** developed and released the [DeepSeek R1 Distill Qwen 14B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B) model, a distilled version of the Qwen 14B language model. This variant represents the largest and most powerful model in the DeepSeek R1 Distill series, fine-tuned for high-performance text generation, dialogue optimization, and advanced reasoning tasks. \n\nThe model is designed for applications that require extensive understanding, such as conversational AI, research, large-scale knowledge systems, and customer service, providing superior performance in accuracy, efficiency, and safety.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [gguf](https://huggingface.co/cortexso/deepseek-r1-distill-qwen-14b/tree/main) | `cortex run deepseek-r1-distill-qwen-14b` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```text\n cortexso/deepseek-r1-distill-qwen-14b\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. 
Run the model with command:\n ```bash\n cortex run deepseek-r1-distill-qwen-14b\n ```\n\n## Credits\n\n- **Author:** DeepSeek\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B#7-license)\n- **Papers:** [DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning](https://arxiv.org/html/2501.12948v1)\n", - "disabled": false, - "downloads": 12, - "gated": false, - "id": "cortexso/deepseek-r1-distill-qwen-14b", - "inference": "library-not-detected", - "lastModified": "2025-01-23T08:48:43.000Z", - "likes": 0, - "model-index": null, - "modelId": "cortexso/deepseek-r1-distill-qwen-14b", - "private": false, - "sha": "6ff0420f0bf32454e6b28180989d6b14687e19e6", - "siblings": [ - { - "rfilename": ".gitattributes" - }, - { - "rfilename": "README.md" - }, - { - "rfilename": "metadata.yml" - }, - { - "rfilename": "model.yml" - } - ], - "spaces": [], - "tags": ["license:mit", "region:us"], - "usedStorage": 93857311040 - }, - "models": [ - { - "id": "deepseek-r1-distill-qwen-14b:14b-gguf-q3-kl", - "size": 7924767776 - }, - { - "id": "deepseek-r1-distill-qwen-14b:14b-gguf-q2-k", - "size": 5770497056 - }, - { - "id": "deepseek-r1-distill-qwen-14b:14b-gguf-q4-ks", - "size": 8573430816 - }, - { - "id": "deepseek-r1-distill-qwen-14b:14b-gguf-q3-ks", - "size": 6659595296 - }, - { - "id": "deepseek-r1-distill-qwen-14b:14b-gguf-q4-km", - "size": 8988109856 - }, - { - "id": "deepseek-r1-distill-qwen-14b:14b-gguf-q6-k", - "size": 12124683296 - }, - { - "id": "deepseek-r1-distill-qwen-14b:14b-gguf-q5-ks", - "size": 10266553376 - }, - { - "id": "deepseek-r1-distill-qwen-14b:14b-gguf-q3-km", - "size": 7339203616 - }, - { - "id": "deepseek-r1-distill-qwen-14b:14b-gguf-q5-km", - "size": 10508872736 - }, - { - "id": "deepseek-r1-distill-qwen-14b:14b-gguf-q8-0", - "size": 15701597216 - } - ] - }, - { + "author": "Google", "id": "cortexso/gemma2", "metadata": { "_id": 
"66b06c37491b555fefe0a0bf", @@ -336,12 +99,11 @@ "license": "gemma" }, "createdAt": "2024-08-05T06:07:51.000Z", - "description": "---\nlicense: gemma\n---\n\n## Overview\n\nThe [Gemma](https://huggingface.co/google/gemma-2-2b-it), state-of-the-art open model trained with the Gemma datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties. The model belongs to the Gemma family with the 4B, 7B version in two variants 8K and 128K which is the context length (in tokens) that it can support.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [2b-gguf](https://huggingface.co/cortexso/gemma2/tree/2b-gguf) | `cortex run gemma:2b-gguf` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```\n cortexso/gemma2\n ```\n \n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. Run the model with command:\n ```\n cortex run gemma2\n ```\n \n## Credits\n\n- **Author:** Go\u200cogle\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://ai.google.dev/gemma/terms)\n- **Papers:** [Gemma Technical Report](https://arxiv.org/abs/2403.08295)", + "description": "---\nlicense: gemma\npipeline_tag: text-generation\ntags:\n- cortex.cpp\n---\n\n## Overview\n\nThe [Gemma](https://huggingface.co/google/gemma-2-2b-it), state-of-the-art open model trained with the Gemma datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties. 
The model belongs to the Gemma family with the 4B, 7B version in two variants 8K and 128K which is the context length (in tokens) that it can support.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [Gemma2-2b](https://huggingface.co/cortexso/gemma2/tree/2b) | `cortex run gemma2:2b` |\n| 2 | [Gemma2-9b](https://huggingface.co/cortexso/gemma2/tree/9b) | `cortex run gemma2:9b` |\n| 3 | [Gemma2-27b](https://huggingface.co/cortexso/gemma2/tree/27b) | `cortex run gemma2:27b` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```bash\n cortexso/gemma2\n ```\n \n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. Run the model with command:\n ```bash\n cortex run gemma2\n ```\n \n## Credits\n\n- **Author:** Go\u200cogle\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://ai.google.dev/gemma/terms)\n- **Papers:** [Gemma Technical Report](https://arxiv.org/abs/2403.08295)", "disabled": false, - "downloads": 284, + "downloads": 190, "gated": false, "id": "cortexso/gemma2", - "inference": "library-not-detected", "lastModified": "2024-11-12T20:13:02.000Z", "likes": 0, "model-index": null, @@ -367,33 +129,13 @@ "usedStorage": 265964141287 }, "models": [ - { - "id": "gemma2:2b-gguf-q3-km", - "size": 1461667584 - }, - { - "id": "gemma2:2b-gguf-q4-km", - "size": 1708582656 - }, { "id": "gemma2:2b-gguf-q6-k", "size": 2151393024 }, { - "id": "gemma2:2b-gguf-q3-ks", - "size": 1360660224 - }, - { - "id": "gemma2:2b-gguf-q8-0", - "size": 2784495360 - }, - { - "id": "gemma2:2b-gguf-q4-ks", - "size": 1638651648 - }, - { - "id": "gemma2:9b-gguf-q3-ks", - "size": 4337665120 + "id": "gemma2:2b-gguf-q3-km", + "size": 1461667584 }, { "id": "gemma2:gguf", @@ -403,50 +145,18 @@ "id": "gemma2:9b-gguf-q4-km", "size": 5761057888 }, + { + "id": "gemma2:9b-gguf-q3-ks", + "size": 
4337665120 + }, + { + "id": "gemma2:2b-gguf-q4-ks", + "size": 1638651648 + }, { "id": "gemma2:9b-gguf-q5-ks", "size": 6483592288 }, - { - "id": "gemma2:9b-gguf-q5-km", - "size": 6647366752 - }, - { - "id": "gemma2:2b-gguf-q5-km", - "size": 1923278592 - }, - { - "id": "gemma2:27b-gguf-q2-k", - "size": 10449575584 - }, - { - "id": "gemma2:onnx", - "size": 1708582496 - }, - { - "id": "gemma2:27b-gguf-q3-kl", - "size": 14519361184 - }, - { - "id": "gemma2:9b-gguf-q6-k", - "size": 7589069920 - }, - { - "id": "gemma2:27b-gguf-q3-ks", - "size": 12169060000 - }, - { - "id": "gemma2:27b-gguf-q3-km", - "size": 13424647840 - }, - { - "id": "gemma2:9b-gguf-q4-ks", - "size": 5478925408 - }, - { - "id": "gemma2:27b-gguf-q4-km", - "size": 16645381792 - }, { "id": "gemma2:9b-gguf-q3-km", "size": 4761781344 @@ -459,33 +169,53 @@ "id": "gemma2:27b-gguf-q5-ks", "size": 18884206240 }, - { - "id": "gemma2:2b-gguf-q3-kl", - "size": 1550436096 - }, { "id": "gemma2:9b-gguf-q2-k", "size": 3805398112 }, + { + "id": "gemma2:27b-gguf-q3-km", + "size": 13424647840 + }, { "id": "gemma2:2b-gguf", "size": 1708582496 }, { - "id": "gemma2:27b-gguf-q5-km", - "size": 19408117408 + "id": "gemma2:onnx", + "size": 1708582496 + }, + { + "id": "gemma2:27b-gguf-q4-km", + "size": 16645381792 + }, + { + "id": "gemma2:9b-gguf-q5-km", + "size": 6647366752 + }, + { + "id": "gemma2:27b-gguf-q2-k", + "size": 10449575584 + }, + { + "id": "gemma2:9b-gguf-q4-ks", + "size": 5478925408 + }, + { + "id": "gemma2:27b-gguf-q3-ks", + "size": 12169060000 }, { "id": "gemma2:2b-gguf-q2-k", "size": 1229829888 }, { - "id": "gemma2:27b-gguf-q6-k", - "size": 22343524000 + "id": "gemma2:2b-gguf-q4-km", + "size": 1708582656 }, { - "id": "gemma2:2b-gguf-q5-ks", - "size": 1882543872 + "id": "gemma2:27b-gguf-q4-ks", + "size": 15739264672 }, { "id": "gemma2:9b-gguf-q8-0", @@ -496,12 +226,45 @@ "size": 28937387680 }, { - "id": "gemma2:27b-gguf-q4-ks", - "size": 15739264672 + "id": "gemma2:9b-gguf-q6-k", + "size": 7589069920 + }, + { + 
"id": "gemma2:2b-gguf-q8-0", + "size": 2784495360 + }, + { + "id": "gemma2:27b-gguf-q5-km", + "size": 19408117408 + }, + { + "id": "gemma2:2b-gguf-q3-kl", + "size": 1550436096 + }, + { + "id": "gemma2:27b-gguf-q6-k", + "size": 22343524000 + }, + { + "id": "gemma2:2b-gguf-q3-ks", + "size": 1360660224 + }, + { + "id": "gemma2:27b-gguf-q3-kl", + "size": 14519361184 + }, + { + "id": "gemma2:2b-gguf-q5-ks", + "size": 1882543872 + }, + { + "id": "gemma2:2b-gguf-q5-km", + "size": 1923278592 } ] }, { + "author": "CohereForAI", "id": "cortexso/aya", "metadata": { "_id": "66790e21db26e8589ccd3816", @@ -510,12 +273,11 @@ "license": "apache-2.0" }, "createdAt": "2024-06-24T06:11:45.000Z", - "description": "---\nlicense: apache-2.0\n---\n\n## Overview\n\nThe Aya model is a massively multilingual generative language model that follows instructions in 101 languages.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [12.9b-gguf](https://huggingface.co/cortexhub/aya/tree/12.9b-gguf) | `cortex run aya:12.9b-gguf` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```\n cortexhub/aya\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. Run the model with command:\n ```\n cortex run aya\n ```\n\n## Credits\n\n- **Author:** [Cohere For AI](https://cohere.for.ai)\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)", + "description": "---\nlicense: cc-by-sa-4.0\npipeline_tag: text-generation\ntags:\n- cortex.cpp\n---\n\n## Overview\n\nAya Expanse is an open-weight research release of a model with highly advanced multilingual capabilities. It focuses on pairing a highly performant pre-trained Command family of models with the result of a year\u2019s dedicated research from Cohere For AI, including data arbitrage, multilingual preference training, safety tuning, and model merging. 
The result is a powerful multilingual large language model serving 23 languages.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [main](https://huggingface.co/cortexso/aya-expanse/tree/main) | `cortex run aya-expanse` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```\n cortexso/aya-expanse\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. Run the model with command:\n ```\n cortex run aya-expanse\n ```\n\n## Credits\n\n- **Author:** CohereAI\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://cohere.com/c4ai-cc-by-nc-license)\n- **Papers:** [Aya Expanse Blog](https://cohere.com/blog/aya-expanse-connecting-our-world)", "disabled": false, - "downloads": 11, + "downloads": 25, "gated": false, "id": "cortexso/aya", - "inference": "library-not-detected", "lastModified": "2024-11-12T20:24:22.000Z", "likes": 0, "model-index": null, @@ -552,26 +314,37 @@ ] }, { + "author": "Qwen", "id": "cortexso/qwen2.5", "metadata": { "_id": "671d0d55748faf685e6450a3", "author": "cortexso", "cardData": { - "license": "apache-2.0" + "license": "apache-2.0", + "pipeline_tag": "text-generation", + "tags": ["cortex.cpp"] }, "createdAt": "2024-10-26T15:40:05.000Z", - "description": "---\nlicense: apache-2.0\n---\n\n## Overview\n\nQwen2.5 by Qwen is a family of model include various specialized models for coding and mathematics available in multiple sizes from 0.5B to 72B parameters\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [main/default](https://huggingface.co/cortexso/qwen2.5/tree/main) | `cortex run qwen2.5` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```\n cortexso/qwen2.5\n ```\n\n## Use it with Cortex (CLI)\n\n1. 
Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. Run the model with command:\n ```\n cortex run qwen2.5\n ```\n\n## Credits\n\n- **Author:** Qwen\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\n- **Papers:** [Qwen2.5 Blog](https://qwenlm.github.io/blog/qwen2.5/)", + "description": "---\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- cortex.cpp\n---\n\n## Overview\n\nQwen2.5 by Qwen is a family of model include various specialized models for coding and mathematics available in multiple sizes from 0.5B to 72B parameters\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [Qwen-2.5-0.5b](https://huggingface.co/cortexso/qwen2.5/tree/0.5b) | `cortex run qwen2.5:0.5b` |\n| 2 | [Qwen-2.5-1.5b](https://huggingface.co/cortexso/qwen2.5/tree/1.5b) | `cortex run qwen2.5:1.5b` |\n| 3 | [Qwen-2.5-3b](https://huggingface.co/cortexso/qwen2.5/tree/3b) | `cortex run qwen2.5:3b` |\n| 4 | [Qwen-2.5-7b](https://huggingface.co/cortexso/qwen2.5/tree/7b) | `cortex run qwen2.5:7b` |\n| 5 | [Qwen-2.5-14b](https://huggingface.co/cortexso/qwen2.5/tree/14b) | `cortex run qwen2.5:14b` |\n| 6 | [Qwen-2.5-32b](https://huggingface.co/cortexso/qwen2.5/tree/32b) | `cortex run qwen2.5:32b` |\n| 7 | [Qwen-2.5-72b](https://huggingface.co/cortexso/qwen2.5/tree/72b) | `cortex run qwen2.5:72b` |\n| 8 | [Qwen-2.5-coder-1.5b](https://huggingface.co/cortexso/qwen2.5/tree/coder-1.5b) | `cortex run qwen2.5:coder-1.5b` |\n| 9 | [Qwen-2.5-coder-7b](https://huggingface.co/cortexso/qwen2.5/tree/coder-7b) | `cortex run qwen2.5:coder-7b` |\n| 10 | [Qwen-2.5-math-1.5b](https://huggingface.co/cortexso/qwen2.5/tree/math-1.5b) | `cortex run qwen2.5:math-1.5b` |\n| 11 | [Qwen-2.5-math-7b](https://huggingface.co/cortexso/qwen2.5/tree/math-7b) | `cortex run qwen2.5:math-7b` |\n\n## Use it with Jan (UI)\n\n1. 
Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```\n cortexso/qwen2.5\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. Run the model with command:\n ```\n cortex run qwen2.5\n ```\n\n## Credits\n\n- **Author:** Qwen\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License Apache 2.0](https://choosealicense.com/licenses/apache-2.0/)\n- **Papers:** [Qwen2.5 Blog](https://qwenlm.github.io/blog/qwen2.5/)", "disabled": false, - "downloads": 17, + "downloads": 2482, "gated": false, + "gguf": { + "architecture": "qwen2", + "bos_token": "<|endoftext|>", + "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n", + "context_length": 32768, + "eos_token": "<|im_end|>", + "total": 494032768 + }, "id": "cortexso/qwen2.5", - "inference": "library-not-detected", - "lastModified": "2024-10-28T12:59:17.000Z", + "lastModified": "2025-02-25T07:36:34.000Z", "likes": 0, "model-index": null, "modelId": "cortexso/qwen2.5", + "pipeline_tag": "text-generation", "private": false, - "sha": "3b0b7a4bca6aada4c97cc7d8133a8adb11b025fa", + "sha": "7b8b2c31e393f5cf085fe6e535fa5d6ee1cb1c5c", "siblings": [ { "rfilename": ".gitattributes" @@ -584,456 +357,420 @@ }, { "rfilename": "model.yml" + }, + { + "rfilename": "qwen2.5-0.5b-instruct-q2_k.gguf" + }, + { + "rfilename": "qwen2.5-0.5b-instruct-q3_k_l.gguf" + }, + { + "rfilename": 
"qwen2.5-0.5b-instruct-q3_k_m.gguf" + }, + { + "rfilename": "qwen2.5-0.5b-instruct-q3_k_s.gguf" + }, + { + "rfilename": "qwen2.5-0.5b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "qwen2.5-0.5b-instruct-q4_k_s.gguf" + }, + { + "rfilename": "qwen2.5-0.5b-instruct-q5_k_m.gguf" + }, + { + "rfilename": "qwen2.5-0.5b-instruct-q5_k_s.gguf" + }, + { + "rfilename": "qwen2.5-0.5b-instruct-q6_k.gguf" + }, + { + "rfilename": "qwen2.5-0.5b-instruct-q8_0.gguf" + }, + { + "rfilename": "qwen2.5-1.5b-instruct-q2_k.gguf" + }, + { + "rfilename": "qwen2.5-1.5b-instruct-q3_k_l.gguf" + }, + { + "rfilename": "qwen2.5-1.5b-instruct-q3_k_m.gguf" + }, + { + "rfilename": "qwen2.5-1.5b-instruct-q3_k_s.gguf" + }, + { + "rfilename": "qwen2.5-1.5b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "qwen2.5-1.5b-instruct-q4_k_s.gguf" + }, + { + "rfilename": "qwen2.5-1.5b-instruct-q5_k_m.gguf" + }, + { + "rfilename": "qwen2.5-1.5b-instruct-q5_k_s.gguf" + }, + { + "rfilename": "qwen2.5-1.5b-instruct-q6_k.gguf" + }, + { + "rfilename": "qwen2.5-1.5b-instruct-q8_0.gguf" + }, + { + "rfilename": "qwen2.5-14b-instruct-q2_k.gguf" + }, + { + "rfilename": "qwen2.5-14b-instruct-q3_k_l.gguf" + }, + { + "rfilename": "qwen2.5-14b-instruct-q3_k_m.gguf" + }, + { + "rfilename": "qwen2.5-14b-instruct-q3_k_s.gguf" + }, + { + "rfilename": "qwen2.5-14b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "qwen2.5-14b-instruct-q4_k_s.gguf" + }, + { + "rfilename": "qwen2.5-14b-instruct-q5_k_m.gguf" + }, + { + "rfilename": "qwen2.5-14b-instruct-q5_k_s.gguf" + }, + { + "rfilename": "qwen2.5-14b-instruct-q6_k.gguf" + }, + { + "rfilename": "qwen2.5-14b-instruct-q8_0.gguf" + }, + { + "rfilename": "qwen2.5-32b-instruct-q2_k.gguf" + }, + { + "rfilename": "qwen2.5-32b-instruct-q3_k_l.gguf" + }, + { + "rfilename": "qwen2.5-32b-instruct-q3_k_m.gguf" + }, + { + "rfilename": "qwen2.5-32b-instruct-q3_k_s.gguf" + }, + { + "rfilename": "qwen2.5-32b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "qwen2.5-32b-instruct-q4_k_s.gguf" + }, + { + 
"rfilename": "qwen2.5-32b-instruct-q5_k_m.gguf" + }, + { + "rfilename": "qwen2.5-32b-instruct-q5_k_s.gguf" + }, + { + "rfilename": "qwen2.5-32b-instruct-q6_k.gguf" + }, + { + "rfilename": "qwen2.5-32b-instruct-q8_0.gguf" + }, + { + "rfilename": "qwen2.5-3b-instruct-q2_k.gguf" + }, + { + "rfilename": "qwen2.5-3b-instruct-q3_k_l.gguf" + }, + { + "rfilename": "qwen2.5-3b-instruct-q3_k_m.gguf" + }, + { + "rfilename": "qwen2.5-3b-instruct-q3_k_s.gguf" + }, + { + "rfilename": "qwen2.5-3b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "qwen2.5-3b-instruct-q4_k_s.gguf" + }, + { + "rfilename": "qwen2.5-3b-instruct-q5_k_m.gguf" + }, + { + "rfilename": "qwen2.5-3b-instruct-q5_k_s.gguf" + }, + { + "rfilename": "qwen2.5-3b-instruct-q6_k.gguf" + }, + { + "rfilename": "qwen2.5-3b-instruct-q8_0.gguf" + }, + { + "rfilename": "qwen2.5-72b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "qwen2.5-7b-instruct-q2_k.gguf" + }, + { + "rfilename": "qwen2.5-7b-instruct-q3_k_l.gguf" + }, + { + "rfilename": "qwen2.5-7b-instruct-q3_k_m.gguf" + }, + { + "rfilename": "qwen2.5-7b-instruct-q3_k_s.gguf" + }, + { + "rfilename": "qwen2.5-7b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "qwen2.5-7b-instruct-q4_k_s.gguf" + }, + { + "rfilename": "qwen2.5-7b-instruct-q5_k_m.gguf" + }, + { + "rfilename": "qwen2.5-7b-instruct-q5_k_s.gguf" + }, + { + "rfilename": "qwen2.5-7b-instruct-q6_k.gguf" + }, + { + "rfilename": "qwen2.5-7b-instruct-q8_0.gguf" + }, + { + "rfilename": "qwen2.5-coder-1.5b-instruct-q2_k.gguf" + }, + { + "rfilename": "qwen2.5-coder-1.5b-instruct-q3_k_l.gguf" + }, + { + "rfilename": "qwen2.5-coder-1.5b-instruct-q3_k_m.gguf" + }, + { + "rfilename": "qwen2.5-coder-1.5b-instruct-q3_k_s.gguf" + }, + { + "rfilename": "qwen2.5-coder-1.5b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "qwen2.5-coder-1.5b-instruct-q4_k_s.gguf" + }, + { + "rfilename": "qwen2.5-coder-1.5b-instruct-q5_k_m.gguf" + }, + { + "rfilename": "qwen2.5-coder-1.5b-instruct-q5_k_s.gguf" + }, + { + "rfilename": 
"qwen2.5-coder-1.5b-instruct-q6_k.gguf" + }, + { + "rfilename": "qwen2.5-coder-1.5b-instruct-q8_0.gguf" + }, + { + "rfilename": "qwen2.5-coder-7b-instruct-q2_k.gguf" + }, + { + "rfilename": "qwen2.5-coder-7b-instruct-q3_k_l.gguf" + }, + { + "rfilename": "qwen2.5-coder-7b-instruct-q3_k_m.gguf" + }, + { + "rfilename": "qwen2.5-coder-7b-instruct-q3_k_s.gguf" + }, + { + "rfilename": "qwen2.5-coder-7b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "qwen2.5-coder-7b-instruct-q4_k_s.gguf" + }, + { + "rfilename": "qwen2.5-coder-7b-instruct-q5_k_m.gguf" + }, + { + "rfilename": "qwen2.5-coder-7b-instruct-q5_k_s.gguf" + }, + { + "rfilename": "qwen2.5-coder-7b-instruct-q6_k.gguf" + }, + { + "rfilename": "qwen2.5-coder-7b-instruct-q8_0.gguf" + }, + { + "rfilename": "qwen2.5-math-1.5b-instruct-q2_k.gguf" + }, + { + "rfilename": "qwen2.5-math-1.5b-instruct-q3_k_l.gguf" + }, + { + "rfilename": "qwen2.5-math-1.5b-instruct-q3_k_m.gguf" + }, + { + "rfilename": "qwen2.5-math-1.5b-instruct-q3_k_s.gguf" + }, + { + "rfilename": "qwen2.5-math-1.5b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "qwen2.5-math-1.5b-instruct-q4_k_s.gguf" + }, + { + "rfilename": "qwen2.5-math-1.5b-instruct-q5_k_m.gguf" + }, + { + "rfilename": "qwen2.5-math-1.5b-instruct-q5_k_s.gguf" + }, + { + "rfilename": "qwen2.5-math-1.5b-instruct-q6_k.gguf" + }, + { + "rfilename": "qwen2.5-math-1.5b-instruct-q8_0.gguf" + }, + { + "rfilename": "qwen2.5-math-7b-instruct-q2_k.gguf" + }, + { + "rfilename": "qwen2.5-math-7b-instruct-q3_k_l.gguf" + }, + { + "rfilename": "qwen2.5-math-7b-instruct-q3_k_m.gguf" + }, + { + "rfilename": "qwen2.5-math-7b-instruct-q3_k_s.gguf" + }, + { + "rfilename": "qwen2.5-math-7b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "qwen2.5-math-7b-instruct-q4_k_s.gguf" + }, + { + "rfilename": "qwen2.5-math-7b-instruct-q5_k_m.gguf" + }, + { + "rfilename": "qwen2.5-math-7b-instruct-q5_k_s.gguf" + }, + { + "rfilename": "qwen2.5-math-7b-instruct-q6_k.gguf" + }, + { + "rfilename": 
"qwen2.5-math-7b-instruct-q8_0.gguf" } ], "spaces": [], - "tags": ["license:apache-2.0", "region:us"], - "usedStorage": 733469812928 + "tags": [ + "gguf", + "cortex.cpp", + "text-generation", + "license:apache-2.0", + "endpoints_compatible", + "region:us", + "conversational" + ], + "usedStorage": 1466939625856, + "widgetData": [ + { + "text": "Hi, what can you help me with?" + }, + { + "text": "Hey, let's have a conversation!" + }, + { + "text": "Hello there!" + }, + { + "text": "Hey my name is Clara! How are you?" + } + ] }, "models": [ { - "id": "qwen2.5:7b-gguf-q2-k", - "size": 3015940416 - }, - { - "id": "qwen2.5:7b-gguf-q3-ks", - "size": 3492368704 - }, - { - "id": "qwen2.5:7b-gguf-q3-km", - "size": 3808391488 - }, - { - "id": "qwen2.5:7b-gguf-q3-kl", - "size": 4088459584 - }, - { - "id": "qwen2.5:7b-gguf-q4-km", - "size": 4683073856 - }, - { - "id": "qwen2.5:7b-gguf-q5-ks", - "size": 5315176768 - }, - { - "id": "qwen2.5:7b-gguf-q5-km", - "size": 5444831552 - }, - { - "id": "qwen2.5:7b-gguf-q6-k", - "size": 6254199104 - }, - { - "id": "qwen2.5:0.5b-gguf-q3-km", - "size": 355466432 - }, - { - "id": "qwen2.5:0.5b-gguf-q3-kl", - "size": 369358016 - }, - { - "id": "qwen2.5:1.5b-gguf-q2-k", - "size": 676304768 - }, - { - "id": "qwen2.5:0.5b-gguf-q5-km", - "size": 420085952 - }, - { - "id": "qwen2.5:7b-gguf-q8-0", - "size": 8098525504 - }, - { - "id": "qwen2.5:1.5b-gguf-q3-kl", - "size": 880162688 - }, - { - "id": "qwen2.5:1.5b-gguf-q4-km", + "id": "qwen2.5:1.5b", "size": 986048384 }, { - "id": "qwen2.5:1.5b-gguf-q8-0", - "size": 1646572928 - }, - { - "id": "qwen2.5:1.5b-gguf-q5-km", - "size": 1125050240 - }, - { - "id": "qwen2.5:3b-gguf-q3-km", - "size": 1590475584 - }, - { - "id": "qwen2.5:3b-gguf-q4-km", - "size": 1929902912 - }, - { - "id": "qwen2.5:3b-gguf-q5-ks", - "size": 2169666368 - }, - { - "id": "qwen2.5:1.5b-gguf-q4-ks", - "size": 940312448 - }, - { - "id": "qwen2.5:14b-gguf-q4-km", - "size": 8988110592 - }, - { - "id": "qwen2.5:3b-gguf-q6-k", - "size": 
2538158912 - }, - { - "id": "qwen2.5:14b-gguf-q3-kl", - "size": 7924768512 - }, - { - "id": "qwen2.5:coder-7b-gguf-q6-k", - "size": 6254199168 - }, - { - "id": "qwen2.5:14b-gguf-q5-ks", - "size": 10266554112 - }, - { - "id": "qwen2.5:14b-gguf-q5-km", - "size": 10508873472 - }, - { - "id": "qwen2.5:coder-1.5b-gguf-q2-k", - "size": 676304864 - }, - { - "id": "qwen2.5:14b-gguf-q6-k", - "size": 12124684032 - }, - { - "id": "qwen2.5:14b-gguf-q8-0", - "size": 15701597952 - }, - { - "id": "qwen2.5:32b-gguf-q2-k", - "size": 12313098752 - }, - { - "id": "qwen2.5:32b-gguf-q3-km", - "size": 15935048192 - }, - { - "id": "qwen2.5:32b-gguf-q3-kl", - "size": 17247078912 - }, - { - "id": "qwen2.5:32b-gguf-q4-ks", - "size": 18784410112 - }, - { - "id": "qwen2.5:32b-gguf-q5-ks", - "size": 22638254592 - }, - { - "id": "qwen2.5:coder-1.5b-gguf-q5-km", - "size": 1125050336 - }, - { - "id": "qwen2.5:72b-gguf-q2-k", - "size": 29811762464 - }, - { - "id": "qwen2.5:math-7b-gguf-q3-ks", - "size": 3492368704 - }, - { - "id": "qwen2.5:72b-gguf-q3-ks", - "size": 34487788832 - }, - { - "id": "qwen2.5:32b-gguf-q4-km", - "size": 19851336192 - }, - { - "id": "qwen2.5:math-7b-gguf-q3-kl", - "size": 4088459584 - }, - { - "id": "qwen2.5:0.5b-gguf-q4-km", - "size": 397807808 - }, - { - "id": "qwen2.5:3b-gguf-q2-k", - "size": 1274755904 - }, - { - "id": "qwen2.5:0.5b-gguf-q6-k", - "size": 505736384 - }, - { - "id": "qwen2.5:1.5b-gguf-q3-ks", - "size": 760944512 - }, - { - "id": "qwen2.5:72b-gguf-q3-kl", - "size": 39505224992 - }, - { - "id": "qwen2.5:coder-7b-gguf-q2-k", - "size": 3015940480 - }, - { - "id": "qwen2.5:14b-gguf-q2-k", - "size": 5770497792 - }, - { - "id": "qwen2.5:32b-gguf-q3-ks", - "size": 14392330752 - }, - { - "id": "qwen2.5:coder-7b-gguf-q3-ks", - "size": 3492368768 - }, - { - "id": "qwen2.5:coder-1.5b-gguf-q6-k", - "size": 1272739808 - }, - { - "id": "qwen2.5:math-1.5b-gguf-q3-km", - "size": 824178592 - }, - { - "id": "qwen2.5:math-7b-gguf-q6-k", - "size": 6254199104 - }, - { - 
"id": "qwen2.5:coder-7b-gguf-q3-km", - "size": 3808391552 - }, - { - "id": "qwen2.5:coder-7b-gguf-q3-kl", - "size": 4088459648 - }, - { - "id": "qwen2.5:coder-7b-gguf-q4-ks", - "size": 4457769344 - }, - { - "id": "qwen2.5:coder-7b-gguf-q8-0", - "size": 8098525568 - }, - { - "id": "qwen2.5:32b-gguf-q5-km", - "size": 23262157312 - }, - { - "id": "qwen2.5:72b-gguf-q3-km", - "size": 37698725152 - }, - { - "id": "qwen2.5:math-7b-gguf-q3-km", - "size": 3808391488 - }, - { - "id": "qwen2.5:0.5b-gguf-q3-ks", - "size": 338263232 - }, - { - "id": "qwen2.5:coder-7b-gguf-q5-km", - "size": 5444831616 - }, - { - "id": "qwen2.5:coder-1.5b-gguf-q3-km", - "size": 824178656 - }, - { - "id": "qwen2.5:coder-1.5b-gguf-q3-kl", - "size": 880162784 - }, - { - "id": "qwen2.5:72b-gguf-q4-km", - "size": 47415715104 - }, - { - "id": "qwen2.5:3b-gguf-q4-ks", - "size": 1834384192 - }, - { - "id": "qwen2.5:coder-1.5b-gguf-q4-ks", - "size": 940312544 - }, - { - "id": "qwen2.5:coder-1.5b-gguf-q5-ks", - "size": 1098729440 - }, - { - "id": "qwen2.5:3b-gguf-q3-kl", - "size": 1707391808 - }, - { - "id": "qwen2.5:math-1.5b-gguf-q6-k", - "size": 1272739744 - }, - { - "id": "qwen2.5:32b-gguf-q8-0", - "size": 34820884992 - }, - { - "id": "qwen2.5:1.5b-gguf-q6-k", - "size": 1272739712 - }, - { - "id": "qwen2.5:coder-1.5b-gguf-q8-0", - "size": 1646573024 - }, - { - "id": "qwen2.5:math-7b-gguf-q4-km", - "size": 4683073856 - }, - { - "id": "qwen2.5:0.5b-gguf-q8-0", - "size": 531068096 - }, - { - "id": "qwen2.5:math-1.5b-gguf-q3-ks", - "size": 760944544 - }, - { - "id": "qwen2.5:72b-gguf-q4-ks", - "size": 43889222944 - }, - { - "id": "qwen2.5:math-1.5b-gguf-q4-ks", - "size": 940312480 - }, - { - "id": "qwen2.5:math-7b-gguf-q5-ks", - "size": 5315176768 - }, - { - "id": "qwen2.5:math-1.5b-gguf-q5-km", - "size": 1125050272 - }, - { - "id": "qwen2.5:0.5b-gguf-q5-ks", - "size": 412710080 - }, - { - "id": "qwen2.5:3b-gguf-q3-ks", - "size": 1454357312 - }, - { - "id": "qwen2.5:math-1.5b-gguf-q2-k", - "size": 
676304800 - }, - { - "id": "qwen2.5:coder-1.5b-gguf-q3-ks", - "size": 760944608 - }, - { - "id": "qwen2.5:3b-gguf-q5-km", - "size": 2224814912 - }, - { - "id": "qwen2.5:math-1.5b-gguf-q8-0", - "size": 1646572960 - }, - { - "id": "qwen2.5:0.5b-gguf-q2-k", - "size": 338607296 - }, - { - "id": "qwen2.5:14b-gguf-q3-ks", - "size": 6659596032 - }, - { - "id": "qwen2.5:math-1.5b-gguf-q4-km", + "id": "qwen2.5:math-1.5b", "size": 986048416 }, { - "id": "qwen2.5:1.5b-gguf-q3-km", - "size": 824178560 + "id": "qwen2.5:7b", + "size": 4683073856 }, { - "id": "qwen2.5:7b-gguf-q4-ks", - "size": 4457769280 + "id": "qwen2.5:0.5b", + "size": 397807808 }, { - "id": "qwen2.5:1.5b-gguf-q5-ks", - "size": 1098729344 - }, - { - "id": "qwen2.5:coder-1.5b-gguf-q4-km", + "id": "qwen2.5:coder-1.5b", "size": 986048480 }, { - "id": "qwen2.5:math-7b-gguf-q2-k", - "size": 3015940416 + "id": "qwen2.5:32b", + "size": 19851336192 }, { - "id": "qwen2.5:math-7b-gguf-q5-km", - "size": 5444831552 + "id": "qwen2.5:3b", + "size": 1929902912 }, { - "id": "qwen2.5:0.5b-gguf-q4-ks", - "size": 385471680 + "id": "qwen2.5:14b", + "size": 8988110592 }, { - "id": "qwen2.5:coder-7b-gguf-q5-ks", - "size": 5315176832 + "id": "qwen2.5:math-7b", + "size": 4683073856 }, { - "id": "qwen2.5:math-7b-gguf-q4-ks", - "size": 4457769280 + "id": "qwen2.5:72b", + "size": 47415715104 }, { - "id": "qwen2.5:math-7b-gguf-q8-0", - "size": 8098525504 - }, - { - "id": "qwen2.5:3b-gguf-q8-0", - "size": 3285476160 - }, - { - "id": "qwen2.5:14b-gguf-q3-km", - "size": 7339204352 - }, - { - "id": "qwen2.5:math-1.5b-gguf-q3-kl", - "size": 880162720 - }, - { - "id": "qwen2.5:32b-gguf-q6-k", - "size": 26886154752 - }, - { - "id": "qwen2.5:math-1.5b-gguf-q5-ks", - "size": 1098729376 - }, - { - "id": "qwen2.5:coder-7b-gguf-q4-km", + "id": "qwen2.5:coder-7b", "size": 4683073920 + }, + { + "id": "qwen2.5:main", + "size": 8098525504 } ] }, { + "author": "meta-llama", "id": "cortexso/llama3.2", "metadata": { "_id": "66f63309ba963b1db95deaa4", 
"author": "cortexso", "cardData": { - "license": "llama3.2" + "license": "llama3.2", + "pipeline_tag": "text-generation", + "tags": ["cortex.cpp", "featured"] }, "createdAt": "2024-09-27T04:22:33.000Z", - "description": "---\nlicense: llama3.2\n---\n\n## Overview\n\nMeta developed and released the [Meta Llama 3.2](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 1B and 3B sizes (text in/text out). The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 2 | [gguf](https://huggingface.co/cortexso/llama3.2/tree/gguf) | `cortex run llama3.2:gguf` |\n| 3 | [main/default](https://huggingface.co/cortexso/llama3.2/tree/main) | `cortex run llama3.2` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```\n cortexso/llama3.2\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. 
Run the model with command:\n ```\n cortex run llama3.2\n ```\n\n## Credits\n\n- **Author:** Meta\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct/blob/main/LICENSE.txt)\n- **Papers:** [Llama-3.2 Blog](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)", + "description": "---\nlicense: llama3.2\npipeline_tag: text-generation\ntags:\n- cortex.cpp\n- featured\n---\n\n## Overview\n\nMeta developed and released the [Meta Llama 3.2](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 1B and 3B sizes (text in/text out). The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 2 | [gguf](https://huggingface.co/cortexso/llama3.2/tree/main) | `cortex run llama3.2` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```bash\n cortexso/llama3.2\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. 
Run the model with command:\n ```bash\n cortex run llama3.2\n ```\n\n## Credits\n\n- **Author:** Meta\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct/blob/main/LICENSE.txt)\n- **Papers:** [Llama-3.2 Blog](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)", "disabled": false, - "downloads": 422, + "downloads": 761, "gated": false, + "gguf": { + "architecture": "llama", + "bos_token": "<|begin_of_text|>", + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now(\"%d %b %Y\") %}\n {%- else %}\n {%- set date_string = \"26 Jul 2024\" %}\n {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {{- \"<|eot_id|>\" }}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n", + "context_length": 131072, + "eos_token": "<|eot_id|>", + "total": 1235814432 + }, "id": "cortexso/llama3.2", - "inference": "library-not-detected", - "lastModified": "2024-10-07T06:42:49.000Z", + "lastModified": "2025-02-24T10:25:50.000Z", "likes": 0, "model-index": null, "modelId": "cortexso/llama3.2", + "pipeline_tag": "text-generation", "private": false, - "sha": "97784eeed591168e27671d7dd0f8ea68d2e0430c", + "sha": "5aabb7db00af6183d866ff69260db98b55760359", "siblings": [ { "rfilename": ".gitattributes" @@ -1041,6 +778,66 @@ { "rfilename": "README.md" }, + { + "rfilename": "llama-3.2-1b-instruct-q2_k.gguf" + }, + { + 
"rfilename": "llama-3.2-1b-instruct-q3_k_l.gguf" + }, + { + "rfilename": "llama-3.2-1b-instruct-q3_k_m.gguf" + }, + { + "rfilename": "llama-3.2-1b-instruct-q3_k_s.gguf" + }, + { + "rfilename": "llama-3.2-1b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "llama-3.2-1b-instruct-q4_k_s.gguf" + }, + { + "rfilename": "llama-3.2-1b-instruct-q5_k_m.gguf" + }, + { + "rfilename": "llama-3.2-1b-instruct-q5_k_s.gguf" + }, + { + "rfilename": "llama-3.2-1b-instruct-q6_k.gguf" + }, + { + "rfilename": "llama-3.2-1b-instruct-q8_0.gguf" + }, + { + "rfilename": "llama-3.2-3b-instruct-q2_k.gguf" + }, + { + "rfilename": "llama-3.2-3b-instruct-q3_k_l.gguf" + }, + { + "rfilename": "llama-3.2-3b-instruct-q3_k_m.gguf" + }, + { + "rfilename": "llama-3.2-3b-instruct-q3_k_s.gguf" + }, + { + "rfilename": "llama-3.2-3b-instruct-q4_k_m.gguf" + }, + { + "rfilename": "llama-3.2-3b-instruct-q4_k_s.gguf" + }, + { + "rfilename": "llama-3.2-3b-instruct-q5_k_m.gguf" + }, + { + "rfilename": "llama-3.2-3b-instruct-q5_k_s.gguf" + }, + { + "rfilename": "llama-3.2-3b-instruct-q6_k.gguf" + }, + { + "rfilename": "llama-3.2-3b-instruct-q8_0.gguf" + }, { "rfilename": "metadata.yml" }, @@ -1049,73 +846,79 @@ } ], "spaces": [], - "tags": ["license:llama3.2", "region:us"], - "usedStorage": 21014285888 + "tags": [ + "gguf", + "cortex.cpp", + "featured", + "text-generation", + "license:llama3.2", + "endpoints_compatible", + "region:us", + "conversational" + ], + "usedStorage": 50404795008, + "widgetData": [ + { + "text": "Hi, what can you help me with?" + }, + { + "text": "Hey, let's have a conversation!" + }, + { + "text": "Hello there!" + }, + { + "text": "Hey my name is Clara! How are you?" 
+ } + ] }, "models": [ { - "id": "llama3.2:3b-gguf-q3-ks", - "size": 1542848672 + "id": "llama3.2:1b", + "size": 911503104 }, { - "id": "llama3.2:3b-gguf-q3-kl", - "size": 1815347360 - }, - { - "id": "llama3.2:3b-gguf-q3-km", - "size": 1687158944 - }, - { - "id": "llama3.2:3b-gguf-q4-ks", - "size": 1928200352 - }, - { - "id": "llama3.2:3b-gguf-q5-ks", - "size": 2269511840 - }, - { - "id": "llama3.2:3b-gguf-q4-km", - "size": 2019377312 - }, - { - "id": "llama3.2:3b-gguf-q6-k", - "size": 2643853472 - }, - { - "id": "llama3.2:3b-gguf-q2-k", - "size": 1363935392 - }, - { - "id": "llama3.2:3b-gguf-q5-km", - "size": 2322153632 - }, - { - "id": "llama3.2:3b-gguf-q8-0", + "id": "llama3.2:main", "size": 3421898912 + }, + { + "id": "llama3.2:3b", + "size": 2019377312 } ] }, { - "id": "cortexso/deepseek-r1-distill-qwen-1.5b", + "author": "DeepSeek-AI", + "id": "cortexso/deepseek-r1", "metadata": { - "_id": "678e84d99d66241aabee008a", + "_id": "67a0bcf13ac2dd6adf0bdfcf", "author": "cortexso", "cardData": { - "license": "mit" + "license": "mit", + "pipeline_tag": "text-generation", + "tags": ["cortexp.cpp", "featured"] }, - "createdAt": "2025-01-20T17:16:09.000Z", - "description": "---\nlicense: mit\n---\n## Overview\n\n**DeepSeek** developed and released the [DeepSeek R1 Distill Qwen 1.5B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B) model, a distilled version of the Qwen 1.5B language model. It is fine-tuned for high-performance text generation and optimized for dialogue and information-seeking tasks. 
This model achieves a balance of efficiency and accuracy while maintaining a smaller footprint compared to the original Qwen 1.5B.\n\nThe model is designed for applications in customer support, conversational AI, and research, prioritizing both helpfulness and safety.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [gguf](https://huggingface.co/cortexso/deepseek-r1-distill-qwen-1.5b/tree/main) | `cortex run deepseek-r1-distill-qwen-1.5b` |\n\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```text\n cortexso/deepseek-r1-distill-qwen-1.5b\n ```\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. Run the model with command:\n ```bash\n cortex run deepseek-r1-distill-qwen-1.5b\n ```\n## Credits\n\n- **Author:** DeepSeek\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B#7-license)\n- **Papers:** [DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning](https://arxiv.org/html/2501.12948v1)", + "createdAt": "2025-02-03T12:56:17.000Z", + "description": "---\nlicense: mit\npipeline_tag: text-generation\ntags:\n- cortexp.cpp\n- featured\n---\n\n## Overview\n\n**DeepSeek** developed and released the **DeepSeek-R1** series, featuring multiple model sizes fine-tuned for high-performance text generation. 
These models are optimized for dialogue, reasoning, and information-seeking tasks, providing a balance of efficiency and accuracy while maintaining a smaller footprint compared to their original counterparts.\n\nThe DeepSeek-R1 models include distilled and full-scale variants of both **Qwen** and **Llama** architectures, catering to various applications such as customer support, conversational AI, research, and enterprise automation.\n\n## Variants\n\n### DeepSeek-R1\n\n| No | Variant | Branch | Cortex CLI command |\n| -- | ---------------------------------------------------------------------------------------------- | ------- | ------------------------------------------ |\n| 1 | [DeepSeek-R1-Distill-Qwen-1.5B](https://huggingface.co/cortexso/deepseek-r1/tree/1.5b) | 1.5b | `cortex run deepseek-r1:1.5b` |\n| 2 | [DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/cortexso/deepseek-r1/tree/7b) | 7b | `cortex run deepseek-r1:7b` |\n| 3 | [DeepSeek-R1-Distill-Llama-8B](https://huggingface.co/cortexso/deepseek-r1/tree/8b) | 8b | `cortex run deepseek-r1:8b` |\n| 4 | [DeepSeek-R1-Distill-Qwen-14B](https://huggingface.co/cortexso/deepseek-r1/tree/14b) | 14b | `cortex run deepseek-r1:14b` |\n| 5 | [DeepSeek-R1-Distill-Qwen-32B](https://huggingface.co/cortexso/deepseek-r1/tree/32b) | 32b | `cortex run deepseek-r1:32b` |\n| 6 | [DeepSeek-R1-Distill-Llama-70B](https://huggingface.co/cortexso/deepseek-r1/tree/70b) | 70b | `cortex run deepseek-r1:70b` |\n\nEach branch contains a default quantized version:\n- **Qwen-1.5B:** q4-km\n- **Qwen-7B:** q4-km\n- **Llama-8B:** q4-km\n- **Qwen-14B:** q4-km\n- **Qwen-32B:** q4-km\n- **Llama-70B:** q4-km\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```text\n cortexso/deepseek-r1\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. 
Run the model with command:\n ```bash\n cortex run deepseek-r1\n ```\n\n## Credits\n\n- **Author:** DeepSeek\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://huggingface.co/deepseek-ai/DeepSeek-R1#license)\n- **Papers:** [DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning](https://arxiv.org/html/2501.12948v1)", "disabled": false, - "downloads": 70, + "downloads": 999, "gated": false, - "id": "cortexso/deepseek-r1-distill-qwen-1.5b", - "inference": "library-not-detected", - "lastModified": "2025-01-24T04:26:48.000Z", + "gguf": { + "architecture": "llama", + "bos_token": "<\uff5cbegin\u2581of\u2581sentence\uff5c>", + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + 
'<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c>\\n'}}{% endif %}", + "context_length": 131072, + "eos_token": "<\uff5cend\u2581of\u2581sentence\uff5c>", + "total": 70553706560 + }, + "id": "cortexso/deepseek-r1", + "lastModified": "2025-02-24T17:35:43.000Z", "likes": 0, "model-index": null, - "modelId": "cortexso/deepseek-r1-distill-qwen-1.5b", + "modelId": "cortexso/deepseek-r1", + "pipeline_tag": "text-generation", "private": false, - "sha": "15c639a690dc821d63b82f1b3a0c2b9051411d23", + "sha": "b08ca722cc176e8d830a4e348f51367ea47b7bed", "siblings": [ { "rfilename": ".gitattributes" @@ -1124,244 +927,251 @@ "rfilename": "README.md" }, { - "rfilename": "metadata.yml" + "rfilename": "deepseek-r1-distill-llama-70b-q4_k_m.gguf" }, { - "rfilename": "model.yml" + "rfilename": 
"deepseek-r1-distill-llama-8b-q2_k.gguf" + }, + { + "rfilename": "deepseek-r1-distill-llama-8b-q3_k_l.gguf" + }, + { + "rfilename": "deepseek-r1-distill-llama-8b-q3_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-llama-8b-q3_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-llama-8b-q4_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-llama-8b-q4_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-llama-8b-q5_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-llama-8b-q5_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-llama-8b-q6_k.gguf" + }, + { + "rfilename": "deepseek-r1-distill-llama-8b-q8_0.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-1.5b-q2_k.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-1.5b-q3_k_l.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-1.5b-q3_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-1.5b-q3_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-1.5b-q4_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-1.5b-q4_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-1.5b-q5_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-1.5b-q5_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-1.5b-q6_k.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-1.5b-q8_0.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-14b-q2_k.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-14b-q3_k_l.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-14b-q3_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-14b-q3_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-14b-q4_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-14b-q4_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-14b-q5_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-14b-q5_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-14b-q6_k.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-14b-q8_0.gguf" + }, + { + 
"rfilename": "deepseek-r1-distill-qwen-32b-q2_k.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-32b-q3_k_l.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-32b-q3_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-32b-q3_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-32b-q4_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-32b-q4_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-32b-q5_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-32b-q5_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-32b-q6_k.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-32b-q8_0.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-7b-q2_k.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-7b-q3_k_l.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-7b-q3_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-7b-q3_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-7b-q4_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-7b-q4_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-7b-q5_k_m.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-7b-q5_k_s.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-7b-q6_k.gguf" + }, + { + "rfilename": "deepseek-r1-distill-qwen-7b-q8_0.gguf" + }, + { + "rfilename": "metadata.yaml" } ], "spaces": [], - "tags": ["license:mit", "region:us"], - "usedStorage": 11611279040 + "tags": [ + "gguf", + "cortexp.cpp", + "featured", + "text-generation", + "license:mit", + "endpoints_compatible", + "region:us", + "conversational" + ], + "usedStorage": 825182913408, + "widgetData": [ + { + "text": "Hi, what can you help me with?" + }, + { + "text": "Hey, let's have a conversation!" + }, + { + "text": "Hello there!" + }, + { + "text": "Hey my name is Clara! How are you?" 
+ } + ] }, "models": [ { - "id": "deepseek-r1-distill-qwen-1.5b:1.5b-gguf-q3-ks", - "size": 861221600 + "id": "deepseek-r1:1.5b", + "size": 1117320480 }, { - "id": "deepseek-r1-distill-qwen-1.5b:1.5b-gguf-q3-km", - "size": 924455648 + "id": "deepseek-r1:14b", + "size": 8988109920 }, { - "id": "deepseek-r1-distill-qwen-1.5b:1.5b-gguf-q3-kl", - "size": 980439776 + "id": "deepseek-r1:70b", + "size": 42520395584 }, { - "id": "deepseek-r1-distill-qwen-1.5b:1.5b-gguf-q4-ks", - "size": 1071584480 + "id": "deepseek-r1:8b", + "size": 4920736256 }, { - "id": "deepseek-r1-distill-qwen-1.5b:1.5b-gguf-q6-k", - "size": 1464178400 + "id": "deepseek-r1:main", + "size": 8098524832 }, { - "id": "deepseek-r1-distill-qwen-1.5b:1.5b-gguf-q5-ks", - "size": 1259173088 + "id": "deepseek-r1:7b", + "size": 4683073184 }, { - "id": "deepseek-r1-distill-qwen-1.5b:1.5b-gguf-q2-k", - "size": 752879840 - }, - { - "id": "deepseek-r1-distill-qwen-1.5b:1.5b-gguf-q5-km", - "size": 1285493984 - }, - { - "id": "deepseek-r1-distill-qwen-1.5b:1.5b-gguf-q4-km", - "size": 1117320416 - }, - { - "id": "deepseek-r1-distill-qwen-1.5b:1.5b-gguf-q8-0", - "size": 1894531808 - } - ] - }, - { - "id": "cortexso/deepseek-r1-distill-qwen-32b", - "metadata": { - "_id": "678fe132df84bd3d94f37e58", - "author": "cortexso", - "cardData": { - "license": "mit" - }, - "createdAt": "2025-01-21T18:02:26.000Z", - "description": "---\nlicense: mit\n---\n\n## Overview\n\n**DeepSeek** developed and released the [DeepSeek R1 Distill Qwen 32B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B) model, a distilled version of the Qwen 32B language model. This is the most advanced and largest model in the DeepSeek R1 Distill family, offering unparalleled performance in text generation, dialogue optimization, and reasoning tasks. 
\n\nThe model is tailored for large-scale applications in conversational AI, research, enterprise solutions, and knowledge systems, delivering exceptional accuracy, efficiency, and safety at scale.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [gguf](https://huggingface.co/cortexso/deepseek-r1-distill-qwen-32b/tree/main) | `cortex run deepseek-r1-distill-qwen-32b` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```text\n cortexso/deepseek-r1-distill-qwen-32b\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. Run the model with command:\n ```bash\n cortex run deepseek-r1-distill-qwen-32b\n ```\n\n## Credits\n\n- **Author:** DeepSeek\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B#7-license)\n- **Papers:** [DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning](https://arxiv.org/html/2501.12948v1)\n", - "disabled": false, - "downloads": 6, - "gated": false, - "id": "cortexso/deepseek-r1-distill-qwen-32b", - "inference": "library-not-detected", - "lastModified": "2025-01-23T08:50:04.000Z", - "likes": 0, - "model-index": null, - "modelId": "cortexso/deepseek-r1-distill-qwen-32b", - "private": false, - "sha": "a5d2268c4d8bc697597d562172490d3e21059fc4", - "siblings": [ - { - "rfilename": ".gitattributes" - }, - { - "rfilename": "README.md" - }, - { - "rfilename": "metadata.yml" - }, - { - "rfilename": "model.yml" - } - ], - "spaces": [], - "tags": ["license:mit", "region:us"], - "usedStorage": 206130747200 - }, - "models": [ - { - "id": "deepseek-r1-distill-qwen-32b:32b-gguf-q2-k", - "size": 12313098016 - }, - { - "id": "deepseek-r1-distill-qwen-32b:32b-gguf-q3-ks", - "size": 14392330016 - }, - { - "id": 
"deepseek-r1-distill-qwen-32b:32b-gguf-q3-kl", - "size": 17247078176 - }, - { - "id": "deepseek-r1-distill-qwen-32b:32b-gguf-q4-ks", - "size": 18784409376 - }, - { - "id": "deepseek-r1-distill-qwen-32b:32b-gguf-q4-km", - "size": 19851335456 - }, - { - "id": "deepseek-r1-distill-qwen-32b:32b-gguf-q5-km", - "size": 23262156576 - }, - { - "id": "deepseek-r1-distill-qwen-32b:32b-gguf-q3-km", - "size": 15935047456 - }, - { - "id": "deepseek-r1-distill-qwen-32b:32b-gguf-q6-k", - "size": 26886154016 - }, - { - "id": "deepseek-r1-distill-qwen-32b:32b-gguf-q8-0", - "size": 34820884256 - }, - { - "id": "deepseek-r1-distill-qwen-32b:32b-gguf-q5-ks", - "size": 22638253856 - } - ] - }, - { - "id": "cortexso/deepseek-r1-distill-llama-8b", - "metadata": { - "_id": "678f4b5625a9b93997f1f666", - "author": "cortexso", - "cardData": { - "license": "mit" - }, - "createdAt": "2025-01-21T07:23:02.000Z", - "description": "---\nlicense: mit\n---\n\n## Overview\n\n**DeepSeek** developed and released the [DeepSeek R1 Distill Llama 8B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B) model, a distilled version of the Llama 8B language model. This variant is fine-tuned for high-performance text generation, optimized for dialogue, and tailored for information-seeking tasks. It offers a robust balance between model size and performance, making it suitable for demanding conversational AI and research use cases.\n\nThe model is designed to deliver accurate, efficient, and safe responses in applications such as customer support, knowledge systems, and research environments.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [gguf](https://huggingface.co/cortexso/deepseek-r1-distill-llama-8b/tree/main) | `cortex run deepseek-r1-distill-llama-8b` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. 
Use in Jan model Hub:\n ```bash\n cortexso/deepseek-r1-distill-llama-8b\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. Run the model with command:\n ```bash\n cortex run deepseek-r1-distill-llama-8b\n ```\n\n## Credits\n\n- **Author:** DeepSeek\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B#7-license)\n- **Papers:** [DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning](https://arxiv.org/html/2501.12948v1)\n", - "disabled": false, - "downloads": 59, - "gated": false, - "id": "cortexso/deepseek-r1-distill-llama-8b", - "inference": "library-not-detected", - "lastModified": "2025-01-23T08:46:41.000Z", - "likes": 0, - "model-index": null, - "modelId": "cortexso/deepseek-r1-distill-llama-8b", - "private": false, - "sha": "f69bd2c9e2ea1380cbcaeec136ab71a4b164b200", - "siblings": [ - { - "rfilename": ".gitattributes" - }, - { - "rfilename": "README.md" - }, - { - "rfilename": "metadata.yml" - }, - { - "rfilename": "model.yml" - } - ], - "spaces": [], - "tags": ["license:mit", "region:us"], - "usedStorage": 51266986688 - }, - "models": [ - { - "id": "deepseek-r1-distill-llama-8b:8b-gguf-q4-ks", - "size": 4692670944 - }, - { - "id": "deepseek-r1-distill-llama-8b:8b-gguf-q3-ks", - "size": 3664501216 - }, - { - "id": "deepseek-r1-distill-llama-8b:8b-gguf-q3-km", - "size": 4018919904 - }, - { - "id": "deepseek-r1-distill-llama-8b:8b-gguf-q3-kl", - "size": 4321958368 - }, - { - "id": "deepseek-r1-distill-llama-8b:8b-gguf-q4-km", - "size": 4920736224 - }, - { - "id": "deepseek-r1-distill-llama-8b:8b-gguf-q2-k", - "size": 3179133408 - }, - { - "id": "deepseek-r1-distill-llama-8b:8b-gguf-q8-0", - "size": 8540772832 - }, - { - "id": "deepseek-r1-distill-llama-8b:8b-gguf-q5-ks", - "size": 5599295968 - }, - { - "id": "deepseek-r1-distill-llama-8b:8b-gguf-q5-km", - "size": 
5732989408 - }, - { - "id": "deepseek-r1-distill-llama-8b:8b-gguf-q6-k", - "size": 6596008416 + "id": "deepseek-r1:32b", + "size": 19851335520 } ] }, { + "author": "meta-llama", "id": "cortexso/llama3.1", "metadata": { "_id": "66a76e01a1037fe261a5a472", "author": "cortexso", "cardData": { - "license": "llama3.1" + "license": "llama3.1", + "pipeline_tag": "text-generation", + "tags": ["cortex.cpp"] }, "createdAt": "2024-07-29T10:25:05.000Z", - "description": "---\nlicense: llama3.1\n---\n\n## Overview\n\nMeta developed and released the [Meta Llama 3.1](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B) family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes. The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Further, in developing these models, we took great care to optimize helpfulness and safety.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 2 | [gguf](https://huggingface.co/cortexso/llama3.1/tree/gguf) | `cortex run llama3.1:gguf` |\n| 3 | [main/default](https://huggingface.co/cortexso/llama3.1/tree/main) | `cortex run llama3.1` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```\n cortexso/llama3.1\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. 
Run the model with command:\n ```\n cortex run llama3.1\n ```\n\n## Credits\n\n- **Author:** Meta\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B/blob/main/LICENSE)\n- **Papers:** [Llama-3.1 Blog](https://scontent.fsgn3-1.fna.fbcdn.net/v/t39.2365-6/452387774_1036916434819166_4173978747091533306_n.pdf?_nc_cat=104&ccb=1-7&_nc_sid=3c67a6&_nc_ohc=DTS7hDTcxZoQ7kNvgHxaQ8K&_nc_ht=scontent.fsgn3-1.fna&oh=00_AYC1gXduoxatzt8eFMfLunrRUzpzQcoKzAktIOT7FieZAQ&oe=66AE9C4D)", + "description": "---\nlicense: llama3.1\npipeline_tag: text-generation\ntags:\n- cortex.cpp\n---\n\n## Overview\n\nMeta developed and released the [Meta Llama 3.1](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B) family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes. The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Further, in developing these models, we took great care to optimize helpfulness and safety.\n\n## Variants\n\n| No | Variant | Cortex CLI command |\n| --- | --- | --- |\n| 1 | [Llama3.1-8b](https://huggingface.co/cortexso/llama3.1/tree/8b) | `cortex run llama3.1:8b` |\n\n## Use it with Jan (UI)\n\n1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)\n2. Use in Jan model Hub:\n ```bash\n cortexso/llama3.1\n ```\n\n## Use it with Cortex (CLI)\n\n1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)\n2. 
Run the model with command:\n ```bash\n cortex run llama3.1\n ```\n\n## Credits\n\n- **Author:** Meta\n- **Converter:** [Homebrew](https://www.homebrew.ltd/)\n- **Original License:** [License](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B/blob/main/LICENSE)\n- **Papers:** [Llama-3.1 Blog](https://ai.meta.com/blog/meta-llama-3-1/)", "disabled": false, - "downloads": 29, + "downloads": 275, "gated": false, + "gguf": { + "architecture": "llama", + "bos_token": "<|begin_of_text|>", + "context_length": 131072, + "eos_token": "<|end_of_text|>", + "total": 8030261312 + }, "id": "cortexso/llama3.1", - "inference": "library-not-detected", - "lastModified": "2024-11-12T20:11:22.000Z", + "lastModified": "2025-02-25T07:41:12.000Z", "likes": 0, "model-index": null, "modelId": "cortexso/llama3.1", + "pipeline_tag": "text-generation", "private": false, - "sha": "4702595a4e5e5aba5c0f7d1180199cecc076597d", + "sha": "f83805762b13bfe9aaa071c065edb74c48281367", "siblings": [ { "rfilename": ".gitattributes" @@ -1369,6 +1179,36 @@ { "rfilename": "README.md" }, + { + "rfilename": "llama-3.1-8b-q2_k.gguf" + }, + { + "rfilename": "llama-3.1-8b-q3_k_l.gguf" + }, + { + "rfilename": "llama-3.1-8b-q3_k_m.gguf" + }, + { + "rfilename": "llama-3.1-8b-q3_k_s.gguf" + }, + { + "rfilename": "llama-3.1-8b-q4_k_m.gguf" + }, + { + "rfilename": "llama-3.1-8b-q4_k_s.gguf" + }, + { + "rfilename": "llama-3.1-8b-q5_k_m.gguf" + }, + { + "rfilename": "llama-3.1-8b-q5_k_s.gguf" + }, + { + "rfilename": "llama-3.1-8b-q6_k.gguf" + }, + { + "rfilename": "llama-3.1-8b-q8_0.gguf" + }, { "rfilename": "metadata.yml" }, @@ -1377,57 +1217,38 @@ } ], "spaces": [], - "tags": ["license:llama3.1", "region:us"], - "usedStorage": 175802939712 + "tags": [ + "gguf", + "cortex.cpp", + "text-generation", + "license:llama3.1", + "endpoints_compatible", + "region:us" + ], + "usedStorage": 227069905920, + "widgetData": [ + { + "text": "My name is Julien and I like to" + }, + { + "text": "I like traveling by train because" + }, 
+ { + "text": "Paris is an amazing place to visit," + }, + { + "text": "Once upon a time," + } + ] }, "models": [ { - "id": "llama3.1:8b-gguf-q3-ks", - "size": 3664504064 + "id": "llama3.1:8b", + "size": 4920734176 }, { - "id": "llama3.1:8b-gguf-q8-0", - "size": 8540775680 - }, - { - "id": "llama3.1:8b-gguf-q4-ks", - "size": 4692673792 - }, - { - "id": "llama3.1:8b-gguf-q3-km", - "size": 4018922752 - }, - { - "id": "llama3.1:8b-gguf", - "size": 4920734656 - }, - { - "id": "llama3.1:8b-gguf-q3-kl", - "size": 4321961216 - }, - { - "id": "llama3.1:8b-gguf-q4-km", - "size": 4920739072 - }, - { - "id": "llama3.1:8b-gguf-q5-km", - "size": 5732992256 - }, - { - "id": "llama3.1:8b-gguf-q6-k", - "size": 6596011264 - }, - { - "id": "llama3.1:8b-gguf-q5-ks", - "size": 5599298816 - }, - { - "id": "llama3.1:8b-gguf-q2-k", - "size": 3179136256 - }, - { - "id": "llama3.1:gguf", - "size": 4920734656 + "id": "llama3.1:main", + "size": 8540770784 } ] } diff --git a/extensions/model-extension/rolldown.config.mjs b/extensions/model-extension/rolldown.config.mjs index 01672addd..54ea654ff 100644 --- a/extensions/model-extension/rolldown.config.mjs +++ b/extensions/model-extension/rolldown.config.mjs @@ -11,8 +11,7 @@ export default defineConfig({ platform: 'browser', define: { SETTINGS: JSON.stringify(settingJson), - API_URL: JSON.stringify('http://127.0.0.1:39291'), - SOCKET_URL: JSON.stringify('ws://127.0.0.1:39291'), + CORTEX_API_URL: JSON.stringify(`http://127.0.0.1:${process.env.CORTEX_API_PORT ?? 
"39291"}`), DEFAULT_MODEL_SOURCES: JSON.stringify(modelSources), }, }) diff --git a/extensions/model-extension/src/@types/global.d.ts b/extensions/model-extension/src/@types/global.d.ts index 1b6b71a03..e4d269cdb 100644 --- a/extensions/model-extension/src/@types/global.d.ts +++ b/extensions/model-extension/src/@types/global.d.ts @@ -1,6 +1,5 @@ declare const NODE: string -declare const API_URL: string -declare const SOCKET_URL: string +declare const CORTEX_API_URL: string declare const SETTINGS: SettingComponentProps[] declare const DEFAULT_MODEL_SOURCES: any diff --git a/extensions/model-extension/src/cortex.ts b/extensions/model-extension/src/cortex.ts deleted file mode 100644 index d7d4a0233..000000000 --- a/extensions/model-extension/src/cortex.ts +++ /dev/null @@ -1,242 +0,0 @@ -import PQueue from 'p-queue' -import ky from 'ky' -import { extractModelLoadParams, Model, ModelSource } from '@janhq/core' -import { extractInferenceParams } from '@janhq/core' -/** - * cortex.cpp Model APIs interface - */ -interface ICortexAPI { - getModel(model: string): Promise - getModels(): Promise - pullModel(model: string, id?: string, name?: string): Promise - importModel( - path: string, - modelPath: string, - name?: string, - option?: string - ): Promise - deleteModel(model: string): Promise - updateModel(model: object): Promise - cancelModelPull(model: string): Promise - configs(body: { [key: string]: any }): Promise - getSources(): Promise - addSource(source: string): Promise - deleteSource(source: string): Promise -} - -type Data = { - data: any[] -} - -export class CortexAPI implements ICortexAPI { - queue = new PQueue({ concurrency: 1 }) - - constructor() { - this.queue.add(() => this.healthz()) - } - - /** - * Fetches a model detail from cortex.cpp - * @param model - * @returns - */ - getModel(model: string): Promise { - return this.queue.add(() => - ky - .get(`${API_URL}/v1/models/${model}`) - .json() - .then((e) => this.transformModel(e)) - ) - } - - /** - * Fetches 
models list from cortex.cpp - * @param model - * @returns - */ - getModels(): Promise { - return this.queue - .add(() => ky.get(`${API_URL}/v1/models?limit=-1`).json()) - .then((e) => - typeof e === 'object' ? e.data.map((e) => this.transformModel(e)) : [] - ) - } - - /** - * Pulls a model from HuggingFace via cortex.cpp - * @param model - * @returns - */ - pullModel(model: string, id?: string, name?: string): Promise { - return this.queue.add(() => - ky - .post(`${API_URL}/v1/models/pull`, { json: { model, id, name } }) - .json() - .catch(async (e) => { - throw (await e.response?.json()) ?? e - }) - .then() - ) - } - - /** - * Imports a model from a local path via cortex.cpp - * @param model - * @returns - */ - importModel( - model: string, - modelPath: string, - name?: string, - option?: string - ): Promise { - return this.queue.add(() => - ky - .post(`${API_URL}/v1/models/import`, { - json: { model, modelPath, name, option }, - }) - .json() - .catch((e) => console.debug(e)) // Ignore error - .then() - ) - } - - /** - * Deletes a model from cortex.cpp - * @param model - * @returns - */ - deleteModel(model: string): Promise { - return this.queue.add(() => - ky.delete(`${API_URL}/v1/models/${model}`).json().then() - ) - } - - /** - * Update a model in cortex.cpp - * @param model - * @returns - */ - updateModel(model: Partial): Promise { - return this.queue.add(() => - ky - .patch(`${API_URL}/v1/models/${model.id}`, { json: { ...model } }) - .json() - .then() - ) - } - - /** - * Cancel model pull in cortex.cpp - * @param model - * @returns - */ - cancelModelPull(model: string): Promise { - return this.queue.add(() => - ky - .delete(`${API_URL}/v1/models/pull`, { json: { taskId: model } }) - .json() - .then() - ) - } - - /** - * Check model status - * @param model - */ - async getModelStatus(model: string): Promise { - return this.queue - .add(() => ky.get(`${API_URL}/v1/models/status/${model}`)) - .then((e) => true) - .catch(() => false) - } - - // BEGIN - Model 
Sources - /** - * Get model sources - * @param model - */ - async getSources(): Promise { - return this.queue - .add(() => ky.get(`${API_URL}/v1/models/sources`).json()) - .then((e) => (typeof e === 'object' ? (e.data as ModelSource[]) : [])) - .catch(() => []) - } - - /** - * Add a model source - * @param model - */ - async addSource(source: string): Promise { - return this.queue.add(() => - ky.post(`${API_URL}/v1/models/sources`, { - json: { - source, - }, - }) - ) - } - - /** - * Delete a model source - * @param model - */ - async deleteSource(source: string): Promise { - return this.queue.add(() => - ky.delete(`${API_URL}/v1/models/sources`, { - json: { - source, - }, - }) - ) - } - // END - Model Sources - - /** - * Do health check on cortex.cpp - * @returns - */ - healthz(): Promise { - return ky - .get(`${API_URL}/healthz`, { - retry: { - limit: 20, - delay: () => 500, - methods: ['get'], - }, - }) - .then(() => {}) - } - - /** - * Configure model pull options - * @param body - */ - configs(body: { [key: string]: any }): Promise { - return this.queue.add(() => - ky.patch(`${API_URL}/v1/configs`, { json: body }).then(() => {}) - ) - } - - /** - * TRansform model to the expected format (e.g. parameters, settings, metadata) - * @param model - * @returns - */ - private transformModel(model: any) { - model.parameters = { - ...extractInferenceParams(model), - ...model.parameters, - ...model.inference_params, - } - model.settings = { - ...extractModelLoadParams(model), - ...model.settings, - } - model.metadata = model.metadata ?? { - tags: [], - size: model.size ?? model.metadata?.size ?? 
0, - } - return model as Model - } -} diff --git a/extensions/model-extension/src/index.test.ts b/extensions/model-extension/src/index.test.ts index e514f8ce3..a339c8c9b 100644 --- a/extensions/model-extension/src/index.test.ts +++ b/extensions/model-extension/src/index.test.ts @@ -1,89 +1,88 @@ +import { describe, it, expect, beforeEach, vi } from 'vitest' import JanModelExtension from './index' +import ky from 'ky' +import { ModelManager } from '@janhq/core' -let SETTINGS = [] -// @ts-ignore -global.SETTINGS = SETTINGS +const API_URL = 'http://localhost:3000' -jest.mock('@janhq/core', () => ({ - ...jest.requireActual('@janhq/core/node'), - events: { - emit: jest.fn(), - }, - joinPath: (paths) => paths.join('/'), - ModelExtension: jest.fn().mockImplementation(function () { - // @ts-ignore - this.registerSettings = () => { - return Promise.resolve() - } - // @ts-ignore - return this - }), -})) +vi.stubGlobal('API_URL', API_URL) describe('JanModelExtension', () => { let extension: JanModelExtension - let mockCortexAPI: any beforeEach(() => { - mockCortexAPI = { - getModels: jest.fn().mockResolvedValue([]), - pullModel: jest.fn().mockResolvedValue(undefined), - importModel: jest.fn().mockResolvedValue(undefined), - deleteModel: jest.fn().mockResolvedValue(undefined), - updateModel: jest.fn().mockResolvedValue({}), - cancelModelPull: jest.fn().mockResolvedValue(undefined), - } - - // @ts-ignore extension = new JanModelExtension() - extension.cortexAPI = mockCortexAPI - }) + vi.spyOn(ModelManager, 'instance').mockReturnValue({ + get: (modelId: string) => ({ + id: modelId, + engine: 'nitro_tensorrt_llm', + settings: { vision_model: true }, + sources: [{ filename: 'test.bin' }], + }), + } as any) + vi.spyOn(JanModelExtension.prototype, 'cancelModelPull').mockImplementation( + async (model: string) => { + const kyDeleteSpy = vi.spyOn(ky, 'delete').mockResolvedValue({ + json: () => Promise.resolve({}), + } as any) - it('should register settings on load', async () => { - // 
@ts-ignore - const registerSettingsSpy = jest.spyOn(extension, 'registerSettings') - await extension.onLoad() - expect(registerSettingsSpy).toHaveBeenCalledWith(SETTINGS) - }) + await ky.delete(`${API_URL}/v1/models/pull`, { + json: { taskId: model }, + }) - it('should pull a model', async () => { - const model = 'test-model' - await extension.pullModel(model) - expect(mockCortexAPI.pullModel).toHaveBeenCalledWith(model) - }) + expect(kyDeleteSpy).toHaveBeenCalledWith(`${API_URL}/v1/models/pull`, { + json: { taskId: model }, + }) - it('should cancel model download', async () => { - const model = 'test-model' - await extension.cancelModelPull(model) - expect(mockCortexAPI.cancelModelPull).toHaveBeenCalledWith(model) - }) - - it('should delete a model', async () => { - const model = 'test-model' - await extension.deleteModel(model) - expect(mockCortexAPI.deleteModel).toHaveBeenCalledWith(model) - }) - - it('should get all models', async () => { - const models = await extension.getModels() - expect(models).toEqual([]) - expect(mockCortexAPI.getModels).toHaveBeenCalled() - }) - - it('should update a model', async () => { - const model = { id: 'test-model' } - const updatedModel = await extension.updateModel(model) - expect(updatedModel).toEqual({}) - expect(mockCortexAPI.updateModel).toHaveBeenCalledWith(model) - }) - - it('should import a model', async () => { - const model: any = { path: 'test-path' } - const optionType: any = 'test-option' - await extension.importModel(model, optionType) - expect(mockCortexAPI.importModel).toHaveBeenCalledWith( - model.path, - optionType + kyDeleteSpy.mockRestore() // Restore the original implementation + } ) }) + + it('should initialize with an empty queue', () => { + expect(extension.queue.size).toBe(0) + }) + + describe('pullModel', () => { + it('should call the pull model endpoint with correct parameters', async () => { + const model = 'test-model' + const id = 'test-id' + const name = 'test-name' + + const kyPostSpy = 
vi.spyOn(ky, 'post').mockReturnValue({ + json: () => Promise.resolve({}), + } as any) + + await extension.pullModel(model, id, name) + + expect(kyPostSpy).toHaveBeenCalledWith(`${API_URL}/v1/models/pull`, { + json: { model, id, name }, + }) + + kyPostSpy.mockRestore() // Restore the original implementation + }) + }) + + describe('cancelModelPull', () => { + it('should call the cancel model pull endpoint with the correct model', async () => { + const model = 'test-model' + + await extension.cancelModelPull(model) + }) + }) + + describe('deleteModel', () => { + it('should call the delete model endpoint with the correct model', async () => { + const model = 'test-model' + const kyDeleteSpy = vi + .spyOn(ky, 'delete') + .mockResolvedValue({ json: () => Promise.resolve({}) } as any) + + await extension.deleteModel(model) + + expect(kyDeleteSpy).toHaveBeenCalledWith(`${API_URL}/v1/models/${model}`) + + kyDeleteSpy.mockRestore() // Restore the original implementation + }) + }) }) diff --git a/extensions/model-extension/src/index.ts b/extensions/model-extension/src/index.ts index 719671cfd..4362ab9a5 100644 --- a/extensions/model-extension/src/index.ts +++ b/extensions/model-extension/src/index.ts @@ -5,35 +5,56 @@ import { joinPath, dirName, fs, - ModelManager, - abortDownload, - DownloadState, - events, - DownloadEvent, OptionType, ModelSource, + extractInferenceParams, + extractModelLoadParams, } from '@janhq/core' -import { CortexAPI } from './cortex' import { scanModelsFolder } from './legacy/model-json' -import { downloadModel } from './legacy/download' -import { systemInformation } from '@janhq/core' import { deleteModelFiles } from './legacy/delete' +import PQueue from 'p-queue' +import ky, { KyInstance } from 'ky' +/** + * cortex.cpp setting keys + */ export enum Settings { huggingfaceToken = 'hugging-face-access-token', } +/** Data List Response Type */ +type Data = { + data: T[] +} + /** * A extension for models */ export default class JanModelExtension extends 
ModelExtension { - cortexAPI: CortexAPI = new CortexAPI() + queue = new PQueue({ concurrency: 1 }) + api?: KyInstance + /** + * Get the API instance + * @returns + */ + async apiInstance(): Promise { + if(this.api) return this.api + const apiKey = (await window.core?.api.appToken()) ?? 'cortex.cpp' + this.api = ky.extend({ + prefixUrl: CORTEX_API_URL, + headers: { + Authorization: `Bearer ${apiKey}`, + }, + }) + return this.api + } /** * Called when the extension is loaded. - * @override */ async onLoad() { + this.queue.add(() => this.healthz()) + this.registerSettings(SETTINGS) // Configure huggingface token if available @@ -41,11 +62,12 @@ export default class JanModelExtension extends ModelExtension { Settings.huggingfaceToken, undefined ) - if (huggingfaceToken) - this.cortexAPI.configs({ huggingface_token: huggingfaceToken }) + if (huggingfaceToken) { + this.updateCortexConfig({ huggingface_token: huggingfaceToken }) + } - // Listen to app download events - this.handleDesktopEvents() + // Sync with cortexsohub + this.fetchCortexsoModels() } /** @@ -55,7 +77,7 @@ export default class JanModelExtension extends ModelExtension { */ onSettingUpdate(key: string, value: T): void { if (key === Settings.huggingfaceToken) { - this.cortexAPI.configs({ huggingface_token: value }) + this.updateCortexConfig({ huggingface_token: value }) } } @@ -65,28 +87,27 @@ export default class JanModelExtension extends ModelExtension { */ async onUnload() {} + // BEGIN: - Public API /** * Downloads a machine learning model. * @param model - The model to download. * @returns A Promise that resolves when the model is downloaded. 
*/ async pullModel(model: string, id?: string, name?: string): Promise { - if (id) { - const model: Model = ModelManager.instance().get(id) - // Clip vision model - should not be handled by cortex.cpp - // TensorRT model - should not be handled by cortex.cpp - if ( - model && - (model.engine === InferenceEngine.nitro_tensorrt_llm || - model.settings.vision_model) - ) { - return downloadModel(model, (await systemInformation()).gpuSetting) - } - } /** * Sending POST to /models/pull/{id} endpoint to pull the model */ - return this.cortexAPI.pullModel(model, id, name) + return this.queue.add(() => + this.apiInstance().then((api) => + api + .post('v1/models/pull', { json: { model, id, name }, timeout: false }) + .json() + .catch(async (e) => { + throw (await e.response?.json()) ?? e + }) + .then() + ) + ) } /** @@ -96,25 +117,17 @@ export default class JanModelExtension extends ModelExtension { * @returns {Promise} A promise that resolves when the download has been cancelled. */ async cancelModelPull(model: string): Promise { - if (model) { - const modelDto: Model = ModelManager.instance().get(model) - // Clip vision model - should not be handled by cortex.cpp - // TensorRT model - should not be handled by cortex.cpp - if ( - modelDto && - (modelDto.engine === InferenceEngine.nitro_tensorrt_llm || - modelDto.settings.vision_model) - ) { - for (const source of modelDto.sources) { - const path = await joinPath(['models', modelDto.id, source.filename]) - await abortDownload(path) - } - } - } /** * Sending DELETE to /models/pull/{id} endpoint to cancel a model pull */ - return this.cortexAPI.cancelModelPull(model) + return this.queue.add(() => + this.apiInstance().then((api) => + api + .delete('v1/models/pull', { json: { taskId: model } }) + .json() + .then() + ) + ) } /** @@ -123,13 +136,17 @@ export default class JanModelExtension extends ModelExtension { * @returns A Promise that resolves when the model is deleted. 
*/ async deleteModel(model: string): Promise { - return this.cortexAPI - .deleteModel(model) + return this.queue + .add(() => + this.apiInstance().then((api) => + api.delete(`v1/models/${model}`).json().then() + ) + ) .catch((e) => console.debug(e)) .finally(async () => { // Delete legacy model files await deleteModelFiles(model).catch((e) => console.debug(e)) - }) + }) as Promise } /** @@ -153,7 +170,7 @@ export default class JanModelExtension extends ModelExtension { /** * Fetch models from cortex.cpp */ - var fetchedModels = await this.cortexAPI.getModels().catch(() => []) + var fetchedModels = await this.fetchModels().catch(() => []) // Checking if there are models to import const existingIds = fetchedModels.map((e) => e.id) @@ -180,16 +197,16 @@ export default class JanModelExtension extends ModelExtension { toImportModels.map(async (model: Model & { file_path: string }) => { return this.importModel( model.id, - model.sources[0].url.startsWith('http') || - !(await fs.existsSync(model.sources[0].url)) + model.sources?.[0]?.url.startsWith('http') || + !(await fs.existsSync(model.sources?.[0]?.url)) ? await joinPath([ await dirName(model.file_path), - model.sources[0]?.filename ?? + model.sources?.[0]?.filename ?? model.settings?.llama_model_path ?? - model.sources[0]?.url.split('/').pop() ?? + model.sources?.[0]?.url.split('/').pop() ?? 
model.id, ]) // Copied models - : model.sources[0].url, // Symlink models, + : model.sources?.[0]?.url, // Symlink models, model.name ) .then((e) => { @@ -210,8 +227,7 @@ export default class JanModelExtension extends ModelExtension { * Models are imported successfully before * Now return models from cortex.cpp and merge with legacy models which are not imported */ - return await this.cortexAPI - .getModels() + return await this.fetchModels() .then((models) => { return models.concat( legacyModels.filter((e) => !models.some((x) => x.id === e.id)) @@ -225,9 +241,34 @@ export default class JanModelExtension extends ModelExtension { * @param model - The metadata of the model */ async updateModel(model: Partial): Promise { - return this.cortexAPI - ?.updateModel(model) - .then(() => this.cortexAPI!.getModel(model.id)) + return this.queue + .add(() => + this.apiInstance().then((api) => + api + .patch(`v1/models/${model.id}`, { + json: { ...model }, + timeout: false, + }) + .json() + .then() + ) + ) + .then(() => this.getModel(model.id)) + } + + /** + * Get a model by its ID + * @param model - The ID of the model + */ + async getModel(model: string): Promise { + return this.queue.add(() => + this.apiInstance().then((api) => + api + .get(`v1/models/${model}`) + .json() + .then((e) => this.transformModel(e)) + ) + ) as Promise } /** @@ -241,7 +282,18 @@ export default class JanModelExtension extends ModelExtension { name?: string, option?: OptionType ): Promise { - return this.cortexAPI.importModel(model, modelPath, name, option) + return this.queue.add(() => + this.apiInstance().then((api) => + api + .post('v1/models/import', { + json: { model, modelPath, name, option }, + timeout: false, + }) + .json() + .catch((e) => console.debug(e)) // Ignore error + .then() + ) + ) } // BEGIN - Model Sources @@ -250,7 +302,14 @@ export default class JanModelExtension extends ModelExtension { * @param model */ async getSources(): Promise { - const sources = await 
this.cortexAPI.getSources() + const sources = await this.queue + .add(() => + this.apiInstance().then((api) => + api.get('v1/models/sources').json>() + ) + ) + .then((e) => (typeof e === 'object' ? (e.data as ModelSource[]) : [])) + .catch(() => []) return sources.concat( DEFAULT_MODEL_SOURCES.filter((e) => !sources.some((x) => x.id === e.id)) ) @@ -261,7 +320,15 @@ export default class JanModelExtension extends ModelExtension { * @param model */ async addSource(source: string): Promise { - return this.cortexAPI.addSource(source) + return this.queue.add(() => + this.apiInstance().then((api) => + api.post('v1/models/sources', { + json: { + source, + }, + }) + ) + ) } /** @@ -269,7 +336,16 @@ export default class JanModelExtension extends ModelExtension { * @param model */ async deleteSource(source: string): Promise { - return this.cortexAPI.deleteSource(source) + return this.queue.add(() => + this.apiInstance().then((api) => + api.delete('v1/models/sources', { + json: { + source, + }, + timeout: false, + }) + ) + ) } // END - Model Sources @@ -278,40 +354,124 @@ export default class JanModelExtension extends ModelExtension { * @param model */ async isModelLoaded(model: string): Promise { - return this.cortexAPI.getModelStatus(model) + return this.queue + .add(() => + this.apiInstance().then((api) => api.get(`v1/models/status/${model}`)) + ) + .then((e) => true) + .catch(() => false) } /** * Configure pull options such as proxy, headers, etc. 
*/ async configurePullOptions(options: { [key: string]: any }): Promise { - return this.cortexAPI.configs(options).catch((e) => console.debug(e)) + return this.updateCortexConfig(options).catch((e) => console.debug(e)) } /** - * Handle download state from main app + * Fetches models list from cortex.cpp + * @param model + * @returns */ - handleDesktopEvents() { - if (window && window.electronAPI) { - window.electronAPI.onFileDownloadUpdate( - async (_event: string, state: DownloadState | undefined) => { - if (!state) return - state.downloadState = 'downloading' - events.emit(DownloadEvent.onFileDownloadUpdate, state) - } + async fetchModels(): Promise { + return this.queue + .add(() => + this.apiInstance().then((api) => + api.get('v1/models?limit=-1').json>() + ) ) - window.electronAPI.onFileDownloadError( - async (_event: string, state: DownloadState) => { - state.downloadState = 'error' - events.emit(DownloadEvent.onFileDownloadError, state) - } + .then((e) => + typeof e === 'object' ? e.data.map((e) => this.transformModel(e)) : [] ) - window.electronAPI.onFileDownloadSuccess( - async (_event: string, state: DownloadState) => { - state.downloadState = 'end' - events.emit(DownloadEvent.onFileDownloadSuccess, state) - } - ) - } } + // END: - Public API + + // BEGIN: - Private API + + /** + * Transform model to the expected format (e.g. parameters, settings, metadata) + * @param model + * @returns + */ + private transformModel(model: any) { + model.parameters = { + ...extractInferenceParams(model), + ...model.parameters, + ...model.inference_params, + } + model.settings = { + ...extractModelLoadParams(model), + ...model.settings, + } + model.metadata = model.metadata ?? { + tags: [], + size: model.size ?? model.metadata?.size ?? 
0, + } + return model as Model + } + + /** + * Update cortex config + * @param body + */ + private async updateCortexConfig(body: { + [key: string]: any + }): Promise { + return this.queue + .add(() => + this.apiInstance().then((api) => + api.patch('v1/configs', { json: body }).then(() => {}) + ) + ) + .catch((e) => console.debug(e)) + } + + /** + * Do health check on cortex.cpp + * @returns + */ + private healthz(): Promise { + return this.apiInstance() + .then((api) => + api.get('healthz', { + retry: { + limit: 20, + delay: () => 500, + methods: ['get'], + }, + }) + ) + .then(() => { + this.queue.concurrency = Infinity + }) + } + + /** + * Fetch models from cortex.so + */ + private fetchCortexsoModels = async () => { + const models = await this.fetchModels() + + return this.queue.add(() => + this.apiInstance() + .then((api) => + api + .get('v1/models/hub?author=cortexso&tag=cortex.cpp') + .json>() + .then((e) => { + e.data?.forEach((model) => { + if ( + !models.some( + (e) => 'modelSource' in e && e.modelSource === model + ) + ) + this.addSource(model).catch((e) => console.debug(e)) + }) + }) + ) + .catch((e) => console.debug(e)) + ) + } + // END: - Private API } diff --git a/extensions/model-extension/src/legacy/download.ts b/extensions/model-extension/src/legacy/download.ts deleted file mode 100644 index d4d6c62d9..000000000 --- a/extensions/model-extension/src/legacy/download.ts +++ /dev/null @@ -1,105 +0,0 @@ -import { - downloadFile, - DownloadRequest, - fs, - GpuSetting, - InferenceEngine, - joinPath, - Model, -} from '@janhq/core' - -export const downloadModel = async ( - model: Model, - gpuSettings?: GpuSetting, - network?: { ignoreSSL?: boolean; proxy?: string } -): Promise => { - const homedir = 'file://models' - const supportedGpuArch = ['ampere', 'ada'] - // Create corresponding directory - const modelDirPath = await joinPath([homedir, model.id]) - if (!(await fs.existsSync(modelDirPath))) await fs.mkdir(modelDirPath) - - const jsonFilePath = await 
joinPath([modelDirPath, 'model.json']) - // Write model.json on download - if (!(await fs.existsSync(jsonFilePath))) - await fs.writeFileSync( - jsonFilePath, - JSON.stringify(model, null, 2) - ) - - if (model.engine === InferenceEngine.nitro_tensorrt_llm) { - if (!gpuSettings || gpuSettings.gpus.length === 0) { - console.error('No GPU found. Please check your GPU setting.') - return - } - const firstGpu = gpuSettings.gpus[0] - if (!firstGpu.name.toLowerCase().includes('nvidia')) { - console.error('No Nvidia GPU found. Please check your GPU setting.') - return - } - const gpuArch = firstGpu.arch - if (gpuArch === undefined) { - console.error('No GPU architecture found. Please check your GPU setting.') - return - } - - if (!supportedGpuArch.includes(gpuArch)) { - console.debug( - `Your GPU: ${JSON.stringify(firstGpu)} is not supported. Only 30xx, 40xx series are supported.` - ) - return - } - - const os = 'windows' // TODO: remove this hard coded value - - const newSources = model.sources.map((source) => { - const newSource = { ...source } - newSource.url = newSource.url - .replace(//g, os) - .replace(//g, gpuArch) - return newSource - }) - model.sources = newSources - } - - console.debug(`Download sources: ${JSON.stringify(model.sources)}`) - - if (model.sources.length > 1) { - // path to model binaries - for (const source of model.sources) { - let path = extractFileName(source.url, '.gguf') - if (source.filename) { - path = await joinPath([modelDirPath, source.filename]) - } - - const downloadRequest: DownloadRequest = { - url: source.url, - localPath: path, - modelId: model.id, - } - downloadFile(downloadRequest, network) - } - } else { - const fileName = extractFileName(model.sources[0]?.url, '.gguf') - const path = await joinPath([modelDirPath, fileName]) - const downloadRequest: DownloadRequest = { - url: model.sources[0]?.url, - localPath: path, - modelId: model.id, - } - downloadFile(downloadRequest, network) - } -} - -/** - * try to retrieve the download 
file name from the source url - */ -function extractFileName(url: string, fileExtension: string): string { - if (!url) return fileExtension - - const extractedFileName = url.split('/').pop() - const fileName = extractedFileName.toLowerCase().endsWith(fileExtension) - ? extractedFileName - : extractedFileName + fileExtension - return fileName -} diff --git a/extensions/model-extension/src/legacy/model-json.test.ts b/extensions/model-extension/src/legacy/model-json.test.ts index a4ea5bc0b..f90f13646 100644 --- a/extensions/model-extension/src/legacy/model-json.test.ts +++ b/extensions/model-extension/src/legacy/model-json.test.ts @@ -1,27 +1,31 @@ +import { describe, it, expect, beforeEach, vi } from 'vitest' import { scanModelsFolder, getModelJsonPath } from './model-json' // Mock the @janhq/core module -jest.mock('@janhq/core', () => ({ - fs: { - existsSync: jest.fn(), - readdirSync: jest.fn(), - fileStat: jest.fn(), - readFileSync: jest.fn(), +vi.mock('@janhq/core', () => ({ + InferenceEngine: { + nitro: 'nitro', }, - joinPath: jest.fn((paths) => paths.join('/')), + fs: { + existsSync: vi.fn(), + readdirSync: vi.fn(), + fileStat: vi.fn(), + readFileSync: vi.fn(), + }, + joinPath: vi.fn((paths) => paths.join('/')), })) // Import the mocked fs and joinPath after the mock is set up -const { fs } = jest.requireMock('@janhq/core') +import { fs } from '@janhq/core' describe('model-json', () => { beforeEach(() => { - jest.clearAllMocks() + vi.clearAllMocks() }) describe('scanModelsFolder', () => { it('should return an empty array when models folder does not exist', async () => { - fs.existsSync.mockReturnValue(false) + vi.spyOn(fs, 'existsSync').mockReturnValue(false) const result = await scanModelsFolder() expect(result).toEqual([]) @@ -38,11 +42,16 @@ describe('model-json', () => { ], } - fs.existsSync.mockReturnValue(true) - fs.readdirSync.mockReturnValueOnce(['test-model']) - fs.fileStat.mockResolvedValue({ isDirectory: () => true }) - 
fs.readFileSync.mockReturnValue(JSON.stringify(mockModelJson)) - fs.readdirSync.mockReturnValueOnce(['test-model.gguf', 'model.json']) + vi.spyOn(fs, 'existsSync').mockReturnValue(true) + vi.spyOn(fs, 'readdirSync').mockReturnValueOnce(['test-model']) + vi.spyOn(fs, 'fileStat').mockResolvedValue({ isDirectory: () => true }) + vi.spyOn(fs, 'readFileSync').mockReturnValue( + JSON.stringify(mockModelJson) + ) + vi.spyOn(fs, 'readdirSync').mockReturnValueOnce([ + 'test-model.gguf', + 'model.json', + ]) const result = await scanModelsFolder() expect(result).toHaveLength(1) @@ -52,26 +61,26 @@ describe('model-json', () => { describe('getModelJsonPath', () => { it('should return undefined when folder does not exist', async () => { - fs.existsSync.mockReturnValue(false) + vi.spyOn(fs, 'existsSync').mockReturnValue(false) const result = await getModelJsonPath('non-existent-folder') expect(result).toBeUndefined() }) it('should return the path when model.json exists in the root folder', async () => { - fs.existsSync.mockReturnValue(true) - fs.readdirSync.mockReturnValue(['model.json']) + vi.spyOn(fs, 'existsSync').mockReturnValue(true) + vi.spyOn(fs, 'readdirSync').mockReturnValue(['model.json']) const result = await getModelJsonPath('test-folder') expect(result).toBe('test-folder/model.json') }) it('should return the path when model.json exists in a subfolder', async () => { - fs.existsSync.mockReturnValue(true) - fs.readdirSync + vi.spyOn(fs, 'existsSync').mockReturnValue(true) + vi.spyOn(fs, 'readdirSync') .mockReturnValueOnce(['subfolder']) .mockReturnValueOnce(['model.json']) - fs.fileStat.mockResolvedValue({ isDirectory: () => true }) + vi.spyOn(fs, 'fileStat').mockResolvedValue({ isDirectory: () => true }) const result = await getModelJsonPath('test-folder') expect(result).toBe('test-folder/subfolder/model.json') diff --git a/extensions/model-extension/src/legacy/model-json.ts b/extensions/model-extension/src/legacy/model-json.ts index 03560cde2..e9f0d093b 100644 --- 
a/extensions/model-extension/src/legacy/model-json.ts +++ b/extensions/model-extension/src/legacy/model-json.ts @@ -112,7 +112,7 @@ export const scanModelsFolder = async (): Promise< } return undefined }) - .filter((e) => !!e) + .filter(Boolean) return modelData } catch (err) { diff --git a/extensions/model-extension/src/migration.test.ts b/extensions/model-extension/src/migration.test.ts index a3ddfa87c..fc7ebe8ba 100644 --- a/extensions/model-extension/src/migration.test.ts +++ b/extensions/model-extension/src/migration.test.ts @@ -1,48 +1,51 @@ -import { Model, InferenceEngine } from '@janhq/core' -import JanModelExtension from './index' +import { describe, it, expect, beforeEach, vi } from 'vitest' + +vi.stubGlobal('API_URL', 'http://localhost:3000') + // Mock the @janhq/core module -jest.mock('@janhq/core', () => ({ +vi.mock('@janhq/core', (actual) => ({ + ...actual, ModelExtension: class {}, InferenceEngine: { nitro: 'nitro', }, - joinPath: jest.fn(), - dirName: jest.fn(), + joinPath: vi.fn(), + dirName: vi.fn(), + fs: { + existsSync: vi.fn(), + readFileSync: vi.fn(), + writeFileSync: vi.fn(), + mkdirSync: vi.fn(), + }, })) -// Mock the CortexAPI -jest.mock('./cortex', () => ({ - CortexAPI: jest.fn().mockImplementation(() => ({ - getModels: jest.fn(), - importModel: jest.fn(), - })), -})) +import { Model, InferenceEngine } from '@janhq/core' + +import JanModelExtension from './index' // Mock the model-json module -jest.mock('./model-json', () => ({ - scanModelsFolder: jest.fn(), +vi.mock('./legacy/model-json', () => ({ + scanModelsFolder: vi.fn(), })) // Import the mocked scanModelsFolder after the mock is set up -const { scanModelsFolder } = jest.requireMock('./model-json') +import * as legacy from './legacy/model-json' describe('JanModelExtension', () => { let extension: JanModelExtension let mockLocalStorage: { [key: string]: string } - let mockCortexAPI: jest.Mock beforeEach(() => { // @ts-ignore extension = new JanModelExtension() mockLocalStorage = {} - 
mockCortexAPI = extension.cortexAPI as any // Mock localStorage Object.defineProperty(global, 'localStorage', { value: { - getItem: jest.fn((key) => mockLocalStorage[key]), - setItem: jest.fn((key, value) => { + getItem: vi.fn((key) => mockLocalStorage[key]), + setItem: vi.fn((key, value) => { mockLocalStorage[key] = value }), }, @@ -76,22 +79,13 @@ describe('JanModelExtension', () => { file_path: '/path/to/model2', }, ] as any - scanModelsFolder.mockResolvedValue(mockModels) - extension.cortexAPI.importModel = jest - .fn() - .mockResolvedValueOnce(mockModels[0]) - extension.cortexAPI.getModels = jest - .fn() - .mockResolvedValue([mockModels[0]]) - extension.cortexAPI.importModel = jest - .fn() - .mockResolvedValueOnce(mockModels[1]) - extension.cortexAPI.getModels = jest - .fn() - .mockResolvedValue([mockModels[0], mockModels[1]]) - + vi.mocked(legacy.scanModelsFolder).mockResolvedValue(mockModels) + vi.spyOn(extension, 'fetchModels').mockResolvedValue([mockModels[0]]) + vi.spyOn(extension, 'updateModel').mockResolvedValue(undefined) + vi.spyOn(extension, 'importModel').mockResolvedValueOnce(mockModels[1]) + vi.spyOn(extension, 'fetchModels').mockResolvedValue([mockModels[0], mockModels[1]]) const result = await extension.getModels() - expect(scanModelsFolder).toHaveBeenCalled() + expect(legacy.scanModelsFolder).toHaveBeenCalled() expect(result).toEqual(mockModels) }) @@ -121,9 +115,8 @@ describe('JanModelExtension', () => { }, ] as any mockLocalStorage['downloadedModels'] = JSON.stringify(mockModels) - - extension.cortexAPI.getModels = jest.fn().mockResolvedValue([]) - extension.importModel = jest.fn().mockResolvedValue(undefined) + vi.spyOn(extension, 'updateModel').mockResolvedValue(undefined) + vi.spyOn(extension, 'importModel').mockResolvedValue(undefined) const result = await extension.getModels() @@ -155,12 +148,12 @@ describe('JanModelExtension', () => { }, ] as any mockLocalStorage['downloadedModels'] = JSON.stringify(mockModels) - - 
extension.cortexAPI.getModels = jest.fn().mockResolvedValue(mockModels) + vi.spyOn(extension, 'fetchModels').mockResolvedValue(mockModels) + extension.getModels = vi.fn().mockResolvedValue(mockModels) const result = await extension.getModels() - expect(extension.cortexAPI.getModels).toHaveBeenCalled() + expect(extension.getModels).toHaveBeenCalled() expect(result).toEqual(mockModels) }) }) diff --git a/extensions/model-extension/tsconfig.json b/extensions/model-extension/tsconfig.json index 0d3252934..1d3c112d4 100644 --- a/extensions/model-extension/tsconfig.json +++ b/extensions/model-extension/tsconfig.json @@ -11,5 +11,5 @@ "rootDir": "./src" }, "include": ["./src"], - "exclude": ["**/*.test.ts"] + "exclude": ["**/*.test.ts", "vite.config.ts"] } diff --git a/extensions/model-extension/vite.config.ts b/extensions/model-extension/vite.config.ts new file mode 100644 index 000000000..a8ad5615f --- /dev/null +++ b/extensions/model-extension/vite.config.ts @@ -0,0 +1,8 @@ +import { defineConfig } from "vite" +export default defineConfig(({ mode }) => ({ + define: process.env.VITEST ? {} : { global: 'window' }, + test: { + environment: 'jsdom', + }, +})) + diff --git a/extensions/monitoring-extension/README.md b/extensions/monitoring-extension/README.md deleted file mode 100644 index f9690da09..000000000 --- a/extensions/monitoring-extension/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# Create a Jan Extension using Typescript - -Use this template to bootstrap the creation of a TypeScript Jan extension. 🚀 - -## Create Your Own Extension - -To create your own extension, you can use this repository as a template! Just follow the below instructions: - -1. Click the Use this template button at the top of the repository -2. Select Create a new repository -3. Select an owner and name for your new repository -4. Click Create repository -5. 
Clone your new repository - -## Initial Setup - -After you've cloned the repository to your local machine or codespace, you'll need to perform some initial setup steps before you can develop your extension. - -> [!NOTE] -> -> You'll need to have a reasonably modern version of -> [Node.js](https://nodejs.org) handy. If you are using a version manager like -> [`nodenv`](https://github.com/nodenv/nodenv) or -> [`nvm`](https://github.com/nvm-sh/nvm), you can run `nodenv install` in the -> root of your repository to install the version specified in -> [`package.json`](./package.json). Otherwise, 20.x or later should work! - -1. :hammer_and_wrench: Install the dependencies - - ```bash - npm install - ``` - -1. :building_construction: Package the TypeScript for distribution - - ```bash - npm run bundle - ``` - -1. :white_check_mark: Check your artifact - - There will be a tgz file in your extension directory now - -## Update the Extension Metadata - -The [`package.json`](package.json) file defines metadata about your extension, such as -extension name, main entry, description and version. - -When you copy this repository, update `package.json` with the name, description for your extension. - -## Update the Extension Code - -The [`src/`](./src/) directory is the heart of your extension! This contains the -source code that will be run when your extension functions are invoked. You can replace the -contents of this directory with your own code. - -There are a few things to keep in mind when writing your extension code: - -- Most Jan Extension functions are processed asynchronously. - In `index.ts`, you will see that the extension function will return a `Promise`. 
- - ```typescript - import { events, MessageEvent, MessageRequest } from '@janhq/core' - - function onStart(): Promise { - return events.on(MessageEvent.OnMessageSent, (data: MessageRequest) => - this.inference(data) - ) - } - ``` - - For more information about the Jan Extension Core module, see the - [documentation](https://github.com/janhq/jan/blob/main/core/README.md). - -So, what are you waiting for? Go ahead and start customizing your extension! diff --git a/extensions/monitoring-extension/bin/.gitkeep b/extensions/monitoring-extension/bin/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/extensions/monitoring-extension/download.bat b/extensions/monitoring-extension/download.bat deleted file mode 100644 index 14e0aadd9..000000000 --- a/extensions/monitoring-extension/download.bat +++ /dev/null @@ -1,2 +0,0 @@ -@echo off -.\node_modules\.bin\download https://catalog.jan.ai/vulkaninfoSDK.exe -o ./bin \ No newline at end of file diff --git a/extensions/monitoring-extension/package.json b/extensions/monitoring-extension/package.json deleted file mode 100644 index 2f827b41b..000000000 --- a/extensions/monitoring-extension/package.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "name": "@janhq/monitoring-extension", - "productName": "System Monitoring", - "version": "1.0.10", - "description": "Provides system health and OS level data.", - "main": "dist/index.js", - "node": "dist/node/index.cjs.js", - "author": "Jan ", - "license": "AGPL-3.0", - "scripts": { - "build": "rolldown -c rolldown.config.mjs && yarn download-artifacts", - "download-artifacts": "run-script-os && cpx \"bin/**\" \"dist/bin\"", - "download-artifacts:darwin": "echo 'No artifacts to download for darwin'", - "download-artifacts:win32": "download.bat", - "download-artifacts:linux": "download https://catalog.jan.ai/vulkaninfo -o ./bin && chmod +x ./bin/vulkaninfo", - "build:publish": "rimraf *.tgz --glob || true && yarn build && npm pack && cpx *.tgz ../../pre-install" - }, - 
"exports": { - ".": "./dist/index.js", - "./main": "./dist/node/index.cjs.js" - }, - "devDependencies": { - "@types/node": "^20.11.4", - "@types/node-os-utils": "^1.3.4", - "cpx": "^1.5.0", - "download-cli": "^1.1.1", - "rimraf": "^3.0.2", - "rolldown": "1.0.0-beta.1", - "run-script-os": "^1.1.6", - "typescript": "^5.3.3" - }, - "dependencies": { - "@janhq/core": "../../core/package.tgz", - "node-os-utils": "^1.3.7" - }, - "files": [ - "dist/*", - "package.json", - "README.md" - ], - "bundleDependencies": [ - "node-os-utils", - "@janhq/core" - ], - "installConfig": { - "hoistingLimits": "workspaces" - }, - "packageManager": "yarn@4.5.3" -} diff --git a/extensions/monitoring-extension/resources/settings.json b/extensions/monitoring-extension/resources/settings.json deleted file mode 100644 index 40b0b97f9..000000000 --- a/extensions/monitoring-extension/resources/settings.json +++ /dev/null @@ -1,22 +0,0 @@ -[ - { - "key": "log-enabled", - "title": "Enable App Logs", - "description": "Saves app logs locally on your computer. This enables you to send us crash reports.", - "controllerType": "checkbox", - "controllerProps": { - "value": true - } - }, - { - "key": "log-cleaning-interval", - "title": "Log Cleaning Interval", - "description": "Automatically delete local logs after a certain time interval (in milliseconds).", - "controllerType": "input", - "controllerProps": { - "value": "120000", - "placeholder": "Interval in milliseconds. E.g. 
120000", - "textAlign": "right" - } - } -] \ No newline at end of file diff --git a/extensions/monitoring-extension/rolldown.config.mjs b/extensions/monitoring-extension/rolldown.config.mjs deleted file mode 100644 index 3533e052b..000000000 --- a/extensions/monitoring-extension/rolldown.config.mjs +++ /dev/null @@ -1,32 +0,0 @@ -import { defineConfig } from 'rolldown' -import packageJson from './package.json' with { type: 'json' } -import settingJson from './resources/settings.json' with { type: 'json' } - -export default defineConfig([ - { - input: 'src/index.ts', - output: { - format: 'esm', - file: 'dist/index.js', - }, - platform: 'browser', - define: { - NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`), - SETTINGS: JSON.stringify(settingJson), - }, - }, - { - input: 'src/node/index.ts', - external: ['@janhq/core/node'], - output: { - format: 'cjs', - file: 'dist/node/index.cjs.js', - sourcemap: false, - inlineDynamicImports: true, - }, - resolve: { - extensions: ['.js', '.ts', '.json'], - }, - platform: 'node', - }, -]) diff --git a/extensions/monitoring-extension/src/@types/global.d.ts b/extensions/monitoring-extension/src/@types/global.d.ts deleted file mode 100644 index 7536fabd8..000000000 --- a/extensions/monitoring-extension/src/@types/global.d.ts +++ /dev/null @@ -1,19 +0,0 @@ -declare const NODE: string -declare const SETTINGS: SettingComponentProps[] - -type CpuGpuInfo = { - cpu: { - usage: number - } - gpu: GpuInfo[] -} - -type GpuInfo = { - id: string - name: string - temperature: string - utilization: string - memoryTotal: string - memoryFree: string - memoryUtilization: string -} diff --git a/extensions/monitoring-extension/src/index.ts b/extensions/monitoring-extension/src/index.ts deleted file mode 100644 index 5616c70a8..000000000 --- a/extensions/monitoring-extension/src/index.ts +++ /dev/null @@ -1,90 +0,0 @@ -import { - AppConfigurationEventName, - GpuSetting, - MonitoringExtension, - OperatingSystemInfo, - events, - 
executeOnMain, -} from '@janhq/core' - -enum Settings { - logEnabled = 'log-enabled', - logCleaningInterval = 'log-cleaning-interval', -} -/** - * JanMonitoringExtension is a extension that provides system monitoring functionality. - * It implements the MonitoringExtension interface from the @janhq/core package. - */ -export default class JanMonitoringExtension extends MonitoringExtension { - /** - * Called when the extension is loaded. - */ - async onLoad() { - // Register extension settings - this.registerSettings(SETTINGS) - - const logEnabled = await this.getSetting(Settings.logEnabled, true) - const logCleaningInterval = parseInt( - await this.getSetting(Settings.logCleaningInterval, '120000') - ) - // Register File Logger provided by this extension - await executeOnMain(NODE, 'registerLogger', { - logEnabled, - logCleaningInterval: isNaN(logCleaningInterval) - ? 120000 - : logCleaningInterval, - }) - - // Attempt to fetch nvidia info - await executeOnMain(NODE, 'updateNvidiaInfo') - events.emit(AppConfigurationEventName.OnConfigurationUpdate, {}) - } - - onSettingUpdate(key: string, value: T): void { - if (key === Settings.logEnabled) { - executeOnMain(NODE, 'updateLogger', { logEnabled: value }) - } else if (key === Settings.logCleaningInterval) { - executeOnMain(NODE, 'updateLogger', { logCleaningInterval: value }) - } - } - - /** - * Called when the extension is unloaded. - */ - onUnload(): void { - // Register File Logger provided by this extension - executeOnMain(NODE, 'unregisterLogger') - } - - /** - * Returns the GPU configuration. - * @returns A Promise that resolves to an object containing the GPU configuration. - */ - async getGpuSetting(): Promise { - return executeOnMain(NODE, 'getGpuConfig') - } - - /** - * Returns information about the system resources. - * @returns A Promise that resolves to an object containing information about the system resources. 
- */ - getResourcesInfo(): Promise { - return executeOnMain(NODE, 'getResourcesInfo') - } - - /** - * Returns information about the current system load. - * @returns A Promise that resolves to an object containing information about the current system load. - */ - getCurrentLoad(): Promise { - return executeOnMain(NODE, 'getCurrentLoad') - } - - /** - * Returns information about the OS - * @returns - */ - getOsInfo(): Promise { - return executeOnMain(NODE, 'getOsInfo') - } -} diff --git a/extensions/monitoring-extension/src/node/index.ts b/extensions/monitoring-extension/src/node/index.ts deleted file mode 100644 index e32f85082..000000000 --- a/extensions/monitoring-extension/src/node/index.ts +++ /dev/null @@ -1,389 +0,0 @@ -import { - GpuSetting, - GpuSettingInfo, - LoggerManager, - OperatingSystemInfo, - ResourceInfo, - SupportedPlatforms, - getJanDataFolderPath, - log, -} from '@janhq/core/node' -import { mem, cpu } from 'node-os-utils' -import { exec } from 'child_process' -import { writeFileSync, existsSync, readFileSync, mkdirSync } from 'fs' -import path from 'path' -import os from 'os' -import { FileLogger } from './logger' - -/** - * Path to the settings directory - **/ -export const SETTINGS_DIR = path.join(getJanDataFolderPath(), 'settings') -/** - * Path to the settings file - **/ -export const GPU_INFO_FILE = path.join(SETTINGS_DIR, 'settings.json') - -/** - * Default GPU settings - * TODO: This needs to be refactored to support multiple accelerators - **/ -const DEFAULT_SETTINGS: GpuSetting = { - notify: true, - run_mode: 'cpu', - nvidia_driver: { - exist: false, - version: '', - }, - cuda: { - exist: false, - version: '', - }, - gpus: [], - gpu_highest_vram: '', - gpus_in_use: [], - is_initial: true, - // TODO: This needs to be set based on user toggle in settings - vulkan: false, -} - -export const getGpuConfig = async (): Promise => { - if (process.platform === 'darwin') return undefined - if (existsSync(GPU_INFO_FILE)) - return 
JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8')) - return DEFAULT_SETTINGS -} - -export const getResourcesInfo = async (): Promise => { - const ramUsedInfo = await mem.used() - const totalMemory = ramUsedInfo.totalMemMb * 1024 * 1024 - const usedMemory = ramUsedInfo.usedMemMb * 1024 * 1024 - - const resourceInfo: ResourceInfo = { - mem: { - totalMemory, - usedMemory, - }, - } - - return resourceInfo -} - -export const getCurrentLoad = () => - new Promise(async (resolve, reject) => { - const cpuPercentage = await cpu.usage() - let data = { - run_mode: 'cpu', - gpus_in_use: [], - } - - if (process.platform !== 'darwin') { - data = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8')) - } - - if (data.run_mode === 'gpu' && data.gpus_in_use.length > 0) { - const gpuIds = data.gpus_in_use.join(',') - if (gpuIds !== '' && data['vulkan'] !== true) { - exec( - `nvidia-smi --query-gpu=index,name,temperature.gpu,utilization.gpu,memory.total,memory.free,utilization.memory --format=csv,noheader,nounits --id=${gpuIds}`, - (error, stdout, _) => { - if (error) { - console.error(`exec error: ${error}`) - throw new Error(error.message) - } - const gpuInfo: GpuInfo[] = stdout - .trim() - .split('\n') - .map((line) => { - const [ - id, - name, - temperature, - utilization, - memoryTotal, - memoryFree, - memoryUtilization, - ] = line.split(', ').map((item) => item.replace(/\r/g, '')) - return { - id, - name, - temperature, - utilization, - memoryTotal, - memoryFree, - memoryUtilization, - } - }) - - resolve({ - cpu: { usage: cpuPercentage }, - gpu: gpuInfo, - }) - } - ) - } else { - // Handle the case where gpuIds is empty - resolve({ - cpu: { usage: cpuPercentage }, - gpu: [], - }) - } - } else { - // Handle the case where run_mode is not 'gpu' or no GPUs are in use - resolve({ - cpu: { usage: cpuPercentage }, - gpu: [], - }) - } - }) - -/** - * This will retrieve GPU information and persist settings.json - * Will be called when the extension is loaded to turn on GPU acceleration if 
supported - */ -export const updateNvidiaInfo = async () => { - // ignore if macos - if (process.platform === 'darwin') return - - try { - JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8')) - } catch (error) { - if (!existsSync(SETTINGS_DIR)) { - mkdirSync(SETTINGS_DIR, { - recursive: true, - }) - } - writeFileSync(GPU_INFO_FILE, JSON.stringify(DEFAULT_SETTINGS, null, 2)) - } - - await updateNvidiaDriverInfo() - await updateGpuInfo() -} - -const updateNvidiaDriverInfo = async () => - new Promise((resolve, reject) => { - exec( - 'nvidia-smi --query-gpu=driver_version --format=csv,noheader', - (error, stdout) => { - const data: GpuSetting = JSON.parse( - readFileSync(GPU_INFO_FILE, 'utf-8') - ) - - if (!error) { - const firstLine = stdout.split('\n')[0].trim() - data.nvidia_driver.exist = true - data.nvidia_driver.version = firstLine - } else { - data.nvidia_driver.exist = false - } - - writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2)) - resolve({}) - } - ) - }) - -const getGpuArch = (gpuName: string): string => { - if (!gpuName.toLowerCase().includes('nvidia')) return 'unknown' - - if (gpuName.includes('30')) return 'ampere' - else if (gpuName.includes('40')) return 'ada' - else return 'unknown' -} - -const updateGpuInfo = async () => - new Promise((resolve, reject) => { - let data: GpuSetting = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8')) - - // Cuda - if (data.vulkan === true) { - // Vulkan - exec( - process.platform === 'win32' - ? 
`${__dirname}\\..\\bin\\vulkaninfoSDK.exe --summary` - : `${__dirname}/../bin/vulkaninfo --summary`, - async (error, stdout) => { - if (!error) { - const output = stdout.toString() - - log(output) - const gpuRegex = /GPU(\d+):(?:[\s\S]*?)deviceName\s*=\s*(.*)/g - - const gpus: GpuSettingInfo[] = [] - let match - while ((match = gpuRegex.exec(output)) !== null) { - const id = match[1] - const name = match[2] - const arch = getGpuArch(name) - gpus.push({ id, vram: '0', name, arch }) - } - data.gpus = gpus - - if (!data.gpus_in_use || data.gpus_in_use.length === 0) { - data.gpus_in_use = [data.gpus.length > 1 ? '1' : '0'] - } - - data = await updateCudaExistence(data) - writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2)) - log(`[APP]::${JSON.stringify(data)}`) - resolve({}) - } else { - reject(error) - } - } - ) - } else { - exec( - 'nvidia-smi --query-gpu=index,memory.total,name --format=csv,noheader,nounits', - async (error, stdout) => { - if (!error) { - log(`[SPECS]::${stdout}`) - // Get GPU info and gpu has higher memory first - let highestVram = 0 - let highestVramId = '0' - const gpus: GpuSettingInfo[] = stdout - .trim() - .split('\n') - .map((line) => { - let [id, vram, name] = line.split(', ') - const arch = getGpuArch(name) - vram = vram.replace(/\r/g, '') - if (parseFloat(vram) > highestVram) { - highestVram = parseFloat(vram) - highestVramId = id - } - return { id, vram, name, arch } - }) - - data.gpus = gpus - data.gpu_highest_vram = highestVramId - } else { - data.gpus = [] - data.gpu_highest_vram = undefined - } - - if (!data.gpus_in_use || data.gpus_in_use.length === 0) { - data.gpus_in_use = data.gpu_highest_vram ? 
[data.gpu_highest_vram].filter(e => !!e) : [] - } - - data = await updateCudaExistence(data) - console.log('[MONITORING]::Cuda info: ', data) - writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2)) - log(`[APP]::${JSON.stringify(data)}`) - resolve({}) - } - ) - } - }) - -/** - * Check if file exists in paths - */ -const checkFileExistenceInPaths = (file: string, paths: string[]): boolean => { - return paths.some((p) => existsSync(path.join(p, file))) -} - -/** - * Validate cuda for linux and windows - */ -const updateCudaExistence = async ( - data: GpuSetting = DEFAULT_SETTINGS -): Promise => { - let filesCuda12: string[] - let filesCuda11: string[] - let paths: string[] - let cudaVersion: string = '' - - if (process.platform === 'win32') { - filesCuda12 = ['cublas64_12.dll', 'cudart64_12.dll', 'cublasLt64_12.dll'] - filesCuda11 = ['cublas64_11.dll', 'cudart64_110.dll', 'cublasLt64_11.dll'] - paths = process.env.PATH ? process.env.PATH.split(path.delimiter) : [] - } else { - filesCuda12 = ['libcudart.so.12', 'libcublas.so.12', 'libcublasLt.so.12'] - filesCuda11 = ['libcudart.so.11.0', 'libcublas.so.11', 'libcublasLt.so.11'] - paths = process.env.LD_LIBRARY_PATH - ? 
process.env.LD_LIBRARY_PATH.split(path.delimiter) - : [] - paths.push('/usr/lib/x86_64-linux-gnu/') - } - - let cudaExists = filesCuda12.every( - (file) => existsSync(file) || checkFileExistenceInPaths(file, paths) - ) - - if (!cudaExists) { - cudaExists = filesCuda11.every( - (file) => existsSync(file) || checkFileExistenceInPaths(file, paths) - ) - if (cudaExists) { - cudaVersion = '11' - } - } else { - cudaVersion = '12' - } - - data.cuda.exist = cudaExists - data.cuda.version = cudaVersion - - console.debug(data.is_initial, data.gpus_in_use) - - if (cudaExists && data.is_initial && data.gpus_in_use.length > 0) { - data.run_mode = 'gpu' - } - - data.is_initial = false - - // Attempt to query CUDA using NVIDIA SMI - if (!cudaExists) { - await new Promise((resolve) => { - exec('nvidia-smi', (error, stdout) => { - if (!error) { - const regex = /CUDA\s*Version:\s*(\d+\.\d+)/g - const match = regex.exec(stdout) - if (match && match[1]) { - data.cuda.version = match[1] - } - } - console.log('[MONITORING]::Finalized cuda info update: ', data) - resolve() - }) - }) - } - return data -} - -export const getOsInfo = (): OperatingSystemInfo => { - const platform = - SupportedPlatforms.find((p) => p === process.platform) || 'unknown' - - const osInfo: OperatingSystemInfo = { - platform: platform, - arch: process.arch, - release: os.release(), - machine: os.machine(), - version: os.version(), - totalMem: os.totalmem(), - freeMem: os.freemem(), - } - - return osInfo -} - -export const registerLogger = ({ logEnabled, logCleaningInterval }) => { - const logger = new FileLogger(logEnabled, logCleaningInterval) - LoggerManager.instance().register(logger) - logger.cleanLogs() -} - -export const unregisterLogger = () => { - LoggerManager.instance().unregister('file') -} - -export const updateLogger = ({ logEnabled, logCleaningInterval }) => { - const logger = LoggerManager.instance().loggers.get('file') as FileLogger - if (logger && logEnabled !== undefined) logger.logEnabled = 
logEnabled - if (logger && logCleaningInterval) - logger.logCleaningInterval = logCleaningInterval - // Rerun - logger && logger.cleanLogs() -} diff --git a/extensions/yarn.lock b/extensions/yarn.lock index 1aaa51bb5..b87f2b047 100644 --- a/extensions/yarn.lock +++ b/extensions/yarn.lock @@ -429,6 +429,181 @@ __metadata: languageName: node linkType: hard +"@esbuild/aix-ppc64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/aix-ppc64@npm:0.24.2" + conditions: os=aix & cpu=ppc64 + languageName: node + linkType: hard + +"@esbuild/android-arm64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/android-arm64@npm:0.24.2" + conditions: os=android & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/android-arm@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/android-arm@npm:0.24.2" + conditions: os=android & cpu=arm + languageName: node + linkType: hard + +"@esbuild/android-x64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/android-x64@npm:0.24.2" + conditions: os=android & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/darwin-arm64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/darwin-arm64@npm:0.24.2" + conditions: os=darwin & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/darwin-x64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/darwin-x64@npm:0.24.2" + conditions: os=darwin & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/freebsd-arm64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/freebsd-arm64@npm:0.24.2" + conditions: os=freebsd & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/freebsd-x64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/freebsd-x64@npm:0.24.2" + conditions: os=freebsd & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/linux-arm64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/linux-arm64@npm:0.24.2" + conditions: os=linux & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/linux-arm@npm:0.24.2": + 
version: 0.24.2 + resolution: "@esbuild/linux-arm@npm:0.24.2" + conditions: os=linux & cpu=arm + languageName: node + linkType: hard + +"@esbuild/linux-ia32@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/linux-ia32@npm:0.24.2" + conditions: os=linux & cpu=ia32 + languageName: node + linkType: hard + +"@esbuild/linux-loong64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/linux-loong64@npm:0.24.2" + conditions: os=linux & cpu=loong64 + languageName: node + linkType: hard + +"@esbuild/linux-mips64el@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/linux-mips64el@npm:0.24.2" + conditions: os=linux & cpu=mips64el + languageName: node + linkType: hard + +"@esbuild/linux-ppc64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/linux-ppc64@npm:0.24.2" + conditions: os=linux & cpu=ppc64 + languageName: node + linkType: hard + +"@esbuild/linux-riscv64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/linux-riscv64@npm:0.24.2" + conditions: os=linux & cpu=riscv64 + languageName: node + linkType: hard + +"@esbuild/linux-s390x@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/linux-s390x@npm:0.24.2" + conditions: os=linux & cpu=s390x + languageName: node + linkType: hard + +"@esbuild/linux-x64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/linux-x64@npm:0.24.2" + conditions: os=linux & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/netbsd-arm64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/netbsd-arm64@npm:0.24.2" + conditions: os=netbsd & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/netbsd-x64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/netbsd-x64@npm:0.24.2" + conditions: os=netbsd & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/openbsd-arm64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/openbsd-arm64@npm:0.24.2" + conditions: os=openbsd & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/openbsd-x64@npm:0.24.2": + version: 0.24.2 + resolution: 
"@esbuild/openbsd-x64@npm:0.24.2" + conditions: os=openbsd & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/sunos-x64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/sunos-x64@npm:0.24.2" + conditions: os=sunos & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/win32-arm64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/win32-arm64@npm:0.24.2" + conditions: os=win32 & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/win32-ia32@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/win32-ia32@npm:0.24.2" + conditions: os=win32 & cpu=ia32 + languageName: node + linkType: hard + +"@esbuild/win32-x64@npm:0.24.2": + version: 0.24.2 + resolution: "@esbuild/win32-x64@npm:0.24.2" + conditions: os=win32 & cpu=x64 + languageName: node + linkType: hard + "@isaacs/cliui@npm:^8.0.2": version: 8.0.2 resolution: "@isaacs/cliui@npm:8.0.2" @@ -509,67 +684,84 @@ __metadata: "@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension": version: 0.1.10 - resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=5eb526&locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension" + resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=704042&locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension" dependencies: rxjs: "npm:^7.8.1" ulidx: "npm:^2.3.0" - checksum: 10c0/e53df943c345a1496d45d86e65bf40cf0fe0dd716ac1c1753453bad6877f36035a4fb305cb5e1690c18d426609ba125d1370304c7399fd4abac760e09fef2c52 + checksum: 10c0/4c53e86e66a5aa291b56a7257a90b31cd06e624d56a1d114d05b2bed46eaa39da5d9ebc5a86131867b2ebda51089b09bdd8a0ed97f329630e1d35d3463e1ba37 languageName: node linkType: hard "@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension": version: 0.1.10 - resolution: 
"@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=5eb526&locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension" + resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=704042&locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension" dependencies: rxjs: "npm:^7.8.1" ulidx: "npm:^2.3.0" - checksum: 10c0/e53df943c345a1496d45d86e65bf40cf0fe0dd716ac1c1753453bad6877f36035a4fb305cb5e1690c18d426609ba125d1370304c7399fd4abac760e09fef2c52 + checksum: 10c0/4c53e86e66a5aa291b56a7257a90b31cd06e624d56a1d114d05b2bed46eaa39da5d9ebc5a86131867b2ebda51089b09bdd8a0ed97f329630e1d35d3463e1ba37 languageName: node linkType: hard "@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fengine-management-extension%40workspace%3Aengine-management-extension": version: 0.1.10 - resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=5eb526&locator=%40janhq%2Fengine-management-extension%40workspace%3Aengine-management-extension" + resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=704042&locator=%40janhq%2Fengine-management-extension%40workspace%3Aengine-management-extension" dependencies: rxjs: "npm:^7.8.1" ulidx: "npm:^2.3.0" - checksum: 10c0/e53df943c345a1496d45d86e65bf40cf0fe0dd716ac1c1753453bad6877f36035a4fb305cb5e1690c18d426609ba125d1370304c7399fd4abac760e09fef2c52 + checksum: 10c0/4c53e86e66a5aa291b56a7257a90b31cd06e624d56a1d114d05b2bed46eaa39da5d9ebc5a86131867b2ebda51089b09bdd8a0ed97f329630e1d35d3463e1ba37 + languageName: node + linkType: hard + +"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fhardware-management-extension%40workspace%3Ahardware-management-extension": + version: 0.1.10 + resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=704042&locator=%40janhq%2Fhardware-management-extension%40workspace%3Ahardware-management-extension" + dependencies: + rxjs: "npm:^7.8.1" + ulidx: 
"npm:^2.3.0" + checksum: 10c0/4c53e86e66a5aa291b56a7257a90b31cd06e624d56a1d114d05b2bed46eaa39da5d9ebc5a86131867b2ebda51089b09bdd8a0ed97f329630e1d35d3463e1ba37 languageName: node linkType: hard "@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-cortex-extension%40workspace%3Ainference-cortex-extension": version: 0.1.10 - resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=5eb526&locator=%40janhq%2Finference-cortex-extension%40workspace%3Ainference-cortex-extension" + resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=704042&locator=%40janhq%2Finference-cortex-extension%40workspace%3Ainference-cortex-extension" dependencies: rxjs: "npm:^7.8.1" ulidx: "npm:^2.3.0" - checksum: 10c0/e53df943c345a1496d45d86e65bf40cf0fe0dd716ac1c1753453bad6877f36035a4fb305cb5e1690c18d426609ba125d1370304c7399fd4abac760e09fef2c52 + checksum: 10c0/4c53e86e66a5aa291b56a7257a90b31cd06e624d56a1d114d05b2bed46eaa39da5d9ebc5a86131867b2ebda51089b09bdd8a0ed97f329630e1d35d3463e1ba37 languageName: node linkType: hard "@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fmodel-extension%40workspace%3Amodel-extension": version: 0.1.10 - resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=5eb526&locator=%40janhq%2Fmodel-extension%40workspace%3Amodel-extension" + resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=704042&locator=%40janhq%2Fmodel-extension%40workspace%3Amodel-extension" dependencies: rxjs: "npm:^7.8.1" ulidx: "npm:^2.3.0" - checksum: 10c0/e53df943c345a1496d45d86e65bf40cf0fe0dd716ac1c1753453bad6877f36035a4fb305cb5e1690c18d426609ba125d1370304c7399fd4abac760e09fef2c52 - languageName: node - linkType: hard - -"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fmonitoring-extension%40workspace%3Amonitoring-extension": - version: 0.1.10 - resolution: 
"@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=5eb526&locator=%40janhq%2Fmonitoring-extension%40workspace%3Amonitoring-extension" - dependencies: - rxjs: "npm:^7.8.1" - ulidx: "npm:^2.3.0" - checksum: 10c0/e53df943c345a1496d45d86e65bf40cf0fe0dd716ac1c1753453bad6877f36035a4fb305cb5e1690c18d426609ba125d1370304c7399fd4abac760e09fef2c52 + checksum: 10c0/4c53e86e66a5aa291b56a7257a90b31cd06e624d56a1d114d05b2bed46eaa39da5d9ebc5a86131867b2ebda51089b09bdd8a0ed97f329630e1d35d3463e1ba37 languageName: node linkType: hard "@janhq/engine-management-extension@workspace:engine-management-extension": version: 0.0.0-use.local resolution: "@janhq/engine-management-extension@workspace:engine-management-extension" + dependencies: + "@janhq/core": ../../core/package.tgz + cpx: "npm:^1.5.0" + ky: "npm:^1.7.2" + p-queue: "npm:^8.0.1" + rimraf: "npm:^3.0.2" + rolldown: "npm:^1.0.0-beta.1" + run-script-os: "npm:^1.1.6" + ts-loader: "npm:^9.5.0" + typescript: "npm:^5.3.3" + vitest: "npm:^3.0.6" + languageName: unknown + linkType: soft + +"@janhq/hardware-management-extension@workspace:hardware-management-extension": + version: 0.0.0-use.local + resolution: "@janhq/hardware-management-extension@workspace:hardware-management-extension" dependencies: "@janhq/core": ../../core/package.tgz cpu-instructions: "npm:^0.0.13" @@ -596,7 +788,6 @@ __metadata: "@types/os-utils": "npm:^0.0.4" "@types/tcp-port-used": "npm:^1.0.4" cpx: "npm:^1.5.0" - decompress: "npm:^4.2.1" download-cli: "npm:^1.1.1" fetch-retry: "npm:^5.0.6" jest: "npm:^29.7.0" @@ -606,11 +797,10 @@ __metadata: rolldown: "npm:1.0.0-beta.1" run-script-os: "npm:^1.1.6" rxjs: "npm:^7.8.1" - tcp-port-used: "npm:^1.0.2" - terminate: "npm:2.6.1" ts-jest: "npm:^29.1.2" typescript: "npm:^5.3.3" ulidx: "npm:^2.3.0" + vitest: "npm:^3.0.8" languageName: unknown linkType: soft @@ -625,25 +815,8 @@ __metadata: rimraf: "npm:^3.0.2" rolldown: "npm:1.0.0-beta.1" run-script-os: "npm:^1.1.6" - ts-loader: "npm:^9.5.0" typescript: 
"npm:5.3.3" - languageName: unknown - linkType: soft - -"@janhq/monitoring-extension@workspace:monitoring-extension": - version: 0.0.0-use.local - resolution: "@janhq/monitoring-extension@workspace:monitoring-extension" - dependencies: - "@janhq/core": ../../core/package.tgz - "@types/node": "npm:^20.11.4" - "@types/node-os-utils": "npm:^1.3.4" - cpx: "npm:^1.5.0" - download-cli: "npm:^1.1.1" - node-os-utils: "npm:^1.3.7" - rimraf: "npm:^3.0.2" - rolldown: "npm:1.0.0-beta.1" - run-script-os: "npm:^1.1.6" - typescript: "npm:^5.3.3" + vitest: "npm:^3.0.6" languageName: unknown linkType: soft @@ -902,7 +1075,7 @@ __metadata: languageName: node linkType: hard -"@jridgewell/sourcemap-codec@npm:^1.4.10, @jridgewell/sourcemap-codec@npm:^1.4.14": +"@jridgewell/sourcemap-codec@npm:^1.4.10, @jridgewell/sourcemap-codec@npm:^1.4.14, @jridgewell/sourcemap-codec@npm:^1.5.0": version: 1.5.0 resolution: "@jridgewell/sourcemap-codec@npm:1.5.0" checksum: 10c0/2eb864f276eb1096c3c11da3e9bb518f6d9fc0023c78344cdc037abadc725172c70314bdb360f2d4b7bffec7f5d657ce006816bc5d4ecb35e61b66132db00c18 @@ -1730,6 +1903,139 @@ __metadata: languageName: node linkType: hard +"@rollup/rollup-android-arm-eabi@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-android-arm-eabi@npm:4.34.8" + conditions: os=android & cpu=arm + languageName: node + linkType: hard + +"@rollup/rollup-android-arm64@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-android-arm64@npm:4.34.8" + conditions: os=android & cpu=arm64 + languageName: node + linkType: hard + +"@rollup/rollup-darwin-arm64@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-darwin-arm64@npm:4.34.8" + conditions: os=darwin & cpu=arm64 + languageName: node + linkType: hard + +"@rollup/rollup-darwin-x64@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-darwin-x64@npm:4.34.8" + conditions: os=darwin & cpu=x64 + languageName: node + linkType: hard + +"@rollup/rollup-freebsd-arm64@npm:4.34.8": + version: 4.34.8 + 
resolution: "@rollup/rollup-freebsd-arm64@npm:4.34.8" + conditions: os=freebsd & cpu=arm64 + languageName: node + linkType: hard + +"@rollup/rollup-freebsd-x64@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-freebsd-x64@npm:4.34.8" + conditions: os=freebsd & cpu=x64 + languageName: node + linkType: hard + +"@rollup/rollup-linux-arm-gnueabihf@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-linux-arm-gnueabihf@npm:4.34.8" + conditions: os=linux & cpu=arm & libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-arm-musleabihf@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-linux-arm-musleabihf@npm:4.34.8" + conditions: os=linux & cpu=arm & libc=musl + languageName: node + linkType: hard + +"@rollup/rollup-linux-arm64-gnu@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-linux-arm64-gnu@npm:4.34.8" + conditions: os=linux & cpu=arm64 & libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-arm64-musl@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-linux-arm64-musl@npm:4.34.8" + conditions: os=linux & cpu=arm64 & libc=musl + languageName: node + linkType: hard + +"@rollup/rollup-linux-loongarch64-gnu@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-linux-loongarch64-gnu@npm:4.34.8" + conditions: os=linux & cpu=loong64 & libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-powerpc64le-gnu@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-linux-powerpc64le-gnu@npm:4.34.8" + conditions: os=linux & cpu=ppc64 & libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-riscv64-gnu@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-linux-riscv64-gnu@npm:4.34.8" + conditions: os=linux & cpu=riscv64 & libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-s390x-gnu@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-linux-s390x-gnu@npm:4.34.8" + conditions: os=linux & cpu=s390x & 
libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-x64-gnu@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-linux-x64-gnu@npm:4.34.8" + conditions: os=linux & cpu=x64 & libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-x64-musl@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-linux-x64-musl@npm:4.34.8" + conditions: os=linux & cpu=x64 & libc=musl + languageName: node + linkType: hard + +"@rollup/rollup-win32-arm64-msvc@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-win32-arm64-msvc@npm:4.34.8" + conditions: os=win32 & cpu=arm64 + languageName: node + linkType: hard + +"@rollup/rollup-win32-ia32-msvc@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-win32-ia32-msvc@npm:4.34.8" + conditions: os=win32 & cpu=ia32 + languageName: node + linkType: hard + +"@rollup/rollup-win32-x64-msvc@npm:4.34.8": + version: 4.34.8 + resolution: "@rollup/rollup-win32-x64-msvc@npm:4.34.8" + conditions: os=win32 & cpu=x64 + languageName: node + linkType: hard + "@sinclair/typebox@npm:^0.27.8": version: 0.27.8 resolution: "@sinclair/typebox@npm:0.27.8" @@ -1814,6 +2120,13 @@ __metadata: languageName: node linkType: hard +"@types/estree@npm:1.0.6, @types/estree@npm:^1.0.0": + version: 1.0.6 + resolution: "@types/estree@npm:1.0.6" + checksum: 10c0/cdfd751f6f9065442cd40957c07fd80361c962869aa853c1c2fd03e101af8b9389d8ff4955a43a6fcfa223dd387a089937f95be0f3eec21ca527039fd2d9859a + languageName: node + linkType: hard + "@types/graceful-fs@npm:^4.1.3": version: 4.1.9 resolution: "@types/graceful-fs@npm:4.1.9" @@ -1877,13 +2190,6 @@ __metadata: languageName: node linkType: hard -"@types/node-os-utils@npm:^1.3.4": - version: 1.3.4 - resolution: "@types/node-os-utils@npm:1.3.4" - checksum: 10c0/d57bfa84862ee388f538e2bf38b5a6e6a555aebf6e50573ad5700f5858f657ee72388833aa7ed6c9d0b68ce0a6763802366326617b0d5f4d56cc3fe61dd617e1 - languageName: node - linkType: hard - "@types/node@npm:*": version: 22.10.2 
resolution: "@types/node@npm:22.10.2" @@ -1985,6 +2291,168 @@ __metadata: languageName: node linkType: hard +"@vitest/expect@npm:3.0.6": + version: 3.0.6 + resolution: "@vitest/expect@npm:3.0.6" + dependencies: + "@vitest/spy": "npm:3.0.6" + "@vitest/utils": "npm:3.0.6" + chai: "npm:^5.2.0" + tinyrainbow: "npm:^2.0.0" + checksum: 10c0/1273d80d3f523dd390016d89c037e6088688342cc1961f1b0b8b54103f94212c7f6efa275c263fbcfc77e1adcf0fc9faa7285782b85eb4fe49a3bc999e7a61d4 + languageName: node + linkType: hard + +"@vitest/expect@npm:3.0.8": + version: 3.0.8 + resolution: "@vitest/expect@npm:3.0.8" + dependencies: + "@vitest/spy": "npm:3.0.8" + "@vitest/utils": "npm:3.0.8" + chai: "npm:^5.2.0" + tinyrainbow: "npm:^2.0.0" + checksum: 10c0/48aebec816f5a1b1f64f82b474ccfba537801a654f9547c581ed1c2d30b5de72207b643d3db2ac2869809a63a585425df30f65481f86d2bbbf979d8f235661bd + languageName: node + linkType: hard + +"@vitest/mocker@npm:3.0.6": + version: 3.0.6 + resolution: "@vitest/mocker@npm:3.0.6" + dependencies: + "@vitest/spy": "npm:3.0.6" + estree-walker: "npm:^3.0.3" + magic-string: "npm:^0.30.17" + peerDependencies: + msw: ^2.4.9 + vite: ^5.0.0 || ^6.0.0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + checksum: 10c0/41911fbdf2c6afe099aa8d039079495dfd3dec2cd13e660fbc43488457181065c043d889ed17395bbc76e29e7253bcffbe9ad6a2fb407be33929470089e0b06b + languageName: node + linkType: hard + +"@vitest/mocker@npm:3.0.8": + version: 3.0.8 + resolution: "@vitest/mocker@npm:3.0.8" + dependencies: + "@vitest/spy": "npm:3.0.8" + estree-walker: "npm:^3.0.3" + magic-string: "npm:^0.30.17" + peerDependencies: + msw: ^2.4.9 + vite: ^5.0.0 || ^6.0.0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + checksum: 10c0/bc89a31a5ebba900bb965b05d1fab581ae2872b6ddc17734f2a8433b9a3c7ae1fa0efd5f13bf03cf8075864b47954e8fcf609cf3a8258f0451375d68b81f135b + languageName: node + linkType: hard + +"@vitest/pretty-format@npm:3.0.6, @vitest/pretty-format@npm:^3.0.6": + 
version: 3.0.6 + resolution: "@vitest/pretty-format@npm:3.0.6" + dependencies: + tinyrainbow: "npm:^2.0.0" + checksum: 10c0/339b47598f2c77da0d0b7d373c2ceb94995d6154cd30b7de778bbf396d21c570de0be765f1d66793d2a30a6cc673a471be45f093a074acb8a1a71d7665713dd9 + languageName: node + linkType: hard + +"@vitest/pretty-format@npm:3.0.8, @vitest/pretty-format@npm:^3.0.8": + version: 3.0.8 + resolution: "@vitest/pretty-format@npm:3.0.8" + dependencies: + tinyrainbow: "npm:^2.0.0" + checksum: 10c0/9133052605f16966db91d5e495afb5e32c3eb9215602248710bc3fd9034b1b511d1a7f1093571afee8664beb2a83303d42f1d5896fdba2a39adbb5ca9af788f7 + languageName: node + linkType: hard + +"@vitest/runner@npm:3.0.6": + version: 3.0.6 + resolution: "@vitest/runner@npm:3.0.6" + dependencies: + "@vitest/utils": "npm:3.0.6" + pathe: "npm:^2.0.3" + checksum: 10c0/a20cd27d6c91947866b35080db7b8f2fc4568c62878d4175cad38914c2bb769c49791be8601d5ffe27c80cefbc0310e6c0b4256c621581daecdc508d60270d31 + languageName: node + linkType: hard + +"@vitest/runner@npm:3.0.8": + version: 3.0.8 + resolution: "@vitest/runner@npm:3.0.8" + dependencies: + "@vitest/utils": "npm:3.0.8" + pathe: "npm:^2.0.3" + checksum: 10c0/9a9d48dc82ca7101209b21309e18a4720e77d6015bf00a60ace6130e362320158d110f48cf9aa221e5e744729fe8a198811dd69e598688ffbb78c2fce2a842a1 + languageName: node + linkType: hard + +"@vitest/snapshot@npm:3.0.6": + version: 3.0.6 + resolution: "@vitest/snapshot@npm:3.0.6" + dependencies: + "@vitest/pretty-format": "npm:3.0.6" + magic-string: "npm:^0.30.17" + pathe: "npm:^2.0.3" + checksum: 10c0/9baf575d23ef262de6ff180dca156ccd327c02a507d8380b3d59d3b714e3754c45aa588aaa57e3a115cec572a5dd552b8613736d14ac3759b98e068bfe220bed + languageName: node + linkType: hard + +"@vitest/snapshot@npm:3.0.8": + version: 3.0.8 + resolution: "@vitest/snapshot@npm:3.0.8" + dependencies: + "@vitest/pretty-format": "npm:3.0.8" + magic-string: "npm:^0.30.17" + pathe: "npm:^2.0.3" + checksum: 
10c0/40564f60f7d166d10a03e9d1f8780daef164c76b2d85c1c8f5800168f907929c815395ac5c1f5c824da5ff29286f874e22dd8874b52044a53e0d858be67ceeb7 + languageName: node + linkType: hard + +"@vitest/spy@npm:3.0.6": + version: 3.0.6 + resolution: "@vitest/spy@npm:3.0.6" + dependencies: + tinyspy: "npm:^3.0.2" + checksum: 10c0/575cf28a370b9f9909e54578460a14234eddf449621b0d28f0fb22b872d2c5302c7ea7df39b680836efc729a1290fa562eee129cef73c5223dfe5b58e6a13b1b + languageName: node + linkType: hard + +"@vitest/spy@npm:3.0.8": + version: 3.0.8 + resolution: "@vitest/spy@npm:3.0.8" + dependencies: + tinyspy: "npm:^3.0.2" + checksum: 10c0/7a940e6fbf5e6903758dfd904dedc9223df72ffa2a3d8c988706c2626c0fd3f9b129452bcd7af40bda014831f15ddb23ad7c1a7e42900acf4f3432b0c2bc8fb5 + languageName: node + linkType: hard + +"@vitest/utils@npm:3.0.6": + version: 3.0.6 + resolution: "@vitest/utils@npm:3.0.6" + dependencies: + "@vitest/pretty-format": "npm:3.0.6" + loupe: "npm:^3.1.3" + tinyrainbow: "npm:^2.0.0" + checksum: 10c0/6b0e89e26c96fcfd825e0795f586336df6a02524a11e9ac3e576b7ed9738a9e4b69cd79d0b69b23c195cc4c6bdd907f1d8f7aa79a4ee0cb85393c94a1aa85267 + languageName: node + linkType: hard + +"@vitest/utils@npm:3.0.8": + version: 3.0.8 + resolution: "@vitest/utils@npm:3.0.8" + dependencies: + "@vitest/pretty-format": "npm:3.0.8" + loupe: "npm:^3.1.3" + tinyrainbow: "npm:^2.0.0" + checksum: 10c0/929e71582d27f5ec2fe422d72112471b36517620beb2c4398c116598ca55b36340b0fa97958d8584bc05153d92dbd60324664d5b623ec6eed8c72e50e226633c + languageName: node + linkType: hard + "abbrev@npm:^2.0.0": version: 2.0.0 resolution: "abbrev@npm:2.0.0" @@ -2150,6 +2618,13 @@ __metadata: languageName: node linkType: hard +"assertion-error@npm:^2.0.1": + version: 2.0.1 + resolution: "assertion-error@npm:2.0.1" + checksum: 10c0/bbbcb117ac6480138f8c93cf7f535614282dea9dc828f540cdece85e3c665e8f78958b96afac52f29ff883c72638e6a87d469ecc9fe5bc902df03ed24a55dba8 + languageName: node + linkType: hard + "assign-symbols@npm:^1.0.0": version: 1.0.0 
resolution: "assign-symbols@npm:1.0.0" @@ -2500,6 +2975,13 @@ __metadata: languageName: node linkType: hard +"cac@npm:^6.7.14": + version: 6.7.14 + resolution: "cac@npm:6.7.14" + checksum: 10c0/4ee06aaa7bab8981f0d54e5f5f9d4adcd64058e9697563ce336d8a3878ed018ee18ebe5359b2430eceae87e0758e62ea2019c3f52ae6e211b1bd2e133856cd10 + languageName: node + linkType: hard + "cacache@npm:^19.0.1": version: 19.0.1 resolution: "cacache@npm:19.0.1" @@ -2594,6 +3076,19 @@ __metadata: languageName: node linkType: hard +"chai@npm:^5.2.0": + version: 5.2.0 + resolution: "chai@npm:5.2.0" + dependencies: + assertion-error: "npm:^2.0.1" + check-error: "npm:^2.1.1" + deep-eql: "npm:^5.0.1" + loupe: "npm:^3.1.0" + pathval: "npm:^2.0.0" + checksum: 10c0/dfd1cb719c7cebb051b727672d382a35338af1470065cb12adb01f4ee451bbf528e0e0f9ab2016af5fc1eea4df6e7f4504dc8443f8f00bd8fb87ad32dc516f7d + languageName: node + linkType: hard + "chalk@npm:^4.0.0, chalk@npm:^4.0.2, chalk@npm:^4.1.0": version: 4.1.2 resolution: "chalk@npm:4.1.2" @@ -2618,6 +3113,13 @@ __metadata: languageName: node linkType: hard +"check-error@npm:^2.1.1": + version: 2.1.1 + resolution: "check-error@npm:2.1.1" + checksum: 10c0/979f13eccab306cf1785fa10941a590b4e7ea9916ea2a4f8c87f0316fc3eab07eabefb6e587424ef0f88cbcd3805791f172ea739863ca3d7ce2afc54641c7f0e + languageName: node + linkType: hard + "chokidar@npm:^1.6.0": version: 1.7.0 resolution: "chokidar@npm:1.7.0" @@ -2892,7 +3394,7 @@ __metadata: languageName: node linkType: hard -"debug@npm:4, debug@npm:^4.1.0, debug@npm:^4.1.1, debug@npm:^4.3.1, debug@npm:^4.3.4": +"debug@npm:4, debug@npm:^4.1.0, debug@npm:^4.1.1, debug@npm:^4.3.1, debug@npm:^4.3.4, debug@npm:^4.4.0": version: 4.4.0 resolution: "debug@npm:4.4.0" dependencies: @@ -2904,18 +3406,6 @@ __metadata: languageName: node linkType: hard -"debug@npm:4.3.1": - version: 4.3.1 - resolution: "debug@npm:4.3.1" - dependencies: - ms: "npm:2.1.2" - peerDependenciesMeta: - supports-color: - optional: true - checksum: 
10c0/610bcc2eb07c533d6a9964478422f7d741095d67301888ee0b77b8f2ad0a15d115c93fb2adb13d10a9eda3d81f2d4d335405540b09596fb23aca070e77497d95 - languageName: node - linkType: hard - "debug@npm:^2.2.0, debug@npm:^2.3.3": version: 2.6.9 resolution: "debug@npm:2.6.9" @@ -3004,7 +3494,7 @@ __metadata: languageName: node linkType: hard -"decompress@npm:^4.0.0, decompress@npm:^4.2.1": +"decompress@npm:^4.0.0": version: 4.2.1 resolution: "decompress@npm:4.2.1" dependencies: @@ -3032,10 +3522,10 @@ __metadata: languageName: node linkType: hard -"deep-is@npm:^0.1.3": - version: 0.1.4 - resolution: "deep-is@npm:0.1.4" - checksum: 10c0/7f0ee496e0dff14a573dc6127f14c95061b448b87b995fc96c017ce0a1e66af1675e73f1d6064407975bc4ea6ab679497a29fff7b5b9c4e99cb10797c1ad0b4c +"deep-eql@npm:^5.0.1": + version: 5.0.2 + resolution: "deep-eql@npm:5.0.2" + checksum: 10c0/7102cf3b7bb719c6b9c0db2e19bf0aa9318d141581befe8c7ce8ccd39af9eaa4346e5e05adef7f9bd7015da0f13a3a25dcfe306ef79dc8668aedbecb658dd247 languageName: node linkType: hard @@ -3143,7 +3633,7 @@ __metadata: languageName: node linkType: hard -"duplexer@npm:^0.1.1, duplexer@npm:~0.1.1": +"duplexer@npm:^0.1.1": version: 0.1.2 resolution: "duplexer@npm:0.1.2" checksum: 10c0/c57bcd4bdf7e623abab2df43a7b5b23d18152154529d166c1e0da6bee341d84c432d157d7e97b32fecb1bf3a8b8857dd85ed81a915789f550637ed25b8e64fc2 @@ -3247,6 +3737,99 @@ __metadata: languageName: node linkType: hard +"es-module-lexer@npm:^1.6.0": + version: 1.6.0 + resolution: "es-module-lexer@npm:1.6.0" + checksum: 10c0/667309454411c0b95c476025929881e71400d74a746ffa1ff4cb450bd87f8e33e8eef7854d68e401895039ac0bac64e7809acbebb6253e055dd49ea9e3ea9212 + languageName: node + linkType: hard + +"esbuild@npm:^0.24.2": + version: 0.24.2 + resolution: "esbuild@npm:0.24.2" + dependencies: + "@esbuild/aix-ppc64": "npm:0.24.2" + "@esbuild/android-arm": "npm:0.24.2" + "@esbuild/android-arm64": "npm:0.24.2" + "@esbuild/android-x64": "npm:0.24.2" + "@esbuild/darwin-arm64": "npm:0.24.2" + "@esbuild/darwin-x64": 
"npm:0.24.2" + "@esbuild/freebsd-arm64": "npm:0.24.2" + "@esbuild/freebsd-x64": "npm:0.24.2" + "@esbuild/linux-arm": "npm:0.24.2" + "@esbuild/linux-arm64": "npm:0.24.2" + "@esbuild/linux-ia32": "npm:0.24.2" + "@esbuild/linux-loong64": "npm:0.24.2" + "@esbuild/linux-mips64el": "npm:0.24.2" + "@esbuild/linux-ppc64": "npm:0.24.2" + "@esbuild/linux-riscv64": "npm:0.24.2" + "@esbuild/linux-s390x": "npm:0.24.2" + "@esbuild/linux-x64": "npm:0.24.2" + "@esbuild/netbsd-arm64": "npm:0.24.2" + "@esbuild/netbsd-x64": "npm:0.24.2" + "@esbuild/openbsd-arm64": "npm:0.24.2" + "@esbuild/openbsd-x64": "npm:0.24.2" + "@esbuild/sunos-x64": "npm:0.24.2" + "@esbuild/win32-arm64": "npm:0.24.2" + "@esbuild/win32-ia32": "npm:0.24.2" + "@esbuild/win32-x64": "npm:0.24.2" + dependenciesMeta: + "@esbuild/aix-ppc64": + optional: true + "@esbuild/android-arm": + optional: true + "@esbuild/android-arm64": + optional: true + "@esbuild/android-x64": + optional: true + "@esbuild/darwin-arm64": + optional: true + "@esbuild/darwin-x64": + optional: true + "@esbuild/freebsd-arm64": + optional: true + "@esbuild/freebsd-x64": + optional: true + "@esbuild/linux-arm": + optional: true + "@esbuild/linux-arm64": + optional: true + "@esbuild/linux-ia32": + optional: true + "@esbuild/linux-loong64": + optional: true + "@esbuild/linux-mips64el": + optional: true + "@esbuild/linux-ppc64": + optional: true + "@esbuild/linux-riscv64": + optional: true + "@esbuild/linux-s390x": + optional: true + "@esbuild/linux-x64": + optional: true + "@esbuild/netbsd-arm64": + optional: true + "@esbuild/netbsd-x64": + optional: true + "@esbuild/openbsd-arm64": + optional: true + "@esbuild/openbsd-x64": + optional: true + "@esbuild/sunos-x64": + optional: true + "@esbuild/win32-arm64": + optional: true + "@esbuild/win32-ia32": + optional: true + "@esbuild/win32-x64": + optional: true + bin: + esbuild: bin/esbuild + checksum: 
10c0/5a25bb08b6ba23db6e66851828d848bd3ff87c005a48c02d83e38879058929878a6baa5a414e1141faee0d1dece3f32b5fbc2a87b82ed6a7aa857cf40359aeb5 + languageName: node + linkType: hard + "escalade@npm:^3.1.1, escalade@npm:^3.2.0": version: 3.2.0 resolution: "escalade@npm:3.2.0" @@ -3278,18 +3861,12 @@ __metadata: languageName: node linkType: hard -"event-stream@npm:=3.3.4": - version: 3.3.4 - resolution: "event-stream@npm:3.3.4" +"estree-walker@npm:^3.0.3": + version: 3.0.3 + resolution: "estree-walker@npm:3.0.3" dependencies: - duplexer: "npm:~0.1.1" - from: "npm:~0" - map-stream: "npm:~0.1.0" - pause-stream: "npm:0.0.11" - split: "npm:0.3" - stream-combiner: "npm:~0.0.4" - through: "npm:~2.3.1" - checksum: 10c0/c3ec4e1efc27ab3e73a98923f0a2fa9a19051b87068fea2f3d53d2e4e8c5cfdadf8c8a115b17f3d90b16a46432d396bad91b6e8d0cceb3e449be717a03b75209 + "@types/estree": "npm:^1.0.0" + checksum: 10c0/c12e3c2b2642d2bcae7d5aa495c60fa2f299160946535763969a1c83fc74518ffa9c2cd3a8b69ac56aea547df6a8aac25f729a342992ef0bbac5f1c73e78995d languageName: node linkType: hard @@ -3371,6 +3948,13 @@ __metadata: languageName: node linkType: hard +"expect-type@npm:^1.1.0": + version: 1.1.0 + resolution: "expect-type@npm:1.1.0" + checksum: 10c0/5af0febbe8fe18da05a6d51e3677adafd75213512285408156b368ca471252565d5ca6e59e4bddab25121f3cfcbbebc6a5489f8cc9db131cc29e69dcdcc7ae15 + languageName: node + linkType: hard + "expect@npm:^29.0.0, expect@npm:^29.7.0": version: 29.7.0 resolution: "expect@npm:29.7.0" @@ -3688,13 +4272,6 @@ __metadata: languageName: node linkType: hard -"from@npm:~0": - version: 0.1.7 - resolution: "from@npm:0.1.7" - checksum: 10c0/3aab5aea8fe8e1f12a5dee7f390d46a93431ce691b6222dcd5701c5d34378e51ca59b44967da1105a0f90fcdf5d7629d963d51e7ccd79827d19693bdcfb688d4 - languageName: node - linkType: hard - "fs-constants@npm:^1.0.0": version: 1.0.0 resolution: "fs-constants@npm:1.0.0" @@ -3729,7 +4306,7 @@ __metadata: languageName: node linkType: hard -"fsevents@npm:^2.3.2": +"fsevents@npm:^2.3.2, 
fsevents@npm:~2.3.2, fsevents@npm:~2.3.3": version: 2.3.3 resolution: "fsevents@npm:2.3.3" dependencies: @@ -3749,7 +4326,7 @@ __metadata: languageName: node linkType: hard -"fsevents@patch:fsevents@npm%3A^2.3.2#optional!builtin": +"fsevents@patch:fsevents@npm%3A^2.3.2#optional!builtin, fsevents@patch:fsevents@npm%3A~2.3.2#optional!builtin, fsevents@patch:fsevents@npm%3A~2.3.3#optional!builtin": version: 2.3.3 resolution: "fsevents@patch:fsevents@npm%3A2.3.3#optional!builtin::version=2.3.3&hash=df0bf1" dependencies: @@ -4144,13 +4721,6 @@ __metadata: languageName: node linkType: hard -"ip-regex@npm:^4.1.0": - version: 4.3.0 - resolution: "ip-regex@npm:4.3.0" - checksum: 10c0/f9ef1f5d0df05b9133a882974e572ae525ccd205260cb103dae337f1fc7451ed783391acc6ad688e56dd2598f769e8e72ecbb650ec34763396af822a91768562 - languageName: node - linkType: hard - "is-accessor-descriptor@npm:^1.0.1": version: 1.0.1 resolution: "is-accessor-descriptor@npm:1.0.1" @@ -4394,13 +4964,6 @@ __metadata: languageName: node linkType: hard -"is-url@npm:^1.2.4": - version: 1.2.4 - resolution: "is-url@npm:1.2.4" - checksum: 10c0/0157a79874f8f95fdd63540e3f38c8583c2ef572661cd0693cda80ae3e42dfe8e9a4a972ec1b827f861d9a9acf75b37f7d58a37f94a8a053259642912c252bc3 - languageName: node - linkType: hard - "is-utf8@npm:^0.2.0": version: 0.2.1 resolution: "is-utf8@npm:0.2.1" @@ -4415,17 +4978,6 @@ __metadata: languageName: node linkType: hard -"is2@npm:^2.0.6": - version: 2.0.9 - resolution: "is2@npm:2.0.9" - dependencies: - deep-is: "npm:^0.1.3" - ip-regex: "npm:^4.1.0" - is-url: "npm:^1.2.4" - checksum: 10c0/51090a2ad046651c1523e6aec98843c2be4b61fdafa5a68d89966b7d3b7116fdc68cfb218cfc3825eb20175fa741de2f89249546352dbc4ac1d86847fa4a084a - languageName: node - linkType: hard - "isarray@npm:1.0.0, isarray@npm:~1.0.0": version: 1.0.0 resolution: "isarray@npm:1.0.0" @@ -5402,6 +5954,13 @@ __metadata: languageName: node linkType: hard +"loupe@npm:^3.1.0, loupe@npm:^3.1.3": + version: 3.1.3 + resolution: 
"loupe@npm:3.1.3" + checksum: 10c0/f5dab4144254677de83a35285be1b8aba58b3861439ce4ba65875d0d5f3445a4a496daef63100ccf02b2dbc25bf58c6db84c9cb0b96d6435331e9d0a33b48541 + languageName: node + linkType: hard + "lowercase-keys@npm:^1.0.0": version: 1.0.1 resolution: "lowercase-keys@npm:1.0.1" @@ -5425,6 +5984,15 @@ __metadata: languageName: node linkType: hard +"magic-string@npm:^0.30.17": + version: 0.30.17 + resolution: "magic-string@npm:0.30.17" + dependencies: + "@jridgewell/sourcemap-codec": "npm:^1.5.0" + checksum: 10c0/16826e415d04b88378f200fe022b53e638e3838b9e496edda6c0e086d7753a44a6ed187adc72d19f3623810589bf139af1a315541cd6a26ae0771a0193eaf7b8 + languageName: node + linkType: hard + "make-dir@npm:^1.0.0": version: 1.3.0 resolution: "make-dir@npm:1.3.0" @@ -5492,13 +6060,6 @@ __metadata: languageName: node linkType: hard -"map-stream@npm:~0.1.0": - version: 0.1.0 - resolution: "map-stream@npm:0.1.0" - checksum: 10c0/7dd6debe511c1b55d9da75e1efa65a28b1252a2d8357938d2e49b412713c478efbaefb0cdf0ee0533540c3bf733e8f9f71e1a15aa0fe74bf71b64e75bf1576bd - languageName: node - linkType: hard - "map-visit@npm:^1.0.0": version: 1.0.0 resolution: "map-visit@npm:1.0.0" @@ -5841,13 +6402,6 @@ __metadata: languageName: node linkType: hard -"ms@npm:2.1.2": - version: 2.1.2 - resolution: "ms@npm:2.1.2" - checksum: 10c0/a437714e2f90dbf881b5191d35a6db792efbca5badf112f87b9e1c712aace4b4b9b742dd6537f3edf90fd6f684de897cec230abde57e87883766712ddda297cc - languageName: node - linkType: hard - "ms@npm:^2.0.0, ms@npm:^2.1.1, ms@npm:^2.1.3": version: 2.1.3 resolution: "ms@npm:2.1.3" @@ -5873,6 +6427,15 @@ __metadata: languageName: node linkType: hard +"nanoid@npm:^3.3.8": + version: 3.3.8 + resolution: "nanoid@npm:3.3.8" + bin: + nanoid: bin/nanoid.cjs + checksum: 10c0/4b1bb29f6cfebf3be3bc4ad1f1296fb0a10a3043a79f34fbffe75d1621b4318319211cd420549459018ea3592f0d2f159247a6f874911d6d26eaaadda2478120 + languageName: node + linkType: hard + "nanomatch@npm:^1.2.9": version: 1.2.13 resolution: 
"nanomatch@npm:1.2.13" @@ -5999,13 +6562,6 @@ __metadata: languageName: node linkType: hard -"node-os-utils@npm:^1.3.7": - version: 1.3.7 - resolution: "node-os-utils@npm:1.3.7" - checksum: 10c0/88b8a4c7ed99ca0ca8f077f4f4672026e732605d5afb125e856de9ba1880b842facefa4c38f732f5cce20a34f9f471ce18a20c677dcdb702b4b68c17bacf9584 - languageName: node - linkType: hard - "node-releases@npm:^2.0.19": version: 2.0.19 resolution: "node-releases@npm:2.0.19" @@ -6404,12 +6960,17 @@ __metadata: languageName: node linkType: hard -"pause-stream@npm:0.0.11": - version: 0.0.11 - resolution: "pause-stream@npm:0.0.11" - dependencies: - through: "npm:~2.3" - checksum: 10c0/86f12c64cdaaa8e45ebaca4e39a478e1442db8b4beabc280b545bfaf79c0e2f33c51efb554aace5c069cc441c7b924ba484837b345eaa4ba6fc940d62f826802 +"pathe@npm:^2.0.3": + version: 2.0.3 + resolution: "pathe@npm:2.0.3" + checksum: 10c0/c118dc5a8b5c4166011b2b70608762e260085180bb9e33e80a50dcdb1e78c010b1624f4280c492c92b05fc276715a4c357d1f9edc570f8f1b3d90b6839ebaca1 + languageName: node + linkType: hard + +"pathval@npm:^2.0.0": + version: 2.0.0 + resolution: "pathval@npm:2.0.0" + checksum: 10c0/602e4ee347fba8a599115af2ccd8179836a63c925c23e04bd056d0674a64b39e3a081b643cc7bc0b84390517df2d800a46fcc5598d42c155fe4977095c2f77c5 languageName: node linkType: hard @@ -6430,7 +6991,7 @@ __metadata: languageName: node linkType: hard -"picocolors@npm:^1.0.0, picocolors@npm:^1.1.0": +"picocolors@npm:^1.0.0, picocolors@npm:^1.1.0, picocolors@npm:^1.1.1": version: 1.1.1 resolution: "picocolors@npm:1.1.1" checksum: 10c0/e2e3e8170ab9d7c7421969adaa7e1b31434f789afb9b3f115f6b96d91945041ac3ceb02e9ec6fe6510ff036bcc0bf91e69a1772edc0b707e12b19c0f2d6bcf58 @@ -6497,6 +7058,17 @@ __metadata: languageName: node linkType: hard +"postcss@npm:^8.5.2": + version: 8.5.3 + resolution: "postcss@npm:8.5.3" + dependencies: + nanoid: "npm:^3.3.8" + picocolors: "npm:^1.1.1" + source-map-js: "npm:^1.2.1" + checksum: 
10c0/b75510d7b28c3ab728c8733dd01538314a18c52af426f199a3c9177e63eb08602a3938bfb66b62dc01350b9aed62087eabbf229af97a1659eb8d3513cec823b3 + languageName: node + linkType: hard + "prebuildify@npm:^6.0.1": version: 6.0.1 resolution: "prebuildify@npm:6.0.1" @@ -6579,17 +7151,6 @@ __metadata: languageName: node linkType: hard -"ps-tree@npm:^1.2.0": - version: 1.2.0 - resolution: "ps-tree@npm:1.2.0" - dependencies: - event-stream: "npm:=3.3.4" - bin: - ps-tree: ./bin/ps-tree.js - checksum: 10c0/9d1c159e0890db5aa05f84d125193c2190a6c4ecd457596fd25e7611f8f747292a846459dcc0244e27d45529d4cea6d1010c3a2a087fad02624d12fdb7d97c22 - languageName: node - linkType: hard - "pump@npm:^3.0.0": version: 3.0.2 resolution: "pump@npm:3.0.2" @@ -6961,6 +7522,78 @@ __metadata: languageName: node linkType: hard +"rollup@npm:^4.30.1": + version: 4.34.8 + resolution: "rollup@npm:4.34.8" + dependencies: + "@rollup/rollup-android-arm-eabi": "npm:4.34.8" + "@rollup/rollup-android-arm64": "npm:4.34.8" + "@rollup/rollup-darwin-arm64": "npm:4.34.8" + "@rollup/rollup-darwin-x64": "npm:4.34.8" + "@rollup/rollup-freebsd-arm64": "npm:4.34.8" + "@rollup/rollup-freebsd-x64": "npm:4.34.8" + "@rollup/rollup-linux-arm-gnueabihf": "npm:4.34.8" + "@rollup/rollup-linux-arm-musleabihf": "npm:4.34.8" + "@rollup/rollup-linux-arm64-gnu": "npm:4.34.8" + "@rollup/rollup-linux-arm64-musl": "npm:4.34.8" + "@rollup/rollup-linux-loongarch64-gnu": "npm:4.34.8" + "@rollup/rollup-linux-powerpc64le-gnu": "npm:4.34.8" + "@rollup/rollup-linux-riscv64-gnu": "npm:4.34.8" + "@rollup/rollup-linux-s390x-gnu": "npm:4.34.8" + "@rollup/rollup-linux-x64-gnu": "npm:4.34.8" + "@rollup/rollup-linux-x64-musl": "npm:4.34.8" + "@rollup/rollup-win32-arm64-msvc": "npm:4.34.8" + "@rollup/rollup-win32-ia32-msvc": "npm:4.34.8" + "@rollup/rollup-win32-x64-msvc": "npm:4.34.8" + "@types/estree": "npm:1.0.6" + fsevents: "npm:~2.3.2" + dependenciesMeta: + "@rollup/rollup-android-arm-eabi": + optional: true + "@rollup/rollup-android-arm64": + optional: 
true + "@rollup/rollup-darwin-arm64": + optional: true + "@rollup/rollup-darwin-x64": + optional: true + "@rollup/rollup-freebsd-arm64": + optional: true + "@rollup/rollup-freebsd-x64": + optional: true + "@rollup/rollup-linux-arm-gnueabihf": + optional: true + "@rollup/rollup-linux-arm-musleabihf": + optional: true + "@rollup/rollup-linux-arm64-gnu": + optional: true + "@rollup/rollup-linux-arm64-musl": + optional: true + "@rollup/rollup-linux-loongarch64-gnu": + optional: true + "@rollup/rollup-linux-powerpc64le-gnu": + optional: true + "@rollup/rollup-linux-riscv64-gnu": + optional: true + "@rollup/rollup-linux-s390x-gnu": + optional: true + "@rollup/rollup-linux-x64-gnu": + optional: true + "@rollup/rollup-linux-x64-musl": + optional: true + "@rollup/rollup-win32-arm64-msvc": + optional: true + "@rollup/rollup-win32-ia32-msvc": + optional: true + "@rollup/rollup-win32-x64-msvc": + optional: true + fsevents: + optional: true + bin: + rollup: dist/bin/rollup + checksum: 10c0/b9e711e33413112fbb761107c3fddc4561dfc74335c393542a829a85ccfb2763bfd17bf2422d84a2e9bee7646e5367018973e97005fdf64e49c2e209612f0eb6 + languageName: node + linkType: hard + "root-workspace-0b6124@workspace:.": version: 0.0.0-use.local resolution: "root-workspace-0b6124@workspace:." 
@@ -7090,6 +7723,13 @@ __metadata: languageName: node linkType: hard +"siginfo@npm:^2.0.0": + version: 2.0.0 + resolution: "siginfo@npm:2.0.0" + checksum: 10c0/3def8f8e516fbb34cb6ae415b07ccc5d9c018d85b4b8611e3dc6f8be6d1899f693a4382913c9ed51a06babb5201639d76453ab297d1c54a456544acf5c892e34 + languageName: node + linkType: hard + "signal-exit@npm:^3.0.0, signal-exit@npm:^3.0.3, signal-exit@npm:^3.0.7": version: 3.0.7 resolution: "signal-exit@npm:3.0.7" @@ -7200,6 +7840,13 @@ __metadata: languageName: node linkType: hard +"source-map-js@npm:^1.2.1": + version: 1.2.1 + resolution: "source-map-js@npm:1.2.1" + checksum: 10c0/7bda1fc4c197e3c6ff17de1b8b2c20e60af81b63a52cb32ec5a5d67a20a7d42651e2cb34ebe93833c5a2a084377e17455854fee3e21e7925c64a51b6a52b0faf + languageName: node + linkType: hard + "source-map-resolve@npm:^0.5.0": version: 0.5.3 resolution: "source-map-resolve@npm:0.5.3" @@ -7294,15 +7941,6 @@ __metadata: languageName: node linkType: hard -"split@npm:0.3": - version: 0.3.3 - resolution: "split@npm:0.3.3" - dependencies: - through: "npm:2" - checksum: 10c0/88c09b1b4de84953bf5d6c153123a1fbb20addfea9381f70d27b4eb6b2bfbadf25d313f8f5d3fd727d5679b97bfe54da04766b91010f131635bf49e51d5db3fc - languageName: node - linkType: hard - "sprintf-js@npm:^1.1.3": version: 1.1.3 resolution: "sprintf-js@npm:1.1.3" @@ -7335,6 +7973,13 @@ __metadata: languageName: node linkType: hard +"stackback@npm:0.0.2": + version: 0.0.2 + resolution: "stackback@npm:0.0.2" + checksum: 10c0/89a1416668f950236dd5ac9f9a6b2588e1b9b62b1b6ad8dff1bfc5d1a15dbf0aafc9b52d2226d00c28dffff212da464eaeebfc6b7578b9d180cef3e3782c5983 + languageName: node + linkType: hard + "static-extend@npm:^0.1.1": version: 0.1.2 resolution: "static-extend@npm:0.1.2" @@ -7345,12 +7990,10 @@ __metadata: languageName: node linkType: hard -"stream-combiner@npm:~0.0.4": - version: 0.0.4 - resolution: "stream-combiner@npm:0.0.4" - dependencies: - duplexer: "npm:~0.1.1" - checksum: 
10c0/8075a94c0eb0f20450a8236cb99d4ce3ea6e6a4b36d8baa7440b1a08cde6ffd227debadffaecd80993bd334282875d0e927ab5b88484625e01970dd251004ff5 +"std-env@npm:^3.8.0": + version: 3.8.0 + resolution: "std-env@npm:3.8.0" + checksum: 10c0/f560a2902fd0fa3d648d7d0acecbd19d664006f7372c1fba197ed4c216b4c9e48db6e2769b5fe1616d42a9333c9f066c5011935035e85c59f45dc4f796272040 languageName: node linkType: hard @@ -7576,25 +8219,6 @@ __metadata: languageName: node linkType: hard -"tcp-port-used@npm:^1.0.2": - version: 1.0.2 - resolution: "tcp-port-used@npm:1.0.2" - dependencies: - debug: "npm:4.3.1" - is2: "npm:^2.0.6" - checksum: 10c0/a5fb29e35f1e452f1064e3671d02b6d65e7d9bffad98d8da688270b6ffdaa9a8351fe8321aedf131f3904af70b569d9c5f6d9fe75d57dda19c466abac2bc025a - languageName: node - linkType: hard - -"terminate@npm:2.6.1": - version: 2.6.1 - resolution: "terminate@npm:2.6.1" - dependencies: - ps-tree: "npm:^1.2.0" - checksum: 10c0/1174aa66462da601248d09a29243d81722b65d6b8ff198d7a9fdb50a4e90182ea2ea012ab73e19775538a09f01c99e52107654fd425b8048562ddf70d810886a - languageName: node - linkType: hard - "test-exclude@npm:^6.0.0": version: 6.0.0 resolution: "test-exclude@npm:6.0.0" @@ -7606,7 +8230,7 @@ __metadata: languageName: node linkType: hard -"through@npm:2, through@npm:^2.3.8, through@npm:~2.3, through@npm:~2.3.1": +"through@npm:^2.3.8": version: 2.3.8 resolution: "through@npm:2.3.8" checksum: 10c0/4b09f3774099de0d4df26d95c5821a62faee32c7e96fb1f4ebd54a2d7c11c57fe88b0a0d49cf375de5fee5ae6bf4eb56dbbf29d07366864e2ee805349970d3cc @@ -7620,6 +8244,41 @@ __metadata: languageName: node linkType: hard +"tinybench@npm:^2.9.0": + version: 2.9.0 + resolution: "tinybench@npm:2.9.0" + checksum: 10c0/c3500b0f60d2eb8db65250afe750b66d51623057ee88720b7f064894a6cb7eb93360ca824a60a31ab16dab30c7b1f06efe0795b352e37914a9d4bad86386a20c + languageName: node + linkType: hard + +"tinyexec@npm:^0.3.2": + version: 0.3.2 + resolution: "tinyexec@npm:0.3.2" + checksum: 
10c0/3efbf791a911be0bf0821eab37a3445c2ba07acc1522b1fa84ae1e55f10425076f1290f680286345ed919549ad67527d07281f1c19d584df3b74326909eb1f90 + languageName: node + linkType: hard + +"tinypool@npm:^1.0.2": + version: 1.0.2 + resolution: "tinypool@npm:1.0.2" + checksum: 10c0/31ac184c0ff1cf9a074741254fe9ea6de95026749eb2b8ec6fd2b9d8ca94abdccda731f8e102e7f32e72ed3b36d32c6975fd5f5523df3f1b6de6c3d8dfd95e63 + languageName: node + linkType: hard + +"tinyrainbow@npm:^2.0.0": + version: 2.0.0 + resolution: "tinyrainbow@npm:2.0.0" + checksum: 10c0/c83c52bef4e0ae7fb8ec6a722f70b5b6fa8d8be1c85792e829f56c0e1be94ab70b293c032dc5048d4d37cfe678f1f5babb04bdc65fd123098800148ca989184f + languageName: node + linkType: hard + +"tinyspy@npm:^3.0.2": + version: 3.0.2 + resolution: "tinyspy@npm:3.0.2" + checksum: 10c0/55ffad24e346622b59292e097c2ee30a63919d5acb7ceca87fc0d1c223090089890587b426e20054733f97a58f20af2c349fb7cc193697203868ab7ba00bcea0 + languageName: node + linkType: hard + "tmpl@npm:1.0.5": version: 1.0.5 resolution: "tmpl@npm:1.0.5" @@ -7990,6 +8649,194 @@ __metadata: languageName: node linkType: hard +"vite-node@npm:3.0.6": + version: 3.0.6 + resolution: "vite-node@npm:3.0.6" + dependencies: + cac: "npm:^6.7.14" + debug: "npm:^4.4.0" + es-module-lexer: "npm:^1.6.0" + pathe: "npm:^2.0.3" + vite: "npm:^5.0.0 || ^6.0.0" + bin: + vite-node: vite-node.mjs + checksum: 10c0/bfef19ac659b453c31fc00b42f8d08b3f7539092f67b0b02504dc2f802af1fe9bcf3531a4ecd248bf8ce2f00b7f4b9a67e20cdd57c2e50d9ff8cea5ff941bedd + languageName: node + linkType: hard + +"vite-node@npm:3.0.8": + version: 3.0.8 + resolution: "vite-node@npm:3.0.8" + dependencies: + cac: "npm:^6.7.14" + debug: "npm:^4.4.0" + es-module-lexer: "npm:^1.6.0" + pathe: "npm:^2.0.3" + vite: "npm:^5.0.0 || ^6.0.0" + bin: + vite-node: vite-node.mjs + checksum: 10c0/1e7243ad04edc71ccff67b1a686cc85b59ad803645b83c524eab6cde92d6c8f06d595cc99cd3236b4017de27d6760808c419711cd728471eb36ec9a6734ef651 + languageName: node + linkType: hard + +"vite@npm:^5.0.0 || 
^6.0.0": + version: 6.1.1 + resolution: "vite@npm:6.1.1" + dependencies: + esbuild: "npm:^0.24.2" + fsevents: "npm:~2.3.3" + postcss: "npm:^8.5.2" + rollup: "npm:^4.30.1" + peerDependencies: + "@types/node": ^18.0.0 || ^20.0.0 || >=22.0.0 + jiti: ">=1.21.0" + less: "*" + lightningcss: ^1.21.0 + sass: "*" + sass-embedded: "*" + stylus: "*" + sugarss: "*" + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + dependenciesMeta: + fsevents: + optional: true + peerDependenciesMeta: + "@types/node": + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + bin: + vite: bin/vite.js + checksum: 10c0/4ec5ddc9436951a68b213cd59c2a157663ef423658c387400774582ea33da40dcae18e55f3adb3b629173e2183b10d49db8370bc51a0aa89797e4ca5a34702a0 + languageName: node + linkType: hard + +"vitest@npm:^3.0.6": + version: 3.0.6 + resolution: "vitest@npm:3.0.6" + dependencies: + "@vitest/expect": "npm:3.0.6" + "@vitest/mocker": "npm:3.0.6" + "@vitest/pretty-format": "npm:^3.0.6" + "@vitest/runner": "npm:3.0.6" + "@vitest/snapshot": "npm:3.0.6" + "@vitest/spy": "npm:3.0.6" + "@vitest/utils": "npm:3.0.6" + chai: "npm:^5.2.0" + debug: "npm:^4.4.0" + expect-type: "npm:^1.1.0" + magic-string: "npm:^0.30.17" + pathe: "npm:^2.0.3" + std-env: "npm:^3.8.0" + tinybench: "npm:^2.9.0" + tinyexec: "npm:^0.3.2" + tinypool: "npm:^1.0.2" + tinyrainbow: "npm:^2.0.0" + vite: "npm:^5.0.0 || ^6.0.0" + vite-node: "npm:3.0.6" + why-is-node-running: "npm:^2.3.0" + peerDependencies: + "@edge-runtime/vm": "*" + "@types/debug": ^4.1.12 + "@types/node": ^18.0.0 || ^20.0.0 || >=22.0.0 + "@vitest/browser": 3.0.6 + "@vitest/ui": 3.0.6 + happy-dom: "*" + jsdom: "*" + peerDependenciesMeta: + "@edge-runtime/vm": + optional: true + "@types/debug": + optional: true + "@types/node": + optional: true + 
"@vitest/browser": + optional: true + "@vitest/ui": + optional: true + happy-dom: + optional: true + jsdom: + optional: true + bin: + vitest: vitest.mjs + checksum: 10c0/e50a08f8508a7dbda1ea985b2ba05483ab6f87e100a9388c6c4bc47ee76fcdebe89b33db320df177ea6d198fc50e98eb4b9650bb9d314dd8a7bfe885659b3d42 + languageName: node + linkType: hard + +"vitest@npm:^3.0.8": + version: 3.0.8 + resolution: "vitest@npm:3.0.8" + dependencies: + "@vitest/expect": "npm:3.0.8" + "@vitest/mocker": "npm:3.0.8" + "@vitest/pretty-format": "npm:^3.0.8" + "@vitest/runner": "npm:3.0.8" + "@vitest/snapshot": "npm:3.0.8" + "@vitest/spy": "npm:3.0.8" + "@vitest/utils": "npm:3.0.8" + chai: "npm:^5.2.0" + debug: "npm:^4.4.0" + expect-type: "npm:^1.1.0" + magic-string: "npm:^0.30.17" + pathe: "npm:^2.0.3" + std-env: "npm:^3.8.0" + tinybench: "npm:^2.9.0" + tinyexec: "npm:^0.3.2" + tinypool: "npm:^1.0.2" + tinyrainbow: "npm:^2.0.0" + vite: "npm:^5.0.0 || ^6.0.0" + vite-node: "npm:3.0.8" + why-is-node-running: "npm:^2.3.0" + peerDependencies: + "@edge-runtime/vm": "*" + "@types/debug": ^4.1.12 + "@types/node": ^18.0.0 || ^20.0.0 || >=22.0.0 + "@vitest/browser": 3.0.8 + "@vitest/ui": 3.0.8 + happy-dom: "*" + jsdom: "*" + peerDependenciesMeta: + "@edge-runtime/vm": + optional: true + "@types/debug": + optional: true + "@types/node": + optional: true + "@vitest/browser": + optional: true + "@vitest/ui": + optional: true + happy-dom: + optional: true + jsdom: + optional: true + bin: + vitest: vitest.mjs + checksum: 10c0/007a951c4e10ceda1eecad38e5bcc7aa25ed90269614e1394eb2c5fa5f51bbe05d915bcec27fc2e18da8bdea27cea80d428095ef818b97857c51422fddda34ff + languageName: node + linkType: hard + "walker@npm:^1.0.8": version: 1.0.8 resolution: "walker@npm:1.0.8" @@ -8052,6 +8899,18 @@ __metadata: languageName: node linkType: hard +"why-is-node-running@npm:^2.3.0": + version: 2.3.0 + resolution: "why-is-node-running@npm:2.3.0" + dependencies: + siginfo: "npm:^2.0.0" + stackback: "npm:0.0.2" + bin: + 
why-is-node-running: cli.js + checksum: 10c0/1cde0b01b827d2cf4cb11db962f3958b9175d5d9e7ac7361d1a7b0e2dc6069a263e69118bd974c4f6d0a890ef4eedfe34cf3d5167ec14203dbc9a18620537054 + languageName: node + linkType: hard + "wrap-ansi-cjs@npm:wrap-ansi@^7.0.0, wrap-ansi@npm:^7.0.0": version: 7.0.0 resolution: "wrap-ansi@npm:7.0.0" diff --git a/joi/src/core/ScrollArea/styles.scss b/joi/src/core/ScrollArea/styles.scss index fd8a43e53..99ee7de87 100644 --- a/joi/src/core/ScrollArea/styles.scss +++ b/joi/src/core/ScrollArea/styles.scss @@ -51,20 +51,3 @@ flex-direction: column; height: 8px; } - -::-webkit-scrollbar { - width: 8px; - height: 8px; -} -::-webkit-scrollbar-track, -::-webkit-scrollbar-thumb { - background-clip: content-box; - border-radius: inherit; -} -::-webkit-scrollbar-track { - background: hsla(var(--scrollbar-tracker)); -} -::-webkit-scrollbar-thumb { - background: hsla(var(--scrollbar-thumb)); - border-radius: 20px; -} diff --git a/joi/src/core/Select/index.tsx b/joi/src/core/Select/index.tsx index a8c3df528..d8935bd83 100644 --- a/joi/src/core/Select/index.tsx +++ b/joi/src/core/Select/index.tsx @@ -7,14 +7,17 @@ import './styles.scss' import { twMerge } from 'tailwind-merge' type Props = { - options?: { name: string; value: string }[] + options?: { name: string; value: string; recommend?: boolean }[] open?: boolean block?: boolean value?: string + side?: 'top' | 'right' | 'bottom' | 'left' + position?: 'item-aligned' | 'popper' placeholder?: string disabled?: boolean containerPortal?: HTMLDivElement | undefined | null className?: string + sideOffset?: number onValueChange?: (value: string) => void onOpenChange?: (open: boolean) => void } @@ -26,7 +29,10 @@ const Select = ({ disabled, containerPortal, block, + sideOffset, + position, className, + side, open, onValueChange, onOpenChange, @@ -52,7 +58,12 @@ const Select = ({ - + {options && options.map((item, i) => { @@ -62,9 +73,16 @@ const Select = ({ className="select__item" value={item.value} > - - 
{item.name} - +
+ + {item.name} + + {item.recommend && ( + + Recommended + + )} +
@@ -72,7 +90,6 @@ const Select = ({ ) })}
-
diff --git a/joi/src/core/Slider/Slider.test.tsx b/joi/src/core/Slider/Slider.test.tsx index 86bd8c623..e74bf5cac 100644 --- a/joi/src/core/Slider/Slider.test.tsx +++ b/joi/src/core/Slider/Slider.test.tsx @@ -29,7 +29,7 @@ jest.mock('@radix-ui/react-slider', () => ({ describe('@joi/core/Slider', () => { it('renders correctly with default props', () => { - render() + render() expect(screen.getByTestId('slider-root')).toBeInTheDocument() expect(screen.getByTestId('slider-track')).toBeInTheDocument() expect(screen.getByTestId('slider-range')).toBeInTheDocument() diff --git a/joi/src/core/Slider/index.tsx b/joi/src/core/Slider/index.tsx index 7f8c6cb89..ea3d8dfca 100644 --- a/joi/src/core/Slider/index.tsx +++ b/joi/src/core/Slider/index.tsx @@ -39,7 +39,9 @@ const Slider = ({ - + {value?.map((_, i) => ( + + ))} ) diff --git a/server/cortex.json b/server/cortex.json index 917cff354..56daf32cc 100644 --- a/server/cortex.json +++ b/server/cortex.json @@ -5,77 +5,470 @@ "post": { "operationId": "AssistantsController_create", "summary": "Create assistant", - "description": "Creates a new assistant.", - "parameters": [], + "description": "Creates a new assistant with the specified configuration.", "requestBody": { "required": true, "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/CreateAssistantDto" + "type": "object", + "properties": { + "model": { + "type": "string", + "description": "The model identifier to use for the assistant." + }, + "name": { + "type": "string", + "description": "The name of the assistant." + }, + "description": { + "type": "string", + "description": "The description of the assistant." + }, + "instructions": { + "type": "string", + "description": "Instructions for the assistant's behavior." + }, + "tools": { + "type": "array", + "description": "A list of tools enabled on the assistant. 
Maximum of 128 tools.", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "code_interpreter", + "file_search", + "function" + ] + } + } + } + }, + "tool_resources": { + "type": "object", + "description": "Resources used by the assistant's tools.", + "properties": { + "code_interpreter": { + "type": "object" + }, + "file_search": { + "type": "object" + } + } + }, + "metadata": { + "type": "object", + "description": "Set of key-value pairs for the assistant.", + "additionalProperties": true + }, + "temperature": { + "type": "number", + "format": "float", + "description": "Temperature parameter for response generation." + }, + "top_p": { + "type": "number", + "format": "float", + "description": "Top p parameter for response generation." + }, + "response_format": { + "oneOf": [ + { + "type": "string", + "enum": ["auto"] + }, + { + "type": "object" + } + ] + } + }, + "required": ["model"] } } } }, - "responses": { - "201": { - "description": "The assistant has been successfully created." - } - }, - "tags": ["Assistants"] - }, - "get": { - "operationId": "AssistantsController_findAll", - "summary": "List assistants", - "description": "Returns a list of assistants.", - "parameters": [ - { - "name": "limit", - "required": false, - "in": "query", - "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.", - "schema": { - "type": "number" - } - }, - { - "name": "order", - "required": false, - "in": "query", - "description": "Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order.", - "schema": { - "type": "string" - } - }, - { - "name": "after", - "required": false, - "in": "query", - "description": "A cursor for use in pagination. after is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.", - "schema": { - "type": "string" - } - }, - { - "name": "before", - "required": false, - "in": "query", - "description": "A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.", - "schema": { - "type": "string" - } - } - ], "responses": { "200": { "description": "Ok", "content": { "application/json": { "schema": { - "type": "array", - "items": { - "$ref": "#/components/schemas/AssistantEntity" + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The unique identifier of the assistant." + }, + "object": { + "type": "string", + "enum": ["assistant"], + "description": "The object type, which is always 'assistant'." + }, + "created_at": { + "type": "integer", + "description": "Unix timestamp (in seconds) of when the assistant was created." + }, + "model": { + "type": "string", + "description": "The model identifier used by the assistant." + }, + "name": { + "type": "string", + "description": "The name of the assistant." + }, + "description": { + "type": "string", + "description": "The description of the assistant." + }, + "instructions": { + "type": "string", + "description": "Instructions for the assistant's behavior." 
+ }, + "tools": { + "type": "array", + "description": "A list of tools enabled on the assistant.", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "code_interpreter", + "file_search", + "function" + ] + } + } + } + }, + "tool_resources": { + "type": "object", + "description": "Resources used by the assistant's tools.", + "properties": { + "code_interpreter": { + "type": "object" + }, + "file_search": { + "type": "object" + } + } + }, + "metadata": { + "type": "object", + "description": "Set of key-value pairs that can be attached to the assistant.", + "additionalProperties": true + }, + "temperature": { + "type": "number", + "format": "float", + "description": "Temperature parameter for response generation." + }, + "top_p": { + "type": "number", + "format": "float", + "description": "Top p parameter for response generation." + }, + "response_format": { + "oneOf": [ + { + "type": "string", + "enum": ["auto"] + }, + { + "type": "object" + } + ] + } + }, + "required": [ + "id", + "object", + "created_at", + "model", + "metadata" + ] + } + } + } + } + }, + "tags": ["Assistants"] + }, + "patch": { + "operationId": "AssistantsController_update", + "summary": "Update assistant", + "description": "Updates an assistant. Requires at least one modifiable field.", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the assistant.", + "schema": { + "type": "string" + } + }, + { + "name": "OpenAI-Beta", + "required": true, + "in": "header", + "description": "Beta feature header.", + "schema": { + "type": "string", + "enum": ["assistants=v2"] + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "model": { + "type": "string", + "description": "The model identifier to use for the assistant." + }, + "name": { + "type": "string", + "description": "The name of the assistant." 
+ }, + "description": { + "type": "string", + "description": "The description of the assistant." + }, + "instructions": { + "type": "string", + "description": "Instructions for the assistant's behavior." + }, + "tools": { + "type": "array", + "description": "A list of tools enabled on the assistant. Maximum of 128 tools.", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "code_interpreter", + "file_search", + "function" + ] + } + } + } + }, + "tool_resources": { + "type": "object", + "description": "Resources used by the assistant's tools.", + "properties": { + "code_interpreter": { + "type": "object" + }, + "file_search": { + "type": "object" + } + } + }, + "metadata": { + "type": "object", + "description": "Set of key-value pairs for the assistant.", + "additionalProperties": true + }, + "temperature": { + "type": "number", + "format": "float", + "description": "Temperature parameter for response generation." + }, + "top_p": { + "type": "number", + "format": "float", + "description": "Top p parameter for response generation." + }, + "response_format": { + "oneOf": [ + { + "type": "string", + "enum": ["auto"] + }, + { + "type": "object" + } + ] } + }, + "minProperties": 1 + } + } + } + }, + "responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The unique identifier of the assistant." + }, + "object": { + "type": "string", + "enum": ["assistant"], + "description": "The object type, which is always 'assistant'." + }, + "created_at": { + "type": "integer", + "description": "Unix timestamp (in seconds) of when the assistant was created." + }, + "model": { + "type": "string", + "description": "The model identifier used by the assistant." + }, + "name": { + "type": "string", + "description": "The name of the assistant." 
+ }, + "description": { + "type": "string", + "description": "The description of the assistant." + }, + "instructions": { + "type": "string", + "description": "Instructions for the assistant's behavior." + }, + "tools": { + "type": "array", + "description": "A list of tools enabled on the assistant.", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "code_interpreter", + "file_search", + "function" + ] + } + } + } + }, + "tool_resources": { + "type": "object", + "description": "Resources used by the assistant's tools.", + "properties": { + "code_interpreter": { + "type": "object" + }, + "file_search": { + "type": "object" + } + } + }, + "metadata": { + "type": "object", + "description": "Set of key-value pairs that can be attached to the assistant.", + "additionalProperties": true + }, + "temperature": { + "type": "number", + "format": "float", + "description": "Temperature parameter for response generation." + }, + "top_p": { + "type": "number", + "format": "float", + "description": "Top p parameter for response generation." + }, + "response_format": { + "oneOf": [ + { + "type": "string", + "enum": ["auto"] + }, + { + "type": "object" + } + ] + } + }, + "required": [ + "id", + "object", + "created_at", + "model", + "metadata" + ] + } + } + } + } + }, + "tags": ["Assistants"] + }, + "get": { + "operationId": "AssistantsController_list", + "summary": "List assistants", + "description": "Returns a list of assistants.", + "responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "object": { + "type": "string", + "enum": ["list"], + "description": "The object type, which is always 'list' for a list response." + }, + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The unique identifier of the assistant." 
+ }, + "object": { + "type": "string", + "enum": ["assistant"], + "description": "The object type, which is always 'assistant'." + }, + "created_at": { + "type": "integer", + "description": "Unix timestamp (in seconds) of when the assistant was created." + }, + "model": { + "type": "string", + "description": "The model identifier used by the assistant." + }, + "metadata": { + "type": "object", + "description": "Set of key-value pairs that can be attached to the assistant.", + "additionalProperties": true + } + }, + "required": [ + "id", + "object", + "created_at", + "model", + "metadata" + ] + } + } + }, + "required": ["object", "data"] } } } @@ -88,7 +481,77 @@ "get": { "operationId": "AssistantsController_findOne", "summary": "Get assistant", - "description": "Retrieves a specific assistant defined by an assistant's `id`.", + "description": "Retrieves a specific assistant by ID.", + "parameters": [ + { + "name": "id", + "required": true, + "in": "path", + "description": "The unique identifier of the assistant.", + "schema": { + "type": "string" + } + }, + { + "name": "OpenAI-Beta", + "required": true, + "in": "header", + "description": "Beta feature header.", + "schema": { + "type": "string", + "enum": ["assistants=v2"] + } + } + ], + "responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The unique identifier of the assistant." + }, + "object": { + "type": "string", + "enum": ["assistant"], + "description": "The object type, which is always 'assistant'." + }, + "created_at": { + "type": "integer", + "description": "Unix timestamp (in seconds) of when the assistant was created." + }, + "model": { + "type": "string", + "description": "The model identifier used by the assistant." 
+ }, + "metadata": { + "type": "object", + "description": "Set of key-value pairs attached to the assistant.", + "additionalProperties": true + } + }, + "required": [ + "id", + "object", + "created_at", + "model", + "metadata" + ] + } + } + } + } + }, + "tags": ["Assistants"] + }, + "delete": { + "operationId": "AssistantsController_remove", + "summary": "Delete assistant", + "description": "Deletes a specific assistant by ID.", "parameters": [ { "name": "id", @@ -106,36 +569,24 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/AssistantEntity" - } - } - } - } - }, - "tags": ["Assistants"] - }, - "delete": { - "operationId": "AssistantsController_remove", - "summary": "Delete assistant", - "description": "Deletes a specific assistant defined by an assistant's `id`.", - "parameters": [ - { - "name": "id", - "required": true, - "in": "path", - "description": "The unique identifier of the assistant.", - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "The assistant has been successfully deleted.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DeleteAssistantResponseDto" + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The unique identifier of the deleted assistant." + }, + "object": { + "type": "string", + "enum": ["assistant.deleted"], + "description": "The object type for a deleted assistant." + }, + "deleted": { + "type": "boolean", + "enum": [true], + "description": "Indicates the assistant was successfully deleted." 
+ } + }, + "required": ["id", "object", "deleted"] } } } @@ -145,6 +596,11 @@ } }, "/healthz": { + "servers": [ + { + "url": "/" + } + ], "get": { "operationId": "HealthController_check", "summary": "Check health", @@ -162,6 +618,11 @@ } }, "/processManager/destroy": { + "servers": [ + { + "url": "/" + } + ], "delete": { "operationId": "Terminate server process", "summary": "Terminate server", @@ -1991,6 +2452,11 @@ } }, "/system": { + "servers": [ + { + "url": "/" + } + ], "delete": { "operationId": "SystemController_delete", "summary": "Stop api server", @@ -2017,6 +2483,11 @@ } }, "/system/events/download": { + "servers": [ + { + "url": "/" + } + ], "get": { "operationId": "SystemController_downloadEvent", "summary": "Get download status", @@ -2038,6 +2509,11 @@ } }, "/system/events/model": { + "servers": [ + { + "url": "/" + } + ], "get": { "operationId": "SystemController_modelEvent", "summary": "Get model status", @@ -2059,6 +2535,11 @@ } }, "/system/events/resources": { + "servers": [ + { + "url": "/" + } + ], "get": { "operationId": "SystemController_resourcesEvent", "summary": "Get resources status", @@ -2187,7 +2668,85 @@ }, "url": { "type": "string", - "example": "https://api.github.com/repos/janhq/cortex.llamacpp/releases/186479804" + "example": "https://api.github.com/repos/menloresearch/cortex.llamacpp/releases/186479804" + } + } + } + } + } + } + } + }, + "tags": ["Engines"] + } + }, + "/engines/{name}/releases/{version}": { + "get": { + "summary": "List variants for a specific engine version", + "description": "Lists all available variants (builds) for a specific version of an engine. 
Variants can include different CPU architectures (AVX, AVX2, AVX512), GPU support (CUDA, Vulkan), and operating systems (Windows, Linux, macOS).", + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "enum": ["llama-cpp", "onnxruntime", "tensorrt-llm"], + "default": "llama-cpp" + }, + "description": "The type of engine" + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The version of the engine" + }, + { + "name": "show", + "in": "query", + "required": false, + "schema": { + "type": "string", + "enum": ["all", "compatible"], + "default": "all" + }, + "description": "Filter the variants list. Use 'compatible' to show only variants compatible with the current system, or 'all' to show all available variants." + } + ], + "responses": { + "200": { + "description": "Successfully retrieved variants list", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name of the variant, including OS, architecture, and capabilities", + "example": "linux-amd64-avx-cuda-11-7" + }, + "created_at": { + "type": "string", + "format": "date-time", + "description": "Creation timestamp of the variant", + "example": "2024-11-13T04:51:16Z" + }, + "size": { + "type": "integer", + "description": "Size of the variant in bytes", + "example": 151224604 + }, + "download_count": { + "type": "integer", + "description": "Number of times this variant has been downloaded", + "example": 0 } } } @@ -2314,7 +2873,7 @@ "get_models_url": { "type": "string", "description": "The URL to get models", - "example": "https://api.openai.com/v1/models" + "example": "https://api.openai.com/models" } } } @@ -3074,238 +3633,6 @@ }, "tags": ["Files"] } - }, - "/configs": { - "get": { - "summary": "Get Configurations", - "description": "Retrieves the current configuration 
settings of the Cortex server.", - "responses": { - "200": { - "description": "Successful response", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "allowed_origins": { - "type": "array", - "items": { - "type": "string" - }, - "example": ["http://127.0.0.1:39281", "https://cortex.so"] - }, - "cors": { - "type": "boolean", - "example": false - }, - "proxy_username": { - "type": "string", - "example": "username" - }, - "proxy_password": { - "type": "string", - "example": "password" - }, - "proxy_url": { - "type": "string", - "example": "http://proxy.example.com:8080" - }, - "verify_proxy_ssl": { - "type": "boolean", - "description": "test", - "example": false - }, - "verify_proxy_host_ssl": { - "type": "boolean", - "example": false - }, - "verify_peer_ssl": { - "type": "boolean", - "example": false - }, - "verify_host_ssl": { - "type": "boolean", - "example": false - }, - "no_proxy": { - "type": "string", - "example": "localhost" - }, - "huggingface_token": { - "type": "string", - "example": "your_token" - } - } - }, - "example": { - "allowed_origins": [ - "http://127.0.0.1:39281", - "https://cortex.so" - ], - "cors": false, - "proxy_username": "username", - "proxy_password": "password", - "proxy_url": "http://proxy.example.com:8080", - "verify_proxy_ssl": false, - "verify_proxy_host_ssl": false, - "verify_peer_ssl": false, - "verify_host_ssl": false, - "no_proxy": "localhost", - "huggingface_token": "your_token" - } - } - } - } - }, - "tags": ["Configurations"] - }, - "patch": { - "tags": ["Configurations"], - "summary": "Update configuration settings", - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "cors": { - "type": "boolean", - "description": "Indicates whether CORS is enabled.", - "example": false - }, - "allowed_origins": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of allowed origins.", - 
"example": ["http://127.0.0.1:39281", "https://cortex.so"] - }, - "proxy_username": { - "type": "string", - "description": "Username for the proxy server.", - "example": "username" - }, - "proxy_password": { - "type": "string", - "description": "Password for the proxy server.", - "example": "password" - }, - "proxy_url": { - "type": "string", - "description": "URL for the proxy server.", - "example": "http://proxy.example.com:8080" - }, - "verify_proxy_ssl": { - "type": "boolean", - "description": "Indicates whether to verify the SSL certificate of the proxy server.", - "example": false - }, - "verify_proxy_host_ssl": { - "type": "boolean", - "description": "Indicates whether to verify the SSL certificate of the proxy server host.", - "example": false - }, - "verify_peer_ssl": { - "type": "boolean", - "description": "Indicates whether to verify the SSL certificate of the peer.", - "example": false - }, - "verify_host_ssl": { - "type": "boolean", - "description": "Indicates whether to verify the SSL certificate of the host.", - "example": false - }, - "no_proxy": { - "type": "string", - "description": "List of hosts that should not be proxied.", - "example": "localhost" - }, - "huggingface_token": { - "type": "string", - "description": "HuggingFace token to pull models.", - "example": "your_token" - } - } - } - } - } - }, - "responses": { - "200": { - "description": "Configuration updated successfully", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "config": { - "type": "object", - "properties": { - "allowed_origins": { - "type": "array", - "items": { - "type": "string" - }, - "example": [ - "http://127.0.0.1:39281", - "https://cortex.so" - ] - }, - "cors": { - "type": "boolean", - "example": false - }, - "proxy_username": { - "type": "string", - "example": "username" - }, - "proxy_password": { - "type": "string", - "example": "password" - }, - "proxy_url": { - "type": "string", - "example": 
"http://proxy.example.com:8080" - }, - "verify_proxy_ssl": { - "type": "boolean", - "example": false - }, - "verify_proxy_host_ssl": { - "type": "boolean", - "example": false - }, - "verify_peer_ssl": { - "type": "boolean", - "example": false - }, - "verify_host_ssl": { - "type": "boolean", - "example": false - }, - "no_proxy": { - "type": "string", - "example": "localhost" - }, - "huggingface_token": { - "type": "string", - "example": "your_token" - } - } - }, - "message": { - "type": "string", - "example": "Configuration updated successfully" - } - } - } - } - } - } - } - } } }, "info": { @@ -3378,6 +3705,7 @@ "Files", "Hardware", "Events", + "Assistants", "Threads", "Messages", "Pulling Models", @@ -4001,11 +4329,11 @@ "type": "string", "enum": ["text", "audio"] }, - "description": "Specifies the modalities (types of input) supported by the model. Currently, cortex only support text modalities. We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/janhq/cortex.cpp/issues/1582).", + "description": "Specifies the modalities (types of input) supported by the model. Currently, cortex only support text modalities. We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/menloresearch/cortex.cpp/issues/1582).", "example": ["text"] }, "audio": { - "description": "Parameters for audio output. Required when audio output is requested with `modalities: ['audio']`. We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/janhq/cortex.cpp/issues/1582).", + "description": "Parameters for audio output. Required when audio output is requested with `modalities: ['audio']`. 
We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/menloresearch/cortex.cpp/issues/1582).", "type": "object", "properties": { "voice": { @@ -4022,13 +4350,13 @@ }, "store": { "type": "boolean", - "description": "Whether or not to store the output of this chat completion request for use in our model distillation or evals products. We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/janhq/cortex.cpp/issues/1582).", + "description": "Whether or not to store the output of this chat completion request for use in our model distillation or evals products. We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/menloresearch/cortex.cpp/issues/1582).", "default": false, "example": false }, "metadata": { "type": "object", - "description": "Developer-defined tags and values used for filtering completions in the dashboard. We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/janhq/cortex.cpp/issues/1582).", + "description": "Developer-defined tags and values used for filtering completions in the dashboard. We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/menloresearch/cortex.cpp/issues/1582).", "example": { "type": "conversation" } @@ -4060,7 +4388,7 @@ }, "response_format": { "type": "object", - "description": "An object specifying the format that the model must output. 
Setting to { \"type\": \"json_object\" } enables JSON mode, which guarantees the message the model generates is valid JSON. We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/janhq/cortex.cpp/issues/1582).", + "description": "An object specifying the format that the model must output. Setting to { \"type\": \"json_object\" } enables JSON mode, which guarantees the message the model generates is valid JSON. We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/menloresearch/cortex.cpp/issues/1582).", "properties": { "type": { "type": "string", @@ -4078,7 +4406,7 @@ }, "service_tier": { "type": "string", - "description": "Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n\n - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.\n- If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n- If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\nWhen not set, the default behavior is 'auto'.\nWhen this parameter is set, the response body will include the service_tier utilized.\n\n We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/janhq/cortex.cpp/issues/1582)." + "description": "Specifies the latency tier to use for processing the request. 
This parameter is relevant for customers subscribed to the scale tier service:\n\n - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.\n- If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n- If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\nWhen not set, the default behavior is 'auto'.\nWhen this parameter is set, the response body will include the service_tier utilized.\n\n We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/menloresearch/cortex.cpp/issues/1582)." }, "stream_options": { "type": "object", @@ -4144,7 +4472,7 @@ }, "user": { "type": "string", - "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/janhq/cortex.cpp/issues/1582)." + "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/menloresearch/cortex.cpp/issues/1582)." }, "dynatemp_range": { "type": "number", @@ -4858,8 +5186,8 @@ "engine", "version", "inference_params", - "TransformReq", - "TransformResp", + "transform_req", + "transform_resp", "metadata" ], "properties": { @@ -4867,9 +5195,9 @@ "type": "string", "description": "The identifier of the model." 
}, - "api_key_template": { + "header_template": { "type": "string", - "description": "Template for the API key header." + "description": "Template for the header." }, "engine": { "type": "string", @@ -4902,7 +5230,7 @@ } } }, - "TransformReq": { + "transform_req": { "type": "object", "properties": { "get_models": { @@ -4924,7 +5252,7 @@ } } }, - "TransformResp": { + "transform_resp": { "type": "object", "properties": { "chat_completions": { @@ -5632,9 +5960,9 @@ "description": "Number of GPU layers.", "example": 33 }, - "api_key_template": { + "header_template": { "type": "string", - "description": "Template for the API key header." + "description": "Template for the header." }, "version": { "type": "string", diff --git a/server/global.d.ts b/server/global.d.ts new file mode 100644 index 000000000..1fb95b9a0 --- /dev/null +++ b/server/global.d.ts @@ -0,0 +1 @@ +declare const CORTEX_API_URL: string \ No newline at end of file diff --git a/server/index.ts b/server/index.ts index 4008d7008..4ea927199 100644 --- a/server/index.ts +++ b/server/index.ts @@ -86,6 +86,14 @@ export const startServer = async (configs?: ServerConfig): Promise => { }, }) + const rewriteRequestHeaders = (req: any, headers: any) => { + if (req.url.includes('/configs')) return headers + return { + ...headers, + authorization: `Bearer ${process.env.appToken}`, // Add or modify Authorization header + } + } + // Register Swagger UI await server.register(require('@fastify/swagger-ui'), { routePrefix: '/', @@ -97,10 +105,41 @@ export const startServer = async (configs?: ServerConfig): Promise => { transformSpecificationClone: true, }) - server.register(require('@fastify/http-proxy'), { - upstream: 'http://127.0.0.1:39291/v1', + const proxy = require('@fastify/http-proxy') + server.register(proxy, { + upstream: `${CORTEX_API_URL}/v1`, prefix: configs?.prefix ?? 
'/v1', http2: false, + replyOptions: { + rewriteRequestHeaders, + }, + }) + + server.register(proxy, { + upstream: `${CORTEX_API_URL}/processManager`, + prefix: '/processManager', + http2: false, + replyOptions: { + rewriteRequestHeaders, + }, + }) + + server.register(proxy, { + upstream: `${CORTEX_API_URL}/system`, + prefix: '/system', + http2: false, + replyOptions: { + rewriteRequestHeaders, + }, + }) + + server.register(proxy, { + upstream: `${CORTEX_API_URL}/healthz`, + prefix: '/healthz', + http2: false, + replyOptions: { + rewriteRequestHeaders, + }, }) // Start listening for requests diff --git a/server/rolldown.config.mjs b/server/rolldown.config.mjs index c74655577..5f094a1af 100644 --- a/server/rolldown.config.mjs +++ b/server/rolldown.config.mjs @@ -14,5 +14,8 @@ export default defineConfig([ }, external: ['@fastify/swagger-ui'], platform: 'node', + define: { + CORTEX_API_URL: JSON.stringify(`http://127.0.0.1:${process.env.CORTEX_API_PORT ?? "39291"}`), + } }, ]) diff --git a/specs/QA-checklist.md b/specs/QA-checklist.md index 8bb5168dd..ddfee32d2 100644 --- a/specs/QA-checklist.md +++ b/specs/QA-checklist.md @@ -26,7 +26,6 @@ - [ ] :key::warning: Uninstallation process removes the app successfully from the system. - [ ] Clean the data folder and open the app to check if it creates all the necessary folders, especially models and extensions. - ## B. Overview ### 1. Shortcut key @@ -38,7 +37,7 @@ - [ ] :key: The app correctly displays the state of the loading model (e.g., loading, ready, error). - [ ] :key: Confirm that the app allows users to switch between models if multiple are available. - [ ] Check that the app provides feedback or instructions if the model fails to load. -- [ ] Verify the troubleshooting assistant correctly capture hardware / log info [#1784](https://github.com/janhq/jan/issues/1784) +- [ ] Verify the troubleshooting assistant correctly capture hardware / log info [#1784](https://github.com/menloresearch/jan/issues/1784) ## C. 
Thread @@ -70,26 +69,29 @@ - [ ] :key: Users switch between threads with different models, the app can handle it. ### 3. Model dropdown + - :key: Model list should highlight recommended based on user RAM (this is not really correct, I think it's based on static formula) - [ ] Model size should display (for both installed and imported models) ### 4. Users can click on a history thread + - [ ] Chat window displays the entire conversation from the selected history thread without any missing messages. - [ ] Historical threads reflect the exact state of the chat at that time, including settings. - [ ] :key: Ability to delete or clean old threads. - [ ] Changing the title of the thread updates correctly. ### 5. Users can config instructions for the assistant. + - [ ] Instructions set by the user are being followed by the assistant in subsequent conversations. - [ ] :key: Changes to instructions are updated in real time and do not require a restart of the application or session. - [ ] :key: Ability to reset instructions to default or clear them completely. - [ ] :key: RAG - Users can import documents and the system should process queries about the uploaded file, providing accurate and appropriate responses in the conversation thread. -- [ ] :key: Jan can see - Users can import image and Model with vision can generate responses (e.g. LLaVa model). [#294](https://github.com/janhq/jan/issues/294) - +- [ ] :key: Jan can see - Users can import image and Model with vision can generate responses (e.g. LLaVa model). [#294](https://github.com/menloresearch/jan/issues/294) ## D. Hub ### 1. Users can discover recommended models + - :key: Each model's recommendations are consistent with the user’s activity and preferences. - [ ] Search models and verify results / action on the results @@ -99,10 +101,10 @@ - [ ] :key: Ensure that models are labeled with RAM requirements. - [ ] :key: Check the download model functionality and validate if the cancel download feature works correctly. 
-### 3. Users can download models via a HuggingFace URL [#1740](https://github.com/janhq/jan/issues/1740) +### 3. Users can download models via a HuggingFace URL [#1740](https://github.com/menloresearch/jan/issues/1740) - [ ] :key: Import via Hugging Face Id / full HuggingFace URL, check the progress bar reflects the download process -- [ ] :key: Test deeplink import [#2876](https://github.com/janhq/jan/issues/2876) +- [ ] :key: Test deeplink import [#2876](https://github.com/menloresearch/jan/issues/2876) - [ ] :key: Users can use / remove the imported model. ### 4. Users can import new models to the Hub @@ -112,16 +114,16 @@ - [ ] Users can add more info to the imported model / edit name - [ ] :key: Ensure the new model updates after restarting the app. - ### 5. Users can use the model as they want -- [ ] :key: Check `start` / `stop` / `delete` button response exactly what it does. +- [ ] :key: Check `start` / `stop` / `delete` button response exactly what it does. - [ ] Check if starting another model stops the other model entirely. - [ ] :rocket: Navigate to `hub` > Click `Use` button to use model. Expect to jump to thread and see the model in dropdown model selector. - [ ] :key: Check when deleting a model it will delete all the files on the user's computer. - [ ] :warning:The recommended tags should present right for the user's hardware. ### 6. Users can Integrate With a Remote Server + - [ ] :key: Import openAI GPT model https://jan.ai/guides/using-models/integrate-with-remote-server/ and the model displayed in Hub / Thread dropdown - [ ] Users can use the remote model properly (openAI GPT, Groq) @@ -129,7 +131,7 @@ ### 1. Users can see disk and RAM utilization -- [ ] :key: Verify that the RAM and VRAM utilization graphs accurately reported in real time. +- [ ] :key: Verify that the RAM and VRAM utilization graphs accurately reported in real time. 
- [ ] :key: Validate that the utilization percentages reflect the actual usage compared to the system's total available resources. - [ ] :key: Ensure that the system monitors updates dynamically as the models run and stop. @@ -157,21 +159,21 @@ - [ ] :key: Users can set valid Endpoint and API Key to use remote models - [ ] Monitoring extension should allow users to enable / disable log and set log Cleaning Interval - ### 4. Advanced settings - [ ] :key: Test the `Experimental Mode` toggle to confirm it enables or disables experimental features as intended. - [ ] :key: Check the functionality of `Open App Directory` to ensure it opens the correct folder in the system file explorer. - [ ] Users can move **Jan data folder** - [ ] Validate that changes in advanced settings are applied immediately or provide appropriate instructions if a restart is needed. -- [ ] Attemp to test downloading model from hub using **HTTP Proxy** [guideline](https://github.com/janhq/jan/pull/1562) +- [ ] Attemp to test downloading model from hub using **HTTP Proxy** [guideline](https://github.com/menloresearch/jan/pull/1562) - [ ] Logs that are older than 7 days or exceed 1MB in size will be automatically cleared upon starting the application. - [ ] Users can click on Reset button to **factory reset** app settings to its original state & delete all usage data. - - [ ] Keep the current app data location - - [ ] Reset the current app data location + - [ ] Keep the current app data location + - [ ] Reset the current app data location - [ ] Users can enable the setting and chat using quick ask. ### 5. Engine + - [ ] :key: TensorRT Engine - Users able to chat with the model - [ ] :key: Onnx Engine - Users able to chat with the model - [ ] :key: Other remote Engine - Users able to chat with the model @@ -179,9 +181,10 @@ ## G. Local API server ### 1. 
Local Server Usage with Server Options + - [ ] :key: Explore API Reference: Swagger API for sending/receiving requests - - [ ] Use default server option - - [ ] Configure and use custom server options + - [ ] Use default server option + - [ ] Configure and use custom server options - [ ] Test starting/stopping the local API server with different Model/Model settings - [ ] Server logs captured with correct Server Options provided - [ ] Verify functionality of Open logs/Clear feature diff --git a/web/app/layout.tsx b/web/app/layout.tsx index 5f14d6f5c..aaa905a49 100644 --- a/web/app/layout.tsx +++ b/web/app/layout.tsx @@ -13,10 +13,7 @@ export const metadata: Metadata = { export default function RootLayout({ children }: PropsWithChildren) { return ( - -
- {children} - + {children} ) } diff --git a/web/app/search/UserInput.tsx b/web/app/search/UserInput.tsx index cec694c90..fcabc8ea4 100644 --- a/web/app/search/UserInput.tsx +++ b/web/app/search/UserInput.tsx @@ -66,7 +66,7 @@ const UserInput = () => { - - - - - - - - - + + + + ) } diff --git a/web/app/search/page.tsx b/web/app/search/page.tsx index 947999e62..51cf04549 100644 --- a/web/app/search/page.tsx +++ b/web/app/search/page.tsx @@ -5,6 +5,7 @@ import UserInput from './UserInput' const Search = () => { return (
+
) diff --git a/web/containers/AutoLink/index.tsx b/web/containers/AutoLink/index.tsx index 66c84f7f7..0f10f478a 100644 --- a/web/containers/AutoLink/index.tsx +++ b/web/containers/AutoLink/index.tsx @@ -10,23 +10,25 @@ const AutoLink = ({ text }: Props) => { return ( <> - {text.split(delimiter).map((word) => { - const match = word.match(delimiter) - if (match) { - const url = match[0] - return ( - - {url} - - ) - } - return word - })} + {text && + typeof text === 'string' && + text.split(delimiter).map((word) => { + const match = word.match(delimiter) + if (match) { + const url = match[0] + return ( + + {url} + + ) + } + return word + })} ) } diff --git a/web/containers/ErrorMessage/index.tsx b/web/containers/ErrorMessage/index.tsx index cd9334283..ab5a35d32 100644 --- a/web/containers/ErrorMessage/index.tsx +++ b/web/containers/ErrorMessage/index.tsx @@ -23,7 +23,13 @@ import { mainViewStateAtom } from '@/helpers/atoms/App.atom' import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom' import { selectedSettingAtom } from '@/helpers/atoms/Setting.atom' -const ErrorMessage = ({ message }: { message: ThreadMessage }) => { +const ErrorMessage = ({ + message, + errorComponent, +}: { + message?: ThreadMessage + errorComponent?: React.ReactNode +}) => { const setModalTroubleShooting = useSetAtom(modalTroubleShootingAtom) const setMainState = useSetAtom(mainViewStateAtom) const setSelectedSettingScreen = useSetAtom(selectedSettingAtom) @@ -50,7 +56,7 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => { const getErrorTitle = () => { const engine = getEngine() - switch (message.metadata?.error_code) { + switch (message?.metadata?.error_code) { case ErrorCode.InvalidApiKey: case ErrorCode.AuthenticationError: return ( @@ -61,7 +67,7 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => { className="font-medium text-[hsla(var(--app-link))] underline" onClick={() => { setMainState(MainViewState.Settings) - engine?.name && 
setSelectedSettingScreen(engine.name) + setSelectedSettingScreen(activeAssistant?.model?.engine ?? '') }} > Settings @@ -77,7 +83,7 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => { data-testid="passthrough-error-message" className="first-letter:uppercase" > - {message.content[0]?.text?.value === 'Failed to fetch' && + {message?.content[0]?.text?.value === 'Failed to fetch' && engine && engine?.name !== InferenceEngine.cortex_llamacpp ? ( @@ -89,6 +95,9 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => { {message?.content[0]?.text?.value && ( )} + {!message?.content[0]?.text?.value && ( + Something went wrong. Please try again. + )} )}

@@ -100,12 +109,15 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
-
Error
-
-
+
+ + Error +
+
+
setModalTroubleShooting(true)} @@ -116,7 +128,7 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
{copied ? ( @@ -138,10 +150,10 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
- {getErrorTitle()} + {errorComponent ? errorComponent : getErrorTitle()}
diff --git a/web/containers/Layout/BottomPanel/InstallingExtension/InstallingExtensionModal.tsx b/web/containers/Layout/BottomPanel/InstallingExtension/InstallingExtensionModal.tsx deleted file mode 100644 index 0d5e4d4e3..000000000 --- a/web/containers/Layout/BottomPanel/InstallingExtension/InstallingExtensionModal.tsx +++ /dev/null @@ -1,79 +0,0 @@ -import { useCallback, useEffect } from 'react' - -import { abortDownload } from '@janhq/core' -import { Button, Modal, Progress } from '@janhq/joi' -import { atom, useAtom, useAtomValue } from 'jotai' - -import { - formatDownloadPercentage, - formatExtensionsName, -} from '@/utils/converter' - -import { - InstallingExtensionState, - installingExtensionAtom, -} from '@/helpers/atoms/Extension.atom' - -export const showInstallingExtensionModalAtom = atom(false) - -const InstallingExtensionModal = () => { - const [showInstallingExtensionModal, setShowInstallingExtensionModal] = - useAtom(showInstallingExtensionModalAtom) - const installingExtensions = useAtomValue(installingExtensionAtom) - - useEffect(() => { - if (installingExtensions.length === 0) { - setShowInstallingExtensionModal(false) - } - }, [installingExtensions, setShowInstallingExtensionModal]) - - const onAbortInstallingExtensionClick = useCallback( - (item: InstallingExtensionState) => { - if (item.localPath) { - abortDownload(item.localPath) - } - }, - [] - ) - - return ( - setShowInstallingExtensionModal(false)} - content={ -
- {Object.values(installingExtensions).map((item) => ( -
- -
-
-

- {formatExtensionsName(item.extensionId)} -

- {formatDownloadPercentage(item.percentage)} -
- -
-
- ))} -
- } - /> - ) -} - -export default InstallingExtensionModal diff --git a/web/containers/Layout/BottomPanel/InstallingExtension/index.tsx b/web/containers/Layout/BottomPanel/InstallingExtension/index.tsx deleted file mode 100644 index b41b64e22..000000000 --- a/web/containers/Layout/BottomPanel/InstallingExtension/index.tsx +++ /dev/null @@ -1,52 +0,0 @@ -import { Fragment, useCallback } from 'react' - -import { Progress } from '@janhq/joi' -import { useAtomValue, useSetAtom } from 'jotai' - -import { showInstallingExtensionModalAtom } from './InstallingExtensionModal' - -import { installingExtensionAtom } from '@/helpers/atoms/Extension.atom' - -const InstallingExtension = () => { - const installingExtensions = useAtomValue(installingExtensionAtom) - const setShowInstallingExtensionModal = useSetAtom( - showInstallingExtensionModalAtom - ) - const shouldShowInstalling = installingExtensions.length > 0 - - let totalPercentage = 0 - let totalExtensions = 0 - for (const installation of installingExtensions) { - totalPercentage += installation.percentage - totalExtensions++ - } - const progress = (totalPercentage / totalExtensions) * 100 - - const onClick = useCallback(() => { - setShowInstallingExtensionModal(true) - }, [setShowInstallingExtensionModal]) - - return ( - - {shouldShowInstalling ? ( -
-

- Installing Additional Dependencies -

- -
- - - {progress.toFixed(2)}% - -
-
- ) : null} -
- ) -} - -export default InstallingExtension diff --git a/web/containers/Layout/BottomPanel/SystemMonitor/SystemMonitor.test.tsx b/web/containers/Layout/BottomPanel/SystemMonitor/SystemMonitor.test.tsx index 2eba5edbb..ca336b0e5 100644 --- a/web/containers/Layout/BottomPanel/SystemMonitor/SystemMonitor.test.tsx +++ b/web/containers/Layout/BottomPanel/SystemMonitor/SystemMonitor.test.tsx @@ -87,7 +87,7 @@ describe('SystemMonitor', () => { expect(screen.getByText('Running Models')).toBeInTheDocument() expect(screen.getByText('App Log')).toBeInTheDocument() - expect(screen.getByText('7.45/14.90 GB')).toBeInTheDocument() + expect(screen.getByText('7.45GB / 14.90GB')).toBeInTheDocument() expect(screen.getByText('30%')).toBeInTheDocument() }) diff --git a/web/containers/Layout/BottomPanel/SystemMonitor/index.tsx b/web/containers/Layout/BottomPanel/SystemMonitor/index.tsx index f47dfaeb7..d9a0b289a 100644 --- a/web/containers/Layout/BottomPanel/SystemMonitor/index.tsx +++ b/web/containers/Layout/BottomPanel/SystemMonitor/index.tsx @@ -134,8 +134,8 @@ const SystemMonitor = () => {
Memory
- {toGigabytes(usedRam, { hideUnit: true })}/ - {toGigabytes(totalRam, { hideUnit: true })} GB + {toGigabytes(usedRam, { hideUnit: true })}GB /{' '} + {toGigabytes(totalRam, { hideUnit: true })}GB
@@ -149,41 +149,43 @@ const SystemMonitor = () => {
{gpus.length > 0 && (
- {gpus.map((gpu, index) => { - const gpuUtilization = utilizedMemory( - gpu.memoryFree, - gpu.memoryTotal - ) - return ( -
-
- - {gpu.name} - -
-
- - {gpu.memoryTotal - gpu.memoryFree}/ - {gpu.memoryTotal} - - MB + {gpus + .filter((gpu) => gpu.activated === true) + .map((gpu, index) => { + const gpuUtilization = utilizedMemory( + gpu.free_vram, + gpu.total_vram + ) + return ( +
+
+ + {gpu.name} + +
+
+ + {gpu.total_vram - gpu.free_vram}/ + {gpu.total_vram} + + MB +
-
-
- - - {gpuUtilization}% - +
+ + + {gpuUtilization}% + +
-
- ) - })} + ) + })}
)}
diff --git a/web/containers/Layout/BottomPanel/UpdateFailedModal/index.tsx b/web/containers/Layout/BottomPanel/UpdateFailedModal/index.tsx index 8021a63e0..3591b4c6a 100644 --- a/web/containers/Layout/BottomPanel/UpdateFailedModal/index.tsx +++ b/web/containers/Layout/BottomPanel/UpdateFailedModal/index.tsx @@ -21,7 +21,7 @@ const UpdatedFailedModal = () => { {error}. We appreciate your help with{' '} @@ -35,7 +35,10 @@ const UpdatedFailedModal = () => { { - window.open('https://github.com/janhq/jan#download', '_blank') + window.open( + 'https://github.com/menloresearch/jan#download', + '_blank' + ) setError(undefined) }} > diff --git a/web/containers/Layout/BottomPanel/index.tsx b/web/containers/Layout/BottomPanel/index.tsx index 69894c9e3..4dd4a742c 100644 --- a/web/containers/Layout/BottomPanel/index.tsx +++ b/web/containers/Layout/BottomPanel/index.tsx @@ -7,7 +7,6 @@ import { twMerge } from 'tailwind-merge' import DownloadingState from './DownloadingState' import ImportingModelState from './ImportingModelState' -import InstallingExtension from './InstallingExtension' import SystemMonitor from './SystemMonitor' import UpdateApp from './UpdateApp' import UpdatedFailedModal from './UpdateFailedModal' @@ -24,7 +23,7 @@ const menuLinks = [ { name: 'Github', icon: , - link: 'https://github.com/janhq/jan', + link: 'https://github.com/menloresearch/jan', }, ] @@ -49,7 +48,6 @@ const BottomPanel = () => { -
diff --git a/web/containers/Layout/RibbonPanel/index.tsx b/web/containers/Layout/RibbonPanel/index.tsx index 13116dc16..a82fbbe99 100644 --- a/web/containers/Layout/RibbonPanel/index.tsx +++ b/web/containers/Layout/RibbonPanel/index.tsx @@ -1,4 +1,7 @@ +import { useEffect } from 'react' + import { Tooltip, useMediaQuery } from '@janhq/joi' + import { useAtom, useAtomValue, useSetAtom } from 'jotai' import { MessageCircleIcon, @@ -26,7 +29,7 @@ export default function RibbonPanel() { const [mainViewState, setMainViewState] = useAtom(mainViewStateAtom) const [serverEnabled] = useAtom(serverEnabledAtom) const setEditMessage = useSetAtom(editMessageAtom) - const showLeftPanel = useAtomValue(showLeftPanelAtom) + const [showLeftPanel, setShowLeftPanel] = useAtom(showLeftPanelAtom) const matches = useMediaQuery('(max-width: 880px)') const reduceTransparent = useAtomValue(reduceTransparentAtom) const setSelectedSetting = useSetAtom(selectedSettingAtom) @@ -34,6 +37,13 @@ export default function RibbonPanel() { const threads = useAtomValue(threadsAtom) const isDownloadALocalModel = useAtomValue(isDownloadALocalModelAtom) + useEffect(() => { + if (mainViewState === MainViewState.Settings) { + setShowLeftPanel(true) + } + return () => setShowLeftPanel(showLeftPanel) + }, [mainViewState]) + const onMenuClick = (state: MainViewState) => { if (mainViewState === state) return if (serverEnabled && state === MainViewState.Thread) return diff --git a/web/containers/Layout/index.tsx b/web/containers/Layout/index.tsx index 5b17eb4fc..db675d5eb 100644 --- a/web/containers/Layout/index.tsx +++ b/web/containers/Layout/index.tsx @@ -25,17 +25,22 @@ import ImportModelOptionModal from '@/screens/Settings/ImportModelOptionModal' import ImportingModelModal from '@/screens/Settings/ImportingModelModal' import SelectingModelModal from '@/screens/Settings/SelectingModelModal' +import { getAppDistinctId, updateDistinctId } from '@/utils/settings' + import LoadingModal from '../LoadingModal' 
import MainViewContainer from '../MainViewContainer' -import InstallingExtensionModal from './BottomPanel/InstallingExtension/InstallingExtensionModal' +import ModalAppUpdaterChangelog from '../ModalAppUpdaterChangelog' + +import ModalAppUpdaterNotAvailable from '../ModalAppUpdaterNotAvailable' import { mainViewStateAtom } from '@/helpers/atoms/App.atom' import { productAnalyticAtom, productAnalyticPromptAtom, reduceTransparentAtom, + showScrollBarAtom, } from '@/helpers/atoms/Setting.atom' const BaseLayout = () => { @@ -46,6 +51,7 @@ const BaseLayout = () => { const [productAnalyticPrompt, setProductAnalyticPrompt] = useAtom( productAnalyticPromptAtom ) + const showScrollBar = useAtomValue(showScrollBarAtom) const [showProductAnalyticPrompt, setShowProductAnalyticPrompt] = useState(false) @@ -92,8 +98,16 @@ const BaseLayout = () => { return properties }, }) - posthog.opt_in_capturing() - posthog.register({ app_version: VERSION }) + // Attempt to restore distinct Id from app global settings + getAppDistinctId() + .then((id) => { + if (id) posthog.identify(id) + }) + .finally(() => { + posthog.opt_in_capturing() + posthog.register({ app_version: VERSION }) + updateDistinctId(posthog.get_distinct_id()) + }) } else { posthog.opt_out_capturing() } @@ -136,7 +150,12 @@ const BaseLayout = () => { )} > -
+
@@ -146,7 +165,6 @@ const BaseLayout = () => { {importModelStage === 'EDIT_MODEL_INFO' && } {importModelStage === 'CONFIRM_CANCEL' && } - {showProductAnalyticPrompt && (
@@ -222,6 +240,8 @@ const BaseLayout = () => { )}
+ +
) } diff --git a/web/containers/LeftPanelContainer/index.tsx b/web/containers/LeftPanelContainer/index.tsx index 523af5ddb..ac4b8893f 100644 --- a/web/containers/LeftPanelContainer/index.tsx +++ b/web/containers/LeftPanelContainer/index.tsx @@ -12,7 +12,10 @@ import { atom, useAtom, useAtomValue } from 'jotai' import { twMerge } from 'tailwind-merge' import { showLeftPanelAtom } from '@/helpers/atoms/App.atom' -import { reduceTransparentAtom } from '@/helpers/atoms/Setting.atom' +import { + reduceTransparentAtom, + showScrollBarAtom, +} from '@/helpers/atoms/Setting.atom' type Props = PropsWithChildren @@ -27,6 +30,7 @@ const LeftPanelContainer = ({ children }: Props) => { const [showLeftPanel, setShowLeftPanel] = useAtom(showLeftPanelAtom) const matches = useMediaQuery('(max-width: 880px)') const reduceTransparent = useAtomValue(reduceTransparentAtom) + const showScrollBar = useAtomValue(showScrollBarAtom) useClickOutside( () => matches && showLeftPanel && setShowLeftPanel(false), @@ -101,7 +105,10 @@ const LeftPanelContainer = ({ children }: Props) => { style={{ width: showLeftPanel ? 
leftPanelWidth : 0 }} onMouseDown={(e) => isResizing && e.stopPropagation()} > - + {children} {showLeftPanel && !matches && ( diff --git a/web/containers/ListContainer/index.tsx b/web/containers/ListContainer/index.tsx index 44e5b2527..3184c171b 100644 --- a/web/containers/ListContainer/index.tsx +++ b/web/containers/ListContainer/index.tsx @@ -4,6 +4,7 @@ import { ScrollArea } from '@janhq/joi' import { useAtomValue } from 'jotai' +import { showScrollBarAtom } from '@/helpers/atoms/Setting.atom' import { activeThreadAtom } from '@/helpers/atoms/Thread.atom' const ListContainer = ({ children }: PropsWithChildren) => { @@ -12,6 +13,7 @@ const ListContainer = ({ children }: PropsWithChildren) => { const isUserManuallyScrollingUp = useRef(false) const activeThread = useAtomValue(activeThreadAtom) const prevActiveThread = useRef(activeThread) + const showScrollBar = useAtomValue(showScrollBarAtom) // Handle active thread changes useEffect(() => { @@ -59,6 +61,7 @@ const ListContainer = ({ children }: PropsWithChildren) => { return ( { +const Spinner = ({ size = 40, strokeWidth = 4, className = '' }) => { const radius = size / 2 - strokeWidth const circumference = 2 * Math.PI * radius @@ -11,6 +11,7 @@ const Spinner = ({ size = 40, strokeWidth = 4 }) => { viewBox={`0 0 ${size} ${size}`} style={{ overflow: 'visible' }} animate={{ rotate: 360 }} + className={className} transition={{ repeat: Infinity, duration: 2, // Adjust for desired speed diff --git a/web/containers/ModalAppUpdaterChangelog/index.tsx b/web/containers/ModalAppUpdaterChangelog/index.tsx new file mode 100644 index 000000000..705623a90 --- /dev/null +++ b/web/containers/ModalAppUpdaterChangelog/index.tsx @@ -0,0 +1,91 @@ +import React, { useEffect, useState } from 'react' + +import { Button, Modal } from '@janhq/joi' + +import { useAtom } from 'jotai' + +import { useGetLatestRelease } from '@/hooks/useGetLatestRelease' + +import { MarkdownTextMessage } from 
'@/screens/Thread/ThreadCenterPanel/TextMessage/MarkdownTextMessage' + +import LogoMark from '../Brand/Logo/Mark' + +import { appUpdateAvailableAtom } from '@/helpers/atoms/App.atom' + +const ModalAppUpdaterChangelog = () => { + const [appUpdateAvailable, setAppUpdateAvailable] = useAtom( + appUpdateAvailableAtom + ) + + const [open, setOpen] = useState(appUpdateAvailable) + + useEffect(() => { + setOpen(appUpdateAvailable) + }, [appUpdateAvailable]) + + const beta = VERSION.includes('beta') + const nightly = VERSION.includes('-') + + const { release } = useGetLatestRelease(beta ? true : false) + + return ( + +
+ +
App Update
+
+ {!nightly && ( +

+ Version {release?.name} is available and ready to install. +

+ )} + + } + open={open} + onOpenChange={() => setOpen(!open)} + content={ +
+ {nightly ? ( +

+ You are using a nightly build. This version is built from the + latest development branch and may not have release notes. +

+ ) : ( + <> +
+ +
+ + )} +
+ + +
+
+ } + /> + ) +} + +export default ModalAppUpdaterChangelog diff --git a/web/containers/ModalAppUpdaterNotAvailable/index.tsx b/web/containers/ModalAppUpdaterNotAvailable/index.tsx new file mode 100644 index 000000000..5f2b25fda --- /dev/null +++ b/web/containers/ModalAppUpdaterNotAvailable/index.tsx @@ -0,0 +1,57 @@ +import React, { useEffect, useState } from 'react' + +import { Button, Modal } from '@janhq/joi' + +import { useAtom } from 'jotai' + +import LogoMark from '../Brand/Logo/Mark' + +import { appUpdateNotAvailableAtom } from '@/helpers/atoms/App.atom' + +const ModalAppUpdaterNotAvailable = () => { + const [appUpdateNotAvailable, setAppUpdateNotAvailable] = useAtom( + appUpdateNotAvailableAtom + ) + + const [open, setOpen] = useState(appUpdateNotAvailable) + + useEffect(() => { + setOpen(appUpdateNotAvailable) + }, [appUpdateNotAvailable]) + + return ( + +
+ +
App Update
+
+ + } + open={open} + onOpenChange={() => setOpen(!open)} + content={ +
+

+ You’re up to date! No new updates available +

+
+ +
+
+ } + /> + ) +} + +export default ModalAppUpdaterNotAvailable diff --git a/web/containers/ModalTroubleShoot/index.tsx b/web/containers/ModalTroubleShoot/index.tsx index 77ee51034..a069362da 100644 --- a/web/containers/ModalTroubleShoot/index.tsx +++ b/web/containers/ModalTroubleShoot/index.tsx @@ -83,7 +83,7 @@ const ModalTroubleShooting = () => { > Discord
-  & send it to #🆘|get-help channel for further +  & send it to #🆘|jan-help channel for further support.

diff --git a/web/containers/ModelDownloadButton/index.tsx b/web/containers/ModelDownloadButton/index.tsx index 946c2b2f8..cd97743c2 100644 --- a/web/containers/ModelDownloadButton/index.tsx +++ b/web/containers/ModelDownloadButton/index.tsx @@ -3,6 +3,8 @@ import { useCallback, useMemo } from 'react' import { Button, Tooltip } from '@janhq/joi' import { useAtomValue, useSetAtom } from 'jotai' +import { twMerge } from 'tailwind-merge' + import { MainViewState } from '@/constants/screens' import { useCreateNewThread } from '@/hooks/useCreateNewThread' @@ -13,6 +15,7 @@ import ModalCancelDownload from '../ModalCancelDownload' import { mainViewStateAtom } from '@/helpers/atoms/App.atom' import { assistantsAtom } from '@/helpers/atoms/Assistant.atom' +import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom' import { downloadedModelsAtom, getDownloadingModelAtom, @@ -22,10 +25,13 @@ interface Props { id: string theme?: 'primary' | 'ghost' | 'icon' | 'destructive' | undefined variant?: 'solid' | 'soft' | 'outline' | undefined + className?: string + hideProgress?: boolean } -const ModelDownloadButton = ({ id, theme, variant }: Props) => { +const ModelDownloadButton = ({ id, theme, className, hideProgress }: Props) => { const { downloadModel } = useDownloadModel() const downloadingModels = useAtomValue(getDownloadingModelAtom) + const serverEnabled = useAtomValue(serverEnabledAtom) const downloadedModels = useAtomValue(downloadedModelsAtom) const assistants = useAtomValue(assistantsAtom) const setMainViewState = useSetAtom(mainViewStateAtom) @@ -59,7 +65,7 @@ const ModelDownloadButton = ({ id, theme, variant }: Props) => { const defaultButton = ( ) - const downloadingButton = + const downloadingButton = !hideProgress && ( + + ) + const downloadedButton = ( { variant="outline" theme="ghost" className="min-w-[70px]" + disabled={serverEnabled} > Use } content="Threads are disabled while the server is running" + disabled={!serverEnabled} /> ) return ( diff --git 
a/web/containers/ModelDropdown/index.tsx b/web/containers/ModelDropdown/index.tsx index 6d2cc0b23..d95a114c4 100644 --- a/web/containers/ModelDropdown/index.tsx +++ b/web/containers/ModelDropdown/index.tsx @@ -15,6 +15,7 @@ import { import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai' import { + CheckIcon, ChevronDownIcon, ChevronUpIcon, DownloadCloudIcon, @@ -28,19 +29,23 @@ import ModelLabel from '@/containers/ModelLabel' import SetupRemoteModel from '@/containers/SetupRemoteModel' +import { useActiveModel } from '@/hooks/useActiveModel' + import { useCreateNewThread } from '@/hooks/useCreateNewThread' import useDownloadModel from '@/hooks/useDownloadModel' import { modelDownloadStateAtom } from '@/hooks/useDownloadState' import { useGetEngines } from '@/hooks/useEngineManagement' +import { useGetFeaturedSources } from '@/hooks/useModelSource' import useRecommendedModel from '@/hooks/useRecommendedModel' import useUpdateModelParameters from '@/hooks/useUpdateModelParameters' import { formatDownloadPercentage, toGigabytes } from '@/utils/converter' -import { manualRecommendationModel } from '@/utils/model' -import { getLogoEngine } from '@/utils/modelEngine' +import { getLogoEngine, getTitleByEngine } from '@/utils/modelEngine' + +import { extractModelName } from '@/utils/modelSource' import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom' import { @@ -50,6 +55,7 @@ import { showEngineListModelAtom, } from '@/helpers/atoms/Model.atom' +import { showScrollBarAtom } from '@/helpers/atoms/Setting.atom' import { activeThreadAtom, setThreadModelParamsAtom, @@ -72,6 +78,7 @@ const ModelDropdown = ({ const [modelDropdownState, setModelDropdownState] = useAtom( modelDropdownStateAtom ) + const showScrollBar = useAtomValue(showScrollBarAtom) const [searchFilter, setSearchFilter] = useState('local') const [searchText, setSearchText] = useState('') @@ -85,6 +92,7 @@ const ModelDropdown = ({ const [dropdownOptions, setDropdownOptions] = useState( null 
) + const { sources: featuredModels } = useGetFeaturedSources() const { engines } = useGetEngines() @@ -93,13 +101,8 @@ const ModelDropdown = ({ const { updateModelParameter } = useUpdateModelParameters() const searchInputRef = useRef(null) const configuredModels = useAtomValue(configuredModelsAtom) + const { stopModel } = useActiveModel() - const featuredModels = configuredModels.filter( - (x) => - manualRecommendationModel.includes(x.id) && - x.metadata?.tags?.includes('Featured') && - x.metadata?.size < 5000000000 - ) const { updateThreadMetadata } = useCreateNewThread() const engineList = useMemo( @@ -226,6 +229,7 @@ const ModelDropdown = ({ const model = downloadedModels.find((m) => m.id === modelId) setSelectedModel(model) setOpen(false) + stopModel() if (activeThread) { // Change assistand tools based on model support RAG @@ -248,18 +252,13 @@ const ModelDropdown = ({ ], }) - const defaultContextLength = Math.min( - 8192, - model?.settings.ctx_len ?? 8192 - ) - + const contextLength = model?.settings.ctx_len + ? Math.min(8192, model?.settings.ctx_len ?? 8192) + : undefined const overriddenParameters = { - ctx_len: model?.settings.ctx_len ? defaultContextLength : undefined, - max_tokens: defaultContextLength - ? Math.min( - model?.parameters.max_tokens ?? 8192, - defaultContextLength - ) + ctx_len: contextLength, + max_tokens: contextLength + ? Math.min(model?.parameters.max_tokens ?? 8192, contextLength) : model?.parameters.max_tokens, } @@ -289,6 +288,7 @@ const ModelDropdown = ({ updateThreadMetadata, setThreadModelParams, updateModelParameter, + stopModel, ] ) @@ -384,7 +384,10 @@ const ModelDropdown = ({ } />
- + {engineList .filter((e) => e.type === searchFilter) .filter( @@ -429,7 +432,7 @@ const ModelDropdown = ({ /> )}
- {engine.name} + {getTitleByEngine(engine.name)}
@@ -464,9 +467,9 @@ const ModelDropdown = ({ showModel && !searchText.length && (
    - {featuredModels.map((model) => { + {featuredModels?.map((model) => { const isDownloading = downloadingModels.some( - (md) => md === model.id + (md) => md === (model.models[0]?.id ?? model.id) ) return (
  • - {model.name} + {extractModelName(model.id)}

    - {toGigabytes(model.metadata?.size)} + {toGigabytes(model.models[0]?.size)} {!isDownloading ? ( - downloadModel( - model.sources[0].url, - model.id - ) + downloadModel(model.models[0]?.id) } /> ) : ( Object.values(downloadStates) - .filter((x) => x.modelId === model.id) + .filter( + (x) => + x.modelId === + (model.models[0]?.id ?? model.id) + ) .map((item) => ( c.id === model.id ) return ( -
  • { - if (!isConfigured && engine.type === 'remote') - return null - if (isDownloaded) { - onClickModelItem(model.id) - } - }} - > -
    -

    + {isDownloaded && ( +

  • - {model.name} -

    - -
-
- {!isDownloaded && ( - - {toGigabytes(model.metadata?.size)} - - )} - {!isDownloading && !isDownloaded ? ( - - downloadModel( - model.sources[0].url, - model.id - ) + onClick={() => { + if ( + !isConfigured && + engine.type === 'remote' + ) + return null + if (isDownloaded) { + onClickModelItem(model.id) } - /> - ) : ( - Object.values(downloadStates) - .filter((x) => x.modelId === model.id) - .map((item) => ( - +
+

+ {model.name} +

+ +
+
+ {selectedModel?.id === model.id && ( + - )) - )} -
- + )} + {!isDownloaded && ( + + {toGigabytes(model.metadata?.size)} + + )} + {!isDownloading && !isDownloaded ? ( + + downloadModel( + model.sources[0].url, + model.id + ) + } + /> + ) : ( + Object.values(downloadStates) + .filter((x) => x.modelId === model.id) + .map((item) => ( + + )) + )} +
+ + )} + ) })} diff --git a/web/containers/ModelLabel/ModelLabel.test.tsx b/web/containers/ModelLabel/ModelLabel.test.tsx index ca5cf19dc..545fc30d8 100644 --- a/web/containers/ModelLabel/ModelLabel.test.tsx +++ b/web/containers/ModelLabel/ModelLabel.test.tsx @@ -1,8 +1,7 @@ import React from 'react' -import { render, waitFor, screen } from '@testing-library/react' +import { render } from '@testing-library/react' import { useAtomValue } from 'jotai' import { useActiveModel } from '@/hooks/useActiveModel' -import { useSettings } from '@/hooks/useSettings' import ModelLabel from '@/containers/ModelLabel' jest.mock('jotai', () => ({ @@ -14,14 +13,9 @@ jest.mock('@/hooks/useActiveModel', () => ({ useActiveModel: jest.fn(), })) -jest.mock('@/hooks/useSettings', () => ({ - useSettings: jest.fn(), -})) - describe('ModelLabel', () => { const mockUseAtomValue = useAtomValue as jest.Mock const mockUseActiveModel = useActiveModel as jest.Mock - const mockUseSettings = useSettings as jest.Mock const defaultProps: any = { metadata: { @@ -44,7 +38,6 @@ describe('ModelLabel', () => { mockUseActiveModel.mockReturnValue({ activeModel: { metadata: { size: 0 } }, }) - mockUseSettings.mockReturnValue({ settings: { run_mode: 'cpu' } }) const props = { ...defaultProps, diff --git a/web/containers/ModelLabel/index.tsx b/web/containers/ModelLabel/index.tsx index 564b7edf8..6c6c3cfda 100644 --- a/web/containers/ModelLabel/index.tsx +++ b/web/containers/ModelLabel/index.tsx @@ -4,8 +4,6 @@ import { useAtomValue } from 'jotai' import { useActiveModel } from '@/hooks/useActiveModel' -import { useSettings } from '@/hooks/useSettings' - import NotEnoughMemoryLabel from './NotEnoughMemoryLabel' import SlowOnYourDeviceLabel from './SlowOnYourDeviceLabel' @@ -26,18 +24,23 @@ const ModelLabel = ({ size, compact }: Props) => { const totalRam = useAtomValue(totalRamAtom) const usedRam = useAtomValue(usedRamAtom) const availableVram = useAtomValue(availableVramAtom) - const { settings } = 
useSettings() const getLabel = (size: number) => { - const minimumRamModel = size * 1.25 + const minimumRamModel = (size * 1.25) / (1024 * 1024) + const availableRam = - settings?.run_mode === 'gpu' + availableVram > 0 ? availableVram * 1000000 // MB to bytes - : totalRam - usedRam + (activeModel?.metadata?.size ?? 0) + : totalRam - + (usedRam + + (activeModel?.metadata?.size + ? (activeModel.metadata.size * 1.25) / (1024 * 1024) + : 0)) + if (minimumRamModel > totalRam) { return ( 0 ? 'VRAM' : 'RAM'} compact={compact} /> ) diff --git a/web/containers/Providers/AppUpdateListener.tsx b/web/containers/Providers/AppUpdateListener.tsx index 4d05f6010..39b78aac7 100644 --- a/web/containers/Providers/AppUpdateListener.tsx +++ b/web/containers/Providers/AppUpdateListener.tsx @@ -5,12 +5,16 @@ import { useSetAtom } from 'jotai' import { appDownloadProgressAtom, + appUpdateAvailableAtom, updateVersionErrorAtom, + appUpdateNotAvailableAtom, } from '@/helpers/atoms/App.atom' const AppUpdateListener = () => { const setProgress = useSetAtom(appDownloadProgressAtom) const setUpdateVersionError = useSetAtom(updateVersionErrorAtom) + const setAppUpdateAvailable = useSetAtom(appUpdateAvailableAtom) + const setAppUpdateNotvailable = useSetAtom(appUpdateNotAvailableAtom) useEffect(() => { if (window && window.electronAPI) { @@ -36,8 +40,17 @@ const AppUpdateListener = () => { window.electronAPI.onAppUpdateDownloadSuccess(() => { setProgress(-1) }) + + window.electronAPI.onAppUpdateAvailable(() => { + setAppUpdateAvailable(true) + }) + + window.electronAPI.onAppUpdateNotAvailable(() => { + setAppUpdateAvailable(false) + setAppUpdateNotvailable(true) + }) } - }, [setProgress, setUpdateVersionError]) + }, [setProgress, setUpdateVersionError, setAppUpdateAvailable]) return } diff --git a/web/containers/Providers/DataLoader.tsx b/web/containers/Providers/DataLoader.tsx index 832e47d1a..1174150f4 100644 --- a/web/containers/Providers/DataLoader.tsx +++ 
b/web/containers/Providers/DataLoader.tsx @@ -15,6 +15,7 @@ import { useDebouncedCallback } from 'use-debounce' import useAssistants from '@/hooks/useAssistants' import { useGetEngines } from '@/hooks/useEngineManagement' import useGetSystemResources from '@/hooks/useGetSystemResources' +import { useGetHardwareInfo } from '@/hooks/useHardwareManagement' import useModels from '@/hooks/useModels' import useThreads from '@/hooks/useThreads' @@ -34,6 +35,7 @@ const DataLoader: React.FC = () => { const setJanSettingScreen = useSetAtom(janSettingScreenAtom) const { getData: loadModels } = useModels() const { mutate } = useGetEngines() + const { mutate: getHardwareInfo } = useGetHardwareInfo(false) useThreads() useAssistants() @@ -42,6 +44,7 @@ const DataLoader: React.FC = () => { useEffect(() => { // Load data once loadModels() + getHardwareInfo() // eslint-disable-next-line react-hooks/exhaustive-deps }, []) const reloadData = useDebouncedCallback(() => { diff --git a/web/containers/Providers/DeepLinkListener.tsx b/web/containers/Providers/DeepLinkListener.tsx index f8eebe618..41a8d9e24 100644 --- a/web/containers/Providers/DeepLinkListener.tsx +++ b/web/containers/Providers/DeepLinkListener.tsx @@ -6,7 +6,10 @@ import { useDebouncedCallback } from 'use-debounce' import { MainViewState } from '@/constants/screens' -import { useModelSourcesMutation } from '@/hooks/useModelSource' +import { + useGetModelSources, + useModelSourcesMutation, +} from '@/hooks/useModelSource' import { loadingModalInfoAtom } from '../LoadingModal' import { toaster } from '../Toast' @@ -19,6 +22,7 @@ const DeepLinkListener: React.FC = () => { const setLoadingInfo = useSetAtom(loadingModalInfoAtom) const setMainView = useSetAtom(mainViewStateAtom) const setModelDetail = useSetAtom(modelDetailAtom) + const { mutate } = useGetModelSources() const handleDeepLinkAction = useDebouncedCallback( async (deepLinkAction: DeepLinkAction) => { @@ -37,7 +41,7 @@ const DeepLinkListener: React.FC = () => { 
title: 'Getting Hugging Face model details', message: 'Please wait..', }) - await addModelSource(deepLinkAction.resource) + await addModelSource(deepLinkAction.resource).then(() => mutate()) setLoadingInfo(undefined) setMainView(MainViewState.Hub) setModelDetail(deepLinkAction.resource) diff --git a/web/containers/Providers/EventListener.tsx b/web/containers/Providers/EventListener.tsx index 55c172beb..27764bcdc 100644 --- a/web/containers/Providers/EventListener.tsx +++ b/web/containers/Providers/EventListener.tsx @@ -4,8 +4,8 @@ import React from 'react' import { DownloadEvent, - events, DownloadState, + events, ModelEvent, ExtensionTypeEnum, ModelExtension, @@ -17,23 +17,15 @@ import { useSetAtom } from 'jotai' import { setDownloadStateAtom } from '@/hooks/useDownloadState' -import { formatExtensionsName } from '@/utils/converter' - import { toaster } from '../Toast' import AppUpdateListener from './AppUpdateListener' import ClipboardListener from './ClipboardListener' import ModelHandler from './ModelHandler' -import ModelImportListener from './ModelImportListener' import QuickAskListener from './QuickAskListener' import { extensionManager } from '@/extension' -import { - InstallingExtensionState, - removeInstallingExtensionAtom, - setInstallingExtensionAtom, -} from '@/helpers/atoms/Extension.atom' import { addDownloadingModelAtom, removeDownloadingModelAtom, @@ -41,109 +33,76 @@ import { const EventListener = () => { const setDownloadState = useSetAtom(setDownloadStateAtom) - const setInstallingExtension = useSetAtom(setInstallingExtensionAtom) - const removeInstallingExtension = useSetAtom(removeInstallingExtensionAtom) const addDownloadingModel = useSetAtom(addDownloadingModelAtom) const removeDownloadingModel = useSetAtom(removeDownloadingModelAtom) const onFileDownloadUpdate = useCallback( async (state: DownloadState) => { console.debug('onFileDownloadUpdate', state) - if (state.downloadType === 'extension') { - const installingExtensionState: 
InstallingExtensionState = { - extensionId: state.extensionId!, - percentage: state.percent, - localPath: state.localPath, - } - setInstallingExtension(state.extensionId!, installingExtensionState) - } else { - addDownloadingModel(state.modelId) - setDownloadState(state) - } + addDownloadingModel(state.modelId) + setDownloadState(state) }, - [addDownloadingModel, setDownloadState, setInstallingExtension] + [addDownloadingModel, setDownloadState] ) const onFileDownloadError = useCallback( (state: DownloadState) => { console.debug('onFileDownloadError', state) - if (state.downloadType === 'extension') { - removeInstallingExtension(state.extensionId!) - } else { - state.downloadState = 'error' - setDownloadState(state) - removeDownloadingModel(state.modelId) - } + state.downloadState = 'error' + setDownloadState(state) + removeDownloadingModel(state.modelId) }, - [removeInstallingExtension, setDownloadState, removeDownloadingModel] + [setDownloadState, removeDownloadingModel] ) const onFileDownloadStopped = useCallback( (state: DownloadState) => { console.debug('onFileDownloadError', state) - if (state.downloadType === 'extension') { - removeInstallingExtension(state.extensionId!) 
- } else { - state.downloadState = 'error' - state.error = 'aborted' - setDownloadState(state) - removeDownloadingModel(state.modelId) - } + + state.downloadState = 'error' + state.error = 'aborted' + setDownloadState(state) + removeDownloadingModel(state.modelId) }, - [removeInstallingExtension, setDownloadState, removeDownloadingModel] + [setDownloadState, removeDownloadingModel] ) const onFileDownloadSuccess = useCallback( async (state: DownloadState) => { console.debug('onFileDownloadSuccess', state) - if (state.downloadType !== 'extension') { - // Update model metadata accordingly - const model = ModelManager.instance().models.get(state.modelId) - if (model) { - await extensionManager - .get(ExtensionTypeEnum.Model) - ?.updateModel({ - id: model.id, - ...model.settings, - ...model.parameters, - } as Partial) - .catch((e) => console.debug(e)) - toaster({ - title: 'Download Completed', - description: `Download ${state.modelId} completed`, - type: 'success', - }) - } - state.downloadState = 'end' - setDownloadState(state) - removeDownloadingModel(state.modelId) - events.emit(ModelEvent.OnModelsUpdate, { fetch: true }) + // Update model metadata accordingly + const model = ModelManager.instance().models.get(state.modelId) + if (model) { + await extensionManager + .get(ExtensionTypeEnum.Model) + ?.updateModel({ + id: model.id, + ...model.settings, + ...model.parameters, + } as Partial) + .catch((e) => console.debug(e)) + + toaster({ + title: 'Download Completed', + description: `Download ${state.modelId} completed`, + type: 'success', + }) } + state.downloadState = 'end' + setDownloadState(state) + removeDownloadingModel(state.modelId) + events.emit(ModelEvent.OnModelsUpdate, { fetch: true }) }, [removeDownloadingModel, setDownloadState] ) - const onFileUnzipSuccess = useCallback( - (state: DownloadState) => { - console.debug('onFileUnzipSuccess', state) - toaster({ - title: 'Success', - description: `Install ${formatExtensionsName(state.extensionId!)} 
successfully.`, - type: 'success', - }) - removeInstallingExtension(state.extensionId!) - }, - [removeInstallingExtension] - ) - useEffect(() => { console.debug('EventListenerWrapper: registering event listeners...') events.on(DownloadEvent.onFileDownloadUpdate, onFileDownloadUpdate) events.on(DownloadEvent.onFileDownloadError, onFileDownloadError) events.on(DownloadEvent.onFileDownloadSuccess, onFileDownloadSuccess) events.on(DownloadEvent.onFileDownloadStopped, onFileDownloadStopped) - events.on(DownloadEvent.onFileUnzipSuccess, onFileUnzipSuccess) return () => { console.debug('EventListenerWrapper: unregistering event listeners...') @@ -151,13 +110,11 @@ const EventListener = () => { events.off(DownloadEvent.onFileDownloadError, onFileDownloadError) events.off(DownloadEvent.onFileDownloadSuccess, onFileDownloadSuccess) events.off(DownloadEvent.onFileDownloadStopped, onFileDownloadStopped) - events.off(DownloadEvent.onFileUnzipSuccess, onFileUnzipSuccess) } }, [ onFileDownloadUpdate, onFileDownloadError, onFileDownloadSuccess, - onFileUnzipSuccess, onFileDownloadStopped, ]) @@ -165,7 +122,6 @@ const EventListener = () => { <> - diff --git a/web/containers/Providers/ModelHandler.tsx b/web/containers/Providers/ModelHandler.tsx index 2c027539e..786dbd4f0 100644 --- a/web/containers/Providers/ModelHandler.tsx +++ b/web/containers/Providers/ModelHandler.tsx @@ -18,7 +18,7 @@ import { extractInferenceParams, ModelExtension, } from '@janhq/core' -import { useAtomValue, useSetAtom } from 'jotai' +import { useAtom, useAtomValue, useSetAtom } from 'jotai' import { ulid } from 'ulidx' import { activeModelAtom, stateModelAtom } from '@/hooks/useActiveModel' @@ -75,8 +75,10 @@ export default function ModelHandler() { const activeThreadRef = useRef(activeThread) const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom) const activeModelParamsRef = useRef(activeModelParams) - const setTokenSpeed = useSetAtom(tokenSpeedAtom) + + const [tokenSpeed, setTokenSpeed] = 
useAtom(tokenSpeedAtom) const { engines } = useGetEngines() + const tokenSpeedRef = useRef(tokenSpeed) useEffect(() => { activeThreadRef.current = activeThread @@ -106,6 +108,10 @@ export default function ModelHandler() { messageGenerationSubscriber.current = subscribedGeneratingMessage }, [subscribedGeneratingMessage]) + useEffect(() => { + tokenSpeedRef.current = tokenSpeed + }, [tokenSpeed]) + const onNewMessageResponse = useCallback( async (message: ThreadMessage) => { if (message.type === MessageRequestType.Thread) { @@ -143,8 +149,7 @@ export default function ModelHandler() { return } - // The thread title should not be updated if the message is less than 10 words - // And no new line character is present + // No new line character is presented in the title // And non-alphanumeric characters should be removed if (messageContent.includes('\n')) { messageContent = messageContent.replace(/\n/g, ' ') @@ -228,6 +233,7 @@ export default function ModelHandler() { tokenSpeed: averageTokenSpeed, tokenCount: totalTokenCount, message: message.id, + model: activeModelRef.current?.name, } }) return @@ -276,6 +282,13 @@ export default function ModelHandler() { metadata, }) + // Update message's metadata with token usage + message.metadata = { + ...message.metadata, + token_speed: tokenSpeedRef.current?.tokenSpeed, + model: activeModelRef.current?.name, + } + if (message.status === MessageStatus.Error) { message.metadata = { ...message.metadata, diff --git a/web/containers/Providers/ModelImportListener.tsx b/web/containers/Providers/ModelImportListener.tsx deleted file mode 100644 index e99b1e6fc..000000000 --- a/web/containers/Providers/ModelImportListener.tsx +++ /dev/null @@ -1,109 +0,0 @@ -import { Fragment, useCallback, useEffect } from 'react' - -import { - ImportingModel, - LocalImportModelEvent, - Model, - ModelEvent, - events, -} from '@janhq/core' -import { useSetAtom } from 'jotai' - -import { snackbar } from '../Toast' - -import { - setImportingModelErrorAtom, - 
setImportingModelSuccessAtom, - updateImportingModelProgressAtom, -} from '@/helpers/atoms/Model.atom' - -const ModelImportListener = () => { - const updateImportingModelProgress = useSetAtom( - updateImportingModelProgressAtom - ) - const setImportingModelSuccess = useSetAtom(setImportingModelSuccessAtom) - const setImportingModelFailed = useSetAtom(setImportingModelErrorAtom) - - const onImportModelUpdate = useCallback( - async (state: ImportingModel) => { - if (!state.importId) return - updateImportingModelProgress(state.importId, state.percentage ?? 0) - }, - [updateImportingModelProgress] - ) - - const onImportModelFailed = useCallback( - async (state: ImportingModel) => { - if (!state.importId) return - setImportingModelFailed(state.importId, state.error ?? '') - }, - [setImportingModelFailed] - ) - - const onImportModelSuccess = useCallback( - (state: ImportingModel) => { - if (!state.modelId) return - events.emit(ModelEvent.OnModelsUpdate, { fetch: true }) - setImportingModelSuccess(state.importId, state.modelId) - }, - [setImportingModelSuccess] - ) - - const onImportModelFinished = useCallback((importedModels: Model[]) => { - const modelText = importedModels.length === 1 ? 
'model' : 'models' - snackbar({ - description: `Successfully imported ${importedModels.length} ${modelText}`, - type: 'success', - }) - }, []) - - useEffect(() => { - console.debug('ModelImportListener: registering event listeners..') - - events.on( - LocalImportModelEvent.onLocalImportModelUpdate, - onImportModelUpdate - ) - events.on( - LocalImportModelEvent.onLocalImportModelSuccess, - onImportModelSuccess - ) - events.on( - LocalImportModelEvent.onLocalImportModelFinished, - onImportModelFinished - ) - events.on( - LocalImportModelEvent.onLocalImportModelFailed, - onImportModelFailed - ) - - return () => { - console.debug('ModelImportListener: unregistering event listeners...') - events.off( - LocalImportModelEvent.onLocalImportModelUpdate, - onImportModelUpdate - ) - events.off( - LocalImportModelEvent.onLocalImportModelSuccess, - onImportModelSuccess - ) - events.off( - LocalImportModelEvent.onLocalImportModelFinished, - onImportModelFinished - ) - events.off( - LocalImportModelEvent.onLocalImportModelFailed, - onImportModelFailed - ) - } - }, [ - onImportModelUpdate, - onImportModelSuccess, - onImportModelFinished, - onImportModelFailed, - ]) - - return -} - -export default ModelImportListener diff --git a/web/containers/Providers/QuickAskConfigurator.tsx b/web/containers/Providers/QuickAskConfigurator.tsx new file mode 100644 index 000000000..40e5caf8e --- /dev/null +++ b/web/containers/Providers/QuickAskConfigurator.tsx @@ -0,0 +1,17 @@ +'use client' + +import { PropsWithChildren, useEffect, useState } from 'react' + +import { setupCoreServices } from '@/services/coreService' + +export const QuickAskConfigurator = ({ children }: PropsWithChildren) => { + const [setupCore, setSetupCore] = useState(false) + + // Services Setup + useEffect(() => { + setupCoreServices() + setSetupCore(true) + }, []) + + return <>{setupCore && <>{children}} +} diff --git a/web/containers/Providers/index.tsx b/web/containers/Providers/index.tsx index 5d14ea95a..78cb76d78 100644 
--- a/web/containers/Providers/index.tsx +++ b/web/containers/Providers/index.tsx @@ -14,28 +14,39 @@ import DataLoader from './DataLoader' import DeepLinkListener from './DeepLinkListener' import KeyListener from './KeyListener' +import { QuickAskConfigurator } from './QuickAskConfigurator' import Responsive from './Responsive' import SWRConfigProvider from './SWRConfigProvider' import SettingsHandler from './SettingsHandler' const Providers = ({ children }: PropsWithChildren) => { + const isQuickAsk = + typeof window !== 'undefined' && window.electronAPI?.isQuickAsk() return ( - + {isQuickAsk && ( <> - - - - - - - - {children} + {children} - + )} + {!isQuickAsk && ( + + <> + + + + + + + +
+ {children} + + + )} diff --git a/web/containers/RightPanelContainer/index.tsx b/web/containers/RightPanelContainer/index.tsx index 7443ab61a..6d474d557 100644 --- a/web/containers/RightPanelContainer/index.tsx +++ b/web/containers/RightPanelContainer/index.tsx @@ -12,7 +12,10 @@ import { atom, useAtom, useAtomValue } from 'jotai' import { twMerge } from 'tailwind-merge' import { showRightPanelAtom } from '@/helpers/atoms/App.atom' -import { reduceTransparentAtom } from '@/helpers/atoms/Setting.atom' +import { + reduceTransparentAtom, + showScrollBarAtom, +} from '@/helpers/atoms/Setting.atom' type Props = PropsWithChildren @@ -28,6 +31,7 @@ const RightPanelContainer = ({ children }: Props) => { null ) const reduceTransparent = useAtomValue(reduceTransparentAtom) + const showScrollBar = useAtomValue(showScrollBarAtom) const [showRightPanel, setShowRightPanel] = useAtom(showRightPanelAtom) const matches = useMediaQuery('(max-width: 880px)') @@ -105,7 +109,10 @@ const RightPanelContainer = ({ children }: Props) => { style={{ width: showRightPanel ? 
rightPanelWidth : 0 }} onMouseDown={(e) => isResizing && e.preventDefault()} > - + {children} {showRightPanel && !matches && ( diff --git a/web/containers/ServerLogs/index.tsx b/web/containers/ServerLogs/index.tsx index 2e978bd23..c9074808c 100644 --- a/web/containers/ServerLogs/index.tsx +++ b/web/containers/ServerLogs/index.tsx @@ -14,6 +14,7 @@ import { useLogs } from '@/hooks/useLogs' import { usePath } from '@/hooks/usePath' import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom' +import { showScrollBarAtom } from '@/helpers/atoms/Setting.atom' type ServerLogsProps = { limit?: number; withCopy?: boolean } @@ -25,6 +26,7 @@ const ServerLogs = (props: ServerLogsProps) => { const listRef = useRef(null) const prevScrollTop = useRef(0) const isUserManuallyScrollingUp = useRef(false) + const showScrollBar = useAtomValue(showScrollBarAtom) const updateLogs = useCallback( () => @@ -93,205 +95,212 @@ const ServerLogs = (props: ServerLogsProps) => { }, [listRef.current?.scrollHeight, isUserManuallyScrollingUp, logs]) return ( - - {withCopy && ( -
-
- - -
-
- )} -
- {logs.length > 0 ? ( - - {logs.slice(-limit).map((log, i) => { - return ( -

- {log} -

- ) - })} -
- ) : ( -
- - - - +
+ {withCopy && ( +
+
+ + +
)}
- + +
+ {logs.length > 0 ? ( + + {logs.slice(-limit).map((log, i) => { + return ( +

+ {log} +

+ ) + })} +
+ ) : ( +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ Empty logs +

+
+ )} +
+
+ ) } diff --git a/web/containers/SliderRightPanel/index.tsx b/web/containers/SliderRightPanel/index.tsx index 3fad10212..5022845c9 100644 --- a/web/containers/SliderRightPanel/index.tsx +++ b/web/containers/SliderRightPanel/index.tsx @@ -73,7 +73,7 @@ const SliderRightPanel = ({ trigger={ (MainViewState.Thread) export const defaultJanDataFolderAtom = atom('') +export const LocalEngineDefaultVariantAtom = atom('') + +export const RecommendEngineVariantAtom = atomWithStorage( + 'recommendEngineVariant', + '', + undefined, + { getOnInit: true } +) + const SHOW_RIGHT_PANEL = 'showRightPanel' // Store panel atom @@ -23,6 +32,8 @@ export const showRightPanelAtom = atomWithStorage( export const showSystemMonitorPanelAtom = atom(false) export const appDownloadProgressAtom = atom(-1) export const updateVersionErrorAtom = atom(undefined) +export const appUpdateAvailableAtom = atom(false) +export const appUpdateNotAvailableAtom = atom(false) const COPY_OVER_INSTRUCTION_ENABLED = 'copy_over_instruction_enabled' diff --git a/web/helpers/atoms/Extension.atom.test.ts b/web/helpers/atoms/Extension.atom.test.ts index 2a12cd7db..d41290eea 100644 --- a/web/helpers/atoms/Extension.atom.test.ts +++ b/web/helpers/atoms/Extension.atom.test.ts @@ -9,54 +9,6 @@ describe('Extension.atom.ts', () => { jest.clearAllMocks() }) - describe('installingExtensionAtom', () => { - it('should initialize as an empty array', () => { - const { result } = renderHook(() => useAtomValue(ExtensionAtoms.installingExtensionAtom)) - expect(result.current).toEqual([]) - }) - }) - - describe('setInstallingExtensionAtom', () => { - it('should add a new installing extension', () => { - const { result: setAtom } = renderHook(() => useSetAtom(ExtensionAtoms.setInstallingExtensionAtom)) - const { result: getAtom } = renderHook(() => useAtomValue(ExtensionAtoms.installingExtensionAtom)) - - act(() => { - setAtom.current('ext1', { extensionId: 'ext1', percentage: 0 }) - }) - - expect(getAtom.current).toEqual([{ 
extensionId: 'ext1', percentage: 0 }]) - }) - - it('should update an existing installing extension', () => { - const { result: setAtom } = renderHook(() => useSetAtom(ExtensionAtoms.setInstallingExtensionAtom)) - const { result: getAtom } = renderHook(() => useAtomValue(ExtensionAtoms.installingExtensionAtom)) - - act(() => { - setAtom.current('ext1', { extensionId: 'ext1', percentage: 0 }) - setAtom.current('ext1', { extensionId: 'ext1', percentage: 50 }) - }) - - expect(getAtom.current).toEqual([{ extensionId: 'ext1', percentage: 50 }]) - }) - }) - - describe('removeInstallingExtensionAtom', () => { - it('should remove an installing extension', () => { - const { result: setAtom } = renderHook(() => useSetAtom(ExtensionAtoms.setInstallingExtensionAtom)) - const { result: removeAtom } = renderHook(() => useSetAtom(ExtensionAtoms.removeInstallingExtensionAtom)) - const { result: getAtom } = renderHook(() => useAtomValue(ExtensionAtoms.installingExtensionAtom)) - - act(() => { - setAtom.current('ext1', { extensionId: 'ext1', percentage: 0 }) - setAtom.current('ext2', { extensionId: 'ext2', percentage: 50 }) - removeAtom.current('ext1') - }) - - expect(getAtom.current).toEqual([{ extensionId: 'ext2', percentage: 50 }]) - }) - }) - describe('inActiveEngineProviderAtom', () => { it('should initialize as an empty array', () => { const { result } = renderHook(() => useAtomValue(ExtensionAtoms.inActiveEngineProviderAtom)) diff --git a/web/helpers/atoms/Extension.atom.ts b/web/helpers/atoms/Extension.atom.ts index 3f1843dc4..7e008df85 100644 --- a/web/helpers/atoms/Extension.atom.ts +++ b/web/helpers/atoms/Extension.atom.ts @@ -1,45 +1,5 @@ -import { atom } from 'jotai' import { atomWithStorage } from 'jotai/utils' -type ExtensionId = string - -export type InstallingExtensionState = { - extensionId: ExtensionId - percentage: number - localPath?: string -} - -export const installingExtensionAtom = atom([]) - -export const setInstallingExtensionAtom = atom( - null, - (get, 
set, extensionId: string, state: InstallingExtensionState) => { - const current = get(installingExtensionAtom) - - const isExists = current.some((e) => e.extensionId === extensionId) - if (isExists) { - const newCurrent = current.map((e) => { - if (e.extensionId === extensionId) { - return state - } - return e - }) - set(installingExtensionAtom, newCurrent) - } else { - set(installingExtensionAtom, [...current, state]) - } - } -) - -export const removeInstallingExtensionAtom = atom( - null, - (get, set, extensionId: string) => { - const current = get(installingExtensionAtom) - const newCurrent = current.filter((e) => e.extensionId !== extensionId) - set(installingExtensionAtom, newCurrent) - } -) - const INACTIVE_ENGINE_PROVIDER = 'inActiveEngineProvider' export const inActiveEngineProviderAtom = atomWithStorage( INACTIVE_ENGINE_PROVIDER, diff --git a/web/helpers/atoms/Setting.atom.ts b/web/helpers/atoms/Setting.atom.ts index 3568d87d0..d8ad38a6d 100644 --- a/web/helpers/atoms/Setting.atom.ts +++ b/web/helpers/atoms/Setting.atom.ts @@ -11,6 +11,7 @@ export const janSettingScreenAtom = atom([]) export const THEME = 'themeAppearance' export const REDUCE_TRANSPARENT = 'reduceTransparent' export const SPELL_CHECKING = 'spellChecking' +export const SCROLL_BAR = 'scrollBar' export const PRODUCT_ANALYTIC = 'productAnalytic' export const PRODUCT_ANALYTIC_PROMPT = 'productAnalyticPrompt' export const THEME_DATA = 'themeData' @@ -45,6 +46,12 @@ export const spellCheckAtom = atomWithStorage( undefined, { getOnInit: true } ) +export const showScrollBarAtom = atomWithStorage( + SCROLL_BAR, + false, + undefined, + { getOnInit: true } +) export const productAnalyticAtom = atomWithStorage( PRODUCT_ANALYTIC, false, diff --git a/web/hooks/useApp.ts b/web/hooks/useApp.ts new file mode 100644 index 000000000..f30b9e3c5 --- /dev/null +++ b/web/hooks/useApp.ts @@ -0,0 +1,10 @@ +import { extensionManager } from '@/extension' + +export function useApp() { + async function relaunch() { + 
const extensions = extensionManager.getAll() + await Promise.all(extensions.map((e) => e.onUnload())) + window.core?.api?.relaunch() + } + return { relaunch } +} diff --git a/web/hooks/useCreateNewThread.ts b/web/hooks/useCreateNewThread.ts index 4901b9846..57ceeb385 100644 --- a/web/hooks/useCreateNewThread.ts +++ b/web/hooks/useCreateNewThread.ts @@ -82,24 +82,18 @@ export const useCreateNewThread = () => { } // Default context length is 8192 - const defaultContextLength = Math.min( - 8192, - defaultModel?.settings?.ctx_len ?? 8192 - ) + const contextLength = defaultModel?.settings?.ctx_len + ? Math.min(8192, defaultModel?.settings?.ctx_len) + : undefined const overriddenSettings = { - ctx_len: defaultModel?.settings?.ctx_len - ? Math.min(8192, defaultModel?.settings?.ctx_len) - : undefined, + ctx_len: contextLength, } // Use ctx length by default const overriddenParameters = { - max_tokens: defaultContextLength - ? Math.min( - defaultModel?.parameters?.max_tokens ?? 8192, - defaultContextLength - ) + max_tokens: contextLength + ? Math.min(defaultModel?.parameters?.max_tokens ?? 8192, contextLength) : defaultModel?.parameters?.max_tokens, } diff --git a/web/hooks/useEngineManagement.ts b/web/hooks/useEngineManagement.ts index b08004c4e..02044ec65 100644 --- a/web/hooks/useEngineManagement.ts +++ b/web/hooks/useEngineManagement.ts @@ -1,4 +1,4 @@ -import { useMemo } from 'react' +import { useCallback, useMemo, useState } from 'react' import { ExtensionTypeEnum, @@ -32,6 +32,13 @@ export const releasedEnginesLatestCacheAtom = atomWithStorage<{ timestamp: number } | null>('releasedEnginesLatestCache', null, undefined, { getOnInit: true }) +export interface RemoteModelList { + data?: { + id?: string + name?: string + }[] +} + // fetcher function async function fetchExtensionData( extension: EngineManagementExtension | null, @@ -88,8 +95,12 @@ export function useGetRemoteModels(name: string) { error, mutate, } = useSWR( - extension ? 
'remoteModels' : null, - () => fetchExtensionData(extension, (ext) => ext.getRemoteModels(name)), + extension ? `remoteModels_${name}` : null, + () => + fetchExtensionData( + extension, + (ext) => ext.getRemoteModels(name) as Promise + ), { revalidateOnFocus: false, revalidateOnReconnect: true, @@ -456,3 +467,33 @@ export const useGetEngineModelSources = () => { ), } } + +/** + * Refresh model list + * @param engine + * @returns + */ +export const useRefreshModelList = (engine: string) => { + const [refreshingModels, setRefreshingModels] = useState(false) + const { mutate: fetchRemoteModels } = useGetRemoteModels(engine) + + const refreshModels = useCallback( + (engine: string) => { + setRefreshingModels(true) + fetchRemoteModels() + .then((remoteModelList) => + Promise.all( + remoteModelList?.data?.map((model: { id?: string }) => + model?.id + ? addRemoteEngineModel(model.id, engine).catch(() => {}) + : {} + ) ?? [] + ) + ) + .finally(() => setRefreshingModels(false)) + }, + [fetchRemoteModels] + ) + + return { refreshingModels, refreshModels } +} diff --git a/web/hooks/useFactoryReset.ts b/web/hooks/useFactoryReset.ts index da2e15b03..7344b2eb1 100644 --- a/web/hooks/useFactoryReset.ts +++ b/web/hooks/useFactoryReset.ts @@ -58,6 +58,7 @@ export default function useFactoryReset() { const configuration: AppConfiguration = { data_folder: defaultJanDataFolder, quick_ask: appConfiguration?.quick_ask ?? 
false, + distinct_id: appConfiguration?.distinct_id, } await window.core?.api?.updateAppConfiguration(configuration) } diff --git a/web/hooks/useGetLatestRelease.ts b/web/hooks/useGetLatestRelease.ts new file mode 100644 index 000000000..58de2ddb8 --- /dev/null +++ b/web/hooks/useGetLatestRelease.ts @@ -0,0 +1,35 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ +import useSWR from 'swr' + +const fetchLatestRelease = async (includeBeta: boolean) => { + const res = await fetch( + 'https://api.github.com/repos/menloresearch/jan/releases' + ) + if (!res.ok) throw new Error('Failed to fetch releases') + + const releases = await res.json() + + // Filter stable and beta releases + const stableRelease = releases.find( + (release: { prerelease: any; draft: any }) => + !release.prerelease && !release.draft + ) + const betaRelease = releases.find( + (release: { prerelease: any }) => release.prerelease + ) + + return includeBeta ? (betaRelease ?? stableRelease) : stableRelease +} + +export function useGetLatestRelease(includeBeta = false) { + const { data, error, mutate } = useSWR( + ['latestRelease', includeBeta], + () => fetchLatestRelease(includeBeta), + { + revalidateOnFocus: false, + revalidateOnReconnect: true, + } + ) + + return { release: data, error, mutate } +} diff --git a/web/hooks/useGetSystemResources.test.ts b/web/hooks/useGetSystemResources.test.ts index 10e539e07..78392b612 100644 --- a/web/hooks/useGetSystemResources.test.ts +++ b/web/hooks/useGetSystemResources.test.ts @@ -21,7 +21,7 @@ jest.mock('jotai', () => ({ describe('useGetSystemResources', () => { const mockMonitoringExtension = { - getResourcesInfo: jest.fn(), + getHardware: jest.fn(), getCurrentLoad: jest.fn(), } @@ -38,17 +38,17 @@ describe('useGetSystemResources', () => { }) it('should fetch system resources on initial render', async () => { - mockMonitoringExtension.getResourcesInfo.mockResolvedValue({ - mem: { usedMemory: 4000, totalMemory: 8000 }, + 
mockMonitoringExtension.getHardware.mockResolvedValue({ + cpu: { usage: 50 }, + ram: { available: 4000, total: 8000 }, }) mockMonitoringExtension.getCurrentLoad.mockResolvedValue({ - cpu: { usage: 50 }, gpu: [], }) const { result } = renderHook(() => useGetSystemResources()) - expect(mockMonitoringExtension.getResourcesInfo).toHaveBeenCalledTimes(1) + expect(mockMonitoringExtension.getHardware).toHaveBeenCalledTimes(1) }) it('should start watching system resources when watch is called', () => { @@ -58,14 +58,14 @@ describe('useGetSystemResources', () => { result.current.watch() }) - expect(mockMonitoringExtension.getResourcesInfo).toHaveBeenCalled() + expect(mockMonitoringExtension.getHardware).toHaveBeenCalled() // Fast-forward time by 2 seconds act(() => { jest.advanceTimersByTime(2000) }) - expect(mockMonitoringExtension.getResourcesInfo).toHaveBeenCalled() + expect(mockMonitoringExtension.getHardware).toHaveBeenCalled() }) it('should stop watching when stopWatching is called', () => { @@ -85,7 +85,7 @@ describe('useGetSystemResources', () => { }) // Expect no additional calls after stopping - expect(mockMonitoringExtension.getResourcesInfo).toHaveBeenCalled() + expect(mockMonitoringExtension.getHardware).toHaveBeenCalled() }) it('should not fetch resources if monitoring extension is not available', async () => { @@ -97,7 +97,7 @@ describe('useGetSystemResources', () => { result.current.getSystemResources() }) - expect(mockMonitoringExtension.getResourcesInfo).not.toHaveBeenCalled() + expect(mockMonitoringExtension.getHardware).not.toHaveBeenCalled() expect(mockMonitoringExtension.getCurrentLoad).not.toHaveBeenCalled() }) }) diff --git a/web/hooks/useGetSystemResources.ts b/web/hooks/useGetSystemResources.ts index a05a6a710..e40100a55 100644 --- a/web/hooks/useGetSystemResources.ts +++ b/web/hooks/useGetSystemResources.ts @@ -1,6 +1,7 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ import { useCallback, useEffect, useState } from 'react' -import { 
ExtensionTypeEnum, MonitoringExtension } from '@janhq/core' +import { ExtensionTypeEnum, HardwareManagementExtension } from '@janhq/core' import { useSetAtom } from 'jotai' @@ -20,58 +21,62 @@ export default function useGetSystemResources() { NodeJS.Timeout | number | undefined >(undefined) - const setTotalRam = useSetAtom(totalRamAtom) const setGpus = useSetAtom(gpusAtom) - const setUsedRam = useSetAtom(usedRamAtom) const setCpuUsage = useSetAtom(cpuUsageAtom) const setTotalNvidiaVram = useSetAtom(nvidiaTotalVramAtom) const setAvailableVram = useSetAtom(availableVramAtom) + const setUsedRam = useSetAtom(usedRamAtom) + const setTotalRam = useSetAtom(totalRamAtom) const setRamUtilitized = useSetAtom(ramUtilitizedAtom) const getSystemResources = useCallback(async () => { if ( - !extensionManager.get( - ExtensionTypeEnum.SystemMonitoring + !extensionManager.get( + ExtensionTypeEnum.Hardware ) ) { return } - const monitoring = extensionManager.get( - ExtensionTypeEnum.SystemMonitoring - ) - const resourceInfor = await monitoring?.getResourcesInfo() - const currentLoadInfor = await monitoring?.getCurrentLoad() - if (resourceInfor?.mem?.usedMemory) setUsedRam(resourceInfor.mem.usedMemory) - if (resourceInfor?.mem?.totalMemory) - setTotalRam(resourceInfor.mem.totalMemory) + const hardwareExtension = extensionManager.get( + ExtensionTypeEnum.Hardware + ) + + const hardwareInfo = await hardwareExtension?.getHardware() + + const usedMemory = + Number(hardwareInfo?.ram.total) - Number(hardwareInfo?.ram.available) + + if (hardwareInfo?.ram?.total && hardwareInfo?.ram?.available) + setUsedRam(Number(usedMemory)) + + if (hardwareInfo?.ram?.total) setTotalRam(hardwareInfo.ram.total) const ramUtilitized = - ((resourceInfor?.mem?.usedMemory ?? 0) / - (resourceInfor?.mem?.totalMemory ?? 1)) * - 100 + ((Number(usedMemory) ?? 0) / (hardwareInfo?.ram.total ?? 1)) * 100 + setRamUtilitized(Math.round(ramUtilitized)) - setCpuUsage(Math.round(currentLoadInfor?.cpu?.usage ?? 
0)) + setCpuUsage(Math.round(hardwareInfo?.cpu.usage ?? 0)) - const gpus = currentLoadInfor?.gpu ?? [] - setGpus(gpus) + const gpus = hardwareInfo?.gpus ?? [] + setGpus(gpus as any) let totalNvidiaVram = 0 if (gpus.length > 0) { totalNvidiaVram = gpus.reduce( - (total: number, gpu: { memoryTotal: string }) => - total + Number(gpu.memoryTotal), + (total: number, gpu: { total_vram: number }) => + total + Number(gpu.total_vram), 0 ) } + setTotalNvidiaVram(totalNvidiaVram) + setAvailableVram( - gpus.reduce( - (total: number, gpu: { memoryFree: string }) => - total + Number(gpu.memoryFree), - 0 - ) + gpus.reduce((total, gpu) => { + return total + Number(gpu.free_vram || 0) + }, 0) ) }, [ setUsedRam, diff --git a/web/hooks/useGpuSetting.test.ts b/web/hooks/useGpuSetting.test.ts deleted file mode 100644 index f52f07af8..000000000 --- a/web/hooks/useGpuSetting.test.ts +++ /dev/null @@ -1,87 +0,0 @@ -// useGpuSetting.test.ts - -import { renderHook, act } from '@testing-library/react' -import { ExtensionTypeEnum, MonitoringExtension } from '@janhq/core' - -// Mock dependencies -jest.mock('@/extension') - -import useGpuSetting from './useGpuSetting' -import { extensionManager } from '@/extension' - -describe('useGpuSetting', () => { - beforeEach(() => { - jest.clearAllMocks() - }) - - it('should return GPU settings when available', async () => { - const mockGpuSettings = { - gpuCount: 2, - gpuNames: ['NVIDIA GeForce RTX 3080', 'NVIDIA GeForce RTX 3070'], - totalMemory: 20000, - freeMemory: 15000, - } - - const mockMonitoringExtension: Partial = { - getGpuSetting: jest.fn().mockResolvedValue(mockGpuSettings), - } - - jest - .spyOn(extensionManager, 'get') - .mockReturnValue(mockMonitoringExtension as MonitoringExtension) - - const { result } = renderHook(() => useGpuSetting()) - - let gpuSettings - await act(async () => { - gpuSettings = await result.current.getGpuSettings() - }) - - expect(gpuSettings).toEqual(mockGpuSettings) - 
expect(extensionManager.get).toHaveBeenCalledWith( - ExtensionTypeEnum.SystemMonitoring - ) - expect(mockMonitoringExtension.getGpuSetting).toHaveBeenCalled() - }) - - it('should return undefined when no GPU settings are found', async () => { - const mockMonitoringExtension: Partial = { - getGpuSetting: jest.fn().mockResolvedValue(undefined), - } - - jest - .spyOn(extensionManager, 'get') - .mockReturnValue(mockMonitoringExtension as MonitoringExtension) - - const { result } = renderHook(() => useGpuSetting()) - - let gpuSettings - await act(async () => { - gpuSettings = await result.current.getGpuSettings() - }) - - expect(gpuSettings).toBeUndefined() - expect(extensionManager.get).toHaveBeenCalledWith( - ExtensionTypeEnum.SystemMonitoring - ) - expect(mockMonitoringExtension.getGpuSetting).toHaveBeenCalled() - }) - - it('should handle missing MonitoringExtension', async () => { - jest.spyOn(extensionManager, 'get').mockReturnValue(undefined) - jest.spyOn(console, 'debug').mockImplementation(() => {}) - - const { result } = renderHook(() => useGpuSetting()) - - let gpuSettings - await act(async () => { - gpuSettings = await result.current.getGpuSettings() - }) - - expect(gpuSettings).toBeUndefined() - expect(extensionManager.get).toHaveBeenCalledWith( - ExtensionTypeEnum.SystemMonitoring - ) - expect(console.debug).toHaveBeenCalledWith('No GPU setting found') - }) -}) diff --git a/web/hooks/useGpuSetting.ts b/web/hooks/useGpuSetting.ts deleted file mode 100644 index 36f51ed57..000000000 --- a/web/hooks/useGpuSetting.ts +++ /dev/null @@ -1,21 +0,0 @@ -import { useCallback } from 'react' - -import { ExtensionTypeEnum, MonitoringExtension } from '@janhq/core' - -import { extensionManager } from '@/extension' - -export default function useGpuSetting() { - const getGpuSettings = useCallback(async () => { - const gpuSetting = await extensionManager - ?.get(ExtensionTypeEnum.SystemMonitoring) - ?.getGpuSetting() - - if (!gpuSetting) { - console.debug('No GPU setting 
found') - return undefined - } - return gpuSetting - }, []) - - return { getGpuSettings } -} diff --git a/web/hooks/useHardwareManagement.ts b/web/hooks/useHardwareManagement.ts new file mode 100644 index 000000000..90dbdb2b5 --- /dev/null +++ b/web/hooks/useHardwareManagement.ts @@ -0,0 +1,99 @@ +import { useMemo } from 'react' + +import { ExtensionTypeEnum, HardwareManagementExtension } from '@janhq/core' + +import { useSetAtom } from 'jotai' +import useSWR from 'swr' + +import { extensionManager } from '@/extension/ExtensionManager' +import { + cpuUsageAtom, + ramUtilitizedAtom, + totalRamAtom, + usedRamAtom, +} from '@/helpers/atoms/SystemBar.atom' + +// fetcher function +async function fetchExtensionData( + extension: HardwareManagementExtension | null, + method: (extension: HardwareManagementExtension) => Promise +): Promise { + if (!extension) { + throw new Error('Extension not found') + } + return method(extension) +} + +const getExtension = () => + extensionManager.get( + ExtensionTypeEnum.Hardware + ) ?? null + +/** + * @returns A Promise that resolves to an object of list engines. + */ +export function useGetHardwareInfo(updatePeriodically: boolean = true) { + const setCpuUsage = useSetAtom(cpuUsageAtom) + const setUsedRam = useSetAtom(usedRamAtom) + const setTotalRam = useSetAtom(totalRamAtom) + const setRamUtilitized = useSetAtom(ramUtilitizedAtom) + + const extension = useMemo( + () => + extensionManager.get( + ExtensionTypeEnum.Hardware + ) ?? null, + [] + ) + + const { + data: hardware, + error, + mutate, + } = useSWR( + extension ? 'hardware' : null, + () => fetchExtensionData(extension, (ext) => ext.getHardware()), + { + revalidateOnFocus: false, + revalidateOnReconnect: false, + refreshInterval: updatePeriodically ? 
2000 : undefined, + } + ) + + const usedMemory = + Number(hardware?.ram.total) - Number(hardware?.ram.available) + + if (hardware?.ram?.total && hardware?.ram?.available) + setUsedRam(Number(usedMemory)) + + if (hardware?.ram?.total) setTotalRam(hardware.ram.total) + + const ramUtilitized = + ((Number(usedMemory) ?? 0) / (hardware?.ram.total ?? 1)) * 100 + + setRamUtilitized(Math.round(ramUtilitized)) + + setCpuUsage(Math.round(hardware?.cpu.usage ?? 0)) + + return { hardware, error, mutate } +} + +/** + * set gpus activate + * @returns A Promise that resolves set gpus activate. + */ +export const setActiveGpus = async (data: { gpus: number[] }) => { + const extension = getExtension() + + if (!extension) { + throw new Error('Extension is not available') + } + + try { + const response = await extension.setAvtiveGpu(data) + return response + } catch (error) { + console.error('Failed to install engine variant:', error) + throw error + } +} diff --git a/web/hooks/useImportModel.ts b/web/hooks/useImportModel.ts index 84c6a5126..94d11b455 100644 --- a/web/hooks/useImportModel.ts +++ b/web/hooks/useImportModel.ts @@ -3,8 +3,8 @@ import { useCallback } from 'react' import { ExtensionTypeEnum, ImportingModel, - LocalImportModelEvent, Model, + ModelEvent, ModelExtension, OptionType, events, @@ -25,6 +25,7 @@ import { downloadedModelsAtom, importingModelsAtom, removeDownloadingModelAtom, + setImportingModelSuccessAtom, } from '@/helpers/atoms/Model.atom' export type ImportModelStage = @@ -59,6 +60,7 @@ const useImportModel = () => { const addDownloadingModel = useSetAtom(addDownloadingModelAtom) const removeDownloadingModel = useSetAtom(removeDownloadingModelAtom) const downloadedModels = useAtomValue(downloadedModelsAtom) + const setImportingModelSuccess = useSetAtom(setImportingModelSuccessAtom) const incrementalModelName = useCallback( (name: string, startIndex: number = 0): string => { @@ -83,10 +85,9 @@ const useImportModel = () => { ?.importModel(modelId, model.path, 
model.name, optionType) .finally(() => { removeDownloadingModel(modelId) - events.emit(LocalImportModelEvent.onLocalImportModelSuccess, { - importId: model.importId, - modelId: modelId, - }) + + events.emit(ModelEvent.OnModelsUpdate, { fetch: true }) + setImportingModelSuccess(model.importId, modelId) }) } }) diff --git a/web/hooks/useModelSource.ts b/web/hooks/useModelSource.ts index a797586f3..f9e01802a 100644 --- a/web/hooks/useModelSource.ts +++ b/web/hooks/useModelSource.ts @@ -36,6 +36,22 @@ export function useGetModelSources() { return { sources, error, mutate } } +/** + * @returns A Promise that resolves to featured model sources. + */ +export function useGetFeaturedSources() { + const { sources, error, mutate } = useGetModelSources() + + return { + sources: sources?.filter((e) => e.metadata?.tags?.includes('featured')), + error, + mutate, + } +} + +/** + * @returns A Promise that resolves to model source mutation. + */ export const useModelSourcesMutation = () => { const extension = useMemo( () => extensionManager.get(ExtensionTypeEnum.Model), diff --git a/web/hooks/useSendChatMessage.ts b/web/hooks/useSendChatMessage.ts index d3c8ff142..65124fcab 100644 --- a/web/hooks/useSendChatMessage.ts +++ b/web/hooks/useSendChatMessage.ts @@ -196,7 +196,10 @@ export default function useSendChatMessage() { } updateThread(updatedThread) - if (!isResend) { + if ( + !isResend && + (newMessage.content.length || newMessage.attachments?.length) + ) { // Add message const createdMessage = await extensionManager .get(ExtensionTypeEnum.Conversational) diff --git a/web/hooks/useSettings.ts b/web/hooks/useSettings.ts deleted file mode 100644 index 0f02d41af..000000000 --- a/web/hooks/useSettings.ts +++ /dev/null @@ -1,74 +0,0 @@ -import { useCallback, useEffect, useState } from 'react' - -import { fs, joinPath } from '@janhq/core' - -type NvidiaDriver = { - exist: boolean - version: string -} - -export type AppSettings = { - run_mode: 'cpu' | 'gpu' | undefined - notify: boolean 
- gpus_in_use: string[] - vulkan: boolean - gpus: string[] - nvidia_driver: NvidiaDriver - cuda: NvidiaDriver -} - -export const useSettings = () => { - const [settings, setSettings] = useState() - - useEffect(() => { - readSettings().then((settings) => setSettings(settings as AppSettings)) - - // eslint-disable-next-line react-hooks/exhaustive-deps - }, []) - - const readSettings = useCallback(async () => { - if (!window?.core?.api) { - return - } - const settingsFile = await joinPath(['file://settings', 'settings.json']) - if (await fs.existsSync(settingsFile)) { - const settings = await fs.readFileSync(settingsFile, 'utf-8') - return typeof settings === 'object' ? settings : JSON.parse(settings) - } - return {} - }, []) - - const saveSettings = async ({ - runMode, - notify, - gpusInUse, - vulkan, - }: { - runMode?: string | undefined - notify?: boolean | undefined - gpusInUse?: string[] | undefined - vulkan?: boolean | undefined - }) => { - const settingsFile = await joinPath(['file://settings', 'settings.json']) - const settings = await readSettings() - if (runMode != null) settings.run_mode = runMode - if (notify != null) settings.notify = notify - if (gpusInUse != null) settings.gpus_in_use = gpusInUse.filter((e) => !!e) - if (vulkan != null) { - settings.vulkan = vulkan - // GPU enabled, set run_mode to 'gpu' - if (settings.vulkan === true) { - settings.run_mode = 'gpu' - } else { - settings.run_mode = 'cpu' - } - } - await fs.writeFileSync(settingsFile, JSON.stringify(settings)) - } - - return { - readSettings, - saveSettings, - settings, - } -} diff --git a/web/next.config.js b/web/next.config.js index b6da1780c..dfb336a2d 100644 --- a/web/next.config.js +++ b/web/next.config.js @@ -35,7 +35,8 @@ const nextConfig = { POSTHOG_HOST: JSON.stringify(process.env.POSTHOG_HOST), ANALYTICS_HOST: JSON.stringify(process.env.ANALYTICS_HOST), API_BASE_URL: JSON.stringify( - process.env.API_BASE_URL ?? 'http://127.0.0.1:39291' + process.env.API_BASE_URL ?? 
+ `http://127.0.0.1:${process.env.CORTEX_API_PORT ?? '39291'}` ), isMac: process.platform === 'darwin', isWindows: process.platform === 'win32', diff --git a/web/package.json b/web/package.json index 13d433b3a..1d5cef480 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "@janhq/web", - "version": "0.5.13", + "version": "0.5.15", "private": true, "homepage": "./", "scripts": { @@ -14,6 +14,7 @@ "test": "jest" }, "dependencies": { + "@hello-pangea/dnd": "17.0.0", "@hookform/resolvers": "^3.9.1", "@janhq/core": "link:../core", "@janhq/joi": "link:../joi", @@ -29,7 +30,7 @@ "jotai": "^2.6.0", "katex": "^0.16.10", "lodash": "^4.17.21", - "lucide-react": "^0.291.0", + "lucide-react": "^0.311.0", "marked": "^9.1.2", "next": "14.2.3", "next-themes": "^0.2.1", @@ -48,6 +49,7 @@ "rehype-highlight": "^7.0.1", "rehype-highlight-code-lines": "^1.0.4", "rehype-katex": "^7.0.1", + "rehype-raw": "^7.0.0", "remark-gfm": "^4.0.0", "remark-math": "^6.0.0", "sass": "^1.69.4", @@ -57,7 +59,7 @@ "slate-react": "0.110.3", "swr": "^2.2.5", "tailwind-merge": "^2.0.0", - "tailwindcss": "3.3.5", + "tailwindcss": "3.4.17", "ulidx": "^2.3.0", "use-debounce": "^10.0.0", "uuid": "^9.0.1", diff --git a/web/public/images/ModelProvider/deepseek.svg b/web/public/images/ModelProvider/deepseek.svg new file mode 100644 index 000000000..6f4b775d3 --- /dev/null +++ b/web/public/images/ModelProvider/deepseek.svg @@ -0,0 +1,25 @@ + + + + +Created with Pixso. 
+ + diff --git a/web/public/images/ModelProvider/google-gemini.svg b/web/public/images/ModelProvider/google-gemini.svg new file mode 100644 index 000000000..787c83710 --- /dev/null +++ b/web/public/images/ModelProvider/google-gemini.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/web/screens/Hub/ModelFilter/ContextLength/index.tsx b/web/screens/Hub/ModelFilter/ContextLength/index.tsx new file mode 100644 index 000000000..a0886e247 --- /dev/null +++ b/web/screens/Hub/ModelFilter/ContextLength/index.tsx @@ -0,0 +1,84 @@ +import { useState } from 'react' + +import { Slider, Input, Tooltip } from '@janhq/joi' + +import { atom, useAtom } from 'jotai' +import { InfoIcon } from 'lucide-react' + +export const hubCtxLenAtom = atom(0) + +export default function ContextLengthFilter() { + const [value, setValue] = useAtom(hubCtxLenAtom) + const [inputingValue, setInputingValue] = useState(false) + + const normalizeTextValue = (value: number) => { + return value === 100 ? '1M' : value === 0 ? 0 : `${value}K` + } + + return ( +
+
+

Context length

+ + } + content="Controls how much text the model can consider at once. Longer context allows the model to handle more input but uses more memory and runs slower." + /> +
+
+
+ { + setValue(Number(e[0])) + }} + min={0} + max={100} + step={1} + /> +
+

0

+

1M

+
+
+ + setInputingValue(true)} + onBlur={(e) => { + setInputingValue(false) + const numericValue = e.target.value.replace(/\D/g, '') + const value = Number(numericValue) + setValue(value > 100 ? 100 : value) + }} + onChange={(e) => { + // Passthru since it validates again onBlur + if (/^\d*\.?\d*$/.test(e.target.value)) { + setValue(Number(e.target.value)) + } + + // Should not accept invalid value or NaN + // E.g. anything changes that trigger onValueChanged + // Which is incorrect + if ( + Number(e.target.value) > 100 || + Number(e.target.value) < 0 || + Number.isNaN(Number(e.target.value)) + ) + return + setValue(Number(e.target.value)) + }} + /> +
+
+ ) +} diff --git a/web/screens/Hub/ModelFilter/ModelSize/index.tsx b/web/screens/Hub/ModelFilter/ModelSize/index.tsx new file mode 100644 index 000000000..b95d57f8b --- /dev/null +++ b/web/screens/Hub/ModelFilter/ModelSize/index.tsx @@ -0,0 +1,118 @@ +import { useRef, useState } from 'react' + +import { Slider, Input, Tooltip } from '@janhq/joi' + +import { atom, useAtom } from 'jotai' +import { InfoIcon } from 'lucide-react' + +export const hubModelSizeMinAtom = atom(0) +export const hubModelSizeMaxAtom = atom(100) + +export default function ModelSizeFilter({ max }: { max: number }) { + const [value, setValue] = useAtom(hubModelSizeMinAtom) + const [valueMax, setValueMax] = useAtom(hubModelSizeMaxAtom) + const [inputingMinValue, setInputingMinValue] = useState(false) + const [inputingMaxValue, setInputingMaxValue] = useState(false) + + const normalizeTextValue = (value: number) => { + return value === 0 ? 0 : `${value}GB` + } + + return ( +
+
+

Model size

+
+
+
+ { + setValue(Number(e[0])) + setValueMax(Number(e[1])) + }} + min={0} + max={max} + step={1} + /> +
+
+
+
+
+

from

+ + setInputingMinValue(true)} + onBlur={(e) => { + setInputingMinValue(false) + const numericValue = e.target.value.replace(/\D/g, '') + const value = Number(numericValue) + setValue(value > valueMax ? valueMax : value) + }} + onChange={(e) => { + // Passthru since it validates again onBlur + if (/^\d*\.?\d*$/.test(e.target.value)) { + setValue(Number(e.target.value)) + } + + // Should not accept invalid value or NaN + // E.g. anything changes that trigger onValueChanged + // Which is incorrect + if ( + Number(e.target.value) > max || + Number(e.target.value) < 0 || + Number.isNaN(Number(e.target.value)) + ) + return + setValue(Number(e.target.value)) + }} + /> +
+
+

to

+ + setInputingMaxValue(true)} + onBlur={(e) => { + setInputingMaxValue(false) + const numericValue = e.target.value.replace(/\D/g, '') + const value = Number(numericValue) + setValueMax(value > max ? max : value) + }} + onChange={(e) => { + // Passthru since it validates again onBlur + if (/^\d*\.?\d*$/.test(e.target.value)) { + setValueMax(Number(e.target.value)) + } + + // Should not accept invalid value or NaN + // E.g. anything changes that trigger onValueChanged + // Which is incorrect + if ( + Number(e.target.value) > max || + Number(e.target.value) < 0 || + Number.isNaN(Number(e.target.value)) + ) + return + setValueMax(Number(e.target.value)) + }} + /> +
+
+
+
+ ) +} diff --git a/web/screens/Hub/ModelList/ModelHeader/index.tsx b/web/screens/Hub/ModelList/ModelHeader/index.tsx index 3a88a93d8..5fe0993ff 100644 --- a/web/screens/Hub/ModelList/ModelHeader/index.tsx +++ b/web/screens/Hub/ModelList/ModelHeader/index.tsx @@ -1,12 +1,16 @@ -import { useCallback } from 'react' +import { useCallback, useMemo } from 'react' -import { ModelSource } from '@janhq/core' +import Image from 'next/image' + +import { InferenceEngine, ModelSource } from '@janhq/core' import { Button, Tooltip, Dropdown, Badge } from '@janhq/joi' import { useAtomValue, useSetAtom } from 'jotai' import { ChevronDownIcon } from 'lucide-react' +import { twMerge } from 'tailwind-merge' + import ModalCancelDownload from '@/containers/ModalCancelDownload' import { MainViewState } from '@/constants/screens' @@ -14,12 +18,13 @@ import { MainViewState } from '@/constants/screens' import { useCreateNewThread } from '@/hooks/useCreateNewThread' import useDownloadModel from '@/hooks/useDownloadModel' -import { useSettings } from '@/hooks/useSettings' - import { toGigabytes } from '@/utils/converter' +import { getLogoEngine } from '@/utils/modelEngine' import { extractModelName } from '@/utils/modelSource' +import { fuzzySearch } from '@/utils/search' + import { mainViewStateAtom } from '@/helpers/atoms/App.atom' import { assistantsAtom } from '@/helpers/atoms/Assistant.atom' import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom' @@ -29,10 +34,6 @@ import { getDownloadingModelAtom, } from '@/helpers/atoms/Model.atom' import { selectedSettingAtom } from '@/helpers/atoms/Setting.atom' -import { - nvidiaTotalVramAtom, - totalRamAtom, -} from '@/helpers/atoms/SystemBar.atom' type Props = { model: ModelSource @@ -45,17 +46,9 @@ const ModelItemHeader = ({ model, onSelectedModel }: Props) => { const downloadedModels = useAtomValue(downloadedModelsAtom) const setSelectedSetting = useSetAtom(selectedSettingAtom) const { requestCreateNewThread } = 
useCreateNewThread() - const totalRam = useAtomValue(totalRamAtom) - const { settings } = useSettings() - const nvidiaTotalVram = useAtomValue(nvidiaTotalVramAtom) const setMainViewState = useSetAtom(mainViewStateAtom) - // Default nvidia returns vram in MB, need to convert to bytes to match the unit of totalRamW - let ram = nvidiaTotalVram * 1024 * 1024 - if (ram === 0 || settings?.run_mode === 'cpu') { - ram = totalRam - } const serverEnabled = useAtomValue(serverEnabledAtom) const assistants = useAtomValue(assistantsAtom) @@ -66,6 +59,11 @@ const ModelItemHeader = ({ model, onSelectedModel }: Props) => { const isDownloaded = downloadedModels.some((md) => model.models.some((m) => m.id === md.id) ) + const defaultModel = useMemo(() => { + return model.models?.find( + (e) => e.id.includes('q4-km') || fuzzySearch('q4km', e.id) + ) + }, [model]) let downloadButton = (
@@ -76,19 +74,21 @@ const ModelItemHeader = ({ model, onSelectedModel }: Props) => { Download
({ name: (
{e.id} - - Default - + {e.id === defaultModel?.id && ( + + Default + + )}
), value: e.id, @@ -158,9 +158,23 @@ const ModelItemHeader = ({ model, onSelectedModel }: Props) => {
+ {model.type === 'cloud' && ( + <> + logo + + )} {extractModelName(model.metadata?.id)}
diff --git a/web/screens/Hub/ModelList/ModelItem/index.tsx b/web/screens/Hub/ModelList/ModelItem/index.tsx index b2f0b7e8a..e96021c20 100644 --- a/web/screens/Hub/ModelList/ModelItem/index.tsx +++ b/web/screens/Hub/ModelList/ModelItem/index.tsx @@ -5,11 +5,14 @@ import Image from 'next/image' import { ModelSource } from '@janhq/core' import { DownloadIcon, FileJson } from 'lucide-react' +import rehypeRaw from 'rehype-raw' import ModelLabel from '@/containers/ModelLabel' import ModelItemHeader from '@/screens/Hub/ModelList/ModelHeader' +import { markdownComponents } from '@/screens/Thread/ThreadCenterPanel/TextMessage/MarkdownUtils' + import { toGigabytes } from '@/utils/converter' import { extractDescription } from '@/utils/modelSource' import '@/styles/components/model.scss' @@ -31,15 +34,19 @@ const ModelItem: React.FC = ({ model, onSelectedModel }) => {
- + {extractDescription(model.metadata?.description) || '-'}
- {model.metadata?.author && ( + {(model?.author ?? model?.metadata?.author) && (

{model.id?.includes('huggingface.co') && ( <> @@ -52,7 +59,7 @@ const ModelItem: React.FC = ({ model, onSelectedModel }) => { />{' '} )}{' '} - {model.metadata?.author} + {model?.author ?? model?.metadata?.author}

)} {model.models?.length > 0 && ( diff --git a/web/screens/Hub/ModelList/index.tsx b/web/screens/Hub/ModelList/index.tsx index 5db431808..b8bafa61a 100644 --- a/web/screens/Hub/ModelList/index.tsx +++ b/web/screens/Hub/ModelList/index.tsx @@ -5,18 +5,29 @@ import ModelItem from '@/screens/Hub/ModelList/ModelItem' type Props = { models: ModelSource[] onSelectedModel: (model: ModelSource) => void + filterOption?: string } -const ModelList = ({ models, onSelectedModel }: Props) => { +const ModelList = ({ models, onSelectedModel, filterOption }: Props) => { return ( -
- {models.map((model) => ( - onSelectedModel(model)} - /> - ))} +
+ {models.length === 0 && filterOption === 'on-device' ? ( +
+ + No results found + +
+ ) : ( + <> + {models.map((model) => ( + onSelectedModel(model)} + /> + ))} + + )}
) } diff --git a/web/screens/Hub/ModelPage/RemoteModelRefresh.tsx b/web/screens/Hub/ModelPage/RemoteModelRefresh.tsx new file mode 100644 index 000000000..d091b97b5 --- /dev/null +++ b/web/screens/Hub/ModelPage/RemoteModelRefresh.tsx @@ -0,0 +1,29 @@ +import { Button } from '@janhq/joi' + +import { RefreshCwIcon } from 'lucide-react' + +import Spinner from '@/containers/Loader/Spinner' + +import { useRefreshModelList } from '@/hooks/useEngineManagement' + +function RemoteModelRefresh({ engine }: { engine: string }) { + const { refreshingModels, refreshModels } = useRefreshModelList(engine) + + return ( + + ) +} + +export default RemoteModelRefresh diff --git a/web/screens/Hub/ModelPage/index.tsx b/web/screens/Hub/ModelPage/index.tsx index dd551c96d..dcd0c833b 100644 --- a/web/screens/Hub/ModelPage/index.tsx +++ b/web/screens/Hub/ModelPage/index.tsx @@ -2,26 +2,32 @@ import Image from 'next/image' import { ModelSource } from '@janhq/core' import { Badge, Button, ScrollArea } from '@janhq/joi' -import { useSetAtom } from 'jotai' +import { useAtomValue, useSetAtom } from 'jotai' import { ArrowLeftIcon, DownloadIcon, FileJson, SettingsIcon, } from 'lucide-react' -import '@/styles/components/marked.scss' import ModelDownloadButton from '@/containers/ModelDownloadButton' +import ModelLabel from '@/containers/ModelLabel' + import { MainViewState } from '@/constants/screens' import { MarkdownTextMessage } from '@/screens/Thread/ThreadCenterPanel/TextMessage/MarkdownTextMessage' import { toGigabytes } from '@/utils/converter' -import { extractModelName } from '@/utils/modelSource' +import { extractModelName, removeYamlFrontMatter } from '@/utils/modelSource' + +import RemoteModelRefresh from './RemoteModelRefresh' import { mainViewStateAtom } from '@/helpers/atoms/App.atom' -import { selectedSettingAtom } from '@/helpers/atoms/Setting.atom' +import { + selectedSettingAtom, + showScrollBarAtom, +} from '@/helpers/atoms/Setting.atom' type Props = { model: ModelSource @@ 
-31,11 +37,17 @@ type Props = { const ModelPage = ({ model, onGoBack }: Props) => { const setSelectedSetting = useSetAtom(selectedSettingAtom) const setMainViewState = useSetAtom(mainViewStateAtom) + const showScrollBar = useAtomValue(showScrollBarAtom) + return ( - +
-
+
- {model.metadata?.author && ( + {(model?.author ?? model?.metadata?.author) && (

{model.id?.includes('huggingface.co') && ( <> @@ -103,7 +115,7 @@ const ModelPage = ({ model, onGoBack }: Props) => { />{' '} )} - {model.metadata?.author} + {model?.author ?? model?.metadata?.author}

)} {model.models?.length > 0 && ( @@ -122,24 +134,29 @@ const ModelPage = ({ model, onGoBack }: Props) => {
{/* Table of versions */}
-
- +
+
- {model.type !== 'cloud' && ( <> - + - )} - + @@ -151,7 +168,9 @@ const ModelPage = ({ model, onGoBack }: Props) => { > {model.type !== 'cloud' && ( <> + @@ -178,7 +200,11 @@ const ModelPage = ({ model, onGoBack }: Props) => { )} @@ -192,8 +218,8 @@ const ModelPage = ({ model, onGoBack }: Props) => { {/* README */}
diff --git a/web/screens/Hub/index.tsx b/web/screens/Hub/index.tsx index 51c0ea7e5..ff6bf3a77 100644 --- a/web/screens/Hub/index.tsx +++ b/web/screens/Hub/index.tsx @@ -7,10 +7,17 @@ import Image from 'next/image' import { ModelSource } from '@janhq/core' -import { ScrollArea, Button, Select, Tabs, useClickOutside } from '@janhq/joi' +import { + ScrollArea, + Button, + Select, + Tabs, + useClickOutside, + Switch, +} from '@janhq/joi' import { motion as m } from 'framer-motion' -import { useAtom, useAtomValue, useSetAtom } from 'jotai' +import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai' import { ImagePlusIcon, UploadCloudIcon, UploadIcon } from 'lucide-react' import { twMerge } from 'tailwind-merge' @@ -28,9 +35,15 @@ import { import ModelList from '@/screens/Hub/ModelList' +import { toGigabytes } from '@/utils/converter' import { extractModelRepo } from '@/utils/modelSource' import { fuzzySearch } from '@/utils/search' +import ContextLengthFilter, { hubCtxLenAtom } from './ModelFilter/ContextLength' +import ModelSizeFilter, { + hubModelSizeMaxAtom, + hubModelSizeMinAtom, +} from './ModelFilter/ModelSize' import ModelPage from './ModelPage' import { @@ -39,6 +52,9 @@ import { } from '@/helpers/atoms/App.atom' import { modelDetailAtom } from '@/helpers/atoms/Model.atom' +import { showScrollBarAtom } from '@/helpers/atoms/Setting.atom' +import { totalRamAtom } from '@/helpers/atoms/SystemBar.atom' + const sortMenus = [ { name: 'Most downloaded', @@ -64,6 +80,8 @@ const filterOptions = [ }, ] +const hubCompatibleAtom = atom(false) + const HubScreen = () => { const { sources } = useGetModelSources() const { sources: remoteModelSources } = useGetEngineModelSources() @@ -78,12 +96,29 @@ const HubScreen = () => { const [selectedModel, setSelectedModel] = useState( undefined ) + const showScrollBar = useAtomValue(showScrollBarAtom) const [modelDetail, setModelDetail] = useAtom(modelDetailAtom) const setImportModelStage = useSetAtom(setImportModelStageAtom) 
const dropdownRef = useRef(null) const imageInputRef = useRef(null) const hubBannerSettingRef = useRef(null) + const [compatible, setCompatible] = useAtom(hubCompatibleAtom) + const totalRam = useAtomValue(totalRamAtom) + const [ctxLenFilter, setCtxLenFilter] = useAtom(hubCtxLenAtom) + const [minModelSizeFilter, setMinModelSizeFilter] = + useAtom(hubModelSizeMinAtom) + const [maxModelSizeFilter, setMaxModelSizeFilter] = + useAtom(hubModelSizeMaxAtom) + + const largestModel = + sources && + sources + .flatMap((model) => model.models) + .reduce((max, model) => (model.size > max.size ? model : max), { + size: 0, + }) + const searchedModels = useMemo( () => searchValue.length @@ -97,9 +132,34 @@ const HubScreen = () => { [sources, searchValue] ) + const filteredModels = useMemo(() => { + return (sources ?? []).filter((model) => { + const isCompatible = + !compatible || + model.models?.some((e) => e.size * 1.5 < totalRam * (1 << 20)) + const matchesCtxLen = + !ctxLenFilter || + model.metadata?.gguf?.context_length > ctxLenFilter * 1000 + const matchesMinSize = + !minModelSizeFilter || + model.models.some((e) => e.size >= minModelSizeFilter * (1 << 30)) + const matchesMaxSize = + maxModelSizeFilter === largestModel?.size || + model.models.some((e) => e.size <= maxModelSizeFilter * (1 << 30)) + + return isCompatible && matchesCtxLen && matchesMinSize && matchesMaxSize + }) + }, [ + sources, + compatible, + ctxLenFilter, + minModelSizeFilter, + maxModelSizeFilter, + totalRam, + ]) + const sortedModels = useMemo(() => { - if (!sources) return [] - return sources.sort((a, b) => { + return filteredModels.sort((a, b) => { if (sortSelected === 'most-downloaded') { return b.metadata.downloads - a.metadata.downloads } else { @@ -109,7 +169,7 @@ const HubScreen = () => { ) } }) - }, [sortSelected, sources]) + }, [sortSelected, filteredModels]) useEffect(() => { if (modelDetail) { @@ -118,6 +178,19 @@ const HubScreen = () => { } }, [modelDetail, sources, setModelDetail, 
addModelSource]) + useEffect(() => { + if (largestModel) { + setMaxModelSizeFilter( + Number( + toGigabytes(Number(largestModel?.size), { + hideUnit: true, + toFixed: 0, + }) + ) + ) + } + }, [largestModel]) + useEffect(() => { if (selectedModel) { // Try add the model source again to update it's data @@ -211,12 +284,13 @@ const HubScreen = () => { > {!selectedModel && ( <> -
-
+
+
{ />
{hubBannerOption === 'gallery' && ( - + {Array.from({ length: 30 }, (_, i) => i + 1).map( (e) => { return ( @@ -290,11 +367,11 @@ const HubScreen = () => { )} {hubBannerOption === 'upload' && (
{ imageInputRef.current?.click() }} - {...getRootProps()} >
@@ -348,35 +425,45 @@ const HubScreen = () => {
0 && 'visible' + 'invisible absolute mt-2 max-h-[400px] w-full overflow-y-auto rounded-lg border border-[hsla(var(--app-border))] bg-[hsla(var(--app-bg))] shadow-lg', + searchValue.length > 0 && 'visible' )} > - {searchedModels.map((model) => ( -
{ - setSelectedModel(model) - e.stopPropagation() - }} - > - - {searchValue.includes('huggingface.co') && ( - <> - {' '} - - )} - {extractModelRepo(model.id)} + {searchedModels.length === 0 ? ( +
+ + No results found
- ))} + ) : ( +
+ {searchedModels.map((model) => ( +
{ + setSelectedModel(model) + e.stopPropagation() + }} + > + + {searchValue.includes('huggingface.co') && ( + <> + {' '} + + )} + {extractModelRepo(model.id)} + +
+ ))} +
+ )}
@@ -392,55 +479,107 @@ const HubScreen = () => {
-
- <> -
-
-
- {filterOptions.map((e) => ( -
- +
+
+ setCompatible(!compatible)} + className="w-9" + /> + Compatible with my device +
+
+ +
+
+ +
+
+ + {/* Model List */} +
+ <> +
+
+
+ {filterOptions.map((e) => ( +
- {e.name} - -
- ))} + +
+ ))} +
+
+
+ { - setSortSelected(value) - }} - options={sortMenus} + {(filterOption === 'on-device' || + filterOption === 'all') && ( + setSelectedModel(model)} + filterOption={filterOption} /> -
-
- {(filterOption === 'on-device' || filterOption === 'all') && ( - setSelectedModel(model)} - /> - )} - {(filterOption === 'cloud' || filterOption === 'all') && ( - setSelectedModel(model)} - /> - )} - + )} + {(filterOption === 'cloud' || filterOption === 'all') && ( + setSelectedModel(model)} + /> + )} + +
diff --git a/web/screens/LocalServer/LocalServerCenterPanel/index.tsx b/web/screens/LocalServer/LocalServerCenterPanel/index.tsx index c5e42a9d2..455314b40 100644 --- a/web/screens/LocalServer/LocalServerCenterPanel/index.tsx +++ b/web/screens/LocalServer/LocalServerCenterPanel/index.tsx @@ -27,7 +27,7 @@ const LocalServerCenterPanel = () => { return (
-
+

Server Logs

-
-
-
-
Appearance
-
-

- Select a color theme. -

-
- handleClickTheme(e)} />
+ {themeData?.reduceTransparent && ( +
+
+
+
Interface theme
+
+
+
+ + +
+
+ )} +
+
+
+
Chat Width
+
+

+ Choose the width of the chat area to customize your conversation + view. +

+
+
+
+ {chatWidthOption.map((option) => { + return ( +
+ +
+ ) + })} +
+
+
+
+
+
+
Spell Check
+
+

+ Turn on to enable spell check. +

+
+
+ setSpellCheck(e.target.checked)} + /> +
+
+
+
+
+
Scrolling Bar
+
+

+ Turn on to make scrolling bar visible across windows. +

+
+
+ setShowScrollBar(e.target.checked)} + /> +
+
-
+ ) } diff --git a/web/screens/Settings/CoreExtensions/ExtensionItem.tsx b/web/screens/Settings/CoreExtensions/ExtensionItem.tsx deleted file mode 100644 index 497b8ac4a..000000000 --- a/web/screens/Settings/CoreExtensions/ExtensionItem.tsx +++ /dev/null @@ -1,197 +0,0 @@ -import { useCallback, useEffect, useState } from 'react' - -import { - BaseExtension, - Compatibility, - InstallationState, - abortDownload, -} from '@janhq/core' -import { Button, Progress, Tooltip } from '@janhq/joi' - -import { InfoCircledIcon } from '@radix-ui/react-icons' -import { useAtomValue } from 'jotai' - -import { Marked, Renderer } from 'marked' - -import { extensionManager } from '@/extension' -import { installingExtensionAtom } from '@/helpers/atoms/Extension.atom' - -type Props = { - item: BaseExtension -} - -const ExtensionItem: React.FC = ({ item }) => { - const [compatibility, setCompatibility] = useState( - undefined - ) - const [installState, setInstallState] = - useState('NotRequired') - const installingExtensions = useAtomValue(installingExtensionAtom) - const isInstalling = installingExtensions.some( - (e) => e.extensionId === item.name - ) - - const progress = isInstalling - ? (installingExtensions.find((e) => e.extensionId === item.name) - ?.percentage ?? 
-1) - : -1 - - useEffect(() => { - const getExtensionInstallationState = async () => { - const extension = extensionManager.getByName(item.name) - if (!extension) return - - if (typeof extension?.installationState === 'function') { - const installState = await extension.installationState() - setInstallState(installState) - } - } - - getExtensionInstallationState() - }, [item.name, isInstalling]) - - useEffect(() => { - const extension = extensionManager.getByName(item.name) - if (!extension) return - setCompatibility(extension.compatibility()) - }, [setCompatibility, item.name]) - - const onInstallClick = useCallback(async () => { - const extension = extensionManager.getByName(item.name) - if (!extension) return - - await extension.install() - }, [item.name]) - - const onCancelInstallingClick = () => { - const extension = installingExtensions.find( - (e) => e.extensionId === item.name - ) - if (extension?.localPath) { - abortDownload(extension.localPath) - } - } - - const description = marked.parse(item.description ?? '', { async: false }) - - return ( -
-
-
-
Additional Dependencies
-
-
-
- -
- -
-
- ) -} - -type InstallStateProps = { - installProgress: number - compatibility?: Compatibility - installState: InstallationState - onInstallClick: () => void - onCancelClick: () => void -} - -const InstallStateIndicator: React.FC = ({ - installProgress, - compatibility, - installState, - onInstallClick, - onCancelClick, -}) => { - if (installProgress !== -1) { - const progress = installProgress * 100 - return ( -
- -
- - - {progress.toFixed(0)}% - -
-
- ) - } - - switch (installState) { - case 'Installed': - return ( -
- Installed -
- ) - case 'NotCompatible': - return ( -
-
- Incompatible - - } - content={ - compatibility && - !compatibility['platform']?.includes(PLATFORM) ? ( - - Only available on  - {compatibility?.platform - ?.map((e: string) => - e === 'win32' - ? 'Windows' - : e === 'linux' - ? 'Linux' - : 'MacOS' - ) - .join(', ')} - - ) : ( - Your GPUs are not compatible with this extension - ) - } - /> -
-
- ) - case 'NotInstalled': - return ( - - ) - default: - return
- } -} - -const marked: Marked = new Marked({ - renderer: { - link: (href, title, text) => { - return Renderer.prototype.link - ?.apply(this, [href, title, text]) - .replace( - ' { const [coreActiveExtensions, setCoreActiveExtensions] = useState( [] ) - + const showScrollBar = useAtomValue(showScrollBarAtom) const [searchText, setSearchText] = useState('') const [showLoading, setShowLoading] = useState(false) const fileInputRef = useRef(null) + const { relaunch } = useApp() useEffect(() => { const getAllSettings = async () => { @@ -35,10 +40,7 @@ const ExtensionCatalog = () => { 'provider' in extension && typeof extension.provider === 'string' ) { - if ( - (settings && settings.length > 0) || - (await extension.installationState()) !== 'NotRequired' - ) { + if (settings && settings.length > 0) { engineMenu.push({ ...extension, provider: @@ -72,7 +74,7 @@ const ExtensionCatalog = () => { // Send the filename of the to be installed extension // to the main process for installation const installed = await extensionManager.install([extensionFile]) - if (installed) window.core?.api?.relaunch() + if (installed) relaunch() } /** @@ -85,7 +87,7 @@ const ExtensionCatalog = () => { // Send the filename of the to be uninstalled extension // to the main process for removal const res = await extensionManager.uninstall([name]) - if (res) window.core?.api?.relaunch() + if (res) relaunch() } /** @@ -103,7 +105,10 @@ const ExtensionCatalog = () => { return ( <> - +
{
- {coreActiveExtensions.length > 0 && ( -
-
- Core Extension -
-
- )} {coreActiveExtensions .filter((x) => x.name.includes(searchText.toLowerCase().trim())) .sort((a, b) => a.name.localeCompare(b.name)) diff --git a/web/screens/Settings/Engines/DeleteEngineVariant.tsx b/web/screens/Settings/Engines/DeleteEngineVariant.tsx index 1033164e6..d21dac3d8 100644 --- a/web/screens/Settings/Engines/DeleteEngineVariant.tsx +++ b/web/screens/Settings/Engines/DeleteEngineVariant.tsx @@ -25,7 +25,7 @@ const DeleteEngineVariant = ({ return ( Delete {variant.name}} + title={Delete Variant} open={open} onOpenChange={() => setOpen(!open)} trigger={ @@ -39,7 +39,8 @@ const DeleteEngineVariant = ({ content={

- Are you sure you want to delete this variant? + Are you sure you want to delete {variant.name}? This action cannot + be undone.

{
- onSwitchChange(engine)} - /> + {engine !== InferenceEngine.cortex_llamacpp && ( + onSwitchChange(engine)} + /> + )} - ) : ( - - )} - - )} + ) ?? 0 + )} + +
+ + ) : ( + + )} + + )} +
-
- ) - })} + ) + })}
diff --git a/web/screens/Settings/Engines/ModalAddModel.tsx b/web/screens/Settings/Engines/ModalAddModel.tsx index 40c986e92..1fbdabb6a 100644 --- a/web/screens/Settings/Engines/ModalAddModel.tsx +++ b/web/screens/Settings/Engines/ModalAddModel.tsx @@ -10,7 +10,7 @@ import { InferenceEngine, Model } from '@janhq/core' import { Button, Input, Modal } from '@janhq/joi' import { useAtomValue } from 'jotai' -import { PlusIcon } from 'lucide-react' +import { PlusIcon, ArrowUpRightFromSquare } from 'lucide-react' import { z } from 'zod' @@ -71,7 +71,7 @@ const ModelAddModel = ({ engine }: { engine: string }) => { {prefix} {label} -

+

{desc} {isRequired && *}

@@ -97,7 +97,7 @@ const ModelAddModel = ({ engine }: { engine: string }) => { className="w-[500px]" content={
-
+
+ {model.type !== 'cloud' ? 'Version' : 'Models'} + Format + Size + {model.type === 'cloud' && ( + + )} +
- {item.id?.split(':')?.pop()} + {model.type === 'cloud' + ? item.id + : item.id?.split(':')?.pop()} {i === 0 && model.type !== 'cloud' && ( { + + GGUF