diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 000000000..f980b9df7
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,4 @@
+{
+ "name": "jan",
+ "image": "node:20"
+}
\ No newline at end of file
diff --git a/.github/scripts/auto-sign.sh b/.github/scripts/auto-sign.sh
index 5e6ef9750..a2130e791 100755
--- a/.github/scripts/auto-sign.sh
+++ b/.github/scripts/auto-sign.sh
@@ -8,3 +8,5 @@ fi
# If both variables are set, execute the following commands
find "$APP_PATH" \( -type f -perm +111 -o -name "*.node" \) -exec codesign -s "$DEVELOPER_ID" --options=runtime {} \;
+
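+# Additionally sign any *.o object files bundled with the app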
+find "$APP_PATH" -type f -name "*.o" -exec codesign -s "$DEVELOPER_ID" --options=runtime {} \;
diff --git a/.github/workflows/clean-cloudflare-page-preview-url-and-r2.yml b/.github/workflows/clean-cloudflare-page-preview-url-and-r2.yml
index 620f74714..de761ca69 100644
--- a/.github/workflows/clean-cloudflare-page-preview-url-and-r2.yml
+++ b/.github/workflows/clean-cloudflare-page-preview-url-and-r2.yml
@@ -55,10 +55,10 @@ jobs:
steps:
- name: install-aws-cli-action
uses: unfor19/install-aws-cli-action@v1
- - name: Delete object older than 7 days
+      - name: Delete objects older than 10 days
run: |
# Get the list of objects in the 'latest' folder
- OBJECTS=$(aws s3api list-objects --bucket ${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }} --query 'Contents[?LastModified<`'$(date -d "$current_date -30 days" -u +"%Y-%m-%dT%H:%M:%SZ")'`].{Key: Key}' --endpoint-url https://${{ secrets.CLOUDFLARE_ACCOUNT_ID }}.r2.cloudflarestorage.com | jq -c .)
+ OBJECTS=$(aws s3api list-objects --bucket ${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }} --query 'Contents[?LastModified<`'$(date -d "$current_date -10 days" -u +"%Y-%m-%dT%H:%M:%SZ")'`].{Key: Key}' --endpoint-url https://${{ secrets.CLOUDFLARE_ACCOUNT_ID }}.r2.cloudflarestorage.com | jq -c .)
# Create a JSON file for the delete operation
echo "{\"Objects\": $OBJECTS, \"Quiet\": false}" > delete.json
diff --git a/.github/workflows/jan-electron-build-nightly.yml b/.github/workflows/jan-electron-build-nightly.yml
index f961ccd6f..bc32f9ccc 100644
--- a/.github/workflows/jan-electron-build-nightly.yml
+++ b/.github/workflows/jan-electron-build-nightly.yml
@@ -1,8 +1,14 @@
name: Jan Build Electron App Nightly or Manual
on:
+ push:
+ branches:
+ - main
+ paths-ignore:
+ - 'README.md'
+ - 'docs/**'
schedule:
- - cron: '0 20 * * 2,3,4' # At 8 PM UTC on Tuesday, Wednesday, and Thursday, which is 3 AM UTC+7
+    - cron: '0 20 * * 1,2,3' # At 8 PM UTC on Monday, Tuesday, and Wednesday, which is 3 AM UTC+7 on Tuesday, Wednesday, and Thursday
workflow_dispatch:
inputs:
public_provider:
@@ -23,19 +29,36 @@ jobs:
- name: Set public provider
id: set-public-provider
run: |
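+          # workflow_dispatch: use the selected provider and ref; schedule: publish to Cloudflare R2 from dev; push: publish to Cloudflare R2 from the pushed ref; otherwise build only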
- if [ ${{ github.event == 'workflow_dispatch' }} ]; then
+ if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
echo "::set-output name=public_provider::${{ github.event.inputs.public_provider }}"
echo "::set-output name=ref::${{ github.ref }}"
else
- echo "::set-output name=public_provider::cloudflare-r2"
- echo "::set-output name=ref::refs/heads/dev"
+ if [ "${{ github.event_name }}" == "schedule" ]; then
+ echo "::set-output name=public_provider::cloudflare-r2"
+ echo "::set-output name=ref::refs/heads/dev"
+ elif [ "${{ github.event_name }}" == "push" ]; then
+ echo "::set-output name=public_provider::cloudflare-r2"
+ echo "::set-output name=ref::${{ github.ref }}"
+ else
+ echo "::set-output name=public_provider::none"
+ echo "::set-output name=ref::${{ github.ref }}"
+ fi
fi
# Job create Update app version based on latest release tag with build number and save to output
get-update-version:
uses: ./.github/workflows/template-get-update-version.yml
- build-macos:
- uses: ./.github/workflows/template-build-macos.yml
+ build-macos-x64:
+ uses: ./.github/workflows/template-build-macos-x64.yml
+ needs: [get-update-version, set-public-provider]
+ secrets: inherit
+ with:
+ ref: ${{ needs.set-public-provider.outputs.ref }}
+ public_provider: ${{ needs.set-public-provider.outputs.public_provider }}
+ new_version: ${{ needs.get-update-version.outputs.new_version }}
+
+ build-macos-arm64:
+ uses: ./.github/workflows/template-build-macos-arm64.yml
needs: [get-update-version, set-public-provider]
secrets: inherit
with:
@@ -62,8 +85,51 @@ jobs:
public_provider: ${{ needs.set-public-provider.outputs.public_provider }}
new_version: ${{ needs.get-update-version.outputs.new_version }}
+ combine-latest-mac-yml:
+ needs: [set-public-provider, build-macos-x64, build-macos-arm64]
+ runs-on: ubuntu-latest
+ steps:
+ - name: Getting the repo
+ uses: actions/checkout@v3
+ with:
+ ref: ${{ needs.set-public-provider.outputs.ref }}
+ - name: Download mac-x64 artifacts
+ uses: actions/download-artifact@v3
+ with:
+ name: latest-mac-x64
+ path: ./latest-mac-x64
+ - name: Download mac-arm artifacts
+ uses: actions/download-artifact@v3
+ with:
+ name: latest-mac-arm64
+ path: ./latest-mac-arm64
+
+ - name: 'Merge latest-mac.yml'
+ # unfortunately electron-builder doesn't understand that we have two different releases for mac-x64 and mac-arm, so we need to manually merge the latest files
+ # see https://github.com/electron-userland/electron-builder/issues/5592
+ run: |
+ ls -la .
+ ls -la ./latest-mac-x64
+ ls -la ./latest-mac-arm64
+ ls -la ./electron
+ cp ./electron/merge-latest-ymls.js /tmp/merge-latest-ymls.js
+ npm install js-yaml --prefix /tmp
+ node /tmp/merge-latest-ymls.js ./latest-mac-x64/latest-mac.yml ./latest-mac-arm64/latest-mac.yml ./latest-mac.yml
+ cat ./latest-mac.yml
+
+ - name: Upload latest-mac.yml
+ if: ${{ needs.set-public-provider.outputs.public_provider == 'cloudflare-r2' }}
+ run: |
+ aws s3api put-object --endpoint-url https://${{ secrets.CLOUDFLARE_ACCOUNT_ID }}.r2.cloudflarestorage.com --bucket ${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }} --key "latest/latest-mac.yml" --body "./latest-mac.yml"
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.CLOUDFLARE_R2_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.CLOUDFLARE_R2_SECRET_ACCESS_KEY }}
+ AWS_DEFAULT_REGION: auto
+ AWS_EC2_METADATA_DISABLED: "true"
+
+
noti-discord-nightly-and-update-url-readme:
- needs: [build-macos, build-windows-x64, build-linux-x64, get-update-version, set-public-provider]
+ needs: [build-macos-x64, build-macos-arm64, build-windows-x64, build-linux-x64, get-update-version, set-public-provider, combine-latest-mac-yml]
secrets: inherit
if: github.event_name == 'schedule'
uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
@@ -73,8 +139,19 @@ jobs:
push_to_branch: dev
new_version: ${{ needs.get-update-version.outputs.new_version }}
+ noti-discord-pre-release-and-update-url-readme:
+ needs: [build-macos-x64, build-macos-arm64, build-windows-x64, build-linux-x64, get-update-version, set-public-provider, combine-latest-mac-yml]
+ secrets: inherit
+ if: github.event_name == 'push'
+ uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
+ with:
+ ref: refs/heads/dev
+ build_reason: Pre-release
+ push_to_branch: dev
+ new_version: ${{ needs.get-update-version.outputs.new_version }}
+
noti-discord-manual-and-update-url-readme:
- needs: [build-macos, build-windows-x64, build-linux-x64, get-update-version, set-public-provider]
+ needs: [build-macos-x64, build-macos-arm64, build-windows-x64, build-linux-x64, get-update-version, set-public-provider, combine-latest-mac-yml]
secrets: inherit
if: github.event_name == 'workflow_dispatch' && github.event.inputs.public_provider == 'cloudflare-r2'
uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
diff --git a/.github/workflows/jan-electron-build-pre-release.yml b/.github/workflows/jan-electron-build-pre-release.yml
deleted file mode 100644
index d37cda5ab..000000000
--- a/.github/workflows/jan-electron-build-pre-release.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-name: Jan Build Electron Pre Release
-
-on:
- push:
- branches:
- - main
- paths:
- - "!README.md"
-
-jobs:
-
- # Job create Update app version based on latest release tag with build number and save to output
- get-update-version:
- uses: ./.github/workflows/template-get-update-version.yml
-
- build-macos:
- uses: ./.github/workflows/template-build-macos.yml
- secrets: inherit
- needs: [get-update-version]
- with:
- ref: ${{ github.ref }}
- public_provider: cloudflare-r2
- new_version: ${{ needs.get-update-version.outputs.new_version }}
-
- build-windows-x64:
- uses: ./.github/workflows/template-build-windows-x64.yml
- secrets: inherit
- needs: [get-update-version]
- with:
- ref: ${{ github.ref }}
- public_provider: cloudflare-r2
- new_version: ${{ needs.get-update-version.outputs.new_version }}
-
- build-linux-x64:
- uses: ./.github/workflows/template-build-linux-x64.yml
- secrets: inherit
- needs: [get-update-version]
- with:
- ref: ${{ github.ref }}
- public_provider: cloudflare-r2
- new_version: ${{ needs.get-update-version.outputs.new_version }}
-
- noti-discord-nightly-and-update-url-readme:
- needs: [build-macos, build-windows-x64, build-linux-x64, get-update-version]
- secrets: inherit
- if: github.event_name == 'push' && github.ref == 'refs/heads/main'
- uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
- with:
- ref: refs/heads/dev
- build_reason: Nightly
- push_to_branch: dev
- new_version: ${{ needs.get-update-version.outputs.new_version }}
diff --git a/.github/workflows/jan-electron-build.yml b/.github/workflows/jan-electron-build.yml
index 20102447b..89e130bbd 100644
--- a/.github/workflows/jan-electron-build.yml
+++ b/.github/workflows/jan-electron-build.yml
@@ -9,8 +9,42 @@ jobs:
get-update-version:
uses: ./.github/workflows/template-get-update-version.yml
- build-macos:
- uses: ./.github/workflows/template-build-macos.yml
+ create-draft-release:
+ runs-on: ubuntu-latest
+ if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
+ outputs:
+ upload_url: ${{ steps.create_release.outputs.upload_url }}
+ version: ${{ steps.get_version.outputs.version }}
+ permissions:
+ contents: write
+ steps:
+ - name: Extract tag name without v prefix
+ id: get_version
+ run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV && echo "::set-output name=version::${GITHUB_REF#refs/tags/v}"
+ env:
+ GITHUB_REF: ${{ github.ref }}
+ - name: Create Draft Release
+ id: create_release
+ uses: actions/create-release@v1
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ tag_name: ${{ github.ref_name }}
+ release_name: "${{ env.VERSION }}"
+ draft: true
+ prerelease: false
+
+ build-macos-x64:
+ uses: ./.github/workflows/template-build-macos-x64.yml
+ secrets: inherit
+ needs: [get-update-version]
+ with:
+ ref: ${{ github.ref }}
+ public_provider: github
+ new_version: ${{ needs.get-update-version.outputs.new_version }}
+
+ build-macos-arm64:
+ uses: ./.github/workflows/template-build-macos-arm64.yml
secrets: inherit
needs: [get-update-version]
with:
@@ -36,8 +70,52 @@ jobs:
public_provider: github
new_version: ${{ needs.get-update-version.outputs.new_version }}
+ combine-latest-mac-yml:
+ needs: [build-macos-x64, build-macos-arm64, create-draft-release]
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ steps:
+ - name: Getting the repo
+ uses: actions/checkout@v3
+
+ - name: Download mac-x64 artifacts
+ uses: actions/download-artifact@v3
+ with:
+ name: latest-mac-x64
+ path: ./latest-mac-x64
+ - name: Download mac-arm artifacts
+ uses: actions/download-artifact@v3
+ with:
+ name: latest-mac-arm64
+ path: ./latest-mac-arm64
+
+ - name: 'Merge latest-mac.yml'
+ # unfortunately electron-builder doesn't understand that we have two different releases for mac-x64 and mac-arm, so we need to manually merge the latest files
+ # see https://github.com/electron-userland/electron-builder/issues/5592
+ run: |
+ ls -la .
+ ls -la ./latest-mac-x64
+ ls -la ./latest-mac-arm64
+ ls -la ./electron
+ cp ./electron/merge-latest-ymls.js /tmp/merge-latest-ymls.js
+ npm install js-yaml --prefix /tmp
+ node /tmp/merge-latest-ymls.js ./latest-mac-x64/latest-mac.yml ./latest-mac-arm64/latest-mac.yml ./latest-mac.yml
+ cat ./latest-mac.yml
+
+ - name: Yet Another Upload Release Asset Action
+ uses: shogo82148/actions-upload-release-asset@v1.7.2
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ upload_url: ${{ needs.create-draft-release.outputs.upload_url }}
+ asset_path: ./latest-mac.yml
+ asset_name: latest-mac.yml
+ asset_content_type: text/yaml
+ overwrite: true
+
update_release_draft:
- needs: [build-macos, build-windows-x64, build-linux-x64]
+ needs: [build-macos-x64, build-macos-arm64, build-windows-x64, build-linux-x64, combine-latest-mac-yml]
permissions:
# write permission is required to create a github release
contents: write
diff --git a/.github/workflows/template-build-linux-x64.yml b/.github/workflows/template-build-linux-x64.yml
index c6d1eac97..08cb1dada 100644
--- a/.github/workflows/template-build-linux-x64.yml
+++ b/.github/workflows/template-build-linux-x64.yml
@@ -98,8 +98,8 @@ jobs:
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- ANALYTICS_ID: ${{ secrets.JAN_APP_POSTHOG_PROJECT_API_KEY }}
- ANALYTICS_HOST: ${{ secrets.JAN_APP_POSTHOG_URL }}
+ ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
+ ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
- name: Upload Artifact .deb file
if: inputs.public_provider != 'github'
diff --git a/.github/workflows/template-build-macos-arm64.yml b/.github/workflows/template-build-macos-arm64.yml
new file mode 100644
index 000000000..54355d55c
--- /dev/null
+++ b/.github/workflows/template-build-macos-arm64.yml
@@ -0,0 +1,160 @@
+name: build-macos
+on:
+ workflow_call:
+ inputs:
+ ref:
+ required: true
+ type: string
+ default: 'refs/heads/main'
+ public_provider:
+ required: true
+ type: string
+ default: none
+ description: 'none: build only, github: build and publish to github, cloudflare: build and publish to cloudflare'
+ new_version:
+ required: true
+ type: string
+ default: ''
+ cloudflare_r2_path:
+ required: false
+ type: string
+ default: '/latest/'
+ secrets:
+ CLOUDFLARE_R2_BUCKET_NAME:
+ required: false
+ CLOUDFLARE_R2_ACCESS_KEY_ID:
+ required: false
+ CLOUDFLARE_R2_SECRET_ACCESS_KEY:
+ required: false
+ CLOUDFLARE_ACCOUNT_ID:
+ required: false
+ CODE_SIGN_P12_BASE64:
+ required: false
+ CODE_SIGN_P12_PASSWORD:
+ required: false
+ APPLE_ID:
+ required: false
+ APPLE_APP_SPECIFIC_PASSWORD:
+ required: false
+ DEVELOPER_ID:
+ required: false
+
+jobs:
+ build-macos:
+ runs-on: macos-silicon
+ environment: production
+ permissions:
+ contents: write
+ steps:
+ - name: Getting the repo
+ uses: actions/checkout@v3
+ with:
+ ref: ${{ inputs.ref }}
+
+ - name: Installing node
+ uses: actions/setup-node@v1
+ with:
+ node-version: 20
+      - name: Unlock keychain
+ run: |
+ security unlock-keychain -p ${{ secrets.KEYCHAIN_PASSWORD }} ~/Library/Keychains/login.keychain-db
+ # - uses: actions/setup-python@v5
+ # with:
+ # python-version: '3.11'
+
+ # - name: Install jq
+ # uses: dcarbone/install-jq-action@v2.0.1
+
+ - name: Update app version based on latest release tag with build number
+ if: inputs.public_provider != 'github'
+ run: |
+ echo "Version: ${{ inputs.new_version }}"
+ # Update the version in electron/package.json
+ jq --arg version "${{ inputs.new_version }}" '.version = $version' electron/package.json > /tmp/package.json
+ mv /tmp/package.json electron/package.json
+
+ jq --arg version "${{ inputs.new_version }}" '.version = $version' web/package.json > /tmp/package.json
+ mv /tmp/package.json web/package.json
+
+ jq '.build.publish = [{"provider": "generic", "url": "${{ secrets.CLOUDFLARE_R2_PUBLIC_URL }}", "channel": "latest"}, {"provider": "s3", "bucket": "${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }}", "region": "auto", "endpoint": "https://${{ secrets.CLOUDFLARE_ACCOUNT_ID }}.r2.cloudflarestorage.com", "path": "${{ inputs.cloudflare_r2_path }}", "channel": "latest"}]' electron/package.json > /tmp/package.json
+ mv /tmp/package.json electron/package.json
+ cat electron/package.json
+
+      - name: Update app version based on tag
+ if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github'
+ run: |
+ if [[ ! "${VERSION_TAG}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
+ echo "Error: Tag is not valid!"
+ exit 1
+ fi
+ jq --arg version "${VERSION_TAG#v}" '.version = $version' electron/package.json > /tmp/package.json
+ mv /tmp/package.json electron/package.json
+ jq --arg version "${VERSION_TAG#v}" '.version = $version' web/package.json > /tmp/package.json
+ mv /tmp/package.json web/package.json
+ env:
+ VERSION_TAG: ${{ inputs.new_version }}
+
+ # - name: Get Cer for code signing
+ # run: base64 -d <<< "$CODE_SIGN_P12_BASE64" > /tmp/codesign.p12
+ # shell: bash
+ # env:
+ # CODE_SIGN_P12_BASE64: ${{ secrets.CODE_SIGN_P12_BASE64 }}
+
+ # - uses: apple-actions/import-codesign-certs@v2
+ # continue-on-error: true
+ # with:
+ # p12-file-base64: ${{ secrets.CODE_SIGN_P12_BASE64 }}
+ # p12-password: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
+
+ - name: Build and publish app to cloudflare r2 or github artifactory
+ if: inputs.public_provider != 'github'
+ run: |
+ # check public_provider is true or not
+ echo "public_provider is ${{ inputs.public_provider }}"
+ if [ "${{ inputs.public_provider }}" == "none" ]; then
+ make build
+ else
+ make build-and-publish
+ fi
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ # CSC_LINK: "/tmp/codesign.p12"
+ # CSC_KEY_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
+ # CSC_IDENTITY_AUTO_DISCOVERY: "true"
+ APPLE_ID: ${{ secrets.APPLE_ID }}
+ APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
+ APP_PATH: "."
+ DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
+ AWS_ACCESS_KEY_ID: ${{ secrets.CLOUDFLARE_R2_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.CLOUDFLARE_R2_SECRET_ACCESS_KEY }}
+ AWS_DEFAULT_REGION: auto
+ AWS_EC2_METADATA_DISABLED: "true"
+
+ - name: Build and publish app to github
+ if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github'
+ run: |
+ make build-and-publish
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ # CSC_LINK: "/tmp/codesign.p12"
+ # CSC_KEY_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
+ # CSC_IDENTITY_AUTO_DISCOVERY: "true"
+ APPLE_ID: ${{ secrets.APPLE_ID }}
+ APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
+ APP_PATH: "."
+ DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
+ ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
+ ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
+
+ - name: Upload Artifact
+ if: inputs.public_provider != 'github'
+ uses: actions/upload-artifact@v2
+ with:
+ name: jan-mac-arm64-${{ inputs.new_version }}
+ path: ./electron/dist/jan-mac-arm64-${{ inputs.new_version }}.dmg
+
+ - name: Upload Artifact
+ uses: actions/upload-artifact@v2
+ with:
+ name: latest-mac-arm64
+ path: ./electron/dist/latest-mac.yml
\ No newline at end of file
diff --git a/.github/workflows/template-build-macos.yml b/.github/workflows/template-build-macos-x64.yml
similarity index 94%
rename from .github/workflows/template-build-macos.yml
rename to .github/workflows/template-build-macos-x64.yml
index bc48e6c21..e313c2947 100644
--- a/.github/workflows/template-build-macos.yml
+++ b/.github/workflows/template-build-macos-x64.yml
@@ -137,8 +137,8 @@ jobs:
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
APP_PATH: "."
DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
- ANALYTICS_ID: ${{ secrets.JAN_APP_POSTHOG_PROJECT_API_KEY }}
- ANALYTICS_HOST: ${{ secrets.JAN_APP_POSTHOG_URL }}
+ ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
+ ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
- name: Upload Artifact
if: inputs.public_provider != 'github'
@@ -148,9 +148,8 @@ jobs:
path: ./electron/dist/jan-mac-x64-${{ inputs.new_version }}.dmg
- name: Upload Artifact
- if: inputs.public_provider != 'github'
uses: actions/upload-artifact@v2
with:
- name: jan-mac-arm64-${{ inputs.new_version }}
- path: ./electron/dist/jan-mac-arm64-${{ inputs.new_version }}.dmg
+ name: latest-mac-x64
+ path: ./electron/dist/latest-mac.yml
diff --git a/.github/workflows/template-build-windows-x64.yml b/.github/workflows/template-build-windows-x64.yml
index 5d96b3f49..b81997bde 100644
--- a/.github/workflows/template-build-windows-x64.yml
+++ b/.github/workflows/template-build-windows-x64.yml
@@ -127,8 +127,8 @@ jobs:
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- ANALYTICS_ID: ${{ secrets.JAN_APP_POSTHOG_PROJECT_API_KEY }}
- ANALYTICS_HOST: ${{ secrets.JAN_APP_POSTHOG_URL }}
+ ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
+ ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
diff --git a/.github/workflows/update-release-url.yml b/.github/workflows/update-release-url.yml
index 545d6542e..99a3db0e0 100644
--- a/.github/workflows/update-release-url.yml
+++ b/.github/workflows/update-release-url.yml
@@ -17,7 +17,7 @@ jobs:
with:
fetch-depth: "0"
token: ${{ secrets.PAT_SERVICE_ACCOUNT }}
- ref: main
+ ref: dev
- name: Get Latest Release
uses: pozetroninc/github-action-get-latest-release@v0.7.0
@@ -46,4 +46,4 @@ jobs:
git config --global user.name "Service Account"
git add README.md
git commit -m "Update README.md with Stable Download URLs"
- git -c http.extraheader="AUTHORIZATION: bearer ${{ secrets.PAT_SERVICE_ACCOUNT }}" push origin HEAD:main
+ git -c http.extraheader="AUTHORIZATION: bearer ${{ secrets.PAT_SERVICE_ACCOUNT }}" push origin HEAD:dev
diff --git a/.gitignore b/.gitignore
index e3e4635fc..4540e5c7a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,8 @@ build
electron/renderer
electron/models
electron/docs
+electron/engines
+server/pre-install
package-lock.json
*.log
@@ -26,3 +28,4 @@ extensions/inference-nitro-extension/bin/*/*.exp
extensions/inference-nitro-extension/bin/*/*.lib
extensions/inference-nitro-extension/bin/saved-*
extensions/inference-nitro-extension/bin/*.tar.gz
+
diff --git a/README.md b/README.md
index 2722a2870..e1f74ef23 100644
--- a/README.md
+++ b/README.md
@@ -43,31 +43,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
| Experimental (Nightly Build) |
-
+
jan.exe
|
-
+
Intel
|
-
+
M1/M2
|
-
+
jan.deb
|
-
+
jan.AppImage
diff --git a/core/src/api/index.ts b/core/src/api/index.ts
index a3d0361e7..0d7cc51f7 100644
--- a/core/src/api/index.ts
+++ b/core/src/api/index.ts
@@ -3,7 +3,6 @@
* @description Enum of all the routes exposed by the app
*/
export enum AppRoute {
- appDataPath = 'appDataPath',
openExternalUrl = 'openExternalUrl',
openAppDirectory = 'openAppDirectory',
openFileExplore = 'openFileExplorer',
@@ -12,6 +11,7 @@ export enum AppRoute {
updateAppConfiguration = 'updateAppConfiguration',
relaunch = 'relaunch',
joinPath = 'joinPath',
+ isSubdirectory = 'isSubdirectory',
baseName = 'baseName',
startServer = 'startServer',
stopServer = 'stopServer',
@@ -61,7 +61,9 @@ export enum FileManagerRoute {
syncFile = 'syncFile',
getJanDataFolderPath = 'getJanDataFolderPath',
getResourcePath = 'getResourcePath',
+ getUserHomePath = 'getUserHomePath',
fileStat = 'fileStat',
+ writeBlob = 'writeBlob',
}
export type ApiFunction = (...args: any[]) => any
diff --git a/core/src/core.ts b/core/src/core.ts
index aa545e10e..8831c6001 100644
--- a/core/src/core.ts
+++ b/core/src/core.ts
@@ -22,7 +22,11 @@ const executeOnMain: (extension: string, method: string, ...args: any[]) => Prom
* @param {object} network - Optional object to specify proxy/whether to ignore SSL certificates.
* @returns {Promise} A promise that resolves when the file is downloaded.
*/
-const downloadFile: (url: string, fileName: string, network?: { proxy?: string, ignoreSSL?: boolean }) => Promise<any> = (url, fileName, network) => {
+const downloadFile: (
+ url: string,
+ fileName: string,
+ network?: { proxy?: string; ignoreSSL?: boolean }
+) => Promise<any> = (url, fileName, network) => {
return global.core?.api?.downloadFile(url, fileName, network)
}
@@ -79,6 +83,12 @@ const openExternalUrl: (url: string) => Promise = (url) =>
*/
const getResourcePath: () => Promise<string> = () => global.core.api?.getResourcePath()
+/**
+ * Gets the user's home path.
+ * @returns The user's home path.
+ */
+const getUserHomePath = (): Promise<string> => global.core.api?.getUserHomePath()
+
/**
* Log to file from browser processes.
*
@@ -87,6 +97,17 @@ const getResourcePath: () => Promise = () => global.core.api?.getResourc
const log: (message: string, fileName?: string) => void = (message, fileName) =>
global.core.api?.log(message, fileName)
+/**
+ * Check whether the path is a subdirectory of another path.
+ *
+ * @param from - The path to check.
+ * @param to - The path to check against.
+ *
+ * @returns {Promise} - A promise that resolves with a boolean indicating whether the path is a subdirectory.
+ */
+const isSubdirectory: (from: string, to: string) => Promise<boolean> = (from: string, to: string) =>
+ global.core.api?.isSubdirectory(from, to)
+
/**
* Register extension point function type definition
*/
@@ -94,7 +115,7 @@ export type RegisterExtensionPoint = (
extensionName: string,
extensionId: string,
method: Function,
- priority?: number,
+ priority?: number
) => void
/**
@@ -111,5 +132,7 @@ export {
openExternalUrl,
baseName,
log,
+ isSubdirectory,
+ getUserHomePath,
FileStat,
}
diff --git a/core/src/fs.ts b/core/src/fs.ts
index ea636977a..0e570d1f5 100644
--- a/core/src/fs.ts
+++ b/core/src/fs.ts
@@ -1,4 +1,4 @@
-import { FileStat } from "./types"
+import { FileStat } from './types'
/**
* Writes data to a file at the specified path.
@@ -6,6 +6,15 @@ import { FileStat } from "./types"
*/
const writeFileSync = (...args: any[]) => global.core.api?.writeFileSync(...args)
+/**
+ * Writes blob data to a file at the specified path.
+ * @param path - The path to the file.
+ * @param data - The blob data.
+ * @returns A Promise that resolves when the data has been written.
+ */
+const writeBlob: (path: string, data: string) => Promise<any> = (path, data) =>
+ global.core.api?.writeBlob(path, data)
+
/**
* Reads the contents of a file at the specified path.
* @returns {Promise} A Promise that resolves with the contents of the file.
@@ -60,7 +69,6 @@ const syncFile: (src: string, dest: string) => Promise = (src, dest) =>
*/
const copyFileSync = (...args: any[]) => global.core.api?.copyFileSync(...args)
-
/**
* Gets the file's stats.
*
@@ -70,7 +78,6 @@ const copyFileSync = (...args: any[]) => global.core.api?.copyFileSync(...args)
const fileStat: (path: string) => Promise<FileStat | undefined> = (path) =>
global.core.api?.fileStat(path)
-
// TODO: Export `dummy` fs functions automatically
// Currently adding these manually
export const fs = {
@@ -84,5 +91,6 @@ export const fs = {
appendFileSync,
copyFileSync,
syncFile,
- fileStat
+ fileStat,
+ writeBlob,
}
diff --git a/core/src/node/api/common/builder.ts b/core/src/node/api/common/builder.ts
index a9819bce6..5c99cf4d8 100644
--- a/core/src/node/api/common/builder.ts
+++ b/core/src/node/api/common/builder.ts
@@ -2,7 +2,8 @@ import fs from 'fs'
import { JanApiRouteConfiguration, RouteConfiguration } from './configuration'
import { join } from 'path'
import { ContentType, MessageStatus, Model, ThreadMessage } from './../../../index'
-import { getJanDataFolderPath } from '../../utils'
+import { getEngineConfiguration, getJanDataFolderPath } from '../../utils'
+import { DEFAULT_CHAT_COMPLETION_URL } from './consts'
export const getBuilder = async (configuration: RouteConfiguration) => {
const directoryPath = join(getJanDataFolderPath(), configuration.dirName)
@@ -309,7 +310,7 @@ export const chatCompletions = async (request: any, reply: any) => {
const engineConfiguration = await getEngineConfiguration(requestedModel.engine)
let apiKey: string | undefined = undefined
- let apiUrl: string = 'http://127.0.0.1:3928/inferences/llamacpp/chat_completion' // default nitro url
+ let apiUrl: string = DEFAULT_CHAT_COMPLETION_URL
if (engineConfiguration) {
apiKey = engineConfiguration.api_key
@@ -320,7 +321,7 @@ export const chatCompletions = async (request: any, reply: any) => {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
- "Access-Control-Allow-Origin": "*"
+ 'Access-Control-Allow-Origin': '*',
})
const headers: Record<string, any> = {
@@ -346,13 +347,3 @@ export const chatCompletions = async (request: any, reply: any) => {
response.body.pipe(reply.raw)
}
}
-
-const getEngineConfiguration = async (engineId: string) => {
- if (engineId !== 'openai') {
- return undefined
- }
- const directoryPath = join(getJanDataFolderPath(), 'engines')
- const filePath = join(directoryPath, `${engineId}.json`)
- const data = await fs.readFileSync(filePath, 'utf-8')
- return JSON.parse(data)
-}
diff --git a/core/src/node/api/common/consts.ts b/core/src/node/api/common/consts.ts
new file mode 100644
index 000000000..bc3cfe300
--- /dev/null
+++ b/core/src/node/api/common/consts.ts
@@ -0,0 +1,19 @@
+// The PORT to use for the Nitro subprocess
+export const NITRO_DEFAULT_PORT = 3928
+
+// The HOST address to use for the Nitro subprocess
+export const LOCAL_HOST = '127.0.0.1'
+
+export const SUPPORTED_MODEL_FORMAT = '.gguf'
+
+// The URL for the Nitro subprocess
+const NITRO_HTTP_SERVER_URL = `http://${LOCAL_HOST}:${NITRO_DEFAULT_PORT}`
+// The URL for the Nitro subprocess to load a model
+export const NITRO_HTTP_LOAD_MODEL_URL = `${NITRO_HTTP_SERVER_URL}/inferences/llamacpp/loadmodel`
+// The URL for the Nitro subprocess to validate a model
+export const NITRO_HTTP_VALIDATE_MODEL_URL = `${NITRO_HTTP_SERVER_URL}/inferences/llamacpp/modelstatus`
+
+// The URL for the Nitro subprocess to kill itself
+export const NITRO_HTTP_KILL_URL = `${NITRO_HTTP_SERVER_URL}/processmanager/destroy`
+
+export const DEFAULT_CHAT_COMPLETION_URL = `http://${LOCAL_HOST}:${NITRO_DEFAULT_PORT}/inferences/llamacpp/chat_completion` // default nitro url
diff --git a/core/src/node/api/common/startStopModel.ts b/core/src/node/api/common/startStopModel.ts
new file mode 100644
index 000000000..0d4934e1c
--- /dev/null
+++ b/core/src/node/api/common/startStopModel.ts
@@ -0,0 +1,351 @@
+import fs from 'fs'
+import { join } from 'path'
+import { getJanDataFolderPath, getJanExtensionsPath, getSystemResourceInfo } from '../../utils'
+import { logServer } from '../../log'
+import { ChildProcessWithoutNullStreams, spawn } from 'child_process'
+import { Model, ModelSettingParams, PromptTemplate } from '../../../types'
+import {
+ LOCAL_HOST,
+ NITRO_DEFAULT_PORT,
+ NITRO_HTTP_KILL_URL,
+ NITRO_HTTP_LOAD_MODEL_URL,
+ NITRO_HTTP_VALIDATE_MODEL_URL,
+ SUPPORTED_MODEL_FORMAT,
+} from './consts'
+
+// The subprocess instance for Nitro
+let subprocess: ChildProcessWithoutNullStreams | undefined = undefined
+
+// TODO: move this to core type
+interface NitroModelSettings extends ModelSettingParams {
+ llama_model_path: string
+ cpu_threads: number
+}
+
+export const startModel = async (modelId: string, settingParams?: ModelSettingParams) => {
+ try {
+ await runModel(modelId, settingParams)
+
+ return {
+ message: `Model ${modelId} started`,
+ }
+ } catch (e) {
+ return {
+ error: e,
+ }
+ }
+}
+
+const runModel = async (modelId: string, settingParams?: ModelSettingParams): Promise<void> => {
+ const janDataFolderPath = getJanDataFolderPath()
+ const modelFolderFullPath = join(janDataFolderPath, 'models', modelId)
+
+ if (!fs.existsSync(modelFolderFullPath)) {
+ throw `Model not found: ${modelId}`
+ }
+
+ const files: string[] = fs.readdirSync(modelFolderFullPath)
+
+ // Look for GGUF model file
+ const ggufBinFile = files.find((file) => file.toLowerCase().includes(SUPPORTED_MODEL_FORMAT))
+
+ const modelMetadataPath = join(modelFolderFullPath, 'model.json')
+ const modelMetadata: Model = JSON.parse(fs.readFileSync(modelMetadataPath, 'utf-8'))
+
+ if (!ggufBinFile) {
+ throw 'No GGUF model file found'
+ }
+ const modelBinaryPath = join(modelFolderFullPath, ggufBinFile)
+
+ const nitroResourceProbe = await getSystemResourceInfo()
+ const nitroModelSettings: NitroModelSettings = {
+ ...modelMetadata.settings,
+ ...settingParams,
+ llama_model_path: modelBinaryPath,
+ // This is critical and requires real CPU physical core count (or performance core)
+ cpu_threads: Math.max(1, nitroResourceProbe.numCpuPhysicalCore),
+ ...(modelMetadata.settings.mmproj && {
+ mmproj: join(modelFolderFullPath, modelMetadata.settings.mmproj),
+ }),
+ }
+
+ logServer(`[NITRO]::Debug: Nitro model settings: ${JSON.stringify(nitroModelSettings)}`)
+
+ // Convert settings.prompt_template to system_prompt, user_prompt, ai_prompt
+ if (modelMetadata.settings.prompt_template) {
+ const promptTemplate = modelMetadata.settings.prompt_template
+ const prompt = promptTemplateConverter(promptTemplate)
+ if (prompt?.error) {
+ return Promise.reject(prompt.error)
+ }
+ nitroModelSettings.system_prompt = prompt.system_prompt
+ nitroModelSettings.user_prompt = prompt.user_prompt
+ nitroModelSettings.ai_prompt = prompt.ai_prompt
+ }
+
+ await runNitroAndLoadModel(modelId, nitroModelSettings)
+}
+
+// TODO: move to util
+const promptTemplateConverter = (promptTemplate: string): PromptTemplate => {
+ // Split the string using the markers
+ const systemMarker = '{system_message}'
+ const promptMarker = '{prompt}'
+
+ if (promptTemplate.includes(systemMarker) && promptTemplate.includes(promptMarker)) {
+ // Find the indices of the markers
+ const systemIndex = promptTemplate.indexOf(systemMarker)
+ const promptIndex = promptTemplate.indexOf(promptMarker)
+
+ // Extract the parts of the string
+ const system_prompt = promptTemplate.substring(0, systemIndex)
+ const user_prompt = promptTemplate.substring(systemIndex + systemMarker.length, promptIndex)
+ const ai_prompt = promptTemplate.substring(promptIndex + promptMarker.length)
+
+ // Return the split parts
+ return { system_prompt, user_prompt, ai_prompt }
+ } else if (promptTemplate.includes(promptMarker)) {
+ // Extract the parts of the string for the case where only promptMarker is present
+ const promptIndex = promptTemplate.indexOf(promptMarker)
+ const user_prompt = promptTemplate.substring(0, promptIndex)
+ const ai_prompt = promptTemplate.substring(promptIndex + promptMarker.length)
+
+ // Return the split parts
+ return { user_prompt, ai_prompt }
+ }
+
+ // Return an error if none of the conditions are met
+ return { error: 'Cannot split prompt template' }
+}
+
+const runNitroAndLoadModel = async (modelId: string, modelSettings: NitroModelSettings) => {
+  // Stop any running Nitro instance and wait for its port to be freed, then spawn Nitro, load the model, and validate it
+ const tcpPortUsed = require('tcp-port-used')
+
+ await stopModel(modelId)
+ await tcpPortUsed.waitUntilFree(NITRO_DEFAULT_PORT, 300, 5000)
+
+ /**
+   * There is a problem with the Windows process manager
+   * Should wait for a while to make sure the port is free and the subprocess is killed
+ * The tested threshold is 500ms
+ **/
+ if (process.platform === 'win32') {
+ await new Promise((resolve) => setTimeout(resolve, 500))
+ }
+
+ await spawnNitroProcess()
+ await loadLLMModel(modelSettings)
+ await validateModelStatus()
+}
+
+const spawnNitroProcess = async (): Promise<void> => {
+ logServer(`[NITRO]::Debug: Spawning Nitro subprocess...`)
+
+ let binaryFolder = join(
+ getJanExtensionsPath(),
+ '@janhq',
+ 'inference-nitro-extension',
+ 'dist',
+ 'bin'
+ )
+
+ let executableOptions = executableNitroFile()
+ const tcpPortUsed = require('tcp-port-used')
+
+ const args: string[] = ['1', LOCAL_HOST, NITRO_DEFAULT_PORT.toString()]
+ // Execute the binary
+ logServer(
+ `[NITRO]::Debug: Spawn nitro at path: ${executableOptions.executablePath}, and args: ${args}`
+ )
+ subprocess = spawn(
+ executableOptions.executablePath,
+ ['1', LOCAL_HOST, NITRO_DEFAULT_PORT.toString()],
+ {
+ cwd: binaryFolder,
+ env: {
+ ...process.env,
+ CUDA_VISIBLE_DEVICES: executableOptions.cudaVisibleDevices,
+ },
+ }
+ )
+
+ // Handle subprocess output
+ subprocess.stdout.on('data', (data: any) => {
+ logServer(`[NITRO]::Debug: ${data}`)
+ })
+
+ subprocess.stderr.on('data', (data: any) => {
+ logServer(`[NITRO]::Error: ${data}`)
+ })
+
+ subprocess.on('close', (code: any) => {
+ logServer(`[NITRO]::Debug: Nitro exited with code: ${code}`)
+ subprocess = undefined
+ })
+
+ tcpPortUsed.waitUntilUsed(NITRO_DEFAULT_PORT, 300, 30000).then(() => {
+ logServer(`[NITRO]::Debug: Nitro is ready`)
+ })
+}
+
+type NitroExecutableOptions = {
+ executablePath: string
+ cudaVisibleDevices: string
+}
+
+const executableNitroFile = (): NitroExecutableOptions => {
+ const nvidiaInfoFilePath = join(getJanDataFolderPath(), 'settings', 'settings.json')
+ let binaryFolder = join(
+ getJanExtensionsPath(),
+ '@janhq',
+ 'inference-nitro-extension',
+ 'dist',
+ 'bin'
+ )
+
+ let cudaVisibleDevices = ''
+ let binaryName = 'nitro'
+ /**
+ * The binary folder is different for each platform.
+ */
+ if (process.platform === 'win32') {
+ /**
+ * For Windows: win-cpu, win-cuda-11-7, win-cuda-12-0
+ */
+ let nvidiaInfo = JSON.parse(fs.readFileSync(nvidiaInfoFilePath, 'utf-8'))
+ if (nvidiaInfo['run_mode'] === 'cpu') {
+ binaryFolder = join(binaryFolder, 'win-cpu')
+ } else {
+ if (nvidiaInfo['cuda'].version === '12') {
+ binaryFolder = join(binaryFolder, 'win-cuda-12-0')
+ } else {
+ binaryFolder = join(binaryFolder, 'win-cuda-11-7')
+ }
+ cudaVisibleDevices = nvidiaInfo['gpu_highest_vram']
+ }
+ binaryName = 'nitro.exe'
+ } else if (process.platform === 'darwin') {
+ /**
+   * For macOS: mac-arm64 (Silicon), mac-x64 (Intel)
+ */
+ if (process.arch === 'arm64') {
+ binaryFolder = join(binaryFolder, 'mac-arm64')
+ } else {
+ binaryFolder = join(binaryFolder, 'mac-x64')
+ }
+ } else {
+ /**
+ * For Linux: linux-cpu, linux-cuda-11-7, linux-cuda-12-0
+ */
+ let nvidiaInfo = JSON.parse(fs.readFileSync(nvidiaInfoFilePath, 'utf-8'))
+ if (nvidiaInfo['run_mode'] === 'cpu') {
+ binaryFolder = join(binaryFolder, 'linux-cpu')
+ } else {
+ if (nvidiaInfo['cuda'].version === '12') {
+ binaryFolder = join(binaryFolder, 'linux-cuda-12-0')
+ } else {
+ binaryFolder = join(binaryFolder, 'linux-cuda-11-7')
+ }
+ cudaVisibleDevices = nvidiaInfo['gpu_highest_vram']
+ }
+ }
+
+ return {
+ executablePath: join(binaryFolder, binaryName),
+ cudaVisibleDevices,
+ }
+}
+
+const validateModelStatus = async (): Promise<void> => {
+ // Send a GET request to the validation URL.
+ // Retry the request up to 3 times if it fails, with a delay of 500 milliseconds between retries.
+ const fetchRT = require('fetch-retry')
+ const fetchRetry = fetchRT(fetch)
+
+ return fetchRetry(NITRO_HTTP_VALIDATE_MODEL_URL, {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ retries: 5,
+ retryDelay: 500,
+ }).then(async (res: Response) => {
+ logServer(`[NITRO]::Debug: Validate model state success with response ${JSON.stringify(res)}`)
+ // If the response is OK, check model_loaded status.
+ if (res.ok) {
+ const body = await res.json()
+ // If the model is loaded, return an empty object.
+ // Otherwise, return an object with an error message.
+ if (body.model_loaded) {
+ return Promise.resolve()
+ }
+ }
+ return Promise.reject('Validate model status failed')
+ })
+}
+
+const loadLLMModel = async (settings: NitroModelSettings): Promise<any> => {
+ logServer(`[NITRO]::Debug: Loading model with params ${JSON.stringify(settings)}`)
+ const fetchRT = require('fetch-retry')
+ const fetchRetry = fetchRT(fetch)
+
+ return fetchRetry(NITRO_HTTP_LOAD_MODEL_URL, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify(settings),
+ retries: 3,
+ retryDelay: 500,
+ })
+ .then((res: any) => {
+ logServer(`[NITRO]::Debug: Load model success with response ${JSON.stringify(res)}`)
+ return Promise.resolve(res)
+ })
+ .catch((err: any) => {
+ logServer(`[NITRO]::Error: Load model failed with error ${err}`)
+ return Promise.reject(err)
+ })
+}
+
+/**
+ * Stop model and kill nitro process.
+ */
+export const stopModel = async (_modelId: string) => {
+ if (!subprocess) {
+ return {
+ error: "Model isn't running",
+ }
+ }
+ return new Promise((resolve, reject) => {
+ const controller = new AbortController()
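+    // Abort the kill request and reject if Nitro has not shut down within 5 seconds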
+ setTimeout(() => {
+ controller.abort()
+ reject({
+        error: 'Failed to stop model: Timed out',
+ })
+ }, 5000)
+ const tcpPortUsed = require('tcp-port-used')
+ logServer(`[NITRO]::Debug: Request to kill Nitro`)
+
+ fetch(NITRO_HTTP_KILL_URL, {
+ method: 'DELETE',
+ signal: controller.signal,
+ })
+ .then(() => {
+ subprocess?.kill()
+ subprocess = undefined
+ })
+ .catch(() => {
+ // don't need to do anything, we still kill the subprocess
+ })
+ .then(() => tcpPortUsed.waitUntilFree(NITRO_DEFAULT_PORT, 300, 5000))
+ .then(() => logServer(`[NITRO]::Debug: Nitro process is terminated`))
+ .then(() =>
+ resolve({
+ message: 'Model stopped',
+ })
+ )
+ })
+}
diff --git a/core/src/node/api/routes/common.ts b/core/src/node/api/routes/common.ts
index a6c65a382..8887755fe 100644
--- a/core/src/node/api/routes/common.ts
+++ b/core/src/node/api/routes/common.ts
@@ -10,6 +10,10 @@ import {
} from '../common/builder'
import { JanApiRouteConfiguration } from '../common/configuration'
+import { startModel, stopModel } from '../common/startStopModel'
+import { ModelSettingParams } from '../../../types'
+import { getJanDataFolderPath } from '../../utils'
+import { normalizeFilePath } from '../../path'
export const commonRouter = async (app: HttpServer) => {
// Common Routes
@@ -17,26 +21,47 @@ export const commonRouter = async (app: HttpServer) => {
app.get(`/${key}`, async (_request) => getBuilder(JanApiRouteConfiguration[key]))
app.get(`/${key}/:id`, async (request: any) =>
- retrieveBuilder(JanApiRouteConfiguration[key], request.params.id),
+ retrieveBuilder(JanApiRouteConfiguration[key], request.params.id)
)
app.delete(`/${key}/:id`, async (request: any) =>
- deleteBuilder(JanApiRouteConfiguration[key], request.params.id),
+ deleteBuilder(JanApiRouteConfiguration[key], request.params.id)
)
})
// Download Model Routes
app.get(`/models/download/:modelId`, async (request: any) =>
- downloadModel(request.params.modelId, { ignoreSSL: request.query.ignoreSSL === 'true', proxy: request.query.proxy }),
+ downloadModel(request.params.modelId, {
+ ignoreSSL: request.query.ignoreSSL === 'true',
+ proxy: request.query.proxy,
+ })
)
+ app.put(`/models/:modelId/start`, async (request: any) => {
+ let settingParams: ModelSettingParams | undefined = undefined
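+    // An empty request body starts the model with its default settings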
+ if (Object.keys(request.body).length !== 0) {
+ settingParams = JSON.parse(request.body) as ModelSettingParams
+ }
+
+ return startModel(request.params.modelId, settingParams)
+ })
+
+ app.put(`/models/:modelId/stop`, async (request: any) => stopModel(request.params.modelId))
+
// Chat Completion Routes
app.post(`/chat/completions`, async (request: any, reply: any) => chatCompletions(request, reply))
// App Routes
app.post(`/app/${AppRoute.joinPath}`, async (request: any, reply: any) => {
const args = JSON.parse(request.body) as any[]
- reply.send(JSON.stringify(join(...args[0])))
+
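+    // Resolve file:/ (or file:\) prefixed arguments against the Jan data folder before joining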
+ const paths = args[0].map((arg: string) =>
+ typeof arg === 'string' && (arg.startsWith(`file:/`) || arg.startsWith(`file:\\`))
+ ? join(getJanDataFolderPath(), normalizeFilePath(arg))
+ : arg
+ )
+
+ reply.send(JSON.stringify(join(...paths)))
})
app.post(`/app/${AppRoute.baseName}`, async (request: any, reply: any) => {
diff --git a/core/src/node/api/routes/download.ts b/core/src/node/api/routes/download.ts
index b4e11f957..ab8c0bd37 100644
--- a/core/src/node/api/routes/download.ts
+++ b/core/src/node/api/routes/download.ts
@@ -4,55 +4,55 @@ import { DownloadManager } from '../../download'
import { HttpServer } from '../HttpServer'
import { createWriteStream } from 'fs'
import { getJanDataFolderPath } from '../../utils'
-import { normalizeFilePath } from "../../path";
+import { normalizeFilePath } from '../../path'
export const downloadRouter = async (app: HttpServer) => {
app.post(`/${DownloadRoute.downloadFile}`, async (req, res) => {
- const strictSSL = !(req.query.ignoreSSL === "true");
- const proxy = req.query.proxy?.startsWith("http") ? req.query.proxy : undefined;
- const body = JSON.parse(req.body as any);
+ const strictSSL = !(req.query.ignoreSSL === 'true')
+ const proxy = req.query.proxy?.startsWith('http') ? req.query.proxy : undefined
+ const body = JSON.parse(req.body as any)
const normalizedArgs = body.map((arg: any) => {
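+      // Only rewrite file:-prefixed arguments; other values (such as the source URL) pass through unchanged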
- if (typeof arg === "string") {
- return join(getJanDataFolderPath(), normalizeFilePath(arg));
+ if (typeof arg === 'string' && arg.startsWith('file:')) {
+ return join(getJanDataFolderPath(), normalizeFilePath(arg))
}
- return arg;
- });
+ return arg
+ })
- const localPath = normalizedArgs[1];
- const fileName = localPath.split("/").pop() ?? "";
+ const localPath = normalizedArgs[1]
+ const fileName = localPath.split('/').pop() ?? ''
- const request = require("request");
- const progress = require("request-progress");
+ const request = require('request')
+ const progress = require('request-progress')
- const rq = request({ url: normalizedArgs[0], strictSSL, proxy });
+ const rq = request({ url: normalizedArgs[0], strictSSL, proxy })
progress(rq, {})
- .on("progress", function (state: any) {
- console.log("download onProgress", state);
+ .on('progress', function (state: any) {
+ console.log('download onProgress', state)
})
- .on("error", function (err: Error) {
- console.log("download onError", err);
+ .on('error', function (err: Error) {
+ console.log('download onError', err)
})
- .on("end", function () {
- console.log("download onEnd");
+ .on('end', function () {
+ console.log('download onEnd')
})
- .pipe(createWriteStream(normalizedArgs[1]));
+ .pipe(createWriteStream(normalizedArgs[1]))
- DownloadManager.instance.setRequest(fileName, rq);
- });
+ DownloadManager.instance.setRequest(fileName, rq)
+ })
app.post(`/${DownloadRoute.abortDownload}`, async (req, res) => {
- const body = JSON.parse(req.body as any);
+ const body = JSON.parse(req.body as any)
const normalizedArgs = body.map((arg: any) => {
- if (typeof arg === "string") {
- return join(getJanDataFolderPath(), normalizeFilePath(arg));
+ if (typeof arg === 'string' && arg.startsWith('file:')) {
+ return join(getJanDataFolderPath(), normalizeFilePath(arg))
}
- return arg;
- });
+ return arg
+ })
- const localPath = normalizedArgs[0];
- const fileName = localPath.split("/").pop() ?? "";
- const rq = DownloadManager.instance.networkRequests[fileName];
- DownloadManager.instance.networkRequests[fileName] = undefined;
- rq?.abort();
- });
-};
+ const localPath = normalizedArgs[0]
+ const fileName = localPath.split('/').pop() ?? ''
+ const rq = DownloadManager.instance.networkRequests[fileName]
+ DownloadManager.instance.networkRequests[fileName] = undefined
+ rq?.abort()
+ })
+}
diff --git a/core/src/node/api/routes/fileManager.ts b/core/src/node/api/routes/fileManager.ts
index 159c23a0c..66056444e 100644
--- a/core/src/node/api/routes/fileManager.ts
+++ b/core/src/node/api/routes/fileManager.ts
@@ -8,5 +8,7 @@ export const fsRouter = async (app: HttpServer) => {
app.post(`/app/${FileManagerRoute.getResourcePath}`, async (request: any, reply: any) => {})
+ app.post(`/app/${FileManagerRoute.getUserHomePath}`, async (request: any, reply: any) => {})
+
app.post(`/app/${FileManagerRoute.fileStat}`, async (request: any, reply: any) => {})
}
diff --git a/core/src/node/api/routes/fs.ts b/core/src/node/api/routes/fs.ts
index 5f511af27..c5404ccce 100644
--- a/core/src/node/api/routes/fs.ts
+++ b/core/src/node/api/routes/fs.ts
@@ -2,6 +2,7 @@ import { FileSystemRoute } from '../../../api'
import { join } from 'path'
import { HttpServer } from '../HttpServer'
import { getJanDataFolderPath } from '../../utils'
+import { normalizeFilePath } from '../../path'
export const fsRouter = async (app: HttpServer) => {
const moduleName = 'fs'
@@ -13,10 +14,10 @@ export const fsRouter = async (app: HttpServer) => {
const result = await import(moduleName).then((mdl) => {
return mdl[route](
...body.map((arg: any) =>
- typeof arg === 'string' && arg.includes('file:/')
- ? join(getJanDataFolderPath(), arg.replace('file:/', ''))
- : arg,
- ),
+ typeof arg === 'string' && (arg.startsWith(`file:/`) || arg.startsWith(`file:\\`))
+ ? join(getJanDataFolderPath(), normalizeFilePath(arg))
+ : arg
+ )
)
})
res.status(200).send(result)
diff --git a/core/src/node/log.ts b/core/src/node/log.ts
index 8a5155d8d..6f2c2f80f 100644
--- a/core/src/node/log.ts
+++ b/core/src/node/log.ts
@@ -2,38 +2,36 @@ import fs from 'fs'
import util from 'util'
import { getAppLogPath, getServerLogPath } from './utils'
-export const log = function (message: string) {
- const appLogPath = getAppLogPath()
+export const log = (message: string) => {
+ const path = getAppLogPath()
if (!message.startsWith('[')) {
message = `[APP]::${message}`
}
message = `${new Date().toISOString()} ${message}`
- if (fs.existsSync(appLogPath)) {
- var log_file = fs.createWriteStream(appLogPath, {
- flags: 'a',
- })
- log_file.write(util.format(message) + '\n')
- log_file.close()
- console.debug(message)
- }
+ writeLog(message, path)
}
-export const logServer = function (message: string) {
- const serverLogPath = getServerLogPath()
+export const logServer = (message: string) => {
+ const path = getServerLogPath()
if (!message.startsWith('[')) {
message = `[SERVER]::${message}`
}
message = `${new Date().toISOString()} ${message}`
+ writeLog(message, path)
+}
- if (fs.existsSync(serverLogPath)) {
- var log_file = fs.createWriteStream(serverLogPath, {
+const writeLog = (message: string, logPath: string) => {
+ if (!fs.existsSync(logPath)) {
+ fs.writeFileSync(logPath, message)
+ } else {
+ const logFile = fs.createWriteStream(logPath, {
flags: 'a',
})
- log_file.write(util.format(message) + '\n')
- log_file.close()
+ logFile.write(util.format(message) + '\n')
+ logFile.close()
console.debug(message)
}
}
diff --git a/core/src/node/utils/index.ts b/core/src/node/utils/index.ts
index 00db04c9b..4bcbf13b1 100644
--- a/core/src/node/utils/index.ts
+++ b/core/src/node/utils/index.ts
@@ -1,16 +1,18 @@
-import { AppConfiguration } from "../../types";
-import { join } from "path";
-import fs from "fs";
-import os from "os";
+import { AppConfiguration, SystemResourceInfo } from '../../types'
+import { join } from 'path'
+import fs from 'fs'
+import os from 'os'
+import { log, logServer } from '../log'
+import childProcess from 'child_process'
// TODO: move this to core
-const configurationFileName = "settings.json";
+const configurationFileName = 'settings.json'
// TODO: do not specify app name in framework module
-const defaultJanDataFolder = join(os.homedir(), "jan");
+const defaultJanDataFolder = join(os.homedir(), 'jan')
const defaultAppConfig: AppConfiguration = {
data_folder: defaultJanDataFolder,
-};
+}
/**
* Getting App Configurations.
@@ -20,39 +22,39 @@ const defaultAppConfig: AppConfiguration = {
export const getAppConfigurations = (): AppConfiguration => {
// Retrieve Application Support folder path
// Fallback to user home directory if not found
- const configurationFile = getConfigurationFilePath();
+ const configurationFile = getConfigurationFilePath()
if (!fs.existsSync(configurationFile)) {
// create default app config if we don't have one
- console.debug(`App config not found, creating default config at ${configurationFile}`);
- fs.writeFileSync(configurationFile, JSON.stringify(defaultAppConfig));
- return defaultAppConfig;
+ console.debug(`App config not found, creating default config at ${configurationFile}`)
+ fs.writeFileSync(configurationFile, JSON.stringify(defaultAppConfig))
+ return defaultAppConfig
}
try {
const appConfigurations: AppConfiguration = JSON.parse(
- fs.readFileSync(configurationFile, "utf-8"),
- );
- return appConfigurations;
+ fs.readFileSync(configurationFile, 'utf-8')
+ )
+ return appConfigurations
} catch (err) {
- console.error(`Failed to read app config, return default config instead! Err: ${err}`);
- return defaultAppConfig;
+ console.error(`Failed to read app config, return default config instead! Err: ${err}`)
+ return defaultAppConfig
}
-};
+}
const getConfigurationFilePath = () =>
join(
- global.core?.appPath() || process.env[process.platform == "win32" ? "USERPROFILE" : "HOME"],
- configurationFileName,
- );
+ global.core?.appPath() || process.env[process.platform == 'win32' ? 'USERPROFILE' : 'HOME'],
+ configurationFileName
+ )
export const updateAppConfiguration = (configuration: AppConfiguration): Promise<void> => {
- const configurationFile = getConfigurationFilePath();
- console.debug("updateAppConfiguration, configurationFile: ", configurationFile);
+ const configurationFile = getConfigurationFilePath()
+ console.debug('updateAppConfiguration, configurationFile: ', configurationFile)
- fs.writeFileSync(configurationFile, JSON.stringify(configuration));
- return Promise.resolve();
-};
+ fs.writeFileSync(configurationFile, JSON.stringify(configuration))
+ return Promise.resolve()
+}
/**
* Utility function to get server log path
@@ -60,13 +62,13 @@ export const updateAppConfiguration = (configuration: AppConfiguration): Promise
* @returns {string} The log path.
*/
export const getServerLogPath = (): string => {
- const appConfigurations = getAppConfigurations();
- const logFolderPath = join(appConfigurations.data_folder, "logs");
+ const appConfigurations = getAppConfigurations()
+ const logFolderPath = join(appConfigurations.data_folder, 'logs')
if (!fs.existsSync(logFolderPath)) {
- fs.mkdirSync(logFolderPath, { recursive: true });
+ fs.mkdirSync(logFolderPath, { recursive: true })
}
- return join(logFolderPath, "server.log");
-};
+ return join(logFolderPath, 'server.log')
+}
/**
* Utility function to get app log path
@@ -74,13 +76,13 @@ export const getServerLogPath = (): string => {
* @returns {string} The log path.
*/
export const getAppLogPath = (): string => {
- const appConfigurations = getAppConfigurations();
- const logFolderPath = join(appConfigurations.data_folder, "logs");
+ const appConfigurations = getAppConfigurations()
+ const logFolderPath = join(appConfigurations.data_folder, 'logs')
if (!fs.existsSync(logFolderPath)) {
- fs.mkdirSync(logFolderPath, { recursive: true });
+ fs.mkdirSync(logFolderPath, { recursive: true })
}
- return join(logFolderPath, "app.log");
-};
+ return join(logFolderPath, 'app.log')
+}
/**
* Utility function to get data folder path
@@ -88,9 +90,9 @@ export const getAppLogPath = (): string => {
* @returns {string} The data folder path.
*/
export const getJanDataFolderPath = (): string => {
- const appConfigurations = getAppConfigurations();
- return appConfigurations.data_folder;
-};
+ const appConfigurations = getAppConfigurations()
+ return appConfigurations.data_folder
+}
/**
* Utility function to get extension path
@@ -98,6 +100,70 @@ export const getJanDataFolderPath = (): string => {
* @returns {string} The extensions path.
*/
export const getJanExtensionsPath = (): string => {
- const appConfigurations = getAppConfigurations();
- return join(appConfigurations.data_folder, "extensions");
-};
+ const appConfigurations = getAppConfigurations()
+ return join(appConfigurations.data_folder, 'extensions')
+}
+
+/**
+ * Utility function to get the physical CPU count
+ *
+ * @returns {number} The physical cpu count.
+ */
+export const physicalCpuCount = async (): Promise<number> => {
+ const platform = os.platform()
+ if (platform === 'linux') {
+ const output = await exec('lscpu -p | egrep -v "^#" | sort -u -t, -k 2,4 | wc -l')
+ return parseInt(output.trim(), 10)
+ } else if (platform === 'darwin') {
+ const output = await exec('sysctl -n hw.physicalcpu_max')
+ return parseInt(output.trim(), 10)
+ } else if (platform === 'win32') {
+ const output = await exec('WMIC CPU Get NumberOfCores')
+ return output
+ .split(os.EOL)
+ .map((line: string) => parseInt(line))
+ .filter((value: number) => !isNaN(value))
+ .reduce((sum: number, number: number) => sum + number, 1)
+ } else {
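+    // Fallback: assume Intel CPUs report hyperthreaded logical cores and count every other one; count all cores otherwise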
+ const cores = os.cpus().filter((cpu: any, index: number) => {
+ const hasHyperthreading = cpu.model.includes('Intel')
+ const isOdd = index % 2 === 1
+ return !hasHyperthreading || isOdd
+ })
+ return cores.length
+ }
+}
+
+const exec = async (command: string): Promise<string> => {
+ return new Promise((resolve, reject) => {
+ childProcess.exec(command, { encoding: 'utf8' }, (error, stdout) => {
+ if (error) {
+ reject(error)
+ } else {
+ resolve(stdout)
+ }
+ })
+ })
+}
+
+export const getSystemResourceInfo = async (): Promise<SystemResourceInfo> => {
+ const cpu = await physicalCpuCount()
+  const message = `[NITRO]::CPU information - ${cpu}`
+ log(message)
+ logServer(message)
+
+ return {
+ numCpuPhysicalCore: cpu,
+ memAvailable: 0, // TODO: this should not be 0
+ }
+}
+
+export const getEngineConfiguration = async (engineId: string) => {
+ if (engineId !== 'openai') {
+ return undefined
+ }
+ const directoryPath = join(getJanDataFolderPath(), 'engines')
+ const filePath = join(directoryPath, `${engineId}.json`)
+ const data = fs.readFileSync(filePath, 'utf-8')
+ return JSON.parse(data)
+}
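A minimal usage sketch of the new node-side helpers, assuming they are re-exported from '@janhq/core/node' alongside getJanDataFolderPath, and that the openai engine file carries an api_key field (the field read by readEmbeddingEngine later in this diff):

import { getSystemResourceInfo, getEngineConfiguration } from '@janhq/core/node'

const planInference = async () => {
  // Physical (not logical) cores decide how many inference threads to spawn
  const { numCpuPhysicalCore } = await getSystemResourceInfo()
  const threads = Math.max(1, numCpuPhysicalCore - 1)

  // Only the 'openai' engine has an on-disk configuration; other ids resolve to undefined
  const engineConfig = await getEngineConfiguration('openai')
  return { threads, apiKey: engineConfig?.api_key }
}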
diff --git a/core/src/types/assistant/assistantEntity.ts b/core/src/types/assistant/assistantEntity.ts
index 91bb2bb22..733dbea8d 100644
--- a/core/src/types/assistant/assistantEntity.ts
+++ b/core/src/types/assistant/assistantEntity.ts
@@ -2,6 +2,13 @@
* Assistant type defines the shape of an assistant object.
* @stored
*/
+
+export type AssistantTool = {
+ type: string
+ enabled: boolean
+ settings: any
+}
+
export type Assistant = {
/** Represents the avatar of the user. */
avatar: string
@@ -22,7 +29,7 @@ export type Assistant = {
/** Represents the instructions for the object. */
instructions?: string
/** Represents the tools associated with the object. */
- tools?: any
+ tools?: AssistantTool[]
/** Represents the file identifiers associated with the object. */
file_ids: string[]
/** Represents the metadata of the object. */
diff --git a/core/src/types/config/appConfigEvent.ts b/core/src/types/config/appConfigEvent.ts
new file mode 100644
index 000000000..50e33cfa2
--- /dev/null
+++ b/core/src/types/config/appConfigEvent.ts
@@ -0,0 +1,6 @@
+/**
+ * App configuration event name
+ */
+export enum AppConfigurationEventName {
+ OnConfigurationUpdate = 'OnConfigurationUpdate',
+}
diff --git a/core/src/types/config/index.ts b/core/src/types/config/index.ts
index 0fa3645aa..d2e182b99 100644
--- a/core/src/types/config/index.ts
+++ b/core/src/types/config/index.ts
@@ -1 +1,2 @@
export * from './appConfigEntity'
+export * from './appConfigEvent'
diff --git a/core/src/types/index.ts b/core/src/types/index.ts
index 3bdcb5421..ee6f4ef08 100644
--- a/core/src/types/index.ts
+++ b/core/src/types/index.ts
@@ -6,3 +6,4 @@ export * from './inference'
export * from './monitoring'
export * from './file'
export * from './config'
+export * from './miscellaneous'
diff --git a/core/src/types/inference/inferenceEntity.ts b/core/src/types/inference/inferenceEntity.ts
index 58b838ae7..c37e3b079 100644
--- a/core/src/types/inference/inferenceEntity.ts
+++ b/core/src/types/inference/inferenceEntity.ts
@@ -1,3 +1,5 @@
+import { ContentType, ContentValue } from '../message'
+
/**
* The role of the author of this message.
*/
@@ -13,7 +15,32 @@ export enum ChatCompletionRole {
*/
export type ChatCompletionMessage = {
/** The contents of the message. **/
- content?: string
+ content?: ChatCompletionMessageContent
/** The role of the author of this message. **/
role: ChatCompletionRole
}
+
+export type ChatCompletionMessageContent =
+ | string
+ | (ChatCompletionMessageContentText &
+ ChatCompletionMessageContentImage &
+ ChatCompletionMessageContentDoc)[]
+
+export enum ChatCompletionMessageContentType {
+ Text = 'text',
+ Image = 'image_url',
+ Doc = 'doc_url',
+}
+
+export type ChatCompletionMessageContentText = {
+ type: ChatCompletionMessageContentType
+ text: string
+}
+export type ChatCompletionMessageContentImage = {
+ type: ChatCompletionMessageContentType
+ image_url: { url: string }
+}
+export type ChatCompletionMessageContentDoc = {
+ type: ChatCompletionMessageContentType
+ doc_url: { url: string }
+}
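A hedged sketch of a multimodal message built from the new content types; the file URL is a placeholder, and each element carries only the field matching its declared type, mirroring how the assistant extension reads content[0].text and content[1]?.doc_url?.url later in this diff:

const userMessage = {
  role: ChatCompletionRole.User,
  content: [
    { type: ChatCompletionMessageContentType.Text, text: 'Summarize this file' },
    {
      type: ChatCompletionMessageContentType.Doc,
      doc_url: { url: 'file://threads/<thread-id>/files/<message-id>.pdf' },
    },
  ],
}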
diff --git a/core/src/types/message/messageEntity.ts b/core/src/types/message/messageEntity.ts
index 199743796..87e4b1997 100644
--- a/core/src/types/message/messageEntity.ts
+++ b/core/src/types/message/messageEntity.ts
@@ -1,5 +1,6 @@
import { ChatCompletionMessage, ChatCompletionRole } from '../inference'
import { ModelInfo } from '../model'
+import { Thread } from '../thread'
/**
* The `ThreadMessage` type defines the shape of a thread's message object.
@@ -35,7 +36,10 @@ export type ThreadMessage = {
export type MessageRequest = {
id?: string
- /** The thread id of the message request. **/
+ /**
+ * @deprecated Use thread object instead
+ * The thread id of the message request.
+ */
threadId: string
/**
@@ -48,6 +52,10 @@ export type MessageRequest = {
/** Settings for constructing a chat completion request **/
model?: ModelInfo
+
+ /** The thread this message belongs to. **/
+ // TODO: deprecate threadId field
+ thread?: Thread
}
/**
@@ -62,7 +70,7 @@ export enum MessageStatus {
/** Message loaded with error. **/
Error = 'error',
/** Message is cancelled streaming */
- Stopped = "stopped"
+ Stopped = 'stopped',
}
/**
@@ -71,6 +79,7 @@ export enum MessageStatus {
export enum ContentType {
Text = 'text',
Image = 'image',
+ Pdf = 'pdf',
}
/**
@@ -80,6 +89,8 @@ export enum ContentType {
export type ContentValue = {
value: string
annotations: string[]
+ name?: string
+ size?: number
}
/**
diff --git a/core/src/types/miscellaneous/index.ts b/core/src/types/miscellaneous/index.ts
new file mode 100644
index 000000000..02c973323
--- /dev/null
+++ b/core/src/types/miscellaneous/index.ts
@@ -0,0 +1,2 @@
+export * from './systemResourceInfo'
+export * from './promptTemplate'
diff --git a/core/src/types/miscellaneous/promptTemplate.ts b/core/src/types/miscellaneous/promptTemplate.ts
new file mode 100644
index 000000000..a6743c67c
--- /dev/null
+++ b/core/src/types/miscellaneous/promptTemplate.ts
@@ -0,0 +1,6 @@
+export type PromptTemplate = {
+ system_prompt?: string
+ ai_prompt?: string
+ user_prompt?: string
+ error?: string
+}
diff --git a/core/src/types/miscellaneous/systemResourceInfo.ts b/core/src/types/miscellaneous/systemResourceInfo.ts
new file mode 100644
index 000000000..1472cda47
--- /dev/null
+++ b/core/src/types/miscellaneous/systemResourceInfo.ts
@@ -0,0 +1,4 @@
+export type SystemResourceInfo = {
+ numCpuPhysicalCore: number
+ memAvailable: number
+}
diff --git a/core/src/types/model/modelEntity.ts b/core/src/types/model/modelEntity.ts
index c60ab7650..644c34dfb 100644
--- a/core/src/types/model/modelEntity.ts
+++ b/core/src/types/model/modelEntity.ts
@@ -7,6 +7,7 @@ export type ModelInfo = {
settings: ModelSettingParams
parameters: ModelRuntimeParams
engine?: InferenceEngine
+ proxyEngine?: InferenceEngine
}
/**
@@ -18,7 +19,8 @@ export enum InferenceEngine {
nitro = 'nitro',
openai = 'openai',
triton_trtllm = 'triton_trtllm',
- hf_endpoint = 'hf_endpoint',
+
+ tool_retrieval_enabled = 'tool_retrieval_enabled',
}
export type ModelArtifact = {
@@ -90,6 +92,13 @@ export type Model = {
* The model engine.
*/
engine: InferenceEngine
+
+ proxyEngine?: InferenceEngine
+
+ /**
+ * Is multimodal or not.
+ */
+ visionModel?: boolean
}
export type ModelMetadata = {
@@ -114,6 +123,7 @@ export type ModelSettingParams = {
user_prompt?: string
llama_model_path?: string
mmproj?: string
+ cont_batching?: boolean
}
/**
@@ -129,4 +139,5 @@ export type ModelRuntimeParams = {
stop?: string[]
frequency_penalty?: number
presence_penalty?: number
+ engine?: string
}
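A hedged sketch of how the new proxyEngine field is meant to be used (baseModelInfo is a placeholder for the remaining ModelInfo fields); the mechanism mirrors handleMessageRequest in the assistant extension below, which swaps engine for proxyEngine before re-emitting the request:

const retrievalModelInfo = {
  ...baseModelInfo, // placeholder for the other ModelInfo fields
  // The retrieval tool intercepts the request first...
  engine: InferenceEngine.tool_retrieval_enabled,
  // ...then forwards the rewritten prompt to the actual inference engine
  proxyEngine: InferenceEngine.nitro,
}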
diff --git a/core/src/types/thread/index.ts b/core/src/types/thread/index.ts
index c6ff6204a..32155e1cd 100644
--- a/core/src/types/thread/index.ts
+++ b/core/src/types/thread/index.ts
@@ -1,2 +1,3 @@
export * from './threadEntity'
export * from './threadInterface'
+export * from './threadEvent'
diff --git a/core/src/types/thread/threadEntity.ts b/core/src/types/thread/threadEntity.ts
index 4ff3aa1fc..dd88b10ec 100644
--- a/core/src/types/thread/threadEntity.ts
+++ b/core/src/types/thread/threadEntity.ts
@@ -1,3 +1,4 @@
+import { AssistantTool } from '../assistant'
import { ModelInfo } from '../model'
/**
@@ -30,6 +31,7 @@ export type ThreadAssistantInfo = {
assistant_name: string
model: ModelInfo
instructions?: string
+ tools?: AssistantTool[]
}
/**
@@ -41,5 +43,4 @@ export type ThreadState = {
waitingForResponse: boolean
error?: Error
lastMessage?: string
- isFinishInit?: boolean
}
diff --git a/core/src/types/thread/threadEvent.ts b/core/src/types/thread/threadEvent.ts
new file mode 100644
index 000000000..4b19b09c1
--- /dev/null
+++ b/core/src/types/thread/threadEvent.ts
@@ -0,0 +1,4 @@
+export enum ThreadEvent {
+ /** The `OnThreadStarted` event is emitted when a thread is started. */
+ OnThreadStarted = 'OnThreadStarted',
+}
diff --git a/docs/.env.example b/docs/.env.example
index 6755f2520..b4a7fa5f1 100644
--- a/docs/.env.example
+++ b/docs/.env.example
@@ -1,5 +1,5 @@
GTM_ID=xxxx
-POSTHOG_PROJECT_API_KEY=xxxx
-POSTHOG_APP_URL=xxxx
+UMAMI_PROJECT_API_KEY=xxxx
+UMAMI_APP_URL=xxxx
ALGOLIA_API_KEY=xxxx
ALGOLIA_APP_ID=xxxx
\ No newline at end of file
diff --git a/docs/docs/template/QA_script.md b/docs/docs/template/QA_script.md
index 05dbed2b4..bba667bcd 100644
--- a/docs/docs/template/QA_script.md
+++ b/docs/docs/template/QA_script.md
@@ -1,6 +1,6 @@
# [Release Version] QA Script
-**Release Version:**
+**Release Version:** v0.4.6
**Operating System:**
@@ -25,10 +25,10 @@
### 3. Users uninstall app
-- [ ] :key: Check that the uninstallation process removes all components of the app from the system.
+- [ ] :key::warning: Check that the uninstallation process removes the app successfully from the system.
- [ ] Clean the Jan root directory and open the app to check if it creates all the necessary folders, especially models and extensions.
- [ ] When updating the app, check if the `/models` directory has any JSON files that change according to the update.
-- [ ] Verify if updating the app also updates extensions correctly (test functionality changes; support notifications for necessary tests with each version related to extensions update).
+- [ ] Verify if updating the app also updates extensions correctly (test functionality changes, support notifications for necessary tests with each version related to extensions update).
### 4. Users close app
@@ -60,49 +60,45 @@
- [ ] :key: Ensure that the conversation thread is maintained without any loss of data upon sending multiple messages.
- [ ] Test for the ability to send different types of messages (e.g., text, emojis, code blocks).
- [ ] :key: Validate the scroll functionality in the chat window for lengthy conversations.
-- [ ] Check if the user can renew responses multiple times.
- [ ] Check if the user can copy the response.
- [ ] Check if the user can delete responses.
-- [ ] :warning: Test if the user deletes the message midway, then the assistant stops that response.
- [ ] :key: Check the `clear message` button works.
- [ ] :key: Check the `delete entire chat` works.
-- [ ] :warning: Check if deleting all the chat retains the system prompt.
+- [ ] Check if deleting all the chat retains the system prompt.
- [ ] Check the output format of the AI (code blocks, JSON, markdown, ...).
- [ ] :key: Validate that there is appropriate error handling and messaging if the assistant fails to respond.
- [ ] Test assistant's ability to maintain context over multiple exchanges.
- [ ] :key: Check the `create new chat` button works correctly
- [ ] Confirm that by changing `models` mid-thread the app can still handle it.
-- [ ] Check that by changing `instructions` mid-thread the app can still handle it.
-- [ ] Check the `regenerate` button renews the response.
-- [ ] Check the `Instructions` update correctly after the user updates it midway.
+- [ ] Check the `regenerate` button renews the response (single / multiple times).
+- [ ] Check the `Instructions` update correctly after the user updates it midway (mid-thread).
### 2. Users can customize chat settings like model parameters via both the GUI & thread.json
-- [ ] :key: Confirm that the chat settings options are accessible via the GUI.
+- [ ] :key: Confirm that the Threads settings options are accessible.
- [ ] Test the functionality to adjust model parameters (e.g., Temperature, Top K, Top P) from the GUI and verify they are reflected in the chat behavior.
- [ ] :key: Ensure that changes can be saved and persisted between sessions.
- [ ] Validate that users can access and modify the thread.json file.
- [ ] :key: Check that changes made in thread.json are correctly applied to the chat session upon reload or restart.
-- [ ] Verify if there is a revert option to go back to previous settings after changes are made.
-- [ ] Test for user feedback or confirmation after saving changes to settings.
- [ ] Check the maximum and minimum limits of the adjustable parameters and how they affect the assistant's responses.
- [ ] :key: Validate user permissions for those who can change settings and persist them.
- [ ] :key: Ensure that users switch between threads with different models, the app can handle it.
-### 3. Users can click on a history thread
+### 3. Model dropdown
+- [ ] :key: Model list should highlight recommended models based on the user's RAM
+- [ ] Model size should be displayed (for both installed and imported models)
+### 4. Users can click on a history thread
- [ ] Test the ability to click on any thread in the history panel.
- [ ] :key: Verify that clicking a thread brings up the past conversation in the main chat window.
- [ ] :key: Ensure that the selected thread is highlighted or otherwise indicated in the history panel.
- [ ] Confirm that the chat window displays the entire conversation from the selected history thread without any missing messages.
- [ ] :key: Check the performance and accuracy of the history feature when dealing with a large number of threads.
- [ ] Validate that historical threads reflect the exact state of the chat at that time, including settings.
-- [ ] :key: :warning: Test the search functionality within the history panel for quick navigation.
- [ ] :key: Verify the ability to delete or clean old threads.
- [ ] :key: Confirm that changing the title of the thread updates correctly.
-### 4. Users can config instructions for the assistant.
-
+### 5. Users can configure instructions for the assistant.
- [ ] Ensure there is a clear interface to input or change instructions for the assistant.
- [ ] Test if the instructions set by the user are being followed by the assistant in subsequent conversations.
- [ ] :key: Validate that changes to instructions are updated in real time and do not require a restart of the application or session.
@@ -112,6 +108,8 @@
- [ ] Validate that instructions can be saved with descriptive names for easy retrieval.
- [ ] :key: Check if the assistant can handle conflicting instructions and how it resolves them.
- [ ] Ensure that instruction configurations are documented for user reference.
+- [ ] :key: RAG - Users can import documents and the system should process queries about the uploaded file, providing accurate and appropriate responses in the conversation thread.
+
## D. Hub
@@ -125,8 +123,7 @@
- [ ] Display the best model for their RAM at the top.
- [ ] :key: Ensure that models are labeled with RAM requirements and compatibility.
-- [ ] :key: Validate that the download function is disabled for models that exceed the user's system capabilities.
-- [ ] Test that the platform provides alternative recommendations for models not suitable due to RAM limitations.
+- [ ] :warning: Test that the platform provides alternative recommendations for models not suitable due to RAM limitations.
- [ ] :key: Check the download model functionality and validate if the cancel download feature works correctly.
### 3. Users can download models via a HuggingFace URL (coming soon)
@@ -139,7 +136,7 @@
- [ ] :key: Have clear instructions so users can do their own.
- [ ] :key: Ensure the new model updates after restarting the app.
-- [ ] Ensure it raises clear errors for users to fix the problem while adding a new model.
+- [ ] :warning: Ensure it raises clear errors for users to fix the problem while adding a new model.
### 5. Users can use the model as they want
@@ -149,9 +146,13 @@
- [ ] Check if starting another model stops the other model entirely.
- [ ] Check the `Explore models` navigate correctly to the model panel.
- [ ] :key: Check when deleting a model it will delete all the files on the user's computer.
-- [ ] The recommended tags should present right for the user's hardware.
+- [ ] :warning: The recommended tags should display correctly for the user's hardware.
- [ ] Assess that the descriptions of models are accurate and informative.
+### 6. Users can Integrate With a Remote Server
+- [ ] :key: Import an OpenAI GPT model (https://jan.ai/guides/using-models/integrate-with-remote-server/) and verify the model is displayed in the Hub / Thread dropdown
+- [ ] Users can use the remote model properly
+
## E. System Monitor
### 1. Users can see disk and RAM utilization
@@ -181,7 +182,7 @@
- [ ] Confirm that the application saves the theme preference and persists it across sessions.
- [ ] Validate that all elements of the UI are compatible with the theme changes and maintain legibility and contrast.
-### 2. Users change the extensions
+### 2. Users change the extensions [TBU]
- [ ] Confirm that the `Extensions` tab lists all available plugins.
- [ ] :key: Test the toggle switch for each plugin to ensure it enables or disables the plugin correctly.
@@ -208,3 +209,19 @@
- [ ] :key: Test that the application prevents the installation of incompatible or corrupt plugin files.
- [ ] :key: Check that the user can uninstall or disable custom plugins as easily as pre-installed ones.
- [ ] Verify that the application's performance remains stable after the installation of custom plugins.
+
+### 5. Advanced Settings
+- [ ] Attempt to download a model from the Hub using **HTTP Proxy** [guideline](https://github.com/janhq/jan/pull/1562)
+- [ ] Users can move **Jan data folder**
+- [ ] Users can click the Reset button to **factory reset** app settings to their original state & delete all usage data.
+
+## G. Local API server
+
+### 1. Local Server Usage with Server Options
+- [ ] :key: Explore API Reference: Swagger API for sending/receiving requests
+ - [ ] Use default server option
+ - [ ] Configure and use custom server options
+- [ ] Test starting/stopping the local API server with different Models / Model settings
+- [ ] Server logs captured with correct Server Options provided
+- [ ] Verify functionality of Open logs/Clear feature
+- [ ] Ensure that threads and other functions impacting the model are disabled while the local server is running
diff --git a/electron/handlers/app.ts b/electron/handlers/app.ts
index bdb70047a..c1f431ef3 100644
--- a/electron/handlers/app.ts
+++ b/electron/handlers/app.ts
@@ -1,5 +1,5 @@
import { app, ipcMain, dialog, shell } from 'electron'
-import { join, basename } from 'path'
+import { join, basename, relative as getRelative, isAbsolute } from 'path'
import { WindowManager } from './../managers/window'
import { getResourcePath } from './../utils/path'
import { AppRoute, AppConfiguration } from '@janhq/core'
@@ -50,6 +50,27 @@ export function handleAppIPCs() {
join(...paths)
)
+ /**
+ * Checks if the given path is a subdirectory of the given directory.
+ *
+ * @param _event - The IPC event object.
+ * @param from - The base directory to check against.
+ * @param to - The path to check.
+ *
+ * @returns {Promise<boolean>} - A promise that resolves to true when `to` is inside `from`.
+ */
+ ipcMain.handle(
+ AppRoute.isSubdirectory,
+ async (_event, from: string, to: string) => {
+ const relative = getRelative(from, to)
+ const isSubdir =
+ relative && !relative.startsWith('..') && !isAbsolute(relative)
+
+ if (isSubdir === '') return false
+ else return isSubdir
+ }
+ )
+
/**
* Retrieve basename from given path, respect to the current OS.
*/
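A hedged sketch of the renderer-side call for this route, assuming the standard ipcRenderer.invoke bridge (the actual preload surface may differ):

import { ipcRenderer } from 'electron'
import { AppRoute } from '@janhq/core'

// Resolves to true only when `candidate` lives inside `baseDir`
const isInsideFolder = async (baseDir: string, candidate: string): Promise<boolean> =>
  (await ipcRenderer.invoke(AppRoute.isSubdirectory, baseDir, candidate)) === true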
diff --git a/electron/handlers/fileManager.ts b/electron/handlers/fileManager.ts
index f41286934..e328cb53b 100644
--- a/electron/handlers/fileManager.ts
+++ b/electron/handlers/fileManager.ts
@@ -1,4 +1,4 @@
-import { ipcMain } from 'electron'
+import { ipcMain, app } from 'electron'
// @ts-ignore
import reflect from '@alumna/reflect'
@@ -38,6 +38,10 @@ export function handleFileMangerIPCs() {
getResourcePath()
)
+ ipcMain.handle(FileManagerRoute.getUserHomePath, async (_event) =>
+ app.getPath('home')
+ )
+
// handle fs is directory here
ipcMain.handle(
FileManagerRoute.fileStat,
@@ -59,4 +63,20 @@ export function handleFileMangerIPCs() {
return fileStat
}
)
+
+ ipcMain.handle(
+ FileManagerRoute.writeBlob,
+ async (_event, path: string, data: string): Promise<void> => {
+ try {
+ const normalizedPath = normalizeFilePath(path)
+ const dataBuffer = Buffer.from(data, 'base64')
+ fs.writeFileSync(
+ join(getJanDataFolderPath(), normalizedPath),
+ dataBuffer
+ )
+ } catch (err) {
+ console.error(`writeFile ${path} result: ${err}`)
+ }
+ }
+ )
}
diff --git a/electron/handlers/fs.ts b/electron/handlers/fs.ts
index 408a5fd10..34026b940 100644
--- a/electron/handlers/fs.ts
+++ b/electron/handlers/fs.ts
@@ -1,9 +1,9 @@
import { ipcMain } from 'electron'
-import { FileSystemRoute } from '@janhq/core'
-import { join } from 'path'
import { getJanDataFolderPath, normalizeFilePath } from '@janhq/core/node'
-
+import fs from 'fs'
+import { FileManagerRoute, FileSystemRoute } from '@janhq/core'
+import { join } from 'path'
/**
* Handles file system operations.
*/
@@ -15,7 +15,7 @@ export function handleFsIPCs() {
mdl[route](
...args.map((arg) =>
typeof arg === 'string' &&
- (arg.includes(`file:/`) || arg.includes(`file:\\`))
+ (arg.startsWith(`file:/`) || arg.startsWith(`file:\\`))
? join(getJanDataFolderPath(), normalizeFilePath(arg))
: arg
)
diff --git a/electron/main.ts b/electron/main.ts
index fb7066cd0..5d7e59c0f 100644
--- a/electron/main.ts
+++ b/electron/main.ts
@@ -28,6 +28,22 @@ import { setupCore } from './utils/setup'
app
.whenReady()
+ .then(async () => {
+ if (!app.isPackaged) {
+ // Which means you're running from source code
+ const { default: installExtension, REACT_DEVELOPER_TOOLS } = await import(
+ 'electron-devtools-installer'
+ ) // Don't use import on top level, since the installer package is dev-only
+ try {
+ const name = await installExtension(REACT_DEVELOPER_TOOLS)
+ console.log(`Added Extension: ${name}`)
+ } catch (err) {
+ console.log('An error occurred while installing devtools:')
+ console.error(err)
+ // Only log the error and don't throw it because it's not critical
+ }
+ }
+ })
.then(setupCore)
.then(createUserSpace)
.then(migrateExtensions)
diff --git a/electron/merge-latest-ymls.js b/electron/merge-latest-ymls.js
new file mode 100644
index 000000000..8172a3176
--- /dev/null
+++ b/electron/merge-latest-ymls.js
@@ -0,0 +1,27 @@
+const yaml = require('js-yaml')
+const fs = require('fs')
+
+// get two file paths from arguments:
+const [, , ...args] = process.argv
+const file1 = args[0]
+const file2 = args[1]
+const file3 = args[2]
+
+// check that all arguments are present and throw error instead
+if (!file1 || !file2 || !file3) {
+ throw new Error('Please provide 3 file paths as arguments: path to file1, to file2 and destination path')
+}
+
+const doc1 = yaml.load(fs.readFileSync(file1, 'utf8'))
+console.log('doc1: ', doc1)
+
+const doc2 = yaml.load(fs.readFileSync(file2, 'utf8'))
+console.log('doc2: ', doc2)
+
+const merged = { ...doc1, ...doc2 }
+merged.files.push(...doc1.files)
+
+console.log('merged', merged)
+
+const mergedYml = yaml.dump(merged)
+fs.writeFileSync(file3, mergedYml, 'utf8')
diff --git a/electron/package.json b/electron/package.json
index 173e54f2b..08f15b262 100644
--- a/electron/package.json
+++ b/electron/package.json
@@ -63,11 +63,11 @@
"build:test:darwin": "tsc -p . && electron-builder -p never -m --dir",
"build:test:win32": "tsc -p . && electron-builder -p never -w --dir",
"build:test:linux": "tsc -p . && electron-builder -p never -l --dir",
- "build:darwin": "tsc -p . && electron-builder -p never -m --x64 --arm64",
+ "build:darwin": "tsc -p . && electron-builder -p never -m",
"build:win32": "tsc -p . && electron-builder -p never -w",
"build:linux": "tsc -p . && electron-builder -p never -l deb -l AppImage",
"build:publish": "run-script-os",
- "build:publish:darwin": "tsc -p . && electron-builder -p always -m --x64 --arm64",
+ "build:publish:darwin": "tsc -p . && electron-builder -p always -m",
"build:publish:win32": "tsc -p . && electron-builder -p always -w",
"build:publish:linux": "tsc -p . && electron-builder -p always -l deb -l AppImage"
},
@@ -86,7 +86,7 @@
"request": "^2.88.2",
"request-progress": "^3.0.0",
"rimraf": "^5.0.5",
- "typescript": "^5.3.3",
+ "typescript": "^5.2.2",
"ulid": "^2.3.0",
"use-debounce": "^9.0.4"
},
@@ -99,6 +99,7 @@
"@typescript-eslint/parser": "^6.7.3",
"electron": "28.0.0",
"electron-builder": "^24.9.1",
+ "electron-devtools-installer": "^3.2.0",
"electron-playwright-helpers": "^1.6.0",
"eslint-plugin-react": "^7.33.2",
"run-script-os": "^1.1.6"
diff --git a/electron/playwright.config.ts b/electron/playwright.config.ts
index 98b2c7b45..8047b7513 100644
--- a/electron/playwright.config.ts
+++ b/electron/playwright.config.ts
@@ -1,9 +1,16 @@
-import { PlaywrightTestConfig } from "@playwright/test";
+import { PlaywrightTestConfig } from '@playwright/test'
const config: PlaywrightTestConfig = {
- testDir: "./tests",
+ testDir: './tests/e2e',
retries: 0,
- timeout: 120000,
-};
+ globalTimeout: 300000,
+ use: {
+ screenshot: 'only-on-failure',
+ video: 'retain-on-failure',
+ trace: 'retain-on-failure',
+ },
-export default config;
+ reporter: [['html', { outputFolder: './playwright-report' }]],
+}
+
+export default config
diff --git a/electron/tests/e2e/hub.e2e.spec.ts b/electron/tests/e2e/hub.e2e.spec.ts
new file mode 100644
index 000000000..68632058e
--- /dev/null
+++ b/electron/tests/e2e/hub.e2e.spec.ts
@@ -0,0 +1,34 @@
+import {
+ page,
+ test,
+ setupElectron,
+ teardownElectron,
+ TIMEOUT,
+} from '../pages/basePage'
+import { expect } from '@playwright/test'
+
+test.beforeAll(async () => {
+ const appInfo = await setupElectron()
+ expect(appInfo.asar).toBe(true)
+ expect(appInfo.executable).toBeTruthy()
+ expect(appInfo.main).toBeTruthy()
+ expect(appInfo.name).toBe('jan')
+ expect(appInfo.packageJson).toBeTruthy()
+ expect(appInfo.packageJson.name).toBe('jan')
+ expect(appInfo.platform).toBeTruthy()
+ expect(appInfo.platform).toBe(process.platform)
+ expect(appInfo.resourcesDir).toBeTruthy()
+})
+
+test.afterAll(async () => {
+ await teardownElectron()
+})
+
+test('explores hub', async () => {
+ await page.getByTestId('Hub').first().click({
+ timeout: TIMEOUT,
+ })
+ await page.getByTestId('hub-container-test-id').isVisible({
+ timeout: TIMEOUT,
+ })
+})
diff --git a/electron/tests/e2e/navigation.e2e.spec.ts b/electron/tests/e2e/navigation.e2e.spec.ts
new file mode 100644
index 000000000..2da59953c
--- /dev/null
+++ b/electron/tests/e2e/navigation.e2e.spec.ts
@@ -0,0 +1,38 @@
+import { expect } from '@playwright/test'
+import {
+ page,
+ setupElectron,
+ TIMEOUT,
+ test,
+ teardownElectron,
+} from '../pages/basePage'
+
+test.beforeAll(async () => {
+ await setupElectron()
+})
+
+test.afterAll(async () => {
+ await teardownElectron()
+})
+
+test('renders left navigation panel', async () => {
+ const systemMonitorBtn = await page
+ .getByTestId('System Monitor')
+ .first()
+ .isEnabled({
+ timeout: TIMEOUT,
+ })
+ const settingsBtn = await page
+ .getByTestId('Thread')
+ .first()
+ .isEnabled({ timeout: TIMEOUT })
+ expect([systemMonitorBtn, settingsBtn].filter((e) => !e).length).toBe(0)
+ // Chat section should be there
+ await page.getByTestId('Local API Server').first().click({
+ timeout: TIMEOUT,
+ })
+ const localServer = page.getByTestId('local-server-testid').first()
+ await expect(localServer).toBeVisible({
+ timeout: TIMEOUT,
+ })
+})
diff --git a/electron/tests/e2e/settings.e2e.spec.ts b/electron/tests/e2e/settings.e2e.spec.ts
new file mode 100644
index 000000000..54215d9b1
--- /dev/null
+++ b/electron/tests/e2e/settings.e2e.spec.ts
@@ -0,0 +1,23 @@
+import { expect } from '@playwright/test'
+
+import {
+ setupElectron,
+ teardownElectron,
+ test,
+ page,
+ TIMEOUT,
+} from '../pages/basePage'
+
+test.beforeAll(async () => {
+ await setupElectron()
+})
+
+test.afterAll(async () => {
+ await teardownElectron()
+})
+
+test('shows settings', async () => {
+ await page.getByTestId('Settings').first().click({ timeout: TIMEOUT })
+ const settingDescription = page.getByTestId('testid-setting-description')
+ await expect(settingDescription).toBeVisible({ timeout: TIMEOUT })
+})
diff --git a/electron/tests/explore.e2e.spec.ts b/electron/tests/explore.e2e.spec.ts
deleted file mode 100644
index 77eb3dbda..000000000
--- a/electron/tests/explore.e2e.spec.ts
+++ /dev/null
@@ -1,41 +0,0 @@
-import { _electron as electron } from 'playwright'
-import { ElectronApplication, Page, expect, test } from '@playwright/test'
-
-import {
- findLatestBuild,
- parseElectronApp,
- stubDialog,
-} from 'electron-playwright-helpers'
-
-let electronApp: ElectronApplication
-let page: Page
-
-test.beforeAll(async () => {
- process.env.CI = 'e2e'
-
- const latestBuild = findLatestBuild('dist')
- expect(latestBuild).toBeTruthy()
-
- // parse the packaged Electron app and find paths and other info
- const appInfo = parseElectronApp(latestBuild)
- expect(appInfo).toBeTruthy()
-
- electronApp = await electron.launch({
- args: [appInfo.main], // main file from package.json
- executablePath: appInfo.executable, // path to the Electron executable
- })
- await stubDialog(electronApp, 'showMessageBox', { response: 1 })
-
- page = await electronApp.firstWindow()
-})
-
-test.afterAll(async () => {
- await electronApp.close()
- await page.close()
-})
-
-test('explores models', async () => {
- await page.getByTestId('Hub').first().click()
- await page.getByTestId('testid-explore-models').isVisible()
- // More test cases here...
-})
diff --git a/electron/tests/main.e2e.spec.ts b/electron/tests/main.e2e.spec.ts
deleted file mode 100644
index 1a5bfe696..000000000
--- a/electron/tests/main.e2e.spec.ts
+++ /dev/null
@@ -1,55 +0,0 @@
-import { _electron as electron } from 'playwright'
-import { ElectronApplication, Page, expect, test } from '@playwright/test'
-
-import {
- findLatestBuild,
- parseElectronApp,
- stubDialog,
-} from 'electron-playwright-helpers'
-
-let electronApp: ElectronApplication
-let page: Page
-
-test.beforeAll(async () => {
- process.env.CI = 'e2e'
-
- const latestBuild = findLatestBuild('dist')
- expect(latestBuild).toBeTruthy()
-
- // parse the packaged Electron app and find paths and other info
- const appInfo = parseElectronApp(latestBuild)
- expect(appInfo).toBeTruthy()
- expect(appInfo.asar).toBe(true)
- expect(appInfo.executable).toBeTruthy()
- expect(appInfo.main).toBeTruthy()
- expect(appInfo.name).toBe('jan')
- expect(appInfo.packageJson).toBeTruthy()
- expect(appInfo.packageJson.name).toBe('jan')
- expect(appInfo.platform).toBeTruthy()
- expect(appInfo.platform).toBe(process.platform)
- expect(appInfo.resourcesDir).toBeTruthy()
-
- electronApp = await electron.launch({
- args: [appInfo.main], // main file from package.json
- executablePath: appInfo.executable, // path to the Electron executable
- })
- await stubDialog(electronApp, 'showMessageBox', { response: 1 })
-
- page = await electronApp.firstWindow()
-})
-
-test.afterAll(async () => {
- await electronApp.close()
- await page.close()
-})
-
-test('renders the home page', async () => {
- expect(page).toBeDefined()
-
- // Welcome text is available
- const welcomeText = await page
- .getByTestId('testid-welcome-title')
- .first()
- .isVisible()
- expect(welcomeText).toBe(false)
-})
diff --git a/electron/tests/navigation.e2e.spec.ts b/electron/tests/navigation.e2e.spec.ts
deleted file mode 100644
index 2f4f7b767..000000000
--- a/electron/tests/navigation.e2e.spec.ts
+++ /dev/null
@@ -1,54 +0,0 @@
-import { _electron as electron } from 'playwright'
-import { ElectronApplication, Page, expect, test } from '@playwright/test'
-
-import {
- findLatestBuild,
- parseElectronApp,
- stubDialog,
-} from 'electron-playwright-helpers'
-
-let electronApp: ElectronApplication
-let page: Page
-
-test.beforeAll(async () => {
- process.env.CI = 'e2e'
-
- const latestBuild = findLatestBuild('dist')
- expect(latestBuild).toBeTruthy()
-
- // parse the packaged Electron app and find paths and other info
- const appInfo = parseElectronApp(latestBuild)
- expect(appInfo).toBeTruthy()
-
- electronApp = await electron.launch({
- args: [appInfo.main], // main file from package.json
- executablePath: appInfo.executable, // path to the Electron executable
- })
- await stubDialog(electronApp, 'showMessageBox', { response: 1 })
-
- page = await electronApp.firstWindow()
-})
-
-test.afterAll(async () => {
- await electronApp.close()
- await page.close()
-})
-
-test('renders left navigation panel', async () => {
- // Chat section should be there
- const chatSection = await page.getByTestId('Chat').first().isVisible()
- expect(chatSection).toBe(false)
-
- // Home actions
- /* Disable unstable feature tests
- ** const botBtn = await page.getByTestId("Bot").first().isEnabled();
- ** Enable back when it is whitelisted
- */
-
- const systemMonitorBtn = await page
- .getByTestId('System Monitor')
- .first()
- .isEnabled()
- const settingsBtn = await page.getByTestId('Settings').first().isEnabled()
- expect([systemMonitorBtn, settingsBtn].filter((e) => !e).length).toBe(0)
-})
diff --git a/electron/tests/pages/basePage.ts b/electron/tests/pages/basePage.ts
new file mode 100644
index 000000000..5f1a6fca1
--- /dev/null
+++ b/electron/tests/pages/basePage.ts
@@ -0,0 +1,67 @@
+import {
+ expect,
+ test as base,
+ _electron as electron,
+ ElectronApplication,
+ Page,
+} from '@playwright/test'
+import {
+ findLatestBuild,
+ parseElectronApp,
+ stubDialog,
+} from 'electron-playwright-helpers'
+
+export const TIMEOUT: number = parseInt(process.env.TEST_TIMEOUT || '300000')
+
+export let electronApp: ElectronApplication
+export let page: Page
+
+export async function setupElectron() {
+ process.env.CI = 'e2e'
+
+ const latestBuild = findLatestBuild('dist')
+ expect(latestBuild).toBeTruthy()
+
+ // parse the packaged Electron app and find paths and other info
+ const appInfo = parseElectronApp(latestBuild)
+ expect(appInfo).toBeTruthy()
+
+ electronApp = await electron.launch({
+ args: [appInfo.main], // main file from package.json
+ executablePath: appInfo.executable, // path to the Electron executable
+ })
+ await stubDialog(electronApp, 'showMessageBox', { response: 1 })
+
+ page = await electronApp.firstWindow({
+ timeout: TIMEOUT,
+ })
+ // Return appInfo for future use
+ return appInfo
+}
+
+export async function teardownElectron() {
+ await page.close()
+ await electronApp.close()
+}
+
+export const test = base.extend<{
+ attachScreenshotsToReport: void
+}>({
+ attachScreenshotsToReport: [
+ async ({ request }, use, testInfo) => {
+ await use()
+
+ // After the test, we can check whether the test passed or failed.
+ if (testInfo.status !== testInfo.expectedStatus) {
+ const screenshot = await page.screenshot()
+ await testInfo.attach('screenshot', {
+ body: screenshot,
+ contentType: 'image/png',
+ })
+ }
+ },
+ { auto: true },
+ ],
+})
+
+test.setTimeout(TIMEOUT)
diff --git a/electron/tests/settings.e2e.spec.ts b/electron/tests/settings.e2e.spec.ts
deleted file mode 100644
index 798504c70..000000000
--- a/electron/tests/settings.e2e.spec.ts
+++ /dev/null
@@ -1,40 +0,0 @@
-import { _electron as electron } from 'playwright'
-import { ElectronApplication, Page, expect, test } from '@playwright/test'
-
-import {
- findLatestBuild,
- parseElectronApp,
- stubDialog,
-} from 'electron-playwright-helpers'
-
-let electronApp: ElectronApplication
-let page: Page
-
-test.beforeAll(async () => {
- process.env.CI = 'e2e'
-
- const latestBuild = findLatestBuild('dist')
- expect(latestBuild).toBeTruthy()
-
- // parse the packaged Electron app and find paths and other info
- const appInfo = parseElectronApp(latestBuild)
- expect(appInfo).toBeTruthy()
-
- electronApp = await electron.launch({
- args: [appInfo.main], // main file from package.json
- executablePath: appInfo.executable, // path to the Electron executable
- })
- await stubDialog(electronApp, 'showMessageBox', { response: 1 })
-
- page = await electronApp.firstWindow()
-})
-
-test.afterAll(async () => {
- await electronApp.close()
- await page.close()
-})
-
-test('shows settings', async () => {
- await page.getByTestId('Settings').first().click()
- await page.getByTestId('testid-setting-description').isVisible()
-})
diff --git a/electron/tests/system-monitor.e2e.spec.ts b/electron/tests/system-monitor.e2e.spec.ts
deleted file mode 100644
index 747a8ae18..000000000
--- a/electron/tests/system-monitor.e2e.spec.ts
+++ /dev/null
@@ -1,41 +0,0 @@
-import { _electron as electron } from 'playwright'
-import { ElectronApplication, Page, expect, test } from '@playwright/test'
-
-import {
- findLatestBuild,
- parseElectronApp,
- stubDialog,
-} from 'electron-playwright-helpers'
-
-let electronApp: ElectronApplication
-let page: Page
-
-test.beforeAll(async () => {
- process.env.CI = 'e2e'
-
- const latestBuild = findLatestBuild('dist')
- expect(latestBuild).toBeTruthy()
-
- // parse the packaged Electron app and find paths and other info
- const appInfo = parseElectronApp(latestBuild)
- expect(appInfo).toBeTruthy()
-
- electronApp = await electron.launch({
- args: [appInfo.main], // main file from package.json
- executablePath: appInfo.executable, // path to the Electron executable
- })
- await stubDialog(electronApp, 'showMessageBox', { response: 1 })
-
- page = await electronApp.firstWindow()
-})
-
-test.afterAll(async () => {
- await electronApp.close()
- await page.close()
-})
-
-test('shows system monitor', async () => {
- await page.getByTestId('System Monitor').first().click()
- await page.getByTestId('testid-system-monitor').isVisible()
- // More test cases here...
-})
diff --git a/extensions/assistant-extension/package.json b/extensions/assistant-extension/package.json
index 4e84aa573..84bcdf47e 100644
--- a/extensions/assistant-extension/package.json
+++ b/extensions/assistant-extension/package.json
@@ -3,26 +3,50 @@
"version": "1.0.0",
"description": "This extension enables assistants, including Jan, a default assistant that can call all downloaded models",
"main": "dist/index.js",
- "module": "dist/module.js",
+ "node": "dist/node/index.js",
"author": "Jan ",
"license": "AGPL-3.0",
"scripts": {
- "build": "tsc -b . && webpack --config webpack.config.js",
- "build:publish": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../electron/pre-install"
+ "build": "tsc --module commonjs && rollup -c rollup.config.ts",
+ "build:publish:linux": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../electron/pre-install",
+ "build:publish:darwin": "rimraf *.tgz --glob && npm run build && ../../.github/scripts/auto-sign.sh && npm pack && cpx *.tgz ../../electron/pre-install",
+ "build:publish:win32": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../electron/pre-install",
+ "build:publish": "run-script-os"
},
"devDependencies": {
+ "@rollup/plugin-commonjs": "^25.0.7",
+ "@rollup/plugin-json": "^6.1.0",
+ "@rollup/plugin-node-resolve": "^15.2.3",
+ "@rollup/plugin-replace": "^5.0.5",
+ "@types/pdf-parse": "^1.1.4",
+ "cpx": "^1.5.0",
"rimraf": "^3.0.2",
- "webpack": "^5.88.2",
- "webpack-cli": "^5.1.4"
+ "rollup": "^2.38.5",
+ "rollup-plugin-define": "^1.0.1",
+ "rollup-plugin-sourcemaps": "^0.6.3",
+ "rollup-plugin-typescript2": "^0.36.0",
+ "typescript": "^5.3.3",
+ "run-script-os": "^1.1.6"
},
"dependencies": {
"@janhq/core": "file:../../core",
+ "@langchain/community": "0.0.13",
+ "hnswlib-node": "^1.4.2",
+ "langchain": "^0.0.214",
"path-browserify": "^1.0.1",
+ "pdf-parse": "^1.1.1",
"ts-loader": "^9.5.0"
},
"files": [
"dist/*",
"package.json",
"README.md"
+ ],
+ "bundleDependencies": [
+ "@janhq/core",
+ "@langchain/community",
+ "hnswlib-node",
+ "langchain",
+ "pdf-parse"
]
}
diff --git a/extensions/assistant-extension/rollup.config.ts b/extensions/assistant-extension/rollup.config.ts
new file mode 100644
index 000000000..7916ef9c8
--- /dev/null
+++ b/extensions/assistant-extension/rollup.config.ts
@@ -0,0 +1,81 @@
+import resolve from "@rollup/plugin-node-resolve";
+import commonjs from "@rollup/plugin-commonjs";
+import sourceMaps from "rollup-plugin-sourcemaps";
+import typescript from "rollup-plugin-typescript2";
+import json from "@rollup/plugin-json";
+import replace from "@rollup/plugin-replace";
+
+const packageJson = require("./package.json");
+
+const pkg = require("./package.json");
+
+export default [
+ {
+ input: `src/index.ts`,
+ output: [{ file: pkg.main, format: "es", sourcemap: true }],
+ // Indicate here external modules you don't wanna include in your bundle (i.e.: 'lodash')
+ external: [],
+ watch: {
+ include: "src/**",
+ },
+ plugins: [
+ replace({
+ NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`),
+ EXTENSION_NAME: JSON.stringify(packageJson.name),
+ VERSION: JSON.stringify(packageJson.version),
+ }),
+ // Allow json resolution
+ json(),
+ // Compile TypeScript files
+ typescript({ useTsconfigDeclarationDir: true }),
+ // Compile TypeScript files
+ // Allow bundling cjs modules (unlike webpack, rollup doesn't understand cjs)
+ commonjs(),
+ // Allow node_modules resolution, so you can use 'external' to control
+ // which external modules to include in the bundle
+ // https://github.com/rollup/rollup-plugin-node-resolve#usage
+ resolve({
+ extensions: [".js", ".ts", ".svelte"],
+ }),
+
+ // Resolve source maps to the original source
+ sourceMaps(),
+ ],
+ },
+ {
+ input: `src/node/index.ts`,
+ output: [{ dir: "dist/node", format: "cjs", sourcemap: false }],
+ // Indicate here external modules you don't wanna include in your bundle (i.e.: 'lodash')
+ external: [
+ "@janhq/core/node",
+ "@langchain/community",
+ "langchain",
+ "langsmith",
+ "path",
+ "hnswlib-node",
+ ],
+ watch: {
+ include: "src/node/**",
+ },
+ // inlineDynamicImports: true,
+ plugins: [
+ // Allow json resolution
+ json(),
+ // Compile TypeScript files
+ typescript({ useTsconfigDeclarationDir: true }),
+ // Allow bundling cjs modules (unlike webpack, rollup doesn't understand cjs)
+ commonjs({
+ ignoreDynamicRequires: true,
+ }),
+ // Allow node_modules resolution, so you can use 'external' to control
+ // which external modules to include in the bundle
+ // https://github.com/rollup/rollup-plugin-node-resolve#usage
+ resolve({
+ extensions: [".ts", ".js", ".json"],
+ }),
+
+ // Resolve source maps to the original source
+ // sourceMaps(),
+ ],
+ },
+];
diff --git a/extensions/assistant-extension/src/@types/global.d.ts b/extensions/assistant-extension/src/@types/global.d.ts
index 3b45ccc5a..dc11709a4 100644
--- a/extensions/assistant-extension/src/@types/global.d.ts
+++ b/extensions/assistant-extension/src/@types/global.d.ts
@@ -1 +1,3 @@
-declare const MODULE: string;
+declare const NODE: string;
+declare const EXTENSION_NAME: string;
+declare const VERSION: string;
diff --git a/extensions/assistant-extension/src/index.ts b/extensions/assistant-extension/src/index.ts
index 96de33b7b..6495ea786 100644
--- a/extensions/assistant-extension/src/index.ts
+++ b/extensions/assistant-extension/src/index.ts
@@ -1,16 +1,151 @@
-import { fs, Assistant } from "@janhq/core";
-import { AssistantExtension } from "@janhq/core";
-import { join } from "path";
+import {
+ fs,
+ Assistant,
+ MessageRequest,
+ events,
+ InferenceEngine,
+ MessageEvent,
+ InferenceEvent,
+ joinPath,
+ executeOnMain,
+ AssistantExtension,
+} from "@janhq/core";
export default class JanAssistantExtension extends AssistantExtension {
private static readonly _homeDir = "file://assistants";
+ controller = new AbortController();
+ isCancelled = false;
+ retrievalThreadId: string | undefined = undefined;
+
async onLoad() {
// making the assistant directory
- if (!(await fs.existsSync(JanAssistantExtension._homeDir)))
- fs.mkdirSync(JanAssistantExtension._homeDir).then(() => {
- this.createJanAssistant();
- });
+ const assistantDirExist = await fs.existsSync(
+ JanAssistantExtension._homeDir,
+ );
+ if (
+ localStorage.getItem(`${EXTENSION_NAME}-version`) !== VERSION ||
+ !assistantDirExist
+ ) {
+ if (!assistantDirExist)
+ await fs.mkdirSync(JanAssistantExtension._homeDir);
+
+ // Write assistant metadata
+ this.createJanAssistant();
+ // Finished migration
+ localStorage.setItem(`${EXTENSION_NAME}-version`, VERSION);
+ }
+
+ // Events subscription
+ events.on(MessageEvent.OnMessageSent, (data: MessageRequest) =>
+ JanAssistantExtension.handleMessageRequest(data, this),
+ );
+
+ events.on(InferenceEvent.OnInferenceStopped, () => {
+ JanAssistantExtension.handleInferenceStopped(this);
+ });
+ }
+
+ private static async handleInferenceStopped(instance: JanAssistantExtension) {
+ instance.isCancelled = true;
+ instance.controller?.abort();
+ }
+
+ private static async handleMessageRequest(
+ data: MessageRequest,
+ instance: JanAssistantExtension,
+ ) {
+ instance.isCancelled = false;
+ instance.controller = new AbortController();
+
+ if (
+ data.model?.engine !== InferenceEngine.tool_retrieval_enabled ||
+ !data.messages ||
+ !data.thread?.assistants[0]?.tools
+ ) {
+ return;
+ }
+
+ const latestMessage = data.messages[data.messages.length - 1];
+
+ // Ingest the document if needed
+ if (
+ latestMessage &&
+ latestMessage.content &&
+ typeof latestMessage.content !== "string"
+ ) {
+ const docFile = latestMessage.content[1]?.doc_url?.url;
+ if (docFile) {
+ await executeOnMain(
+ NODE,
+ "toolRetrievalIngestNewDocument",
+ docFile,
+ data.model?.proxyEngine,
+ );
+ }
+ }
+
+ // Load agent on thread changed
+ if (instance.retrievalThreadId !== data.threadId) {
+ await executeOnMain(NODE, "toolRetrievalLoadThreadMemory", data.threadId);
+
+ instance.retrievalThreadId = data.threadId;
+
+ // Update the text splitter
+ await executeOnMain(
+ NODE,
+ "toolRetrievalUpdateTextSplitter",
+ data.thread.assistants[0].tools[0]?.settings?.chunk_size ?? 4000,
+ data.thread.assistants[0].tools[0]?.settings?.chunk_overlap ?? 200,
+ );
+ }
+
+ if (latestMessage.content) {
+ const prompt =
+ typeof latestMessage.content === "string"
+ ? latestMessage.content
+ : latestMessage.content[0].text;
+ // Retrieve the result
+ console.debug("toolRetrievalQuery", latestMessage.content);
+ const retrievalResult = await executeOnMain(
+ NODE,
+ "toolRetrievalQueryResult",
+ prompt,
+ );
+
+ // Update the message content
+ // Using the retrieval template with the result and query
+ if (data.thread?.assistants[0].tools)
+ data.messages[data.messages.length - 1].content =
+ data.thread.assistants[0].tools[0].settings?.retrieval_template
+ ?.replace("{CONTEXT}", retrievalResult)
+ .replace("{QUESTION}", prompt);
+ }
+
+ // Filter out all the messages that are not text
+ data.messages = data.messages.map((message) => {
+ if (
+ message.content &&
+ typeof message.content !== "string" &&
+ (message.content.length ?? 0) > 0
+ ) {
+ return {
+ ...message,
+ content: [message.content[0]],
+ };
+ }
+ return message;
+ });
+
+ // Reroute the result to inference engine
+ const output = {
+ ...data,
+ model: {
+ ...data.model,
+ engine: data.model.proxyEngine,
+ },
+ };
+ events.emit(MessageEvent.OnMessageSent, output);
}
/**
@@ -19,15 +154,21 @@ export default class JanAssistantExtension extends AssistantExtension {
onUnload(): void {}
async createAssistant(assistant: Assistant): Promise<void> {
- const assistantDir = join(JanAssistantExtension._homeDir, assistant.id);
+ const assistantDir = await joinPath([
+ JanAssistantExtension._homeDir,
+ assistant.id,
+ ]);
if (!(await fs.existsSync(assistantDir))) await fs.mkdirSync(assistantDir);
// store the assistant metadata json
- const assistantMetadataPath = join(assistantDir, "assistant.json");
+ const assistantMetadataPath = await joinPath([
+ assistantDir,
+ "assistant.json",
+ ]);
try {
await fs.writeFileSync(
assistantMetadataPath,
- JSON.stringify(assistant, null, 2)
+ JSON.stringify(assistant, null, 2),
);
} catch (err) {
console.error(err);
@@ -39,14 +180,17 @@ export default class JanAssistantExtension extends AssistantExtension {
// get all the assistant metadata json
const results: Assistant[] = [];
const allFileName: string[] = await fs.readdirSync(
- JanAssistantExtension._homeDir
+ JanAssistantExtension._homeDir,
);
for (const fileName of allFileName) {
- const filePath = join(JanAssistantExtension._homeDir, fileName);
+ const filePath = await joinPath([
+ JanAssistantExtension._homeDir,
+ fileName,
+ ]);
if (filePath.includes(".DS_Store")) continue;
const jsonFiles: string[] = (await fs.readdirSync(filePath)).filter(
- (file: string) => file === "assistant.json"
+ (file: string) => file === "assistant.json",
);
if (jsonFiles.length !== 1) {
@@ -55,8 +199,8 @@ export default class JanAssistantExtension extends AssistantExtension {
}
const content = await fs.readFileSync(
- join(filePath, jsonFiles[0]),
- "utf-8"
+ await joinPath([filePath, jsonFiles[0]]),
+ "utf-8",
);
const assistant: Assistant =
typeof content === "object" ? content : JSON.parse(content);
@@ -73,7 +217,10 @@ export default class JanAssistantExtension extends AssistantExtension {
}
// remove the directory
- const assistantDir = join(JanAssistantExtension._homeDir, assistant.id);
+ const assistantDir = await joinPath([
+ JanAssistantExtension._homeDir,
+ assistant.id,
+ ]);
await fs.rmdirSync(assistantDir);
return Promise.resolve();
}
@@ -89,7 +236,24 @@ export default class JanAssistantExtension extends AssistantExtension {
description: "A default assistant that can use all downloaded models",
model: "*",
instructions: "",
- tools: undefined,
+ tools: [
+ {
+ type: "retrieval",
+ enabled: false,
+ settings: {
+ top_k: 2,
+ chunk_size: 1024,
+ chunk_overlap: 64,
+ retrieval_template: `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
+ ----------------
+ CONTEXT: {CONTEXT}
+ ----------------
+ QUESTION: {QUESTION}
+ ----------------
+ Helpful Answer:`,
+ },
+ },
+ ],
file_ids: [],
metadata: undefined,
};
diff --git a/extensions/assistant-extension/src/node/engine.ts b/extensions/assistant-extension/src/node/engine.ts
new file mode 100644
index 000000000..54b2a6ba1
--- /dev/null
+++ b/extensions/assistant-extension/src/node/engine.ts
@@ -0,0 +1,13 @@
+import fs from "fs";
+import path from "path";
+import { getJanDataFolderPath } from "@janhq/core/node";
+
+// Sec: Do not send engine settings over requests
+// Read it manually instead
+export const readEmbeddingEngine = (engineName: string) => {
+ const engineSettings = fs.readFileSync(
+ path.join(getJanDataFolderPath(), "engines", `${engineName}.json`),
+ "utf-8",
+ );
+ return JSON.parse(engineSettings);
+};
diff --git a/extensions/assistant-extension/src/node/index.ts b/extensions/assistant-extension/src/node/index.ts
new file mode 100644
index 000000000..95a7243a4
--- /dev/null
+++ b/extensions/assistant-extension/src/node/index.ts
@@ -0,0 +1,39 @@
+import { getJanDataFolderPath, normalizeFilePath } from "@janhq/core/node";
+import { Retrieval } from "./tools/retrieval";
+import path from "path";
+
+const retrieval = new Retrieval();
+
+export async function toolRetrievalUpdateTextSplitter(
+ chunkSize: number,
+ chunkOverlap: number,
+) {
+ retrieval.updateTextSplitter(chunkSize, chunkOverlap);
+ return Promise.resolve();
+}
+export async function toolRetrievalIngestNewDocument(
+ file: string,
+ engine: string,
+) {
+ const filePath = path.join(getJanDataFolderPath(), normalizeFilePath(file));
+ const threadPath = path.dirname(filePath.replace("files", ""));
+ retrieval.updateEmbeddingEngine(engine);
+ await retrieval.ingestAgentKnowledge(filePath, `${threadPath}/memory`);
+ return Promise.resolve();
+}
+
+export async function toolRetrievalLoadThreadMemory(threadId: string) {
+ try {
+ await retrieval.loadRetrievalAgent(
+ path.join(getJanDataFolderPath(), "threads", threadId, "memory"),
+ );
+ return Promise.resolve();
+ } catch (err) {
+ console.debug(err);
+ }
+}
+
+export async function toolRetrievalQueryResult(query: string) {
+ const res = await retrieval.generateResult(query);
+ return Promise.resolve(res);
+}
diff --git a/extensions/assistant-extension/src/node/tools/retrieval/index.ts b/extensions/assistant-extension/src/node/tools/retrieval/index.ts
new file mode 100644
index 000000000..8c7a6aa2b
--- /dev/null
+++ b/extensions/assistant-extension/src/node/tools/retrieval/index.ts
@@ -0,0 +1,77 @@
+import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
+import { formatDocumentsAsString } from "langchain/util/document";
+import { PDFLoader } from "langchain/document_loaders/fs/pdf";
+
+import { HNSWLib } from "langchain/vectorstores/hnswlib";
+
+import { OpenAIEmbeddings } from "langchain/embeddings/openai";
+import { readEmbeddingEngine } from "../../engine";
+
+export class Retrieval {
+ public chunkSize: number = 100;
+ public chunkOverlap?: number = 0;
+ private retriever: any;
+
+ private embeddingModel?: OpenAIEmbeddings = undefined;
+ private textSplitter?: RecursiveCharacterTextSplitter;
+
+ constructor(chunkSize: number = 4000, chunkOverlap: number = 200) {
+ this.updateTextSplitter(chunkSize, chunkOverlap);
+ }
+
+ public updateTextSplitter(chunkSize: number, chunkOverlap: number): void {
+ this.chunkSize = chunkSize;
+ this.chunkOverlap = chunkOverlap;
+ this.textSplitter = new RecursiveCharacterTextSplitter({
+ chunkSize: chunkSize,
+ chunkOverlap: chunkOverlap,
+ });
+ }
+
+ public updateEmbeddingEngine(engine: string): void {
+ // Engine settings are not compatible with the current embedding model params
+ // Switch case manually for now
+ const settings = readEmbeddingEngine(engine);
+ if (engine === "nitro") {
+ this.embeddingModel = new OpenAIEmbeddings(
+ { openAIApiKey: "nitro-embedding" },
+ { basePath: "http://127.0.0.1:3928/v1" },
+ );
+ } else {
+ // Fallback to OpenAI Settings
+ this.embeddingModel = new OpenAIEmbeddings({
+ openAIApiKey: settings.api_key,
+ });
+ }
+ }
+
+ public ingestAgentKnowledge = async (
+ filePath: string,
+ memoryPath: string,
+ ): Promise<void> => {
+ const loader = new PDFLoader(filePath, {
+ splitPages: true,
+ });
+ if (!this.embeddingModel) return Promise.reject();
+ const doc = await loader.load();
+ const docs = await this.textSplitter!.splitDocuments(doc);
+ const vectorStore = await HNSWLib.fromDocuments(docs, this.embeddingModel);
+ return vectorStore.save(memoryPath);
+ };
+
+ public loadRetrievalAgent = async (memoryPath: string): Promise<void> => {
+ if (!this.embeddingModel) return Promise.reject();
+ const vectorStore = await HNSWLib.load(memoryPath, this.embeddingModel);
+ this.retriever = vectorStore.asRetriever(2);
+ return Promise.resolve();
+ };
+
+ public generateResult = async (query: string): Promise<string> => {
+ if (!this.retriever) {
+ return Promise.resolve(" ");
+ }
+ const relevantDocs = await this.retriever.getRelevantDocuments(query);
+ const serializedDoc = formatDocumentsAsString(relevantDocs);
+ return Promise.resolve(serializedDoc);
+ };
+}
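A hedged end-to-end sketch of the flow these methods implement, with placeholder paths; the node entry points in src/node/index.ts above wire this same sequence to executeOnMain calls from the extension:

const runRetrievalFlow = async () => {
  const retrieval = new Retrieval(1024, 64);

  // Pick the embedding backend from the on-disk engine settings
  retrieval.updateEmbeddingEngine("nitro");

  // Split and embed a PDF, persisting the HNSW index under the thread's memory folder
  await retrieval.ingestAgentKnowledge("/path/to/document.pdf", "/path/to/thread/memory");

  // Reload that index and answer a query against the two most relevant chunks
  await retrieval.loadRetrievalAgent("/path/to/thread/memory");
  return retrieval.generateResult("What does the document say about pricing?");
};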
diff --git a/extensions/assistant-extension/tsconfig.json b/extensions/assistant-extension/tsconfig.json
index 2477d58ce..d3794cace 100644
--- a/extensions/assistant-extension/tsconfig.json
+++ b/extensions/assistant-extension/tsconfig.json
@@ -1,14 +1,20 @@
{
"compilerOptions": {
- "target": "es2016",
- "module": "ES6",
"moduleResolution": "node",
- "outDir": "./dist",
- "esModuleInterop": true,
- "forceConsistentCasingInFileNames": true,
- "strict": false,
+ "target": "es5",
+ "module": "ES2020",
+ "lib": ["es2015", "es2016", "es2017", "dom"],
+ "strict": true,
+ "sourceMap": true,
+ "declaration": true,
+ "allowSyntheticDefaultImports": true,
+ "experimentalDecorators": true,
+ "emitDecoratorMetadata": true,
+ "declarationDir": "dist/types",
+ "outDir": "dist",
+ "importHelpers": true,
+ "typeRoots": ["node_modules/@types"],
"skipLibCheck": true,
- "rootDir": "./src"
},
- "include": ["./src"]
+ "include": ["src"],
}
diff --git a/extensions/assistant-extension/webpack.config.js b/extensions/assistant-extension/webpack.config.js
deleted file mode 100644
index 74d16fc8e..000000000
--- a/extensions/assistant-extension/webpack.config.js
+++ /dev/null
@@ -1,38 +0,0 @@
-const path = require("path");
-const webpack = require("webpack");
-const packageJson = require("./package.json");
-
-module.exports = {
- experiments: { outputModule: true },
- entry: "./src/index.ts", // Adjust the entry point to match your project's main file
- mode: "production",
- module: {
- rules: [
- {
- test: /\.tsx?$/,
- use: "ts-loader",
- exclude: /node_modules/,
- },
- ],
- },
- output: {
- filename: "index.js", // Adjust the output file name as needed
- path: path.resolve(__dirname, "dist"),
- library: { type: "module" }, // Specify ESM output format
- },
- plugins: [
- new webpack.DefinePlugin({
- MODULE: JSON.stringify(`${packageJson.name}/${packageJson.module}`),
- }),
- ],
- resolve: {
- extensions: [".ts", ".js"],
- fallback: {
- path: require.resolve("path-browserify"),
- },
- },
- optimization: {
- minimize: false,
- },
- // Add loaders and other configuration as needed for your project
-};
diff --git a/extensions/conversational-extension/src/index.ts b/extensions/conversational-extension/src/index.ts
index 66becb748..bf8c213ad 100644
--- a/extensions/conversational-extension/src/index.ts
+++ b/extensions/conversational-extension/src/index.ts
@@ -4,16 +4,15 @@ import {
ConversationalExtension,
Thread,
ThreadMessage,
+ events,
} from '@janhq/core'
/**
* JSONConversationalExtension is a ConversationalExtension implementation that provides
* functionality for managing threads.
*/
-export default class JSONConversationalExtension
- extends ConversationalExtension
-{
- private static readonly _homeDir = 'file://threads'
+export default class JSONConversationalExtension extends ConversationalExtension {
+ private static readonly _threadFolder = 'file://threads'
private static readonly _threadInfoFileName = 'thread.json'
private static readonly _threadMessagesFileName = 'messages.jsonl'
@@ -21,8 +20,8 @@ export default class JSONConversationalExtension
* Called when the extension is loaded.
*/
async onLoad() {
- if (!(await fs.existsSync(JSONConversationalExtension._homeDir)))
- await fs.mkdirSync(JSONConversationalExtension._homeDir)
+ if (!(await fs.existsSync(JSONConversationalExtension._threadFolder)))
+ await fs.mkdirSync(JSONConversationalExtension._threadFolder)
console.debug('JSONConversationalExtension loaded')
}
@@ -69,7 +68,7 @@ export default class JSONConversationalExtension
async saveThread(thread: Thread): Promise<void> {
try {
const threadDirPath = await joinPath([
- JSONConversationalExtension._homeDir,
+ JSONConversationalExtension._threadFolder,
thread.id,
])
const threadJsonPath = await joinPath([
@@ -93,7 +92,7 @@ export default class JSONConversationalExtension
*/
async deleteThread(threadId: string): Promise<void> {
const path = await joinPath([
- JSONConversationalExtension._homeDir,
+ JSONConversationalExtension._threadFolder,
`${threadId}`,
])
try {
@@ -110,7 +109,7 @@ export default class JSONConversationalExtension
async addNewMessage(message: ThreadMessage): Promise<void> {
try {
const threadDirPath = await joinPath([
- JSONConversationalExtension._homeDir,
+ JSONConversationalExtension._threadFolder,
message.thread_id,
])
const threadMessagePath = await joinPath([
@@ -119,6 +118,33 @@ export default class JSONConversationalExtension
])
if (!(await fs.existsSync(threadDirPath)))
await fs.mkdirSync(threadDirPath)
+
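+ // Persist base64 image attachments under the thread's files folder and store a relative path instead of the blob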
+ if (message.content[0]?.type === 'image') {
+ const filesPath = await joinPath([threadDirPath, 'files'])
+ if (!(await fs.existsSync(filesPath))) await fs.mkdirSync(filesPath)
+
+ const imagePath = await joinPath([filesPath, `${message.id}.png`])
+ const base64 = message.content[0].text.annotations[0]
+ await this.storeImage(base64, imagePath)
+ if ((await fs.existsSync(imagePath)) && message.content?.length) {
+ // Use file path instead of blob
+ message.content[0].text.annotations[0] = `threads/${message.thread_id}/files/${message.id}.png`
+ }
+ }
+
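+ // Do the same for PDF attachments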
+ if (message.content[0]?.type === 'pdf') {
+ const filesPath = await joinPath([threadDirPath, 'files'])
+ if (!(await fs.existsSync(filesPath))) await fs.mkdirSync(filesPath)
+
+ const filePath = await joinPath([filesPath, `${message.id}.pdf`])
+ const blob = message.content[0].text.annotations[0]
+ await this.storeFile(blob, filePath)
+
+ if ((await fs.existsSync(filePath)) && message.content?.length) {
+ // Use file path instead of blob
+ message.content[0].text.annotations[0] = `threads/${message.thread_id}/files/${message.id}.pdf`
+ }
+ }
await fs.appendFileSync(threadMessagePath, JSON.stringify(message) + '\n')
Promise.resolve()
} catch (err) {
@@ -126,13 +152,32 @@ export default class JSONConversationalExtension
}
}
+ async storeImage(base64: string, filePath: string): Promise<void> {
+ const base64Data = base64.replace(/^data:image\/\w+;base64,/, '')
+
+ try {
+ await fs.writeBlob(filePath, base64Data)
+ } catch (err) {
+ console.error(err)
+ }
+ }
+
+ async storeFile(base64: string, filePath: string): Promise<void> {
+ const base64Data = base64.replace(/^data:application\/pdf;base64,/, '')
+ try {
+ await fs.writeBlob(filePath, base64Data)
+ } catch (err) {
+ console.error(err)
+ }
+ }
+
async writeMessages(
threadId: string,
messages: ThreadMessage[]
): Promise<void> {
try {
const threadDirPath = await joinPath([
- JSONConversationalExtension._homeDir,
+ JSONConversationalExtension._threadFolder,
threadId,
])
const threadMessagePath = await joinPath([
@@ -160,7 +205,7 @@ export default class JSONConversationalExtension
private async readThread(threadDirName: string): Promise<any> {
return fs.readFileSync(
await joinPath([
- JSONConversationalExtension._homeDir,
+ JSONConversationalExtension._threadFolder,
threadDirName,
JSONConversationalExtension._threadInfoFileName,
]),
@@ -174,14 +219,14 @@ export default class JSONConversationalExtension
*/
private async getValidThreadDirs(): Promise<string[]> {
const fileInsideThread: string[] = await fs.readdirSync(
- JSONConversationalExtension._homeDir
+ JSONConversationalExtension._threadFolder
)
const threadDirs: string[] = []
for (let i = 0; i < fileInsideThread.length; i++) {
if (fileInsideThread[i].includes('.DS_Store')) continue
const path = await joinPath([
- JSONConversationalExtension._homeDir,
+ JSONConversationalExtension._threadFolder,
fileInsideThread[i],
])
@@ -201,7 +246,7 @@ export default class JSONConversationalExtension
async getAllMessages(threadId: string): Promise<ThreadMessage[]> {
try {
const threadDirPath = await joinPath([
- JSONConversationalExtension._homeDir,
+ JSONConversationalExtension._threadFolder,
threadId,
])
@@ -218,18 +263,17 @@ export default class JSONConversationalExtension
JSONConversationalExtension._threadMessagesFileName,
])
- const result = await fs
- .readFileSync(messageFilePath, 'utf-8')
- .then((content) =>
- content
- .toString()
- .split('\n')
- .filter((line) => line !== '')
- )
+ let readResult = await fs.readFileSync(messageFilePath, 'utf-8')
+
+ if (typeof readResult === 'object') {
+ readResult = JSON.stringify(readResult)
+ }
+
+ const result = readResult.split('\n').filter((line) => line !== '')
const messages: ThreadMessage[] = []
result.forEach((line: string) => {
- messages.push(JSON.parse(line) as ThreadMessage)
+ messages.push(JSON.parse(line))
})
return messages
} catch (err) {
diff --git a/extensions/inference-nitro-extension/bin/version.txt b/extensions/inference-nitro-extension/bin/version.txt
index f2722b133..c2c0004f0 100644
--- a/extensions/inference-nitro-extension/bin/version.txt
+++ b/extensions/inference-nitro-extension/bin/version.txt
@@ -1 +1 @@
-0.2.12
+0.3.5
diff --git a/extensions/inference-nitro-extension/package.json b/extensions/inference-nitro-extension/package.json
index 9379e194b..8ad516ad9 100644
--- a/extensions/inference-nitro-extension/package.json
+++ b/extensions/inference-nitro-extension/package.json
@@ -35,11 +35,12 @@
"rollup-plugin-sourcemaps": "^0.6.3",
"rollup-plugin-typescript2": "^0.36.0",
"run-script-os": "^1.1.6",
- "typescript": "^5.3.3"
+ "typescript": "^5.2.2"
},
"dependencies": {
"@janhq/core": "file:../../core",
"@rollup/plugin-replace": "^5.0.5",
+ "@types/os-utils": "^0.0.4",
"fetch-retry": "^5.0.6",
"path-browserify": "^1.0.1",
"rxjs": "^7.8.1",
diff --git a/extensions/inference-nitro-extension/rollup.config.ts b/extensions/inference-nitro-extension/rollup.config.ts
index 374a054cd..77a9fb208 100644
--- a/extensions/inference-nitro-extension/rollup.config.ts
+++ b/extensions/inference-nitro-extension/rollup.config.ts
@@ -27,6 +27,9 @@ export default [
TROUBLESHOOTING_URL: JSON.stringify(
"https://jan.ai/guides/troubleshooting"
),
+ JAN_SERVER_INFERENCE_URL: JSON.stringify(
+ "http://localhost:1337/v1/chat/completions"
+ ),
}),
// Allow json resolution
json(),
diff --git a/extensions/inference-nitro-extension/src/@types/global.d.ts b/extensions/inference-nitro-extension/src/@types/global.d.ts
index 5fb41f0f8..7a4fb4805 100644
--- a/extensions/inference-nitro-extension/src/@types/global.d.ts
+++ b/extensions/inference-nitro-extension/src/@types/global.d.ts
@@ -1,22 +1,7 @@
declare const NODE: string;
declare const INFERENCE_URL: string;
declare const TROUBLESHOOTING_URL: string;
-
-/**
- * The parameters for the initModel function.
- * @property settings - The settings for the machine learning model.
- * @property settings.ctx_len - The context length.
- * @property settings.ngl - The number of generated tokens.
- * @property settings.cont_batching - Whether to use continuous batching.
- * @property settings.embedding - Whether to use embedding.
- */
-interface EngineSettings {
- ctx_len: number;
- ngl: number;
- cpu_threads: number;
- cont_batching: boolean;
- embedding: boolean;
-}
+declare const JAN_SERVER_INFERENCE_URL: string;
/**
* The response from the initModel function.
@@ -26,8 +11,3 @@ interface ModelOperationResponse {
error?: any;
modelFile?: string;
}
-
-interface ResourcesInfo {
- numCpuPhysicalCore: number;
- memAvailable: number;
-}
\ No newline at end of file
diff --git a/extensions/inference-nitro-extension/src/helpers/sse.ts b/extensions/inference-nitro-extension/src/helpers/sse.ts
index c6352383d..aab260828 100644
--- a/extensions/inference-nitro-extension/src/helpers/sse.ts
+++ b/extensions/inference-nitro-extension/src/helpers/sse.ts
@@ -6,6 +6,7 @@ import { Observable } from "rxjs";
* @returns An Observable that emits the generated response as a string.
*/
export function requestInference(
+ inferenceUrl: string,
recentMessages: any[],
model: Model,
controller?: AbortController
@@ -17,7 +18,7 @@ export function requestInference(
stream: true,
...model.parameters,
});
- fetch(INFERENCE_URL, {
+ fetch(inferenceUrl, {
method: "POST",
headers: {
"Content-Type": "application/json",
diff --git a/extensions/inference-nitro-extension/src/index.ts b/extensions/inference-nitro-extension/src/index.ts
index 735383a61..9e96ad93f 100644
--- a/extensions/inference-nitro-extension/src/index.ts
+++ b/extensions/inference-nitro-extension/src/index.ts
@@ -24,6 +24,7 @@ import {
MessageEvent,
ModelEvent,
InferenceEvent,
+ ModelSettingParams,
} from "@janhq/core";
import { requestInference } from "./helpers/sse";
import { ulid } from "ulid";
@@ -45,12 +46,12 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
private _currentModel: Model | undefined;
- private _engineSettings: EngineSettings = {
+ private _engineSettings: ModelSettingParams = {
ctx_len: 2048,
ngl: 100,
cpu_threads: 1,
cont_batching: false,
- embedding: false,
+ embedding: true,
};
controller = new AbortController();
@@ -67,16 +68,29 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
*/
private nitroProcessInfo: any = undefined;
+ private inferenceUrl = "";
+
/**
* Subscribes to events emitted by the @janhq/core package.
*/
async onLoad() {
if (!(await fs.existsSync(JanInferenceNitroExtension._homeDir))) {
- await fs
- .mkdirSync(JanInferenceNitroExtension._homeDir)
- .catch((err: Error) => console.debug(err));
+ try {
+ await fs.mkdirSync(JanInferenceNitroExtension._homeDir);
+ } catch (e) {
+ console.debug(e);
+ }
}
+ // init inference url
+ // @ts-ignore
+ const electronApi = window?.electronAPI;
+ this.inferenceUrl = INFERENCE_URL;
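+ // Without the Electron bridge (e.g. web/server UI), route requests to the local Jan API server instead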
+ if (!electronApi) {
+ this.inferenceUrl = JAN_SERVER_INFERENCE_URL;
+ }
+ console.debug("Inference url: ", this.inferenceUrl);
+
if (!(await fs.existsSync(JanInferenceNitroExtension._settingsDir)))
await fs.mkdirSync(JanInferenceNitroExtension._settingsDir);
this.writeDefaultEngineSettings();
@@ -133,6 +147,7 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
const modelFullPath = await joinPath(["models", model.id]);
+ this._currentModel = model;
const nitroInitResult = await executeOnMain(NODE, "runModel", {
modelFullPath,
model,
@@ -143,7 +158,6 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
return;
}
- this._currentModel = model;
events.emit(ModelEvent.OnModelReady, model);
this.getNitroProcesHealthIntervalId = setInterval(
@@ -205,7 +219,11 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
return new Promise(async (resolve, reject) => {
if (!this._currentModel) return Promise.reject("No model loaded");
- requestInference(data.messages ?? [], this._currentModel).subscribe({
+ requestInference(
+ this.inferenceUrl,
+ data.messages ?? [],
+ this._currentModel
+ ).subscribe({
next: (_content: any) => {},
complete: async () => {
resolve(message);
@@ -250,7 +268,12 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
...(this._currentModel || {}),
...(data.model || {}),
};
- requestInference(data.messages ?? [], model, this.controller).subscribe({
+ requestInference(
+ this.inferenceUrl,
+ data.messages ?? [],
+ model,
+ this.controller
+ ).subscribe({
next: (content: any) => {
const messageContent: ThreadContent = {
type: ContentType.Text,
diff --git a/extensions/inference-nitro-extension/src/node/execute.ts b/extensions/inference-nitro-extension/src/node/execute.ts
index ca266639c..83b5226d4 100644
--- a/extensions/inference-nitro-extension/src/node/execute.ts
+++ b/extensions/inference-nitro-extension/src/node/execute.ts
@@ -25,12 +25,12 @@ export const executableNitroFile = (): NitroExecutableOptions => {
if (nvidiaInfo["run_mode"] === "cpu") {
binaryFolder = path.join(binaryFolder, "win-cpu");
} else {
- if (nvidiaInfo["cuda"].version === "12") {
- binaryFolder = path.join(binaryFolder, "win-cuda-12-0");
- } else {
+ if (nvidiaInfo["cuda"].version === "11") {
binaryFolder = path.join(binaryFolder, "win-cuda-11-7");
+ } else {
+ binaryFolder = path.join(binaryFolder, "win-cuda-12-0");
}
- cudaVisibleDevices = nvidiaInfo["gpu_highest_vram"];
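+ // Expose every selected GPU to the subprocess instead of only the card with the highest VRAM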
+ cudaVisibleDevices = nvidiaInfo["gpus_in_use"].join(",");
}
binaryName = "nitro.exe";
} else if (process.platform === "darwin") {
@@ -50,12 +50,12 @@ export const executableNitroFile = (): NitroExecutableOptions => {
if (nvidiaInfo["run_mode"] === "cpu") {
binaryFolder = path.join(binaryFolder, "linux-cpu");
} else {
- if (nvidiaInfo["cuda"].version === "12") {
- binaryFolder = path.join(binaryFolder, "linux-cuda-12-0");
- } else {
+ if (nvidiaInfo["cuda"].version === "11") {
binaryFolder = path.join(binaryFolder, "linux-cuda-11-7");
+ } else {
+ binaryFolder = path.join(binaryFolder, "linux-cuda-12-0");
}
- cudaVisibleDevices = nvidiaInfo["gpu_highest_vram"];
+ cudaVisibleDevices = nvidiaInfo["gpus_in_use"].join(",");
}
}
return {
diff --git a/extensions/inference-nitro-extension/src/node/index.ts b/extensions/inference-nitro-extension/src/node/index.ts
index a75f33df2..7ba90b556 100644
--- a/extensions/inference-nitro-extension/src/node/index.ts
+++ b/extensions/inference-nitro-extension/src/node/index.ts
@@ -3,11 +3,19 @@ import path from "path";
import { ChildProcessWithoutNullStreams, spawn } from "child_process";
import tcpPortUsed from "tcp-port-used";
import fetchRT from "fetch-retry";
-import { log, getJanDataFolderPath } from "@janhq/core/node";
+import {
+ log,
+ getJanDataFolderPath,
+ getSystemResourceInfo,
+} from "@janhq/core/node";
import { getNitroProcessInfo, updateNvidiaInfo } from "./nvidia";
-import { Model, InferenceEngine, ModelSettingParams } from "@janhq/core";
+import {
+ Model,
+ InferenceEngine,
+ ModelSettingParams,
+ PromptTemplate,
+} from "@janhq/core";
import { executableNitroFile } from "./execute";
-import { physicalCpuCount } from "./utils";
// Polyfill fetch with retry
const fetchRetry = fetchRT(fetch);
@@ -19,25 +27,6 @@ interface ModelInitOptions {
modelFullPath: string;
model: Model;
}
-
-/**
- * The response object of Prompt Template parsing.
- */
-interface PromptTemplate {
- system_prompt?: string;
- ai_prompt?: string;
- user_prompt?: string;
- error?: string;
-}
-
-/**
- * Model setting args for Nitro model load.
- */
-interface ModelSettingArgs extends ModelSettingParams {
- llama_model_path: string;
- cpu_threads: number;
-}
-
// The PORT to use for the Nitro subprocess
const PORT = 3928;
// The HOST address to use for the Nitro subprocess
@@ -60,7 +49,7 @@ let subprocess: ChildProcessWithoutNullStreams | undefined = undefined;
// The current model file url
let currentModelFile: string = "";
// The current model settings
-let currentSettings: ModelSettingArgs | undefined = undefined;
+let currentSettings: ModelSettingParams | undefined = undefined;
/**
* Stops a Nitro subprocess.
@@ -78,7 +67,7 @@ function stopModel(): Promise {
* TODO: Should pass absolute of the model file instead of just the name - So we can modurize the module.ts to npm package
*/
async function runModel(
- wrapper: ModelInitOptions
+ wrapper: ModelInitOptions,
): Promise<void> {
if (wrapper.model.engine !== InferenceEngine.nitro) {
// Not a nitro model
@@ -96,7 +85,7 @@ async function runModel(
const ggufBinFile = files.find(
(file) =>
file === path.basename(currentModelFile) ||
- file.toLowerCase().includes(SUPPORTED_MODEL_FORMAT)
+ file.toLowerCase().includes(SUPPORTED_MODEL_FORMAT),
);
if (!ggufBinFile) return Promise.reject("No GGUF model file found");
@@ -106,7 +95,7 @@ async function runModel(
if (wrapper.model.engine !== InferenceEngine.nitro) {
return Promise.reject("Not a nitro model");
} else {
- const nitroResourceProbe = await getResourcesInfo();
+ const nitroResourceProbe = await getSystemResourceInfo();
// Convert settings.prompt_template to system_prompt, user_prompt, ai_prompt
if (wrapper.model.settings.prompt_template) {
const promptTemplate = wrapper.model.settings.prompt_template;
@@ -133,7 +122,6 @@ async function runModel(
mmproj: path.join(modelFolderPath, wrapper.model.settings.mmproj),
}),
};
- console.log(currentSettings);
return runNitroAndLoadModel();
}
}
@@ -192,10 +180,10 @@ function promptTemplateConverter(promptTemplate: string): PromptTemplate {
const system_prompt = promptTemplate.substring(0, systemIndex);
const user_prompt = promptTemplate.substring(
systemIndex + systemMarker.length,
- promptIndex
+ promptIndex,
);
const ai_prompt = promptTemplate.substring(
- promptIndex + promptMarker.length
+ promptIndex + promptMarker.length,
);
// Return the split parts
@@ -205,7 +193,7 @@ function promptTemplateConverter(promptTemplate: string): PromptTemplate {
const promptIndex = promptTemplate.indexOf(promptMarker);
const user_prompt = promptTemplate.substring(0, promptIndex);
const ai_prompt = promptTemplate.substring(
- promptIndex + promptMarker.length
+ promptIndex + promptMarker.length,
);
// Return the split parts
@@ -221,6 +209,9 @@ function promptTemplateConverter(promptTemplate: string): PromptTemplate {
* @returns A Promise that resolves when the model is loaded successfully, or rejects with an error message if the model is not found or fails to load.
*/
function loadLLMModel(settings: any): Promise<Response> {
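+ // Default ngl (number of GPU layers to offload) to 100 when the model settings do not specify it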
+ if (!settings?.ngl) {
+ settings.ngl = 100;
+ }
log(`[NITRO]::Debug: Loading model with params ${JSON.stringify(settings)}`);
return fetchRetry(NITRO_HTTP_LOAD_MODEL_URL, {
method: "POST",
@@ -234,14 +225,14 @@ function loadLLMModel(settings: any): Promise {
.then((res) => {
log(
`[NITRO]::Debug: Load model success with response ${JSON.stringify(
- res
- )}`
+ res,
+ )}`,
);
return Promise.resolve(res);
})
.catch((err) => {
log(`[NITRO]::Error: Load model failed with error ${err}`);
- return Promise.reject();
+ return Promise.reject(err);
});
}
@@ -263,8 +254,8 @@ async function validateModelStatus(): Promise {
retryDelay: 500,
}).then(async (res: Response) => {
log(
- `[NITRO]::Debug: Validate model state success with response ${JSON.stringify(
- res
+ `[NITRO]::Debug: Validate model state with response ${JSON.stringify(
+ res.status
)}`
);
// If the response is OK, check model_loaded status.
@@ -273,9 +264,19 @@ async function validateModelStatus(): Promise {
// If the model is loaded, return an empty object.
// Otherwise, return an object with an error message.
if (body.model_loaded) {
+ log(
+ `[NITRO]::Debug: Validate model state success with response ${JSON.stringify(
+ body
+ )}`
+ );
return Promise.resolve();
}
}
+ log(
+ `[NITRO]::Debug: Validate model state failed with response ${JSON.stringify(
+ res.statusText
+ )}`
+ );
return Promise.reject("Validate model status failed");
});
}
@@ -316,7 +317,7 @@ function spawnNitroProcess(): Promise {
const args: string[] = ["1", LOCAL_HOST, PORT.toString()];
// Execute the binary
log(
- `[NITRO]::Debug: Spawn nitro at path: ${executableOptions.executablePath}, and args: ${args}`
+ `[NITRO]::Debug: Spawn nitro at path: ${executableOptions.executablePath}, and args: ${args}`,
);
subprocess = spawn(
executableOptions.executablePath,
@@ -327,7 +328,7 @@ function spawnNitroProcess(): Promise {
...process.env,
CUDA_VISIBLE_DEVICES: executableOptions.cudaVisibleDevices,
},
- }
+ },
);
// Handle subprocess output
@@ -352,22 +353,6 @@ function spawnNitroProcess(): Promise {
});
}
-/**
- * Get the system resources information
- * TODO: Move to Core so that it can be reused
- */
-function getResourcesInfo(): Promise<ResourcesInfo> {
- return new Promise(async (resolve) => {
- const cpu = await physicalCpuCount();
- log(`[NITRO]::CPU informations - ${cpu}`);
- const response: ResourcesInfo = {
- numCpuPhysicalCore: cpu,
- memAvailable: 0,
- };
- resolve(response);
- });
-}
-
/**
* Every module should have a dispose function
* This will be called when the extension is unloaded and should clean up any resources
diff --git a/extensions/inference-nitro-extension/src/node/nvidia.ts b/extensions/inference-nitro-extension/src/node/nvidia.ts
index 13e43290b..bed2856a1 100644
--- a/extensions/inference-nitro-extension/src/node/nvidia.ts
+++ b/extensions/inference-nitro-extension/src/node/nvidia.ts
@@ -19,6 +19,8 @@ const DEFALT_SETTINGS = {
},
gpus: [],
gpu_highest_vram: "",
+ gpus_in_use: [],
+ is_initial: true,
};
/**
@@ -48,11 +50,15 @@ export interface NitroProcessInfo {
*/
export async function updateNvidiaInfo() {
if (process.platform !== "darwin") {
- await Promise.all([
- updateNvidiaDriverInfo(),
- updateCudaExistence(),
- updateGpuInfo(),
- ]);
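+ // Seed the NVIDIA settings file with defaults before the driver and GPU probes read it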
+ let data;
+ try {
+ data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, "utf-8"));
+ } catch (error) {
+ data = DEFALT_SETTINGS;
+ writeFileSync(NVIDIA_INFO_FILE, JSON.stringify(data, null, 2));
+ }
+ updateNvidiaDriverInfo();
+ updateGpuInfo();
}
}
@@ -73,12 +79,7 @@ export async function updateNvidiaDriverInfo(): Promise {
exec(
"nvidia-smi --query-gpu=driver_version --format=csv,noheader",
(error, stdout) => {
- let data;
- try {
- data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, "utf-8"));
- } catch (error) {
- data = DEFALT_SETTINGS;
- }
+ let data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, "utf-8"));
if (!error) {
const firstLine = stdout.split("\n")[0].trim();
@@ -107,7 +108,7 @@ export function checkFileExistenceInPaths(
/**
* Validate cuda for linux and windows
*/
-export function updateCudaExistence() {
+export function updateCudaExistence(data: Record<string, any> = DEFALT_SETTINGS): Record<string, any> {
let filesCuda12: string[];
let filesCuda11: string[];
let paths: string[];
@@ -141,19 +142,14 @@ export function updateCudaExistence() {
cudaVersion = "12";
}
- let data;
- try {
- data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, "utf-8"));
- } catch (error) {
- data = DEFALT_SETTINGS;
- }
-
data["cuda"].exist = cudaExists;
data["cuda"].version = cudaVersion;
- if (cudaExists) {
+ console.log(data["is_initial"], data["gpus_in_use"]);
+ if (cudaExists && data["is_initial"] && data["gpus_in_use"].length > 0) {
data.run_mode = "gpu";
}
- writeFileSync(NVIDIA_INFO_FILE, JSON.stringify(data, null, 2));
+ data.is_initial = false;
+ return data;
}
/**
@@ -161,14 +157,9 @@ export function updateCudaExistence() {
*/
export async function updateGpuInfo(): Promise<void> {
exec(
- "nvidia-smi --query-gpu=index,memory.total --format=csv,noheader,nounits",
+ "nvidia-smi --query-gpu=index,memory.total,name --format=csv,noheader,nounits",
(error, stdout) => {
- let data;
- try {
- data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, "utf-8"));
- } catch (error) {
- data = DEFALT_SETTINGS;
- }
+ let data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, "utf-8"));
if (!error) {
// Get GPU info and gpu has higher memory first
@@ -178,21 +169,27 @@ export async function updateGpuInfo(): Promise {
.trim()
.split("\n")
.map((line) => {
- let [id, vram] = line.split(", ");
+ let [id, vram, name] = line.split(", ");
vram = vram.replace(/\r/g, "");
if (parseFloat(vram) > highestVram) {
highestVram = parseFloat(vram);
highestVramId = id;
}
- return { id, vram };
+ return { id, vram, name };
});
- data["gpus"] = gpus;
- data["gpu_highest_vram"] = highestVramId;
+ data.gpus = gpus;
+ data.gpu_highest_vram = highestVramId;
} else {
- data["gpus"] = [];
+ data.gpus = [];
+ data.gpu_highest_vram = "";
}
+ if (!data["gpus_in_use"] || data["gpus_in_use"].length === 0) {
+ data.gpus_in_use = [data["gpu_highest_vram"]];
+ }
+
+ data = updateCudaExistence(data);
writeFileSync(NVIDIA_INFO_FILE, JSON.stringify(data, null, 2));
Promise.resolve();
}
diff --git a/extensions/inference-nitro-extension/src/node/utils.ts b/extensions/inference-nitro-extension/src/node/utils.ts
deleted file mode 100644
index c7ef2e9a6..000000000
--- a/extensions/inference-nitro-extension/src/node/utils.ts
+++ /dev/null
@@ -1,56 +0,0 @@
-import os from "os";
-import childProcess from "child_process";
-
-function exec(command: string): Promise<string> {
- return new Promise((resolve, reject) => {
- childProcess.exec(command, { encoding: "utf8" }, (error, stdout) => {
- if (error) {
- reject(error);
- } else {
- resolve(stdout);
- }
- });
- });
-}
-
-let amount: number;
-const platform = os.platform();
-
-export async function physicalCpuCount(): Promise<number> {
- return new Promise((resolve, reject) => {
- if (platform === "linux") {
- exec('lscpu -p | egrep -v "^#" | sort -u -t, -k 2,4 | wc -l')
- .then((output) => {
- amount = parseInt(output.trim(), 10);
- resolve(amount);
- })
- .catch(reject);
- } else if (platform === "darwin") {
- exec("sysctl -n hw.physicalcpu_max")
- .then((output) => {
- amount = parseInt(output.trim(), 10);
- resolve(amount);
- })
- .catch(reject);
- } else if (platform === "win32") {
- exec("WMIC CPU Get NumberOfCores")
- .then((output) => {
- amount = output
- .split(os.EOL)
- .map((line: string) => parseInt(line))
- .filter((value: number) => !isNaN(value))
- .reduce((sum: number, number: number) => sum + number, 1);
- resolve(amount);
- })
- .catch(reject);
- } else {
- const cores = os.cpus().filter((cpu: any, index: number) => {
- const hasHyperthreading = cpu.model.includes("Intel");
- const isOdd = index % 2 === 1;
- return !hasHyperthreading || isOdd;
- });
- amount = cores.length;
- resolve(amount);
- }
- });
-}
diff --git a/extensions/inference-openai-extension/src/index.ts b/extensions/inference-openai-extension/src/index.ts
index 54572041d..fd1230bc7 100644
--- a/extensions/inference-openai-extension/src/index.ts
+++ b/extensions/inference-openai-extension/src/index.ts
@@ -15,10 +15,13 @@ import {
ThreadMessage,
events,
fs,
+ InferenceEngine,
BaseExtension,
MessageEvent,
ModelEvent,
InferenceEvent,
+ AppConfigurationEventName,
+ joinPath,
} from "@janhq/core";
import { requestInference } from "./helpers/sse";
import { ulid } from "ulid";
@@ -30,7 +33,7 @@ import { join } from "path";
* It also subscribes to events emitted by the @janhq/core package and handles new message requests.
*/
export default class JanInferenceOpenAIExtension extends BaseExtension {
- private static readonly _homeDir = "file://engines";
+ private static readonly _engineDir = "file://engines";
private static readonly _engineMetadataFileName = "openai.json";
private static _currentModel: OpenAIModel;
@@ -47,9 +50,9 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
* Subscribes to events emitted by the @janhq/core package.
*/
async onLoad() {
- if (!(await fs.existsSync(JanInferenceOpenAIExtension._homeDir))) {
+ if (!(await fs.existsSync(JanInferenceOpenAIExtension._engineDir))) {
await fs
- .mkdirSync(JanInferenceOpenAIExtension._homeDir)
+ .mkdirSync(JanInferenceOpenAIExtension._engineDir)
.catch((err) => console.debug(err));
}
@@ -57,7 +60,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
// Events subscription
events.on(MessageEvent.OnMessageSent, (data) =>
- JanInferenceOpenAIExtension.handleMessageRequest(data, this)
+ JanInferenceOpenAIExtension.handleMessageRequest(data, this),
);
events.on(ModelEvent.OnModelInit, (model: OpenAIModel) => {
@@ -70,6 +73,20 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
events.on(InferenceEvent.OnInferenceStopped, () => {
JanInferenceOpenAIExtension.handleInferenceStopped(this);
});
+
+ const settingsFilePath = await joinPath([
+ JanInferenceOpenAIExtension._engineDir,
+ JanInferenceOpenAIExtension._engineMetadataFileName,
+ ]);
+
+ events.on(
+ AppConfigurationEventName.OnConfigurationUpdate,
+ (settingsKey: string) => {
+ // Update settings on changes
+ if (settingsKey === settingsFilePath)
+ JanInferenceOpenAIExtension.writeDefaultEngineSettings();
+ },
+ );
}
/**
@@ -80,8 +97,8 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
static async writeDefaultEngineSettings() {
try {
const engineFile = join(
- JanInferenceOpenAIExtension._homeDir,
- JanInferenceOpenAIExtension._engineMetadataFileName
+ JanInferenceOpenAIExtension._engineDir,
+ JanInferenceOpenAIExtension._engineMetadataFileName,
);
if (await fs.existsSync(engineFile)) {
const engine = await fs.readFileSync(engineFile, "utf-8");
@@ -90,7 +107,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
} else {
await fs.writeFileSync(
engineFile,
- JSON.stringify(JanInferenceOpenAIExtension._engineSettings, null, 2)
+ JSON.stringify(JanInferenceOpenAIExtension._engineSettings, null, 2),
);
}
} catch (err) {
@@ -98,7 +115,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
}
}
private static async handleModelInit(model: OpenAIModel) {
- if (model.engine !== "openai") {
+ if (model.engine !== InferenceEngine.openai) {
return;
} else {
JanInferenceOpenAIExtension._currentModel = model;
@@ -116,7 +133,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
}
private static async handleInferenceStopped(
- instance: JanInferenceOpenAIExtension
+ instance: JanInferenceOpenAIExtension,
) {
instance.isCancelled = true;
instance.controller?.abort();
@@ -130,7 +147,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
*/
private static async handleMessageRequest(
data: MessageRequest,
- instance: JanInferenceOpenAIExtension
+ instance: JanInferenceOpenAIExtension,
) {
if (data.model.engine !== "openai") {
return;
@@ -160,7 +177,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
...JanInferenceOpenAIExtension._currentModel,
parameters: data.model.parameters,
},
- instance.controller
+ instance.controller,
).subscribe({
next: (content) => {
const messageContent: ThreadContent = {
@@ -181,7 +198,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
},
error: async (err) => {
if (instance.isCancelled || message.content.length > 0) {
- message.status = MessageStatus.Error;
+ message.status = MessageStatus.Stopped;
events.emit(MessageEvent.OnMessageUpdate, message);
return;
}
@@ -193,7 +210,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
},
};
message.content = [messageContent];
- message.status = MessageStatus.Ready;
+ message.status = MessageStatus.Error;
events.emit(MessageEvent.OnMessageUpdate, message);
},
});
diff --git a/extensions/inference-openai-extension/tsconfig.json b/extensions/inference-openai-extension/tsconfig.json
index b48175a16..7bfdd9009 100644
--- a/extensions/inference-openai-extension/tsconfig.json
+++ b/extensions/inference-openai-extension/tsconfig.json
@@ -3,13 +3,12 @@
"target": "es2016",
"module": "ES6",
"moduleResolution": "node",
-
"outDir": "./dist",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": false,
"skipLibCheck": true,
- "rootDir": "./src"
+ "rootDir": "./src",
},
- "include": ["./src"]
+ "include": ["./src"],
}
diff --git a/extensions/inference-triton-trtllm-extension/tsconfig.json b/extensions/inference-triton-trtllm-extension/tsconfig.json
index b48175a16..7bfdd9009 100644
--- a/extensions/inference-triton-trtllm-extension/tsconfig.json
+++ b/extensions/inference-triton-trtllm-extension/tsconfig.json
@@ -3,13 +3,12 @@
"target": "es2016",
"module": "ES6",
"moduleResolution": "node",
-
"outDir": "./dist",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": false,
"skipLibCheck": true,
- "rootDir": "./src"
+ "rootDir": "./src",
},
- "include": ["./src"]
+ "include": ["./src"],
}
diff --git a/extensions/model-extension/src/index.ts b/extensions/model-extension/src/index.ts
index 5640177a0..b9fa7731e 100644
--- a/extensions/model-extension/src/index.ts
+++ b/extensions/model-extension/src/index.ts
@@ -286,6 +286,7 @@ export default class JanModelExtension extends ModelExtension {
* model.json file associated with it.
*
* This function will create a model.json file for the model.
+ * It only works with models that consist of a single binary file.
*
* @param dirName the director which reside in ~/jan/models but does not have model.json file.
*/
@@ -302,15 +303,14 @@ export default class JanModelExtension extends ModelExtension {
let binaryFileSize: number | undefined = undefined
for (const file of files) {
- if (file.endsWith(JanModelExtension._incompletedModelFileName)) continue
- if (file.endsWith('.json')) continue
-
- const path = await joinPath([JanModelExtension._homeDir, dirName, file])
- const fileStats = await fs.fileStat(path)
- if (fileStats.isDirectory) continue
- binaryFileSize = fileStats.size
- binaryFileName = file
- break
+ if (file.endsWith(JanModelExtension._supportedModelFormat)) {
+ const path = await joinPath([JanModelExtension._homeDir, dirName, file])
+ const fileStats = await fs.fileStat(path)
+ if (fileStats.isDirectory) continue
+ binaryFileSize = fileStats.size
+ binaryFileName = file
+ break
+ }
}
if (!binaryFileName) {
@@ -318,7 +318,7 @@ export default class JanModelExtension extends ModelExtension {
return
}
- const defaultModel = await this.getDefaultModel()
+ const defaultModel = await this.getDefaultModel() as Model
if (!defaultModel) {
console.error('Unable to find default model')
return
@@ -326,8 +326,19 @@ export default class JanModelExtension extends ModelExtension {
const model: Model = {
...defaultModel,
+ // Overwrite default N/A fields
id: dirName,
name: dirName,
+ sources: [
+ {
+ url: binaryFileName,
+ filename: binaryFileName,
+ },
+ ],
+ settings: {
+ ...defaultModel.settings,
+ llama_model_path: binaryFileName,
+ },
created: Date.now(),
description: `${dirName} - user self import model`,
metadata: {
diff --git a/extensions/monitoring-extension/package.json b/extensions/monitoring-extension/package.json
index 9935e536e..20d3c485f 100644
--- a/extensions/monitoring-extension/package.json
+++ b/extensions/monitoring-extension/package.json
@@ -1,6 +1,6 @@
{
"name": "@janhq/monitoring-extension",
- "version": "1.0.9",
+ "version": "1.0.10",
"description": "This extension provides system health and OS level data",
"main": "dist/index.js",
"module": "dist/module.js",
@@ -26,6 +26,7 @@
"README.md"
],
"bundleDependencies": [
- "node-os-utils"
+ "node-os-utils",
+ "@janhq/core"
]
}
diff --git a/extensions/monitoring-extension/src/index.ts b/extensions/monitoring-extension/src/index.ts
index d3f20b437..9297a770f 100644
--- a/extensions/monitoring-extension/src/index.ts
+++ b/extensions/monitoring-extension/src/index.ts
@@ -1,5 +1,4 @@
-import { MonitoringExtension } from "@janhq/core";
-import { executeOnMain } from "@janhq/core";
+import { MonitoringExtension, executeOnMain } from "@janhq/core";
/**
* JanMonitoringExtension is a extension that provides system monitoring functionality.
diff --git a/extensions/monitoring-extension/src/module.ts b/extensions/monitoring-extension/src/module.ts
index 86b553d52..2c1b14343 100644
--- a/extensions/monitoring-extension/src/module.ts
+++ b/extensions/monitoring-extension/src/module.ts
@@ -1,4 +1,14 @@
const nodeOsUtils = require("node-os-utils");
+const getJanDataFolderPath = require("@janhq/core/node").getJanDataFolderPath;
+const path = require("path");
+const { readFileSync } = require("fs");
+const exec = require("child_process").exec;
+
+const NVIDIA_INFO_FILE = path.join(
+ getJanDataFolderPath(),
+ "settings",
+ "settings.json"
+);
const getResourcesInfo = () =>
new Promise((resolve) => {
@@ -16,18 +26,48 @@ const getResourcesInfo = () =>
});
const getCurrentLoad = () =>
- new Promise((resolve) => {
+ new Promise((resolve, reject) => {
nodeOsUtils.cpu.usage().then((cpuPercentage) => {
- const response = {
- cpu: {
- usage: cpuPercentage,
- },
+ let data = {
+ run_mode: "cpu",
+ gpus_in_use: [],
};
- resolve(response);
+ if (process.platform !== "darwin") {
+ data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, "utf-8"));
+ }
+ if (data.run_mode === "gpu" && data.gpus_in_use.length > 0) {
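+ // Only query nvidia-smi for the GPUs that are actually in use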
+ const gpuIds = data["gpus_in_use"].join(",");
+ if (gpuIds !== "") {
+ exec(
+ `nvidia-smi --query-gpu=index,name,temperature.gpu,utilization.gpu,memory.total,memory.free,utilization.memory --format=csv,noheader,nounits --id=${gpuIds}`,
+ (error, stdout, stderr) => {
+ if (error) {
+ console.error(`exec error: ${error}`);
+ reject(error);
+ return;
+ }
+ const gpuInfo = stdout.trim().split("\n").map((line) => {
+ const [id, name, temperature, utilization, memoryTotal, memoryFree, memoryUtilization] = line.split(", ").map(item => item.replace(/\r/g, ""));
+ return { id, name, temperature, utilization, memoryTotal, memoryFree, memoryUtilization };
+ });
+ resolve({
+ cpu: { usage: cpuPercentage },
+ gpu: gpuInfo
+ });
+ }
+ );
+ } else {
+ // Handle the case where gpuIds is empty
+ resolve({ cpu: { usage: cpuPercentage }, gpu: [] });
+ }
+ } else {
+ // Handle the case where run_mode is not 'gpu' or no GPUs are in use
+ resolve({ cpu: { usage: cpuPercentage }, gpu: [] });
+ }
});
});
module.exports = {
getResourcesInfo,
getCurrentLoad,
-};
+};
\ No newline at end of file
diff --git a/server/package.json b/server/package.json
index 9495a0d65..f61730da4 100644
--- a/server/package.json
+++ b/server/package.json
@@ -26,6 +26,8 @@
"dotenv": "^16.3.1",
"fastify": "^4.24.3",
"request": "^2.88.2",
+ "fetch-retry": "^5.0.6",
+ "tcp-port-used": "^1.0.2",
"request-progress": "^3.0.0"
},
"devDependencies": {
@@ -35,6 +37,7 @@
"@typescript-eslint/parser": "^6.7.3",
"eslint-plugin-react": "^7.33.2",
"run-script-os": "^1.1.6",
+ "@types/tcp-port-used": "^1.0.4",
"typescript": "^5.2.2"
}
}
diff --git a/uikit/package.json b/uikit/package.json
index 43e73dcf2..66f05840b 100644
--- a/uikit/package.json
+++ b/uikit/package.json
@@ -18,6 +18,7 @@
},
"dependencies": {
"@radix-ui/react-avatar": "^1.0.4",
+ "@radix-ui/react-checkbox": "^1.0.4",
"@radix-ui/react-context": "^1.0.1",
"@radix-ui/react-dialog": "^1.0.5",
"@radix-ui/react-icons": "^1.3.0",
diff --git a/uikit/src/button/styles.scss b/uikit/src/button/styles.scss
index 74585ed1e..003df5b4d 100644
--- a/uikit/src/button/styles.scss
+++ b/uikit/src/button/styles.scss
@@ -9,7 +9,7 @@
}
&-secondary-blue {
- @apply bg-blue-200 text-blue-600 hover:bg-blue-500/50;
+ @apply bg-blue-200 text-blue-600 hover:bg-blue-300/50 dark:hover:bg-blue-200/80;
}
&-danger {
@@ -17,7 +17,7 @@
}
&-secondary-danger {
- @apply bg-red-200 text-red-600 hover:bg-red-500/50;
+ @apply bg-red-200 text-red-600 hover:bg-red-300/50 dark:hover:bg-red-200/80;
}
&-outline {
@@ -67,14 +67,18 @@
[type='submit'] {
&.btn-primary {
@apply bg-primary hover:bg-primary/90;
+ @apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
&.btn-secondary {
@apply bg-secondary hover:bg-secondary/80;
+ @apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
&.btn-secondary-blue {
@apply bg-blue-200 text-blue-900 hover:bg-blue-200/80;
+ @apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
&.btn-danger {
@apply bg-danger hover:bg-danger/90;
+ @apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
}
diff --git a/uikit/src/checkbox/index.tsx b/uikit/src/checkbox/index.tsx
new file mode 100644
index 000000000..1e78aeafb
--- /dev/null
+++ b/uikit/src/checkbox/index.tsx
@@ -0,0 +1,29 @@
+'use client'
+
+import * as React from 'react'
+import * as CheckboxPrimitive from '@radix-ui/react-checkbox'
+import { CheckIcon } from '@radix-ui/react-icons'
+
+import { twMerge } from 'tailwind-merge'
+
+const Checkbox = React.forwardRef<
+ React.ElementRef<typeof CheckboxPrimitive.Root>,
+ React.ComponentPropsWithoutRef<typeof CheckboxPrimitive.Root>
+>(({ className, ...props }, ref) => (
+ <CheckboxPrimitive.Root
+ ref={ref}
+ className={twMerge('checkbox', className)}
+ {...props}
+ >
+ <CheckboxPrimitive.Indicator className="flex items-center justify-center">
+ <CheckIcon className="checkbox--icon" />
+ </CheckboxPrimitive.Indicator>
+ </CheckboxPrimitive.Root>
+))
+Checkbox.displayName = CheckboxPrimitive.Root.displayName
+
+export { Checkbox }
diff --git a/uikit/src/checkbox/styles.scss b/uikit/src/checkbox/styles.scss
new file mode 100644
index 000000000..33610f837
--- /dev/null
+++ b/uikit/src/checkbox/styles.scss
@@ -0,0 +1,7 @@
+.checkbox {
+ @apply border-border data-[state=checked]:bg-primary h-5 w-5 flex-shrink-0 rounded-md border data-[state=checked]:text-white;
+
+ &--icon {
+ @apply h-4 w-4;
+ }
+}
diff --git a/uikit/src/index.ts b/uikit/src/index.ts
index 3d5eaa82a..1b0a26bd1 100644
--- a/uikit/src/index.ts
+++ b/uikit/src/index.ts
@@ -12,3 +12,4 @@ export * from './command'
export * from './textarea'
export * from './select'
export * from './slider'
+export * from './checkbox'
diff --git a/uikit/src/input/styles.scss b/uikit/src/input/styles.scss
index b78db270a..9990da8b4 100644
--- a/uikit/src/input/styles.scss
+++ b/uikit/src/input/styles.scss
@@ -1,6 +1,6 @@
.input {
@apply border-border placeholder:text-muted-foreground flex h-9 w-full rounded-lg border bg-transparent px-3 py-1 transition-colors;
- @apply disabled:cursor-not-allowed disabled:bg-zinc-100;
+ @apply disabled:cursor-not-allowed disabled:bg-zinc-100 disabled:dark:bg-zinc-800 disabled:dark:text-zinc-600;
@apply focus-within:outline-none focus-visible:outline-0 focus-visible:ring-2 focus-visible:ring-blue-500 focus-visible:ring-offset-1;
@apply file:border-0 file:bg-transparent file:font-medium;
}
diff --git a/uikit/src/main.scss b/uikit/src/main.scss
index 546f22811..c1326ba19 100644
--- a/uikit/src/main.scss
+++ b/uikit/src/main.scss
@@ -16,6 +16,7 @@
@import './textarea/styles.scss';
@import './select/styles.scss';
@import './slider/styles.scss';
+@import './checkbox/styles.scss';
.animate-spin {
animation: spin 1s linear infinite;
diff --git a/uikit/src/select/styles.scss b/uikit/src/select/styles.scss
index 665ca8cba..bc5b6c0cc 100644
--- a/uikit/src/select/styles.scss
+++ b/uikit/src/select/styles.scss
@@ -1,5 +1,6 @@
.select {
- @apply placeholder:text-muted-foreground border-border flex h-9 w-full items-center justify-between whitespace-nowrap rounded-md border bg-transparent px-3 py-2 text-sm shadow-sm disabled:cursor-not-allowed disabled:opacity-50 [&>span]:line-clamp-1;
+ @apply placeholder:text-muted-foreground border-border flex h-9 w-full items-center justify-between whitespace-nowrap rounded-md border bg-transparent px-3 py-2 text-sm shadow-sm disabled:cursor-not-allowed [&>span]:line-clamp-1;
+ @apply disabled:cursor-not-allowed disabled:bg-zinc-100 disabled:dark:bg-zinc-800 disabled:dark:text-zinc-600;
@apply focus-within:outline-none focus-visible:outline-0 focus-visible:ring-2 focus-visible:ring-blue-500 focus-visible:ring-offset-1;
&-caret {
diff --git a/web/containers/CardSidebar/index.tsx b/web/containers/CardSidebar/index.tsx
index bc5047497..38a8678d9 100644
--- a/web/containers/CardSidebar/index.tsx
+++ b/web/containers/CardSidebar/index.tsx
@@ -13,10 +13,13 @@ import { useClickOutside } from '@/hooks/useClickOutside'
import { usePath } from '@/hooks/usePath'
+import { openFileTitle } from '@/utils/titleUtils'
+
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
interface Props {
children: ReactNode
+ rightAction?: ReactNode
title: string
asChild?: boolean
hideMoreVerticalAction?: boolean
@@ -25,6 +28,7 @@ export default function CardSidebar({
children,
title,
asChild,
+ rightAction,
hideMoreVerticalAction,
}: Props) {
const [show, setShow] = useState(true)
@@ -36,13 +40,6 @@ export default function CardSidebar({
useClickOutside(() => setMore(false), null, [menu, toggle])
- let openFolderTitle: string = 'Open Containing Folder'
- if (isMac) {
- openFolderTitle = 'Show in Finder'
- } else if (isWindows) {
- openFolderTitle = 'Show in File Explorer'
- }
-
return (
- {title}
-
- {!asChild && (
- <>
- {!hideMoreVerticalAction && (
- setMore(!more)}
- >
-
-
- )}
- >
- )}
+
+ {title}
+
+
+ {rightAction && rightAction}
+ {!asChild && (
+ <>
+ {!hideMoreVerticalAction && (
+ setMore(!more)}
+ >
+
+
+ )}
+ >
+ )}
{more && (
@@ -110,7 +113,7 @@ export default function CardSidebar({
{title === 'Model' ? (
- {openFolderTitle}
+ {openFileTitle()}
Opens thread.json. Changes affect this thread only.
@@ -118,7 +121,7 @@ export default function CardSidebar({
) : (
- Show in Finder
+ {openFileTitle()}
)}
>
diff --git a/web/containers/Checkbox/index.tsx b/web/containers/Checkbox/index.tsx
index e8f916d98..a545771b6 100644
--- a/web/containers/Checkbox/index.tsx
+++ b/web/containers/Checkbox/index.tsx
@@ -9,54 +9,26 @@ import {
TooltipTrigger,
} from '@janhq/uikit'
-import { useAtomValue, useSetAtom } from 'jotai'
import { InfoIcon } from 'lucide-react'
-import { useActiveModel } from '@/hooks/useActiveModel'
-import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
-
-import { getConfigurationsData } from '@/utils/componentSettings'
-import { toSettingParams } from '@/utils/modelParam'
-
-import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
-import {
- engineParamsUpdateAtom,
- getActiveThreadIdAtom,
- getActiveThreadModelParamsAtom,
-} from '@/helpers/atoms/Thread.atom'
-
type Props = {
name: string
title: string
+ enabled?: boolean
description: string
checked: boolean
+ onValueChanged?: (e: string | number | boolean) => void
}
-const Checkbox: React.FC = ({ name, title, checked, description }) => {
- const { updateModelParameter } = useUpdateModelParameters()
- const threadId = useAtomValue(getActiveThreadIdAtom)
-
- const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
-
- const modelSettingParams = toSettingParams(activeModelParams)
-
- const engineParams = getConfigurationsData(modelSettingParams)
-
- const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
-
- const serverEnabled = useAtomValue(serverEnabledAtom)
-
- const { stopModel } = useActiveModel()
-
+const Checkbox: React.FC = ({
+ title,
+ checked,
+ enabled = true,
+ description,
+ onValueChanged,
+}) => {
const onCheckedChange = (checked: boolean) => {
- if (!threadId) return
- if (engineParams.some((x) => x.name.includes(name))) {
- setEngineParamsUpdate(true)
- stopModel()
- } else {
- setEngineParamsUpdate(false)
- }
- updateModelParameter(threadId, name, checked)
+ onValueChanged?.(checked)
}
return (
@@ -80,7 +52,7 @@ const Checkbox: React.FC = ({ name, title, checked, description }) => {
)
diff --git a/web/containers/DropdownListSidebar/index.tsx b/web/containers/DropdownListSidebar/index.tsx
index fdc39063a..140a1aba1 100644
--- a/web/containers/DropdownListSidebar/index.tsx
+++ b/web/containers/DropdownListSidebar/index.tsx
@@ -26,6 +26,8 @@ import { useMainViewState } from '@/hooks/useMainViewState'
import useRecommendedModel from '@/hooks/useRecommendedModel'
+import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
+
import { toGibibytes } from '@/utils/converter'
import ModelLabel from '../ModelLabel'
@@ -34,68 +36,40 @@ import OpenAiKeyInput from '../OpenAiKeyInput'
import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
import {
- ModelParams,
activeThreadAtom,
- getActiveThreadIdAtom,
setThreadModelParamsAtom,
- threadStatesAtom,
} from '@/helpers/atoms/Thread.atom'
export const selectedModelAtom = atom<Model | undefined>(undefined)
-export default function DropdownListSidebar() {
- const activeThreadId = useAtomValue(getActiveThreadIdAtom)
+// TODO: Move all of the unscoped logic outside of the component
+const DropdownListSidebar = ({
+ strictedThread = true,
+}: {
+ strictedThread?: boolean
+}) => {
const activeThread = useAtomValue(activeThreadAtom)
- const threadStates = useAtomValue(threadStatesAtom)
const [selectedModel, setSelectedModel] = useAtom(selectedModelAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
- const { activeModel, stateModel } = useActiveModel()
+
+ const { stateModel } = useActiveModel()
const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
const { setMainViewState } = useMainViewState()
-
+ const [loader, setLoader] = useState(0)
const { recommendedModel, downloadedModels } = useRecommendedModel()
-
- /**
- * Default value for max_tokens and ctx_len
- * Its to avoid OOM issue since a model can set a big number for these settings
- */
- const defaultValue = (value?: number) => {
- if (value && value < 4096) return value
- return 4096
- }
+ const { updateModelParameter } = useUpdateModelParameters()
useEffect(() => {
- setSelectedModel(selectedModel || activeModel || recommendedModel)
+ if (!activeThread) return
- if (activeThread) {
- const finishInit = threadStates[activeThread.id].isFinishInit ?? true
- if (finishInit) return
- const modelParams: ModelParams = {
- ...recommendedModel?.parameters,
- ...recommendedModel?.settings,
- /**
- * This is to set default value for these settings instead of maximum value
- * Should only apply when model.json has these settings
- */
- ...(recommendedModel?.parameters.max_tokens && {
- max_tokens: defaultValue(recommendedModel?.parameters.max_tokens),
- }),
- ...(recommendedModel?.settings.ctx_len && {
- ctx_len: defaultValue(recommendedModel?.settings.ctx_len),
- }),
- }
- setThreadModelParams(activeThread.id, modelParams)
+ let model = downloadedModels.find(
+ (model) => model.id === activeThread.assistants[0].model.id
+ )
+ if (!model) {
+ model = recommendedModel
}
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [
- recommendedModel,
- activeThread,
- setSelectedModel,
- setThreadModelParams,
- threadStates,
- ])
-
- const [loader, setLoader] = useState(0)
+ setSelectedModel(model)
+ }, [recommendedModel, activeThread, downloadedModels, setSelectedModel])
// This is fake loader please fix this when we have realtime percentage when load model
useEffect(() => {
@@ -132,25 +106,35 @@ export default function DropdownListSidebar() {
setServerEnabled(false)
}
- if (activeThreadId) {
+ if (activeThread) {
const modelParams = {
...model?.parameters,
...model?.settings,
}
- setThreadModelParams(activeThreadId, modelParams)
+ // Update model parameter to the thread state
+ setThreadModelParams(activeThread.id, modelParams)
+
+ // Update model parameter to the thread file
+ if (model)
+ updateModelParameter(activeThread.id, {
+ params: modelParams,
+ modelId: model.id,
+ engine: model.engine,
+ })
}
},
- // eslint-disable-next-line react-hooks/exhaustive-deps
[
downloadedModels,
serverEnabled,
- activeThreadId,
- activeModel,
+ activeThread,
+ setSelectedModel,
+ setServerEnabled,
setThreadModelParams,
+ updateModelParameter,
]
)
- if (!activeThread) {
+ if (strictedThread && !activeThread) {
return null
}
@@ -236,10 +220,9 @@ export default function DropdownListSidebar() {
-
+
>
)
}
+
+export default DropdownListSidebar
diff --git a/web/containers/Layout/BottomBar/index.tsx b/web/containers/Layout/BottomBar/index.tsx
index 6e334b9ef..32dc70c70 100644
--- a/web/containers/Layout/BottomBar/index.tsx
+++ b/web/containers/Layout/BottomBar/index.tsx
@@ -26,11 +26,12 @@ import { MainViewState } from '@/constants/screens'
import { useActiveModel } from '@/hooks/useActiveModel'
import { useDownloadState } from '@/hooks/useDownloadState'
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
+
import useGetSystemResources from '@/hooks/useGetSystemResources'
import { useMainViewState } from '@/hooks/useMainViewState'
import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
+import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
const menuLinks = [
{
@@ -47,14 +48,22 @@ const menuLinks = [
const BottomBar = () => {
const { activeModel, stateModel } = useActiveModel()
- const { ram, cpu } = useGetSystemResources()
+ const { ram, cpu, gpus } = useGetSystemResources()
const progress = useAtomValue(appDownloadProgress)
- const { downloadedModels } = useGetDownloadedModels()
+ const downloadedModels = useAtomValue(downloadedModelsAtom)
+
const { setMainViewState } = useMainViewState()
const { downloadStates } = useDownloadState()
const setShowSelectModelModal = useSetAtom(showSelectModelModalAtom)
const [serverEnabled] = useAtom(serverEnabledAtom)
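+ // Convert the total/free memory figures reported by nvidia-smi into a usage percentage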
+ const calculateGpuMemoryUsage = (gpu: Record<string, string>) => {
+ const total = parseInt(gpu.memoryTotal)
+ const free = parseInt(gpu.memoryFree)
+ if (!total || !free) return 0
+ return Math.round(((total - free) / total) * 100)
+ }
+
return (
@@ -117,6 +126,17 @@ const BottomBar = () => {
+ {gpus.length > 0 && (
+
+ {gpus.map((gpu, index) => (
+
+ ))}
+
+ )}
{/* VERSION is defined by webpack, please see next.config.js */}
Jan v{VERSION ?? ''}
diff --git a/web/containers/Layout/TopBar/CommandListDownloadedModel/index.tsx b/web/containers/Layout/TopBar/CommandListDownloadedModel/index.tsx
index 3edce06eb..ac5756e9f 100644
--- a/web/containers/Layout/TopBar/CommandListDownloadedModel/index.tsx
+++ b/web/containers/Layout/TopBar/CommandListDownloadedModel/index.tsx
@@ -11,7 +11,7 @@ import {
Badge,
} from '@janhq/uikit'
-import { useAtom } from 'jotai'
+import { useAtom, useAtomValue } from 'jotai'
import { DatabaseIcon, CpuIcon } from 'lucide-react'
import { showSelectModelModalAtom } from '@/containers/Providers/KeyListener'
@@ -19,14 +19,14 @@ import { showSelectModelModalAtom } from '@/containers/Providers/KeyListener'
import { MainViewState } from '@/constants/screens'
import { useActiveModel } from '@/hooks/useActiveModel'
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
import { useMainViewState } from '@/hooks/useMainViewState'
import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
+import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
export default function CommandListDownloadedModel() {
const { setMainViewState } = useMainViewState()
- const { downloadedModels } = useGetDownloadedModels()
+ const downloadedModels = useAtomValue(downloadedModelsAtom)
const { activeModel, startModel, stopModel } = useActiveModel()
const [serverEnabled] = useAtom(serverEnabledAtom)
const [showSelectModelModal, setShowSelectModelModal] = useAtom(
diff --git a/web/containers/Layout/TopBar/index.tsx b/web/containers/Layout/TopBar/index.tsx
index ac05e4e1a..206a9013d 100644
--- a/web/containers/Layout/TopBar/index.tsx
+++ b/web/containers/Layout/TopBar/index.tsx
@@ -20,20 +20,22 @@ import { MainViewState } from '@/constants/screens'
import { useClickOutside } from '@/hooks/useClickOutside'
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
-import useGetAssistants, { getAssistants } from '@/hooks/useGetAssistants'
import { useMainViewState } from '@/hooks/useMainViewState'
import { usePath } from '@/hooks/usePath'
import { showRightSideBarAtom } from '@/screens/Chat/Sidebar'
+import { openFileTitle } from '@/utils/titleUtils'
+
+import { assistantsAtom } from '@/helpers/atoms/Assistant.atom'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
const TopBar = () => {
const activeThread = useAtomValue(activeThreadAtom)
const { mainViewState } = useMainViewState()
const { requestCreateNewThread } = useCreateNewThread()
- const { assistants } = useGetAssistants()
+ const assistants = useAtomValue(assistantsAtom)
const [showRightSideBar, setShowRightSideBar] = useAtom(showRightSideBarAtom)
const [showLeftSideBar, setShowLeftSideBar] = useAtom(showLeftSideBarAtom)
const showing = useAtomValue(showRightSideBarAtom)
@@ -59,12 +61,7 @@ const TopBar = () => {
const onCreateConversationClick = async () => {
if (assistants.length === 0) {
- const res = await getAssistants()
- if (res.length === 0) {
- alert('No assistant available')
- return
- }
- requestCreateNewThread(res[0])
+ alert('No assistant available')
} else {
requestCreateNewThread(assistants[0])
}
@@ -126,7 +123,8 @@ const TopBar = () => {
showing && 'border-l border-border'
)}
>
- {activeThread && (
+ {((activeThread && mainViewState === MainViewState.Thread) ||
+ mainViewState === MainViewState.LocalServer) && (
{showing && (
@@ -161,7 +159,7 @@ const TopBar = () => {
className="text-muted-foreground"
/>
- Show in Finder
+ {openFileTitle()}
{
/>
- Show in Finder
+ {openFileTitle()}
diff --git a/web/containers/Layout/index.tsx b/web/containers/Layout/index.tsx
index 54a7845a4..77a1fe971 100644
--- a/web/containers/Layout/index.tsx
+++ b/web/containers/Layout/index.tsx
@@ -12,7 +12,8 @@ import TopBar from '@/containers/Layout/TopBar'
import { MainViewState } from '@/constants/screens'
import { useMainViewState } from '@/hooks/useMainViewState'
-import { SUCCESS_SET_NEW_DESTINATION } from '@/hooks/useVaultDirectory'
+
+import { SUCCESS_SET_NEW_DESTINATION } from '@/screens/Settings/Advanced/DataFolder'
const BaseLayout = (props: PropsWithChildren) => {
const { children } = props
@@ -28,7 +29,7 @@ const BaseLayout = (props: PropsWithChildren) => {
if (localStorage.getItem(SUCCESS_SET_NEW_DESTINATION) === 'true') {
setMainViewState(MainViewState.Settings)
}
- }, [])
+ }, [setMainViewState])
return (
diff --git a/web/containers/Loader/GenerateResponse.tsx b/web/containers/Loader/GenerateResponse.tsx
new file mode 100644
index 000000000..457c44987
--- /dev/null
+++ b/web/containers/Loader/GenerateResponse.tsx
@@ -0,0 +1,39 @@
+import React, { useEffect, useState } from 'react'
+
+export default function GenerateResponse() {
+ const [loader, setLoader] = useState(0)
+
+  // This is a fake loader; replace it once we have a real-time percentage while loading the model
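+  // The value ticks up one step at a time, pauses briefly at 24, 50, and 78, then stays at 85.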
+ useEffect(() => {
+ if (loader === 24) {
+ setTimeout(() => {
+ setLoader(loader + 1)
+ }, 250)
+ } else if (loader === 50) {
+ setTimeout(() => {
+ setLoader(loader + 1)
+ }, 250)
+ } else if (loader === 78) {
+ setTimeout(() => {
+ setLoader(loader + 1)
+ }, 250)
+ } else if (loader === 85) {
+ setLoader(85)
+ } else {
+ setLoader(loader + 1)
+ }
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [loader])
+
+ return (
+
+
+
+ Generating response...
+
+
+ )
+}
diff --git a/web/containers/ModelConfigInput/index.tsx b/web/containers/ModelConfigInput/index.tsx
index e409fd424..d573a0bf9 100644
--- a/web/containers/ModelConfigInput/index.tsx
+++ b/web/containers/ModelConfigInput/index.tsx
@@ -7,65 +7,26 @@ import {
TooltipTrigger,
} from '@janhq/uikit'
-import { useAtomValue, useSetAtom } from 'jotai'
-
import { InfoIcon } from 'lucide-react'
-import { useActiveModel } from '@/hooks/useActiveModel'
-import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
-
-import { getConfigurationsData } from '@/utils/componentSettings'
-
-import { toSettingParams } from '@/utils/modelParam'
-
-import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
-import {
- engineParamsUpdateAtom,
- getActiveThreadIdAtom,
- getActiveThreadModelParamsAtom,
-} from '@/helpers/atoms/Thread.atom'
-
type Props = {
title: string
+ enabled?: boolean
name: string
description: string
placeholder: string
value: string
+ onValueChanged?: (e: string | number | boolean) => void
}
const ModelConfigInput: React.FC<Props> = ({
title,
- name,
+ enabled = true,
value,
description,
placeholder,
+ onValueChanged,
}) => {
- const { updateModelParameter } = useUpdateModelParameters()
- const threadId = useAtomValue(getActiveThreadIdAtom)
-
- const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
-
- const modelSettingParams = toSettingParams(activeModelParams)
-
- const engineParams = getConfigurationsData(modelSettingParams)
-
- const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
-
- const { stopModel } = useActiveModel()
-
- const serverEnabled = useAtomValue(serverEnabledAtom)
-
- const onValueChanged = (e: React.ChangeEvent) => {
- if (!threadId) return
- if (engineParams.some((x) => x.name.includes(name))) {
- setEngineParamsUpdate(true)
- stopModel()
- } else {
- setEngineParamsUpdate(false)
- }
- updateModelParameter(threadId, name, e.target.value)
- }
-
return (
@@ -86,9 +47,9 @@ const ModelConfigInput: React.FC = ({
)
diff --git a/web/containers/OpenAiKeyInput/index.tsx b/web/containers/OpenAiKeyInput/index.tsx
index abd79e6a8..444c8074f 100644
--- a/web/containers/OpenAiKeyInput/index.tsx
+++ b/web/containers/OpenAiKeyInput/index.tsx
@@ -1,16 +1,19 @@
import React, { useEffect, useState } from 'react'
-import { InferenceEngine, Model } from '@janhq/core'
+import { InferenceEngine } from '@janhq/core'
import { Input } from '@janhq/uikit'
+import { useAtomValue } from 'jotai'
+
import { useEngineSettings } from '@/hooks/useEngineSettings'
-type Props = {
- selectedModel?: Model
- serverEnabled: boolean
-}
+import { selectedModelAtom } from '../DropdownListSidebar'
-const OpenAiKeyInput: React.FC = ({ selectedModel, serverEnabled }) => {
+import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
+
+const OpenAiKeyInput: React.FC = () => {
+ const selectedModel = useAtomValue(selectedModelAtom)
+ const serverEnabled = useAtomValue(serverEnabledAtom)
const [openAISettings, setOpenAISettings] = useState<
{ api_key: string } | undefined
>(undefined)
@@ -20,8 +23,7 @@ const OpenAiKeyInput: React.FC = ({ selectedModel, serverEnabled }) => {
readOpenAISettings().then((settings) => {
setOpenAISettings(settings)
})
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
+ }, [readOpenAISettings])
if (!selectedModel || selectedModel.engine !== InferenceEngine.openai) {
return null
diff --git a/web/containers/Providers/DataLoader.tsx b/web/containers/Providers/DataLoader.tsx
new file mode 100644
index 000000000..2b6675d98
--- /dev/null
+++ b/web/containers/Providers/DataLoader.tsx
@@ -0,0 +1,21 @@
+'use client'
+
+import { Fragment, ReactNode } from 'react'
+
+import useAssistants from '@/hooks/useAssistants'
+import useModels from '@/hooks/useModels'
+import useThreads from '@/hooks/useThreads'
+
+type Props = {
+ children: ReactNode
+}
+
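+// Mounts the model, thread, and assistant loading hooks so their data is fetched into the global atoms.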
+const DataLoader: React.FC<Props> = ({ children }) => {
+ useModels()
+ useThreads()
+ useAssistants()
+
+  return <Fragment>{children}</Fragment>
+}
+
+export default DataLoader
diff --git a/web/containers/Providers/EventHandler.tsx b/web/containers/Providers/EventHandler.tsx
index 1f9d6d7af..f22ed1bc7 100644
--- a/web/containers/Providers/EventHandler.tsx
+++ b/web/containers/Providers/EventHandler.tsx
@@ -1,5 +1,5 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
-import { ReactNode, useEffect, useRef } from 'react'
+import { ReactNode, useCallback, useEffect, useRef } from 'react'
import {
events,
@@ -13,8 +13,13 @@ import {
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
-import { activeModelAtom, stateModelAtom } from '@/hooks/useActiveModel'
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
+import {
+ activeModelAtom,
+ loadModelErrorAtom,
+ stateModelAtom,
+} from '@/hooks/useActiveModel'
+
+import { queuedMessageAtom } from '@/hooks/useSendChatMessage'
import { toaster } from '../Toast'
@@ -23,22 +28,29 @@ import {
addNewMessageAtom,
updateMessageAtom,
} from '@/helpers/atoms/ChatMessage.atom'
+import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
import {
updateThreadWaitingForResponseAtom,
threadsAtom,
+ isGeneratingResponseAtom,
+ updateThreadAtom,
} from '@/helpers/atoms/Thread.atom'
export default function EventHandler({ children }: { children: ReactNode }) {
const addNewMessage = useSetAtom(addNewMessageAtom)
const updateMessage = useSetAtom(updateMessageAtom)
- const { downloadedModels } = useGetDownloadedModels()
+ const downloadedModels = useAtomValue(downloadedModelsAtom)
const setActiveModel = useSetAtom(activeModelAtom)
const setStateModel = useSetAtom(stateModelAtom)
+ const setQueuedMessage = useSetAtom(queuedMessageAtom)
+ const setLoadModelError = useSetAtom(loadModelErrorAtom)
const updateThreadWaiting = useSetAtom(updateThreadWaitingForResponseAtom)
const threads = useAtomValue(threadsAtom)
const modelsRef = useRef(downloadedModels)
const threadsRef = useRef(threads)
+ const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
+ const updateThread = useSetAtom(updateThreadAtom)
useEffect(() => {
threadsRef.current = threads
@@ -48,51 +60,72 @@ export default function EventHandler({ children }: { children: ReactNode }) {
modelsRef.current = downloadedModels
}, [downloadedModels])
- async function handleNewMessageResponse(message: ThreadMessage) {
- addNewMessage(message)
- }
+ const onNewMessageResponse = useCallback(
+ (message: ThreadMessage) => {
+ addNewMessage(message)
+ },
+ [addNewMessage]
+ )
- async function handleModelReady(model: Model) {
- setActiveModel(model)
- toaster({
- title: 'Success!',
- description: `Model ${model.id} has been started.`,
- })
- setStateModel(() => ({
- state: 'stop',
- loading: false,
- model: model.id,
- }))
- }
+ const onModelReady = useCallback(
+ (model: Model) => {
+ setActiveModel(model)
+ toaster({
+ title: 'Success!',
+ description: `Model ${model.id} has been started.`,
+ type: 'success',
+ })
+ setStateModel(() => ({
+ state: 'stop',
+ loading: false,
+ model: model.id,
+ }))
+ },
+ [setActiveModel, setStateModel]
+ )
- async function handleModelStopped() {
- setTimeout(async () => {
+ const onModelStopped = useCallback(() => {
+ setTimeout(() => {
setActiveModel(undefined)
setStateModel({ state: 'start', loading: false, model: '' })
}, 500)
- }
+ }, [setActiveModel, setStateModel])
- async function handleModelFail(res: any) {
- const errorMessage = `${res.error}`
- alert(errorMessage)
- setStateModel(() => ({
- state: 'start',
- loading: false,
- model: res.modelId,
- }))
- }
+ const onModelInitFailed = useCallback(
+ (res: any) => {
+ const errorMessage = `${res.error}`
+ console.error('Failed to load model: ' + errorMessage)
+ setLoadModelError(errorMessage)
+ setStateModel(() => ({
+ state: 'start',
+ loading: false,
+ model: res.modelId,
+ }))
+ setQueuedMessage(false)
+ },
+ [setStateModel, setQueuedMessage, setLoadModelError]
+ )
- async function handleMessageResponseUpdate(message: ThreadMessage) {
- updateMessage(
- message.id,
- message.thread_id,
- message.content,
- message.status
- )
- if (message.status !== MessageStatus.Pending) {
+ const onMessageResponseUpdate = useCallback(
+ (message: ThreadMessage) => {
+ updateMessage(
+ message.id,
+ message.thread_id,
+ message.content,
+ message.status
+ )
+ if (message.status === MessageStatus.Pending) {
+ if (message.content.length) {
+ updateThreadWaiting(message.thread_id, false)
+ setIsGeneratingResponse(false)
+ }
+ return
+ }
// Mark the thread as not waiting for response
updateThreadWaiting(message.thread_id, false)
+ setIsGeneratingResponse(false)
+
const thread = threadsRef.current?.find((e) => e.id == message.thread_id)
if (thread) {
const messageContent = message.content[0]?.text.value ?? ''
@@ -100,6 +133,12 @@ export default function EventHandler({ children }: { children: ReactNode }) {
...thread.metadata,
lastMessage: messageContent,
}
+
+ updateThread({
+ ...thread,
+ metadata,
+ })
+
extensionManager
.get(ExtensionTypeEnum.Conversational)
?.saveThread({
@@ -111,26 +150,33 @@ export default function EventHandler({ children }: { children: ReactNode }) {
.get(ExtensionTypeEnum.Conversational)
?.addNewMessage(message)
}
- }
- }
+ },
+ [updateMessage, updateThreadWaiting, setIsGeneratingResponse]
+ )
useEffect(() => {
+ console.log('Registering events')
if (window.core?.events) {
- events.on(MessageEvent.OnMessageResponse, handleNewMessageResponse)
- events.on(MessageEvent.OnMessageUpdate, handleMessageResponseUpdate)
- events.on(ModelEvent.OnModelReady, handleModelReady)
- events.on(ModelEvent.OnModelFail, handleModelFail)
- events.on(ModelEvent.OnModelStopped, handleModelStopped)
+ events.on(MessageEvent.OnMessageResponse, onNewMessageResponse)
+ events.on(MessageEvent.OnMessageUpdate, onMessageResponseUpdate)
+
+ events.on(ModelEvent.OnModelReady, onModelReady)
+ events.on(ModelEvent.OnModelFail, onModelInitFailed)
+ events.on(ModelEvent.OnModelStopped, onModelStopped)
}
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
+ }, [
+ onNewMessageResponse,
+ onMessageResponseUpdate,
+ onModelReady,
+ onModelInitFailed,
+ onModelStopped,
+ ])
useEffect(() => {
return () => {
- events.off(MessageEvent.OnMessageResponse, handleNewMessageResponse)
- events.off(MessageEvent.OnMessageUpdate, handleMessageResponseUpdate)
+ events.off(MessageEvent.OnMessageResponse, onNewMessageResponse)
+ events.off(MessageEvent.OnMessageUpdate, onMessageResponseUpdate)
}
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
+ }, [onNewMessageResponse, onMessageResponseUpdate])
  return <>{children}</>
}
diff --git a/web/containers/Providers/EventListener.tsx b/web/containers/Providers/EventListener.tsx
index 2816c88e2..5e8556f33 100644
--- a/web/containers/Providers/EventListener.tsx
+++ b/web/containers/Providers/EventListener.tsx
@@ -3,10 +3,9 @@
import { PropsWithChildren, useEffect, useRef } from 'react'
import { baseName } from '@janhq/core'
-import { useAtomValue, useSetAtom } from 'jotai'
+import { useAtom, useAtomValue, useSetAtom } from 'jotai'
import { useDownloadState } from '@/hooks/useDownloadState'
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
import { modelBinFileName } from '@/utils/model'
@@ -14,14 +13,17 @@ import EventHandler from './EventHandler'
import { appDownloadProgress } from './Jotai'
-import { downloadingModelsAtom } from '@/helpers/atoms/Model.atom'
+import {
+ downloadedModelsAtom,
+ downloadingModelsAtom,
+} from '@/helpers/atoms/Model.atom'
export default function EventListenerWrapper({ children }: PropsWithChildren) {
const setProgress = useSetAtom(appDownloadProgress)
const models = useAtomValue(downloadingModelsAtom)
const modelsRef = useRef(models)
- const { setDownloadedModels, downloadedModels } = useGetDownloadedModels()
+ const [downloadedModels, setDownloadedModels] = useAtom(downloadedModelsAtom)
const {
setDownloadState,
setDownloadStateSuccess,
@@ -105,12 +107,14 @@ export default function EventListenerWrapper({ children }: PropsWithChildren) {
})
}
return () => {}
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
+ }, [
+ setDownloadState,
+ setDownloadStateCancelled,
+ setDownloadStateFailed,
+ setDownloadStateSuccess,
+ setDownloadedModels,
+ setProgress,
+ ])
- return (
-
- {children}
-
- )
+  return <EventHandler>{children}</EventHandler>
}
diff --git a/web/containers/Providers/Jotai.tsx b/web/containers/Providers/Jotai.tsx
index 2554ce38d..103f0d9ee 100644
--- a/web/containers/Providers/Jotai.tsx
+++ b/web/containers/Providers/Jotai.tsx
@@ -9,9 +9,17 @@ type Props = {
}
export const currentPromptAtom = atom('')
+export const fileUploadAtom = atom<FileInfo[]>([])
export const appDownloadProgress = atom(-1)
export const searchAtom = atom('')
export default function JotaiWrapper({ children }: Props) {
return {children}
}
+
+export type FileType = 'image' | 'pdf'
+
+export type FileInfo = {
+ file: File
+ type: FileType
+}
diff --git a/web/containers/Providers/index.tsx b/web/containers/Providers/index.tsx
index f9726e43d..e7a179ec4 100644
--- a/web/containers/Providers/index.tsx
+++ b/web/containers/Providers/index.tsx
@@ -6,8 +6,6 @@ import { Toaster } from 'react-hot-toast'
import { TooltipProvider } from '@janhq/uikit'
-import { PostHogProvider } from 'posthog-js/react'
-
import GPUDriverPrompt from '@/containers/GPUDriverPromptModal'
import EventListenerWrapper from '@/containers/Providers/EventListener'
import JotaiWrapper from '@/containers/Providers/Jotai'
@@ -21,7 +19,11 @@ import {
setupBaseExtensions,
} from '@/services/extensionService'
-import { instance } from '@/utils/posthog'
+import Umami from '@/utils/umami'
+
+import Loader from '../Loader'
+
+import DataLoader from './DataLoader'
import KeyListener from './KeyListener'
@@ -32,6 +34,7 @@ const Providers = (props: PropsWithChildren) => {
const [setupCore, setSetupCore] = useState(false)
const [activated, setActivated] = useState(false)
+ const [settingUp, setSettingUp] = useState(false)
async function setupExtensions() {
// Register all active extensions
@@ -39,11 +42,13 @@ const Providers = (props: PropsWithChildren) => {
setTimeout(async () => {
if (!isCoreExtensionInstalled()) {
- setupBaseExtensions()
+ setSettingUp(true)
+ await setupBaseExtensions()
return
}
extensionManager.load()
+ setSettingUp(false)
setActivated(true)
}, 500)
}
@@ -70,25 +75,25 @@ const Providers = (props: PropsWithChildren) => {
}, [setupCore])
return (
-
-
-
- {setupCore && activated && (
-
-
-
-
- {children}
-
- {!isMac && }
-
-
-
-
- )}
-
-
-
+
+
+
+ {settingUp && }
+ {setupCore && activated && (
+
+
+
+
+ {children}
+
+ {!isMac && }
+
+
+
+
+ )}
+
+
)
}
diff --git a/web/containers/Shortcut/index.tsx b/web/containers/Shortcut/index.tsx
index 6153f48a7..dd0518b56 100644
--- a/web/containers/Shortcut/index.tsx
+++ b/web/containers/Shortcut/index.tsx
@@ -1,6 +1,6 @@
export default function ShortCut(props: { menu: string }) {
const { menu } = props
- const symbol = isMac ? '⌘' : 'Ctrl'
+ const symbol = isMac ? '⌘' : 'Ctrl + '
return (
diff --git a/web/containers/SliderRightPanel/index.tsx b/web/containers/SliderRightPanel/index.tsx
index d9ed00f83..7c017e70f 100644
--- a/web/containers/SliderRightPanel/index.tsx
+++ b/web/containers/SliderRightPanel/index.tsx
@@ -9,74 +9,36 @@ import {
TooltipPortal,
TooltipTrigger,
} from '@janhq/uikit'
-import { useAtomValue, useSetAtom } from 'jotai'
import { InfoIcon } from 'lucide-react'
-import { useActiveModel } from '@/hooks/useActiveModel'
import { useClickOutside } from '@/hooks/useClickOutside'
-import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
-
-import { getConfigurationsData } from '@/utils/componentSettings'
-import { toSettingParams } from '@/utils/modelParam'
-
-import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
-import {
- engineParamsUpdateAtom,
- getActiveThreadIdAtom,
- getActiveThreadModelParamsAtom,
-} from '@/helpers/atoms/Thread.atom'
-
type Props = {
name: string
title: string
+ enabled: boolean
description: string
min: number
max: number
step: number
value: number
+ onValueChanged: (e: string | number | boolean) => void
}
const SliderRightPanel: React.FC<Props> = ({
- name,
title,
+ enabled,
min,
max,
step,
description,
value,
+ onValueChanged,
}) => {
- const { updateModelParameter } = useUpdateModelParameters()
- const threadId = useAtomValue(getActiveThreadIdAtom)
-
- const serverEnabled = useAtomValue(serverEnabledAtom)
-
- const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
-
- const modelSettingParams = toSettingParams(activeModelParams)
-
- const engineParams = getConfigurationsData(modelSettingParams)
-
- const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
-
- const { stopModel } = useActiveModel()
-
const [showTooltip, setShowTooltip] = useState({ max: false, min: false })
useClickOutside(() => setShowTooltip({ max: false, min: false }), null, [])
-
- const onValueChanged = (e: number[]) => {
- if (!threadId) return
- if (engineParams.some((x) => x.name.includes(name))) {
- setEngineParamsUpdate(true)
- stopModel()
- } else {
- setEngineParamsUpdate(false)
- }
- updateModelParameter(threadId, name, e[0])
- }
-
return (
@@ -99,11 +61,11 @@ const SliderRightPanel: React.FC = ({
onValueChanged?.(e[0])}
min={min}
max={max}
step={step}
- disabled={serverEnabled}
+ disabled={!enabled}
/>
{min}
@@ -118,18 +80,18 @@ const SliderRightPanel: React.FC = ({
min={min}
max={max}
value={String(value)}
- disabled={serverEnabled}
+ disabled={!enabled}
onBlur={(e) => {
if (Number(e.target.value) > Number(max)) {
- onValueChanged([Number(max)])
+ onValueChanged?.(Number(max))
setShowTooltip({ max: true, min: false })
} else if (Number(e.target.value) < Number(min)) {
- onValueChanged([Number(min)])
+ onValueChanged?.(Number(min))
setShowTooltip({ max: false, min: true })
}
}}
onChange={(e) => {
- onValueChanged([Number(e.target.value)])
+ onValueChanged?.(Number(e.target.value))
}}
/>
diff --git a/web/containers/Toast/index.tsx b/web/containers/Toast/index.tsx
index c5e5f03da..eae340fee 100644
--- a/web/containers/Toast/index.tsx
+++ b/web/containers/Toast/index.tsx
@@ -6,7 +6,99 @@ import { twMerge } from 'tailwind-merge'
type Props = {
title?: string
description?: string
- type?: 'default' | 'error' | 'success'
+ type?: 'default' | 'error' | 'success' | 'warning'
+}
+
+const ErrorIcon = () => {
+ return (
+
+ )
+}
+
+const WarningIcon = () => {
+ return (
+
+ )
+}
+
+const SuccessIcon = () => {
+ return (
+
+ )
+}
+
+const DefaultIcon = () => {
+ return (
+
+ )
+}
+
+const renderIcon = (type: string) => {
+ switch (type) {
+ case 'warning':
+      return <WarningIcon />
+
+ case 'error':
+      return <ErrorIcon />
+
+ case 'success':
+      return <SuccessIcon />
+
+ default:
+      return <DefaultIcon />
+ }
}
export function toaster(props: Props) {
@@ -16,37 +108,52 @@ export function toaster(props: Props) {
return (
-
-
- {title}
-
-
- {description}
-
+
+ {renderIcon(type)}
+
+ {title}
+ {description}
+
+ toast.dismiss(t.id)}
+ />
- toast.dismiss(t.id)}
- />
)
},
- { id: 'toast', duration: 3000 }
+ { id: 'toast', duration: 2000, position: 'top-right' }
+ )
+}
+
+export function snackbar(props: Props) {
+ const { description, type = 'default' } = props
+ return toast.custom(
+ (t) => {
+ return (
+
+
+ {renderIcon(type)}
+ {description}
+ toast.dismiss(t.id)}
+ />
+
+
+ )
+ },
+ { id: 'snackbar', duration: 2000, position: 'bottom-center' }
)
}
diff --git a/web/helpers/atoms/Assistant.atom.ts b/web/helpers/atoms/Assistant.atom.ts
new file mode 100644
index 000000000..e90923d3d
--- /dev/null
+++ b/web/helpers/atoms/Assistant.atom.ts
@@ -0,0 +1,4 @@
+import { Assistant } from '@janhq/core/.'
+import { atom } from 'jotai'
+
+export const assistantsAtom = atom<Assistant[]>([])
diff --git a/web/helpers/atoms/Model.atom.ts b/web/helpers/atoms/Model.atom.ts
index 6eb7f2ad6..5c9188ad7 100644
--- a/web/helpers/atoms/Model.atom.ts
+++ b/web/helpers/atoms/Model.atom.ts
@@ -24,3 +24,7 @@ export const removeDownloadingModelAtom = atom(
)
}
)
+
+export const downloadedModelsAtom = atom<Model[]>([])
+
+export const configuredModelsAtom = atom<Model[]>([])
diff --git a/web/helpers/atoms/SystemBar.atom.ts b/web/helpers/atoms/SystemBar.atom.ts
index aa5e77d58..22a7573ec 100644
--- a/web/helpers/atoms/SystemBar.atom.ts
+++ b/web/helpers/atoms/SystemBar.atom.ts
@@ -2,5 +2,8 @@ import { atom } from 'jotai'
export const totalRamAtom = atom(0)
export const usedRamAtom = atom(0)
+export const availableRamAtom = atom(0)
export const cpuUsageAtom = atom(0)
+
+export const nvidiaTotalVramAtom = atom(0)
diff --git a/web/helpers/atoms/Thread.atom.ts b/web/helpers/atoms/Thread.atom.ts
index 26b1e9c59..cab286bd1 100644
--- a/web/helpers/atoms/Thread.atom.ts
+++ b/web/helpers/atoms/Thread.atom.ts
@@ -23,6 +23,7 @@ export const setActiveThreadIdAtom = atom(
export const waitingToSendMessage = atom(undefined)
+export const isGeneratingResponseAtom = atom<boolean | undefined>(undefined)
/**
* Stores all thread states for the current user
*/
@@ -46,18 +47,6 @@ export const deleteThreadStateAtom = atom(
}
)
-export const updateThreadInitSuccessAtom = atom(
- null,
- (get, set, threadId: string) => {
- const currentState = { ...get(threadStatesAtom) }
- currentState[threadId] = {
- ...currentState[threadId],
- isFinishInit: true,
- }
- set(threadStatesAtom, currentState)
- }
-)
-
export const updateThreadWaitingForResponseAtom = atom(
null,
(get, set, threadId: string, waitingForResponse: boolean) => {
diff --git a/web/hooks/useActiveModel.ts b/web/hooks/useActiveModel.ts
index 336f0be21..1b61a0dd1 100644
--- a/web/hooks/useActiveModel.ts
+++ b/web/hooks/useActiveModel.ts
@@ -1,14 +1,15 @@
import { events, Model, ModelEvent } from '@janhq/core'
-import { atom, useAtom, useAtomValue } from 'jotai'
+import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { toaster } from '@/containers/Toast'
-import { useGetDownloadedModels } from './useGetDownloadedModels'
import { LAST_USED_MODEL_ID } from './useRecommendedModel'
+import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
export const activeModelAtom = atom(undefined)
+export const loadModelErrorAtom = atom<string | undefined>(undefined)
export const stateModelAtom = atom({
state: 'start',
@@ -20,7 +21,8 @@ export function useActiveModel() {
const [activeModel, setActiveModel] = useAtom(activeModelAtom)
const activeThread = useAtomValue(activeThreadAtom)
const [stateModel, setStateModel] = useAtom(stateModelAtom)
- const { downloadedModels } = useGetDownloadedModels()
+ const downloadedModels = useAtomValue(downloadedModelsAtom)
+ const setLoadModelError = useSetAtom(loadModelErrorAtom)
const startModel = async (modelId: string) => {
if (
@@ -31,6 +33,7 @@ export function useActiveModel() {
return
}
    // TODO: in case we have multiple assistants, the configuration will come from the assistant
+ setLoadModelError(undefined)
setActiveModel(undefined)
@@ -42,6 +45,7 @@ export function useActiveModel() {
toaster({
title: `Model ${modelId} not found!`,
description: `Please download the model first.`,
+ type: 'warning',
})
setStateModel(() => ({
state: 'start',
diff --git a/web/hooks/useAssistants.ts b/web/hooks/useAssistants.ts
new file mode 100644
index 000000000..8f2c4a92c
--- /dev/null
+++ b/web/hooks/useAssistants.ts
@@ -0,0 +1,28 @@
+import { useEffect } from 'react'
+
+import { Assistant, AssistantExtension, ExtensionTypeEnum } from '@janhq/core'
+
+import { useSetAtom } from 'jotai'
+
+import { extensionManager } from '@/extension'
+import { assistantsAtom } from '@/helpers/atoms/Assistant.atom'
+
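+// Fetches the assistant list from the assistant extension on mount and stores it in assistantsAtom.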
+const useAssistants = () => {
+ const setAssistants = useSetAtom(assistantsAtom)
+
+ useEffect(() => {
+ const getAssistants = async () => {
+ const assistants = await getLocalAssistants()
+ setAssistants(assistants)
+ }
+
+ getAssistants()
+ }, [setAssistants])
+}
+
+const getLocalAssistants = async (): Promise<Assistant[]> =>
+  extensionManager
+    .get<AssistantExtension>(ExtensionTypeEnum.Assistant)
+    ?.getAssistants() ?? []
+
+export default useAssistants
diff --git a/web/hooks/useCreateNewThread.ts b/web/hooks/useCreateNewThread.ts
index e374d0cc5..12a5e04ca 100644
--- a/web/hooks/useCreateNewThread.ts
+++ b/web/hooks/useCreateNewThread.ts
@@ -9,17 +9,22 @@ import {
} from '@janhq/core'
import { atom, useAtomValue, useSetAtom } from 'jotai'
+import { selectedModelAtom } from '@/containers/DropdownListSidebar'
+import { fileUploadAtom } from '@/containers/Providers/Jotai'
+
import { generateThreadId } from '@/utils/thread'
-import useDeleteThread from './useDeleteThread'
+import useRecommendedModel from './useRecommendedModel'
+
+import useSetActiveThread from './useSetActiveThread'
import { extensionManager } from '@/extension'
+
import {
threadsAtom,
- setActiveThreadIdAtom,
threadStatesAtom,
updateThreadAtom,
- updateThreadInitSuccessAtom,
+ setThreadModelParamsAtom,
} from '@/helpers/atoms/Thread.atom'
const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
@@ -30,7 +35,6 @@ const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
hasMore: false,
waitingForResponse: false,
lastMessage: undefined,
- isFinishInit: false,
}
currentState[newThread.id] = threadState
set(threadStatesAtom, currentState)
@@ -41,45 +45,44 @@ const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
})
export const useCreateNewThread = () => {
- const threadStates = useAtomValue(threadStatesAtom)
- const updateThreadFinishInit = useSetAtom(updateThreadInitSuccessAtom)
const createNewThread = useSetAtom(createNewThreadAtom)
- const setActiveThreadId = useSetAtom(setActiveThreadIdAtom)
+ const { setActiveThread } = useSetActiveThread()
const updateThread = useSetAtom(updateThreadAtom)
+ const setFileUpload = useSetAtom(fileUploadAtom)
+ const setSelectedModel = useSetAtom(selectedModelAtom)
+ const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
- const { deleteThread } = useDeleteThread()
+ const { recommendedModel, downloadedModels } = useRecommendedModel()
+
+ const threads = useAtomValue(threadsAtom)
const requestCreateNewThread = async (
assistant: Assistant,
model?: Model | undefined
) => {
- // loop through threads state and filter if there's any thread that is not finish init
- let unfinishedInitThreadId: string | undefined = undefined
- for (const key in threadStates) {
- const isFinishInit = threadStates[key].isFinishInit ?? true
- if (!isFinishInit) {
- unfinishedInitThreadId = key
- break
- }
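+    // Pick the model for the new thread: the explicitly passed model, else the recommended one, else the first downloaded model.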
+ const defaultModel = model ?? recommendedModel ?? downloadedModels[0]
+
+    // Check the latest thread's last message; if it is empty, the user cannot create another new thread
+ const lastMessage = threads[0]?.metadata?.lastMessage
+
+ if (!lastMessage && threads.length) {
+ return null
}
- if (unfinishedInitThreadId) {
- await deleteThread(unfinishedInitThreadId)
- }
-
- const modelId = model ? model.id : '*'
const createdAt = Date.now()
const assistantInfo: ThreadAssistantInfo = {
assistant_id: assistant.id,
assistant_name: assistant.name,
+ tools: assistant.tools,
model: {
- id: modelId,
- settings: {},
- parameters: {},
- engine: undefined,
+ id: defaultModel?.id ?? '*',
+ settings: defaultModel?.settings ?? {},
+ parameters: defaultModel?.parameters ?? {},
+ engine: defaultModel?.engine,
},
instructions: assistant.instructions,
}
+
const threadId = generateThreadId(assistant.id)
const thread: Thread = {
id: threadId,
@@ -91,19 +94,27 @@ export const useCreateNewThread = () => {
}
// add the new thread on top of the thread list to the state
+    // TODO: Why do we have both a thread list and separate thread states? They should be combined
createNewThread(thread)
- setActiveThreadId(thread.id)
+
+ setSelectedModel(defaultModel)
+ setThreadModelParams(thread.id, {
+ ...defaultModel?.settings,
+ ...defaultModel?.parameters,
+ })
+
+ // Delete the file upload state
+ setFileUpload([])
+ // Update thread metadata
+ await updateThreadMetadata(thread)
+
+ setActiveThread(thread)
}
- function updateThreadMetadata(thread: Thread) {
+ async function updateThreadMetadata(thread: Thread) {
updateThread(thread)
- const threadState = threadStates[thread.id]
- const isFinishInit = threadState?.isFinishInit ?? true
- if (!isFinishInit) {
- updateThreadFinishInit(thread.id)
- }
- extensionManager
+ await extensionManager
.get(ExtensionTypeEnum.Conversational)
?.saveThread(thread)
}
diff --git a/web/hooks/useDeleteModel.ts b/web/hooks/useDeleteModel.ts
index cd7292997..d9f2b94be 100644
--- a/web/hooks/useDeleteModel.ts
+++ b/web/hooks/useDeleteModel.ts
@@ -1,13 +1,14 @@
import { ExtensionTypeEnum, ModelExtension, Model } from '@janhq/core'
+import { useAtom } from 'jotai'
+
import { toaster } from '@/containers/Toast'
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
-
import { extensionManager } from '@/extension/ExtensionManager'
+import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
export default function useDeleteModel() {
- const { setDownloadedModels, downloadedModels } = useGetDownloadedModels()
+ const [downloadedModels, setDownloadedModels] = useAtom(downloadedModelsAtom)
const deleteModel = async (model: Model) => {
await extensionManager
@@ -19,6 +20,7 @@ export default function useDeleteModel() {
toaster({
title: 'Model Deletion Successful',
description: `The model ${model.id} has been successfully deleted.`,
+ type: 'success',
})
}
diff --git a/web/hooks/useDeleteThread.ts b/web/hooks/useDeleteThread.ts
index 84dd8a468..87cee125d 100644
--- a/web/hooks/useDeleteThread.ts
+++ b/web/hooks/useDeleteThread.ts
@@ -21,7 +21,6 @@ import {
threadsAtom,
setActiveThreadIdAtom,
deleteThreadStateAtom,
- threadStatesAtom,
updateThreadStateLastMessageAtom,
} from '@/helpers/atoms/Thread.atom'
@@ -34,7 +33,6 @@ export default function useDeleteThread() {
const deleteMessages = useSetAtom(deleteChatMessagesAtom)
const cleanMessages = useSetAtom(cleanChatMessagesAtom)
const deleteThreadState = useSetAtom(deleteThreadStateAtom)
- const threadStates = useAtomValue(threadStatesAtom)
const updateThreadLastMessage = useSetAtom(updateThreadStateLastMessageAtom)
const cleanThread = async (threadId: string) => {
@@ -49,6 +47,14 @@ export default function useDeleteThread() {
threadId,
messages.filter((msg) => msg.role === ChatCompletionRole.System)
)
+
+ thread.metadata = {
+ ...thread.metadata,
+ lastMessage: undefined,
+ }
+ await extensionManager
+        .get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
+ ?.saveThread(thread)
updateThreadLastMessage(threadId, undefined)
}
}
@@ -66,21 +72,16 @@ export default function useDeleteThread() {
const availableThreads = threads.filter((c) => c.id !== threadId)
setThreads(availableThreads)
- const deletingThreadState = threadStates[threadId]
- const isFinishInit = deletingThreadState?.isFinishInit ?? true
-
// delete the thread state
deleteThreadState(threadId)
- if (isFinishInit) {
- deleteMessages(threadId)
- setCurrentPrompt('')
- toaster({
- title: 'Thread successfully deleted.',
- description: `Thread ${threadId} has been successfully deleted.`,
- })
- }
-
+ deleteMessages(threadId)
+ setCurrentPrompt('')
+ toaster({
+ title: 'Thread successfully deleted.',
+ description: `Thread ${threadId} has been successfully deleted.`,
+ type: 'success',
+ })
if (availableThreads.length > 0) {
setActiveThreadId(availableThreads[0].id)
} else {
diff --git a/web/hooks/useDownloadState.ts b/web/hooks/useDownloadState.ts
index d39ab5e58..37f41d2a1 100644
--- a/web/hooks/useDownloadState.ts
+++ b/web/hooks/useDownloadState.ts
@@ -26,6 +26,7 @@ const setDownloadStateSuccessAtom = atom(null, (get, set, modelId: string) => {
toaster({
title: 'Download Completed',
description: `Download ${modelId} completed`,
+ type: 'success',
})
})
@@ -61,6 +62,7 @@ const setDownloadStateCancelledAtom = atom(
toaster({
title: 'Cancel Download',
description: `Model ${modelId} cancel download`,
+ type: 'warning',
})
return
diff --git a/web/hooks/useEngineSettings.ts b/web/hooks/useEngineSettings.ts
index 258a89aa4..4a17f91df 100644
--- a/web/hooks/useEngineSettings.ts
+++ b/web/hooks/useEngineSettings.ts
@@ -1,7 +1,9 @@
-import { fs, joinPath } from '@janhq/core'
+import { useCallback } from 'react'
+
+import { fs, joinPath, events, AppConfigurationEventName } from '@janhq/core'
export const useEngineSettings = () => {
- const readOpenAISettings = async () => {
+ const readOpenAISettings = useCallback(async () => {
if (
!(await fs.existsSync(await joinPath(['file://engines', 'openai.json'])))
)
@@ -14,17 +16,24 @@ export const useEngineSettings = () => {
return typeof settings === 'object' ? settings : JSON.parse(settings)
}
return {}
- }
+ }, [])
+
const saveOpenAISettings = async ({
apiKey,
}: {
apiKey: string | undefined
}) => {
const settings = await readOpenAISettings()
+ const settingFilePath = await joinPath(['file://engines', 'openai.json'])
+
settings.api_key = apiKey
- await fs.writeFileSync(
- await joinPath(['file://engines', 'openai.json']),
- JSON.stringify(settings)
+
+ await fs.writeFileSync(settingFilePath, JSON.stringify(settings))
+
+    // Security: don't attach the settings data to the event
+ events.emit(
+ AppConfigurationEventName.OnConfigurationUpdate,
+ settingFilePath
)
}
return { readOpenAISettings, saveOpenAISettings }
diff --git a/web/hooks/useFactoryReset.ts b/web/hooks/useFactoryReset.ts
new file mode 100644
index 000000000..56994d4c4
--- /dev/null
+++ b/web/hooks/useFactoryReset.ts
@@ -0,0 +1,59 @@
+import { useEffect, useState } from 'react'
+
+import { fs, AppConfiguration, joinPath, getUserHomePath } from '@janhq/core'
+
+export default function useFactoryReset() {
+ const [defaultJanDataFolder, setdefaultJanDataFolder] = useState('')
+
+ useEffect(() => {
+ async function getDefaultJanDataFolder() {
+ const homePath = await getUserHomePath()
+ const defaultJanDataFolder = await joinPath([homePath, 'jan'])
+ setdefaultJanDataFolder(defaultJanDataFolder)
+ }
+ getDefaultJanDataFolder()
+ }, [])
+
+ const resetAll = async (keepCurrentFolder?: boolean) => {
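+    // Optionally restores the default data folder location, removes all models and threads, clears localStorage, and relaunches the app.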
+    // Read the location of the Jan data folder
+ const appConfiguration: AppConfiguration | undefined =
+ await window.core?.api?.getAppConfigurations()
+
+ if (!appConfiguration) {
+ console.debug('Failed to get app configuration')
+ }
+
+ console.debug('appConfiguration: ', appConfiguration)
+ const janDataFolderPath = appConfiguration!.data_folder
+
+ if (defaultJanDataFolder === janDataFolderPath) {
+ console.debug('Jan data folder is already at user home')
+ } else {
+ // if jan data folder is not at user home, we update the app configuration to point to user home
+ if (!keepCurrentFolder) {
+ const configuration: AppConfiguration = {
+ data_folder: defaultJanDataFolder,
+ }
+ await window.core?.api?.updateAppConfiguration(configuration)
+ }
+ }
+
+ const modelPath = await joinPath([janDataFolderPath, 'models'])
+ const threadPath = await joinPath([janDataFolderPath, 'threads'])
+
+ console.debug(`Removing models at ${modelPath}`)
+ await fs.rmdirSync(modelPath, { recursive: true })
+
+ console.debug(`Removing threads at ${threadPath}`)
+ await fs.rmdirSync(threadPath, { recursive: true })
+
+ // reset the localStorage
+ localStorage.clear()
+ await window.core?.api?.relaunch()
+ }
+
+ return {
+ defaultJanDataFolder,
+ resetAll,
+ }
+}
diff --git a/web/hooks/useGetAssistants.ts b/web/hooks/useGetAssistants.ts
deleted file mode 100644
index 2b34bfbd1..000000000
--- a/web/hooks/useGetAssistants.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-import { useEffect, useState } from 'react'
-
-import { Assistant, ExtensionTypeEnum, AssistantExtension } from '@janhq/core'
-
-import { extensionManager } from '@/extension/ExtensionManager'
-
-export const getAssistants = async (): Promise =>
- extensionManager
- .get(ExtensionTypeEnum.Assistant)
- ?.getAssistants() ?? []
-
-/**
- * Hooks for get assistants
- *
- * @returns assistants
- */
-export default function useGetAssistants() {
- const [assistants, setAssistants] = useState([])
-
- useEffect(() => {
- getAssistants()
- .then((data) => setAssistants(data))
- .catch((err) => console.error(err))
- }, [])
-
- return { assistants }
-}
diff --git a/web/hooks/useGetConfiguredModels.ts b/web/hooks/useGetConfiguredModels.ts
deleted file mode 100644
index 919f43754..000000000
--- a/web/hooks/useGetConfiguredModels.ts
+++ /dev/null
@@ -1,31 +0,0 @@
-import { useEffect, useState } from 'react'
-
-import { ExtensionTypeEnum, ModelExtension, Model } from '@janhq/core'
-
-import { extensionManager } from '@/extension/ExtensionManager'
-
-export function useGetConfiguredModels() {
- const [loading, setLoading] = useState(false)
- const [models, setModels] = useState([])
-
- const getConfiguredModels = async (): Promise => {
- const models = await extensionManager
- .get(ExtensionTypeEnum.Model)
- ?.getConfiguredModels()
- return models ?? []
- }
-
- async function fetchModels() {
- setLoading(true)
- const models = await getConfiguredModels()
- setLoading(false)
- setModels(models)
- }
-
- useEffect(() => {
- fetchModels()
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
-
- return { loading, models }
-}
diff --git a/web/hooks/useGetDownloadedModels.ts b/web/hooks/useGetDownloadedModels.ts
deleted file mode 100644
index bba420858..000000000
--- a/web/hooks/useGetDownloadedModels.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-import { useEffect } from 'react'
-
-import { ExtensionTypeEnum, ModelExtension, Model } from '@janhq/core'
-
-import { atom, useAtom } from 'jotai'
-
-import { extensionManager } from '@/extension/ExtensionManager'
-
-export const downloadedModelsAtom = atom([])
-
-export function useGetDownloadedModels() {
- const [downloadedModels, setDownloadedModels] = useAtom(downloadedModelsAtom)
-
- useEffect(() => {
- getDownloadedModels().then((downloadedModels) => {
- setDownloadedModels(downloadedModels)
- })
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
-
- return { downloadedModels, setDownloadedModels }
-}
-
-export const getDownloadedModels = async (): Promise =>
- extensionManager
- .get(ExtensionTypeEnum.Model)
- ?.getDownloadedModels() ?? []
diff --git a/web/hooks/useGetSystemResources.ts b/web/hooks/useGetSystemResources.ts
index 8dffa8eb4..3f71040d7 100644
--- a/web/hooks/useGetSystemResources.ts
+++ b/web/hooks/useGetSystemResources.ts
@@ -6,17 +6,23 @@ import { useSetAtom } from 'jotai'
import { extensionManager } from '@/extension/ExtensionManager'
import {
+ availableRamAtom,
cpuUsageAtom,
totalRamAtom,
usedRamAtom,
+ nvidiaTotalVramAtom,
} from '@/helpers/atoms/SystemBar.atom'
export default function useGetSystemResources() {
const [ram, setRam] = useState(0)
const [cpu, setCPU] = useState(0)
+
+ const [gpus, setGPUs] = useState[]>([])
const setTotalRam = useSetAtom(totalRamAtom)
const setUsedRam = useSetAtom(usedRamAtom)
+ const setAvailableRam = useSetAtom(availableRamAtom)
const setCpuUsage = useSetAtom(cpuUsageAtom)
+ const setTotalNvidiaVram = useSetAtom(nvidiaTotalVramAtom)
const getSystemResources = async () => {
if (
@@ -40,19 +46,36 @@ export default function useGetSystemResources() {
setTotalRam(resourceInfor.mem.totalMemory)
setRam(Math.round(ram * 100))
+ if (resourceInfor.mem.totalMemory && resourceInfor.mem.usedMemory)
+ setAvailableRam(
+ resourceInfor.mem.totalMemory - resourceInfor.mem.usedMemory
+ )
setCPU(Math.round(currentLoadInfor?.cpu?.usage ?? 0))
setCpuUsage(Math.round(currentLoadInfor?.cpu?.usage ?? 0))
+
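+      // Collect the reported GPUs and sum their VRAM for the total NVIDIA VRAM atom.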
+ const gpus = currentLoadInfor?.gpu ?? []
+ setGPUs(gpus)
+
+ let totalNvidiaVram = 0
+ if (gpus.length > 0) {
+ totalNvidiaVram = gpus.reduce(
+ (total: number, gpu: { memoryTotal: string }) =>
+ total + Number(gpu.memoryTotal),
+ 0
+ )
+ }
+ setTotalNvidiaVram(totalNvidiaVram)
}
useEffect(() => {
getSystemResources()
- // Fetch interval - every 0.5s
+    // Fetch interval - every 5s
// TODO: Will we really need this?
// There is a possibility that this will be removed and replaced by the process event hook?
const intervalId = setInterval(() => {
getSystemResources()
- }, 500)
+ }, 5000)
// clean up interval
return () => clearInterval(intervalId)
@@ -63,5 +86,6 @@ export default function useGetSystemResources() {
totalRamAtom,
ram,
cpu,
+ gpus,
}
}
diff --git a/web/hooks/useModels.ts b/web/hooks/useModels.ts
new file mode 100644
index 000000000..23e098007
--- /dev/null
+++ b/web/hooks/useModels.ts
@@ -0,0 +1,46 @@
+import { useEffect } from 'react'
+
+import { ExtensionTypeEnum, Model, ModelExtension } from '@janhq/core'
+
+import { useSetAtom } from 'jotai'
+
+import { extensionManager } from '@/extension'
+import {
+ configuredModelsAtom,
+ downloadedModelsAtom,
+} from '@/helpers/atoms/Model.atom'
+
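+// Loads the downloaded and configured model lists from the model extension into their atoms on mount.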
+const useModels = () => {
+ const setDownloadedModels = useSetAtom(downloadedModelsAtom)
+ const setConfiguredModels = useSetAtom(configuredModelsAtom)
+
+ useEffect(() => {
+ const getDownloadedModels = async () => {
+ const models = await getLocalDownloadedModels()
+ setDownloadedModels(models)
+ }
+
+ getDownloadedModels()
+ }, [setDownloadedModels])
+
+ useEffect(() => {
+ const getConfiguredModels = async () => {
+ const models = await getLocalConfiguredModels()
+ setConfiguredModels(models)
+ }
+
+ getConfiguredModels()
+ }, [setConfiguredModels])
+}
+
+const getLocalConfiguredModels = async (): Promise<Model[]> =>
+  extensionManager
+    .get<ModelExtension>(ExtensionTypeEnum.Model)
+    ?.getConfiguredModels() ?? []
+
+const getLocalDownloadedModels = async (): Promise<Model[]> =>
+  extensionManager
+    .get<ModelExtension>(ExtensionTypeEnum.Model)
+    ?.getDownloadedModels() ?? []
+
+export default useModels
diff --git a/web/hooks/usePath.ts b/web/hooks/usePath.ts
index db6284f93..aea25bef1 100644
--- a/web/hooks/usePath.ts
+++ b/web/hooks/usePath.ts
@@ -3,28 +3,23 @@ import { useAtomValue } from 'jotai'
import { selectedModelAtom } from '@/containers/DropdownListSidebar'
-import { activeThreadAtom, threadStatesAtom } from '@/helpers/atoms/Thread.atom'
+import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
export const usePath = () => {
const activeThread = useAtomValue(activeThreadAtom)
- const threadStates = useAtomValue(threadStatesAtom)
const selectedModel = useAtomValue(selectedModelAtom)
const onReviewInFinder = async (type: string) => {
- if (!activeThread) return
- const activeThreadState = threadStates[activeThread.id]
- if (!activeThreadState.isFinishInit) {
- alert('Thread is not started yet')
- return
- }
+ // TODO: this logic should be refactored.
+ if (type !== 'Model' && !activeThread) return
const userSpace = await getJanDataFolderPath()
let filePath = undefined
- const assistantId = activeThread.assistants[0]?.assistant_id
+ const assistantId = activeThread?.assistants[0]?.assistant_id
switch (type) {
case 'Engine':
case 'Thread':
- filePath = await joinPath(['threads', activeThread.id])
+ filePath = await joinPath(['threads', activeThread?.id ?? ''])
break
case 'Model':
if (!selectedModel) return
@@ -44,20 +39,20 @@ export const usePath = () => {
}
const onViewJson = async (type: string) => {
- if (!activeThread) return
- const activeThreadState = threadStates[activeThread.id]
- if (!activeThreadState.isFinishInit) {
- alert('Thread is not started yet')
- return
- }
+ // TODO: this logic should be refactored.
+ if (type !== 'Model' && !activeThread) return
const userSpace = await getJanDataFolderPath()
let filePath = undefined
- const assistantId = activeThread.assistants[0]?.assistant_id
+ const assistantId = activeThread?.assistants[0]?.assistant_id
switch (type) {
case 'Engine':
case 'Thread':
- filePath = await joinPath(['threads', activeThread.id, 'thread.json'])
+ filePath = await joinPath([
+ 'threads',
+ activeThread?.id ?? '',
+ 'thread.json',
+ ])
break
case 'Model':
if (!selectedModel) return
@@ -76,8 +71,32 @@ export const usePath = () => {
openFileExplorer(fullPath)
}
+ const onViewFile = async (id: string) => {
+ if (!activeThread) return
+
+ const userSpace = await getJanDataFolderPath()
+ let filePath = undefined
+ filePath = await joinPath(['threads', `${activeThread.id}/files`, `${id}`])
+ if (!filePath) return
+ const fullPath = await joinPath([userSpace, filePath])
+ openFileExplorer(fullPath)
+ }
+
+ const onViewFileContainer = async () => {
+ if (!activeThread) return
+
+ const userSpace = await getJanDataFolderPath()
+ let filePath = undefined
+ filePath = await joinPath(['threads', `${activeThread.id}/files`])
+ if (!filePath) return
+ const fullPath = await joinPath([userSpace, filePath])
+ openFileExplorer(fullPath)
+ }
+
return {
onReviewInFinder,
onViewJson,
+ onViewFile,
+ onViewFileContainer,
}
}
diff --git a/web/hooks/useRecommendedModel.ts b/web/hooks/useRecommendedModel.ts
index 2ee4c1a7f..8122e2b77 100644
--- a/web/hooks/useRecommendedModel.ts
+++ b/web/hooks/useRecommendedModel.ts
@@ -5,9 +5,9 @@ import { Model, InferenceEngine } from '@janhq/core'
import { atom, useAtomValue } from 'jotai'
import { activeModelAtom } from './useActiveModel'
-import { getDownloadedModels } from './useGetDownloadedModels'
-import { activeThreadAtom, threadStatesAtom } from '@/helpers/atoms/Thread.atom'
+import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
+import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
export const lastUsedModel = atom(undefined)
@@ -24,49 +24,31 @@ export const LAST_USED_MODEL_ID = 'last-used-model-id'
*/
export default function useRecommendedModel() {
const activeModel = useAtomValue(activeModelAtom)
- const [downloadedModels, setDownloadedModels] = useState([])
+  const [sortedModels, setSortedModels] = useState<Model[]>([])
const [recommendedModel, setRecommendedModel] = useState()
- const threadStates = useAtomValue(threadStatesAtom)
const activeThread = useAtomValue(activeThreadAtom)
+ const downloadedModels = useAtomValue(downloadedModelsAtom)
  const getAndSortDownloadedModels = useCallback(async (): Promise<Model[]> => {
- const models = (await getDownloadedModels()).sort((a, b) =>
+ const models = downloadedModels.sort((a, b) =>
a.engine !== InferenceEngine.nitro && b.engine === InferenceEngine.nitro
? 1
: -1
)
- setDownloadedModels(models)
+ setSortedModels(models)
return models
- }, [])
+ }, [downloadedModels])
const getRecommendedModel = useCallback(async (): Promise<
Model | undefined
> => {
const models = await getAndSortDownloadedModels()
- if (!activeThread) {
- return
- }
+ if (!activeThread) return
+ const modelId = activeThread.assistants[0]?.model.id
+ const model = models.find((model) => model.id === modelId)
- const finishInit = threadStates[activeThread.id].isFinishInit ?? true
- if (finishInit) {
- const modelId = activeThread.assistants[0]?.model.id
- const model = models.find((model) => model.id === modelId)
-
- if (model) {
- setRecommendedModel(model)
- }
-
- return
- } else {
- const modelId = activeThread.assistants[0]?.model.id
- if (modelId !== '*') {
- const model = models.find((model) => model.id === modelId)
-
- if (model) {
- setRecommendedModel(model)
- }
- return
- }
+ if (model) {
+ setRecommendedModel(model)
}
if (activeModel) {
@@ -117,5 +99,5 @@ export default function useRecommendedModel() {
getRecommendedModel()
}, [getRecommendedModel])
- return { recommendedModel, downloadedModels }
+ return { recommendedModel, downloadedModels: sortedModels }
}
diff --git a/web/hooks/useSendChatMessage.ts b/web/hooks/useSendChatMessage.ts
index bf9740489..7d89764db 100644
--- a/web/hooks/useSendChatMessage.ts
+++ b/web/hooks/useSendChatMessage.ts
@@ -1,4 +1,5 @@
-import { useEffect, useRef, useState } from 'react'
+/* eslint-disable @typescript-eslint/no-explicit-any */
+import { useEffect, useRef } from 'react'
import {
ChatCompletionMessage,
@@ -13,19 +14,21 @@ import {
Model,
ConversationalExtension,
MessageEvent,
+ InferenceEngine,
+ ChatCompletionMessageContentType,
+ AssistantTool,
} from '@janhq/core'
-import { useAtom, useAtomValue, useSetAtom } from 'jotai'
+import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { ulid } from 'ulid'
import { selectedModelAtom } from '@/containers/DropdownListSidebar'
-import { currentPromptAtom } from '@/containers/Providers/Jotai'
-
-import { toaster } from '@/containers/Toast'
+import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
+import { getBase64 } from '@/utils/base64'
import { toRuntimeParams, toSettingParams } from '@/utils/modelParam'
-import { useActiveModel } from './useActiveModel'
+import { loadModelErrorAtom, useActiveModel } from './useActiveModel'
import { extensionManager } from '@/extension/ExtensionManager'
import {
@@ -36,47 +39,53 @@ import {
activeThreadAtom,
engineParamsUpdateAtom,
getActiveThreadModelParamsAtom,
- threadStatesAtom,
+ isGeneratingResponseAtom,
updateThreadAtom,
- updateThreadInitSuccessAtom,
updateThreadWaitingForResponseAtom,
} from '@/helpers/atoms/Thread.atom'
+export const queuedMessageAtom = atom(false)
+export const reloadModelAtom = atom(false)
+
export default function useSendChatMessage() {
const activeThread = useAtomValue(activeThreadAtom)
const addNewMessage = useSetAtom(addNewMessageAtom)
const updateThread = useSetAtom(updateThreadAtom)
const updateThreadWaiting = useSetAtom(updateThreadWaitingForResponseAtom)
- const [currentPrompt, setCurrentPrompt] = useAtom(currentPromptAtom)
+ const setCurrentPrompt = useSetAtom(currentPromptAtom)
const currentMessages = useAtomValue(getCurrentChatMessagesAtom)
const { activeModel } = useActiveModel()
const selectedModel = useAtomValue(selectedModelAtom)
const { startModel } = useActiveModel()
- const [queuedMessage, setQueuedMessage] = useState(false)
+ const setQueuedMessage = useSetAtom(queuedMessageAtom)
+ const loadModelFailed = useAtomValue(loadModelErrorAtom)
const modelRef = useRef()
- const threadStates = useAtomValue(threadStatesAtom)
- const updateThreadInitSuccess = useSetAtom(updateThreadInitSuccessAtom)
+  const loadModelFailedRef = useRef<string | undefined>()
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
-
const engineParamsUpdate = useAtomValue(engineParamsUpdateAtom)
- const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
- const [reloadModel, setReloadModel] = useState(false)
+ const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
+ const setReloadModel = useSetAtom(reloadModelAtom)
+ const [fileUpload, setFileUpload] = useAtom(fileUploadAtom)
+ const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
useEffect(() => {
modelRef.current = activeModel
}, [activeModel])
+ useEffect(() => {
+ loadModelFailedRef.current = loadModelFailed
+ }, [loadModelFailed])
+
const resendChatMessage = async (currentMessage: ThreadMessage) => {
if (!activeThread) {
console.error('No active thread')
return
}
-
+ setIsGeneratingResponse(true)
updateThreadWaiting(activeThread.id, true)
-
const messages: ChatCompletionMessage[] = [
activeThread.assistants[0]?.instructions,
]
@@ -113,83 +122,36 @@ export default function useSendChatMessage() {
if (activeModel?.id !== modelId) {
setQueuedMessage(true)
startModel(modelId)
- await WaitForModelStarting(modelId)
+ await waitForModelStarting(modelId)
setQueuedMessage(false)
}
events.emit(MessageEvent.OnMessageSent, messageRequest)
}
- // TODO: Refactor @louis
- const WaitForModelStarting = async (modelId: string) => {
- return new Promise((resolve) => {
- setTimeout(async () => {
- if (modelRef.current?.id !== modelId) {
- console.debug('waiting for model to start')
- await WaitForModelStarting(modelId)
- resolve()
- } else {
- resolve()
- }
- }, 200)
- })
- }
-
- const sendChatMessage = async () => {
- if (!currentPrompt || currentPrompt.trim().length === 0) return
+ const sendChatMessage = async (message: string) => {
+ if (!message || message.trim().length === 0) return
if (!activeThread) {
console.error('No active thread')
return
}
+ setIsGeneratingResponse(true)
if (engineParamsUpdate) setReloadModel(true)
- const activeThreadState = threadStates[activeThread.id]
const runtimeParams = toRuntimeParams(activeModelParams)
const settingParams = toSettingParams(activeModelParams)
- // if the thread is not initialized, we need to initialize it first
- if (
- !activeThreadState.isFinishInit ||
- activeThread.assistants[0].model.id !== selectedModel?.id
- ) {
- if (!selectedModel) {
- toaster({ title: 'Please select a model' })
- return
- }
- const assistantId = activeThread.assistants[0].assistant_id ?? ''
- const assistantName = activeThread.assistants[0].assistant_name ?? ''
- const instructions = activeThread.assistants[0].instructions ?? ''
-
- const updatedThread: Thread = {
- ...activeThread,
- assistants: [
- {
- assistant_id: assistantId,
- assistant_name: assistantName,
- instructions: instructions,
- model: {
- id: selectedModel.id,
- settings: settingParams,
- parameters: runtimeParams,
- engine: selectedModel.engine,
- },
- },
- ],
- }
- updateThreadInitSuccess(activeThread.id)
- updateThread(updatedThread)
-
- await extensionManager
- .get(ExtensionTypeEnum.Conversational)
- ?.saveThread(updatedThread)
- }
-
updateThreadWaiting(activeThread.id, true)
-
- const prompt = currentPrompt.trim()
+ const prompt = message.trim()
setCurrentPrompt('')
+ const base64Blob = fileUpload[0]
+ ? await getBase64(fileUpload[0].file).then()
+ : undefined
+
+ const msgId = ulid()
+
const messages: ChatCompletionMessage[] = [
activeThread.assistants[0]?.instructions,
]
@@ -210,16 +172,41 @@ export default function useSendChatMessage() {
.concat([
{
role: ChatCompletionRole.User,
- content: prompt,
+ content:
+ selectedModel && base64Blob
+ ? [
+ {
+ type: ChatCompletionMessageContentType.Text,
+ text: prompt,
+ },
+ {
+ type: ChatCompletionMessageContentType.Doc,
+ doc_url: {
+ url: `threads/${activeThread.id}/files/${msgId}.pdf`,
+ },
+ },
+ ]
+ : prompt,
} as ChatCompletionMessage,
])
)
- const msgId = ulid()
- const modelRequest = selectedModel ?? activeThread.assistants[0].model
+ let modelRequest = selectedModel ?? activeThread.assistants[0].model
if (runtimeParams.stream == null) {
runtimeParams.stream = true
}
+ // Add middleware to the model request with tool retrieval enabled
+ if (
+ activeThread.assistants[0].tools?.some(
+ (tool: AssistantTool) => tool.type === 'retrieval' && tool.enabled
+ )
+ ) {
+ modelRequest = {
+ ...modelRequest,
+ engine: InferenceEngine.tool_retrieval_enabled,
+ proxyEngine: modelRequest.engine,
+ }
+ }
const messageRequest: MessageRequest = {
id: msgId,
threadId: activeThread.id,
@@ -229,8 +216,44 @@ export default function useSendChatMessage() {
settings: settingParams,
parameters: runtimeParams,
},
+ thread: activeThread,
}
const timestamp = Date.now()
+
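+    // Build the message content: attach the uploaded image or PDF (with the prompt as its text) when a file is present, otherwise send plain text.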
+ const content: any = []
+
+ if (base64Blob && fileUpload[0]?.type === 'image') {
+ content.push({
+ type: ContentType.Image,
+ text: {
+ value: prompt,
+ annotations: [base64Blob],
+ },
+ })
+ }
+
+ if (base64Blob && fileUpload[0]?.type === 'pdf') {
+ content.push({
+ type: ContentType.Pdf,
+ text: {
+ value: prompt,
+ annotations: [base64Blob],
+ name: fileUpload[0].file.name,
+ size: fileUpload[0].file.size,
+ },
+ })
+ }
+
+ if (prompt && !base64Blob) {
+ content.push({
+ type: ContentType.Text,
+ text: {
+ value: prompt,
+ annotations: [],
+ },
+ })
+ }
+
const threadMessage: ThreadMessage = {
id: msgId,
thread_id: activeThread.id,
@@ -239,18 +262,21 @@ export default function useSendChatMessage() {
created: timestamp,
updated: timestamp,
object: 'thread.message',
- content: [
- {
- type: ContentType.Text,
- text: {
- value: prompt,
- annotations: [],
- },
- },
- ],
+ content: content,
}
addNewMessage(threadMessage)
+ if (base64Blob) {
+ setFileUpload([])
+ }
+
+ const updatedThread: Thread = {
+ ...activeThread,
+ updated: timestamp,
+ }
+
+    // Update the thread's last-updated timestamp when a message is sent
+ updateThread(updatedThread)
await extensionManager
.get(ExtensionTypeEnum.Conversational)
@@ -261,7 +287,7 @@ export default function useSendChatMessage() {
if (activeModel?.id !== modelId) {
setQueuedMessage(true)
startModel(modelId)
- await WaitForModelStarting(modelId)
+ await waitForModelStarting(modelId)
setQueuedMessage(false)
}
@@ -271,10 +297,21 @@ export default function useSendChatMessage() {
setEngineParamsUpdate(false)
}
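+ // Poll every 200ms until the requested model is active or loading has failed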
+ const waitForModelStarting = async (modelId: string) => {
+ return new Promise<void>((resolve) => {
+ setTimeout(async () => {
+ if (modelRef.current?.id !== modelId && !loadModelFailedRef.current) {
+ await waitForModelStarting(modelId)
+ resolve()
+ } else {
+ resolve()
+ }
+ }, 200)
+ })
+ }
+
return {
- reloadModel,
sendChatMessage,
resendChatMessage,
- queuedMessage,
}
}
diff --git a/web/hooks/useSetActiveThread.ts b/web/hooks/useSetActiveThread.ts
index 76a744bcd..6cf94d45d 100644
--- a/web/hooks/useSetActiveThread.ts
+++ b/web/hooks/useSetActiveThread.ts
@@ -1,3 +1,5 @@
+import { useCallback } from 'react'
+
import {
InferenceEvent,
ExtensionTypeEnum,
@@ -6,44 +8,54 @@ import {
ConversationalExtension,
} from '@janhq/core'
-import { useAtomValue, useSetAtom } from 'jotai'
+import { useSetAtom } from 'jotai'
+
+import { loadModelErrorAtom } from './useActiveModel'
import { extensionManager } from '@/extension'
import { setConvoMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
import {
ModelParams,
- getActiveThreadIdAtom,
+ isGeneratingResponseAtom,
setActiveThreadIdAtom,
setThreadModelParamsAtom,
} from '@/helpers/atoms/Thread.atom'
export default function useSetActiveThread() {
- const activeThreadId = useAtomValue(getActiveThreadIdAtom)
const setActiveThreadId = useSetAtom(setActiveThreadIdAtom)
const setThreadMessage = useSetAtom(setConvoMessagesAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
+ const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
+ const setLoadModelError = useSetAtom(loadModelErrorAtom)
- const setActiveThread = async (thread: Thread) => {
- if (activeThreadId === thread.id) {
- console.debug('Thread already active')
- return
- }
+ const setActiveThread = useCallback(
+ async (thread: Thread) => {
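+ // Reset the generating state and stop any running inference before switching threads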
+ setIsGeneratingResponse(false)
+ events.emit(InferenceEvent.OnInferenceStopped, thread.id)
- events.emit(InferenceEvent.OnInferenceStopped, thread.id)
+ // load the corresponding messages
+ const messages = await getLocalThreadMessage(thread.id)
+ setThreadMessage(thread.id, messages)
- // load the corresponding messages
- const messages = await extensionManager
- .get(ExtensionTypeEnum.Conversational)
- ?.getAllMessages(thread.id)
- setThreadMessage(thread.id, messages ?? [])
+ setActiveThreadId(thread.id)
+ const modelParams: ModelParams = {
+ ...thread.assistants[0]?.model?.parameters,
+ ...thread.assistants[0]?.model?.settings,
+ }
+ setThreadModelParams(thread.id, modelParams)
+ },
+ [
+ setActiveThreadId,
+ setThreadMessage,
+ setThreadModelParams,
+ setIsGeneratingResponse,
+ ]
+ )
- setActiveThreadId(thread.id)
- const modelParams: ModelParams = {
- ...thread.assistants[0]?.model?.parameters,
- ...thread.assistants[0]?.model?.settings,
- }
- setThreadModelParams(thread.id, modelParams)
- }
-
- return { activeThreadId, setActiveThread }
+ return { setActiveThread }
}
+
+const getLocalThreadMessage = async (threadId: string) =>
+ extensionManager
+ .get(ExtensionTypeEnum.Conversational)
+ ?.getAllMessages(threadId) ?? []
diff --git a/web/hooks/useSettings.ts b/web/hooks/useSettings.ts
index ef4e08480..289355b36 100644
--- a/web/hooks/useSettings.ts
+++ b/web/hooks/useSettings.ts
@@ -1,4 +1,4 @@
-import { useEffect, useState } from 'react'
+import { useCallback, useEffect, useState } from 'react'
import { fs, joinPath } from '@janhq/core'
import { atom, useAtom } from 'jotai'
@@ -32,7 +32,7 @@ export const useSettings = () => {
})
}
- const readSettings = async () => {
+ const readSettings = useCallback(async () => {
if (!window?.core?.api) {
return
}
@@ -42,18 +42,22 @@ export const useSettings = () => {
return typeof settings === 'object' ? settings : JSON.parse(settings)
}
return {}
- }
+ }, [])
+
const saveSettings = async ({
runMode,
notify,
+ gpusInUse,
}: {
runMode?: string | undefined
notify?: boolean | undefined
+ gpusInUse?: string[] | undefined
}) => {
const settingsFile = await joinPath(['file://settings', 'settings.json'])
const settings = await readSettings()
if (runMode != null) settings.run_mode = runMode
if (notify != null) settings.notify = notify
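+ // Persist the selected GPUs alongside the run mode and notification settings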
+ if (gpusInUse != null) settings.gpus_in_use = gpusInUse
await fs.writeFileSync(settingsFile, JSON.stringify(settings))
}
diff --git a/web/hooks/useThreads.ts b/web/hooks/useThreads.ts
index b79cfea92..1ac038b26 100644
--- a/web/hooks/useThreads.ts
+++ b/web/hooks/useThreads.ts
@@ -1,3 +1,5 @@
+import { useEffect } from 'react'
+
import {
ExtensionTypeEnum,
Thread,
@@ -5,7 +7,7 @@ import {
ConversationalExtension,
} from '@janhq/core'
-import { useAtom } from 'jotai'
+import { useSetAtom } from 'jotai'
import useSetActiveThread from './useSetActiveThread'
@@ -18,15 +20,13 @@ import {
} from '@/helpers/atoms/Thread.atom'
const useThreads = () => {
- const [threadStates, setThreadStates] = useAtom(threadStatesAtom)
- const [threads, setThreads] = useAtom(threadsAtom)
- const [threadModelRuntimeParams, setThreadModelRuntimeParams] = useAtom(
- threadModelParamsAtom
- )
+ const setThreadStates = useSetAtom(threadStatesAtom)
+ const setThreads = useSetAtom(threadsAtom)
+ const setThreadModelRuntimeParams = useSetAtom(threadModelParamsAtom)
const { setActiveThread } = useSetActiveThread()
- const getThreads = async () => {
- try {
+ useEffect(() => {
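+ // Load threads from disk on mount and activate the first one in the list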
+ const getThreads = async () => {
const localThreads = await getLocalThreads()
const localThreadStates: Record<string, ThreadState> = {}
const threadModelParams: Record<string, ModelParams> = {}
@@ -39,7 +39,6 @@ const useThreads = () => {
hasMore: false,
waitingForResponse: false,
lastMessage,
- isFinishInit: true,
}
const modelParams = thread.assistants?.[0]?.model?.parameters
@@ -51,50 +50,23 @@ const useThreads = () => {
}
})
- // allow at max 1 unfinished init thread and it should be at the top of the list
- let unfinishedThreadId: string | undefined = undefined
- const unfinishedThreadState: Record = {}
-
- for (const key of Object.keys(threadStates)) {
- const threadState = threadStates[key]
- if (threadState.isFinishInit === false) {
- unfinishedThreadState[key] = threadState
- unfinishedThreadId = key
- break
- }
- }
- const unfinishedThread: Thread | undefined = threads.find(
- (thread) => thread.id === unfinishedThreadId
- )
-
- let allThreads: Thread[] = [...localThreads]
- if (unfinishedThread) {
- allThreads = [unfinishedThread, ...localThreads]
- }
-
- if (unfinishedThreadId) {
- localThreadStates[unfinishedThreadId] =
- unfinishedThreadState[unfinishedThreadId]
-
- threadModelParams[unfinishedThreadId] =
- threadModelRuntimeParams[unfinishedThreadId]
- }
-
// updating app states
setThreadStates(localThreadStates)
- setThreads(allThreads)
+ setThreads(localThreads)
setThreadModelRuntimeParams(threadModelParams)
- if (allThreads.length > 0) {
- setActiveThread(allThreads[0])
- }
- } catch (error) {
- console.error(error)
- }
- }
- return {
- getThreads,
- }
+ if (localThreads.length > 0) {
+ setActiveThread(localThreads[0])
+ }
+ }
+
+ getThreads()
+ }, [
+ setActiveThread,
+ setThreadModelRuntimeParams,
+ setThreadStates,
+ setThreads,
+ ])
}
const getLocalThreads = async (): Promise<Thread[]> =>
diff --git a/web/hooks/useUpdateModelParameters.ts b/web/hooks/useUpdateModelParameters.ts
index 80070ef26..694394cee 100644
--- a/web/hooks/useUpdateModelParameters.ts
+++ b/web/hooks/useUpdateModelParameters.ts
@@ -2,12 +2,15 @@
import {
ConversationalExtension,
ExtensionTypeEnum,
+ InferenceEngine,
Thread,
ThreadAssistantInfo,
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
+import { selectedModelAtom } from '@/containers/DropdownListSidebar'
+
import { toRuntimeParams, toSettingParams } from '@/utils/modelParam'
import { extensionManager } from '@/extension'
@@ -19,16 +22,22 @@ import {
threadsAtom,
} from '@/helpers/atoms/Thread.atom'
+export type UpdateModelParameter = {
+ params?: ModelParams
+ modelId?: string
+ engine?: InferenceEngine
+}
+
export default function useUpdateModelParameters() {
const threads = useAtomValue(threadsAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
const activeThreadState = useAtomValue(activeThreadStateAtom)
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
+ const selectedModel = useAtomValue(selectedModelAtom)
const updateModelParameter = async (
threadId: string,
- name: string,
- value: number | boolean | string
+ settings: UpdateModelParameter
) => {
const thread = threads.find((thread) => thread.id === threadId)
if (!thread) {
@@ -40,21 +49,18 @@ export default function useUpdateModelParameters() {
console.error('No active thread')
return
}
+
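+ // Replace params entirely when switching models; otherwise merge with the active thread params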
+ const params = settings.modelId
+ ? settings.params
+ : { ...activeModelParams, ...settings.params }
+
const updatedModelParams: ModelParams = {
- ...activeModelParams,
- // Explicitly set the value to an array if the name is 'stop'
- // This is because the inference engine would only accept an array for the 'stop' parameter
- [name]: name === 'stop' ? (value === '' ? [] : [value]) : value,
+ ...params,
}
// update the state
setThreadModelParams(thread.id, updatedModelParams)
- if (!activeThreadState.isFinishInit) {
- // if thread is not initialized, we don't need to update thread.json
- return
- }
-
const assistants = thread.assistants.map(
(assistant: ThreadAssistantInfo) => {
const runtimeParams = toRuntimeParams(updatedModelParams)
@@ -62,6 +68,10 @@ export default function useUpdateModelParameters() {
assistant.model.parameters = runtimeParams
assistant.model.settings = settingParams
+ if (selectedModel) {
+ assistant.model.id = settings.modelId ?? selectedModel?.id
+ assistant.model.engine = settings.engine ?? selectedModel?.engine
+ }
return assistant
}
)
diff --git a/web/hooks/useVaultDirectory.ts b/web/hooks/useVaultDirectory.ts
deleted file mode 100644
index 3aa7383c9..000000000
--- a/web/hooks/useVaultDirectory.ts
+++ /dev/null
@@ -1,105 +0,0 @@
-import { useEffect } from 'react'
-
-import { fs, AppConfiguration } from '@janhq/core'
-
-import { atom, useAtom } from 'jotai'
-
-import { useMainViewState } from './useMainViewState'
-
-const isSameDirectoryAtom = atom(false)
-const isDirectoryConfirmAtom = atom(false)
-const isErrorSetNewDestAtom = atom(false)
-const currentPathAtom = atom('')
-const newDestinationPathAtom = atom('')
-
-export const SUCCESS_SET_NEW_DESTINATION = 'successSetNewDestination'
-
-export function useVaultDirectory() {
- const [isSameDirectory, setIsSameDirectory] = useAtom(isSameDirectoryAtom)
- const { setMainViewState } = useMainViewState()
- const [isDirectoryConfirm, setIsDirectoryConfirm] = useAtom(
- isDirectoryConfirmAtom
- )
- const [isErrorSetNewDest, setIsErrorSetNewDest] = useAtom(
- isErrorSetNewDestAtom
- )
- const [currentPath, setCurrentPath] = useAtom(currentPathAtom)
- const [newDestinationPath, setNewDestinationPath] = useAtom(
- newDestinationPathAtom
- )
-
- useEffect(() => {
- window.core?.api
- ?.getAppConfigurations()
- ?.then((appConfig: AppConfiguration) => {
- setCurrentPath(appConfig.data_folder)
- })
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
-
- const setNewDestination = async () => {
- const destFolder = await window.core?.api?.selectDirectory()
- setNewDestinationPath(destFolder)
-
- if (destFolder) {
- console.debug(`Destination folder selected: ${destFolder}`)
- try {
- const appConfiguration: AppConfiguration =
- await window.core?.api?.getAppConfigurations()
- const currentJanDataFolder = appConfiguration.data_folder
-
- if (currentJanDataFolder === destFolder) {
- console.debug(
- `Destination folder is the same as current folder. Ignore..`
- )
- setIsSameDirectory(true)
- setIsDirectoryConfirm(false)
- return
- } else {
- setIsSameDirectory(false)
- setIsDirectoryConfirm(true)
- }
- setIsErrorSetNewDest(false)
- } catch (e) {
- console.error(`Error: ${e}`)
- setIsErrorSetNewDest(true)
- }
- }
- }
-
- const applyNewDestination = async () => {
- try {
- const appConfiguration: AppConfiguration =
- await window.core?.api?.getAppConfigurations()
- const currentJanDataFolder = appConfiguration.data_folder
-
- appConfiguration.data_folder = newDestinationPath
-
- await fs.syncFile(currentJanDataFolder, newDestinationPath)
- await window.core?.api?.updateAppConfiguration(appConfiguration)
- console.debug(
- `File sync finished from ${currentPath} to ${newDestinationPath}`
- )
-
- setIsErrorSetNewDest(false)
- localStorage.setItem(SUCCESS_SET_NEW_DESTINATION, 'true')
- await window.core?.api?.relaunch()
- } catch (e) {
- console.error(`Error: ${e}`)
- setIsErrorSetNewDest(true)
- }
- }
-
- return {
- setNewDestination,
- newDestinationPath,
- applyNewDestination,
- isSameDirectory,
- setIsDirectoryConfirm,
- isDirectoryConfirm,
- setIsSameDirectory,
- currentPath,
- isErrorSetNewDest,
- setIsErrorSetNewDest,
- }
-}
diff --git a/web/next.config.js b/web/next.config.js
index 455ba70fc..a2e202c51 100644
--- a/web/next.config.js
+++ b/web/next.config.js
@@ -24,14 +24,9 @@ const nextConfig = {
config.plugins = [
...config.plugins,
new webpack.DefinePlugin({
- PLUGIN_CATALOG: JSON.stringify(
- 'https://cdn.jsdelivr.net/npm/@janhq/plugin-catalog@latest/dist/index.js'
- ),
VERSION: JSON.stringify(packageJson.version),
- ANALYTICS_ID:
- JSON.stringify(process.env.ANALYTICS_ID) ?? JSON.stringify('xxx'),
- ANALYTICS_HOST:
- JSON.stringify(process.env.ANALYTICS_HOST) ?? JSON.stringify('xxx'),
+ ANALYTICS_ID: JSON.stringify(process.env.ANALYTICS_ID),
+ ANALYTICS_HOST: JSON.stringify(process.env.ANALYTICS_HOST),
API_BASE_URL: JSON.stringify('http://localhost:1337'),
isMac: process.platform === 'darwin',
isWindows: process.platform === 'win32',
diff --git a/web/package.json b/web/package.json
index 5293cf765..498481aa3 100644
--- a/web/package.json
+++ b/web/package.json
@@ -8,6 +8,7 @@
"build": "next build",
"start": "next start",
"lint": "eslint .",
+ "lint:fix": "eslint . --fix",
"format": "prettier --write \"**/*.{js,jsx,ts,tsx}\"",
"compile": "tsc --noEmit -p . --pretty"
},
@@ -32,6 +33,7 @@
"posthog-js": "^1.95.1",
"react": "18.2.0",
"react-dom": "18.2.0",
+ "react-dropzone": "^14.2.3",
"react-hook-form": "^7.47.0",
"react-hot-toast": "^2.4.1",
"react-icons": "^4.12.0",
diff --git a/web/screens/Chat/AssistantSetting/index.tsx b/web/screens/Chat/AssistantSetting/index.tsx
new file mode 100644
index 000000000..df516def0
--- /dev/null
+++ b/web/screens/Chat/AssistantSetting/index.tsx
@@ -0,0 +1,78 @@
+import { useAtomValue } from 'jotai'
+
+import { useCreateNewThread } from '@/hooks/useCreateNewThread'
+
+import SettingComponentBuilder, {
+ SettingComponentData,
+} from '../ModelSetting/SettingComponent'
+
+import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
+
+const AssistantSetting = ({
+ componentData,
+}: {
+ componentData: SettingComponentData[]
+}) => {
+ const activeThread = useAtomValue(activeThreadAtom)
+ const { updateThreadMetadata } = useCreateNewThread()
+
+ return (
+
+ {activeThread && componentData && (
+ {
+ if (
+ activeThread.assistants[0].tools &&
+ (name === 'chunk_overlap' || name === 'chunk_size')
+ ) {
+ if (
+ activeThread.assistants[0].tools[0]?.settings.chunk_size <
+ activeThread.assistants[0].tools[0]?.settings.chunk_overlap
+ ) {
+ activeThread.assistants[0].tools[0].settings.chunk_overlap =
+ activeThread.assistants[0].tools[0].settings.chunk_size
+ }
+
+ if (
+ name === 'chunk_size' &&
+ value <
+ activeThread.assistants[0].tools[0].settings.chunk_overlap
+ ) {
+ activeThread.assistants[0].tools[0].settings.chunk_overlap =
+ value
+ } else if (
+ name === 'chunk_overlap' &&
+ value > activeThread.assistants[0].tools[0].settings.chunk_size
+ ) {
+ activeThread.assistants[0].tools[0].settings.chunk_size = value
+ }
+ }
+
+ updateThreadMetadata({
+ ...activeThread,
+ assistants: [
+ {
+ ...activeThread.assistants[0],
+ tools: [
+ {
+ type: 'retrieval',
+ enabled: true,
+ settings: {
+ ...(activeThread.assistants[0].tools &&
+ activeThread.assistants[0].tools[0]?.settings),
+ [name]: value,
+ },
+ },
+ ],
+ },
+ ],
+ })
+ }}
+ />
+ )}
+
+ )
+}
+
+export default AssistantSetting
diff --git a/web/screens/Chat/ChatBody/index.tsx b/web/screens/Chat/ChatBody/index.tsx
index f56e13845..c67d6a538 100644
--- a/web/screens/Chat/ChatBody/index.tsx
+++ b/web/screens/Chat/ChatBody/index.tsx
@@ -10,7 +10,7 @@ import LogoMark from '@/containers/Brand/Logo/Mark'
import { MainViewState } from '@/constants/screens'
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
+import { loadModelErrorAtom } from '@/hooks/useActiveModel'
import { useMainViewState } from '@/hooks/useMainViewState'
@@ -19,10 +19,13 @@ import ChatItem from '../ChatItem'
import ErrorMessage from '../ErrorMessage'
import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
+import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
const ChatBody: React.FC = () => {
const messages = useAtomValue(getCurrentChatMessagesAtom)
- const { downloadedModels } = useGetDownloadedModels()
+
+ const downloadedModels = useAtomValue(downloadedModelsAtom)
+
const { setMainViewState } = useMainViewState()
if (downloadedModels.length === 0)
@@ -80,7 +83,10 @@ const ChatBody: React.FC = () => {
{messages.map((message, index) => (
-
+ {(message.status !== MessageStatus.Pending ||
+ message.content.length > 0) && (
+
+ )}
{(message.status === MessageStatus.Error ||
message.status === MessageStatus.Stopped) &&
index === messages.length - 1 && (
diff --git a/web/screens/Chat/ChatInput/index.tsx b/web/screens/Chat/ChatInput/index.tsx
new file mode 100644
index 000000000..ee1ac9a41
--- /dev/null
+++ b/web/screens/Chat/ChatInput/index.tsx
@@ -0,0 +1,268 @@
+/* eslint-disable @typescript-eslint/no-explicit-any */
+import { useContext, useEffect, useRef, useState } from 'react'
+
+import { InferenceEvent, MessageStatus, events } from '@janhq/core'
+
+import {
+ Textarea,
+ Button,
+ Tooltip,
+ TooltipArrow,
+ TooltipContent,
+ TooltipPortal,
+ TooltipTrigger,
+} from '@janhq/uikit'
+import { useAtom, useAtomValue } from 'jotai'
+import {
+ FileTextIcon,
+ ImageIcon,
+ StopCircle,
+ PaperclipIcon,
+} from 'lucide-react'
+
+import { twMerge } from 'tailwind-merge'
+
+import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
+
+import { FeatureToggleContext } from '@/context/FeatureToggle'
+
+import { useActiveModel } from '@/hooks/useActiveModel'
+import { useClickOutside } from '@/hooks/useClickOutside'
+
+import useSendChatMessage from '@/hooks/useSendChatMessage'
+
+import FileUploadPreview from '../FileUploadPreview'
+import ImageUploadPreview from '../ImageUploadPreview'
+
+import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
+import {
+ activeThreadAtom,
+ getActiveThreadIdAtom,
+ waitingToSendMessage,
+} from '@/helpers/atoms/Thread.atom'
+
+const ChatInput: React.FC = () => {
+ const activeThread = useAtomValue(activeThreadAtom)
+ const { stateModel } = useActiveModel()
+ const messages = useAtomValue(getCurrentChatMessagesAtom)
+
+ const [currentPrompt, setCurrentPrompt] = useAtom(currentPromptAtom)
+ const { sendChatMessage } = useSendChatMessage()
+
+ const activeThreadId = useAtomValue(getActiveThreadIdAtom)
+ const [isWaitingToSend, setIsWaitingToSend] = useAtom(waitingToSendMessage)
+ const [fileUpload, setFileUpload] = useAtom(fileUploadAtom)
+ const textareaRef = useRef<HTMLTextAreaElement>(null)
+ const fileInputRef = useRef<HTMLInputElement>(null)
+ const imageInputRef = useRef<HTMLInputElement>(null)
+ const [showAttacmentMenus, setShowAttacmentMenus] = useState(false)
+ const { experimentalFeature } = useContext(FeatureToggleContext)
+
+ const onPromptChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
+ setCurrentPrompt(e.target.value)
+ }
+
+ const refAttachmentMenus = useClickOutside(() => setShowAttacmentMenus(false))
+
+ useEffect(() => {
+ if (isWaitingToSend && activeThreadId) {
+ setIsWaitingToSend(false)
+ sendChatMessage(currentPrompt)
+ }
+ }, [
+ activeThreadId,
+ isWaitingToSend,
+ currentPrompt,
+ setIsWaitingToSend,
+ sendChatMessage,
+ ])
+
+ useEffect(() => {
+ if (textareaRef.current) {
+ textareaRef.current.focus()
+ }
+ }, [activeThreadId])
+
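+ // Auto-grow the textarea to fit the current prompt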
+ useEffect(() => {
+ if (textareaRef.current) {
+ textareaRef.current.style.height = '40px'
+ textareaRef.current.style.height = textareaRef.current.scrollHeight + 'px'
+ }
+ }, [currentPrompt])
+
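+ // Enter sends the prompt (or stops a pending generation); Shift+Enter adds a newline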
+ const onKeyDown = async (e: React.KeyboardEvent) => {
+ if (e.key === 'Enter' && !e.shiftKey) {
+ e.preventDefault()
+ if (messages[messages.length - 1]?.status !== MessageStatus.Pending)
+ sendChatMessage(currentPrompt)
+ else onStopInferenceClick()
+ }
+ }
+
+ const onStopInferenceClick = async () => {
+ events.emit(InferenceEvent.OnInferenceStopped, {})
+ }
+
+ /**
+ * Handles the change event of the document file input by storing the selected
+ * file in the upload state and pre-filling the prompt. It's used to attach a
+ * PDF document to the next message.
+ * @param event - The change event object.
+ */
+ const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+ const file = event.target.files?.[0]
+ if (!file) return
+ setFileUpload([{ file: file, type: 'pdf' }])
+ setCurrentPrompt('Summarize this for me')
+ }
+
+ const handleImageChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+ const file = event.target.files?.[0]
+ if (!file) return
+ setFileUpload([{ file: file, type: 'image' }])
+ setCurrentPrompt('What do you see in this image?')
+ }
+
+ const renderPreview = (fileUpload: any) => {
+ if (fileUpload.length > 0) {
+ if (fileUpload[0].type === 'image') {
+ return <ImageUploadPreview file={fileUpload[0].file} />
+ } else {
+ return <FileUploadPreview />
+ }
+ }
+ }
+
+ return (
+
+
+ {renderPreview(fileUpload)}
+
+
+ {experimentalFeature && (
+
+
+ {
+ if (
+ fileUpload.length > 0 ||
+ (activeThread?.assistants[0].tools &&
+ !activeThread?.assistants[0].tools[0]?.enabled)
+ ) {
+ e.stopPropagation()
+ } else {
+ setShowAttacmentMenus(!showAttacmentMenus)
+ }
+ }}
+ />
+
+
+ {fileUpload.length > 0 ||
+ (activeThread?.assistants[0].tools &&
+ !activeThread?.assistants[0].tools[0]?.enabled && (
+
+ {fileUpload.length !== 0 && (
+
+ Currently, we only support one attachment at a time
+
+ )}
+ {activeThread?.assistants[0].tools &&
+ activeThread?.assistants[0].tools[0]?.enabled ===
+ false && (
+
+ Turn on Retrieval in Assistant Settings to use this
+ feature
+
+ )}
+
+
+ ))}
+
+
+ )}
+
+ {showAttacmentMenus && (
+
+
+ -
+
+ Image
+
+ - {
+ fileInputRef.current?.click()
+ setShowAttacmentMenus(false)
+ }}
+ >
+
+ Document
+
+
+
+ )}
+
+
+
+
+
+ {messages[messages.length - 1]?.status !== MessageStatus.Pending ? (
+
+ ) : (
+
+ )}
+
+ )
+}
+
+export default ChatInput
diff --git a/web/screens/Chat/CleanThreadModal/index.tsx b/web/screens/Chat/CleanThreadModal/index.tsx
new file mode 100644
index 000000000..6ef505e6f
--- /dev/null
+++ b/web/screens/Chat/CleanThreadModal/index.tsx
@@ -0,0 +1,65 @@
+import React, { useCallback } from 'react'
+
+import {
+ Button,
+ Modal,
+ ModalClose,
+ ModalContent,
+ ModalFooter,
+ ModalHeader,
+ ModalPortal,
+ ModalTitle,
+ ModalTrigger,
+} from '@janhq/uikit'
+import { Paintbrush } from 'lucide-react'
+
+import useDeleteThread from '@/hooks/useDeleteThread'
+
+type Props = {
+ threadId: string
+}
+
+const CleanThreadModal: React.FC<Props> = ({ threadId }) => {
+ const { cleanThread } = useDeleteThread()
+ const onCleanThreadClick = useCallback(
+ (e: React.MouseEvent) => {
+ e.stopPropagation()
+ cleanThread(threadId)
+ },
+ [cleanThread, threadId]
+ )
+
+ return (
+
+ e.stopPropagation()}>
+
+
+
+
+
+ Clean Thread
+
+ Are you sure you want to clean this thread?
+
+
+ e.stopPropagation()}>
+
+
+
+
+
+
+
+
+
+ )
+}
+
+export default React.memo(CleanThreadModal)
diff --git a/web/screens/Chat/DeleteThreadModal/index.tsx b/web/screens/Chat/DeleteThreadModal/index.tsx
new file mode 100644
index 000000000..edbdb09b4
--- /dev/null
+++ b/web/screens/Chat/DeleteThreadModal/index.tsx
@@ -0,0 +1,68 @@
+import React, { useCallback } from 'react'
+
+import {
+ Modal,
+ ModalTrigger,
+ ModalPortal,
+ ModalContent,
+ ModalHeader,
+ ModalTitle,
+ ModalFooter,
+ ModalClose,
+ Button,
+} from '@janhq/uikit'
+import { Trash2Icon } from 'lucide-react'
+
+import useDeleteThread from '@/hooks/useDeleteThread'
+
+type Props = {
+ threadId: string
+}
+
+const DeleteThreadModal: React.FC<Props> = ({ threadId }) => {
+ const { deleteThread } = useDeleteThread()
+ const onDeleteThreadClick = useCallback(
+ (e: React.MouseEvent) => {
+ e.stopPropagation()
+ deleteThread(threadId)
+ },
+ [deleteThread, threadId]
+ )
+
+ return (
+
+ e.stopPropagation()}>
+
+
+
+ Delete thread
+
+
+
+
+
+
+ Delete Thread
+
+
+ Are you sure you want to delete this thread? This action cannot be
+ undone.
+
+
+
+ e.stopPropagation()}>
+
+
+
+
+
+
+
+
+
+ )
+}
+
+export default React.memo(DeleteThreadModal)
diff --git a/web/screens/Chat/EngineSetting/index.tsx b/web/screens/Chat/EngineSetting/index.tsx
index 4394f835b..2153bcbde 100644
--- a/web/screens/Chat/EngineSetting/index.tsx
+++ b/web/screens/Chat/EngineSetting/index.tsx
@@ -6,11 +6,11 @@ import { selectedModelAtom } from '@/containers/DropdownListSidebar'
import { getConfigurationsData } from '@/utils/componentSettings'
import { toSettingParams } from '@/utils/modelParam'
-import settingComponentBuilder from '../ModelSetting/settingComponentBuilder'
+import SettingComponentBuilder from '../ModelSetting/SettingComponent'
import { getActiveThreadModelParamsAtom } from '@/helpers/atoms/Thread.atom'
-const EngineSetting = () => {
+const EngineSetting = ({ enabled = true }: { enabled?: boolean }) => {
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
const selectedModel = useAtomValue(selectedModelAtom)
@@ -18,13 +18,18 @@ const EngineSetting = () => {
const modelSettingParams = toSettingParams(activeModelParams)
- const componentData = getConfigurationsData(modelSettingParams, selectedModel)
-
- componentData.sort((a, b) => a.title.localeCompare(b.title))
+ const componentData = getConfigurationsData(
+ modelSettingParams,
+ selectedModel
+ ).toSorted((a, b) => a.title.localeCompare(b.title))
return (
- {settingComponentBuilder(componentData)}
+ e.name !== 'prompt_template'}
+ />
)
}
diff --git a/web/screens/Chat/ErrorMessage/index.tsx b/web/screens/Chat/ErrorMessage/index.tsx
index 8879b15be..84a89cee8 100644
--- a/web/screens/Chat/ErrorMessage/index.tsx
+++ b/web/screens/Chat/ErrorMessage/index.tsx
@@ -17,7 +17,6 @@ import {
deleteMessageAtom,
getCurrentChatMessagesAtom,
} from '@/helpers/atoms/ChatMessage.atom'
-import { totalRamAtom } from '@/helpers/atoms/SystemBar.atom'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
@@ -25,8 +24,6 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
const thread = useAtomValue(activeThreadAtom)
const deleteMessage = useSetAtom(deleteMessageAtom)
const { resendChatMessage } = useSendChatMessage()
- const { activeModel } = useActiveModel()
- const totalRam = useAtomValue(totalRamAtom)
const regenerateMessage = async () => {
const lastMessageIndex = messages.length - 1
@@ -70,33 +67,26 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
{message.status === MessageStatus.Error && (
- {Number(activeModel?.metadata.size) > totalRam ? (
- <>
- Oops! Model size exceeds available RAM. Consider selecting a
- smaller model or upgrading your RAM for smoother performance.
- >
- ) : (
- <>
- Apologies, something's amiss!
- Jan's in beta. Find troubleshooting guides{' '}
-
- here
- {' '}
- or reach out to us on{' '}
-
- Discord
- {' '}
- for assistance.
- >
- )}
+ <>
+ Apologies, something's amiss!
+ Jan's in beta. Find troubleshooting guides{' '}
+
+ here
+ {' '}
+ or reach out to us on{' '}
+
+ Discord
+ {' '}
+ for assistance.
+ >
)}
diff --git a/web/screens/Chat/FileUploadPreview/Icon.tsx b/web/screens/Chat/FileUploadPreview/Icon.tsx
new file mode 100644
index 000000000..fdfcf5565
--- /dev/null
+++ b/web/screens/Chat/FileUploadPreview/Icon.tsx
@@ -0,0 +1,95 @@
+import React from 'react'
+
+type Props = {
+ type: string
+}
+
+const Icon: React.FC<Props> = ({ type }) => {
+ return (
+
+
+ {type}
+
+
+
+ )
+}
+
+export default Icon
diff --git a/web/screens/Chat/FileUploadPreview/index.tsx b/web/screens/Chat/FileUploadPreview/index.tsx
new file mode 100644
index 000000000..7e1a1bebd
--- /dev/null
+++ b/web/screens/Chat/FileUploadPreview/index.tsx
@@ -0,0 +1,47 @@
+import React from 'react'
+
+import { useAtom, useSetAtom } from 'jotai'
+
+import { XIcon } from 'lucide-react'
+
+import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
+
+import { toGibibytes } from '@/utils/converter'
+
+import Icon from './Icon'
+
+const FileUploadPreview: React.FC = () => {
+ const [fileUpload, setFileUpload] = useAtom(fileUploadAtom)
+ const setCurrentPrompt = useSetAtom(currentPromptAtom)
+
+ const onDeleteClick = () => {
+ setFileUpload([])
+ setCurrentPrompt('')
+ }
+
+ return (
+
+
+
+
+
+
+ {fileUpload[0].file.name.replaceAll(/[-._]/g, ' ')}
+
+
+ {toGibibytes(fileUpload[0].file.size)}
+
+
+
+
+
+
+
+
+ )
+}
+
+export default FileUploadPreview
diff --git a/web/screens/Chat/ImageUploadPreview/index.tsx b/web/screens/Chat/ImageUploadPreview/index.tsx
new file mode 100644
index 000000000..2a9c9b4ba
--- /dev/null
+++ b/web/screens/Chat/ImageUploadPreview/index.tsx
@@ -0,0 +1,54 @@
+import React, { useEffect } from 'react'
+import { useState } from 'react'
+
+import { useSetAtom } from 'jotai'
+
+import { XIcon } from 'lucide-react'
+
+import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
+
+import { getBase64 } from '@/utils/base64'
+
+type Props = {
+ file: File
+}
+
+const ImageUploadPreview: React.FC<Props> = ({ file }) => {
+ const [base64, setBase64] = useState()
+ const setFileUpload = useSetAtom(fileUploadAtom)
+ const setCurrentPrompt = useSetAtom(currentPromptAtom)
+
+ useEffect(() => {
+ getBase64(file)
+ .then((base64) => setBase64(base64))
+ .catch((err) => console.error(err))
+ }, [file])
+
+ if (!base64) {
+ return
+ }
+
+ const onDeleteClick = () => {
+ setFileUpload([])
+ setCurrentPrompt('')
+ }
+
+ return (
+
+
+ 
+
+ {file.name.replaceAll(/[-._]/g, ' ')}
+
+
+
+
+
+
+ )
+}
+
+export default React.memo(ImageUploadPreview)
diff --git a/web/screens/Chat/MessageQueuedBanner/index.tsx b/web/screens/Chat/MessageQueuedBanner/index.tsx
new file mode 100644
index 000000000..5847394b4
--- /dev/null
+++ b/web/screens/Chat/MessageQueuedBanner/index.tsx
@@ -0,0 +1,21 @@
+import { useAtomValue } from 'jotai'
+
+import { queuedMessageAtom } from '@/hooks/useSendChatMessage'
+
+const MessageQueuedBanner: React.FC = () => {
+ const queuedMessage = useAtomValue(queuedMessageAtom)
+
+ return (
+
+ {queuedMessage && (
+
+
+ Message queued. It will be sent once the model has started
+
+
+ )}
+
+ )
+}
+
+export default MessageQueuedBanner
diff --git a/web/screens/Chat/MessageToolbar/index.tsx b/web/screens/Chat/MessageToolbar/index.tsx
index 183eae814..070022122 100644
--- a/web/screens/Chat/MessageToolbar/index.tsx
+++ b/web/screens/Chat/MessageToolbar/index.tsx
@@ -3,8 +3,9 @@ import {
ExtensionTypeEnum,
ThreadMessage,
ChatCompletionRole,
+ ConversationalExtension,
+ ContentType,
} from '@janhq/core'
-import { ConversationalExtension } from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
import { RefreshCcw, CopyIcon, Trash2Icon, CheckIcon } from 'lucide-react'
@@ -53,7 +54,9 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
{message.id === messages[messages.length - 1]?.id &&
- messages[messages.length - 1].status !== MessageStatus.Error && (
+ messages[messages.length - 1].status !== MessageStatus.Error &&
+ messages[messages.length - 1].content[0]?.type !==
+ ContentType.Pdf && (
boolean
+ updater?: (
+ threadId: string,
+ name: string,
+ value: string | number | boolean | string[]
+ ) => void
+}) => {
+ const { updateModelParameter } = useUpdateModelParameters()
+
+ const threadId = useAtomValue(getActiveThreadIdAtom)
+
+ const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
+
+ const modelSettingParams = toSettingParams(activeModelParams)
+
+ const engineParams = getConfigurationsData(modelSettingParams)
+
+ const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
+
+ const { stopModel } = useActiveModel()
+
+ const onValueChanged = (
+ name: string,
+ value: string | number | boolean | string[]
+ ) => {
+ if (!threadId) return
+ if (engineParams.some((x) => x.name.includes(name))) {
+ setEngineParamsUpdate(true)
+ stopModel()
+ } else {
+ setEngineParamsUpdate(false)
+ }
+ if (updater) updater(threadId, name, value)
+ else {
+ // Convert stop string to array
+ if (name === 'stop' && typeof value === 'string') {
+ value = [value]
+ }
+ updateModelParameter(threadId, {
+ params: { [name]: value },
+ })
+ }
+ }
+
+ const components = componentData
+ .filter((x) => (selector ? selector(x) : true))
+ .map((data) => {
+ switch (data.controllerType) {
+ case 'slider':
+ const { min, max, step, value } = data.controllerData as SliderData
+ return (
+ onValueChanged(data.name, value)}
+ />
+ )
+ case 'input':
+ const { placeholder, value: textValue } =
+ data.controllerData as InputData
+ return (
+ onValueChanged(data.name, value)}
+ />
+ )
+ case 'checkbox':
+ const { checked } = data.controllerData as CheckboxData
+ return (
+ onValueChanged(data.name, value)}
+ />
+ )
+ default:
+ return null
+ }
+ })
+
+ return {components}
+}
+
+export default SettingComponent
diff --git a/web/screens/Chat/ModelSetting/index.tsx b/web/screens/Chat/ModelSetting/index.tsx
index ff5d3d40f..ea95363eb 100644
--- a/web/screens/Chat/ModelSetting/index.tsx
+++ b/web/screens/Chat/ModelSetting/index.tsx
@@ -8,7 +8,7 @@ import { selectedModelAtom } from '@/containers/DropdownListSidebar'
import { getConfigurationsData } from '@/utils/componentSettings'
import { toRuntimeParams } from '@/utils/modelParam'
-import settingComponentBuilder from './settingComponentBuilder'
+import SettingComponentBuilder from './SettingComponent'
import { getActiveThreadModelParamsAtom } from '@/helpers/atoms/Thread.atom'
@@ -27,7 +27,10 @@ const ModelSetting = () => {
return (
- {settingComponentBuilder(componentData)}
+ e.name !== 'prompt_template'}
+ />
)
}
diff --git a/web/screens/Chat/ModelSetting/predefinedComponent.ts b/web/screens/Chat/ModelSetting/predefinedComponent.ts
index abcec508e..b67117184 100644
--- a/web/screens/Chat/ModelSetting/predefinedComponent.ts
+++ b/web/screens/Chat/ModelSetting/predefinedComponent.ts
@@ -1,4 +1,4 @@
-import { SettingComponentData } from './settingComponentBuilder'
+import { SettingComponentData } from './SettingComponent'
export const presetConfiguration: Record = {
prompt_template: {
@@ -141,4 +141,52 @@ export const presetConfiguration: Record = {
value: 1,
},
},
+ // Assistant retrieval tool settings
+ chunk_size: {
+ name: 'chunk_size',
+ title: 'Chunk Size',
+ description: 'Maximum number of tokens in a chunk',
+ controllerType: 'slider',
+ controllerData: {
+ min: 128,
+ max: 2048,
+ step: 128,
+ value: 1024,
+ },
+ },
+ chunk_overlap: {
+ name: 'chunk_overlap',
+ title: 'Chunk Overlap',
+ description: 'Number of tokens overlapping between two adjacent chunks',
+ controllerType: 'slider',
+ controllerData: {
+ min: 32,
+ max: 512,
+ step: 32,
+ value: 64,
+ },
+ },
+ top_k: {
+ name: 'top_k',
+ title: 'Top K',
+ description: 'Number of top-ranked documents to retrieve',
+ controllerType: 'slider',
+ controllerData: {
+ min: 1,
+ max: 5,
+ step: 1,
+ value: 2,
+ },
+ },
+ retrieval_template: {
+ name: 'retrieval_template',
+ title: 'Retrieval Template',
+ description:
+ 'The template to use for retrieval. The following variables are available: {CONTEXT}, {QUESTION}',
+ controllerType: 'input',
+ controllerData: {
+ placeholder: 'Retrieval Template',
+ value: '',
+ },
+ },
}
diff --git a/web/screens/Chat/ModelSetting/settingComponentBuilder.tsx b/web/screens/Chat/ModelSetting/settingComponentBuilder.tsx
deleted file mode 100644
index 8ff8e7c02..000000000
--- a/web/screens/Chat/ModelSetting/settingComponentBuilder.tsx
+++ /dev/null
@@ -1,89 +0,0 @@
-/* eslint-disable no-case-declarations */
-import Checkbox from '@/containers/Checkbox'
-import ModelConfigInput from '@/containers/ModelConfigInput'
-import SliderRightPanel from '@/containers/SliderRightPanel'
-
-export type ControllerType = 'slider' | 'checkbox' | 'input'
-
-export type SettingComponentData = {
- name: string
- title: string
- description: string
- controllerType: ControllerType
- controllerData: SliderData | CheckboxData | InputData
-}
-
-export type InputData = {
- placeholder: string
- value: string
-}
-
-export type SliderData = {
- min: number
- max: number
-
- step: number
- value: number
-}
-
-type CheckboxData = {
- checked: boolean
-}
-
-const settingComponentBuilder = (
- componentData: SettingComponentData[],
- onlyPrompt?: boolean
-) => {
- const components = componentData
- .filter((x) =>
- onlyPrompt ? x.name === 'prompt_template' : x.name !== 'prompt_template'
- )
- .map((data) => {
- switch (data.controllerType) {
- case 'slider':
- const { min, max, step, value } = data.controllerData as SliderData
- return (
-
- )
- case 'input':
- const { placeholder, value: textValue } =
- data.controllerData as InputData
- return (
-
- )
- case 'checkbox':
- const { checked } = data.controllerData as CheckboxData
- return (
-
- )
- default:
- return null
- }
- })
-
- return {components}
-}
-
-export default settingComponentBuilder
diff --git a/web/screens/Chat/RequestDownloadModel/index.tsx b/web/screens/Chat/RequestDownloadModel/index.tsx
new file mode 100644
index 000000000..88fdadd57
--- /dev/null
+++ b/web/screens/Chat/RequestDownloadModel/index.tsx
@@ -0,0 +1,45 @@
+import React, { Fragment, useCallback } from 'react'
+
+import { Button } from '@janhq/uikit'
+
+import { useAtomValue } from 'jotai'
+
+import LogoMark from '@/containers/Brand/Logo/Mark'
+
+import { MainViewState } from '@/constants/screens'
+
+import { useMainViewState } from '@/hooks/useMainViewState'
+
+import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
+
+const RequestDownloadModel: React.FC = () => {
+ const downloadedModels = useAtomValue(downloadedModelsAtom)
+ const { setMainViewState } = useMainViewState()
+
+ const onClick = useCallback(() => {
+ setMainViewState(MainViewState.Hub)
+ }, [setMainViewState])
+
+ return (
+
+ {downloadedModels.length === 0 && (
+
+
+ Welcome!
+
+ You need to download your first model
+
+
+
+ )}
+
+ )
+}
+
+export default React.memo(RequestDownloadModel)
diff --git a/web/screens/Chat/Sidebar/index.tsx b/web/screens/Chat/Sidebar/index.tsx
index 64e58d4d3..8088501b9 100644
--- a/web/screens/Chat/Sidebar/index.tsx
+++ b/web/screens/Chat/Sidebar/index.tsx
@@ -1,7 +1,8 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
-import React from 'react'
+import React, { useContext } from 'react'
-import { Input, Textarea } from '@janhq/uikit'
+import { InferenceEngine } from '@janhq/core'
+import { Input, Textarea, Switch } from '@janhq/uikit'
import { atom, useAtomValue } from 'jotai'
@@ -10,17 +11,22 @@ import { twMerge } from 'tailwind-merge'
import LogoMark from '@/containers/Brand/Logo/Mark'
import CardSidebar from '@/containers/CardSidebar'
-import DropdownListSidebar from '@/containers/DropdownListSidebar'
+import DropdownListSidebar, {
+ selectedModelAtom,
+} from '@/containers/DropdownListSidebar'
+
+import { FeatureToggleContext } from '@/context/FeatureToggle'
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
import { getConfigurationsData } from '@/utils/componentSettings'
import { toRuntimeParams, toSettingParams } from '@/utils/modelParam'
+import AssistantSetting from '../AssistantSetting'
import EngineSetting from '../EngineSetting'
import ModelSetting from '../ModelSetting'
-import settingComponentBuilder from '../ModelSetting/settingComponentBuilder'
+import SettingComponentBuilder from '../ModelSetting/SettingComponent'
import {
activeThreadAtom,
@@ -33,18 +39,24 @@ const Sidebar: React.FC = () => {
const showing = useAtomValue(showRightSideBarAtom)
const activeThread = useAtomValue(activeThreadAtom)
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
-
+ const selectedModel = useAtomValue(selectedModelAtom)
const { updateThreadMetadata } = useCreateNewThread()
+ const { experimentalFeature } = useContext(FeatureToggleContext)
const modelEngineParams = toSettingParams(activeModelParams)
const modelRuntimeParams = toRuntimeParams(activeModelParams)
+ const componentDataAssistantSetting = getConfigurationsData(
+ (activeThread?.assistants[0]?.tools &&
+ activeThread?.assistants[0]?.tools[0]?.settings) ??
+ {}
+ )
const componentDataEngineSetting = getConfigurationsData(modelEngineParams)
const componentDataRuntimeSetting = getConfigurationsData(modelRuntimeParams)
return (
{
}}
/>
- {/* Temporary disabled */}
- {/*
-
-
-
-
-
- */}
+ {experimentalFeature && (
+
+ {activeThread?.assistants[0]?.tools &&
+ componentDataAssistantSetting.length > 0 && (
+
+ {
+ if (activeThread)
+ updateThreadMetadata({
+ ...activeThread,
+ assistants: [
+ {
+ ...activeThread.assistants[0],
+ tools: [
+ {
+ type: 'retrieval',
+ enabled: e,
+ settings:
+ (activeThread.assistants[0].tools &&
+ activeThread.assistants[0]
+ .tools[0]?.settings) ??
+ {},
+ },
+ ],
+ },
+ ],
+ })
+ }}
+ />
+ }
+ >
+ {activeThread?.assistants[0]?.tools[0].enabled && (
+
+
+
+
+
+
+
+
+
+ )}
+
+
+ )}
+
+ )}
-
-
-
-
+
+
- {componentDataRuntimeSetting.length !== 0 && (
+ {componentDataRuntimeSetting.length > 0 && (
@@ -161,13 +224,16 @@ const Sidebar: React.FC = () => {
- {settingComponentBuilder(componentDataEngineSetting, true)}
+ x.name === 'prompt_template'}
+ />
)}
- {componentDataEngineSetting.length !== 0 && (
+ {componentDataEngineSetting.length > 0 && (
diff --git a/web/screens/Chat/SimpleTextMessage/index.tsx b/web/screens/Chat/SimpleTextMessage/index.tsx
index 8754664aa..9be45e7e6 100644
--- a/web/screens/Chat/SimpleTextMessage/index.tsx
+++ b/web/screens/Chat/SimpleTextMessage/index.tsx
@@ -1,11 +1,24 @@
import React, { useEffect, useRef, useState } from 'react'
-import { ChatCompletionRole, MessageStatus, ThreadMessage } from '@janhq/core'
+import {
+ ChatCompletionRole,
+ ContentType,
+ MessageStatus,
+ ThreadMessage,
+} from '@janhq/core'
+import {
+ Tooltip,
+ TooltipArrow,
+ TooltipContent,
+ TooltipPortal,
+ TooltipTrigger,
+} from '@janhq/uikit'
import hljs from 'highlight.js'
import { useAtomValue } from 'jotai'
-import { Marked, Renderer } from 'marked'
+import { FolderOpenIcon } from 'lucide-react'
+import { Marked, Renderer, marked as markedDefault } from 'marked'
import { markedHighlight } from 'marked-highlight'
@@ -13,21 +26,39 @@ import { twMerge } from 'tailwind-merge'
import LogoMark from '@/containers/Brand/Logo/Mark'
-import BubbleLoader from '@/containers/Loader/Bubble'
-
import { useClipboard } from '@/hooks/useClipboard'
+import { usePath } from '@/hooks/usePath'
+import { toGibibytes } from '@/utils/converter'
import { displayDate } from '@/utils/datetime'
+import Icon from '../FileUploadPreview/Icon'
import MessageToolbar from '../MessageToolbar'
import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
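+// Treat a message as markdown only when it contains code or codespan tokens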
+function isMarkdownValue(value: string): boolean {
+ const tokenTypes: string[] = []
+ markedDefault(value, {
+ walkTokens: (token) => {
+ tokenTypes.push(token.type)
+ },
+ })
+ const isMarkdown = ['code', 'codespan'].some((tokenType) => {
+ return tokenTypes.includes(tokenType)
+ })
+ return isMarkdown
+}
+
const SimpleTextMessage: React.FC<ThreadMessage> = (props) => {
let text = ''
+ const isUser = props.role === ChatCompletionRole.User
+ const isSystem = props.role === ChatCompletionRole.System
+
if (props.content && props.content.length > 0) {
text = props.content[0]?.text?.value ?? ''
}
+
const clipboard = useClipboard({ timeout: 1000 })
const marked: Marked = new Marked(
@@ -73,11 +104,9 @@ const SimpleTextMessage: React.FC = (props) => {
}
)
+ const { onViewFile, onViewFileContainer } = usePath()
const parsedText = marked.parse(text)
- const isUser = props.role === ChatCompletionRole.User
- const isSystem = props.role === ChatCompletionRole.System
const [tokenCount, setTokenCount] = useState(0)
-
const [lastTimestamp, setLastTimestamp] = useState()
const [tokenSpeed, setTokenSpeed] = useState(0)
const messages = useAtomValue(getCurrentChatMessagesAtom)
@@ -148,6 +177,7 @@ const SimpleTextMessage: React.FC = (props) => {
)}
+
= (props) => {
- {props.status === MessageStatus.Pending &&
- (!props.content[0] || props.content[0].text.value === '') ? (
-
- ) : (
- <>
+ <>
+ {props.content[0]?.type === ContentType.Image && (
+
+ ![{props.content[0]?.text.name}]({props.content[0]?.text.annotations[0]}) onViewFile(`${props.id}.png`)}
+ />
+
+
+
+
+
+
+
+
+
+ Show in finder
+
+
+
+
+
+ )}
+
+ {props.content[0]?.type === ContentType.Pdf && (
+
+
+ onViewFile(`${props.id}.${props.content[0]?.type}`)
+ }
+ />
+
+
+
+
+
+
+
+
+ Show in finder
+
+
+
+
+
+
+
+
+
+ {props.content[0].text.name?.replaceAll(/[-._]/g, ' ')}
+
+
+ {toGibibytes(Number(props.content[0].text.size))}
+
+
+
+ )}
+
+ {isUser && !isMarkdownValue(text) ? (
+
+ {text}
+
+ ) : (
= (props) => {
// eslint-disable-next-line @typescript-eslint/naming-convention
dangerouslySetInnerHTML={{ __html: parsedText }}
/>
- >
- )}
+ )}
+ >
)
diff --git a/web/screens/Chat/ThreadList/index.tsx b/web/screens/Chat/ThreadList/index.tsx
index 0e09a20a7..2ad9a28c4 100644
--- a/web/screens/Chat/ThreadList/index.tsx
+++ b/web/screens/Chat/ThreadList/index.tsx
@@ -1,74 +1,39 @@
-import { useEffect } from 'react'
+import { useCallback } from 'react'
-import {
- Modal,
- ModalTrigger,
- ModalClose,
- ModalFooter,
- ModalPortal,
- ModalContent,
- ModalHeader,
- ModalTitle,
- Button,
-} from '@janhq/uikit'
+import { Thread } from '@janhq/core/'
import { motion as m } from 'framer-motion'
import { useAtomValue } from 'jotai'
-import {
- GalleryHorizontalEndIcon,
- MoreVerticalIcon,
- Trash2Icon,
- Paintbrush,
-} from 'lucide-react'
+import { GalleryHorizontalEndIcon, MoreVerticalIcon } from 'lucide-react'
import { twMerge } from 'tailwind-merge'
-import { useCreateNewThread } from '@/hooks/useCreateNewThread'
-import useDeleteThread from '@/hooks/useDeleteThread'
-
-import useGetAssistants from '@/hooks/useGetAssistants'
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
import useSetActiveThread from '@/hooks/useSetActiveThread'
-import useThreads from '@/hooks/useThreads'
-
import { displayDate } from '@/utils/datetime'
+import CleanThreadModal from '../CleanThreadModal'
+
+import DeleteThreadModal from '../DeleteThreadModal'
+
import {
- activeThreadAtom,
+ getActiveThreadIdAtom,
threadStatesAtom,
threadsAtom,
} from '@/helpers/atoms/Thread.atom'
export default function ThreadList() {
- const threads = useAtomValue(threadsAtom)
const threadStates = useAtomValue(threadStatesAtom)
- const { getThreads } = useThreads()
- const { assistants } = useGetAssistants()
- const { requestCreateNewThread } = useCreateNewThread()
- const activeThread = useAtomValue(activeThreadAtom)
- const { deleteThread, cleanThread } = useDeleteThread()
- const { downloadedModels } = useGetDownloadedModels()
+ const threads = useAtomValue(threadsAtom)
+ const activeThreadId = useAtomValue(getActiveThreadIdAtom)
+ const { setActiveThread } = useSetActiveThread()
- const { activeThreadId, setActiveThread: onThreadClick } =
- useSetActiveThread()
-
- useEffect(() => {
- getThreads()
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
-
- useEffect(() => {
- if (
- downloadedModels.length !== 0 &&
- threads.length === 0 &&
- assistants.length !== 0 &&
- !activeThread
- ) {
- requestCreateNewThread(assistants[0])
- }
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [assistants, threads, downloadedModels, activeThread])
+ const onThreadClick = useCallback(
+ (thread: Thread) => {
+ setActiveThread(thread)
+ },
+ [setActiveThread]
+ )
return (
@@ -81,136 +46,46 @@ export default function ThreadList() {
No Thread History
) : (
- threads.map((thread, i) => {
- const lastMessage =
- threadStates[thread.id]?.lastMessage ?? 'No new message'
- return (
- {
- onThreadClick(thread)
- }}
- >
-
-
- {thread.title}
-
- {thread.updated &&
- displayDate(new Date(thread.updated).getTime())}
-
-
-
- {lastMessage || 'No new message'}
-
-
-
-
-
-
- e.stopPropagation()}>
-
-
-
-
-
- Clean Thread
-
- Are you sure you want to clean this thread?
-
-
- e.stopPropagation()}
- >
-
-
-
-
-
-
-
-
-
-
- e.stopPropagation()}>
-
-
-
- Delete thread
-
-
-
-
-
-
- Delete Thread
-
-
- Are you sure you want to delete this thread? This action
- cannot be undone.
-
-
-
- e.stopPropagation()}
- >
-
-
-
-
-
-
-
-
-
-
-
- {activeThreadId === thread.id && (
-
- )}
+ threads.map((thread) => (
+ {
+ onThreadClick(thread)
+ }}
+ >
+
+
+ {thread.updated && displayDate(thread.updated)}
+
+ {thread.title}
+
+ {threadStates[thread.id]?.lastMessage
+ ? threadStates[thread.id]?.lastMessage
+ : 'No new message'}
+
- )
- })
+
+ {activeThreadId === thread.id && (
+
+ )}
+
+ ))
)}
)
diff --git a/web/screens/Chat/index.tsx b/web/screens/Chat/index.tsx
index 684027e49..e3eedb6c1 100644
--- a/web/screens/Chat/index.tsx
+++ b/web/screens/Chat/index.tsx
@@ -1,111 +1,147 @@
-import { ChangeEvent, Fragment, KeyboardEvent, useEffect, useRef } from 'react'
+/* eslint-disable @typescript-eslint/naming-convention */
+import React, { useContext, useEffect, useState } from 'react'
-import { InferenceEvent, MessageStatus, events } from '@janhq/core'
-import { Button, Textarea } from '@janhq/uikit'
+import { useDropzone } from 'react-dropzone'
-import { useAtom, useAtomValue } from 'jotai'
+import { useAtomValue, useSetAtom } from 'jotai'
-import { debounce } from 'lodash'
-import { StopCircle } from 'lucide-react'
+import { UploadCloudIcon } from 'lucide-react'
-import LogoMark from '@/containers/Brand/Logo/Mark'
+import { twMerge } from 'tailwind-merge'
+import GenerateResponse from '@/containers/Loader/GenerateResponse'
import ModelReload from '@/containers/Loader/ModelReload'
import ModelStart from '@/containers/Loader/ModelStart'
-import { currentPromptAtom } from '@/containers/Providers/Jotai'
+import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
import { showLeftSideBarAtom } from '@/containers/Providers/KeyListener'
-import { MainViewState } from '@/constants/screens'
+import { snackbar } from '@/containers/Toast'
-import { useActiveModel } from '@/hooks/useActiveModel'
+import { FeatureToggleContext } from '@/context/FeatureToggle'
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
-import { useMainViewState } from '@/hooks/useMainViewState'
-
-import useSendChatMessage from '@/hooks/useSendChatMessage'
+import { activeModelAtom } from '@/hooks/useActiveModel'
+import { queuedMessageAtom, reloadModelAtom } from '@/hooks/useSendChatMessage'
import ChatBody from '@/screens/Chat/ChatBody'
import ThreadList from '@/screens/Chat/ThreadList'
+import ChatInput from './ChatInput'
+import RequestDownloadModel from './RequestDownloadModel'
import Sidebar from './Sidebar'
-import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
-
import {
activeThreadAtom,
engineParamsUpdateAtom,
- getActiveThreadIdAtom,
- waitingToSendMessage,
+ isGeneratingResponseAtom,
} from '@/helpers/atoms/Thread.atom'
-import { activeThreadStateAtom } from '@/helpers/atoms/Thread.atom'
+const renderError = (code: string) => {
+ switch (code) {
+ case 'multiple-upload':
+ return 'Currently, we only support one attachment at a time'
-const ChatScreen = () => {
- const activeThread = useAtomValue(activeThreadAtom)
- const { downloadedModels } = useGetDownloadedModels()
- const showLeftSideBar = useAtomValue(showLeftSideBarAtom)
+ case 'retrieval-off':
+ return 'Turn on Retrieval in Assistant Settings to use this feature'
- const { activeModel, stateModel } = useActiveModel()
- const { setMainViewState } = useMainViewState()
- const messages = useAtomValue(getCurrentChatMessagesAtom)
+ case 'file-invalid-type':
+ return 'We do not support this file type'
- const [currentPrompt, setCurrentPrompt] = useAtom(currentPromptAtom)
- const activeThreadState = useAtomValue(activeThreadStateAtom)
- const { sendChatMessage, queuedMessage, reloadModel } = useSendChatMessage()
- const isWaitingForResponse = activeThreadState?.waitingForResponse ?? false
- const isDisabledChatbox =
- currentPrompt.trim().length === 0 || isWaitingForResponse
-
- const activeThreadId = useAtomValue(getActiveThreadIdAtom)
- const [isWaitingToSend, setIsWaitingToSend] = useAtom(waitingToSendMessage)
-
- const textareaRef = useRef (null)
- const modelRef = useRef(activeModel)
- const engineParamsUpdate = useAtomValue(engineParamsUpdateAtom)
-
- useEffect(() => {
- modelRef.current = activeModel
- }, [activeModel])
-
- const onPromptChange = (e: React.ChangeEvent) => {
- setCurrentPrompt(e.target.value)
+ default:
+ return 'Oops, something went wrong. Please try again.'
}
+}
- useEffect(() => {
- if (isWaitingToSend && activeThreadId) {
- setIsWaitingToSend(false)
- sendChatMessage()
- }
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [waitingToSendMessage, activeThreadId])
+const ChatScreen: React.FC = () => {
+ const setCurrentPrompt = useSetAtom(currentPromptAtom)
+ const activeThread = useAtomValue(activeThreadAtom)
+ const showLeftSideBar = useAtomValue(showLeftSideBarAtom)
+ const engineParamsUpdate = useAtomValue(engineParamsUpdateAtom)
+ const [dragOver, setDragOver] = useState(false)
- useEffect(() => {
- if (textareaRef.current) {
- textareaRef.current.style.height = '40px'
- textareaRef.current.style.height = textareaRef.current.scrollHeight + 'px'
- }
- }, [currentPrompt])
+ const queuedMessage = useAtomValue(queuedMessageAtom)
+ const reloadModel = useAtomValue(reloadModelAtom)
+ const [dragRejected, setDragRejected] = useState({ code: '' })
+ const setFileUpload = useSetAtom(fileUploadAtom)
+ const { experimentalFeature } = useContext(FeatureToggleContext)
- const onKeyDown = debounce(
- async (e: React.KeyboardEvent) => {
- if (e.key === 'Enter') {
- if (!e.shiftKey) {
- e.preventDefault()
- if (messages[messages.length - 1]?.status !== MessageStatus.Pending)
- sendChatMessage()
- else onStopInferenceClick()
- }
+ const activeModel = useAtomValue(activeModelAtom)
+
+ const isGeneratingResponse = useAtomValue(isGeneratingResponseAtom)
+
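+ // Dropzone accepts a single PDF; drag-and-drop attachments are gated behind the experimental retrieval feature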
+ const { getRootProps, isDragReject } = useDropzone({
+ noClick: true,
+ multiple: false,
+ accept: {
+ 'application/pdf': ['.pdf'],
+ },
+
+ onDragOver: (e) => {
+ // Retrieval file drag and drop is experimental feature
+ if (!experimentalFeature) return
+ if (
+ e.dataTransfer.items.length === 1 &&
+ activeThread?.assistants[0].tools &&
+ activeThread?.assistants[0].tools[0]?.enabled
+ ) {
+ setDragOver(true)
+ } else if (
+ activeThread?.assistants[0].tools &&
+ !activeThread?.assistants[0].tools[0]?.enabled
+ ) {
+ setDragRejected({ code: 'retrieval-off' })
+ } else {
+ setDragRejected({ code: 'multiple-upload' })
}
},
- 50,
- { leading: false, trailing: true }
- )
+ onDragLeave: () => setDragOver(false),
+ onDrop: (files, rejectFiles) => {
+ // Retrieval file drag and drop is experimental feature
+ if (!experimentalFeature) return
+ if (
+ !files ||
+ files.length !== 1 ||
+ rejectFiles.length !== 0 ||
+ (activeThread?.assistants[0].tools &&
+ !activeThread?.assistants[0].tools[0]?.enabled)
+ )
+ return
+ const imageType = files[0]?.type.includes('image')
+ setFileUpload([{ file: files[0], type: imageType ? 'image' : 'pdf' }])
+ setDragOver(false)
+ if (imageType) {
+ setCurrentPrompt('What do you see in this image?')
+ } else {
+ setCurrentPrompt('Summarize this for me')
+ }
+ },
+ onDropRejected: (e) => {
+ if (
+ activeThread?.assistants[0].tools &&
+ !activeThread?.assistants[0].tools[0]?.enabled
+ ) {
+ setDragRejected({ code: 'retrieval-off' })
+ } else {
+ setDragRejected({ code: e[0].errors[0].code })
+ }
+ setDragOver(false)
+ },
+ })
- const onStopInferenceClick = async () => {
- events.emit(InferenceEvent.OnInferenceStopped, {})
- }
+ useEffect(() => {
+ if (dragRejected.code) {
+ snackbar({
+ description: renderError(dragRejected.code),
+ type: 'error',
+ })
+ }
+ setTimeout(() => {
+ if (dragRejected.code) {
+ setDragRejected({ code: '' })
+ }
+ }, 2000)
+ }, [dragRejected.code])
return (
@@ -116,34 +152,41 @@ const ChatScreen = () => {
) : null}
-
+
+ {dragOver && (
+
+
+
+
+
+
+
+
+ {isDragReject
+ ? 'Currently, we only support one PDF attachment at a time'
+ : 'Drop file here'}
+
+ {!isDragReject && (PDF) }
+
+
+
+
+ )}
{activeThread ? (
) : (
-
- {downloadedModels.length === 0 && (
-
-
- Welcome!
-
- You need to download your first model
-
-
-
- )}
-
+
)}
{!engineParamsUpdate && }
@@ -167,44 +210,8 @@ const ChatScreen = () => {
)}
-
-
+ {activeModel && isGeneratingResponse && }
+
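
A minimal, self-contained sketch of the PDF drag-and-drop wiring added above, assuming react-dropzone's useDropzone options as used in the hunk. PdfDropArea, retrievalEnabled, and onFileAccepted are illustrative names rather than the app's real API, and rejection handling is simplified (the screen itself surfaces rejections through a snackbar).

// Sketch only: illustrative component, not the codebase's actual file.
import React, { useState } from 'react'
import { useDropzone, FileRejection } from 'react-dropzone'

type Upload = { file: File; type: 'image' | 'pdf' }

type Props = {
  retrievalEnabled: boolean // stands in for assistants[0].tools[0]?.enabled in the diff
  onFileAccepted: (upload: Upload, suggestedPrompt: string) => void
}

const PdfDropArea: React.FC<Props> = ({ retrievalEnabled, onFileAccepted }) => {
  const [dragOver, setDragOver] = useState(false)
  const [rejectionCode, setRejectionCode] = useState('')

  const { getRootProps, isDragReject } = useDropzone({
    noClick: true, // clicks pass through; only drag events are handled here
    multiple: false, // a single attachment at a time
    accept: { 'application/pdf': ['.pdf'] },
    onDragOver: (e) => {
      if (e.dataTransfer.items.length === 1 && retrievalEnabled) {
        setDragOver(true)
      } else {
        setRejectionCode(retrievalEnabled ? 'multiple-upload' : 'retrieval-off')
      }
    },
    onDragLeave: () => {
      setDragOver(false)
      setRejectionCode('') // the real screen clears this via a snackbar + timeout instead
    },
    onDrop: (accepted: File[], rejected: FileRejection[]) => {
      setDragOver(false)
      if (accepted.length !== 1 || rejected.length !== 0 || !retrievalEnabled) return
      // Mirrors the image check in the diff, although `accept` above only admits PDFs.
      const isImage = accepted[0].type.includes('image')
      onFileAccepted(
        { file: accepted[0], type: isImage ? 'image' : 'pdf' },
        isImage ? 'What do you see in this image?' : 'Summarize this for me'
      )
    },
    onDropRejected: (rejected: FileRejection[]) => {
      setRejectionCode(
        retrievalEnabled ? (rejected[0]?.errors[0]?.code ?? 'unknown') : 'retrieval-off'
      )
      setDragOver(false)
    },
  })

  return (
    <div {...getRootProps()}>
      {dragOver && (
        <p>{isDragReject ? 'Only a single PDF attachment is supported' : 'Drop file here'}</p>
      )}
      {rejectionCode && <p role="alert">Upload rejected: {rejectionCode}</p>}
    </div>
  )
}

export default PdfDropArea
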
diff --git a/web/screens/ExploreModels/ExploreModelItemHeader/index.tsx b/web/screens/ExploreModels/ExploreModelItemHeader/index.tsx
index 656a671c7..17b897d51 100644
--- a/web/screens/ExploreModels/ExploreModelItemHeader/index.tsx
+++ b/web/screens/ExploreModels/ExploreModelItemHeader/index.tsx
@@ -24,16 +24,21 @@ import { MainViewState } from '@/constants/screens'
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
import useDownloadModel from '@/hooks/useDownloadModel'
+
import { useDownloadState } from '@/hooks/useDownloadState'
-import { getAssistants } from '@/hooks/useGetAssistants'
-import { downloadedModelsAtom } from '@/hooks/useGetDownloadedModels'
+
import { useMainViewState } from '@/hooks/useMainViewState'
import { toGibibytes } from '@/utils/converter'
+import { assistantsAtom } from '@/helpers/atoms/Assistant.atom'
import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
-import { totalRamAtom } from '@/helpers/atoms/SystemBar.atom'
+import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
+import {
+ nvidiaTotalVramAtom,
+ totalRamAtom,
+} from '@/helpers/atoms/SystemBar.atom'
type Props = {
model: Model
@@ -47,7 +52,14 @@ const ExploreModelItemHeader: React.FC = ({ model, onClick, open }) => {
const { modelDownloadStateAtom } = useDownloadState()
const { requestCreateNewThread } = useCreateNewThread()
const totalRam = useAtomValue(totalRamAtom)
+ const nvidiaTotalVram = useAtomValue(nvidiaTotalVramAtom)
+ // NVIDIA reports VRAM in MB by default; convert it to bytes so it matches the unit of totalRam
+ let ram = nvidiaTotalVram * 1024 * 1024
+ if (ram === 0) {
+ ram = totalRam
+ }
const serverEnabled = useAtomValue(serverEnabledAtom)
+ const assistants = useAtomValue(assistantsAtom)
const downloadAtom = useMemo(
() => atom((get) => get(modelDownloadStateAtom)[model.id]),
@@ -58,17 +70,23 @@ const ExploreModelItemHeader: React.FC = ({ model, onClick, open }) => {
const onDownloadClick = useCallback(() => {
downloadModel(model)
- // eslint-disable-next-line react-hooks/exhaustive-deps
}, [model])
const isDownloaded = downloadedModels.find((md) => md.id === model.id) != null
let downloadButton = (
-
+
)
const onUseModelClick = useCallback(async () => {
- const assistants = await getAssistants()
if (assistants.length === 0) {
alert('No assistant available')
return
@@ -105,7 +123,7 @@ const ExploreModelItemHeader: React.FC = ({ model, onClick, open }) => {
}
const getLabel = (size: number) => {
- if (size * 1.25 >= totalRam) {
+ if (size * 1.25 >= ram) {
return (
Not enough RAM
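
The hunk above switches the "Not enough RAM" check from system RAM to NVIDIA VRAM when a GPU is detected. A short sketch of that arithmetic, keeping the MB-to-bytes conversion and the 1.25x headroom factor from the diff; the function names are illustrative.

// Sketch of the memory-sufficiency check from the hunk above.
// nvidiaTotalVramMb comes from nvidiaTotalVramAtom (MB); totalRamBytes from totalRamAtom (bytes).
const bytesFromMb = (mb: number): number => mb * 1024 * 1024

function availableMemoryBytes(nvidiaTotalVramMb: number, totalRamBytes: number): number {
  // Prefer VRAM when an NVIDIA GPU is present (non-zero), otherwise fall back to system RAM.
  const vramBytes = bytesFromMb(nvidiaTotalVramMb)
  return vramBytes > 0 ? vramBytes : totalRamBytes
}

function isEnoughMemory(modelSizeBytes: number, available: number): boolean {
  // The diff keeps a 25% headroom on top of the raw model size.
  return modelSizeBytes * 1.25 < available
}

// Example: an 8 GiB card (8192 MB) vs a 7 GiB model => 7 GiB * 1.25 = 8.75 GiB, so "Not enough RAM".
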
diff --git a/web/screens/ExploreModels/ModelVersionItem/index.tsx b/web/screens/ExploreModels/ModelVersionItem/index.tsx
index 50d71b161..3a9385670 100644
--- a/web/screens/ExploreModels/ModelVersionItem/index.tsx
+++ b/web/screens/ExploreModels/ModelVersionItem/index.tsx
@@ -10,9 +10,11 @@ import { MainViewState } from '@/constants/screens'
import useDownloadModel from '@/hooks/useDownloadModel'
import { useDownloadState } from '@/hooks/useDownloadState'
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
+
import { useMainViewState } from '@/hooks/useMainViewState'
+import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
+
type Props = {
model: Model
isRecommended: boolean
@@ -20,7 +22,7 @@ type Props = {
const ModelVersionItem: React.FC = ({ model }) => {
const { downloadModel } = useDownloadModel()
- const { downloadedModels } = useGetDownloadedModels()
+ const downloadedModels = useAtomValue(downloadedModelsAtom)
const { setMainViewState } = useMainViewState()
const isDownloaded =
downloadedModels.find(
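
The change above replaces the useGetDownloadedModels hook with a direct read of downloadedModelsAtom. A minimal jotai sketch of that pattern; the Model shape and the hook below are simplified stand-ins, not the app's real definitions.

// Minimal jotai sketch of the downloadedModelsAtom read used above.
import { atom, useAtomValue } from 'jotai'

type Model = { id: string; name: string }

export const downloadedModelsAtom = atom<Model[]>([])

export const useIsDownloaded = (modelId: string): boolean => {
  const downloadedModels = useAtomValue(downloadedModelsAtom)
  return downloadedModels.some((m) => m.id === modelId)
}
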
diff --git a/web/screens/ExploreModels/index.tsx b/web/screens/ExploreModels/index.tsx
index d988fcafc..7002c60b7 100644
--- a/web/screens/ExploreModels/index.tsx
+++ b/web/screens/ExploreModels/index.tsx
@@ -1,4 +1,4 @@
-import { useState } from 'react'
+import { useCallback, useState } from 'react'
import { openExternalUrl } from '@janhq/core'
import {
@@ -12,24 +12,24 @@ import {
SelectItem,
} from '@janhq/uikit'
+import { useAtomValue } from 'jotai'
import { SearchIcon } from 'lucide-react'
-import Loader from '@/containers/Loader'
-
-import { useGetConfiguredModels } from '@/hooks/useGetConfiguredModels'
-
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
-
import ExploreModelList from './ExploreModelList'
+import {
+ configuredModelsAtom,
+ downloadedModelsAtom,
+} from '@/helpers/atoms/Model.atom'
+
const ExploreModelsScreen = () => {
- const { loading, models } = useGetConfiguredModels()
+ const configuredModels = useAtomValue(configuredModelsAtom)
+ const downloadedModels = useAtomValue(downloadedModelsAtom)
const [searchValue, setsearchValue] = useState('')
- const { downloadedModels } = useGetDownloadedModels()
const [sortSelected, setSortSelected] = useState('All Models')
const sortMenu = ['All Models', 'Recommended', 'Downloaded']
- const filteredModels = models.filter((x) => {
+ const filteredModels = configuredModels.filter((x) => {
if (sortSelected === 'Downloaded') {
return (
x.name.toLowerCase().includes(searchValue.toLowerCase()) &&
@@ -45,16 +45,17 @@ const ExploreModelsScreen = () => {
}
})
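
ExploreModelsScreen now filters configuredModelsAtom against the search box and the sort menu, as shown in the hunk above. A compact sketch of that predicate with a simplified Model shape; the "Recommended" branch in particular is only illustrative of where such a check would live.

// Sketch of the search + sort filtering in ExploreModelsScreen (simplified Model shape).
type Model = { id: string; name: string; metadata?: { tags?: string[] } }

function filterModels(
  configured: Model[],
  downloaded: Model[],
  searchValue: string,
  sortSelected: 'All Models' | 'Recommended' | 'Downloaded'
): Model[] {
  const matchesSearch = (m: Model) =>
    m.name.toLowerCase().includes(searchValue.toLowerCase())

  return configured.filter((m) => {
    if (sortSelected === 'Downloaded') {
      return matchesSearch(m) && downloaded.some((d) => d.id === m.id)
    }
    if (sortSelected === 'Recommended') {
      // The real screen keys this off model metadata; the tag check here is only illustrative.
      return matchesSearch(m) && (m.metadata?.tags ?? []).includes('Recommended')
    }
    return matchesSearch(m)
  })
}
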
- const onHowToImportModelClick = () => {
+ const onHowToImportModelClick = useCallback(() => {
openExternalUrl('https://jan.ai/guides/using-models/import-manually/')
- }
-
- if (loading) return
+ }, [])
return (
-
+
-
+
{
const { getServerLog } = useServerLog()
+ const serverEnabled = useAtomValue(serverEnabledAtom)
const [logs, setLogs] = useState([])
useEffect(() => {
getServerLog().then((log) => {
- if (typeof log?.split === 'function') setLogs(log.split(/\r?\n|\r|\n/g))
+ if (typeof log?.split === 'function') {
+ setLogs(log.split(/\r?\n|\r|\n/g))
+ }
})
// eslint-disable-next-line react-hooks/exhaustive-deps
- }, [logs])
+ }, [logs, serverEnabled])
return (
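
The logs view above now re-reads the server log whenever the server is toggled. A hedged sketch of that effect as a standalone hook, assuming getServerLog() resolves to the raw log text; unlike the diff, the dependency list below omits logs (the diff keeps it behind an eslint-disable).

// Sketch only; getServerLog() returning the raw log text is assumed.
import { useEffect, useState } from 'react'

export const useServerLogLines = (
  getServerLog: () => Promise<string | undefined>,
  serverEnabled: boolean
): string[] => {
  const [logs, setLogs] = useState<string[]>([])

  useEffect(() => {
    getServerLog().then((log) => {
      // Split on any common line-ending style before rendering one row per line.
      if (typeof log?.split === 'function') setLogs(log.split(/\r?\n|\r/g))
    })
    // Re-run when the server is started or stopped so the view stays current.
  }, [getServerLog, serverEnabled])

  return logs
}
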
diff --git a/web/screens/LocalServer/index.tsx b/web/screens/LocalServer/index.tsx
index ce709d831..b96f4c228 100644
--- a/web/screens/LocalServer/index.tsx
+++ b/web/screens/LocalServer/index.tsx
@@ -1,7 +1,6 @@
-/* eslint-disable @typescript-eslint/no-explicit-any */
'use client'
-import React, { useEffect, useState } from 'react'
+import React, { useCallback, useEffect, useState } from 'react'
import ScrollToBottom from 'react-scroll-to-bottom'
@@ -29,6 +28,7 @@ import { ExternalLinkIcon, InfoIcon } from 'lucide-react'
import { twMerge } from 'tailwind-merge'
import CardSidebar from '@/containers/CardSidebar'
+
import DropdownListSidebar, {
selectedModelAtom,
} from '@/containers/DropdownListSidebar'
@@ -41,7 +41,7 @@ import { toSettingParams } from '@/utils/modelParam'
import EngineSetting from '../Chat/EngineSetting'
-import settingComponentBuilder from '../Chat/ModelSetting/settingComponentBuilder'
+import SettingComponentBuilder from '../Chat/ModelSetting/SettingComponent'
import { showRightSideBarAtom } from '../Chat/Sidebar'
@@ -58,7 +58,7 @@ const portAtom = atom('1337')
const LocalServerScreen = () => {
const [errorRangePort, setErrorRangePort] = useState(false)
const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
- const showing = useAtomValue(showRightSideBarAtom)
+ const showRightSideBar = useAtomValue(showRightSideBarAtom)
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
const modelEngineParams = toSettingParams(activeModelParams)
@@ -66,43 +66,44 @@ const LocalServerScreen = () => {
const { openServerLog, clearServerLog } = useServerLog()
const { startModel, stateModel } = useActiveModel()
- const [selectedModel] = useAtom(selectedModelAtom)
+ const selectedModel = useAtomValue(selectedModelAtom)
const [isCorsEnabled, setIsCorsEnabled] = useAtom(corsEnabledAtom)
const [isVerboseEnabled, setIsVerboseEnabled] = useAtom(verboseEnabledAtom)
const [host, setHost] = useAtom(hostAtom)
const [port, setPort] = useAtom(portAtom)
+ const hostOptions = ['127.0.0.1', '0.0.0.0']
+
const FIRST_TIME_VISIT_API_SERVER = 'firstTimeVisitAPIServer'
const [firstTimeVisitAPIServer, setFirstTimeVisitAPIServer] =
useState<boolean>(false)
- const handleChangePort = (value: any) => {
- if (Number(value) <= 0 || Number(value) >= 65536) {
- setErrorRangePort(true)
- } else {
- setErrorRangePort(false)
- }
- setPort(value)
- }
+ const handleChangePort = useCallback(
+ (value: string) => {
+ if (Number(value) <= 0 || Number(value) >= 65536) {
+ setErrorRangePort(true)
+ } else {
+ setErrorRangePort(false)
+ }
+ setPort(value)
+ },
+ [setPort]
+ )
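
handleChangePort above validates the API-server port before storing it. A small sketch of that range check; it is slightly stricter than the diff in that it also rejects non-integer input.

// Sketch of the port-range validation wrapped in useCallback above.
// Valid TCP ports are 1..65535; anything else flags errorRangePort.
export const isValidPort = (value: string): boolean => {
  const port = Number(value)
  return Number.isInteger(port) && port > 0 && port < 65536
}

// Example: isValidPort('1337') === true, isValidPort('0') === false, isValidPort('70000') === false
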
useEffect(() => {
- if (
- localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) === null ||
- localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) === 'true'
- ) {
- localStorage.setItem(FIRST_TIME_VISIT_API_SERVER, 'true')
+ if (localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) == null) {
setFirstTimeVisitAPIServer(true)
}
}, [firstTimeVisitAPIServer])
useEffect(() => {
handleChangePort(port)
- }, [])
+ }, [handleChangePort, port])
return (
-
+
{/* Left SideBar */}
@@ -116,7 +117,7 @@ const LocalServerScreen = () => {