Merge branch 'dev'

# Conflicts:
#	core/src/browser/core.ts
#	core/src/browser/extensions/monitoring.ts
#	core/src/browser/fs.ts
#	core/src/extensions/ai-engines/LocalOAIEngine.ts
#	extensions/monitoring-extension/src/node/index.ts
#	extensions/tensorrt-llm-extension/src/index.ts
#	extensions/tensorrt-llm-extension/src/node/index.ts
#	web/hooks/useSendChatMessage.ts
This commit is contained in:
Louis 2024-03-28 10:46:05 +07:00
commit 75eea1fdb2
No known key found for this signature in database
GPG Key ID: 44FA9F4D33C37DE2
548 changed files with 2637 additions and 35074 deletions

View File

@ -0,0 +1,46 @@
# Pipeline auto assign current milestone for PR after the PR is merge
name: Assign Milestone
on:
pull_request:
types: [closed]
jobs:
assign_milestone:
runs-on: ubuntu-latest
permissions:
pull-requests: write
issues: write
steps:
- name: Assign Milestone
uses: actions/github-script@v3
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const { owner, repo } = context.repo;
const { number, merged } = context.payload.pull_request;
if (merged) {
const { data: milestones } = await github.issues.listMilestones({
owner,
repo,
state: 'open',
});
const mergedDate = new Date(context.payload.pull_request.merged_at);
const currentMilestone = milestones
.filter(milestone => milestone.due_on !== null)
.find((milestone) => {
const dueDate = new Date(milestone.due_on);
return mergedDate <= dueDate;
});
if (currentMilestone) {
await github.issues.update({
owner,
repo,
issue_number: number,
milestone: currentMilestone.number
});
}
}
debug: true

View File

@ -1,75 +0,0 @@
name: "Clean old cloudflare pages preview urls and nightly build"
on:
schedule:
- cron: "0 0 * * *" # every day at 00:00
workflow_dispatch:
jobs:
clean-cloudflare-pages-preview-urls:
strategy:
matrix:
project: ["jan", "nitro"]
runs-on: ubuntu-latest
steps:
- uses: actions/setup-python@v4
with:
python-version: '3.x'
- name: install requests
run: |
python3 -m pip install requests pytz tqdm
- name: Python Inline script
uses: jannekem/run-python-script-action@v1
with:
script: |
import requests
from datetime import datetime, UTC
from pytz import timezone
from tqdm import tqdm
# Configuration
endpoint = "https://api.cloudflare.com/client/v4/accounts/${{ secrets.CLOUDFLARE_ACCOUNT_ID }}/pages/projects/${{ matrix.project }}/deployments"
expiration_days = 3
headers = {
"Content-Type": "application/json;charset=UTF-8",
"Authorization": "Bearer ${{ secrets.CLOUDFLARE_API_TOKEN }}"
}
utc_tz = timezone('UTC')
# Fetch the list of deployments
response = requests.get(endpoint, headers=headers)
deployments = response.json()
for deployment in tqdm(deployments['result']):
# Calculate the age of the deployment
created_on = datetime.strptime(deployment['created_on'], "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=utc_tz)
if (datetime.now(UTC) - created_on).days > expiration_days:
# Delete the deployment
delete_response = requests.delete(f"{endpoint}/{deployment['id']}", headers=headers)
if delete_response.status_code == 200:
print(f"Deleted deployment: {deployment['id']}")
else:
print(f"Failed to delete deployment: {deployment['id']}")
clean-cloudflare-r2:
runs-on: ubuntu-latest
environment: production
steps:
- name: install-aws-cli-action
uses: unfor19/install-aws-cli-action@v1
- name: Delete object older than 10 days
run: |
# Get the list of objects in the 'latest' folder
OBJECTS=$(aws s3api list-objects --bucket ${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }} --prefix "latest/" --query 'Contents[?LastModified<`'$(date -d "$current_date -10 days" -u +"%Y-%m-%dT%H:%M:%SZ")'`].{Key: Key}' --endpoint-url https://${{ secrets.CLOUDFLARE_ACCOUNT_ID }}.r2.cloudflarestorage.com | jq -c .)
# Create a JSON file for the delete operation
echo "{\"Objects\": $OBJECTS, \"Quiet\": false}" > delete.json
# Delete the objects
echo q | aws s3api delete-objects --bucket ${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }} --delete file://delete.json --endpoint-url https://${{ secrets.CLOUDFLARE_ACCOUNT_ID }}.r2.cloudflarestorage.com
# Remove the JSON file
rm delete.json
env:
AWS_ACCESS_KEY_ID: ${{ secrets.CLOUDFLARE_R2_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CLOUDFLARE_R2_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: auto
AWS_EC2_METADATA_DISABLED: "true"

View File

@ -0,0 +1,31 @@
name: "Clean Cloudflare R2 nightly build artifacts older than 10 days"
on:
schedule:
- cron: "0 0 * * *" # every day at 00:00
workflow_dispatch:
jobs:
clean-cloudflare-r2:
runs-on: ubuntu-latest
environment: production
steps:
- name: install-aws-cli-action
uses: unfor19/install-aws-cli-action@v1
- name: Delete object older than 10 days
run: |
# Get the list of objects in the 'latest' folder
OBJECTS=$(aws s3api list-objects --bucket ${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }} --prefix "latest/" --query 'Contents[?LastModified<`'$(date -d "$current_date -10 days" -u +"%Y-%m-%dT%H:%M:%SZ")'`].{Key: Key}' --endpoint-url https://${{ secrets.CLOUDFLARE_ACCOUNT_ID }}.r2.cloudflarestorage.com | jq -c .)
# Create a JSON file for the delete operation
echo "{\"Objects\": $OBJECTS, \"Quiet\": false}" > delete.json
# Delete the objects
echo q | aws s3api delete-objects --bucket ${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }} --delete file://delete.json --endpoint-url https://${{ secrets.CLOUDFLARE_ACCOUNT_ID }}.r2.cloudflarestorage.com
# Remove the JSON file
rm delete.json
env:
AWS_ACCESS_KEY_ID: ${{ secrets.CLOUDFLARE_R2_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CLOUDFLARE_R2_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: auto
AWS_EC2_METADATA_DISABLED: "true"

View File

@ -1,104 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches:
- main
- dev
paths:
- "electron/**"
- .github/workflows/jan-electron-linter-and-test.yml
- "web/**"
- "uikit/**"
- "package.json"
- "node_modules/**"
- "yarn.lock"
- "core/**"
- "extensions/**"
- "!README.md"
- "Makefile"
pull_request:
branches:
- main
- dev
paths:
- "electron/**"
- .github/workflows/jan-electron-linter-and-test.yml
- "web/**"
- "uikit/**"
- "package.json"
- "node_modules/**"
- "yarn.lock"
- "Makefile"
jobs:
analyze:
name: Analyze
# Runner size impacts CodeQL analysis time. To learn more, please see:
# - https://gh.io/recommended-hardware-resources-for-running-codeql
# - https://gh.io/supported-runners-and-hardware-resources
# - https://gh.io/using-larger-runners
# Consider using larger runners for possible analysis time improvements.
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }}
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: ["javascript-typescript"]
# CodeQL supports [ 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' ]
# Use only 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use only 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v3
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

View File

@ -1,114 +0,0 @@
name: Jan Docs
on:
push:
branches:
- main
- dev
- docs
paths:
- 'docs/**'
- '.github/workflows/jan-docs.yml'
pull_request:
branches:
- main
- dev
- docs
paths:
- 'docs/**'
- '.github/workflows/jan-docs.yml'
# Review gh actions docs if you want to further define triggers, paths, etc
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#on
jobs:
deploy:
name: Deploy to GitHub Pages
env:
CLOUDFLARE_PROJECT_NAME: jan
runs-on: ubuntu-latest
permissions:
contents: write
deployments: write
pull-requests: write
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: 18
- name: Install jq
uses: dcarbone/install-jq-action@v2.0.1
- name: Fill env vars
run: |
env_example_file=".env.example"
touch .env
while IFS= read -r line || [[ -n "$line" ]]; do
if [[ "$line" == *"="* ]]; then
var_name=$(echo $line | cut -d '=' -f 1)
echo $var_name
var_value="$(jq -r --arg key "$var_name" '.[$key]' <<< "$SECRETS")"
echo "$var_name=$var_value" >> .env
fi
done < "$env_example_file"
working-directory: docs
env:
SECRETS: '${{ toJson(secrets) }}'
- name: Install dependencies
run: yarn install
working-directory: docs
- name: Build website
run: sed -i '/process.env.DEBUG = namespaces;/c\// process.env.DEBUG = namespaces;' ./node_modules/debug/src/node.js && yarn build
working-directory: docs
- name: Publish to Cloudflare Pages PR Preview and Staging
if: github.event_name == 'push' && github.ref == 'refs/heads/dev'
uses: cloudflare/pages-action@v1
with:
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
projectName: ${{ env.CLOUDFLARE_PROJECT_NAME }}
directory: ./docs/build
branch: main
# Optional: Enable this if you want to have GitHub Deployments triggered
gitHubToken: ${{ secrets.GITHUB_TOKEN }}
- name: Publish to Cloudflare Pages PR Preview and Staging
if: github.event_name == 'pull_request'
uses: cloudflare/pages-action@v1
with:
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
projectName: ${{ env.CLOUDFLARE_PROJECT_NAME }}
directory: ./docs/build
# Optional: Enable this if you want to have GitHub Deployments triggered
gitHubToken: ${{ secrets.GITHUB_TOKEN }}
id: deployCloudflarePages
- uses: mshick/add-pr-comment@v2
if: github.event_name == 'pull_request'
with:
message: |
Preview URL: ${{ steps.deployCloudflarePages.outputs.url }}
- name: Add Custome Domain file
if: github.event_name == 'push' && github.ref == 'refs/heads/docs' && github.event.pull_request.head.repo.full_name != github.repository
run: echo "${{ vars.DOCUSAURUS_DOMAIN }}" > ./docs/build/CNAME
# Popular action to deploy to GitHub Pages:
# Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus
- name: Deploy to GitHub Pages
if: github.event_name == 'push' && github.ref == 'refs/heads/docs' && github.event.pull_request.head.repo.full_name != github.repository
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
# Build output to publish to the `gh-pages` branch:
publish_dir: ./docs/build
# The following lines assign commit authorship to the official
# GH-Actions bot for deploys to `gh-pages` branch:
# https://github.com/actions/checkout/issues/13#issuecomment-724415212
# The GH actions bot is used by default if you didn't specify the two fields.
# You can swap them out with your own user credentials.
user_name: github-actions[bot]
user_email: 41898282+github-actions[bot]@users.noreply.github.com

View File

@ -51,7 +51,7 @@ jobs:
uses: actions/checkout@v3 uses: actions/checkout@v3
- name: Installing node - name: Installing node
uses: actions/setup-node@v1 uses: actions/setup-node@v3
with: with:
node-version: 20 node-version: 20
@ -67,11 +67,44 @@ jobs:
CSC_IDENTITY_AUTO_DISCOVERY: "false" CSC_IDENTITY_AUTO_DISCOVERY: "false"
test-on-windows: test-on-windows:
if: github.event_name == 'push'
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
antivirus-tools: ['mcafee', 'default-windows-security','bit-defender'] antivirus-tools: ['mcafee', 'default-windows-security','bit-defender']
runs-on: windows-desktop-${{ matrix.antivirus-tools }} runs-on: windows-desktop-${{ matrix.antivirus-tools }}
steps:
- name: Clean workspace
run: |
Remove-Item -Path "\\?\$(Get-Location)\*" -Force -Recurse
$path = "$Env:APPDATA\jan"
if (Test-Path $path) {
Remove-Item "\\?\$path" -Recurse -Force
} else {
Write-Output "Folder does not exist."
}
- name: Getting the repo
uses: actions/checkout@v3
- name: Installing node
uses: actions/setup-node@v3
with:
node-version: 20
# Clean cache, continue on error
- name: "Cleanup cache"
shell: powershell
continue-on-error: true
run: |
make clean
- name: Linter and test
shell: powershell
run: |
make test
test-on-windows-pr:
if: github.event_name == 'pull_request'
runs-on: windows-desktop-default-windows-security
steps: steps:
- name: Clean workspace - name: Clean workspace
run: | run: |
@ -116,7 +149,7 @@ jobs:
uses: actions/checkout@v3 uses: actions/checkout@v3
- name: Installing node - name: Installing node
uses: actions/setup-node@v1 uses: actions/setup-node@v3
with: with:
node-version: 20 node-version: 20

View File

@ -44,7 +44,7 @@ COPY --from=builder /app/web ./web/
COPY --from=builder /app/models ./models/ COPY --from=builder /app/models ./models/
RUN yarn workspace @janhq/uikit install && yarn workspace @janhq/uikit build RUN yarn workspace @janhq/uikit install && yarn workspace @janhq/uikit build
RUN yarn workspace jan-web install RUN yarn workspace @janhq/web install
RUN npm install -g serve@latest RUN npm install -g serve@latest
@ -55,7 +55,7 @@ ENV JAN_API_PORT 1337
ENV API_BASE_URL http://localhost:1337 ENV API_BASE_URL http://localhost:1337
CMD ["sh", "-c", "export NODE_ENV=production && yarn workspace jan-web build && cd web && npx serve out & cd server && node build/main.js"] CMD ["sh", "-c", "export NODE_ENV=production && yarn workspace @janhq/web build && cd web && npx serve out & cd server && node build/main.js"]
# docker build -t jan . # docker build -t jan .
# docker run -p 1337:1337 -p 3000:3000 -p 3928:3928 jan # docker run -p 1337:1337 -p 3000:3000 -p 3928:3928 jan

View File

@ -68,7 +68,7 @@ COPY --from=builder /app/web ./web/
COPY --from=builder /app/models ./models/ COPY --from=builder /app/models ./models/
RUN yarn workspace @janhq/uikit install && yarn workspace @janhq/uikit build RUN yarn workspace @janhq/uikit install && yarn workspace @janhq/uikit build
RUN yarn workspace jan-web install RUN yarn workspace @janhq/web install
RUN npm install -g serve@latest RUN npm install -g serve@latest
@ -81,7 +81,7 @@ ENV JAN_API_PORT 1337
ENV API_BASE_URL http://localhost:1337 ENV API_BASE_URL http://localhost:1337
CMD ["sh", "-c", "export NODE_ENV=production && yarn workspace jan-web build && cd web && npx serve out & cd server && node build/main.js"] CMD ["sh", "-c", "export NODE_ENV=production && yarn workspace @janhq/web build && cd web && npx serve out & cd server && node build/main.js"]
# pre-requisites: nvidia-docker # pre-requisites: nvidia-docker
# docker build -t jan-gpu . -f Dockerfile.gpu # docker build -t jan-gpu . -f Dockerfile.gpu

View File

@ -17,6 +17,7 @@ install-and-build: build-uikit
ifeq ($(OS),Windows_NT) ifeq ($(OS),Windows_NT)
yarn config set network-timeout 300000 yarn config set network-timeout 300000
endif endif
yarn global add turbo
yarn build:core yarn build:core
yarn build:server yarn build:server
yarn install yarn install
@ -24,9 +25,9 @@ endif
check-file-counts: install-and-build check-file-counts: install-and-build
ifeq ($(OS),Windows_NT) ifeq ($(OS),Windows_NT)
powershell -Command "if ((Get-ChildItem -Path pre-install -Filter *.tgz | Measure-Object | Select-Object -ExpandProperty Count) -ne (Get-ChildItem -Path extensions -Directory | Measure-Object | Select-Object -ExpandProperty Count)) { Write-Host 'Number of .tgz files in pre-install does not match the number of subdirectories in extension'; exit 1 } else { Write-Host 'Extension build successful' }" powershell -Command "if ((Get-ChildItem -Path pre-install -Filter *.tgz | Measure-Object | Select-Object -ExpandProperty Count) -ne (Get-ChildItem -Path extensions -Directory | Where-Object Name -like *-extension* | Measure-Object | Select-Object -ExpandProperty Count)) { Write-Host 'Number of .tgz files in pre-install does not match the number of subdirectories in extensions with package.json'; exit 1 } else { Write-Host 'Extension build successful' }"
else else
@tgz_count=$$(find pre-install -type f -name "*.tgz" | wc -l); dir_count=$$(find extensions -mindepth 1 -maxdepth 1 -type d | wc -l); if [ $$tgz_count -ne $$dir_count ]; then echo "Number of .tgz files in pre-install ($$tgz_count) does not match the number of subdirectories in extension ($$dir_count)"; exit 1; else echo "Extension build successful"; fi @tgz_count=$$(find pre-install -type f -name "*.tgz" | wc -l); dir_count=$$(find extensions -mindepth 1 -maxdepth 1 -type d -exec test -e '{}/package.json' \; -print | wc -l); if [ $$tgz_count -ne $$dir_count ]; then echo "Number of .tgz files in pre-install ($$tgz_count) does not match the number of subdirectories in extension ($$dir_count)"; exit 1; else echo "Extension build successful"; fi
endif endif
dev: check-file-counts dev: check-file-counts
@ -53,15 +54,17 @@ build: check-file-counts
clean: clean:
ifeq ($(OS),Windows_NT) ifeq ($(OS),Windows_NT)
powershell -Command "Get-ChildItem -Path . -Include node_modules, .next, dist, build, out -Recurse -Directory | Remove-Item -Recurse -Force" powershell -Command "Get-ChildItem -Path . -Include node_modules, .next, dist, build, out -Recurse -Directory | Remove-Item -Recurse -Force"
powershell -Command "Get-ChildItem -Path . -Include package-lock.json -Recurse -File | Remove-Item -Recurse -Force"
powershell -Command "Remove-Item -Recurse -Force ./pre-install/*.tgz" powershell -Command "Remove-Item -Recurse -Force ./pre-install/*.tgz"
powershell -Command "Remove-Item -Recurse -Force ./electron/pre-install/*.tgz" powershell -Command "Remove-Item -Recurse -Force ./electron/pre-install/*.tgz"
rmdir /s /q "%USERPROFILE%\jan\extensions" powershell -Command "if (Test-Path \"$($env:USERPROFILE)\jan\extensions\") { Remove-Item -Path \"$($env:USERPROFILE)\jan\extensions\" -Recurse -Force }"
else ifeq ($(shell uname -s),Linux) else ifeq ($(shell uname -s),Linux)
find . -name "node_modules" -type d -prune -exec rm -rf '{}' + find . -name "node_modules" -type d -prune -exec rm -rf '{}' +
find . -name ".next" -type d -exec rm -rf '{}' + find . -name ".next" -type d -exec rm -rf '{}' +
find . -name "dist" -type d -exec rm -rf '{}' + find . -name "dist" -type d -exec rm -rf '{}' +
find . -name "build" -type d -exec rm -rf '{}' + find . -name "build" -type d -exec rm -rf '{}' +
find . -name "out" -type d -exec rm -rf '{}' + find . -name "out" -type d -exec rm -rf '{}' +
find . -name "packake-lock.json" -type f -exec rm -rf '{}' +
rm -rf ./pre-install/*.tgz rm -rf ./pre-install/*.tgz
rm -rf ./electron/pre-install/*.tgz rm -rf ./electron/pre-install/*.tgz
rm -rf "~/jan/extensions" rm -rf "~/jan/extensions"
@ -72,6 +75,7 @@ else
find . -name "dist" -type d -exec rm -rf '{}' + find . -name "dist" -type d -exec rm -rf '{}' +
find . -name "build" -type d -exec rm -rf '{}' + find . -name "build" -type d -exec rm -rf '{}' +
find . -name "out" -type d -exec rm -rf '{}' + find . -name "out" -type d -exec rm -rf '{}' +
find . -name "packake-lock.json" -type f -exec rm -rf '{}' +
rm -rf ./pre-install/*.tgz rm -rf ./pre-install/*.tgz
rm -rf ./electron/pre-install/*.tgz rm -rf ./electron/pre-install/*.tgz
rm -rf ~/jan/extensions rm -rf ~/jan/extensions

View File

@ -43,31 +43,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
<tr style="text-align:center"> <tr style="text-align:center">
<td style="text-align:center"><b>Stable (Recommended)</b></td> <td style="text-align:center"><b>Stable (Recommended)</b></td>
<td style="text-align:center"> <td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.8/jan-win-x64-0.4.8.exe'> <a href='https://github.com/janhq/jan/releases/download/v0.4.9/jan-win-x64-0.4.9.exe'>
<img src='./docs/static/img/windows.png' style="height:14px; width: 14px" /> <img src='./docs/static/img/windows.png' style="height:14px; width: 14px" />
<b>jan.exe</b> <b>jan.exe</b>
</a> </a>
</td> </td>
<td style="text-align:center"> <td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.8/jan-mac-x64-0.4.8.dmg'> <a href='https://github.com/janhq/jan/releases/download/v0.4.9/jan-mac-x64-0.4.9.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" /> <img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>Intel</b> <b>Intel</b>
</a> </a>
</td> </td>
<td style="text-align:center"> <td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.8/jan-mac-arm64-0.4.8.dmg'> <a href='https://github.com/janhq/jan/releases/download/v0.4.9/jan-mac-arm64-0.4.9.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" /> <img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>M1/M2</b> <b>M1/M2</b>
</a> </a>
</td> </td>
<td style="text-align:center"> <td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.8/jan-linux-amd64-0.4.8.deb'> <a href='https://github.com/janhq/jan/releases/download/v0.4.9/jan-linux-amd64-0.4.9.deb'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" /> <img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.deb</b> <b>jan.deb</b>
</a> </a>
</td> </td>
<td style="text-align:center"> <td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.8/jan-linux-x86_64-0.4.8.AppImage'> <a href='https://github.com/janhq/jan/releases/download/v0.4.9/jan-linux-x86_64-0.4.9.AppImage'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" /> <img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.AppImage</b> <b>jan.AppImage</b>
</a> </a>
@ -76,31 +76,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
<tr style="text-align:center"> <tr style="text-align:center">
<td style="text-align:center"><b>Experimental (Nightly Build)</b></td> <td style="text-align:center"><b>Experimental (Nightly Build)</b></td>
<td style="text-align:center"> <td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-win-x64-0.4.8-324.exe'> <a href='https://delta.jan.ai/latest/jan-win-x64-0.4.9-345.exe'>
<img src='./docs/static/img/windows.png' style="height:14px; width: 14px" /> <img src='./docs/static/img/windows.png' style="height:14px; width: 14px" />
<b>jan.exe</b> <b>jan.exe</b>
</a> </a>
</td> </td>
<td style="text-align:center"> <td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-mac-x64-0.4.8-324.dmg'> <a href='https://delta.jan.ai/latest/jan-mac-x64-0.4.9-345.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" /> <img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>Intel</b> <b>Intel</b>
</a> </a>
</td> </td>
<td style="text-align:center"> <td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-mac-arm64-0.4.8-324.dmg'> <a href='https://delta.jan.ai/latest/jan-mac-arm64-0.4.9-345.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" /> <img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>M1/M2</b> <b>M1/M2</b>
</a> </a>
</td> </td>
<td style="text-align:center"> <td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-linux-amd64-0.4.8-324.deb'> <a href='https://delta.jan.ai/latest/jan-linux-amd64-0.4.9-345.deb'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" /> <img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.deb</b> <b>jan.deb</b>
</a> </a>
</td> </td>
<td style="text-align:center"> <td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-linux-x86_64-0.4.8-324.AppImage'> <a href='https://delta.jan.ai/latest/jan-linux-x86_64-0.4.9-345.AppImage'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" /> <img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.AppImage</b> <b>jan.AppImage</b>
</a> </a>
@ -327,6 +327,7 @@ Jan builds on top of other open-source projects:
- [llama.cpp](https://github.com/ggerganov/llama.cpp) - [llama.cpp](https://github.com/ggerganov/llama.cpp)
- [LangChain](https://github.com/langchain-ai) - [LangChain](https://github.com/langchain-ai)
- [TensorRT](https://github.com/NVIDIA/TensorRT) - [TensorRT](https://github.com/NVIDIA/TensorRT)
- [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM)
## Contact ## Contact

View File

@ -150,7 +150,7 @@ common:
command: ['/bin/sh', '-c'] command: ['/bin/sh', '-c']
args: args:
[ [
'export NODE_ENV=production && yarn workspace jan-web build && cd web && npx serve out', 'export NODE_ENV=production && yarn workspace @janhq/web build && cd web && npx serve out',
] ]
replicaCount: 1 replicaCount: 1

View File

@ -8,8 +8,8 @@
], ],
"homepage": "https://jan.ai", "homepage": "https://jan.ai",
"license": "AGPL-3.0", "license": "AGPL-3.0",
"main": "dist/core.umd.js", "main": "dist/core.es5.js",
"module": "dist/core.es5.js", "module": "dist/core.cjs.js",
"typings": "dist/types/index.d.ts", "typings": "dist/types/index.d.ts",
"files": [ "files": [
"dist", "dist",
@ -17,8 +17,7 @@
], ],
"author": "Jan <service@jan.ai>", "author": "Jan <service@jan.ai>",
"exports": { "exports": {
".": "./dist/core.umd.js", ".": "./dist/core.es5.js",
"./sdk": "./dist/core.umd.js",
"./node": "./dist/node/index.cjs.js" "./node": "./dist/node/index.cjs.js"
}, },
"typesVersions": { "typesVersions": {
@ -27,10 +26,6 @@
"./dist/core.es5.js.map", "./dist/core.es5.js.map",
"./dist/types/index.d.ts" "./dist/types/index.d.ts"
], ],
"sdk": [
"./dist/core.es5.js.map",
"./dist/types/index.d.ts"
],
"node": [ "node": [
"./dist/node/index.cjs.js.map", "./dist/node/index.cjs.js.map",
"./dist/types/node/index.d.ts" "./dist/types/node/index.d.ts"
@ -45,6 +40,7 @@
"start": "rollup -c rollup.config.ts -w" "start": "rollup -c rollup.config.ts -w"
}, },
"devDependencies": { "devDependencies": {
"@rollup/plugin-replace": "^5.0.5",
"@types/jest": "^29.5.12", "@types/jest": "^29.5.12",
"@types/node": "^20.11.4", "@types/node": "^20.11.4",
"eslint": "8.57.0", "eslint": "8.57.0",
@ -63,6 +59,6 @@
}, },
"dependencies": { "dependencies": {
"rxjs": "^7.8.1", "rxjs": "^7.8.1",
"ulid": "^2.3.0" "ulidx": "^2.3.0"
} }
} }

View File

@ -3,17 +3,16 @@ import commonjs from 'rollup-plugin-commonjs'
import sourceMaps from 'rollup-plugin-sourcemaps' import sourceMaps from 'rollup-plugin-sourcemaps'
import typescript from 'rollup-plugin-typescript2' import typescript from 'rollup-plugin-typescript2'
import json from 'rollup-plugin-json' import json from 'rollup-plugin-json'
import replace from '@rollup/plugin-replace'
const pkg = require('./package.json') const pkg = require('./package.json')
const libraryName = 'core'
export default [ export default [
{ {
input: `src/index.ts`, input: `src/index.ts`,
output: [ output: [
{ file: pkg.main, name: libraryName, format: 'umd', sourcemap: true }, // { file: pkg.main, name: libraryName, format: 'umd', sourcemap: true },
{ file: pkg.module, format: 'es', sourcemap: true }, { file: pkg.main, format: 'es', sourcemap: true },
], ],
// Indicate here external modules you don't wanna include in your bundle (i.e.: 'lodash') // Indicate here external modules you don't wanna include in your bundle (i.e.: 'lodash')
external: ['path'], external: ['path'],
@ -30,7 +29,13 @@ export default [
// Allow node_modules resolution, so you can use 'external' to control // Allow node_modules resolution, so you can use 'external' to control
// which external modules to include in the bundle // which external modules to include in the bundle
// https://github.com/rollup/rollup-plugin-node-resolve#usage // https://github.com/rollup/rollup-plugin-node-resolve#usage
resolve(), replace({
'node:crypto': 'crypto',
'delimiters': ['"', '"'],
}),
resolve({
browser: true,
}),
// Resolve source maps to the original source // Resolve source maps to the original source
sourceMaps(), sourceMaps(),
@ -46,7 +51,7 @@ export default [
'pacote', 'pacote',
'@types/pacote', '@types/pacote',
'@npmcli/arborist', '@npmcli/arborist',
'ulid', 'ulidx',
'node-fetch', 'node-fetch',
'fs', 'fs',
'request', 'request',
@ -64,7 +69,7 @@ export default [
// Allow json resolution // Allow json resolution
json(), json(),
// Compile TypeScript files // Compile TypeScript files
typescript({ useTsconfigDeclarationDir: true, exclude: ['src/*.ts', 'src/extensions/**'] }), typescript({ useTsconfigDeclarationDir: true }),
// Allow bundling cjs modules (unlike webpack, rollup doesn't understand cjs) // Allow bundling cjs modules (unlike webpack, rollup doesn't understand cjs)
commonjs(), commonjs(),
// Allow node_modules resolution, so you can use 'external' to control // Allow node_modules resolution, so you can use 'external' to control

View File

@ -1,4 +1,4 @@
import { DownloadRequest, FileStat, NetworkConfig, SystemInformation } from './types' import { DownloadRequest, FileStat, NetworkConfig, SystemInformation } from '../types'
/** /**
* Execute a extension module function in main process * Execute a extension module function in main process
@ -13,7 +13,7 @@ const executeOnMain: (extension: string, method: string, ...args: any[]) => Prom
extension, extension,
method, method,
...args ...args
) => global.core?.api?.invokeExtensionFunc(extension, method, ...args) ) => globalThis.core?.api?.invokeExtensionFunc(extension, method, ...args)
/** /**
* Downloads a file from a URL and saves it to the local file system. * Downloads a file from a URL and saves it to the local file system.
@ -26,7 +26,7 @@ const executeOnMain: (extension: string, method: string, ...args: any[]) => Prom
const downloadFile: (downloadRequest: DownloadRequest, network?: NetworkConfig) => Promise<any> = ( const downloadFile: (downloadRequest: DownloadRequest, network?: NetworkConfig) => Promise<any> = (
downloadRequest, downloadRequest,
network network
) => global.core?.api?.downloadFile(downloadRequest, network) ) => globalThis.core?.api?.downloadFile(downloadRequest, network)
/** /**
* Aborts the download of a specific file. * Aborts the download of a specific file.
@ -34,14 +34,14 @@ const downloadFile: (downloadRequest: DownloadRequest, network?: NetworkConfig)
* @returns {Promise<any>} A promise that resolves when the download has been aborted. * @returns {Promise<any>} A promise that resolves when the download has been aborted.
*/ */
const abortDownload: (fileName: string) => Promise<any> = (fileName) => const abortDownload: (fileName: string) => Promise<any> = (fileName) =>
global.core.api?.abortDownload(fileName) globalThis.core.api?.abortDownload(fileName)
/** /**
* Gets Jan's data folder path. * Gets Jan's data folder path.
* *
* @returns {Promise<string>} A Promise that resolves with Jan's data folder path. * @returns {Promise<string>} A Promise that resolves with Jan's data folder path.
*/ */
const getJanDataFolderPath = (): Promise<string> => global.core.api?.getJanDataFolderPath() const getJanDataFolderPath = (): Promise<string> => globalThis.core.api?.getJanDataFolderPath()
/** /**
* Opens the file explorer at a specific path. * Opens the file explorer at a specific path.
@ -49,21 +49,22 @@ const getJanDataFolderPath = (): Promise<string> => global.core.api?.getJanDataF
* @returns {Promise<any>} A promise that resolves when the file explorer is opened. * @returns {Promise<any>} A promise that resolves when the file explorer is opened.
*/ */
const openFileExplorer: (path: string) => Promise<any> = (path) => const openFileExplorer: (path: string) => Promise<any> = (path) =>
global.core.api?.openFileExplorer(path) globalThis.core.api?.openFileExplorer(path)
/** /**
* Joins multiple paths together. * Joins multiple paths together.
* @param paths - The paths to join. * @param paths - The paths to join.
* @returns {Promise<string>} A promise that resolves with the joined path. * @returns {Promise<string>} A promise that resolves with the joined path.
*/ */
const joinPath: (paths: string[]) => Promise<string> = (paths) => global.core.api?.joinPath(paths) const joinPath: (paths: string[]) => Promise<string> = (paths) =>
globalThis.core.api?.joinPath(paths)
/** /**
* Retrive the basename from an url. * Retrive the basename from an url.
* @param path - The path to retrieve. * @param path - The path to retrieve.
* @returns {Promise<string>} A promise that resolves with the basename. * @returns {Promise<string>} A promise that resolves with the basename.
*/ */
const baseName: (paths: string) => Promise<string> = (path) => global.core.api?.baseName(path) const baseName: (paths: string) => Promise<string> = (path) => globalThis.core.api?.baseName(path)
/** /**
* Opens an external URL in the default web browser. * Opens an external URL in the default web browser.
@ -72,20 +73,20 @@ const baseName: (paths: string) => Promise<string> = (path) => global.core.api?.
* @returns {Promise<any>} - A promise that resolves when the URL has been successfully opened. * @returns {Promise<any>} - A promise that resolves when the URL has been successfully opened.
*/ */
const openExternalUrl: (url: string) => Promise<any> = (url) => const openExternalUrl: (url: string) => Promise<any> = (url) =>
global.core.api?.openExternalUrl(url) globalThis.core.api?.openExternalUrl(url)
/** /**
* Gets the resource path of the application. * Gets the resource path of the application.
* *
* @returns {Promise<string>} - A promise that resolves with the resource path. * @returns {Promise<string>} - A promise that resolves with the resource path.
*/ */
const getResourcePath: () => Promise<string> = () => global.core.api?.getResourcePath() const getResourcePath: () => Promise<string> = () => globalThis.core.api?.getResourcePath()
/** /**
* Gets the user's home path. * Gets the user's home path.
* @returns return user's home path * @returns return user's home path
*/ */
const getUserHomePath = (): Promise<string> => global.core.api?.getUserHomePath() const getUserHomePath = (): Promise<string> => globalThis.core.api?.getUserHomePath()
/** /**
* Log to file from browser processes. * Log to file from browser processes.
@ -93,7 +94,7 @@ const getUserHomePath = (): Promise<string> => global.core.api?.getUserHomePath(
* @param message - Message to log. * @param message - Message to log.
*/ */
const log: (message: string, fileName?: string) => void = (message, fileName) => const log: (message: string, fileName?: string) => void = (message, fileName) =>
global.core.api?.log(message, fileName) globalThis.core.api?.log(message, fileName)
/** /**
* Check whether the path is a subdirectory of another path. * Check whether the path is a subdirectory of another path.
@ -104,14 +105,14 @@ const log: (message: string, fileName?: string) => void = (message, fileName) =>
* @returns {Promise<boolean>} - A promise that resolves with a boolean indicating whether the path is a subdirectory. * @returns {Promise<boolean>} - A promise that resolves with a boolean indicating whether the path is a subdirectory.
*/ */
const isSubdirectory: (from: string, to: string) => Promise<boolean> = (from: string, to: string) => const isSubdirectory: (from: string, to: string) => Promise<boolean> = (from: string, to: string) =>
global.core.api?.isSubdirectory(from, to) globalThis.core.api?.isSubdirectory(from, to)
/** /**
* Get system information * Get system information
* @returns {Promise<any>} - A promise that resolves with the system information. * @returns {Promise<any>} - A promise that resolves with the system information.
*/ */
const systemInformation: () => Promise<SystemInformation> = () => const systemInformation: () => Promise<SystemInformation> = () =>
global.core.api?.systemInformation() globalThis.core.api?.systemInformation()
/** /**
* Show toast message from browser processes. * Show toast message from browser processes.
@ -120,7 +121,7 @@ const systemInformation: () => Promise<SystemInformation> = () =>
* @returns * @returns
*/ */
const showToast: (title: string, message: string) => void = (title, message) => const showToast: (title: string, message: string) => void = (title, message) =>
global.core.api?.showToast(title, message) globalThis.core.api?.showToast(title, message)
/** /**
* Register extension point function type definition * Register extension point function type definition
*/ */

View File

@ -5,7 +5,7 @@
* @param handler The handler function to call when the event is observed. * @param handler The handler function to call when the event is observed.
*/ */
const on: (eventName: string, handler: Function) => void = (eventName, handler) => { const on: (eventName: string, handler: Function) => void = (eventName, handler) => {
global.core?.events?.on(eventName, handler) globalThis.core?.events?.on(eventName, handler)
} }
/** /**
@ -15,7 +15,7 @@ const on: (eventName: string, handler: Function) => void = (eventName, handler)
* @param handler The handler function to call when the event is observed. * @param handler The handler function to call when the event is observed.
*/ */
const off: (eventName: string, handler: Function) => void = (eventName, handler) => { const off: (eventName: string, handler: Function) => void = (eventName, handler) => {
global.core?.events?.off(eventName, handler) globalThis.core?.events?.off(eventName, handler)
} }
/** /**
@ -25,7 +25,7 @@ const off: (eventName: string, handler: Function) => void = (eventName, handler)
* @param object The object to pass to the event callback. * @param object The object to pass to the event callback.
*/ */
const emit: (eventName: string, object: any) => void = (eventName, object) => { const emit: (eventName: string, object: any) => void = (eventName, object) => {
global.core?.events?.emit(eventName, object) globalThis.core?.events?.emit(eventName, object)
} }
export const events = { export const events = {

View File

@ -1,4 +1,4 @@
import { Assistant, AssistantInterface } from '../index' import { Assistant, AssistantInterface } from '../../types'
import { BaseExtension, ExtensionTypeEnum } from '../extension' import { BaseExtension, ExtensionTypeEnum } from '../extension'
/** /**

View File

@ -1,4 +1,4 @@
import { Thread, ThreadInterface, ThreadMessage, MessageInterface } from '../index' import { Thread, ThreadInterface, ThreadMessage, MessageInterface } from '../../types'
import { BaseExtension, ExtensionTypeEnum } from '../extension' import { BaseExtension, ExtensionTypeEnum } from '../extension'
/** /**

View File

@ -2,7 +2,8 @@ import { getJanDataFolderPath, joinPath } from '../../core'
import { events } from '../../events' import { events } from '../../events'
import { BaseExtension } from '../../extension' import { BaseExtension } from '../../extension'
import { fs } from '../../fs' import { fs } from '../../fs'
import { Model, ModelEvent } from '../../types' import { MessageRequest, Model, ModelEvent } from '../../../types'
import { EngineManager } from './EngineManager'
/** /**
* Base AIEngine * Base AIEngine
@ -11,30 +12,73 @@ import { Model, ModelEvent } from '../../types'
export abstract class AIEngine extends BaseExtension { export abstract class AIEngine extends BaseExtension {
// The inference engine // The inference engine
abstract provider: string abstract provider: string
// The model folder
modelFolder: string = 'models'
abstract models(): Promise<Model[]>
/** /**
* On extension load, subscribe to events. * On extension load, subscribe to events.
*/ */
onLoad() { override onLoad() {
this.registerEngine()
events.on(ModelEvent.OnModelInit, (model: Model) => this.loadModel(model))
events.on(ModelEvent.OnModelStop, (model: Model) => this.unloadModel(model))
this.prePopulateModels() this.prePopulateModels()
} }
/**
* Defines models
*/
models(): Promise<Model[]> {
return Promise.resolve([])
}
/**
* Registers AI Engines
*/
registerEngine() {
EngineManager.instance().register(this)
}
/**
* Loads the model.
*/
async loadModel(model: Model): Promise<any> {
if (model.engine.toString() !== this.provider) return Promise.resolve()
events.emit(ModelEvent.OnModelReady, model)
return Promise.resolve()
}
/**
* Stops the model.
*/
async unloadModel(model?: Model): Promise<any> {
if (model?.engine && model.engine.toString() !== this.provider) return Promise.resolve()
events.emit(ModelEvent.OnModelStopped, model ?? {})
return Promise.resolve()
}
/*
* Inference request
*/
inference(data: MessageRequest) {}
/**
* Stop inference
*/
stopInference() {}
/** /**
* Pre-populate models to App Data Folder * Pre-populate models to App Data Folder
*/ */
prePopulateModels(): Promise<void> { prePopulateModels(): Promise<void> {
const modelFolder = 'models'
return this.models().then((models) => { return this.models().then((models) => {
const prePoluateOperations = models.map((model) => const prePoluateOperations = models.map((model) =>
getJanDataFolderPath() getJanDataFolderPath()
.then((janDataFolder) => .then((janDataFolder) =>
// Attempt to create the model folder // Attempt to create the model folder
joinPath([janDataFolder, this.modelFolder, model.id]).then((path) => joinPath([janDataFolder, modelFolder, model.id]).then((path) =>
fs fs
.mkdirSync(path) .mkdir(path)
.catch() .catch()
.then(() => path) .then(() => path)
) )

View File

@ -0,0 +1,32 @@
import { AIEngine } from './AIEngine'
/**
* Manages the registration and retrieval of inference engines.
*/
export class EngineManager {
public engines = new Map<string, AIEngine>()
/**
* Registers an engine.
* @param engine - The engine to register.
*/
register<T extends AIEngine>(engine: T) {
this.engines.set(engine.provider, engine)
}
/**
* Retrieves a engine by provider.
* @param provider - The name of the engine to retrieve.
* @returns The engine, if found.
*/
get<T extends AIEngine>(provider: string): T | undefined {
return this.engines.get(provider) as T | undefined
}
/**
* The instance of the engine manager.
*/
static instance(): EngineManager {
return window.core?.engineManager as EngineManager ?? new EngineManager()
}
}

View File

@ -1,6 +1,6 @@
import { executeOnMain, getJanDataFolderPath, joinPath, systemInformation } from '../../core' import { executeOnMain, getJanDataFolderPath, joinPath, systemInformation } from '../../core'
import { events } from '../../events' import { events } from '../../events'
import { Model, ModelEvent } from '../../types' import { Model, ModelEvent } from '../../../types'
import { OAIEngine } from './OAIEngine' import { OAIEngine } from './OAIEngine'
/** /**
@ -9,54 +9,55 @@ import { OAIEngine } from './OAIEngine'
*/ */
export abstract class LocalOAIEngine extends OAIEngine { export abstract class LocalOAIEngine extends OAIEngine {
// The inference engine // The inference engine
abstract nodeModule: string
loadModelFunctionName: string = 'loadModel' loadModelFunctionName: string = 'loadModel'
unloadModelFunctionName: string = 'unloadModel' unloadModelFunctionName: string = 'unloadModel'
isRunning: boolean = false
/** /**
* On extension load, subscribe to events. * On extension load, subscribe to events.
*/ */
onLoad() { override onLoad() {
super.onLoad() super.onLoad()
// These events are applicable to local inference providers // These events are applicable to local inference providers
events.on(ModelEvent.OnModelInit, (model: Model) => this.onModelInit(model)) events.on(ModelEvent.OnModelInit, (model: Model) => this.loadModel(model))
events.on(ModelEvent.OnModelStop, (model: Model) => this.onModelStop(model)) events.on(ModelEvent.OnModelStop, (model: Model) => this.unloadModel(model))
} }
/** /**
* Load the model. * Load the model.
*/ */
async onModelInit(model: Model) { override async loadModel(model: Model): Promise<void> {
if (model.engine.toString() !== this.provider) return if (model.engine.toString() !== this.provider) return
const modelFolderName = 'models'
const modelFolder = await joinPath([await getJanDataFolderPath(), this.modelFolder, model.id]) const modelFolder = await joinPath([await getJanDataFolderPath(), modelFolderName, model.id])
const systemInfo = await systemInformation() const systemInfo = await systemInformation()
const res = await executeOnMain(this.nodeModule, this.loadModelFunctionName, { const res = await executeOnMain(
this.nodeModule,
this.loadModelFunctionName,
{
modelFolder, modelFolder,
model, model,
}, systemInfo) },
systemInfo
)
if (res?.error) { if (res?.error) {
events.emit(ModelEvent.OnModelFail, { events.emit(ModelEvent.OnModelFail, { error: res.error })
...model, return Promise.reject(res.error)
error: res.error,
})
return
} else { } else {
this.loadedModel = model this.loadedModel = model
events.emit(ModelEvent.OnModelReady, model) events.emit(ModelEvent.OnModelReady, model)
this.isRunning = true return Promise.resolve()
} }
} }
/** /**
* Stops the model. * Stops the model.
*/ */
onModelStop(model: Model) { override async unloadModel(model?: Model): Promise<void> {
if (model.engine?.toString() !== this.provider) return if (model?.engine && model.engine?.toString() !== this.provider) return Promise.resolve()
this.isRunning = false this.loadedModel = undefined
return executeOnMain(this.nodeModule, this.unloadModelFunctionName).then(() => {
executeOnMain(this.nodeModule, this.unloadModelFunctionName).then(() => {
events.emit(ModelEvent.OnModelStopped, {}) events.emit(ModelEvent.OnModelStopped, {})
}) })
} }

View File

@ -1,5 +1,5 @@
import { requestInference } from './helpers/sse' import { requestInference } from './helpers/sse'
import { ulid } from 'ulid' import { ulid } from 'ulidx'
import { AIEngine } from './AIEngine' import { AIEngine } from './AIEngine'
import { import {
ChatCompletionRole, ChatCompletionRole,
@ -13,7 +13,7 @@ import {
ModelInfo, ModelInfo,
ThreadContent, ThreadContent,
ThreadMessage, ThreadMessage,
} from '../../types' } from '../../../types'
import { events } from '../../events' import { events } from '../../events'
/** /**
@ -23,7 +23,6 @@ import { events } from '../../events'
export abstract class OAIEngine extends AIEngine { export abstract class OAIEngine extends AIEngine {
// The inference engine // The inference engine
abstract inferenceUrl: string abstract inferenceUrl: string
abstract nodeModule: string
// Controller to handle stop requests // Controller to handle stop requests
controller = new AbortController() controller = new AbortController()
@ -35,21 +34,21 @@ export abstract class OAIEngine extends AIEngine {
/** /**
* On extension load, subscribe to events. * On extension load, subscribe to events.
*/ */
onLoad() { override onLoad() {
super.onLoad() super.onLoad()
events.on(MessageEvent.OnMessageSent, (data: MessageRequest) => this.inference(data)) events.on(MessageEvent.OnMessageSent, (data: MessageRequest) => this.inference(data))
events.on(InferenceEvent.OnInferenceStopped, () => this.onInferenceStopped()) events.on(InferenceEvent.OnInferenceStopped, () => this.stopInference())
} }
/** /**
* On extension unload * On extension unload
*/ */
onUnload(): void {} override onUnload(): void {}
/* /*
* Inference request * Inference request
*/ */
inference(data: MessageRequest) { override inference(data: MessageRequest) {
if (data.model?.engine?.toString() !== this.provider) return if (data.model?.engine?.toString() !== this.provider) return
const timestamp = Date.now() const timestamp = Date.now()
@ -78,7 +77,13 @@ export abstract class OAIEngine extends AIEngine {
...data.model, ...data.model,
} }
requestInference(this.inferenceUrl, data.messages ?? [], model, this.controller).subscribe({ requestInference(
this.inferenceUrl,
data.messages ?? [],
model,
this.controller,
this.headers()
).subscribe({
next: (content: any) => { next: (content: any) => {
const messageContent: ThreadContent = { const messageContent: ThreadContent = {
type: ContentType.Text, type: ContentType.Text,
@ -101,6 +106,7 @@ export abstract class OAIEngine extends AIEngine {
return return
} }
message.status = MessageStatus.Error message.status = MessageStatus.Error
message.error_code = err.code
events.emit(MessageEvent.OnMessageUpdate, message) events.emit(MessageEvent.OnMessageUpdate, message)
}, },
}) })
@ -109,8 +115,15 @@ export abstract class OAIEngine extends AIEngine {
/** /**
* Stops the inference. * Stops the inference.
*/ */
onInferenceStopped() { override stopInference() {
this.isCancelled = true this.isCancelled = true
this.controller?.abort() this.controller?.abort()
} }
/**
* Headers for the inference request
*/
headers(): HeadersInit {
return {}
}
} }

View File

@ -0,0 +1,26 @@
import { OAIEngine } from './OAIEngine'
/**
* Base OAI Remote Inference Provider
* Added the implementation of loading and unloading model (applicable to local inference providers)
*/
export abstract class RemoteOAIEngine extends OAIEngine {
// The inference engine
abstract apiKey: string
/**
* On extension load, subscribe to events.
*/
override onLoad() {
super.onLoad()
}
/**
* Headers for the inference request
*/
override headers(): HeadersInit {
return {
'Authorization': `Bearer ${this.apiKey}`,
'api-key': `${this.apiKey}`,
}
}
}

View File

@ -1,5 +1,5 @@
import { Observable } from 'rxjs' import { Observable } from 'rxjs'
import { ModelRuntimeParams } from '../../../types' import { ErrorCode, ModelRuntimeParams } from '../../../../types'
/** /**
* Sends a request to the inference server to generate a response based on the recent messages. * Sends a request to the inference server to generate a response based on the recent messages.
* @param recentMessages - An array of recent messages to use as context for the inference. * @param recentMessages - An array of recent messages to use as context for the inference.
@ -12,7 +12,8 @@ export function requestInference(
id: string id: string
parameters: ModelRuntimeParams parameters: ModelRuntimeParams
}, },
controller?: AbortController controller?: AbortController,
headers?: HeadersInit
): Observable<string> { ): Observable<string> {
return new Observable((subscriber) => { return new Observable((subscriber) => {
const requestBody = JSON.stringify({ const requestBody = JSON.stringify({
@ -27,11 +28,22 @@ export function requestInference(
'Content-Type': 'application/json', 'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*', 'Access-Control-Allow-Origin': '*',
'Accept': model.parameters.stream ? 'text/event-stream' : 'application/json', 'Accept': model.parameters.stream ? 'text/event-stream' : 'application/json',
...headers,
}, },
body: requestBody, body: requestBody,
signal: controller?.signal, signal: controller?.signal,
}) })
.then(async (response) => { .then(async (response) => {
if (!response.ok) {
const data = await response.json()
const error = {
message: data.error?.message ?? 'Error occurred.',
code: data.error?.code ?? ErrorCode.Unknown,
}
subscriber.error(error)
subscriber.complete()
return
}
if (model.parameters.stream === false) { if (model.parameters.stream === false) {
const data = await response.json() const data = await response.json()
subscriber.next(data.choices[0]?.message?.content ?? '') subscriber.next(data.choices[0]?.message?.content ?? '')

View File

@ -1,3 +1,5 @@
export * from './AIEngine' export * from './AIEngine'
export * from './OAIEngine' export * from './OAIEngine'
export * from './LocalOAIEngine' export * from './LocalOAIEngine'
export * from './RemoteOAIEngine'
export * from './EngineManager'

View File

@ -1,6 +1,6 @@
import { BaseExtension, ExtensionTypeEnum } from '../extension' import { BaseExtension, ExtensionTypeEnum } from '../extension'
import { HuggingFaceInterface, HuggingFaceRepoData, Quantization } from '../types/huggingface' import { HuggingFaceInterface, HuggingFaceRepoData, Quantization } from '../../types/huggingface'
import { Model } from '../types/model' import { Model } from '../../types/model'
/** /**
* Hugging Face extension for converting HF models to GGUF. * Hugging Face extension for converting HF models to GGUF.

View File

@ -32,4 +32,4 @@ export { HuggingFaceExtension } from './huggingface'
/** /**
* Base AI Engines. * Base AI Engines.
*/ */
export * from './ai-engines' export * from './engines'

View File

@ -1,4 +1,4 @@
import { InferenceInterface, MessageRequest, ThreadMessage } from '../index' import { InferenceInterface, MessageRequest, ThreadMessage } from '../../types'
import { BaseExtension, ExtensionTypeEnum } from '../extension' import { BaseExtension, ExtensionTypeEnum } from '../extension'
/** /**

View File

@ -1,5 +1,5 @@
import { BaseExtension, ExtensionTypeEnum } from '../extension' import { BaseExtension, ExtensionTypeEnum } from '../extension'
import { GpuSetting, ImportingModel, Model, ModelInterface, OptionType } from '../index' import { GpuSetting, ImportingModel, Model, ModelInterface, OptionType } from '../../types'
/** /**
* Model extension for managing models. * Model extension for managing models.

View File

@ -1,5 +1,5 @@
import { BaseExtension, ExtensionTypeEnum } from '../extension' import { BaseExtension, ExtensionTypeEnum } from '../extension'
import { GpuSetting, MonitoringInterface, OperatingSystemInfo } from '../index' import { GpuSetting, MonitoringInterface, OperatingSystemInfo } from '../../types'
/** /**
* Monitoring extension for system monitoring. * Monitoring extension for system monitoring.

View File

@ -1,10 +1,10 @@
import { FileStat } from './types' import { FileStat } from '../types'
/** /**
* Writes data to a file at the specified path. * Writes data to a file at the specified path.
* @returns {Promise<any>} A Promise that resolves when the file is written successfully. * @returns {Promise<any>} A Promise that resolves when the file is written successfully.
*/ */
const writeFileSync = (...args: any[]) => global.core.api?.writeFileSync(...args) const writeFileSync = (...args: any[]) => globalThis.core.api?.writeFileSync(...args)
/** /**
* Writes blob data to a file at the specified path. * Writes blob data to a file at the specified path.
@ -13,29 +13,29 @@ const writeFileSync = (...args: any[]) => global.core.api?.writeFileSync(...args
* @returns * @returns
*/ */
const writeBlob: (path: string, data: string) => Promise<any> = (path, data) => const writeBlob: (path: string, data: string) => Promise<any> = (path, data) =>
global.core.api?.writeBlob(path, data) globalThis.core.api?.writeBlob(path, data)
/** /**
* Reads the contents of a file at the specified path. * Reads the contents of a file at the specified path.
* @returns {Promise<any>} A Promise that resolves with the contents of the file. * @returns {Promise<any>} A Promise that resolves with the contents of the file.
*/ */
const readFileSync = (...args: any[]) => global.core.api?.readFileSync(...args) const readFileSync = (...args: any[]) => globalThis.core.api?.readFileSync(...args)
/** /**
* Check whether the file exists * Check whether the file exists
* @param {string} path * @param {string} path
* @returns {boolean} A boolean indicating whether the path is a file. * @returns {boolean} A boolean indicating whether the path is a file.
*/ */
const existsSync = (...args: any[]) => global.core.api?.existsSync(...args) const existsSync = (...args: any[]) => globalThis.core.api?.existsSync(...args)
/** /**
* List the directory files * List the directory files
* @returns {Promise<any>} A Promise that resolves with the contents of the directory. * @returns {Promise<any>} A Promise that resolves with the contents of the directory.
*/ */
const readdirSync = (...args: any[]) => global.core.api?.readdirSync(...args) const readdirSync = (...args: any[]) => globalThis.core.api?.readdirSync(...args)
/** /**
* Creates a directory at the specified path. * Creates a directory at the specified path.
* @returns {Promise<any>} A Promise that resolves when the directory is created successfully. * @returns {Promise<any>} A Promise that resolves when the directory is created successfully.
*/ */
const mkdirSync = (...args: any[]) => global.core.api?.mkdirSync(...args) const mkdir = (...args: any[]) => globalThis.core.api?.mkdir(...args)
const mkdir = (...args: any[]) => global.core.api?.mkdir(...args) const mkdir = (...args: any[]) => global.core.api?.mkdir(...args)
@ -43,22 +43,19 @@ const mkdir = (...args: any[]) => global.core.api?.mkdir(...args)
* Removes a directory at the specified path. * Removes a directory at the specified path.
* @returns {Promise<any>} A Promise that resolves when the directory is removed successfully. * @returns {Promise<any>} A Promise that resolves when the directory is removed successfully.
*/ */
const rmdirSync = (...args: any[]) => const rm = (...args: any[]) => globalThis.core.api?.rm(...args, { recursive: true, force: true })
global.core.api?.rmdirSync(...args, { recursive: true, force: true })
const rm = (path: string) => global.core.api?.rm(path)
/** /**
* Deletes a file from the local file system. * Deletes a file from the local file system.
* @param {string} path - The path of the file to delete. * @param {string} path - The path of the file to delete.
* @returns {Promise<any>} A Promise that resolves when the file is deleted. * @returns {Promise<any>} A Promise that resolves when the file is deleted.
*/ */
const unlinkSync = (...args: any[]) => global.core.api?.unlinkSync(...args) const unlinkSync = (...args: any[]) => globalThis.core.api?.unlinkSync(...args)
/** /**
* Appends data to a file at the specified path. * Appends data to a file at the specified path.
*/ */
const appendFileSync = (...args: any[]) => global.core.api?.appendFileSync(...args) const appendFileSync = (...args: any[]) => globalThis.core.api?.appendFileSync(...args)
/** /**
* Synchronizes a file from a source path to a destination path. * Synchronizes a file from a source path to a destination path.
@ -67,15 +64,15 @@ const appendFileSync = (...args: any[]) => global.core.api?.appendFileSync(...ar
* @returns {Promise<any>} - A promise that resolves when the file has been successfully synchronized. * @returns {Promise<any>} - A promise that resolves when the file has been successfully synchronized.
*/ */
const syncFile: (src: string, dest: string) => Promise<any> = (src, dest) => const syncFile: (src: string, dest: string) => Promise<any> = (src, dest) =>
global.core.api?.syncFile(src, dest) globalThis.core.api?.syncFile(src, dest)
/** /**
* Copy file sync. * Copy file sync.
*/ */
const copyFileSync = (...args: any[]) => global.core.api?.copyFileSync(...args) const copyFileSync = (...args: any[]) => globalThis.core.api?.copyFileSync(...args)
const copyFile: (src: string, dest: string) => Promise<void> = (src, dest) => const copyFile: (src: string, dest: string) => Promise<void> = (src, dest) =>
global.core.api?.copyFile(src, dest) globalThis.core.api?.copyFile(src, dest)
/** /**
* Gets the file's stats. * Gets the file's stats.
@ -87,7 +84,7 @@ const copyFile: (src: string, dest: string) => Promise<void> = (src, dest) =>
const fileStat: (path: string, outsideJanDataFolder?: boolean) => Promise<FileStat | undefined> = ( const fileStat: (path: string, outsideJanDataFolder?: boolean) => Promise<FileStat | undefined> = (
path, path,
outsideJanDataFolder outsideJanDataFolder
) => global.core.api?.fileStat(path, outsideJanDataFolder) ) => globalThis.core.api?.fileStat(path, outsideJanDataFolder)
// TODO: Export `dummy` fs functions automatically // TODO: Export `dummy` fs functions automatically
// Currently adding these manually // Currently adding these manually
@ -96,9 +93,7 @@ export const fs = {
readFileSync, readFileSync,
existsSync, existsSync,
readdirSync, readdirSync,
mkdirSync,
mkdir, mkdir,
rmdirSync,
rm, rm,
unlinkSync, unlinkSync,
appendFileSync, appendFileSync,

35
core/src/browser/index.ts Normal file
View File

@ -0,0 +1,35 @@
/**
* Export Core module
* @module
*/
export * from './core'
/**
* Export Event module.
* @module
*/
export * from './events'
/**
* Export Filesystem module.
* @module
*/
export * from './fs'
/**
* Export Extension module.
* @module
*/
export * from './extension'
/**
* Export all base extensions.
* @module
*/
export * from './extensions'
/**
* Export all base tools.
* @module
*/
export * from './tools'

View File

@ -0,0 +1,2 @@
export * from './manager'
export * from './tool'

View File

@ -0,0 +1,47 @@
import { AssistantTool, MessageRequest } from '../../types'
import { InferenceTool } from './tool'
/**
* Manages the registration and retrieval of inference tools.
*/
export class ToolManager {
public tools = new Map<string, InferenceTool>()
/**
* Registers a tool.
* @param tool - The tool to register.
*/
register<T extends InferenceTool>(tool: T) {
this.tools.set(tool.name, tool)
}
/**
* Retrieves a tool by it's name.
* @param name - The name of the tool to retrieve.
* @returns The tool, if found.
*/
get<T extends InferenceTool>(name: string): T | undefined {
return this.tools.get(name) as T | undefined
}
/*
** Process the message request with the tools.
*/
process(request: MessageRequest, tools: AssistantTool[]): Promise<MessageRequest> {
return tools.reduce((prevPromise, currentTool) => {
return prevPromise.then((prevResult) => {
return currentTool.enabled
? this.get(currentTool.type)?.process(prevResult, currentTool) ??
Promise.resolve(prevResult)
: Promise.resolve(prevResult)
})
}, Promise.resolve(request))
}
/**
* The instance of the tool manager.
*/
static instance(): ToolManager {
return (window.core?.toolManager as ToolManager) ?? new ToolManager()
}
}

View File

@ -0,0 +1,12 @@
import { AssistantTool, MessageRequest } from '../../types'
/**
* Represents a base inference tool.
*/
export abstract class InferenceTool {
abstract name: string
/*
** Process a message request and return the processed message request.
*/
abstract process(request: MessageRequest, tool?: AssistantTool): Promise<MessageRequest>
}

View File

@ -2,42 +2,13 @@
* Export all types. * Export all types.
* @module * @module
*/ */
export * from './types/index' export * from './types'
/** /**
* Export all routes * Export browser module
*/
export * from './api'
/**
* Export Core module
* @module * @module
*/ */
export * from './core' export * from './browser'
/**
* Export Event module.
* @module
*/
export * from './events'
/**
* Export Filesystem module.
* @module
*/
export * from './fs'
/**
* Export Extension module.
* @module
*/
export * from './extension'
/**
* Export all base extensions.
* @module
*/
export * from './extensions/index'
/** /**
* Declare global object * Declare global object

View File

@ -4,7 +4,7 @@ import {
ExtensionRoute, ExtensionRoute,
FileManagerRoute, FileManagerRoute,
FileSystemRoute, FileSystemRoute,
} from '../../../api' } from '../../../types/api'
import { Downloader } from '../processors/download' import { Downloader } from '../processors/download'
import { FileSystem } from '../processors/fs' import { FileSystem } from '../processors/fs'
import { Extension } from '../processors/extension' import { Extension } from '../processors/extension'

View File

@ -1,4 +1,4 @@
import { CoreRoutes } from '../../../api' import { CoreRoutes } from '../../../types/api'
import { RequestAdapter } from './adapter' import { RequestAdapter } from './adapter'
export type Handler = (route: string, args: any) => any export type Handler = (route: string, args: any) => any

View File

@ -1,5 +1,5 @@
import { resolve, sep } from 'path' import { resolve, sep } from 'path'
import { DownloadEvent } from '../../../api' import { DownloadEvent } from '../../../types/api'
import { normalizeFilePath } from '../../helper/path' import { normalizeFilePath } from '../../helper/path'
import { getJanDataFolderPath } from '../../helper' import { getJanDataFolderPath } from '../../helper'
import { DownloadManager } from '../../helper/download' import { DownloadManager } from '../../helper/download'

View File

@ -2,6 +2,7 @@ import { join } from 'path'
import { normalizeFilePath } from '../../helper/path' import { normalizeFilePath } from '../../helper/path'
import { getJanDataFolderPath } from '../../helper' import { getJanDataFolderPath } from '../../helper'
import { Processor } from './Processor' import { Processor } from './Processor'
import fs from 'fs'
export class FileSystem implements Processor { export class FileSystem implements Processor {
observer?: Function observer?: Function
@ -11,15 +12,65 @@ export class FileSystem implements Processor {
this.observer = observer this.observer = observer
} }
process(route: string, ...args: any[]): any { process(route: string, ...args: any): any {
const instance = this as any
const func = instance[route]
if (func) {
return func(...args)
} else {
return import(FileSystem.moduleName).then((mdl) => return import(FileSystem.moduleName).then((mdl) =>
mdl[route]( mdl[route](
...args.map((arg: any) => ...args.map((arg: any) => {
typeof arg === 'string' && (arg.startsWith(`file:/`) || arg.startsWith(`file:\\`)) return typeof arg === 'string' &&
(arg.startsWith(`file:/`) || arg.startsWith(`file:\\`))
? join(getJanDataFolderPath(), normalizeFilePath(arg)) ? join(getJanDataFolderPath(), normalizeFilePath(arg))
: arg : arg
) })
) )
) )
} }
}
rm(...args: any): Promise<void> {
if (typeof args[0] !== 'string') {
throw new Error(`rm error: Invalid argument ${JSON.stringify(args)}`)
}
let path = args[0]
if (path.startsWith(`file:/`) || path.startsWith(`file:\\`)) {
path = join(getJanDataFolderPath(), normalizeFilePath(path))
}
return new Promise((resolve, reject) => {
fs.rm(path, { recursive: true, force: true }, (err) => {
if (err) {
reject(err)
} else {
resolve()
}
})
})
}
mkdir(...args: any): Promise<void> {
if (typeof args[0] !== 'string') {
throw new Error(`mkdir error: Invalid argument ${JSON.stringify(args)}`)
}
let path = args[0]
if (path.startsWith(`file:/`) || path.startsWith(`file:\\`)) {
path = join(getJanDataFolderPath(), normalizeFilePath(path))
}
return new Promise((resolve, reject) => {
fs.mkdir(path, { recursive: true }, (err) => {
if (err) {
reject(err)
} else {
resolve()
}
})
})
}
} }

View File

@ -1,4 +1,4 @@
import { DownloadRoute } from '../../../../api' import { DownloadRoute } from '../../../../types/api'
import { DownloadManager } from '../../../helper/download' import { DownloadManager } from '../../../helper/download'
import { HttpServer } from '../../HttpServer' import { HttpServer } from '../../HttpServer'

View File

@ -40,7 +40,7 @@ export const commonRouter = async (app: HttpServer) => {
}) })
// Threads // Threads
app.post(`/threads/`, async (req, res) => createThread(req.body)) app.post(`/threads`, async (req, res) => createThread(req.body))
app.get(`/threads/:threadId/messages`, async (req, res) => app.get(`/threads/:threadId/messages`, async (req, res) =>
getMessages(req.params.threadId).then(normalizeData) getMessages(req.params.threadId).then(normalizeData)

View File

@ -216,7 +216,7 @@ export const createMessage = async (threadId: string, message: any) => {
const threadMessagesFileName = 'messages.jsonl' const threadMessagesFileName = 'messages.jsonl'
try { try {
const { ulid } = require('ulid') const { ulid } = require('ulidx')
const msgId = ulid() const msgId = ulid()
const createdAt = Date.now() const createdAt = Date.now()
const threadMessage: ThreadMessage = { const threadMessage: ThreadMessage = {
@ -335,7 +335,12 @@ export const chatCompletions = async (request: any, reply: any) => {
headers['api-key'] = apiKey headers['api-key'] = apiKey
} }
console.debug(apiUrl) console.debug(apiUrl)
console.debug(JSON.stringify(headers))
if (requestedModel.engine === 'openai' && request.body.stop) {
// openai only allows max 4 stop words
request.body.stop = request.body.stop.slice(0, 4)
}
const fetch = require('node-fetch') const fetch = require('node-fetch')
const response = await fetch(apiUrl, { const response = await fetch(apiUrl, {
method: 'POST', method: 'POST',

View File

@ -182,7 +182,7 @@ export default class Extension {
async uninstall(): Promise<void> { async uninstall(): Promise<void> {
const path = ExtensionManager.instance.getExtensionsPath() const path = ExtensionManager.instance.getExtensionsPath()
const extPath = resolve(path ?? '', this.name ?? '') const extPath = resolve(path ?? '', this.name ?? '')
await rmdirSync(extPath, { recursive: true }) rmdirSync(extPath, { recursive: true })
this.emitUpdate() this.emitUpdate()
} }

View File

@ -126,7 +126,7 @@ const exec = async (command: string): Promise<string> => {
} }
export const getEngineConfiguration = async (engineId: string) => { export const getEngineConfiguration = async (engineId: string) => {
if (engineId !== 'openai') { if (engineId !== 'openai' && engineId !== 'groq') {
return undefined return undefined
} }
const directoryPath = join(getJanDataFolderPath(), 'engines') const directoryPath = join(getJanDataFolderPath(), 'engines')

View File

@ -4,3 +4,5 @@ export * from './extension/manager'
export * from './extension/store' export * from './extension/store'
export * from './api' export * from './api'
export * from './helper' export * from './helper'
export * from './../types'
export * from '../types/api'

View File

@ -82,9 +82,9 @@ export enum FileSystemRoute {
unlinkSync = 'unlinkSync', unlinkSync = 'unlinkSync',
existsSync = 'existsSync', existsSync = 'existsSync',
readdirSync = 'readdirSync', readdirSync = 'readdirSync',
mkdirSync = 'mkdirSync', rm = 'rm',
mkdir = 'mkdir',
readFileSync = 'readFileSync', readFileSync = 'readFileSync',
rmdirSync = 'rmdirSync',
writeFileSync = 'writeFileSync', writeFileSync = 'writeFileSync',
} }
export enum FileManagerRoute { export enum FileManagerRoute {

View File

@ -8,3 +8,4 @@ export * from './file'
export * from './config' export * from './config'
export * from './huggingface' export * from './huggingface'
export * from './miscellaneous' export * from './miscellaneous'
export * from './api'

View File

@ -7,7 +7,6 @@ export type ModelInfo = {
settings: ModelSettingParams settings: ModelSettingParams
parameters: ModelRuntimeParams parameters: ModelRuntimeParams
engine?: InferenceEngine engine?: InferenceEngine
proxy_model?: InferenceEngine
} }
/** /**
@ -18,10 +17,9 @@ export type ModelInfo = {
export enum InferenceEngine { export enum InferenceEngine {
nitro = 'nitro', nitro = 'nitro',
openai = 'openai', openai = 'openai',
groq = 'groq',
triton_trtllm = 'triton_trtllm', triton_trtllm = 'triton_trtllm',
nitro_tensorrt_llm = 'nitro-tensorrt-llm', nitro_tensorrt_llm = 'nitro-tensorrt-llm',
tool_retrieval_enabled = 'tool_retrieval_enabled',
} }
export type ModelArtifact = { export type ModelArtifact = {
@ -93,8 +91,6 @@ export type Model = {
* The model engine. * The model engine.
*/ */
engine: InferenceEngine engine: InferenceEngine
proxy_model?: InferenceEngine
} }
export type ModelMetadata = { export type ModelMetadata = {

View File

@ -1,6 +0,0 @@
GTM_ID=xxxx
UMAMI_PROJECT_API_KEY=xxxx
UMAMI_APP_URL=xxxx
ALGOLIA_API_KEY=xxxx
ALGOLIA_APP_ID=xxxx
GITHUB_ACCESS_TOKEN=xxxx

20
docs/.gitignore vendored
View File

@ -1,20 +0,0 @@
# Dependencies
/node_modules
# Production
/build
# Generated files
.docusaurus
.cache-loader
# Misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local
npm-debug.log*
yarn-debug.log*
yarn-error.log*

View File

@ -1,86 +0,0 @@
# Website & Docs
This website is built using [Docusaurus 3.0](https://docusaurus.io/), a modern static website generator.
### Information Architecture
We try to **keep routes consistent** to maintain SEO.
- **`/guides/`**: Guides on how to use the Jan application. For end users who are directly using Jan.
- **`/developer/`**: Developer docs on how to extend Jan. These pages are about what people can build with our software.
- **`/api-reference/`**: Reference documentation for the Jan API server, written in Swagger/OpenAPI format.
- **`/changelog/`**: A list of changes made to the Jan application with each release.
- **`/blog/`**: A blog for the Jan application.
### Sidebar Autogeneration
The order of each page is either explicitly defined in `sidebar.js` or follows the [Docusaurus autogenerated](https://docusaurus.io/docs/next/sidebar/autogenerated) naming format, `##-path-name.md`.
Important slugs are hardcoded at the document level (and shouldn't be rerouted):
```
---
title: Overview
slug: /docs
---
```
## How to Contribute
Refer to the [Contributing Guide](https://github.com/janhq/jan/blob/dev/CONTRIBUTING.md) for more comprehensive information on how to contribute to the Jan project.
### Pre-requisites and Installation
- [Node.js](https://nodejs.org/en/) (version 20.0.0 or higher)
- [yarn](https://yarnpkg.com/) (version 1.22.0 or higher)
#### Installation
```bash
cd jan/docs
yarn install
yarn start
```
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
#### Build
```bash
yarn build
```
This command generates static content into the `build` directory and can be served using any static contents hosting service.
### Deployment
Using SSH:
```bash
USE_SSH=true yarn deploy
```
Not using SSH:
```bash
GIT_USER=<Your GitHub username> yarn deploy
```
If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
### Preview URL, Pre-release and Publishing Documentation
- When a pull request is created, the preview URL will be automatically commented on the pull request.
- The documentation will then be published to [https://dev.jan.ai/](https://dev.jan.ai/) when the pull request is merged to `dev`.
- Our open-source maintainers will sync the updated content from `dev` to `docs` branch, which will then be published to [https://jan.ai/](https://jan.ai/).
### Additional Plugins
- @docusaurus/theme-live-codeblock
- [Redocusaurus](https://redocusaurus.vercel.app/): manually upload swagger files at `/openapi/jan.yaml` to update the API reference documentation.

View File

@ -1,3 +0,0 @@
module.exports = {
presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
};

View File

@ -1,122 +0,0 @@
---
title: "Post Mortem: Bitdefender False Positive Flag"
description: "10th January 2024, Jan's 0.4.4 Release on Windows triggered Bitdefender to incorrectly flag it as infected with Gen:Variant.Tedy.258323, leading to automatic quarantine warnings on users' computers."
slug: /postmortems/january-10-2024-bitdefender-false-positive-flag
tags: [Postmortem]
---
Following the recent incident related to Jan version 0.4.4 triggering Bitdefender on Windows with Gen:Variant.Tedy.258323 on January 10, 2024, we wanted to provide a comprehensive postmortem and outline the necessary follow-up actions.
## Incident Overview
### Bug Description
Jan 0.4.4 installation on Windows triggered Bitdefender to flag it as infected with Gen:Variant.Tedy.258323, leading to automatic quarantine.
### Affected Antivirus
- McAfee / Microsoft Defender was unaffected
- Bitdefender consistently flagged the issue.
### Incident Timeline
- _10 Jan, 2:18 am SGT:_ Hawke flags up Malware antivirus errors for 0.4.4 installation on Windows computers.
- _10 Jan, 2:21 am SGT:_ @0xSage responds in Discord.
- _10 Jan, 2:35 am SGT:_ Hawke confirms multiple people have experienced this error on fresh installs.
- _10 Jan, 2:41 am SGT:_ @louis-jan and @dan-jan revert 0.4.4 out of an abundance of caution.
- _Incident ongoing:_ To triage and investigate the next day.
- _10 Jan, 11:36 am SGT:_ @Hien has investigated all versions of Nitro and conducted scans using Bitdefender. Only the 2 latest versions raised warnings (0.2.7, 0.2.8).
- _10 Jan, 12:44 pm SGT:_ @Hien tested again for the 0.2.6 and suggested using 0.2.6 for now; the 2 remaining Nitro versions (0.2.7, 0.2.8) will be under further investigation.
- The team started testing on the fixed build.
- _10 Jan, 3:22 pm SGT:_ Diagnosis found that it's most likely a false positive. @Hien has only found a solution by attempting to build Nitro Windows CPU on a GitHub-hosted runner and hasn't identified the root cause yet.
- _10 Jan, 5:24 pm SGT:_ @Hien testing two scenarios and still trying to understand the workings of Bitdefender.
- _11 Jan, 5:46 pm SGT:_ Postmortem meeting
## Investigation Update
- @Hien has investigated all versions of Nitro and conducted scans using Bitdefender. and only the 2 latest versions raised warnings from Bitdefender. Nitro 0.2.6, which is the highest version without the issue, was tested again, and it no longer triggers a warning from Bitdefender.
- We have observed that Nitro versions up to 0.2.6 remain unaffected. However, Bitdefender flags versions 0.2.7 and 0.2.8 as infected, leading to the deletion. In order to proceed with the current release, Hien suggests downgrading Nitro to version 0.2.6 and conducting tests with this version. Simultaneously, he will investigate why Bitdefender is flagging versions 0.2.7 and 0.2.8.
- It's essential to note that between versions 0.2.6, 0.2.7, and 0.2.8, only minor changes were made, which should not trigger a malicious code warning. We can refer to the changelog between 0.2.7 and 0.2.8 to pinpoint these changes.
- Our primary message is to convey that we did not introduce malicious code into Jan (indicating a false positive), and the investigation aims to understand the root cause behind Bitdefender flagging versions 0.2.7 and 0.2.8.
- The current diagnosis looks like a false positive but it's still under investigation. Reference link: [here](https://stackoverflow.com/questions/75886428/fake-positive-bit-defender-problem-genvariant-tedy-304469), [here](https://stackoverflow.com/questions/58010466/bitdefender-detects-my-console-application-as-genvariant-ursu-56053), and [here](https://www.cisa.gov/sites/default/files/2023-06/mar-10365227.r1.v1.clear_.pdf).
- @Hien testing two scenarios and still trying to understand the workings of Bitdefender. Still under investigation: is the issue with the code or the CI?
- In Case 1, using the same CI agent for tags 0.2.6 and 0.2.8, after PRs by Alan and myself, Bitdefender flagged the Nitro CPU binary build. Naturally, one would conclude this is due to the code.
- However, I proceeded with a further experiment: for the 0.2.8 code, instead of using our CI agent, I used a GitHub hosted agent. This time, Bitdefender did not flag our binary build.
- We've identified the Bitdefender warning was not an attack. There is no malicious code
- We've isolated the event to originate from a CI agent, which resulted in a BitDefender false positive alert.
## Follow-ups and Action Items
1. **Reproduce Bitdefender Flag in Controlled Environment [Done]:**
- _Objective:_ To replicate the issue in a controlled environment to understand the triggers and specifics of Bitdefender's detection.
2. **Investigate Malicious Code or False Positive:**
- _Objective:_ Determine whether the flagged issue is a result of actual malicious code or a false positive. If it's a false positive, work towards resolution while communicating with Bitdefender.
3. **Supply Chain Attack Assessment:**
- _Objective:_ Evaluate the possibility of a supply chain attack. Investigate whether the Nitro 0.4.4 distribution was compromised or tampered with during the release process.
4. **Testing after the Hotfix:**
- _Objective:_ In addition to verifying the issue after the fix, it is essential to conduct comprehensive testing across related areas, ensuring compatibility across different operating systems and antivirus software (latest version / free version only).
5. **Process Improvement for Future Releases:**
- _Objective:_ Identify and implement improvements to our release process to prevent similar incidents in the future. This may include enhanced testing procedures, code analysis, and collaboration with antivirus software providers during the pre-release phase. Additionally, we should add verifying the latest antivirus software in the release checklist.
6. **Documentation of Tested Antivirus Versions:**
- _Objective:_ Create a document that outlines the testing conducted, including a matrix that correlates Jan versions with the tested antivirus versions.
- _Sample list:_ for consideration purpose
- Bitdefender
- McAfee
- Avira
- Kaspersky
- Norton
- Microsoft defender
- AVG
- TotalAV
## Next Steps
- The team should follow up on each action item with clear ownership priority, and deadlines.
- Communicate progress transparently with the community and clients through appropriate channels. If any insights or suggestions, share them within the dedicated channels.
- Update internal documentation and procedures based on the lessons learned from this incident.
## Lessons Learned
1. **Antivirus Compatibility Awareness:**
- _Observation:_ The incident underscored the significance of recognizing and testing for antivirus compatibility, particularly with widely-used solutions like Bitdefender.
- _Lesson Learned:_ In the future, we will integrate comprehensive checks for compatibility with various antivirus software, including both antivirus and "Malicious Code Detection," into our CI or QA checklist. This proactive measure aims to minimize false positive detections during the release and testing processes.
2. **Cross-Platform Testing:**
- _Observation:_ The problem did not occur on MacOS and Linux systems, implying a potential oversight in cross-platform testing during our release procedures.
- _Lesson Learned:_ Clarification — This observation is not directly related to antivirus testing. Instead, it underscores the necessity to improve our testing protocols, encompassing multiple operating systems. This ensures a thorough evaluation of potential issues on diverse platforms, considering the various antivirus software and differences in architectures on Mac and Linux systems.
3. **User Communication and Documentation:**
- _Observation:_ Due to the timely response from Nicole, who was still active on Discord and Github at 2 am, this quick response facilitated our ability to assess the impact accurately.
- _Lesson Learned:_ While our communication with users was effective in this instance, it was mainly due to Nicole's presence during the incident. To improve our overall response capability, we should prioritize "24/7 rapid triage and response." This involves ensuring continuous availability or establishing a reliable rotation of team members for swift user communication and issue documentation, further enhancing our incident response efficiency.
4. **Proactive Incident Response:**
- _Observation:_ The incident response, while involving a prompt version rollback, experienced a slight delay due to the release occurring at midnight. This delay postponed the initiation of the investigation until the next working hours.
- _Lesson Learned:_ Recognizing the importance of swift incident response, particularly in time-sensitive situations, we acknowledge that releasing updates during off-hours can impact the immediacy of our actions. Moving forward, we will strive to optimize our release schedules to minimize delays and ensure that investigations can commence promptly regardless of the time of day. This may involve considering alternative release windows or implementing automated responses to critical incidents, ensuring a more proactive and timely resolution.
5. **Supply Chain Security Measures:**
- _Observation:_ While the incident prompted consideration of a potential supply chain attack, it's crucial to emphasize that this was not the case. Nonetheless, the incident underscored the importance of reviewing our supply chain security measures.
- _Lesson Learned:_ Going forward, we should strengthen supply chain security by introducing additional verification steps to uphold the integrity of our release process. Collaborating with distribution channels is essential for enhancing security checks and ensuring a robust supply chain.
- _Longer-term:_ Exploring options for checking Jan for malicious code and incorporating antivirus as part of our CI/CD pipeline should be considered for a more comprehensive and proactive approach.
6. **User Education on False Positives:**
- _Observation:_ Users reported Bitdefender automatically "disinfecting" the flagged Nitro version without allowing any user actions.
- _Lesson Learned:_ Educate users about the possibility of false positives and guide them on how to whitelist or report such incidents to their antivirus provider (if possible). Provide clear communication on steps users can take in such situations.
These lessons learned will serve as a foundation for refining our processes and ensuring a more resilient release and incident response framework in the future. Continuous improvement is key to maintaining the reliability and security of our software.
Thank you for your dedication and cooperation in resolving this matter promptly.

View File

@ -1,76 +0,0 @@
dan-jan:
name: Daniel Onggunhao
title: Co-Founder
url: https://github.com/dan-jan
image_url: https://avatars.githubusercontent.com/u/101145494?v=4
email: daniel@jan.ai
namchuai:
name: Nam Nguyen
title: Developer
url: https://github.com/namchuai
image_url: https://avatars.githubusercontent.com/u/10397206?v=4
email: james@jan.ai
hiro-v:
name: Hiro Vuong
title: MLE
url: https://github.com/hiro-v
image_url: https://avatars.githubusercontent.com/u/22463238?v=4
email: hiro@jan.ai
ashley-jan:
name: Ashley Tran
title: Product Designer
url: https://github.com/imtuyethan
image_url: https://avatars.githubusercontent.com/u/89722390?v=4
email: ashley@jan.ai
hientominh:
name: Hien To
title: DevOps Engineer
url: https://github.com/hientominh
image_url: https://avatars.githubusercontent.com/u/37921427?v=4
email: hien@jan.ai
Van-QA:
name: Van Pham
title: QA & Release Manager
url: https://github.com/Van-QA
image_url: https://avatars.githubusercontent.com/u/64197333?v=4
email: van@jan.ai
louis-jan:
name: Louis Le
title: Software Engineer
url: https://github.com/louis-jan
image_url: https://avatars.githubusercontent.com/u/133622055?v=4
email: louis@jan.ai
hahuyhoang411:
name: Rex Ha
title: LLM Researcher & Content Writer
url: https://github.com/hahuyhoang411
image_url: https://avatars.githubusercontent.com/u/64120343?v=4
email: rex@jan.ai
automaticcat:
name: Alan Dao
title: AI Engineer
url: https://github.com/tikikun
image_url: https://avatars.githubusercontent.com/u/22268502?v=4
email: alan@jan.ai
hieu-jan:
name: Henry Ho
title: Software Engineer
url: https://github.com/hieu-jan
image_url: https://avatars.githubusercontent.com/u/150573299?v=4
email: hieu@jan.ai
0xsage:
name: Nicole Zhu
title: Co-Founder
url: https://github.com/0xsage
image_url: https://avatars.githubusercontent.com/u/69952136?v=4
email: nicole@jan.ai

View File

@ -1,12 +0,0 @@
---
title: Jan's Vision for 2035
---
[Jan 2035: A Robotics Company](https://hackmd.io/QIWyYbNNQVWVbupuI3kjAA)
We only have 2 planning parameters:
- 10 year vision
- 2 week sprint
And we measure our success on Quarterly OKRs

View File

@ -1,119 +0,0 @@
---
title: About Jan
slug: /about
description: Jan is a desktop application that turns computers into thinking machines.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
about Jan,
desktop application,
thinking machine,
]
---
Jan turns computers into thinking machines to change how we use them.
Jan is created and maintained by Jan Labs, a robotics company.
With Jan, you can:
- Run [open-source LLMs](https://huggingface.co/models?pipeline_tag=text-generation) locally or connect to cloud AIs like [ChatGPT](https://openai.com/blog/openai-api) or [Google](https://ai.google.dev/).
- Fine-tune AI with specific knowledge.
- Supercharge your productivity by leveraging AI.
- Search the web and databases.
- Integrate AI with everyday tools to work on your behalf (with permission).
- Customize and add features with Extensions.
:::tip
Jan aims for long-term human-robot collaboration, envisioning AI as a harmonious extension of human capabilities. Our goal is to build customizable robots that we continually improve and customize, growing together.
:::
![Human repairing a Droid](/img/star-wars-droids.png)
## Jan's principles
- **Ownership**: Jan is committed to developing a product that fully belongs to users. You're the true owner, free from data tracking and storage by us.
- **Privacy**: Jan works locally by default, allowing use without an internet connection. Your data stays on your device in a universal format, giving you complete privacy control.
- **100% User Supported**: Every user can access, develop, and customize Jan's codebases to suit their needs.
- **Rejecting Dark Patterns**: We never use tricks to extract more money or lock you into an ecosystem.
## Why do we exist?
> _"I do not fear computers. I fear the lack of them." - Isaac Asimov_
Jan was founded on the belief that AI should coexist with humans, not replace them. Our mission is to democratize AI access, ensuring everyone can easily utilize it with full ownership and control over their data, free from privacy concerns.
### What are the things Jan committed on?
We are committed to creating open, local-first products that extend individual freedom, rejecting dark patterns and ecosystem lock-ins, and embracing an open-source ethos.
#### What's different about it?
| | Status Quo | Jan |
| --------------------- | -------------------------- | ---------------------------------------------------------------------- |
| **Ownership** | Owned by Big Tech | Fully owned by you |
| **Openness** | Closed-source | [Open-source (AGPLv3)](https://github.com/janhq/jan/blob/main/LICENSE) |
| **Your Role** | Consumer | Creator |
| **Approach** | Cloud-based | [Local-first](https://www.inkandswitch.com/local-first/) |
| **Data Handling** | Stored on external servers | Stored locally, openly accessible |
| **Privacy** | Questionable | Private and offline |
| **Transparency** | Opaque "Black Box" | Open-source and customizable |
| **Outage Resilience** | Potential data hostage | Continues to work on your device |
| **Philosophy** | User monetization | Empowerment with the right to repair |
## How we work
Jan is an open-source product with transparent development and future features. Users have the right to modify and customize Jan. We are committed to building an open-source AI ecosystem.
Jan is building in public using GitHub, where anyone is welcome to join. Key resources include Jan's [Kanban](https://github.com/orgs/janhq/projects/5/views/7) and Jan's [Roadmap](https://github.com/orgs/janhq/projects/5/views/29).
Jan has a fully-remote team, primarily based in the APAC timezone, and we use Discord and GitHub for collaboration. Our community is central to our operations, and we embrace asynchronous work. We hold meetings only for synchronization and vision sharing, using [Excalidraw](https://excalidraw.com/) or [Miro](https://miro.com/) for visualization and sharing notes on Discord for alignment. We also use [HackMD](https://hackmd.io/) to document our ideas and build a Jan library.
## How to get it?
You can install and start using Jan in less than 5 minutes, from [Jan.ai](https://jan.ai) or our [Github repo](https://github.com/janhq/jan).
## What license is the code under?
Jan is licensed under the [AGPLv3 License](https://github.com/janhq/jan/blob/main/LICENSE).
We happily accept pull requests, however, we do ask that you sign a [Contributor License Agreement](https://en.wikipedia.org/wiki/Contributor_License_Agreement) so that we have the right to relicense your contributions[^2].
## What was it built with?
[Jan](https://github.com/janhq/jan) is pragmatically built using `Typescript` at the application level and `C++` at the Inference level (which we have refactored into [Nitro](https://nitro.jan.ai)[^3]).
We follow [clean architecture](https://blog.cleancoder.com/uncle-bob/2012/08/13/the-clean-architecture.html) and currently support multiple frameworks and runtimes:
- A desktop client with [Electron](https://www.electronjs.org/)
- A headless server-mode with [Nodejs](https://nodejs.org/en)
- Planned support for mobile with [Capacitor](https://capacitorjs.com/)
- Planned support for Python runtime
Architecturally, we have made similar choices to the [Next.js Enterprise Javascript Stack](https://vercel.com/templates/next.js/nextjs-enterprise-boilerplate), which is a [battle-tested](https://nextjs.org/showcase/enterprise) framework for building enterprise-grade applications that scale.
## Join the team
Join us on this journey at Jan Labs, where we embrace open-source collaboration and transparency. Together, let's shape a future where Jan becomes an essential companion in the open-source community. Explore [careers](https://janai.bamboohr.com/careers) with us.
## Contact
Drop us a message in our [Discord](https://discord.gg/af6SaTdzpx) and we'll get back to you.
- `#general`: for general discussion
- `#get-help`: for bug reports and troubleshooting
- `#roadmap`: for feature requests and ideas
## Footnotes
[^1]: Credit to Obsidian's original website
[^2]: Credit to [Discourse's About Page](https://www.discourse.org/about)
[^3]: Credit to [Llama.cpp](https://github.com/ggerganov/llama.cpp), [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM), [vLLM](https://github.com/vllm-project/vllm), [LMDeploy](https://github.com/InternLM/lmdeploy) and more.

View File

@ -1,65 +0,0 @@
# Frequently Asked Questions (FAQ)
## What is Jan?
Jan is software that helps you run large language models (LLMs) on your everyday tasks. For details, read the [About page](https://jan.ai/about/).
## How do I use Jan?
Download Jan to your computer, choose a compatible LLM, or connect to a remote AI with the API code to start. You can switch between them as needed.
## Is Jan compatible with my operating system?
Jan is available for Mac, Windows, and Linux, ensuring wide compatibility.
## Do you use my data?
No. See our data and analytics policy [here](https://jan.ai/privacy/#:~:text=We%20do%20not%20share%20your,with%20a%20better%20user%20experience.).
## Do you sell my data?
No. We don't even track your data. Jan is yours.
## How does Jan ensure my data remains private?
Jan prioritizes your privacy by running open-source AI models 100% offline on your computer, ensuring all conversations, documents, and files stay private.
## What does "Jan" stand for?
Jan stands for “Just Another Neuron”, as we are passionate about building software that complements your existing neural pathways. But in the spirit of full transparency, it was also just a nice 3 letter domain name we owned 😂.
## Can I use Jan without an internet connection?
Yes, Jan can run locally without an internet connection for many features.
## Are there any costs associated with using Jan?
Jan is free to use. However, if you want to connect to remote APIs, like GPT-4, you will need to put in your own API key.
## What types of AI models can I download or import with Jan?
You can download popular AI models or import any model of your choice through Jan's Hub.
## How do I customize Jan using the programmable API?
The API allows you to tailor Jan to your needs, but specific details on usage would require consulting Jan's documentation.
## How can I contribute to Jan's development or suggest features?
Contributions can be made through [GitHub](https://github.com/janhq/jan) and [Discord](https://discord.gg/Exe46xPMbK), where you can also suggest features and contribute.
## How can I get involved with the Jan community?
Joining [Jan's Discord server](https://discord.gg/qSwXFx6Krr) is a great way to get involved with the community.
## How do I troubleshoot issues with installing or using Jan?
For troubleshooting, you should reach out on Discord and check GitHub for assistance and support from the community and the development team.
## Can I self-host?
Yes! We love the self-hosted movement. Jan is available as a Helm chart/ Docker composes which can be run across home servers or even production-level environments.
## Are you hiring?
We often hire directly from our community. If you are interested in applying, please see our careers page [here](https://janai.bamboohr.com/careers).

View File

@ -1,6 +0,0 @@
---
title: Roadmap
---
- [ ] [Immediate Roadmap on Github](https://github.com/orgs/janhq/projects/5/views/16)
- [ ] [Longer-term Roadmap on Discord](https://discord.gg/Ey62mynnYr)

View File

@ -1,26 +0,0 @@
---
title: Acknowledgements
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
slug: /acknowledgements
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
acknowledgements,
third-party libraries,
]
---
# Acknowledgements
We would like to express our gratitude to the following third-party libraries that have made the development of Jan possible.
- [llama.cpp](https://github.com/ggerganov/llama.cpp/blob/master/LICENSE)
- [LangChain.js](https://github.com/langchain-ai/langchainjs/blob/main/LICENSE)
- [TensorRT](https://github.com/NVIDIA/TensorRT/blob/main/LICENSE)

View File

@ -1,46 +0,0 @@
---
title: Jan's Community
slug: /community
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
## Socials
- [Discord](https://discord.gg/SH3DGmUs6b)
- [X](https://twitter.com/janframework)
- [HuggingFace](https://huggingface.co/janhq)
- [LinkedIn](https://www.linkedin.com/company/janframework/)
## Community Run
- [Reddit](https://www.reddit.com/r/janframework/)
## Careers
- [Jobs](https://janai.bamboohr.com/careers)
## Newsletter
<iframe
width="100%"
height="600px"
src="https://c0c7c086.sibforms.com/serve/MUIFAEWm49nC1OONIibGnlV44yxPMw6Fu1Yc8pK7nP3jp7rZ6rvrb5uOmCD8IIhrRj6-h-_AYrw-sz7JNpcUZ8LAAZoUIOjGmSvNWHwoFhxX5lb-38-fxXj933yIdGzEMBZJv4Nu2BqC2A4uThDGmjM-n_DZBV1v_mKbTcVUWVUE7VutWhRqrDr69IWI4SgbuIMACkcTiWX8ZNLw"
frameborder="0"
scrolling="auto"
allowfullscreen
style={{
margin: 'auto',
maxWidth: '100%',
}}
></iframe>

View File

@ -1,51 +0,0 @@
---
title: Architecture
slug: /developer/architecture
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
:::warning
This page is still under construction, and should be read as a scratchpad
:::
## Overview
- Jan has a modular architecture and is largely built on top of its own modules.
- Jan uses a local [file-based approach](/developer/file-based) for data persistence.
- Jan provides an Electron-based [Desktop UI](https://github.com/janhq/jan).
- Jan provides an embeddable inference engine, written in C++, called [Nitro](https://nitro.jan.ai/docs/).
## Extensions
Jan has an Extensions API inspired by VSCode. In fact, most of Jan's core services are built as extensions.
Jan supports the following OpenAI compatible extensions:
| Jan Module | Description | API Docs |
| ---------- | ------------- | --------------------------------------------- |
| Chat | Inference | [/chats](/api-reference/#tag/Chat-Completion) |
| Models | Models | [/models](/api-reference/#tag/Models) |
| Assistants | Apps | [/assistants](/api-reference/#tag/Assistants) |
| Threads | Conversations | [/threads](/api-reference/#tag/Threads) |
| Messages | Messages | [/messages](/api-reference/#tag/Messages) |
<!-- TODO: link npm modules -->
## Modules
Modules are low-level system services, similar to OS kernel modules. Modules provide abstractions over basic, device-level functionality such as the filesystem, device system, databases, AI inference engines, etc.
Jan follows the [dependency inversion principle](https://en.wikipedia.org/wiki/Dependency_inversion_principle) such that `modules` expose the interfaces that `extensions` can then implement.

View File

@ -1,79 +0,0 @@
---
title: File-based Approach
slug: /developer/file-based
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
:::warning
This page is still under construction, and should be read as a scratchpad
:::
Jan uses the local filesystem for data persistence, similar to VSCode. This allows for composability and tinkerability.
```yaml
janroot/ # Jan's root folder (e.g. ~/jan)
models/ # For raw AI models
threads/ # For conversation history
assistants/ # For AI assistants' configs, knowledge, etc.
```
```yaml
/models
/modelA
model.json # Default model settings
llama-7b-q4.gguf # Model binaries
/threads
/jan-unixstamp
thread.json # thread metadata (e.g. subject)
messages.jsonl # messages
files/ # RAG
/assistants
/jan # A default assistant that can use all models
assistant.json # Assistant configs (see below)
package.json # Import npm modules, e.g. Langchain, Llamaindex
/src # For custom code
index.js # Entrypoint
# `/threads` at root level
# `/models` at root level
/shakespeare # Example of a custom assistant
assistant.json
package.json
/threads # Assistants remember conversations in the future
/models # Users can upload custom models
```
## Data Dependencies
```mermaid
graph LR
A1[("A User Integrators")] -->|uses| B1[assistant]
B1 -->|persist conversational history| C1[("thread A")]
B1 -->|executes| D1[("built-in tools as module")]
B1 -.->|uses| E1[model]
E1 -.->|model.json| D1
D1 --> F1[retrieval]
F1 -->|belongs to| G1[("web browsing")]
G1 --> H1[Google]
G1 --> H2[Duckduckgo]
F1 -->|belongs to| I1[("API calling")]
F1 --> J1[("knowledge files")]
```
- User/ Integrator
- Assistant object
- Model object
- Thread object
- Built-in tool object

View File

@ -1,86 +0,0 @@
---
title: User Interface
slug: /developer/ui
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
:::warning
This page is still under construction, and should be read as a scratchpad
:::
Jan provides a UI Kit for customizing the UI for your use case. This means you can personalize the entire application according to your own brand and visual styles.
This page gives you an overview of how to customize the UI.
You can see some of the user interface components when you first open Jan.
To Link:
- Ribbon
- LeftSidebar
- Main
- RightSidebar
- StatusBar
## Views
![Jan Views](/img/jan-views.png)
TODO: add a better image.
### Ribbon
Assistants shortcuts and Modules settings show up here.
```js
import .. from "@jan"
sample code here
```
### LeftSidebar
Conversation threads show up here. This is customizable, so custom assistants can add additional menu items here.
```js
import .. from "@jan"
sample code here
```
### Main
The main view for interacting with assistants. This is customizable, so custom assistants can add in additional UI components. By default, this is a chat thread with assistants.
```js
import .. from "@jan"
sample code here
```
### RightSidebar
A "settings" view for each thread. Users should be able to edit settings or other configs to customize the assistant experience within each thread.
```js
import .. from "@jan"
sample code here
```
### StatusBar
A global status bar that shows processes, hardware/disk utilization and more.
```js
import .. from "@jan"
sample code here
```

View File

@ -1,79 +0,0 @@
---
title: Installation and Prerequisites
slug: /developer/prereq
description: Guide to install and setup Jan for development.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
installation,
prerequisites,
developer setup,
]
---
## Requirements
### Hardware Requirements
Ensure your system meets the following specifications to guarantee a smooth development experience:
- Hardware Requirements
### System Requirements
Make sure your operating system meets the specific requirements for Jan development:
- [Windows](../../install/windows/#system-requirements)
- [MacOS](../../install/mac/#system-requirements)
- [Linux](../../install/linux/#system-requirements)
## Prerequisites
- [Node.js](https://nodejs.org/en/) (version 20.0.0 or higher)
- [yarn](https://yarnpkg.com/) (version 1.22.0 or higher)
- [make](https://www.gnu.org/software/make/) (version 3.81 or higher)
## Instructions
1. **Clone the Repository:**
```bash
git clone https://github.com/janhq/jan
cd jan
git checkout -b DESIRED_BRANCH
```
2. **Install Dependencies**
```bash
yarn install
```
3. **Run Development and Use Jan Desktop**
```bash
make dev
```
This command starts the development server and opens the Jan Desktop app.
## For Production Build
```bash
# Do steps 1 and 2 in the previous section
# Build the app
make build
```
This will build the app MacOS (M1/M2/M3) for production (with code signing already done) and place the result in `/electron/dist` folder.
## Troubleshooting
If you run into any issues due to a broken build, please check the [Stuck on a Broken Build](../../troubleshooting/stuck-on-broken-build) guide.

View File

@ -1,50 +0,0 @@
---
title: Overview
slug: /developer
description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
The following docs are aimed at developers who want to build extensions on top of the Jan Framework.
:::tip
If you are interested to **contribute to the framework's Core SDK itself**, like adding new drivers, runtimes, and infrastructure level support, please refer to [framework docs](/developer/framework) instead.
:::
## Extensions
Jan is an **extensible framework** (like VSCode or Obsidian) that lets you build, customize and run AI applications everywhere, with an emphasis on local first.
Extensions are automatically available across Mac, Windows, Linux Desktops.
Extensions can also be made available in local API server-mode, which can be deployed on any VM.
### Building Extensions
This framework is packaged and regularly published as an SDK through [npm](https://www.npmjs.com/org/janhq) and [pip](https://pypi.org/).
The framework provides built-in support for the following:
- Native OS integrations with Electron and Chromium
- Native server integrations with Nodejs
- Native mobile integrations with Capacitor (coming soon)
:::tip
Build once, deploy everywhere
:::
## Jan in Action
The [Jan Desktop client](https://github.com/janhq/jan/releases) is built with Jan SDK. This means you can customize any part of the application from the branding to the features, and truly make it your own.
[Gif: show desktop & server side by side]

View File

@ -1,23 +0,0 @@
---
title: Your First Assistant
slug: /developer/build-assistant/your-first-assistant/
description: A quick start on how to build an assistant.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
quick start,
build assistant,
]
---
:::caution
This is currently under development.
:::

View File

@ -1,22 +0,0 @@
---
title: Anatomy of an Assistant
slug: /developer/build-assistant/assistant-anatomy/
description: An overview of assistant.json
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
build assistant,
assistant anatomy,
]
---
:::caution
This is currently under development.
:::

View File

@ -1,22 +0,0 @@
---
title: Package your Assistant
slug: /developer/build-assistant/package-your-assistant/
description: Package your assistant for sharing and publishing.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
quick start,
build assistant,
]
---
:::caution
This is currently under development.
:::

View File

@ -1,25 +0,0 @@
---
title: Build an Assistant
slug: /developer/build-assistant
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
build assistant,
]
---
:::caution
This is currently under development.
:::
import DocCardList from "@theme/DocCardList";
<DocCardList />

View File

@ -1,24 +0,0 @@
---
title: Your First Engine
slug: /developer/build-engine/your-first-engine/
description: A quick start on how to build your first engine
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
quick start,
build engine,
]
---
:::caution
This is currently under development.
:::
A quickstart on how to integrate TensorRT-LLM.

View File

@ -1,22 +0,0 @@
---
title: Anatomy of an Engine
slug: /developer/build-engine/engine-anatomy
description: An overview of engine.json
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
build engine,
engine anatomy,
]
---
:::caution
This is currently under development.
:::

View File

@ -1,22 +0,0 @@
---
title: Package your Engine
slug: /developer/build-engine/package-your-engine/
description: Package your engine for sharing and publishing.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
build engine,
engine anatomy,
]
---
:::caution
This is currently under development.
:::

View File

@ -1,25 +0,0 @@
---
title: Build an Inference Engine
slug: /developer/build-engine/
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
build assistant,
]
---
:::caution
This is currently under development.
:::
import DocCardList from "@theme/DocCardList";
<DocCardList />

Binary file not shown.

Before

Width:  |  Height:  |  Size: 108 KiB

View File

@ -1,88 +0,0 @@
---
title: Your First Extension
slug: /developer/build-extension/your-first-extension/
description: A quick start on how to build your first extension
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
quick start,
build extension,
]
---
:::caution
This is currently under development.
:::
In this guide, we'll walk you through the process of building your first extension and integrating it into Jan.
## Steps to Create Your First Extension
To create your own extension, you can follow the steps below:
1. Click the **Use this template** button at the top of the [extension-template repository](https://github.com/janhq/extension-template).
2. Select **Create a new repository**.
3. Choose an owner and name for your new repository.
4. Click **Create repository**.
5. Clone your new repository to your local machine.
## Initial Setup
After you have cloned the repository to your local machine or codespace, you will need to perform some initial setup steps before you can develop your extension.
:::info
You will need to have a reasonably modern version of [Node.js](https://nodejs.org) handy. If you are using a version manager like [`nodenv`](https://github.com/nodenv/nodenv) or [`nvm`](https://github.com/nvm-sh/nvm), you can run `nodenv install` in the root of your repository to install the version specified in
[`package.json`](https://github.com/janhq/extension-template/blob/main/package.json). Otherwise, 20.x or later should work!
:::
1. :hammer_and_wrench: Install the dependencies
```bash
npm install
```
2. :building_construction: Package the TypeScript for distribution
```bash
npm run bundle
```
3. :white_check_mark: Check your artifact
There will be a `.tgz` file in your extension directory now. This is the file you will need to import into Jan. You can import this file into Jan by following the instructions in the [Import Extension](https://jan.ai/guides/using-extensions/import-extensions/) guide.
## Update the Extension Metadata
The [`package.json`](https://github.com/janhq/extension-template/blob/main/package.json) file defines metadata about your extension, such as extension name, main entry, description and version.
When you copy this repository, update `package.json` with the name, and description for your extension.
## Update the Extension Code
The [`src/`](https://github.com/janhq/extension-template/tree/main/src) directory is the heart of your extension! This contains the source code that will be run when your extension's functions are invoked. You can replace the contents of this directory with your own code.
There are a few things to keep in mind when writing your extension code:
- Most Jan Extension functions are processed asynchronously.
In `index.ts`, you will see that the extension function will return a `Promise<any>`.
```typescript
import { core } from "@janhq/core";
function onStart(): Promise<any> {
return core.invokePluginFunc(MODULE_PATH, "run", 0);
}
```
For more information about the Jan Extension Core module, see the [documentation](https://github.com/janhq/jan/blob/main/core/README.md).
Now, go ahead and start customizing your extension! Happy coding!

View File

@ -1,22 +0,0 @@
---
title: Anatomy of an Extension
slug: /developer/build-extension/extension-anatomy
description: An overview of extensions.json
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
build extension,
extension anatomy,
]
---
:::caution
This is currently under development.
:::

View File

@ -1,22 +0,0 @@
---
title: Package your Extension
slug: /developer/build-extension/package-your-extension/
description: Package your extension for sharing and publishing.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
build extension,
extension anatomy,
]
---
:::caution
This is currently under development.
:::

View File

@ -1,25 +0,0 @@
---
title: Build an Extension
slug: /developer/build-extension/
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
build extension,
]
---
:::caution
This is currently under development.
:::
import DocCardList from "@theme/DocCardList";
<DocCardList />

View File

@ -1,24 +0,0 @@
---
title: Engineering Specs
slug: /developer/engineering
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
spec,
engineering,
]
---
import DocCardList from "@theme/DocCardList";
<DocCardList className="DocCardList--no-description" />
Talk about CoreSDK here

View File

@ -1,119 +0,0 @@
---
title: "Assistants"
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
:::caution
This is currently under development.
:::
## Overview
In Jan, assistants are `primary` entities with the following capabilities:
- Assistants can use `models`, `tools`, handle and emit `events`, and invoke `custom code`.
- Users can create custom assistants with saved `model` settings and parameters.
- An [OpenAI Assistants API](https://platform.openai.com/docs/api-reference/assistants) compatible endpoint at `localhost:1337/v1/assistants`.
- Jan ships with a default assistant called "Jan" that lets you use all models.
## Folder Structure
```yaml
/jan
/models/
/threads/
/assistants
/jan # An assistant available to you by default
assistant.json # See below
/src # Assistants can invoke custom code
index.js # Entrypoint
process.js # For server processes (needs better name)
package.json # Import any npm libraries, e.g. Langchain, Llamaindex
/shakespeare # You can create custom assistants
assistant.json
/chicken_man
```
## `assistant.json`
- Each `assistant` folder contains an `assistant.json` file, which is a representation of an assistant.
- `assistant.json` contains metadata and model parameter overrides
- There are no required fields.
```js
{
"id": "asst_abc123", // Defaults to foldername
"object": "assistant", // Always "assistant"
"version": 1, // Defaults to 1
"created_at": 1698984975,
"name": "Math Tutor", // Defaults to foldername
"description": null,
"avatar": "https://pic.png",
"models": [ // Defaults to "*" all models
{ ...model_0 }
],
"instructions": "Be concise", // A system prompt for the assistant
"events": [], // Defaults to "*"
"metadata": {}, // Defaults to {}
// "tools": [], // Coming soon
// "file_ids": [], // Coming soon
// "memory/threads": true, // Coming soon
}
```
### Examples
Here's what the default Jan assistant's json file looks like:
```js
{
"name": "Jan",
"description": "A global assistant that lets you chat with all downloaded models",
"avatar": "https://jan.ai/img/logo.svg",
// All other properties are not explicitly declared and use the default values (see above).
}
```
## Events
Jan assistants can respond to event hooks. More powerfully, Jan assistants can register their own pubsub, so other entities, such as other assistants, can respond to your assistant's events.
## Custom Code
Jan assistants are Turing complete. This means you can write freeform code, and use any dependencies, when customizing your assistant.
```typescript
import {events, models} from "@janhq/core"
import {retrieval} from "@hiro/best-rag-ever" // This can be featured on Jan hub but install from npm
events.on('assistant:asst_abc123', (event) => async {
const result = models[0].process(event)
events.emit("assistant:asst_abc123", result)
resolve()
})
```
## Tools
> Coming soon
## Functions
> Coming soon
## Files
> Coming soon

View File

@ -1,42 +0,0 @@
---
title: Chats
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
:::caution
This is currently under development.
:::
## Overview
In Jan, `chats` are LLM responses in the form of OpenAI compatible `chat completion objects`.
- Models take a list of messages and return a model-generated response as output.
- An [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) compatible endpoint at `localhost:1337/v1/chats`.
## Folder Structure
Chats are stateless, thus are not saved in `janroot`. Any content and relevant metadata from calling this endpoint is extracted and persisted through [Messages](/docs/engineering/messages).
## API Reference
Jan's Chat API is compatible with [OpenAI's Chat API](https://platform.openai.com/docs/api-reference/chat).
See [Jan Chat API](https://jan.ai/api-reference/#tag/Chat-Completion)
## Implementation
Under the hood, the `/chat` endpoint simply reroutes an existing endpoint from [Nitro server](https://nitro.jan.ai). Nitro is a lightweight & local inference server, written in C++ and embedded into the Jan app. See [Nitro documentation](https://nitro.jan.ai/docs).

View File

@ -1,59 +0,0 @@
---
title: Engine
---
:::caution
Currently Under Development
:::
## Overview
In the Jan application, engines serve as primary entities with the following capabilities:
- Engine will be installed through `inference-extensions`.
- Models will depend on engines to do [inference](https://en.wikipedia.org/wiki/Inference_engine).
- Engine configuration and required metadata will be stored in a json file.
## Folder Structure
- Default parameters for engines are stored in JSON files located in the `/engines` folder.
- These parameter files are named uniquely with `engine_id`.
- Engines are referenced directly using `engine_id` in the `model.json` file.
```yaml
jan/
engines/
nitro.json
openai.json
.....
```
## Engine Default Parameter Files
- Each inference engine requires default parameters to function in cases where user-provided parameters are absent.
- These parameters are stored in JSON files, structured as simple key-value pairs.
### Example
Here is an example of an engine file for `engine_id` `nitro`:
```js
{
"ctx_len": 512,
"ngl": 100,
"embedding": false,
"n_parallel": 1,
"cont_batching": false
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
}
```
For detailed engine parameters, refer to: [Nitro's Model Settings](https://nitro.jan.ai/features/load-unload#table-of-parameters)
## Adding an Engine
- Engine parameter files are automatically generated upon installing an `inference-extension` in the Jan application.
---

View File

@ -1,80 +0,0 @@
---
title: "Files"
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
:::warning
Draft Specification: functionality has not been implemented yet.
:::
Files can be used by `threads`, `assistants` and `fine-tuning`
> Equivalent to: https://platform.openai.com/docs/api-reference/files
## Files Object
- Equivalent to: https://platform.openai.com/docs/api-reference/files
- Note: OAI's struct doesn't seem very well designed
- `files.json`
```js
{
// Public properties (OpenAI Compatible: https://platform.openai.com/docs/api-reference/files/object)
"id": "file-BK7bzQj3FfZFXr7DbL6xJwfo",
"object": "file",
"bytes": 120000,
"created_at": 1677610602,
"filename": "salesOverview.pdf",
"purpose": "assistants"
}
```
## File API
### List Files
> OpenAI Equivalent: https://platform.openai.com/docs/api-reference/files/list
### Upload file
> OpenAI Equivalent: https://platform.openai.com/docs/api-reference/files/create
### Delete file
> OpenAI Equivalent: https://platform.openai.com/docs/api-reference/files/delete
### Retrieve file
> OpenAI Equivalent: https://platform.openai.com/docs/api-reference/files/retrieve
### Retrieve file content
> OpenAI Equivalent: https://platform.openai.com/docs/api-reference/files/retrieve-contents
## Files Filesystem
- Files can exist in several parts of Jan's filesystem
- TODO: are files hard copied into these folders? Or do we define a `files.json` and only record the relative filepath?
```sh
/files # root `/files` for finetuning, etc
/assistants
/jan
/files # assistant-specific files
/threads
/jan-12938912
/files # thread-specific files
```

View File

@ -1,17 +0,0 @@
---
title: "Fine-tuning"
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
Todo: @hiro

View File

@ -1,101 +0,0 @@
---
title: Messages
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
:::caution
This is currently under development.
:::
## Overview
`Messages` capture a conversation's content. This can include the content from LLM responses and other metadata from [chat completions](/specs/chats).
- Users and assistants can send multimedia messages.
- An [OpenAI Message API](https://platform.openai.com/docs/api-reference/messages) compatible endpoint at `localhost:1337/v1/messages`.
## Folder Structure
Messages are saved in the `/threads/{thread_id}` folder in `messages.jsonl` files
```yaml
jan/
threads/
assistant_name_unix_timestamp/
thread.json # Thread metadata
messages.jsonl # Messages are stored in jsonl format
```
## `message.jsonl`
Individual messages are saved in `jsonl` format for indexing purposes.
```js
{...message_2}
{...message_1}
{...message_0}
```
### Examples
Here's a standard example `message` sent from a user.
```js
"id": "0", // Sequential or UUID
"object": "thread.message", // Defaults to "thread.message"
"created_at": 1698983503,
"thread_id": "thread_asdf", // Defaults to parent thread
"assistant_id": "jan", // Defaults to parent thread
"role": "user", // From either "user" or "assistant"
"content": [
{
"type": "text",
"text": {
"value": "Hi!?",
"annotations": []
}
}
],
"metadata": {}, // Defaults to {}
```
Here's an example `message` response from an assistant.
```js
"id": "0", // Sequential or UUID
"object": "thread.message", // Defaults to "thread.message"
"created_at": 1698983503,
"thread_id": "thread_asdf", // Defaults to parent thread
"assistant_id": "jan", // Defaults to parent thread
"role": "assistant", // From either "user" or "assistant"
"content": [ // Usually from Chat Completion obj
{
"type": "text",
"text": {
"value": "Hi! How can I help you today?",
"annotations": []
}
}
],
"metadata": {}, // Defaults to {}
"usage": {} // Save chat completion properties https://platform.openai.com/docs/api-reference/chat/object
```
## API Reference
Jan's `messages` API is compatible with [OpenAI's Messages API](https://platform.openai.com/docs/api-reference/messages), with additional methods for managing messages locally.
See [Jan Messages API](https://jan.ai/api-reference#tag/Messages).

View File

@ -1,128 +0,0 @@
---
title: Models
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
:::caution
This is currently under development.
:::
## Overview
In Jan, models are primary entities with the following capabilities:
- Users can import, configure, and run models locally.
- An [OpenAI Model API](https://platform.openai.com/docs/api-reference/models) compatible endpoint at `localhost:1337/v1/models`.
- Supported model formats: `ggufv3`, and more.
## Folder Structure
- Models are stored in the `/models` folder.
- Models are organized by individual folders, each containing the binaries and configurations needed to run the model. This makes for easy packaging and sharing.
- Model folder names are unique and used as `model_id` default values.
```yaml
jan/ # Jan root folder
models/
llama2-70b-q4_k_m/ # Example: standard GGUF model
model.json
model-binary-1.gguf
mistral-7b-gguf-q3_k_l/ # Example: quantizations are separate folders
model.json
mistral-7b-q3-K-L.gguf
mistral-7b-gguf-q8_k_m/ # Example: quantizations are separate folders
model.json
mistral-7b-q8_k_k.gguf
llava-ggml-Q5/ # Example: model with many partitions
model.json
mmprj.bin
model_q5.ggml
```
## `model.json`
- Each `model` folder contains a `model.json` file, which is a representation of a model.
- `model.json` contains metadata and default parameters used to run a model.
### Example
Here's a standard example `model.json` for a GGUF model.
```js
{
"id": "zephyr-7b", // Defaults to foldername
"object": "model", // Defaults to "model"
"sources": [
{
"filename": "zephyr-7b-beta.Q4_K_M.gguf",
"url": "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf"
}
],
"name": "Zephyr 7B", // Defaults to foldername
"owned_by": "you", // Defaults to "you"
"version": "1", // Defaults to 1
"created": 1231231, // Defaults to file creation time
"description": null, // Defaults to null
"format": "ggufv3", // Defaults to "ggufv3"
"engine": "nitro", // engine_id specified in jan/engine folder
"engine_parameters": {
// Engine parameters inside model.json can override
"ctx_len": 4096, // the value inside the base engine.json
"ngl": 100,
"embedding": true,
"n_parallel": 4
},
"model_parameters": {
    // Parameters used when calling the model (inference-time settings)
"stream": true,
"max_tokens": 4096,
"stop": ["<endofstring>"], // This usually can be left blank, only used with specific need from model author
"frequency_penalty": 0,
"presence_penalty": 0,
"temperature": 0.7,
"top_p": 0.95
},
"metadata": {}, // Defaults to {}
"assets": [
// Defaults to current dir
"file://.../zephyr-7b-q4_k_m.bin"
]
}
```
The engine parameters in the example can be found at: [Nitro's model settings](https://nitro.jan.ai/features/load-unload#table-of-parameters)
The model parameters in the example can be found at: [Nitro's model parameters](https://nitro.jan.ai/api-reference#tag/Chat-Completion)
## API Reference
Jan's Model API is compatible with [OpenAI's Models API](https://platform.openai.com/docs/api-reference/models), with additional methods for managing and running models locally.
See [Jan Models API](https://jan.ai/api-reference#tag/Models).
## Importing Models
:::caution
This is currently under development.
:::
You can import a model by dragging the model binary or gguf file into the `/models` folder.
- Jan automatically generates a corresponding `model.json` file based on the binary filename.
- Jan automatically organizes it into its own `/models/model-id` folder.
- Jan automatically populates the `model.json` properties, which you can subsequently modify.

View File

@ -1,19 +0,0 @@
---
title: Prompts
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
- [ ] /prompts folder
- [ ] How to add to prompts
- [ ] Assistants can have suggested Prompts

View File

@ -1,77 +0,0 @@
---
title: Threads
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
:::caution
This is currently under development.
:::
## Overview
`Threads` are conversations between an `assistant` and the user:
- Users can tweak `model` params and `assistant` behavior within each thread.
- Users can import and export threads.
- An [OpenAI Thread API](https://platform.openai.com/docs/api-reference/threads) compatible endpoint at `localhost:1337/v1/threads`.
## Folder Structure
- Threads are saved in the `/threads` folder.
- Threads are organized by folders, one for each thread, and can be easily zipped, exported, and cleared.
- Thread folders follow the naming: `assistant_id` + `thread_created_at`.
- Thread folders also contain `messages.jsonl` files. See [messages](/docs/engineering/messages).
```yaml
janroot/
threads/
assistant_name_unix_timestamp/ # Thread `ID`
thread.json
```
## `thread.json`
- Each `thread` folder contains a `thread.json` file, which is a representation of a thread.
- `thread.json` contains metadata and model parameter overrides.
- There are no required fields.
### Example
Here's a standard example `thread.json` for a conversation between the user and the default Jan assistant.
```js
"id": "thread_....", // Defaults to foldername
"object": "thread", // Defaults to "thread"
"title": "funny physics joke", // Defaults to ""
"assistants": [
{
"assistant_id": "jan", // Defaults to "jan"
"model": { // Defaults to the currently active model (can be changed before thread is begun)
"id": "...",
"settings": {}, // Defaults to and overrides assistant.json's "settings" (and if none, then model.json "settings")
"parameters": {}, // Defaults to and overrides assistant.json's "parameters" (and if none, then model.json "parameters")
}
},
],
"created": 1231231 // Defaults to file creation time
"metadata": {}, // Defaults to {}
```
## API Reference
Jan's Threads API is compatible with [OpenAI's Threads API](https://platform.openai.com/docs/api-reference/threads), with additional methods for managing threads locally.
See [Jan Threads API](https://jan.ai/api-reference#tag/Threads).

View File

@ -1,22 +0,0 @@
---
title: Product Specs
slug: /developer/product
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
spec,
product,
]
---
import DocCardList from "@theme/DocCardList";
<DocCardList className="DocCardList--no-description" />

View File

@ -1,28 +0,0 @@
---
title: Chat
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
## Overview
A home screen for users to chat with [assistants](/docs/engineering/assistants) via conversation [threads](/docs/engineering/threads).
![alt text](../img/chat-screen.png)
## User Stories
<!-- Can also be used as a QA Checklist -->
- Users can chat with `Jan` the default assistant
- Users can customize chat settings like model parameters via both the GUI & `thread.json`

View File

@ -1,29 +0,0 @@
---
title: Hub
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
## Overview
The Hub is like a store for everything, where users can discover and download models, assistants, and more.
![alt text](../img/hub-screen.png)
## User Stories
<!-- Can also be used as a QA Checklist -->
- Users can discover recommended models (Jan ships with a few preconfigured `model.json` files)
- Users can download models suitable for their devices, e.g. compatible with their RAM
- Users can download models via a HuggingFace URL (coming soon)

View File

@ -1,27 +0,0 @@
---
title: Jan (The Default Assistant)
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
Jan ships with a default assistant "Jan" that lets users chat with any open source model out-of-the-box.
This assistant is defined in `/jan`. It is a generic assistant that illustrates the power of Jan. In the future, it will support additional features, e.g. multi-assistant conversations.
- Your Assistant "Jan" lets you pick any model that is in the root /models folder
- Right panel: pick LLM model and set model parameters
- Jan's threads will be at root level
- `model.json` will reflect model chosen for that session
- Be able to “add” other assistants in the future
- Jan's files will be at thread level
- Jan is not a persistent memory assistant

View File

@ -1,43 +0,0 @@
---
title: Settings
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
## Overview
A settings page for users to add extensions, configure model settings, change app appearance, add keyboard shortcuts, and a plethora of other personalizations.
![alt text](../img/settings-screen.png)
## User Stories
<!-- Can also be used as a QA Checklist -->
### General Settings
- Users can customize `port` number
- Users can customize `janroot` folder location
### Extensions Settings
- Users can add, delete, and configure extensions
### Model Settings
- Users can configure default model parameters and settings
- Users can delete models
### Appearance
- Users can set color themes and dark/light modes

View File

@ -1,28 +0,0 @@
---
title: System Monitor
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
## Overview
An activity screen to monitor system health and running models.
![alt text](../img/system-screen.png)
## User Stories
<!-- Can also be used as a QA Checklist -->
- Users can see disk and RAM utilization
- Users can start and stop models based on system health

View File

@ -1,110 +0,0 @@
---
title: Framework
slug: /developer/framework/
description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
The following low-level docs are aimed at core contributors.
We cover how to contribute to the core framework (aka the `Core SDK`).
:::tip
If you are interested to **build on top of the framework**, like creating assistants or adding app level extensions, please refer to [developer docs](/developer) instead.
:::
## Jan Framework
At its core, Jan is a **cross-platform, local-first and AI native framework** that can be used to build anything.
### Extensions
Ultimately, we aim for a `VSCode` or `Obsidian` like SDK that allows **devs to build and customize complex and ethical AI applications for any use case**, in less than 30 minutes.
In fact, the current Jan [Desktop Client](https://jan.ai/) is actually just a specific set of extensions & integrations built on top of this framework.
![Desktop is Extensions](./assets/ExtensionCallouts.png)
:::tip
We encourage devs to fork, customize, and open source your improvements for the greater community.
:::
### Cross Platform
Jan follows [Clean Architecture](https://blog.cleancoder.com/uncle-bob/2012/08/13/the-clean-architecture.html) to the best of our ability. Though leaky abstractions remain (we're a fast moving, open source codebase), we do our best to build an SDK that allows devs to **build once, deploy everywhere.**
![Clean Architecture](./assets/CleanArchitecture.jpg)
**Supported Runtimes:**
- `Node Native Runtime`, good for server side apps
- `Electron Chromium`, good for Desktop Native apps
- `Capacitor`, good for Mobile apps (planned, not built yet)
- `Python Runtime`, good for MLOps workflows (planned, not built yet)
**Supported OS & Architectures:**
- Mac Intel & Silicon
- Windows
- Linux (through AppImage)
- Nvidia GPUs
- AMD ROCm (coming soon)
Read more:
- [Code Entrypoint](https://github.com/janhq/jan/tree/main/core)
- [Dependency Inversion](https://en.wikipedia.org/wiki/Dependency_inversion_principle)
### Local First
Jan's data persistence happens on the user's local filesystem.
We implemented abstractions on top of `fs` and other core modules in an opinionated way, s.t. user data is saved in a folder-based framework that lets users easily package, export, and manage their data.
Future endeavors on this front include cross device syncing, multi user experience, and more.
Long term, we want to integrate with folks working on [CRDTs](https://www.inkandswitch.com/local-first/), e.g. [Socket Runtime](https://www.theregister.com/2023/04/11/socket_runtime/) to deeply enable local-first software.
Read more:
- [Folder-based wrappers entrypoint](https://github.com/janhq/jan/blob/main/core/src/fs.ts)
- [Piping Node modules across infrastructures](https://github.com/janhq/jan/tree/main/core/src/node)
:::caution
Our local first approach at the moment needs a lot of work. Please don't hesitate to refactor as you make your way through the codebase.
:::
### AI Native
We believe all software applications can be natively supercharged with AI primitives and embedded AI servers.
Including:
- OpenAI Compatible AI [types](https://github.com/janhq/jan/tree/main/core/src/types) and [core extensions](https://github.com/janhq/jan/tree/main/core/src/extensions) to support common functionality like making an inference call.
- Multiple inference engines through [extensions, integrations & wrappers](https://github.com/janhq/jan/tree/main/extensions/inference-nitro-extension). _Here we'd like to thank the folks behind [llamacpp](https://github.com/ggerganov/llama.cpp) and [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM), to whom we'll continue to contribute commits & fixes upstream._
- [Code Entrypoint](https://github.com/janhq/jan/tree/main/core/src/api)
## Fun Project Ideas
Beyond the current Jan client and UX, the Core SDK can be used to build many other AI-powered and privacy preserving applications.
- `Game engine`: For AI enabled character games, procedural generation games
- `Health app`: For a personal healthcare app that improves habits
- Got ideas? Make a PR into this docs page!
If you are interested to tackle these issues, or have suggestions for integrations and other OSS tools we can use, please hit us up in [Discord](https://discord.gg/5rQ2zTv3be).
:::caution
Our open source license is copyleft, which means we encourage forks to stay open source, and allow core contributors to merge things upstream.
:::

Binary file not shown.

Before

Width:  |  Height:  |  Size: 105 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 402 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 172 KiB

Some files were not shown because too many files have changed in this diff Show More