Merge branch 'dev' into patch-1

This commit is contained in:
Henry 2024-02-16 08:07:11 +07:00 committed by GitHub
commit 379a41f08e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
264 changed files with 6645 additions and 3956 deletions

View File

@ -1,4 +1,4 @@
{
"name": "jan",
"image": "node:20"
}
"name": "jan",
"image": "node:20"
}

View File

@ -55,10 +55,10 @@ jobs:
steps:
- name: install-aws-cli-action
uses: unfor19/install-aws-cli-action@v1
- name: Delete object older than 7 days
- name: Delete object older than 10 days
run: |
# Get the list of objects in the 'latest' folder
OBJECTS=$(aws s3api list-objects --bucket ${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }} --query 'Contents[?LastModified<`'$(date -d "$current_date -30 days" -u +"%Y-%m-%dT%H:%M:%SZ")'`].{Key: Key}' --endpoint-url https://${{ secrets.CLOUDFLARE_ACCOUNT_ID }}.r2.cloudflarestorage.com | jq -c .)
OBJECTS=$(aws s3api list-objects --bucket ${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }} --query 'Contents[?LastModified<`'$(date -d "$current_date -10 days" -u +"%Y-%m-%dT%H:%M:%SZ")'`].{Key: Key}' --endpoint-url https://${{ secrets.CLOUDFLARE_ACCOUNT_ID }}.r2.cloudflarestorage.com | jq -c .)
# Create a JSON file for the delete operation
echo "{\"Objects\": $OBJECTS, \"Quiet\": false}" > delete.json

View File

@ -1,5 +1,6 @@
name: Jan Electron Linter & Test
on:
workflow_dispatch:
push:
branches:
- main

View File

@ -1,39 +1,61 @@
FROM node:20-bullseye AS base
FROM node:20-bookworm AS base
# 1. Install dependencies only when needed
FROM base AS deps
FROM base AS builder
# Install g++ 11
RUN apt update && apt install -y gcc-11 g++-11 cpp-11 jq xsel && rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Install dependencies based on the preferred package manager
COPY package.json yarn.lock* package-lock.json* pnpm-lock.yaml* ./
RUN yarn install
COPY . ./
RUN export NITRO_VERSION=$(cat extensions/inference-nitro-extension/bin/version.txt) && \
jq --arg nitroVersion $NITRO_VERSION '(.scripts."downloadnitro:linux" | gsub("\\${NITRO_VERSION}"; $nitroVersion)) | gsub("\r"; "")' extensions/inference-nitro-extension/package.json > /tmp/newcommand.txt && export NEW_COMMAND=$(sed 's/^"//;s/"$//' /tmp/newcommand.txt) && jq --arg newCommand "$NEW_COMMAND" '.scripts."downloadnitro:linux" = $newCommand' extensions/inference-nitro-extension/package.json > /tmp/package.json && mv /tmp/package.json extensions/inference-nitro-extension/package.json
RUN make install-and-build
RUN yarn workspace jan-web install
RUN export NODE_ENV=production && yarn workspace jan-web build
# # 2. Rebuild the source code only when needed
FROM base AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .
# This will do the trick, use the corresponding env file for each environment.
RUN yarn workspace server install
RUN yarn server:prod
# 3. Production image, copy all the files and run next
FROM base AS runner
# Install g++ 11
RUN apt update && apt install -y gcc-11 g++-11 cpp-11 jq xsel && rm -rf /var/lib/apt/lists/*
WORKDIR /app
ENV NODE_ENV=production
# Copy the package.json and yarn.lock of root yarn space to leverage Docker cache
COPY --from=builder /app/package.json ./package.json
COPY --from=builder /app/node_modules ./node_modules/
COPY --from=builder /app/yarn.lock ./yarn.lock
# RUN addgroup -g 1001 -S nodejs;
COPY --from=builder /app/server/build ./
# Copy the package.json, yarn.lock, and build output of server yarn space to leverage Docker cache
COPY --from=builder /app/core ./core/
COPY --from=builder /app/server ./server/
RUN cd core && yarn install && yarn run build
RUN yarn workspace @janhq/server install && yarn workspace @janhq/server build
COPY --from=builder /app/docs/openapi ./docs/openapi/
# Automatically leverage output traces to reduce image size
# https://nextjs.org/docs/advanced-features/output-file-tracing
COPY --from=builder /app/server/node_modules ./node_modules
COPY --from=builder /app/server/package.json ./package.json
# Copy pre-install dependencies
COPY --from=builder /app/pre-install ./pre-install/
EXPOSE 4000 3928
# Copy the package.json, yarn.lock, and output of web yarn space to leverage Docker cache
COPY --from=builder /app/web/out ./web/out/
COPY --from=builder /app/web/.next ./web/.next/
COPY --from=builder /app/web/package.json ./web/package.json
COPY --from=builder /app/web/yarn.lock ./web/yarn.lock
COPY --from=builder /app/models ./models/
ENV PORT 4000
ENV APPDATA /app/data
RUN npm install -g serve@latest
CMD ["node", "main.js"]
EXPOSE 1337 3000 3928
ENV JAN_API_HOST 0.0.0.0
ENV JAN_API_PORT 1337
CMD ["sh", "-c", "cd server && node build/main.js & cd web && npx serve out"]
# docker build -t jan .
# docker run -p 1337:1337 -p 3000:3000 -p 3928:3928 jan

88
Dockerfile.gpu Normal file
View File

@ -0,0 +1,88 @@
# Please change the base image to the appropriate CUDA version based on NVIDIA Driver Compatibility
# Run nvidia-smi to check the CUDA version and the corresponding driver version
# Then update the base image to the appropriate CUDA version refer https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda/tags
FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 AS base
# 1. Install dependencies only when needed
FROM base AS builder
# Install g++ 11
RUN apt update && apt install -y gcc-11 g++-11 cpp-11 jq xsel curl gnupg make python3-dev && curl -sL https://deb.nodesource.com/setup_20.x | bash - && apt install nodejs -y && rm -rf /var/lib/apt/lists/*
# Update alternatives for GCC and related tools
RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-11 110 \
--slave /usr/bin/g++ g++ /usr/bin/g++-11 \
--slave /usr/bin/gcov gcov /usr/bin/gcov-11 \
--slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-11 \
--slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-11 && \
update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-11 110
RUN npm install -g yarn
WORKDIR /app
# Install dependencies based on the preferred package manager
COPY . ./
RUN export NITRO_VERSION=$(cat extensions/inference-nitro-extension/bin/version.txt) && \
jq --arg nitroVersion $NITRO_VERSION '(.scripts."downloadnitro:linux" | gsub("\\${NITRO_VERSION}"; $nitroVersion)) | gsub("\r"; "")' extensions/inference-nitro-extension/package.json > /tmp/newcommand.txt && export NEW_COMMAND=$(sed 's/^"//;s/"$//' /tmp/newcommand.txt) && jq --arg newCommand "$NEW_COMMAND" '.scripts."downloadnitro:linux" = $newCommand' extensions/inference-nitro-extension/package.json > /tmp/package.json && mv /tmp/package.json extensions/inference-nitro-extension/package.json
RUN make install-and-build
RUN yarn workspace jan-web install
RUN export NODE_ENV=production && yarn workspace jan-web build
# # 2. Rebuild the source code only when needed
FROM base AS runner
# Install g++ 11
RUN apt update && apt install -y gcc-11 g++-11 cpp-11 jq xsel curl gnupg make python3-dev && curl -sL https://deb.nodesource.com/setup_20.x | bash - && apt-get install nodejs -y && rm -rf /var/lib/apt/lists/*
# Update alternatives for GCC and related tools
RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-11 110 \
--slave /usr/bin/g++ g++ /usr/bin/g++-11 \
--slave /usr/bin/gcov gcov /usr/bin/gcov-11 \
--slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-11 \
--slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-11 && \
update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-11 110
RUN npm install -g yarn
WORKDIR /app
# Copy the package.json and yarn.lock of root yarn space to leverage Docker cache
COPY --from=builder /app/package.json ./package.json
COPY --from=builder /app/node_modules ./node_modules/
COPY --from=builder /app/yarn.lock ./yarn.lock
# Copy the package.json, yarn.lock, and build output of server yarn space to leverage Docker cache
COPY --from=builder /app/core ./core/
COPY --from=builder /app/server ./server/
RUN cd core && yarn install && yarn run build
RUN yarn workspace @janhq/server install && yarn workspace @janhq/server build
COPY --from=builder /app/docs/openapi ./docs/openapi/
# Copy pre-install dependencies
COPY --from=builder /app/pre-install ./pre-install/
# Copy the package.json, yarn.lock, and output of web yarn space to leverage Docker cache
COPY --from=builder /app/web/out ./web/out/
COPY --from=builder /app/web/.next ./web/.next/
COPY --from=builder /app/web/package.json ./web/package.json
COPY --from=builder /app/web/yarn.lock ./web/yarn.lock
COPY --from=builder /app/models ./models/
RUN npm install -g serve@latest
EXPOSE 1337 3000 3928
ENV LD_LIBRARY_PATH=/usr/local/cuda/targets/x86_64-linux/lib:/usr/local/cuda-12.0/compat${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
ENV JAN_API_HOST 0.0.0.0
ENV JAN_API_PORT 1337
CMD ["sh", "-c", "cd server && node build/main.js & cd web && npx serve out"]
# pre-requisites: nvidia-docker
# docker build -t jan-gpu . -f Dockerfile.gpu
# docker run -p 1337:1337 -p 3000:3000 -p 3928:3928 --gpus all jan-gpu

View File

@ -24,9 +24,9 @@ endif
check-file-counts: install-and-build
ifeq ($(OS),Windows_NT)
powershell -Command "if ((Get-ChildItem -Path electron/pre-install -Filter *.tgz | Measure-Object | Select-Object -ExpandProperty Count) -ne (Get-ChildItem -Path extensions -Directory | Measure-Object | Select-Object -ExpandProperty Count)) { Write-Host 'Number of .tgz files in electron/pre-install does not match the number of subdirectories in extension'; exit 1 } else { Write-Host 'Extension build successful' }"
powershell -Command "if ((Get-ChildItem -Path pre-install -Filter *.tgz | Measure-Object | Select-Object -ExpandProperty Count) -ne (Get-ChildItem -Path extensions -Directory | Measure-Object | Select-Object -ExpandProperty Count)) { Write-Host 'Number of .tgz files in pre-install does not match the number of subdirectories in extension'; exit 1 } else { Write-Host 'Extension build successful' }"
else
@tgz_count=$$(find electron/pre-install -type f -name "*.tgz" | wc -l); dir_count=$$(find extensions -mindepth 1 -maxdepth 1 -type d | wc -l); if [ $$tgz_count -ne $$dir_count ]; then echo "Number of .tgz files in electron/pre-install ($$tgz_count) does not match the number of subdirectories in extension ($$dir_count)"; exit 1; else echo "Extension build successful"; fi
@tgz_count=$$(find pre-install -type f -name "*.tgz" | wc -l); dir_count=$$(find extensions -mindepth 1 -maxdepth 1 -type d | wc -l); if [ $$tgz_count -ne $$dir_count ]; then echo "Number of .tgz files in pre-install ($$tgz_count) does not match the number of subdirectories in extension ($$dir_count)"; exit 1; else echo "Extension build successful"; fi
endif
dev: check-file-counts

109
README.md
View File

@ -43,31 +43,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
<tr style="text-align:center">
<td style="text-align:center"><b>Stable (Recommended)</b></td>
<td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.5/jan-win-x64-0.4.5.exe'>
<a href='https://github.com/janhq/jan/releases/download/v0.4.6/jan-win-x64-0.4.6.exe'>
<img src='./docs/static/img/windows.png' style="height:14px; width: 14px" />
<b>jan.exe</b>
</a>
</td>
<td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.5/jan-mac-x64-0.4.5.dmg'>
<a href='https://github.com/janhq/jan/releases/download/v0.4.6/jan-mac-x64-0.4.6.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>Intel</b>
</a>
</td>
<td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.5/jan-mac-arm64-0.4.5.dmg'>
<a href='https://github.com/janhq/jan/releases/download/v0.4.6/jan-mac-arm64-0.4.6.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>M1/M2</b>
</a>
</td>
<td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.5/jan-linux-amd64-0.4.5.deb'>
<a href='https://github.com/janhq/jan/releases/download/v0.4.6/jan-linux-amd64-0.4.6.deb'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.deb</b>
</a>
</td>
<td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.5/jan-linux-x86_64-0.4.5.AppImage'>
<a href='https://github.com/janhq/jan/releases/download/v0.4.6/jan-linux-x86_64-0.4.6.AppImage'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.AppImage</b>
</a>
@ -76,31 +76,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
<tr style="text-align:center">
<td style="text-align:center"><b>Experimental (Nightly Build)</b></td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-win-x64-0.4.5-216.exe'>
<a href='https://delta.jan.ai/latest/jan-win-x64-0.4.6-271.exe'>
<img src='./docs/static/img/windows.png' style="height:14px; width: 14px" />
<b>jan.exe</b>
</a>
</td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-mac-x64-0.4.5-216.dmg'>
<a href='https://delta.jan.ai/latest/jan-mac-x64-0.4.6-271.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>Intel</b>
</a>
</td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-mac-arm64-0.4.5-216.dmg'>
<a href='https://delta.jan.ai/latest/jan-mac-arm64-0.4.6-271.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>M1/M2</b>
</a>
</td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-linux-amd64-0.4.5-216.deb'>
<a href='https://delta.jan.ai/latest/jan-linux-amd64-0.4.6-271.deb'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.deb</b>
</a>
</td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-linux-x86_64-0.4.5-216.AppImage'>
<a href='https://delta.jan.ai/latest/jan-linux-x86_64-0.4.6-271.AppImage'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.AppImage</b>
</a>
@ -167,6 +167,7 @@ To reset your installation:
- Clear Application cache in `~/Library/Caches/jan`
## Requirements for running Jan
- MacOS: 13 or higher
- Windows:
- Windows 10 or higher
@ -194,17 +195,17 @@ Contributions are welcome! Please read the [CONTRIBUTING.md](CONTRIBUTING.md) fi
1. **Clone the repository and prepare:**
```bash
git clone https://github.com/janhq/jan
cd jan
git checkout -b DESIRED_BRANCH
```
```bash
git clone https://github.com/janhq/jan
cd jan
git checkout -b DESIRED_BRANCH
```
2. **Run development and use Jan Desktop**
```bash
make dev
```
```bash
make dev
```
This will start the development server and open the desktop app.
@ -218,6 +219,78 @@ make build
This will build the app MacOS m1/m2 for production (with code signing already done) and put the result in `dist` folder.
### Docker mode
- Supported OS: Linux, WSL2 Docker
- Pre-requisites:
- Docker Engine and Docker Compose are required to run Jan in Docker mode. Follow the [instructions](https://docs.docker.com/engine/install/ubuntu/) below to get started with Docker Engine on Ubuntu.
```bash
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh ./get-docker.sh --dry-run
```
- If you intend to run Jan in GPU mode, you need to install `nvidia-driver` and `nvidia-docker2`. Follow the instruction [here](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) for installation.
- Run Jan in Docker mode
- **Option 1**: Run Jan in CPU mode
```bash
docker compose --profile cpu up -d
```
- **Option 2**: Run Jan in GPU mode
- **Step 1**: Check CUDA compatibility with your NVIDIA driver by running `nvidia-smi` and check the CUDA version in the output
```bash
nvidia-smi
# Output
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 531.18 Driver Version: 531.18 CUDA Version: 12.1 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA GeForce RTX 4070 Ti WDDM | 00000000:01:00.0 On | N/A |
| 0% 44C P8 16W / 285W| 1481MiB / 12282MiB | 2% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 1 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:02:00.0 Off | N/A |
| 0% 49C P8 14W / 120W| 0MiB / 6144MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 2 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:05:00.0 Off | N/A |
| 29% 38C P8 11W / 120W| 0MiB / 6144MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
```
- **Step 2**: Visit [NVIDIA NGC Catalog ](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda/tags) and find the smallest minor version of image tag that matches your CUDA version (e.g., 12.1 -> 12.1.0)
- **Step 3**: Update the `Dockerfile.gpu` line number 5 with the latest minor version of the image tag from step 2 (e.g. change `FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 AS base` to `FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 AS base`)
- **Step 4**: Run command to start Jan in GPU mode
```bash
# GPU mode
docker compose --profile gpu up -d
```
This will start the web server and you can access Jan at `http://localhost:3000`.
> Note: Currently, Docker mode only works for development and localhost; production is not supported yet. The RAG feature is also not yet supported in Docker mode.
## Acknowledgements
Jan builds on top of other open-source projects:

View File

@ -4,4 +4,4 @@ module.exports = {
moduleNameMapper: {
'@/(.*)': '<rootDir>/src/$1',
},
}
}

View File

@ -57,6 +57,7 @@
"rollup-plugin-typescript2": "^0.36.0",
"ts-jest": "^26.1.1",
"tslib": "^2.6.2",
"typescript": "^5.2.2"
"typescript": "^5.2.2",
"rimraf": "^3.0.2"
}
}

View File

@ -54,7 +54,8 @@ export default [
'url',
'http',
'os',
'util'
'util',
'child_process',
],
watch: {
include: 'src/node/**',

View File

@ -1,15 +1,22 @@
/**
 * Native Route APIs
 * @description Enum of the routes handled natively by the desktop app
 * (kept separate from the shared core routes such as AppRoute).
 */
export enum NativeRoute {
  openExternalUrl = 'openExternalUrl',
  openAppDirectory = 'openAppDirectory',
  // NOTE(review): member name "openFileExplore" vs value 'openFileExplorer' — kept as-is; renaming would break existing callers.
  openFileExplore = 'openFileExplorer',
  selectDirectory = 'selectDirectory',
  relaunch = 'relaunch',
}
/**
* App Route APIs
* @description Enum of all the routes exposed by the app
*/
export enum AppRoute {
openExternalUrl = 'openExternalUrl',
openAppDirectory = 'openAppDirectory',
openFileExplore = 'openFileExplorer',
selectDirectory = 'selectDirectory',
getAppConfigurations = 'getAppConfigurations',
updateAppConfiguration = 'updateAppConfiguration',
relaunch = 'relaunch',
joinPath = 'joinPath',
isSubdirectory = 'isSubdirectory',
baseName = 'baseName',
@ -30,6 +37,7 @@ export enum DownloadRoute {
downloadFile = 'downloadFile',
pauseDownload = 'pauseDownload',
resumeDownload = 'resumeDownload',
getDownloadProgress = 'getDownloadProgress',
}
export enum DownloadEvent {
@ -68,6 +76,10 @@ export enum FileManagerRoute {
export type ApiFunction = (...args: any[]) => any
export type NativeRouteFunctions = {
[K in NativeRoute]: ApiFunction
}
export type AppRouteFunctions = {
[K in AppRoute]: ApiFunction
}
@ -96,7 +108,8 @@ export type FileManagerRouteFunctions = {
[K in FileManagerRoute]: ApiFunction
}
export type APIFunctions = AppRouteFunctions &
export type APIFunctions = NativeRouteFunctions &
AppRouteFunctions &
AppEventFunctions &
DownloadRouteFunctions &
DownloadEventFunctions &
@ -104,11 +117,13 @@ export type APIFunctions = AppRouteFunctions &
FileSystemRouteFunctions &
FileManagerRoute
export const APIRoutes = [
export const CoreRoutes = [
...Object.values(AppRoute),
...Object.values(DownloadRoute),
...Object.values(ExtensionRoute),
...Object.values(FileSystemRoute),
...Object.values(FileManagerRoute),
]
export const APIRoutes = [...CoreRoutes, ...Object.values(NativeRoute)]
export const APIEvents = [...Object.values(AppEvent), ...Object.values(DownloadEvent)]

View File

@ -1,13 +1,13 @@
export enum ExtensionTypeEnum {
Assistant = "assistant",
Conversational = "conversational",
Inference = "inference",
Model = "model",
SystemMonitoring = "systemMonitoring",
Assistant = 'assistant',
Conversational = 'conversational',
Inference = 'inference',
Model = 'model',
SystemMonitoring = 'systemMonitoring',
}
export interface ExtensionType {
type(): ExtensionTypeEnum | undefined;
type(): ExtensionTypeEnum | undefined
}
/**
* Represents a base extension.
@ -20,16 +20,16 @@ export abstract class BaseExtension implements ExtensionType {
* Undefined means its not extending any known extension by the application.
*/
type(): ExtensionTypeEnum | undefined {
return undefined;
return undefined
}
/**
* Called when the extension is loaded.
* Any initialization logic for the extension should be put here.
*/
abstract onLoad(): void;
abstract onLoad(): void
/**
* Called when the extension is unloaded.
* Any cleanup logic for the extension should be put here.
*/
abstract onUnload(): void;
abstract onUnload(): void
}

View File

@ -1,5 +1,5 @@
import { Assistant, AssistantInterface } from "../index";
import { BaseExtension, ExtensionTypeEnum } from "../extension";
import { Assistant, AssistantInterface } from '../index'
import { BaseExtension, ExtensionTypeEnum } from '../extension'
/**
* Assistant extension for managing assistants.
@ -10,10 +10,10 @@ export abstract class AssistantExtension extends BaseExtension implements Assist
* Assistant extension type.
*/
type(): ExtensionTypeEnum | undefined {
return ExtensionTypeEnum.Assistant;
return ExtensionTypeEnum.Assistant
}
abstract createAssistant(assistant: Assistant): Promise<void>;
abstract deleteAssistant(assistant: Assistant): Promise<void>;
abstract getAssistants(): Promise<Assistant[]>;
abstract createAssistant(assistant: Assistant): Promise<void>
abstract deleteAssistant(assistant: Assistant): Promise<void>
abstract getAssistants(): Promise<Assistant[]>
}

View File

@ -14,7 +14,7 @@ export abstract class ConversationalExtension
* Conversation extension type.
*/
type(): ExtensionTypeEnum | undefined {
return ExtensionTypeEnum.Conversational;
return ExtensionTypeEnum.Conversational
}
abstract getThreads(): Promise<Thread[]>

View File

@ -2,24 +2,24 @@
* Conversational extension. Persists and retrieves conversations.
* @module
*/
export { ConversationalExtension } from "./conversational";
export { ConversationalExtension } from './conversational'
/**
* Inference extension. Start, stop and inference models.
*/
export { InferenceExtension } from "./inference";
export { InferenceExtension } from './inference'
/**
* Monitoring extension for system monitoring.
*/
export { MonitoringExtension } from "./monitoring";
export { MonitoringExtension } from './monitoring'
/**
* Assistant extension for managing assistants.
*/
export { AssistantExtension } from "./assistant";
export { AssistantExtension } from './assistant'
/**
* Model extension for managing models.
*/
export { ModelExtension } from "./model";
export { ModelExtension } from './model'

View File

@ -1,5 +1,5 @@
import { InferenceInterface, MessageRequest, ThreadMessage } from "../index";
import { BaseExtension, ExtensionTypeEnum } from "../extension";
import { InferenceInterface, MessageRequest, ThreadMessage } from '../index'
import { BaseExtension, ExtensionTypeEnum } from '../extension'
/**
* Inference extension. Start, stop and inference models.
@ -9,8 +9,8 @@ export abstract class InferenceExtension extends BaseExtension implements Infere
* Inference extension type.
*/
type(): ExtensionTypeEnum | undefined {
return ExtensionTypeEnum.Inference;
return ExtensionTypeEnum.Inference
}
abstract inference(data: MessageRequest): Promise<ThreadMessage>;
abstract inference(data: MessageRequest): Promise<ThreadMessage>
}

View File

@ -1,5 +1,5 @@
import { BaseExtension, ExtensionTypeEnum } from "../extension";
import { Model, ModelInterface } from "../index";
import { BaseExtension, ExtensionTypeEnum } from '../extension'
import { Model, ModelInterface } from '../index'
/**
* Model extension for managing models.
@ -9,16 +9,16 @@ export abstract class ModelExtension extends BaseExtension implements ModelInter
* Model extension type.
*/
type(): ExtensionTypeEnum | undefined {
return ExtensionTypeEnum.Model;
return ExtensionTypeEnum.Model
}
abstract downloadModel(
model: Model,
network?: { proxy: string; ignoreSSL?: boolean },
): Promise<void>;
abstract cancelModelDownload(modelId: string): Promise<void>;
abstract deleteModel(modelId: string): Promise<void>;
abstract saveModel(model: Model): Promise<void>;
abstract getDownloadedModels(): Promise<Model[]>;
abstract getConfiguredModels(): Promise<Model[]>;
network?: { proxy: string; ignoreSSL?: boolean }
): Promise<void>
abstract cancelModelDownload(modelId: string): Promise<void>
abstract deleteModel(modelId: string): Promise<void>
abstract saveModel(model: Model): Promise<void>
abstract getDownloadedModels(): Promise<Model[]>
abstract getConfiguredModels(): Promise<Model[]>
}

View File

@ -1,5 +1,5 @@
import { BaseExtension, ExtensionTypeEnum } from "../extension";
import { MonitoringInterface } from "../index";
import { BaseExtension, ExtensionTypeEnum } from '../extension'
import { MonitoringInterface } from '../index'
/**
* Monitoring extension for system monitoring.
@ -10,9 +10,9 @@ export abstract class MonitoringExtension extends BaseExtension implements Monit
* Monitoring extension type.
*/
type(): ExtensionTypeEnum | undefined {
return ExtensionTypeEnum.SystemMonitoring;
return ExtensionTypeEnum.SystemMonitoring
}
abstract getResourcesInfo(): Promise<any>;
abstract getCurrentLoad(): Promise<any>;
abstract getResourcesInfo(): Promise<any>
abstract getCurrentLoad(): Promise<any>
}

View File

@ -38,3 +38,10 @@ export * from './extension'
* @module
*/
export * from './extensions/index'
/**
* Declare global object
*/
declare global {
var core: any | undefined
}

View File

@ -0,0 +1,43 @@
import {
AppRoute,
DownloadRoute,
ExtensionRoute,
FileManagerRoute,
FileSystemRoute,
} from '../../../api'
import { Downloader } from '../processors/download'
import { FileSystem } from '../processors/fs'
import { Extension } from '../processors/extension'
import { FSExt } from '../processors/fsExt'
import { App } from '../processors/app'
/**
 * Routes an incoming request key to the processor responsible for its
 * route group (downloads, file system, extensions, file manager, app).
 */
export class RequestAdapter {
  downloader: Downloader
  fileSystem: FileSystem
  extension: Extension
  fsExt: FSExt
  app: App

  constructor(observer?: Function) {
    this.downloader = new Downloader(observer)
    this.fileSystem = new FileSystem()
    this.extension = new Extension()
    this.fsExt = new FSExt()
    this.app = new App()
  }

  // TODO: Clearer Factory pattern here
  /**
   * Dispatches `route` to the first route group that contains it and
   * returns that processor's result; returns undefined for an unknown route.
   */
  process(route: string, ...args: any) {
    // Ordered dispatch table: first enum object containing the route wins.
    const dispatch: Array<[object, { process: (route: string, ...args: any[]) => any }]> = [
      [DownloadRoute, this.downloader],
      [FileSystemRoute, this.fileSystem],
      [ExtensionRoute, this.extension],
      [FileManagerRoute, this.fsExt],
      [AppRoute, this.app],
    ]
    for (const [routes, processor] of dispatch) {
      if (route in routes) return processor.process(route, ...args)
    }
    // No matching route group: fall through (undefined), as before.
  }
}

View File

@ -0,0 +1,23 @@
import { CoreRoutes } from '../../../api'
import { RequestAdapter } from './adapter'
export type Handler = (route: string, args: any) => any
/**
 * Wires every core route to an externally supplied handler registrar,
 * delegating the actual work to a RequestAdapter.
 */
export class RequestHandler {
  handler: Handler
  // NOTE(review): property name keeps the original "adataper" (sic)
  // spelling — it is part of the public surface and callers may reference it.
  adataper: RequestAdapter

  /**
   * @param handler - registrar invoked once per core route; receives the
   *                  route name and an async listener performing the work.
   * @param observer - optional event observer forwarded to the adapter.
   */
  constructor(handler: Handler, observer?: Function) {
    this.handler = handler
    this.adataper = new RequestAdapter(observer)
  }

  /**
   * Registers every core route with the supplied handler. Each listener
   * delegates to the adapter and resolves with its result.
   */
  handle() {
    // forEach, not map: registration is done for its side effect and the
    // produced array was discarded by the original code anyway.
    CoreRoutes.forEach((route) => {
      this.handler(route, async (...args: any[]) => this.adataper.process(route, ...args))
    })
  }
}

View File

@ -1,2 +1,3 @@
export * from './HttpServer'
export * from './routes'
export * from './restful/v1'
export * from './common/handler'

View File

@ -0,0 +1,3 @@
/**
 * Base contract for request processors. Concrete implementations visible in
 * this codebase (App, Downloader, Extension) resolve the route `key` to one
 * of their own methods and invoke it with the given arguments.
 */
export abstract class Processor {
  // Dispatch `key` with `args`; the return value is processor-specific.
  abstract process(key: string, ...args: any[]): any
}

View File

@ -0,0 +1,97 @@
import { basename, isAbsolute, join, relative } from 'path'
import { AppRoute } from '../../../api'
import { Processor } from './Processor'
import { getAppConfigurations as appConfiguration, updateAppConfiguration } from '../../helper'
import { log as writeLog, logServer as writeServerLog } from '../../helper/log'
import { appResourcePath } from '../../helper/path'
/**
 * Processor for app-level routes: path utilities, logging, configuration
 * access, and starting/stopping the Jan API server.
 */
export class App implements Processor {
  // Optional event observer (currently unused by the methods below).
  observer?: Function

  constructor(observer?: Function) {
    this.observer = observer
  }

  /**
   * Dispatches a route key to the matching method on this instance.
   * Uses `call(this, ...)` so `this` stays bound inside the method —
   * the previous unbound call only worked because no method read
   * instance state.
   */
  process(key: string, ...args: any[]): any {
    const instance = this as any
    const func = instance[key]
    return func.call(this, ...args)
  }

  /**
   * Joins multiple path segments together, respecting the current OS separator.
   */
  joinPath(args: any[]) {
    return join(...args)
  }

  /**
   * Checks whether `to` resolves to a location inside the directory `from`.
   *
   * @param from - The base directory to check against.
   * @param to - The path to check.
   * @returns {boolean} true only when `to` is a strict subpath of `from`.
   */
  isSubdirectory(from: any, to: any) {
    const rel = relative(from, to)
    // Equal paths yield rel === ''; a path outside `from` relativizes to
    // something starting with '..' (or stays absolute across drives).
    return rel !== '' && !rel.startsWith('..') && !isAbsolute(rel)
  }

  /**
   * Retrieves the basename of the given path, respecting the current OS.
   */
  baseName(args: any) {
    return basename(args)
  }

  /**
   * Writes a message to the app log file.
   */
  log(args: any) {
    writeLog(args)
  }

  /**
   * Writes a message to the server log file.
   */
  logServer(args: any) {
    writeServerLog(args)
  }

  /**
   * Returns the current app configurations.
   */
  getAppConfigurations() {
    return appConfiguration()
  }

  /**
   * Persists the given app configuration changes.
   */
  async updateAppConfiguration(args: any) {
    await updateAppConfiguration(args)
  }

  /**
   * Start the Jan API Server, pointing it at the bundled OpenAPI schema.
   */
  async startServer(args?: any) {
    // Lazy require: only load the server package when actually starting it.
    const { startServer } = require('@janhq/server')
    return startServer({
      host: args?.host,
      port: args?.port,
      isCorsEnabled: args?.isCorsEnabled,
      isVerboseEnabled: args?.isVerboseEnabled,
      schemaPath: join(await appResourcePath(), 'docs', 'openapi', 'jan.yaml'),
      baseDir: join(await appResourcePath(), 'docs', 'openapi'),
    })
  }

  /**
   * Stop the Jan API Server.
   */
  stopServer() {
    const { stopServer } = require('@janhq/server')
    return stopServer()
  }
}

View File

@ -0,0 +1,106 @@
import { resolve, sep } from 'path'
import { DownloadEvent } from '../../../api'
import { normalizeFilePath } from '../../helper/path'
import { getJanDataFolderPath } from '../../helper'
import { DownloadManager } from '../../helper/download'
import { createWriteStream, renameSync } from 'fs'
import { Processor } from './Processor'
import { DownloadState } from '../../../types'
/**
 * Processor for download routes. Methods are invoked dynamically by
 * `process`, which injects the observer callback as the first argument.
 */
export class Downloader implements Processor {
  // Optional event sink; receives DownloadEvent notifications with DownloadState payloads.
  observer?: Function

  constructor(observer?: Function) {
    this.observer = observer
  }

  // Dispatch: look up the method named `key` and call it as
  // method(observer, ...args). NOTE(review): the call is unbound (`this`
  // is undefined inside the method) — safe today because the methods only
  // use the injected observer, never instance state.
  process(key: string, ...args: any[]): any {
    const instance = this as any
    const func = instance[key]
    return func(this.observer, ...args)
  }

  /**
   * Streams `url` to `localPath` (resolved against the Jan data folder),
   * writing into a `<destination>.download` temp file first and renaming
   * it into place once the transfer ends successfully.
   *
   * @param observer - event callback injected by `process`.
   * @param url - remote file URL.
   * @param localPath - destination path, normalized before use.
   * @param network - optional { proxy, ignoreSSL } settings.
   */
  downloadFile(observer: any, url: string, localPath: string, network: any) {
    // Lazy requires: request / request-progress are loaded only on use.
    const request = require('request')
    const progress = require('request-progress')
    const strictSSL = !network?.ignoreSSL
    // Only honor proxies with an http(s) scheme.
    const proxy = network?.proxy?.startsWith('http') ? network.proxy : undefined
    if (typeof localPath === 'string') {
      localPath = normalizeFilePath(localPath)
    }
    // Assumes localPath ends in <modelId>/<fileName> — the last two path
    // segments are used as identifiers. TODO(review): confirm against callers.
    const array = localPath.split(sep)
    const fileName = array.pop() ?? ''
    const modelId = array.pop() ?? ''

    const destination = resolve(getJanDataFolderPath(), localPath)
    const rq = request({ url, strictSSL, proxy })

    // Put request to download manager instance so it can be paused/resumed/aborted.
    DownloadManager.instance.setRequest(localPath, rq)

    // Downloading file to a temp file first; renamed to `destination` on success.
    const downloadingTempFile = `${destination}.download`

    progress(rq, {})
      .on('progress', (state: any) => {
        // Forward progress to the observer and cache it for getDownloadProgress.
        const downloadState: DownloadState = {
          ...state,
          modelId,
          fileName,
          downloadState: 'downloading',
        }
        console.log('progress: ', downloadState)
        observer?.(DownloadEvent.onFileDownloadUpdate, downloadState)
        DownloadManager.instance.downloadProgressMap[modelId] = downloadState
      })
      .on('error', (error: Error) => {
        const currentDownloadState = DownloadManager.instance.downloadProgressMap[modelId]
        const downloadState: DownloadState = {
          ...currentDownloadState,
          error: error.message,
          downloadState: 'error',
        }
        // Only cache the error state if a download was actually tracked.
        if (currentDownloadState) {
          DownloadManager.instance.downloadProgressMap[modelId] = downloadState
        }
        observer?.(DownloadEvent.onFileDownloadError, downloadState)
      })
      .on('end', () => {
        const currentDownloadState = DownloadManager.instance.downloadProgressMap[modelId]
        // An entry missing from networkRequests means the download was
        // aborted — in that case the temp file is left as-is.
        if (currentDownloadState && DownloadManager.instance.networkRequests[localPath]) {
          // Finished downloading, rename temp file to actual file
          renameSync(downloadingTempFile, destination)
          const downloadState: DownloadState = {
            ...currentDownloadState,
            downloadState: 'end',
          }
          observer?.(DownloadEvent.onFileDownloadSuccess, downloadState)
          DownloadManager.instance.downloadProgressMap[modelId] = downloadState
        }
      })
      .pipe(createWriteStream(downloadingTempFile))
  }

  /**
   * Aborts a tracked download. Clears the tracked request first so the
   * 'end' handler above will not rename the partial temp file; if nothing
   * is tracked under `fileName`, reports an 'aborted' error event instead.
   */
  abortDownload(observer: any, fileName: string) {
    const rq = DownloadManager.instance.networkRequests[fileName]
    if (rq) {
      DownloadManager.instance.networkRequests[fileName] = undefined
      rq?.abort()
    } else {
      observer?.(DownloadEvent.onFileDownloadError, {
        fileName,
        error: 'aborted',
      })
    }
  }

  // Resume a previously paused download stream, if one is tracked.
  resumeDownload(observer: any, fileName: any) {
    DownloadManager.instance.networkRequests[fileName]?.resume()
  }

  // Pause an in-flight download stream, if one is tracked.
  pauseDownload(observer: any, fileName: any) {
    DownloadManager.instance.networkRequests[fileName]?.pause()
  }
}

View File

@ -0,0 +1,88 @@
import { readdirSync } from 'fs'
import { join, extname } from 'path'
import { Processor } from './Processor'
import { ModuleManager } from '../../helper/module'
import { getJanExtensionsPath as getPath } from '../../helper'
import {
getActiveExtensions as getExtensions,
getExtension,
removeExtension,
installExtensions,
} from '../../extension/store'
import { appResourcePath } from '../../helper/path'
/**
 * Processor handling extension-management routes: invoking extension
 * functions, listing base/active extensions, and installing, removing or
 * updating extensions.
 */
export class Extension implements Processor {
  observer?: Function

  constructor(observer?: Function) {
    this.observer = observer
  }

  /** Dispatches a route key to the method of the same name. */
  process(key: string, ...args: any[]): any {
    const handler = (this as any)[key]
    return handler(...args)
  }

  /**
   * Loads the module at `modulePath` (relative to the extensions folder),
   * registers it with the ModuleManager, and invokes `method` with the
   * given params. Logs an error when the method does not exist.
   */
  invokeExtensionFunc(modulePath: string, method: string, ...params: any[]) {
    const extensionModule = require(join(getPath(), modulePath))
    ModuleManager.instance.setModule(modulePath, extensionModule)

    const fn = extensionModule[method]
    if (typeof fn !== 'function') {
      console.debug(fn)
      console.error(`Function "${method}" does not exist in the module.`)
      return
    }
    return fn(...params)
  }

  /**
   * Returns the paths of the base extensions.
   * @returns An array of paths to the base extensions.
   */
  async baseExtensions() {
    const preInstallDir = join(await appResourcePath(), 'pre-install')
    const tarballs = readdirSync(preInstallDir).filter((file) => extname(file) === '.tgz')
    return tarballs.map((file) => join(preInstallDir, file))
  }

  /**MARK: Extension Manager handlers */
  async installExtension(extensions: any) {
    // Install and activate all provided extensions, returning a plain
    // serializable copy of the result.
    const installed = await installExtensions(extensions)
    return JSON.parse(JSON.stringify(installed))
  }

  // Register IPC route to uninstall a extension
  async uninstallExtension(extensions: any) {
    // Uninstall every provided extension and drop it from the store.
    for (const name of extensions) {
      const extension = getExtension(name)
      await extension.uninstall()
      if (extension.name) removeExtension(extension.name)
    }
    // Reload all renderer pages if needed
    return true
  }

  // Register IPC route to update a extension
  async updateExtension(extensions: any) {
    // Update every provided extension, collecting the ones that changed.
    const updated: any[] = []
    for (const name of extensions) {
      const extension = getExtension(name)
      if (await extension.update()) updated.push(extension)
    }
    // Reload all renderer pages if needed
    return JSON.parse(JSON.stringify(updated))
  }

  /** Returns a serializable copy of all active extensions. */
  getActiveExtensions() {
    return JSON.parse(JSON.stringify(getExtensions()))
  }
}

View File

@ -0,0 +1,25 @@
import { join } from 'path'
import { normalizeFilePath } from '../../helper/path'
import { getJanDataFolderPath } from '../../helper'
import { Processor } from './Processor'
/**
 * Processor that proxies filesystem routes straight onto Node's `fs`
 * module, rewriting `file:`-prefixed string arguments into absolute
 * paths inside the Jan data folder before forwarding.
 */
export class FileSystem implements Processor {
  observer?: Function
  private static moduleName = 'fs'

  constructor(observer?: Function) {
    this.observer = observer
  }

  process(route: string, ...args: any[]): any {
    // Resolve `file:`-style string args against the data folder; pass
    // every other argument through untouched.
    const resolveArg = (arg: any) =>
      typeof arg === 'string' && (arg.startsWith(`file:/`) || arg.startsWith(`file:\\`))
        ? join(getJanDataFolderPath(), normalizeFilePath(arg))
        : arg

    return import(FileSystem.moduleName).then((mdl) => mdl[route](...args.map(resolveArg)))
  }
}

View File

@ -0,0 +1,78 @@
import { join } from 'path'
import fs from 'fs'
import { FileManagerRoute } from '../../../api'
import { appResourcePath, normalizeFilePath } from '../../helper/path'
import { getJanDataFolderPath, getJanDataFolderPath as getPath } from '../../helper'
import { Processor } from './Processor'
import { FileStat } from '../../../types'
export class FSExt implements Processor {
observer?: Function
constructor(observer?: Function) {
this.observer = observer
}
process(key: string, ...args: any): any {
const instance = this as any
const func = instance[key]
return func(...args)
}
// Handles the 'syncFile' IPC event. This event is triggered to synchronize a file from a source path to a destination path.
syncFile(src: string, dest: string) {
const reflect = require('@alumna/reflect')
return reflect({
src,
dest,
recursive: true,
delete: false,
overwrite: true,
errorOnExist: false,
})
}
// Handles the 'getJanDataFolderPath' IPC event. This event is triggered to get the user space path.
getJanDataFolderPath() {
return Promise.resolve(getPath())
}
// Handles the 'getResourcePath' IPC event. This event is triggered to get the resource path.
getResourcePath() {
return appResourcePath()
}
// Handles the 'getUserHomePath' IPC event. This event is triggered to get the user home path.
getUserHomePath() {
return process.env[process.platform == 'win32' ? 'USERPROFILE' : 'HOME']
}
// handle fs is directory here
fileStat(path: string) {
const normalizedPath = normalizeFilePath(path)
const fullPath = join(getJanDataFolderPath(), normalizedPath)
const isExist = fs.existsSync(fullPath)
if (!isExist) return undefined
const isDirectory = fs.lstatSync(fullPath).isDirectory()
const size = fs.statSync(fullPath).size
const fileStat: FileStat = {
isDirectory,
size,
}
return fileStat
}
writeBlob(path: string, data: any) {
try {
const normalizedPath = normalizeFilePath(path)
const dataBuffer = Buffer.from(data, 'base64')
fs.writeFileSync(join(getJanDataFolderPath(), normalizedPath), dataBuffer)
} catch (err) {
console.error(`writeFile ${path} result: ${err}`)
}
}
}

View File

@ -0,0 +1,23 @@
import { DownloadRoute } from '../../../../api'
import { DownloadManager } from '../../../helper/download'
import { HttpServer } from '../../HttpServer'
/**
 * Registers the route exposing per-model download progress.
 * GET /download/<getDownloadProgress>/:modelId replies 200 with the
 * tracked DownloadState, or 404 when no download is known for the model.
 */
export const downloadRouter = async (app: HttpServer) => {
  app.get(`/download/${DownloadRoute.getDownloadProgress}/:modelId`, async (req, res) => {
    const modelId = req.params.modelId

    console.debug(`Getting download progress for model ${modelId}`)
    console.debug(
      `All Download progress: ${JSON.stringify(DownloadManager.instance.downloadProgressMap)}`
    )

    const progressEntry = DownloadManager.instance.downloadProgressMap[modelId]
    if (progressEntry) {
      return res.status(200).send(progressEntry)
    }
    return res.status(404).send({
      message: 'Download progress not found',
    })
  })
}

View File

@ -0,0 +1,13 @@
import { HttpServer } from '../../HttpServer'
import { Handler, RequestHandler } from '../../common/handler'
export function handleRequests(app: HttpServer) {
const restWrapper: Handler = (route: string, listener: (...args: any[]) => any) => {
app.post(`/app/${route}`, async (request: any, reply: any) => {
const args = JSON.parse(request.body) as any[]
reply.send(JSON.stringify(await listener(...args)))
})
}
const handler = new RequestHandler(restWrapper)
handler.handle()
}

View File

@ -1,20 +1,24 @@
import { AppRoute } from '../../../api'
import { HttpServer } from '../HttpServer'
import { basename, join } from 'path'
import {
chatCompletions,
deleteBuilder,
downloadModel,
getBuilder,
retrieveBuilder,
} from '../common/builder'
createMessage,
createThread,
getMessages,
retrieveMesasge,
updateThread,
} from './helper/builder'
import { JanApiRouteConfiguration } from '../common/configuration'
import { startModel, stopModel } from '../common/startStopModel'
import { JanApiRouteConfiguration } from './helper/configuration'
import { startModel, stopModel } from './helper/startStopModel'
import { ModelSettingParams } from '../../../types'
export const commonRouter = async (app: HttpServer) => {
// Common Routes
// Read & Delete :: Threads | Models | Assistants
Object.keys(JanApiRouteConfiguration).forEach((key) => {
app.get(`/${key}`, async (_request) => getBuilder(JanApiRouteConfiguration[key]))
@ -27,7 +31,24 @@ export const commonRouter = async (app: HttpServer) => {
)
})
// Download Model Routes
// Threads
app.post(`/threads/`, async (req, res) => createThread(req.body))
app.get(`/threads/:threadId/messages`, async (req, res) => getMessages(req.params.threadId))
app.get(`/threads/:threadId/messages/:messageId`, async (req, res) =>
retrieveMesasge(req.params.threadId, req.params.messageId)
)
app.post(`/threads/:threadId/messages`, async (req, res) =>
createMessage(req.params.threadId as any, req.body as any)
)
app.patch(`/threads/:threadId`, async (request: any) =>
updateThread(request.params.threadId, request.body)
)
// Models
app.get(`/models/download/:modelId`, async (request: any) =>
downloadModel(request.params.modelId, {
ignoreSSL: request.query.ignoreSSL === 'true',
@ -46,17 +67,6 @@ export const commonRouter = async (app: HttpServer) => {
app.put(`/models/:modelId/stop`, async (request: any) => stopModel(request.params.modelId))
// Chat Completion Routes
// Chat Completion
app.post(`/chat/completions`, async (request: any, reply: any) => chatCompletions(request, reply))
// App Routes
app.post(`/app/${AppRoute.joinPath}`, async (request: any, reply: any) => {
const args = JSON.parse(request.body) as any[]
reply.send(JSON.stringify(join(...args[0])))
})
app.post(`/app/${AppRoute.baseName}`, async (request: any, reply: any) => {
const args = JSON.parse(request.body) as any[]
reply.send(JSON.stringify(basename(args[0])))
})
}

View File

@ -1,10 +1,11 @@
import fs from 'fs'
import { JanApiRouteConfiguration, RouteConfiguration } from './configuration'
import { join } from 'path'
import { ContentType, MessageStatus, Model, ThreadMessage } from './../../../index'
import { getEngineConfiguration, getJanDataFolderPath } from '../../utils'
import { ContentType, MessageStatus, Model, ThreadMessage } from '../../../../index'
import { getEngineConfiguration, getJanDataFolderPath } from '../../../helper'
import { DEFAULT_CHAT_COMPLETION_URL } from './consts'
// TODO: Refactor these
export const getBuilder = async (configuration: RouteConfiguration) => {
const directoryPath = join(getJanDataFolderPath(), configuration.dirName)
try {

View File

@ -1,9 +1,9 @@
import fs from 'fs'
import { join } from 'path'
import { getJanDataFolderPath, getJanExtensionsPath, getSystemResourceInfo } from '../../utils'
import { logServer } from '../../log'
import { getJanDataFolderPath, getJanExtensionsPath, getSystemResourceInfo } from '../../../helper'
import { logServer } from '../../../helper/log'
import { ChildProcessWithoutNullStreams, spawn } from 'child_process'
import { Model, ModelSettingParams, PromptTemplate } from '../../../types'
import { Model, ModelSettingParams, PromptTemplate } from '../../../../types'
import {
LOCAL_HOST,
NITRO_DEFAULT_PORT,

View File

@ -0,0 +1,16 @@
import { HttpServer } from '../HttpServer'
import { commonRouter } from './common'
import { downloadRouter } from './app/download'
import { handleRequests } from './app/handlers'
/**
 * Top-level v1 route registration. Registration order is significant:
 * public API routes first, then internal application handlers, then the
 * download-progress route.
 */
export const v1Router = async (app: HttpServer) => {
  // MARK: Public API Routes
  app.register(commonRouter)

  // MARK: Internal Application Routes
  handleRequests(app)

  // Expanded route for tracking download progress
  // TODO: Replace by Observer Wrapper (ZeroMQ / Vanilla Websocket)
  app.register(downloadRouter)
}

View File

@ -1,58 +0,0 @@
import { DownloadRoute } from '../../../api'
import { join } from 'path'
import { DownloadManager } from '../../download'
import { HttpServer } from '../HttpServer'
import { createWriteStream } from 'fs'
import { getJanDataFolderPath } from '../../utils'
import { normalizeFilePath } from "../../path";
/**
 * Legacy download routes: start a file download and abort an in-flight
 * one. Request bodies are JSON argument arrays whose `file:`-prefixed
 * strings are resolved against the Jan data folder.
 */
export const downloadRouter = async (app: HttpServer) => {
  app.post(`/${DownloadRoute.downloadFile}`, async (req, res) => {
    // Network options come from query parameters.
    const strictSSL = !(req.query.ignoreSSL === "true");
    const proxy = req.query.proxy?.startsWith("http") ? req.query.proxy : undefined;
    const body = JSON.parse(req.body as any);
    // Resolve file:-style string arguments to absolute data-folder paths.
    const normalizedArgs = body.map((arg: any) => {
      if (typeof arg === "string") {
        return join(getJanDataFolderPath(), normalizeFilePath(arg));
      }
      return arg;
    });

    // args[0] is the URL, args[1] the destination path.
    const localPath = normalizedArgs[1];
    const fileName = localPath.split("/").pop() ?? "";

    const request = require("request");
    const progress = require("request-progress");
    const rq = request({ url: normalizedArgs[0], strictSSL, proxy });
    // Progress/error/end are only logged here; the newer Downloader
    // processor is what reports state to observers.
    progress(rq, {})
      .on("progress", function (state: any) {
        console.log("download onProgress", state);
      })
      .on("error", function (err: Error) {
        console.log("download onError", err);
      })
      .on("end", function () {
        console.log("download onEnd");
      })
      .pipe(createWriteStream(normalizedArgs[1]));

    // Track the request so abortDownload can find it by file name.
    DownloadManager.instance.setRequest(fileName, rq);
  });

  app.post(`/${DownloadRoute.abortDownload}`, async (req, res) => {
    const body = JSON.parse(req.body as any);
    const normalizedArgs = body.map((arg: any) => {
      if (typeof arg === "string") {
        return join(getJanDataFolderPath(), normalizeFilePath(arg));
      }
      return arg;
    });

    // args[0] is the destination path; look the request up by file name,
    // clear the registry entry, then abort (if a request was found).
    const localPath = normalizedArgs[0];
    const fileName = localPath.split("/").pop() ?? "";
    const rq = DownloadManager.instance.networkRequests[fileName];
    DownloadManager.instance.networkRequests[fileName] = undefined;
    rq?.abort();
  });
};

View File

@ -1,49 +0,0 @@
import { join, extname } from 'path'
import { ExtensionRoute } from '../../../api/index'
import { ModuleManager } from '../../module'
import { getActiveExtensions, installExtensions } from '../../extension/store'
import { HttpServer } from '../HttpServer'
import { readdirSync } from 'fs'
import { getJanExtensionsPath } from '../../utils'
/**
 * Legacy extension routes: list active/base extensions, install
 * extensions, and invoke a function exported by an installed extension.
 */
export const extensionRouter = async (app: HttpServer) => {
  // TODO: Share code between node projects
  app.post(`/${ExtensionRoute.getActiveExtensions}`, async (_req, res) => {
    const activeExtensions = await getActiveExtensions()
    res.status(200).send(activeExtensions)
  })

  app.post(`/${ExtensionRoute.baseExtensions}`, async (_req, res) => {
    // Base (pre-installed) extensions are the .tgz bundles shipped in
    // the 'pre-install' folder next to the build output.
    const baseExtensionPath = join(__dirname, '..', '..', '..', 'pre-install')
    const extensions = readdirSync(baseExtensionPath)
      .filter((file) => extname(file) === '.tgz')
      .map((file) => join(baseExtensionPath, file))

    res.status(200).send(extensions)
  })

  app.post(`/${ExtensionRoute.installExtension}`, async (req) => {
    const extensions = req.body as any
    const installed = await installExtensions(JSON.parse(extensions)[0])
    return JSON.parse(JSON.stringify(installed))
  })

  app.post(`/${ExtensionRoute.invokeExtensionFunc}`, async (req, res) => {
    // Body layout: [modulePath, methodName, ...params]
    const args = JSON.parse(req.body as any)
    console.debug(args)
    const module = await import(join(getJanExtensionsPath(), args[0]))
    ModuleManager.instance.setModule(args[0], module)

    const method = args[1]
    if (typeof module[method] === 'function') {
      // Fix: slice the parameter list once and reuse it (previously the
      // slice was computed twice and a leftover debug console.log of the
      // params remained).
      const params = args.slice(2)
      return module[method](...params)
    } else {
      console.debug(module[method])
      console.error(`Function "${method}" does not exist in the module.`)
    }
  })
}

View File

@ -1,14 +0,0 @@
import { FileManagerRoute } from '../../../api'
import { HttpServer } from '../../index'
/**
 * Registers file-manager routes as HTTP endpoints.
 * NOTE(review): every handler below is an empty stub — these routes
 * accept requests but perform no work and send no payload; presumably
 * placeholders for the server build. Confirm before relying on them.
 */
export const fsRouter = async (app: HttpServer) => {
  app.post(`/app/${FileManagerRoute.syncFile}`, async (request: any, reply: any) => {})
  app.post(`/app/${FileManagerRoute.getJanDataFolderPath}`, async (request: any, reply: any) => {})
  app.post(`/app/${FileManagerRoute.getResourcePath}`, async (request: any, reply: any) => {})
  app.post(`/app/${FileManagerRoute.getUserHomePath}`, async (request: any, reply: any) => {})
  app.post(`/app/${FileManagerRoute.fileStat}`, async (request: any, reply: any) => {})
}

View File

@ -1,29 +0,0 @@
import { FileSystemRoute } from '../../../api'
import { join } from 'path'
import { HttpServer } from '../HttpServer'
import { getJanDataFolderPath } from '../../utils'
import { normalizeFilePath } from '../../path'
/**
 * Legacy fs routes: exposes every FileSystemRoute as a POST endpoint
 * that forwards its JSON-array body onto the matching `fs` function,
 * resolving `file:`-prefixed string arguments against the Jan data folder.
 */
export const fsRouter = async (app: HttpServer) => {
  const moduleName = 'fs'
  // Generate handlers for each fs route
  Object.values(FileSystemRoute).forEach((route) => {
    app.post(`/${route}`, async (req, res) => {
      const body = JSON.parse(req.body as any)
      try {
        const result = await import(moduleName).then((mdl) => {
          return mdl[route](
            ...body.map((arg: any) =>
              typeof arg === 'string' && (arg.startsWith(`file:/`) || arg.startsWith(`file:\\`))
                ? join(getJanDataFolderPath(), normalizeFilePath(arg))
                : arg
            )
          )
        })
        res.status(200).send(result)
      } catch (ex) {
        console.log(ex)
        // Fix: the error was previously only logged, leaving the client
        // request hanging with no response; reply with 500 instead.
        res.status(500).send({ error: String(ex) })
      }
    })
  })
}

View File

@ -1,6 +0,0 @@
export * from './download'
export * from './extension'
export * from './fs'
export * from './thread'
export * from './common'
export * from './v1'

View File

@ -1,30 +0,0 @@
import { HttpServer } from '../HttpServer'
import {
createMessage,
createThread,
getMessages,
retrieveMesasge,
updateThread,
} from '../common/builder'
/**
 * Thread routes (mounted under a /threads prefix): create a thread,
 * list/retrieve/create messages, and modify a thread.
 */
export const threadRouter = async (app: HttpServer) => {
  // Create a new thread from the request body.
  app.post(`/`, async (request, _reply) => createThread(request.body))

  // List every message in a thread.
  app.get(`/:threadId/messages`, async (request, _reply) => getMessages(request.params.threadId))

  // Retrieve a single message by id.
  app.get(`/:threadId/messages/:messageId`, async (request, _reply) =>
    retrieveMesasge(request.params.threadId, request.params.messageId),
  )

  // Append a message to a thread.
  app.post(`/:threadId/messages`, async (request, _reply) =>
    createMessage(request.params.threadId as any, request.body as any),
  )

  // Modify thread metadata.
  app.patch(`/:threadId`, async (request: any) =>
    updateThread(request.params.threadId, request.body),
  )
}

View File

@ -1,25 +0,0 @@
import { HttpServer } from '../HttpServer'
import { commonRouter } from './common'
import { threadRouter } from './thread'
import { fsRouter } from './fs'
import { extensionRouter } from './extension'
import { downloadRouter } from './download'
/**
 * Legacy v1 route registration. Order is significant: external routes
 * first (common + threads), then internal application routes, each
 * mounted under its own URL prefix.
 */
export const v1Router = async (app: HttpServer) => {
  // MARK: External Routes
  app.register(commonRouter)
  app.register(threadRouter, {
    prefix: '/threads',
  })

  // MARK: Internal Application Routes
  app.register(fsRouter, {
    prefix: '/fs',
  })
  app.register(extensionRouter, {
    prefix: '/extension',
  })
  app.register(downloadRouter, {
    prefix: '/download',
  })
}

View File

@ -104,7 +104,7 @@ export default class Extension {
await pacote.extract(
this.specifier,
join(ExtensionManager.instance.getExtensionsPath() ?? '', this.name ?? ''),
this.installOptions,
this.installOptions
)
// Set the url using the custom extensions protocol

View File

@ -41,8 +41,8 @@ async function registerExtensionProtocol() {
console.error('Electron is not available')
}
const extensionPath = ExtensionManager.instance.getExtensionsPath()
if (electron) {
return electron.protocol.registerFileProtocol('extension', (request: any, callback: any) => {
if (electron && electron.protocol) {
return electron.protocol?.registerFileProtocol('extension', (request: any, callback: any) => {
const entry = request.url.substr('extension://'.length - 1)
const url = normalize(extensionPath + entry)
@ -69,7 +69,7 @@ export function useExtensions(extensionsPath: string) {
// Read extension list from extensions folder
const extensions = JSON.parse(
readFileSync(ExtensionManager.instance.getExtensionsFile(), 'utf-8'),
readFileSync(ExtensionManager.instance.getExtensionsFile(), 'utf-8')
)
try {
// Create and store a Extension instance for each extension in list
@ -82,7 +82,7 @@ export function useExtensions(extensionsPath: string) {
throw new Error(
'Could not successfully rebuild list of installed extensions.\n' +
error +
'\nPlease check the extensions.json file in the extensions folder.',
'\nPlease check the extensions.json file in the extensions folder.'
)
}
@ -122,7 +122,7 @@ function loadExtension(ext: any) {
export function getStore() {
if (!ExtensionManager.instance.getExtensionsFile()) {
throw new Error(
'The extension path has not yet been set up. Please run useExtensions before accessing the store',
'The extension path has not yet been set up. Please run useExtensions before accessing the store'
)
}

View File

@ -1,6 +1,6 @@
import { writeFileSync } from "fs";
import Extension from "./extension";
import { ExtensionManager } from "./manager";
import { writeFileSync } from 'fs'
import Extension from './extension'
import { ExtensionManager } from './manager'
/**
* @module store
@ -11,7 +11,7 @@ import { ExtensionManager } from "./manager";
* Register of installed extensions
* @type {Object.<string, Extension>} extension - List of installed extensions
*/
const extensions: Record<string, Extension> = {};
const extensions: Record<string, Extension> = {}
/**
* Get a extension from the stored extensions.
@ -21,10 +21,10 @@ const extensions: Record<string, Extension> = {};
*/
export function getExtension(name: string) {
if (!Object.prototype.hasOwnProperty.call(extensions, name)) {
throw new Error(`Extension ${name} does not exist`);
throw new Error(`Extension ${name} does not exist`)
}
return extensions[name];
return extensions[name]
}
/**
@ -33,7 +33,7 @@ export function getExtension(name: string) {
* @alias extensionManager.getAllExtensions
*/
export function getAllExtensions() {
return Object.values(extensions);
return Object.values(extensions)
}
/**
@ -42,7 +42,7 @@ export function getAllExtensions() {
* @alias extensionManager.getActiveExtensions
*/
export function getActiveExtensions() {
return Object.values(extensions).filter((extension) => extension.active);
return Object.values(extensions).filter((extension) => extension.active)
}
/**
@ -53,9 +53,9 @@ export function getActiveExtensions() {
* @alias extensionManager.removeExtension
*/
export function removeExtension(name: string, persist = true) {
const del = delete extensions[name];
if (persist) persistExtensions();
return del;
const del = delete extensions[name]
if (persist) persistExtensions()
return del
}
/**
@ -65,10 +65,10 @@ export function removeExtension(name: string, persist = true) {
* @returns {void}
*/
export function addExtension(extension: Extension, persist = true) {
if (extension.name) extensions[extension.name] = extension;
if (extension.name) extensions[extension.name] = extension
if (persist) {
persistExtensions();
extension.subscribe("pe-persist", persistExtensions);
persistExtensions()
extension.subscribe('pe-persist', persistExtensions)
}
}
@ -77,14 +77,11 @@ export function addExtension(extension: Extension, persist = true) {
* @returns {void}
*/
export function persistExtensions() {
const persistData: Record<string, Extension> = {};
const persistData: Record<string, Extension> = {}
for (const name in extensions) {
persistData[name] = extensions[name];
persistData[name] = extensions[name]
}
writeFileSync(
ExtensionManager.instance.getExtensionsFile(),
JSON.stringify(persistData),
);
writeFileSync(ExtensionManager.instance.getExtensionsFile(), JSON.stringify(persistData))
}
/**
@ -94,26 +91,29 @@ export function persistExtensions() {
* @returns {Promise.<Array.<Extension>>} New extension
* @alias extensionManager.installExtensions
*/
export async function installExtensions(extensions: any, store = true) {
const installed: Extension[] = [];
export async function installExtensions(extensions: any) {
const installed: Extension[] = []
for (const ext of extensions) {
// Set install options and activation based on input type
const isObject = typeof ext === "object";
const spec = isObject ? [ext.specifier, ext] : [ext];
const activate = isObject ? ext.activate !== false : true;
const isObject = typeof ext === 'object'
const spec = isObject ? [ext.specifier, ext] : [ext]
const activate = isObject ? ext.activate !== false : true
// Install and possibly activate extension
const extension = new Extension(...spec);
await extension._install();
if (activate) extension.setActive(true);
const extension = new Extension(...spec)
if (!extension.origin) {
continue
}
await extension._install()
if (activate) extension.setActive(true)
// Add extension to store if needed
if (store) addExtension(extension);
installed.push(extension);
addExtension(extension)
installed.push(extension)
}
// Return list of all installed extensions
return installed;
return installed
}
/**

View File

@ -2,7 +2,7 @@ import { AppConfiguration, SystemResourceInfo } from '../../types'
import { join } from 'path'
import fs from 'fs'
import os from 'os'
import { log, logServer } from '../log'
import { log, logServer } from './log'
import childProcess from 'child_process'
// TODO: move this to core
@ -56,34 +56,6 @@ export const updateAppConfiguration = (configuration: AppConfiguration): Promise
return Promise.resolve()
}
/**
* Utility function to get server log path
*
* @returns {string} The log path.
*/
export const getServerLogPath = (): string => {
const appConfigurations = getAppConfigurations()
const logFolderPath = join(appConfigurations.data_folder, 'logs')
if (!fs.existsSync(logFolderPath)) {
fs.mkdirSync(logFolderPath, { recursive: true })
}
return join(logFolderPath, 'server.log')
}
/**
* Utility function to get app log path
*
* @returns {string} The log path.
*/
export const getAppLogPath = (): string => {
const appConfigurations = getAppConfigurations()
const logFolderPath = join(appConfigurations.data_folder, 'logs')
if (!fs.existsSync(logFolderPath)) {
fs.mkdirSync(logFolderPath, { recursive: true })
}
return join(logFolderPath, 'app.log')
}
/**
* Utility function to get data folder path
*
@ -146,18 +118,6 @@ const exec = async (command: string): Promise<string> => {
})
}
export const getSystemResourceInfo = async (): Promise<SystemResourceInfo> => {
const cpu = await physicalCpuCount()
const message = `[NITRO]::CPU informations - ${cpu}`
log(message)
logServer(message)
return {
numCpuPhysicalCore: cpu,
memAvailable: 0, // TODO: this should not be 0
}
}
export const getEngineConfiguration = async (engineId: string) => {
if (engineId !== 'openai') {
return undefined
@ -167,3 +127,31 @@ export const getEngineConfiguration = async (engineId: string) => {
const data = fs.readFileSync(filePath, 'utf-8')
return JSON.parse(data)
}
/**
 * Ensures the logs folder exists under the configured data folder and
 * returns the absolute path of the named log file inside it.
 * (Shared by the server/app log path helpers below, which previously
 * duplicated this logic.)
 */
const getLogPath = (fileName: string): string => {
  const appConfigurations = getAppConfigurations()
  const logFolderPath = join(appConfigurations.data_folder, 'logs')
  if (!fs.existsSync(logFolderPath)) {
    fs.mkdirSync(logFolderPath, { recursive: true })
  }
  return join(logFolderPath, fileName)
}

/**
 * Utility function to get server log path
 *
 * @returns {string} The log path.
 */
export const getServerLogPath = (): string => getLogPath('server.log')

/**
 * Utility function to get app log path
 *
 * @returns {string} The log path.
 */
export const getAppLogPath = (): string => getLogPath('app.log')

View File

@ -1,15 +1,18 @@
import { DownloadState } from '../../types'
/**
* Manages file downloads and network requests.
*/
export class DownloadManager {
public networkRequests: Record<string, any> = {};
public networkRequests: Record<string, any> = {}
public static instance: DownloadManager = new DownloadManager();
public static instance: DownloadManager = new DownloadManager()
public downloadProgressMap: Record<string, DownloadState> = {}
constructor() {
if (DownloadManager.instance) {
return DownloadManager.instance;
return DownloadManager.instance
}
}
/**
@ -18,6 +21,6 @@ export class DownloadManager {
* @param {Request | undefined} request - The network request to set, or undefined to clear the request.
*/
setRequest(fileName: string, request: any | undefined) {
this.networkRequests[fileName] = request;
this.networkRequests[fileName] = request
}
}

View File

@ -0,0 +1,6 @@
export * from './config'
export * from './download'
export * from './log'
export * from './module'
export * from './path'
export * from './resource'

View File

@ -1,6 +1,6 @@
import fs from 'fs'
import util from 'util'
import { getAppLogPath, getServerLogPath } from './utils'
import { getAppLogPath, getServerLogPath } from './config'
export const log = (message: string) => {
const path = getAppLogPath()

View File

@ -0,0 +1,35 @@
import { join } from 'path'
/**
 * Normalize file path
 * Strips a leading `file:` protocol prefix (with any run of slashes or
 * backslashes) from the path; paths without the prefix are returned
 * unchanged.
 * @param path - possibly `file:`-prefixed path
 * @returns the path without the protocol prefix
 */
export function normalizeFilePath(path: string): string {
  const FILE_PROTOCOL_PREFIX = /^(file:[\\/]+)([^:\s]+)$/
  return path.replace(FILE_PROTOCOL_PREFIX, '$2')
}
/**
 * Resolves the application's resource directory.
 * In Electron: the unpacked asar directory for packaged builds, or the
 * app path in development mode. Outside Electron (server build): three
 * levels above the path returned by `global.core.appPath()`.
 */
export async function appResourcePath(): Promise<string> {
  let electron: any = undefined

  try {
    // Import via a variable — presumably to keep bundlers from
    // statically resolving electron in non-electron builds (verify).
    const moduleName = 'electron'
    electron = await import(moduleName)
  } catch (err) {
    console.error('Electron is not available')
  }

  // electron
  if (electron && electron.protocol) {
    let appPath = join(electron.app.getAppPath(), '..', 'app.asar.unpacked')

    if (!electron.app.isPackaged) {
      // for development mode
      appPath = join(electron.app.getAppPath())
    }
    return appPath
  }
  // server
  return join(global.core.appPath(), '../../..')
}

View File

@ -0,0 +1,15 @@
import { SystemResourceInfo } from '../../types'
import { physicalCpuCount } from './config'
import { log, logServer } from './log'
/**
 * Collects basic system resource information: physical CPU core count
 * (logged to both app and server logs) and available memory.
 */
export const getSystemResourceInfo = async (): Promise<SystemResourceInfo> => {
  const numCpuPhysicalCore = await physicalCpuCount()
  const message = `[NITRO]::CPU informations - ${numCpuPhysicalCore}`
  log(message)
  logServer(message)

  return {
    numCpuPhysicalCore,
    memAvailable: 0, // TODO: this should not be 0
  }
}

View File

@ -2,9 +2,5 @@ export * from './extension/index'
export * from './extension/extension'
export * from './extension/manager'
export * from './extension/store'
export * from './download'
export * from './module'
export * from './api'
export * from './log'
export * from './utils'
export * from './path'
export * from './helper'

View File

@ -1,9 +0,0 @@
/**
 * Normalize file path
 * Removes a leading `file:` protocol prefix (any run of slashes or
 * backslashes); paths without the prefix pass through unchanged.
 * @param path - possibly `file:`-prefixed path
 * @returns the path without the protocol prefix
 */
export function normalizeFilePath(path: string): string {
  const protocolPrefix = /^(file:[\\/]+)([^:\s]+)$/;
  return path.replace(protocolPrefix, "$2");
}

View File

@ -0,0 +1,7 @@
/**
 * The `AssistantEvent` enumeration contains the assistant-related event
 * names available in the Jan platform.
 */
export enum AssistantEvent {
  /** The `OnAssistantsUpdate` event is emitted when the assistant list is updated. */
  OnAssistantsUpdate = 'OnAssistantsUpdate',
}

View File

@ -1,2 +1,3 @@
export * from './assistantEntity'
export * from './assistantEvent'
export * from './assistantInterface'

View File

@ -2,3 +2,26 @@ export type FileStat = {
isDirectory: boolean
size: number
}
/**
 * Snapshot of a single file download's progress, as produced by the
 * downloader and stored in the DownloadManager progress map.
 */
export type DownloadState = {
  /** Identifier of the model the file belongs to. */
  modelId: string
  /** Name of the file being downloaded. */
  fileName: string
  /** Elapsed/remaining transfer time. */
  time: DownloadTime
  /** Current transfer speed (presumably bytes/second — confirm against request-progress). */
  speed: number
  /** Transfer progress (presumably a 0..1 fraction — confirm against request-progress). */
  percent: number
  /** Total and transferred byte counters. */
  size: DownloadSize
  /** Progress of dependent child downloads, if any. */
  children?: DownloadState[]
  /** Error message, set when downloadState is 'error'. */
  error?: string
  /** Lifecycle phase of the download. */
  downloadState: 'downloading' | 'error' | 'end'
}

/** Time bookkeeping for a download. */
type DownloadTime = {
  elapsed: number
  remaining: number
}

/** Byte counters for a download. */
type DownloadSize = {
  total: number
  transferred: number
}

View File

@ -1,3 +1,4 @@
export * from './messageEntity'
export * from './messageInterface'
export * from './messageEvent'
export * from './messageRequestType'

View File

@ -27,6 +27,8 @@ export type ThreadMessage = {
updated: number
/** The additional metadata of this message. **/
metadata?: Record<string, unknown>
type?: string
}
/**
@ -56,6 +58,8 @@ export type MessageRequest = {
/** The thread of this message is belong to. **/
// TODO: deprecate threadId field
thread?: Thread
type?: string
}
/**

View File

@ -0,0 +1,5 @@
/** Discriminates the kind of message request being processed. */
export enum MessageRequestType {
  Thread = 'Thread',
  Assistant = 'Assistant',
  Summary = 'Summary',
}

View File

@ -12,4 +12,6 @@ export enum ModelEvent {
OnModelStop = 'OnModelStop',
/** The `OnModelStopped` event is emitted when a model stopped ok. */
OnModelStopped = 'OnModelStopped',
/** The `OnModelUpdate` event is emitted when the model list is updated. */
OnModelsUpdate = 'OnModelsUpdate',
}

View File

@ -10,7 +10,7 @@ export interface ModelInterface {
* @param network - Optional object to specify proxy/whether to ignore SSL certificates.
* @returns A Promise that resolves when the model has been downloaded.
*/
downloadModel(model: Model, network?: { ignoreSSL?: boolean, proxy?: string }): Promise<void>
downloadModel(model: Model, network?: { ignoreSSL?: boolean; proxy?: string }): Promise<void>
/**
* Cancels the download of a specific model.

View File

@ -1,4 +1,4 @@
import { normalizeFilePath } from "../../src/node/path";
import { normalizeFilePath } from "../../src/node/helper/path";
describe("Test file normalize", () => {
test("returns no file protocol prefix on Unix", async () => {

View File

@ -1,6 +1,3 @@
{
"extends": [
"tslint-config-standard",
"tslint-config-prettier"
]
}
"extends": ["tslint-config-standard", "tslint-config-prettier"]
}

117
docker-compose.yml Normal file
View File

@ -0,0 +1,117 @@
# Docker Compose file for setting up Minio, createbuckets, app_cpu, and app_gpu services
version: '3.7'
services:
# Minio service for object storage
minio:
image: minio/minio
volumes:
- minio_data:/data
ports:
- "9000:9000"
- "9001:9001"
environment:
# Set the root user and password for Minio
MINIO_ROOT_USER: minioadmin # This acts as AWS_ACCESS_KEY
MINIO_ROOT_PASSWORD: minioadmin # This acts as AWS_SECRET_ACCESS_KEY
command: server --console-address ":9001" /data
restart: always
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
networks:
vpcbr:
ipv4_address: 10.5.0.2
# createbuckets service to create a bucket and set its policy
createbuckets:
image: minio/mc
depends_on:
- minio
entrypoint: >
/bin/sh -c "
/usr/bin/mc alias set myminio http://minio:9000 minioadmin minioadmin;
/usr/bin/mc mb myminio/mybucket;
/usr/bin/mc policy set public myminio/mybucket;
exit 0;
"
networks:
vpcbr:
# app_cpu service for running the CPU version of the application
app_cpu:
image: jan:latest
volumes:
- app_data:/app/server/build/jan
build:
context: .
dockerfile: Dockerfile
environment:
# Set the AWS access key, secret access key, bucket name, endpoint, and region for app_cpu
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
S3_BUCKET_NAME: mybucket
AWS_ENDPOINT: http://10.5.0.2:9000
AWS_REGION: us-east-1
restart: always
profiles:
- cpu
ports:
- "3000:3000"
- "1337:1337"
- "3928:3928"
networks:
vpcbr:
ipv4_address: 10.5.0.3
# app_gpu service for running the GPU version of the application
app_gpu:
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
image: jan-gpu:latest
volumes:
- app_data:/app/server/build/jan
build:
context: .
dockerfile: Dockerfile.gpu
restart: always
environment:
# Set the AWS access key, secret access key, bucket name, endpoint, and region for app_gpu
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
S3_BUCKET_NAME: mybucket
AWS_ENDPOINT: http://10.5.0.2:9000
AWS_REGION: us-east-1
profiles:
- gpu
ports:
- "3000:3000"
- "1337:1337"
- "3928:3928"
networks:
vpcbr:
ipv4_address: 10.5.0.4
volumes:
minio_data:
app_data:
networks:
vpcbr:
driver: bridge
ipam:
config:
- subnet: 10.5.0.0/16
gateway: 10.5.0.1
# Usage:
# - Run 'docker-compose --profile cpu up -d' to start the app_cpu service
# - Run 'docker-compose --profile gpu up -d' to start the app_gpu service

View File

@ -1,6 +1,76 @@
dan-jan:
name: Daniel Onggunhao
title: Co-Founder
url: https://github.com/dan-jan
url: https://github.com/dan-jan
image_url: https://avatars.githubusercontent.com/u/101145494?v=4
email: daniel@jan.ai
email: daniel@jan.ai
namchuai:
name: Nam Nguyen
title: Developer
url: https://github.com/namchuai
image_url: https://avatars.githubusercontent.com/u/10397206?v=4
email: james@jan.ai
hiro-v:
name: Hiro Vuong
title: MLE
url: https://github.com/hiro-v
image_url: https://avatars.githubusercontent.com/u/22463238?v=4
email: hiro@jan.ai
ashley-jan:
name: Ashley Tran
title: Product Designer
url: https://github.com/imtuyethan
image_url: https://avatars.githubusercontent.com/u/89722390?v=4
email: ashley@jan.ai
hientominh:
name: Hien To
title: DevOps Engineer
url: https://github.com/hientominh
image_url: https://avatars.githubusercontent.com/u/37921427?v=4
email: hien@jan.ai
Van-QA:
name: Van Pham
title: QA & Release Manager
url: https://github.com/Van-QA
image_url: https://avatars.githubusercontent.com/u/64197333?v=4
email: van@jan.ai
louis-jan:
name: Louis Le
title: Software Engineer
url: https://github.com/louis-jan
image_url: https://avatars.githubusercontent.com/u/133622055?v=4
email: louis@jan.ai
hahuyhoang411:
name: Rex Ha
title: LLM Researcher & Content Writer
url: https://github.com/hahuyhoang411
image_url: https://avatars.githubusercontent.com/u/64120343?v=4
email: rex@jan.ai
automaticcat:
name: Alan Dao
title: AI Engineer
url: https://github.com/tikikun
image_url: https://avatars.githubusercontent.com/u/22268502?v=4
email: alan@jan.ai
hieu-jan:
name: Henry Ho
title: Software Engineer
url: https://github.com/hieu-jan
image_url: https://avatars.githubusercontent.com/u/150573299?v=4
email: hieu@jan.ai
0xsage:
name: Nicole Zhu
title: Co-Founder
url: https://github.com/0xsage
image_url: https://avatars.githubusercontent.com/u/69952136?v=4
email: nicole@jan.ai

View File

@ -110,9 +110,10 @@ Adhering to Jan's privacy preserving philosophy, our analytics philosophy is to
#### What is tracked
1. By default, Github tracks downloads and device metadata for all public Github repos. This helps us troubleshoot & ensure cross platform support.
1. We use Posthog to track a single `app.opened` event without additional user metadata, in order to understand retention.
1. Additionally, we plan to enable a `Settings` feature for users to turn off all tracking.
1. By default, Github tracks downloads and device metadata for all public GitHub repositories. This helps us troubleshoot & ensure cross-platform support.
2. We use [Umami](https://umami.is/) to collect, analyze, and understand application data while maintaining visitor privacy and data ownership. We are using the Umami Cloud in Europe to ensure GDPR compliance. Please see [Umami Privacy Policy](https://umami.is/privacy) for more details.
3. We use Umami to track a single `app.opened` event without additional user metadata, in order to understand retention. In addition, we track `app.event` to understand app version usage.
4. Additionally, we plan to enable a `Settings` feature for users to turn off all tracking.
#### Request for help

View File

@ -0,0 +1,79 @@
---
title: Installation and Prerequisites
slug: /developer/prereq
description: Guide to install and setup Jan for development.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
installation,
prerequisites,
developer setup,
]
---
## Requirements
### Hardware Requirements
Ensure your system meets the following specifications to guarantee a smooth development experience:
- [Hardware Requirements](../../guides/02-installation/06-hardware.md)
### System Requirements
Make sure your operating system meets the specific requirements for Jan development:
- [Windows](../../install/windows/#system-requirements)
- [MacOS](../../install/mac/#system-requirements)
- [Linux](../../install/linux/#system-requirements)
## Prerequisites
- [Node.js](https://nodejs.org/en/) (version 20.0.0 or higher)
- [yarn](https://yarnpkg.com/) (version 1.22.0 or higher)
- [make](https://www.gnu.org/software/make/) (version 3.81 or higher)
## Instructions
1. **Clone the Repository:**
```bash
git clone https://github.com/janhq/jan
cd jan
git checkout -b DESIRED_BRANCH
```
2. **Install Dependencies**
```bash
yarn install
```
3. **Run Development and Use Jan Desktop**
```bash
make dev
```
This command starts the development server and opens the Jan Desktop app.
## For Production Build
```bash
# Do steps 1 and 2 in the previous section
# Build the app
make build
```
This will build the app MacOS (M1/M2/M3) for production (with code signing already done) and place the result in `/electron/dist` folder.
## Troubleshooting
If you run into any issues due to a broken build, please check the [Stuck on a Broken Build](../../troubleshooting/stuck-on-broken-build) guide.

View File

@ -12,11 +12,16 @@ keywords:
conversational AI,
no-subscription fee,
large language model,
installation guide,
]
---
# Installing Jan on MacOS
## System Requirements
Ensure that your MacOS version is 13 or higher to run Jan.
## Installation
Jan is available for download via our homepage, [https://jan.ai/](https://jan.ai/).

View File

@ -12,11 +12,23 @@ keywords:
conversational AI,
no-subscription fee,
large language model,
installation guide,
]
---
# Installing Jan on Windows
## System Requirements
Ensure that your system meets the following requirements:
- Windows 10 or higher is required to run Jan.
To enable GPU support, you will need:
- NVIDIA GPU with CUDA Toolkit 11.7 or higher
- NVIDIA driver 470.63.01 or higher
## Installation
Jan is available for download via our homepage, [https://jan.ai](https://jan.ai/).
@ -59,13 +71,3 @@ To remove all user data associated with Jan, you can delete the `/jan` directory
cd C:\Users\%USERNAME%\AppData\Roaming
rmdir /S jan
```
## Troubleshooting
### Microsoft Defender
**Error: "Microsoft Defender SmartScreen prevented an unrecognized app from starting"**
Windows Defender may display the above warning when running the Jan Installer, as a standard security measure.
To proceed, select the "More info" option and select the "Run Anyway" option to continue with the installation.

View File

@ -12,11 +12,24 @@ keywords:
conversational AI,
no-subscription fee,
large language model,
installation guide,
]
---
# Installing Jan on Linux
## System Requirements
Ensure that your system meets the following requirements:
- glibc 2.27 or higher (check with `ldd --version`)
- gcc 11, g++ 11, cpp 11, or higher, refer to this [link](https://jan.ai/guides/troubleshooting/gpu-not-used/#specific-requirements-for-linux) for more information.
To enable GPU support, you will need:
- NVIDIA GPU with CUDA Toolkit 11.7 or higher
- NVIDIA driver 470.63.01 or higher
## Installation
Jan is available for download via our homepage, [https://jan.ai](https://jan.ai/).
@ -66,7 +79,6 @@ jan-linux-amd64-{version}.deb
# AppImage
jan-linux-x86_64-{version}.AppImage
```
```
## Uninstall Jan

View File

@ -0,0 +1,102 @@
---
title: Docker
slug: /install/docker
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
docker installation,
]
---
# Installing Jan using Docker
## Installation
### Pre-requisites
:::note
**Supported OS**: Linux, WSL2 Docker
:::
- Docker Engine and Docker Compose are required to run Jan in Docker mode. Follow the [instructions](https://docs.docker.com/engine/install/ubuntu/) below to get started with Docker Engine on Ubuntu.
```bash
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh ./get-docker.sh --dry-run
```
- If you intend to run Jan in GPU mode, you need to install `nvidia-driver` and `nvidia-docker2`. Follow the instruction [here](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) for installation.
### Instructions
- Run Jan in Docker mode
- **Option 1**: Run Jan in CPU mode
```bash
docker compose --profile cpu up -d
```
- **Option 2**: Run Jan in GPU mode
- **Step 1**: Check CUDA compatibility with your NVIDIA driver by running `nvidia-smi` and check the CUDA version in the output
```bash
nvidia-smi
# Output
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 531.18 Driver Version: 531.18 CUDA Version: 12.1 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA GeForce RTX 4070 Ti WDDM | 00000000:01:00.0 On | N/A |
| 0% 44C P8 16W / 285W| 1481MiB / 12282MiB | 2% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 1 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:02:00.0 Off | N/A |
| 0% 49C P8 14W / 120W| 0MiB / 6144MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 2 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:05:00.0 Off | N/A |
| 29% 38C P8 11W / 120W| 0MiB / 6144MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
```
- **Step 2**: Visit [NVIDIA NGC Catalog ](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda/tags) and find the smallest minor version of image tag that matches your CUDA version (e.g., 12.1 -> 12.1.0)
- **Step 3**: Update the `Dockerfile.gpu` line number 5 with the latest minor version of the image tag from step 2 (e.g. change `FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 AS base` to `FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 AS base`)
- **Step 4**: Run command to start Jan in GPU mode
```bash
# GPU mode
docker compose --profile gpu up -d
```
This will start the web server and you can access Jan at `http://localhost:3000`.
:::warning
- Docker mode is currently only suitable for development and localhost. Production is not supported yet, and the RAG feature is not available in Docker mode.
:::

View File

@ -65,6 +65,13 @@ Navigate to the `~/jan/models` folder. Create a folder named `gpt-3.5-turbo-16k`
}
```
:::tip
- You can find the list of available models in the [OpenAI Platform](https://platform.openai.com/docs/models/overview).
- Please note that the `id` property need to match the model name in the list. For example, if you want to use the [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo), you need to set the `id` property as `gpt-4-1106-preview`.
:::
### 2. Configure OpenAI API Keys
You can find your API keys in the [OpenAI Platform](https://platform.openai.com/api-keys) and set the OpenAI API keys in `~/jan/engines/openai.json` file.

View File

@ -1,33 +0,0 @@
---
title: Connect to Server
description: Connect to Jan's built-in API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
]
---
:::warning
This page is under construction.
:::
Jan ships with a built-in API server, that can be used as a drop-in, local replacement for OpenAI's API.
Jan runs on port `1337` by default, but this can (soon) be changed in Settings.
1. Go to Settings > Advanced > Enable API Server
2. Go to http://localhost:1337 for the API docs.
3. In terminal, simply CURL...
Note: Some UI states may be broken when in Server Mode.

View File

@ -0,0 +1,72 @@
---
title: Start Local Server
slug: /guides/using-server/server
description: How to run Jan's built-in API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
local server,
api server,
]
---
Jan ships with a built-in API server that can be used as a drop-in, local replacement for OpenAI's API. You can run your server by following these simple steps.
## Open Local API Server View
Navigate to the Local API Server view by clicking the corresponding icon on the left side of the screen.
<br></br>
![01-local-api-view](./assets/01-local-api-view.gif)
## Choosing a Model
On the top right of your screen under `Model Settings`, set the LLM that your local server will be running. You can choose from any of the models already installed, or pick a new model by clicking `Explore the Hub`.
<br></br>
![01-choose-model](./assets/01-choose-model.png)
## Server Options
On the left side of your screen, you can set custom server options.
<br></br>
![01-server-settings](./assets/01-server-options.png)
### Local Server Address
By default, Jan will be accessible only on localhost `127.0.0.1`. This means a local server can only be accessed on the same machine where the server is being run.
You can make the local server more accessible by clicking on the address and choosing `0.0.0.0` instead, which allows the server to be accessed from other devices on the local network. This is less secure than choosing localhost, and should be done with caution.
### Port
Jan runs on port `1337` by default. You can change the port to any other port number if needed.
### Cross-Origin Resource Sharing (CORS)
Cross-Origin Resource Sharing (CORS) manages resource access on the local server from external domains. Enabled for security by default, it can be disabled if needed.
### Verbose Server Logs
The center of the screen displays the server logs as the local server runs. This option provides extensive details about server activities.
## Start Server
Click the `Start Server` button on the top left of your screen. You will see the server log display a message such as `Server listening at http://127.0.0.1:1337`, and the `Start Server` button will change to a red `Stop Server` button.
<br></br>
![01-running-server](./assets/01-running-server.gif)
Your server is now running, and you can use the server address and port to make requests to the local server.

View File

@ -0,0 +1,102 @@
---
title: Using Jan's Built-in API Server
description: How to use Jan's built-in API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
local server,
api server,
]
---
Jan's built-in API server is compatible with [OpenAI's API](https://platform.openai.com/docs/api-reference) and can be used as a drop-in, local replacement. Follow these steps to use the API server.
## Open the API Reference
Jan contains a comprehensive API reference. This reference displays all the available API endpoints, gives you example requests and responses, and allows you to execute them in the browser.
On the top left of your screen below the red `Stop Server` button is the blue `API Reference`. Clicking this will open the reference in your browser.
<br></br>
![02-api-reference](./assets/02-api-reference.png)
Scroll through the various available endpoints to learn what options are available and try them out by executing the example requests. In addition, you can also use the [Jan API Reference](https://jan.ai/api-reference/) on the Jan website.
### Chat
In the Chat section of the API reference, you will see an example JSON request body.
<br></br>
![02-chat-example](./assets/02-chat-example.png)
With your local server running, you can click the `Try it out` button on the top left, then the blue `Execute` button below the JSON. The browser will send the example request to your server, and display the response body below.
Use the API endpoints, request and response body examples as models for your own application.
### cURL Request Example
Here is an example curl request with a local server running `tinyllama-1.1b`:
<br></br>
```json
{
"messages": [
{
"content": "You are a helpful assistant.",
"role": "system"
},
{
"content": "Hello!",
"role": "user"
}
],
"model": "tinyllama-1.1b",
"stream": true,
"max_tokens": 2048,
"stop": [
"hello"
],
"frequency_penalty": 0,
"presence_penalty": 0,
"temperature": 0.7,
"top_p": 0.95
}
'
```
### Response Body Example
```json
{
"choices": [
{
"finish_reason": null,
"index": 0,
"message": {
"content": "Hello user. What can I help you with?",
"role": "assistant"
}
}
],
"created": 1700193928,
"id": "ebwd2niJvJB1Q2Whyvkz",
"model": "_",
"object": "chat.completion",
"system_fingerprint": "_",
"usage": {
"completion_tokens": 500,
"prompt_tokens": 33,
"total_tokens": 533
}
}
```

Binary file not shown.

After

Width:  |  Height:  |  Size: 328 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.2 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.7 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 109 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 90 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 252 KiB

View File

@ -45,7 +45,9 @@ This may occur due to several reasons. Please follow these steps to resolve it:
5. If you are on Nvidia GPUs, please download [Cuda](https://developer.nvidia.com/cuda-downloads).
6. When [checking app logs](https://jan.ai/troubleshooting/how-to-get-error-logs/), if you encounter the error log `Bind address failed at 127.0.0.1:3928`, it indicates that the port used by Nitro might already be in use. Use the following commands to check the port status:
6. If you're using Linux, please ensure that your system meets the following requirements gcc 11, g++ 11, cpp 11, or higher, refer to this [link](https://jan.ai/guides/troubleshooting/gpu-not-used/#specific-requirements-for-linux) for more information.
7. When [checking app logs](https://jan.ai/troubleshooting/how-to-get-error-logs/), if you encounter the error log `Bind address failed at 127.0.0.1:3928`, it indicates that the port used by Nitro might already be in use. Use the following commands to check the port status:
<Tabs groupId="operating-systems">
<TabItem value="mac" label="macOS">

View File

@ -188,4 +188,6 @@ Troubleshooting tips:
2. If the issue persists, ensure your (V)RAM is accessible by the application. Some folks have virtual RAM and need additional configuration.
3. Get help in [Jan Discord](https://discord.gg/mY69SZaMaC).
3. If you are facing issues with the installation of RTX issues, please update the NVIDIA driver that supports CUDA 11.7 or higher. Ensure that the CUDA path is added to the environment variable.
4. Get help in [Jan Discord](https://discord.gg/mY69SZaMaC).

View File

@ -17,4 +17,8 @@ keywords:
]
---
1. You may receive an error response `Error occurred: Unexpected token '<', "<!DOCTYPE"...is not valid JSON`, when you start a chat with OpenAI models. Using a VPN may help fix the issue.
You may receive an error response `Error occurred: Unexpected token '<', "<!DOCTYPE"...is not valid JSON`, when you start a chat with OpenAI models.
1. Check that you added an OpenAI API key. You can get an API key from OpenAI's [developer platform](https://platform.openai.com/). Alternatively, we recommend you download a local model from Jan Hub, which remains free to use and runs on your own computer!
2. Using a VPN may help fix the issue.

View File

@ -0,0 +1,26 @@
---
title: Undefined Issue
slug: /troubleshooting/undefined-issue
description: Undefined issue troubleshooting guide.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
troubleshooting,
undefined issue,
]
---
You may encounter an "undefined" issue when using Jan. Here are some troubleshooting steps to help you resolve the issue.
1. Try wiping the Jan folder and reopening the Jan app and see if the issue persists.
2. If the issue persists, try to go `~/jan/extensions/@janhq/inference-nitro-extensions/dist/bin/<your-os>/nitro` and run the nitro manually and see if you get any error messages.
3. Resolve the error messages you get from the nitro and see if the issue persists.
4. Reopen the Jan app and see if the issue is resolved.
5. If the issue persists, please share with us the [app logs](https://jan.ai/troubleshooting/how-to-get-error-logs/) via [Jan Discord](https://discord.gg/mY69SZaMaC).

View File

@ -1,6 +1,6 @@
# [Release Version] QA Script
**Release Version:**
**Release Version:** v0.4.6
**Operating System:**
@ -25,10 +25,10 @@
### 3. Users uninstall app
- [ ] :key: Check that the uninstallation process removes all components of the app from the system.
- [ ] :key::warning: Check that the uninstallation process removes the app successfully from the system.
- [ ] Clean the Jan root directory and open the app to check if it creates all the necessary folders, especially models and extensions.
- [ ] When updating the app, check if the `/models` directory has any JSON files that change according to the update.
- [ ] Verify if updating the app also updates extensions correctly (test functionality changes; support notifications for necessary tests with each version related to extensions update).
- [ ] Verify if updating the app also updates extensions correctly (test functionality changes, support notifications for necessary tests with each version related to extensions update).
### 4. Users close app
@ -60,49 +60,45 @@
- [ ] :key: Ensure that the conversation thread is maintained without any loss of data upon sending multiple messages.
- [ ] Test for the ability to send different types of messages (e.g., text, emojis, code blocks).
- [ ] :key: Validate the scroll functionality in the chat window for lengthy conversations.
- [ ] Check if the user can renew responses multiple times.
- [ ] Check if the user can copy the response.
- [ ] Check if the user can delete responses.
- [ ] :warning: Test if the user deletes the message midway, then the assistant stops that response.
- [ ] :key: Check the `clear message` button works.
- [ ] :key: Check the `delete entire chat` works.
- [ ] :warning: Check if deleting all the chat retains the system prompt.
- [ ] Check if deleting all the chat retains the system prompt.
- [ ] Check the output format of the AI (code blocks, JSON, markdown, ...).
- [ ] :key: Validate that there is appropriate error handling and messaging if the assistant fails to respond.
- [ ] Test assistant's ability to maintain context over multiple exchanges.
- [ ] :key: Check the `create new chat` button works correctly
- [ ] Confirm that by changing `models` mid-thread the app can still handle it.
- [ ] Check that by changing `instructions` mid-thread the app can still handle it.
- [ ] Check the `regenerate` button renews the response.
- [ ] Check the `Instructions` update correctly after the user updates it midway.
- [ ] Check the `regenerate` button renews the response (single / multiple times).
- [ ] Check the `Instructions` update correctly after the user updates it midway (mid-thread).
### 2. Users can customize chat settings like model parameters via both the GUI & thread.json
- [ ] :key: Confirm that the chat settings options are accessible via the GUI.
- [ ] :key: Confirm that the Threads settings options are accessible.
- [ ] Test the functionality to adjust model parameters (e.g., Temperature, Top K, Top P) from the GUI and verify they are reflected in the chat behavior.
- [ ] :key: Ensure that changes can be saved and persisted between sessions.
- [ ] Validate that users can access and modify the thread.json file.
- [ ] :key: Check that changes made in thread.json are correctly applied to the chat session upon reload or restart.
- [ ] Verify if there is a revert option to go back to previous settings after changes are made.
- [ ] Test for user feedback or confirmation after saving changes to settings.
- [ ] Check the maximum and minimum limits of the adjustable parameters and how they affect the assistant's responses.
- [ ] :key: Validate user permissions for those who can change settings and persist them.
- [ ] :key: Ensure that users switch between threads with different models, the app can handle it.
### 3. Users can click on a history thread
### 3. Model dropdown
- [ ] :key: Model list should highlight recommended based on user RAM
- [ ] Model size should display (for both installed and imported models)
### 4. Users can click on a history thread
- [ ] Test the ability to click on any thread in the history panel.
- [ ] :key: Verify that clicking a thread brings up the past conversation in the main chat window.
- [ ] :key: Ensure that the selected thread is highlighted or otherwise indicated in the history panel.
- [ ] Confirm that the chat window displays the entire conversation from the selected history thread without any missing messages.
- [ ] :key: Check the performance and accuracy of the history feature when dealing with a large number of threads.
- [ ] Validate that historical threads reflect the exact state of the chat at that time, including settings.
- [ ] :key: :warning: Test the search functionality within the history panel for quick navigation.
- [ ] :key: Verify the ability to delete or clean old threads.
- [ ] :key: Confirm that changing the title of the thread updates correctly.
### 4. Users can config instructions for the assistant.
### 5. Users can config instructions for the assistant.
- [ ] Ensure there is a clear interface to input or change instructions for the assistant.
- [ ] Test if the instructions set by the user are being followed by the assistant in subsequent conversations.
- [ ] :key: Validate that changes to instructions are updated in real time and do not require a restart of the application or session.
@ -112,6 +108,8 @@
- [ ] Validate that instructions can be saved with descriptive names for easy retrieval.
- [ ] :key: Check if the assistant can handle conflicting instructions and how it resolves them.
- [ ] Ensure that instruction configurations are documented for user reference.
- [ ] :key: RAG - Users can import documents and the system should process queries about the uploaded file, providing accurate and appropriate responses in the conversation thread.
## D. Hub
@ -125,8 +123,7 @@
- [ ] Display the best model for their RAM at the top.
- [ ] :key: Ensure that models are labeled with RAM requirements and compatibility.
- [ ] :key: Validate that the download function is disabled for models that exceed the user's system capabilities.
- [ ] Test that the platform provides alternative recommendations for models not suitable due to RAM limitations.
- [ ] :warning: Test that the platform provides alternative recommendations for models not suitable due to RAM limitations.
- [ ] :key: Check the download model functionality and validate if the cancel download feature works correctly.
### 3. Users can download models via a HuggingFace URL (coming soon)
@ -139,7 +136,7 @@
- [ ] :key: Have clear instructions so users can do their own.
- [ ] :key: Ensure the new model updates after restarting the app.
- [ ] Ensure it raises clear errors for users to fix the problem while adding a new model.
- [ ] :warning: Ensure it raises clear errors for users to fix the problem while adding a new model.
### 5. Users can use the model as they want
@ -149,9 +146,13 @@
- [ ] Check if starting another model stops the other model entirely.
- [ ] Check the `Explore models` navigate correctly to the model panel.
- [ ] :key: Check when deleting a model it will delete all the files on the user's computer.
- [ ] The recommended tags should present right for the user's hardware.
- [ ] :warning: The recommended tags should be displayed correctly for the user's hardware.
- [ ] Assess that the descriptions of models are accurate and informative.
### 6. Users can Integrate With a Remote Server
- [ ] :key: Import openAI GPT model https://jan.ai/guides/using-models/integrate-with-remote-server/ and the model displayed in Hub / Thread dropdown
- [ ] Users can use the remote model properly
## E. System Monitor
### 1. Users can see disk and RAM utilization
@ -181,7 +182,7 @@
- [ ] Confirm that the application saves the theme preference and persists it across sessions.
- [ ] Validate that all elements of the UI are compatible with the theme changes and maintain legibility and contrast.
### 2. Users change the extensions
### 2. Users change the extensions [TBU]
- [ ] Confirm that the `Extensions` tab lists all available plugins.
- [ ] :key: Test the toggle switch for each plugin to ensure it enables or disables the plugin correctly.
@ -208,3 +209,19 @@
- [ ] :key: Test that the application prevents the installation of incompatible or corrupt plugin files.
- [ ] :key: Check that the user can uninstall or disable custom plugins as easily as pre-installed ones.
- [ ] Verify that the application's performance remains stable after the installation of custom plugins.
### 5. Advanced Settings
- [ ] Attempt to test downloading a model from the hub using **HTTP Proxy** [guideline](https://github.com/janhq/jan/pull/1562)
- [ ] Users can move **Jan data folder**
- [ ] Users can click on Reset button to **factory reset** app settings to its original state & delete all usage data.
## G. Local API server
### 1. Local Server Usage with Server Options
- [ ] :key: Explore API Reference: Swagger API for sending/receiving requests
- [ ] Use default server option
- [ ] Configure and use custom server options
- [ ] Test starting/stopping the local API server with different Model/Model settings
- [ ] Server logs captured with correct Server Options provided
- [ ] Verify functionality of Open logs/Clear feature
- [ ] Ensure that threads and other functions impacting the model are disabled while the local server is running

View File

@ -67,20 +67,31 @@ paths:
x-codeSamples:
- lang: cURL
source: |
curl http://localhost:1337/v1/chat/completions \
-H "Content-Type: application/json" \
curl -X 'POST' \
'http://localhost:1337/v1/chat/completions' \
-H 'accept: application/json' \
-H 'Content-Type: application/json' \
-d '{
"model": "tinyllama-1.1b",
"messages": [
{
"role": "system",
"content": "You are a helpful assistant."
"content": "You are a helpful assistant.",
"role": "system"
},
{
"role": "user",
"content": "Hello!"
"content": "Hello!",
"role": "user"
}
]
],
"model": "tinyllama-1.1b",
"stream": true,
"max_tokens": 2048,
"stop": [
"hello"
],
"frequency_penalty": 0,
"presence_penalty": 0,
"temperature": 0.7,
"top_p": 0.95
}'
/models:
get:
@ -103,7 +114,9 @@ paths:
x-codeSamples:
- lang: cURL
source: |
curl http://localhost:1337/v1/models
curl -X 'GET' \
'http://localhost:1337/v1/models' \
-H 'accept: application/json'
"/models/download/{model_id}":
get:
operationId: downloadModel
@ -131,7 +144,9 @@ paths:
x-codeSamples:
- lang: cURL
source: |
curl -X POST http://localhost:1337/v1/models/download/{model_id}
curl -X 'GET' \
'http://localhost:1337/v1/models/download/{model_id}' \
-H 'accept: application/json'
"/models/{model_id}":
get:
operationId: retrieveModel
@ -162,7 +177,9 @@ paths:
x-codeSamples:
- lang: cURL
source: |
curl http://localhost:1337/v1/models/{model_id}
curl -X 'GET' \
'http://localhost:1337/v1/models/{model_id}' \
-H 'accept: application/json'
delete:
operationId: deleteModel
tags:
@ -191,7 +208,9 @@ paths:
x-codeSamples:
- lang: cURL
source: |
curl -X DELETE http://localhost:1337/v1/models/{model_id}
curl -X 'DELETE' \
'http://localhost:1337/v1/models/{model_id}' \
-H 'accept: application/json'
/threads:
post:
operationId: createThread

View File

@ -316,4 +316,4 @@ components:
deleted:
type: boolean
description: Indicates whether the assistant was successfully deleted.
example: true
example: true

View File

@ -188,4 +188,4 @@ components:
total_tokens:
type: integer
example: 533
description: Total number of tokens used
description: Total number of tokens used

View File

@ -1,3 +1,4 @@
---
components:
schemas:
MessageObject:
@ -75,7 +76,7 @@ components:
example: msg_abc123
object:
type: string
description: "Type of the object, indicating it's a thread message."
description: Type of the object, indicating it's a thread message.
default: thread.message
created_at:
type: integer
@ -88,7 +89,7 @@ components:
example: thread_abc123
role:
type: string
description: "Role of the sender, either 'user' or 'assistant'."
description: Role of the sender, either 'user' or 'assistant'.
example: user
content:
type: array
@ -97,7 +98,7 @@ components:
properties:
type:
type: string
description: "Type of content, e.g., 'text'."
description: Type of content, e.g., 'text'.
example: text
text:
type: object
@ -110,21 +111,21 @@ components:
type: array
items:
type: string
description: "Annotations for the text content, if any."
description: Annotations for the text content, if any.
example: []
file_ids:
type: array
items:
type: string
description: "Array of file IDs associated with the message, if any."
description: Array of file IDs associated with the message, if any.
example: []
assistant_id:
type: string
description: "Identifier of the assistant involved in the message, if applicable."
description: Identifier of the assistant involved in the message, if applicable.
example: null
run_id:
type: string
description: "Run ID associated with the message, if applicable."
description: Run ID associated with the message, if applicable.
example: null
metadata:
type: object
@ -139,7 +140,7 @@ components:
example: msg_abc123
object:
type: string
description: "Type of the object, indicating it's a thread message."
description: Type of the object, indicating it's a thread message.
example: thread.message
created_at:
type: integer
@ -152,7 +153,7 @@ components:
example: thread_abc123
role:
type: string
description: "Role of the sender, either 'user' or 'assistant'."
description: Role of the sender, either 'user' or 'assistant'.
example: user
content:
type: array
@ -161,7 +162,7 @@ components:
properties:
type:
type: string
description: "Type of content, e.g., 'text'."
description: Type of content, e.g., 'text'.
example: text
text:
type: object
@ -174,21 +175,21 @@ components:
type: array
items:
type: string
description: "Annotations for the text content, if any."
description: Annotations for the text content, if any.
example: []
file_ids:
type: array
items:
type: string
description: "Array of file IDs associated with the message, if any."
description: Array of file IDs associated with the message, if any.
example: []
assistant_id:
type: string
description: "Identifier of the assistant involved in the message, if applicable."
description: Identifier of the assistant involved in the message, if applicable.
example: null
run_id:
type: string
description: "Run ID associated with the message, if applicable."
description: Run ID associated with the message, if applicable.
example: null
metadata:
type: object
@ -199,7 +200,7 @@ components:
properties:
object:
type: string
description: "Type of the object, indicating it's a list."
description: Type of the object, indicating it's a list.
default: list
data:
type: array
@ -226,7 +227,7 @@ components:
example: msg_abc123
object:
type: string
description: "Type of the object, indicating it's a thread message."
description: Type of the object, indicating it's a thread message.
example: thread.message
created_at:
type: integer
@ -239,7 +240,7 @@ components:
example: thread_abc123
role:
type: string
description: "Role of the sender, either 'user' or 'assistant'."
description: Role of the sender, either 'user' or 'assistant'.
example: user
content:
type: array
@ -248,7 +249,7 @@ components:
properties:
type:
type: string
description: "Type of content, e.g., 'text'."
description: Type of content, e.g., 'text'.
text:
type: object
properties:
@ -260,20 +261,20 @@ components:
type: array
items:
type: string
description: "Annotations for the text content, if any."
description: Annotations for the text content, if any.
file_ids:
type: array
items:
type: string
description: "Array of file IDs associated with the message, if any."
description: Array of file IDs associated with the message, if any.
example: []
assistant_id:
type: string
description: "Identifier of the assistant involved in the message, if applicable."
description: Identifier of the assistant involved in the message, if applicable.
example: null
run_id:
type: string
description: "Run ID associated with the message, if applicable."
description: Run ID associated with the message, if applicable.
example: null
metadata:
type: object
@ -309,4 +310,4 @@ components:
data:
type: array
items:
$ref: "#/components/schemas/MessageFileObject"
$ref: "#/components/schemas/MessageFileObject"

View File

@ -18,114 +18,82 @@ components:
Model:
type: object
properties:
type:
source_url:
type: string
default: model
description: The type of the object.
version:
type: string
default: "1"
description: The version number of the model.
format: uri
description: URL to the source of the model.
example: https://huggingface.co/janhq/trinity-v1.2-GGUF/resolve/main/trinity-v1.2.Q4_K_M.gguf
id:
type: string
description: Unique identifier used in chat-completions model_name, matches
description:
Unique identifier used in chat-completions model_name, matches
folder name.
example: zephyr-7b
example: trinity-v1.2-7b
object:
type: string
example: model
name:
type: string
description: Name of the model.
example: Zephyr 7B
owned_by:
example: Trinity-v1.2 7B Q4
version:
type: string
description: Compatibility field for OpenAI.
default: ""
created:
type: integer
format: int64
description: Unix timestamp representing the creation time.
default: "1.0"
description: The version number of the model.
description:
type: string
description: Description of the model.
state:
type: string
enum:
- null
- downloading
- ready
- starting
- stopping
description: Current state of the model.
example:
Trinity is an experimental model merge using the Slerp method.
Recommended for daily assistance purposes.
format:
type: string
description: State format of the model, distinct from the engine.
example: ggufv3
source:
type: array
items:
type: object
properties:
url:
format: uri
description: URL to the source of the model.
example: https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf
filename:
type: string
description: Filename of the model.
example: zephyr-7b-beta.Q4_K_M.gguf
example: gguf
settings:
type: object
properties:
ctx_len:
type: string
type: integer
description: Context length.
example: "4096"
ngl:
example: 4096
prompt_template:
type: string
description: Number of layers.
example: "100"
embedding:
type: string
description: Indicates if embedding is enabled.
example: "true"
n_parallel:
type: string
description: Number of parallel processes.
example: "4"
example: "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
additionalProperties: false
parameters:
type: object
properties:
temperature:
type: string
description: Temperature setting for the model.
example: "0.7"
token_limit:
type: string
description: Token limit for the model.
example: "4096"
top_k:
type: string
description: Top-k setting for the model.
example: "0"
example: 0.7
top_p:
type: string
description: Top-p setting for the model.
example: "1"
example: 0.95
stream:
type: string
description: Indicates if streaming is enabled.
example: "true"
example: true
max_tokens:
example: 4096
stop:
example: []
frequency_penalty:
example: 0
presence_penalty:
example: 0
additionalProperties: false
metadata:
type: object
description: Additional metadata.
assets:
type: array
items:
author:
type: string
description: List of assets related to the model.
required:
- source
example: Jan
tags:
example:
- 7B
- Merged
- Featured
size:
        example: 4370000000
cover:
example: https://raw.githubusercontent.com/janhq/jan/main/models/trinity-v1.2-7b/cover.png
engine:
example: nitro
ModelObject:
type: object
properties:
@ -133,7 +101,7 @@ components:
type: string
description: |
The identifier of the model.
example: zephyr-7b
example: trinity-v1.2-7b
object:
type: string
description: |
@ -153,197 +121,89 @@ components:
GetModelResponse:
type: object
properties:
source_url:
type: string
format: uri
description: URL to the source of the model.
example: https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf
id:
type: string
description: The identifier of the model.
example: zephyr-7b
description:
Unique identifier used in chat-completions model_name, matches
folder name.
example: mistral-ins-7b-q4
object:
type: string
description: Type of the object, indicating it's a model.
default: model
created:
type: integer
format: int64
description: Unix timestamp representing the creation time of the model.
owned_by:
example: model
name:
type: string
description: The entity that owns the model.
example: _
state:
description: Name of the model.
example: Mistral Instruct 7B Q4
version:
type: string
enum:
- not_downloaded
- downloaded
- running
- stopped
description: The current state of the model.
source:
type: array
items:
type: object
properties:
url:
format: uri
description: URL to the source of the model.
example: https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf
filename:
type: string
description: Filename of the model.
example: zephyr-7b-beta.Q4_K_M.gguf
engine_parameters:
type: object
properties:
pre_prompt:
type: string
description: Predefined prompt used for setting up internal configurations.
default: ""
example: Initial setup complete.
system_prompt:
type: string
description: Prefix used for system-level prompts.
default: "SYSTEM: "
user_prompt:
type: string
description: Prefix used for user prompts.
default: "USER: "
ai_prompt:
type: string
description: Prefix used for assistant prompts.
default: "ASSISTANT: "
ngl:
type: integer
description: Number of neural network layers loaded onto the GPU for
acceleration.
minimum: 0
maximum: 100
default: 100
example: 100
ctx_len:
type: integer
description: Context length for model operations, varies based on the specific
model.
minimum: 128
maximum: 4096
default: 4096
example: 4096
n_parallel:
type: integer
description: Number of parallel operations, relevant when continuous batching is
enabled.
minimum: 1
maximum: 10
default: 1
example: 4
cont_batching:
type: boolean
description: Indicates if continuous batching is used for processing.
default: false
example: false
cpu_threads:
type: integer
description: Number of threads allocated for CPU-based inference.
minimum: 1
example: 8
embedding:
type: boolean
description: Indicates if embedding layers are enabled in the model.
default: true
example: true
model_parameters:
default: "1.0"
description: The version number of the model.
description:
type: string
description: Description of the model.
example:
Trinity is an experimental model merge using the Slerp method.
Recommended for daily assistance purposes.
format:
type: string
description: State format of the model, distinct from the engine.
example: gguf
settings:
type: object
properties:
ctx_len:
type: integer
description: Maximum context length the model can handle.
minimum: 0
maximum: 4096
default: 4096
description: Context length.
example: 4096
ngl:
type: integer
description: Number of layers in the neural network.
minimum: 1
maximum: 100
default: 100
example: 100
embedding:
type: boolean
description: Indicates if embedding layers are used.
default: true
example: true
n_parallel:
type: integer
description: Number of parallel processes the model can run.
minimum: 1
maximum: 10
default: 1
example: 4
prompt_template:
type: string
example: "[INST] {prompt} [/INST]"
additionalProperties: false
parameters:
type: object
properties:
temperature:
type: number
description: Controls randomness in model's responses. Higher values lead to
more random responses.
minimum: 0
maximum: 2
default: 0.7
example: 0.7
token_limit:
type: integer
description: Maximum number of tokens the model can generate in a single
response.
minimum: 1
maximum: 4096
default: 4096
example: 4096
top_k:
type: integer
description: Limits the model to consider only the top k most likely next tokens
at each step.
minimum: 0
maximum: 100
default: 0
example: 0
top_p:
type: number
description: Nucleus sampling parameter. The model considers the smallest set of
tokens whose cumulative probability exceeds the top_p value.
minimum: 0
maximum: 1
default: 1
example: 1
example: 0.95
stream:
example: true
max_tokens:
example: 4096
stop:
example: []
frequency_penalty:
example: 0
presence_penalty:
example: 0
additionalProperties: false
metadata:
type: object
properties:
engine:
type: string
description: The engine used by the model.
enum:
- nitro
- openai
- hf_inference
quantization:
type: string
description: Quantization parameter of the model.
example: Q3_K_L
size:
type: string
description: Size of the model.
example: 7B
required:
- id
- object
- created
- owned_by
- state
- source
- parameters
- metadata
author:
type: string
example: MistralAI
tags:
example:
- 7B
- Featured
- Foundation Model
size:
        example: 4370000000
cover:
example: https://raw.githubusercontent.com/janhq/jan/main/models/mistral-ins-7b-q4/cover.png
engine:
example: nitro
DeleteModelResponse:
type: object
properties:
id:
type: string
description: The identifier of the model that was deleted.
example: model-zephyr-7B
example: mistral-ins-7b-q4
object:
type: string
description: Type of the object, indicating it's a model.

View File

@ -142,7 +142,7 @@ components:
example: Jan
instructions:
type: string
description: |
description: >
The instruction of assistant, defaults to "Be my grammar corrector"
model:
type: object
@ -224,4 +224,4 @@ components:
deleted:
type: boolean
description: Indicates whether the thread was successfully deleted.
example: true
example: true

View File

@ -1,8 +0,0 @@
{
"semi": false,
"singleQuote": true,
"quoteProps": "consistent",
"trailingComma": "es5",
"endOfLine": "auto",
"plugins": ["prettier-plugin-tailwindcss"]
}

View File

@ -1,173 +0,0 @@
import { app, ipcMain, dialog, shell } from 'electron'
import { join, basename, relative as getRelative, isAbsolute } from 'path'
import { WindowManager } from './../managers/window'
import { getResourcePath } from './../utils/path'
import { AppRoute, AppConfiguration } from '@janhq/core'
import { ServerConfig, startServer, stopServer } from '@janhq/server'
import {
ModuleManager,
getJanDataFolderPath,
getJanExtensionsPath,
init,
log,
logServer,
getAppConfigurations,
updateAppConfiguration,
} from '@janhq/core/node'
/**
 * Registers all application-level IPC handlers on `ipcMain`.
 * Must be called once from the Electron main process during startup;
 * renderer processes invoke these routes via `ipcRenderer.invoke`.
 */
export function handleAppIPCs() {
  /**
   * Handles the "openAppDirectory" IPC message by opening the app's user data directory.
   * The `shell.openPath` method is used to open the directory in the user's default file explorer.
   * @param _event - The IPC event object.
   */
  ipcMain.handle(AppRoute.openAppDirectory, async (_event) => {
    shell.openPath(getJanDataFolderPath())
  })

  /**
   * Opens a URL in the user's default browser.
   * @param _event - The IPC event object.
   * @param url - The URL to open.
   */
  ipcMain.handle(AppRoute.openExternalUrl, async (_event, url) => {
    shell.openExternal(url)
  })

  /**
   * Reveals the given filesystem path in the OS file explorer.
   * NOTE(review): the original comment said "Opens a URL in the user's default
   * browser" (copy-paste from the handler above), but this handler calls
   * `shell.openPath`, which opens a path on disk — comment corrected.
   * @param _event - The IPC event object.
   * @param url - The filesystem path to open.
   */
  ipcMain.handle(AppRoute.openFileExplore, async (_event, url) => {
    shell.openPath(url)
  })

  /**
   * Joins multiple paths together, respect to the current OS.
   */
  ipcMain.handle(AppRoute.joinPath, async (_event, paths: string[]) =>
    join(...paths)
  )

  /**
   * Checks if the given path is a subdirectory of the given directory.
   *
   * @param _event - The IPC event object.
   * @param from - The directory to check against.
   * @param to - The path to check.
   *
   * @returns {Promise<boolean>} - A promise that resolves with the result.
   */
  ipcMain.handle(
    AppRoute.isSubdirectory,
    async (_event, from: string, to: string) => {
      const relative = getRelative(from, to)
      // `to` is inside `from` when the relative path is non-empty (the two
      // paths differ), does not climb out via '..', and is not absolute
      // (which `path.relative` yields on a different root/drive).
      // The original computed `relative && ...` (type '' | boolean) and then
      // special-cased `=== ''`; this returns a plain boolean with the same
      // result for every input.
      return (
        relative !== '' && !relative.startsWith('..') && !isAbsolute(relative)
      )
    }
  )

  /**
   * Retrieve basename from given path, respect to the current OS.
   */
  ipcMain.handle(AppRoute.baseName, async (_event, path: string) =>
    basename(path)
  )

  /**
   * Start Jan API Server.
   * When the app is packaged, the OpenAPI schema and base directory are
   * resolved from the bundled resources; in development they are left
   * undefined so the server falls back to its own defaults.
   */
  ipcMain.handle(AppRoute.startServer, async (_event, configs?: ServerConfig) =>
    startServer({
      host: configs?.host,
      port: configs?.port,
      isCorsEnabled: configs?.isCorsEnabled,
      isVerboseEnabled: configs?.isVerboseEnabled,
      schemaPath: app.isPackaged
        ? join(getResourcePath(), 'docs', 'openapi', 'jan.yaml')
        : undefined,
      baseDir: app.isPackaged
        ? join(getResourcePath(), 'docs', 'openapi')
        : undefined,
    })
  )

  /**
   * Stop Jan API Server.
   */
  ipcMain.handle(AppRoute.stopServer, stopServer)

  /**
   * Relaunches the app in production - reload window in development.
   * In development, cached extension modules are evicted from `require.cache`
   * and the extension framework is re-initialized before reloading the window.
   * @param _event - The IPC event object.
   */
  ipcMain.handle(AppRoute.relaunch, async (_event) => {
    ModuleManager.instance.clearImportedModules()

    if (app.isPackaged) {
      app.relaunch()
      app.exit()
    } else {
      for (const modulePath in ModuleManager.instance.requiredModules) {
        delete require.cache[
          require.resolve(join(getJanExtensionsPath(), modulePath))
        ]
      }

      init({
        // Function to check from the main process that user wants to install a extension
        confirmInstall: async (_extensions: string[]) => {
          return true
        },
        // Path to install extension to
        extensionsPath: getJanExtensionsPath(),
      })

      WindowManager.instance.currentWindow?.reload()
    }
  })

  /**
   * Log message to app log file.
   */
  ipcMain.handle(AppRoute.log, async (_event, message) => log(message))

  /**
   * Log message to the server log file.
   */
  ipcMain.handle(AppRoute.logServer, async (_event, message) =>
    logServer(message)
  )

  /**
   * Shows a native folder-picker dialog attached to the main window.
   * @returns the selected directory path, or `undefined` when no main
   * window exists or the user cancels the dialog.
   */
  ipcMain.handle(AppRoute.selectDirectory, async () => {
    const mainWindow = WindowManager.instance.currentWindow
    if (!mainWindow) {
      console.error('No main window found')
      return
    }
    const { canceled, filePaths } = await dialog.showOpenDialog(mainWindow, {
      title: 'Select a folder',
      buttonLabel: 'Select Folder',
      properties: ['openDirectory', 'createDirectory'],
    })
    return canceled ? undefined : filePaths[0]
  })

  /**
   * Returns the persisted application configuration.
   */
  ipcMain.handle(AppRoute.getAppConfigurations, async () =>
    getAppConfigurations()
  )

  /**
   * Persists the given application configuration.
   */
  ipcMain.handle(
    AppRoute.updateAppConfiguration,
    async (_event, appConfiguration: AppConfiguration) => {
      await updateAppConfiguration(appConfiguration)
    }
  )
}

Some files were not shown because too many files have changed in this diff Show More