Merge branch 'main' into 810-docs-add-modeljson-and-revamp-models-specs-page

Commit 312a1c4ac6, authored by automaticcat on 2023-12-04 15:38:04 +07:00, committed by GitHub.
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
164 changed files with 2527 additions and 1615 deletions

@@ -1,23 +1,18 @@
 name: "Auto Label Conventional Commits"
 on:
-  issues:
-    types:
-      - reopened
-      - opened
   pull_request:
     types:
       - reopened
       - opened
 jobs:
-  label_issues:
+  label_prs:
     runs-on: ubuntu-latest
     permissions:
-      issues: write
       pull-requests: write
     steps:
       - name: Checkout
         uses: actions/checkout@v4
-      - name: Label issues
+      - name: Label PRs
         run: |
           ISSUE_TITLE=$(gh issue view ${{ github.event.number }} --json title -q ".title")
           case "$ISSUE_TITLE" in

@@ -1,4 +1,4 @@
-name: Jan Build Electron App Nightly
+name: Jan Build Electron App Nightly or Manual
 on:
   schedule:
@@ -173,8 +173,9 @@ jobs:
           name: jan-linux-amd64-${{ steps.version_update.outputs.new_version }}.deb
           path: ./electron/dist/*.deb
-  noti-discord:
+  noti-discord-nightly:
     needs: [build-macos, build-windows-x64, build-linux-x64]
+    if: github.event_name == 'schedule'
     runs-on: ubuntu-latest
     steps:
       - name: Notify Discord
@@ -183,3 +184,15 @@ jobs:
           args: "Nightly build artifact: https://github.com/janhq/jan/actions/runs/{{ GITHUB_RUN_ID }}"
         env:
           DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
+  noti-discord-manual:
+    needs: [build-macos, build-windows-x64, build-linux-x64]
+    if: github.event_name == 'workflow_dispatch'
+    runs-on: ubuntu-latest
+    steps:
+      - name: Notify Discord
+        uses: Ilshidur/action-discord@master
+        with:
+          args: "Manual build artifact: https://github.com/janhq/jan/actions/runs/{{ GITHUB_RUN_ID }}"
+        env:
+          DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
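The workflow_dispatch path added above can also be exercised from a script. A minimal TypeScript sketch, not part of this commit: the token variable and ref are assumptions, and the workflow file name is taken from the contributing docs below.

// Sketch only: trigger the build workflow by hand via GitHub's
// workflow_dispatch REST endpoint. GITHUB_TOKEN is assumed to hold a
// token with Actions write access to janhq/jan.
async function dispatchManualBuild(ref = 'main'): Promise<void> {
  const res = await fetch(
    'https://api.github.com/repos/janhq/jan/actions/workflows/jan-electron-build-nightly.yml/dispatches',
    {
      method: 'POST',
      headers: {
        Accept: 'application/vnd.github+json',
        Authorization: `Bearer ${process.env.GITHUB_TOKEN}`,
      },
      body: JSON.stringify({ ref }), // branch to build from
    }
  )
  if (!res.ok) throw new Error(`dispatch failed: ${res.status}`)
}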

.gitignore
@@ -2,7 +2,6 @@
 .env
 # Jan inference
-models/**
 error.log
 node_modules
 *.tgz
@@ -11,6 +10,7 @@ dist
 build
 .DS_Store
 electron/renderer
+electron/models
 package-lock.json
 *.log

@@ -55,23 +55,17 @@ As Jan is development mode, you might get stuck on a broken build.
 To reset your installation:
-1. Delete Jan from your `/Applications` folder
-1. Delete Application data:
-   ```sh
-   # Newer versions
-   rm -rf /Users/$(whoami)/Library/Application\ Support/jan
-   # Versions 0.2.0 and older
-   rm -rf /Users/$(whoami)/Library/Application\ Support/jan-electron
-   ```
-1. Clear Application cache:
-   ```sh
-   rm -rf /Users/$(whoami)/Library/Caches/jan*
-   ```
-1. Use the following commands to remove any dangling backend processes:
+1. **Remove Jan from your Applications folder and Cache folder**
+   ```bash
+   make clean
+   ```
+   This will remove all build artifacts and cached files:
+   - Delete Jan from your `/Applications` folder
+   - Clear Application cache in `/Users/$(whoami)/Library/Caches/jan`
+2. Use the following commands to remove any dangling backend processes:
    ```sh
    ps aux | grep nitro
@@ -124,6 +118,22 @@ make build
 This will build the app MacOS m1/m2 for production (with code signing already done) and put the result in `dist` folder.
+## Nightly Build
+Nightly build is a process where the software is built automatically every night. This helps in detecting and fixing bugs early in the development cycle. The process for this project is defined in [`.github/workflows/jan-electron-build-nightly.yml`](.github/workflows/jan-electron-build-nightly.yml)
+You can join our Discord server [here](https://discord.gg/FTk2MvZwJH) and go to channel [github-jan](https://discordapp.com/channels/1107178041848909847/1148534730359308298) to monitor the build process.
+The nightly build is triggered at 2:00 AM UTC every day.
+The nightly build can be downloaded from the url notified in the Discord channel. Please access the url from the browser and download the build artifacts from there.
+## Manual Build
+Manual build is a process where the software is built manually by the developers. This is usually done when a new feature is implemented or a bug is fixed. The process for this project is defined in [`.github/workflows/jan-electron-build-nightly.yml`](.github/workflows/jan-electron-build-nightly.yml)
+It is similar to the nightly build process, except that it is triggered manually by the developers.
 ## Acknowledgements
 Jan builds on top of other open-source projects:

@@ -54,6 +54,9 @@ const getUserSpace = (): Promise<string> => window.core.api?.getUserSpace();
 const openFileExplorer: (path: string) => Promise<any> = (path) =>
   window.core.api?.openFileExplorer(path);
+const getResourcePath: () => Promise<string> = () =>
+  window.core.api?.getResourcePath();
 /**
  * Register extension point function type definition
  */
@@ -74,4 +77,5 @@ export {
   appDataPath,
   getUserSpace,
   openFileExplorer,
+  getResourcePath,
 };
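For orientation, a minimal usage sketch of the new API from extension code, assuming @janhq/core re-exports it as the export list above indicates:

import { getResourcePath } from '@janhq/core'

async function locateBundledAssets(): Promise<void> {
  // Resolves to app.asar.unpacked next to the packaged app, or to the
  // repo root in development (see electron/utils/path later in this commit).
  const resourcePath = await getResourcePath()
  console.debug('resources at', resourcePath)
}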

@@ -1,5 +1,5 @@
 import { BaseExtension } from "../extension";
-import { Model, ModelCatalog } from "../types/index";
+import { Model } from "../types/index";
 /**
  * Model extension for managing models.
@@ -43,5 +43,5 @@ export abstract class ModelExtension extends BaseExtension {
  * Gets a list of configured models.
  * @returns A Promise that resolves with an array of configured models.
  */
-  abstract getConfiguredModels(): Promise<ModelCatalog[]>;
+  abstract getConfiguredModels(): Promise<Model[]>;
 }

@@ -62,6 +62,9 @@ const deleteFile: (path: string) => Promise<any> = (path) =>
 const appendFile: (path: string, data: string) => Promise<any> = (path, data) =>
   window.core.api?.appendFile(path, data);
+const copyFile: (src: string, dest: string) => Promise<any> = (src, dest) =>
+  window.core.api?.copyFile(src, dest);
 /**
  * Reads a file line by line.
  * @param {string} path - The path of the file to read.
@@ -80,4 +83,5 @@ export const fs = {
   deleteFile,
   appendFile,
   readLineByLine,
+  copyFile,
 };
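A short usage sketch for the new copyFile bridge; the paths are illustrative, and the call proxies to the main-process IPC handler shown later in this commit:

import { fs } from '@janhq/core'

async function seedModels(): Promise<void> {
  // Copies a bundled models folder into the user's Jan directory.
  // Source and destination here are placeholders.
  await fs.copyFile('/resources/models', '/home/user/jan/models')
}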

@@ -143,6 +143,7 @@ export type ThreadAssistantInfo = {
   assistant_id: string;
   assistant_name: string;
   model: ModelInfo;
+  instructions?: string;
 };
 /**
@@ -180,7 +181,7 @@ export interface Model {
   /**
    * The version of the model.
    */
-  version: string;
+  version: number;
   /**
    * The model download source. It can be an external url or a local filepath.
@@ -197,12 +198,6 @@ export interface Model {
    */
   name: string;
-  /**
-   * The organization that owns the model (you!)
-   * Default: "you"
-   */
-  owned_by: string;
   /**
    * The Unix timestamp (in seconds) for when the model was created
   */
@@ -236,11 +231,16 @@ export interface Model {
   metadata: ModelMetadata;
 }
+export type ModelMetadata = {
+  author: string;
+  tags: string[];
+  size: number;
+};
 /**
  * The Model transition states.
  */
 export enum ModelState {
-  ToDownload = "to_download",
   Downloading = "downloading",
   Ready = "ready",
   Running = "running",
@@ -250,65 +250,27 @@ export enum ModelState {
  * The available model settings.
  */
 export type ModelSettingParams = {
-  ctx_len: number;
-  ngl: number;
-  embedding: boolean;
-  n_parallel: number;
+  ctx_len?: number;
+  ngl?: number;
+  embedding?: boolean;
+  n_parallel?: number;
+  system_prompt?: string;
+  user_prompt?: string;
+  ai_prompt?: string;
 };
 /**
  * The available model runtime parameters.
  */
 export type ModelRuntimeParam = {
-  temperature: number;
-  token_limit: number;
-  top_k: number;
-  top_p: number;
-  stream: boolean;
+  temperature?: number;
+  token_limit?: number;
+  top_k?: number;
+  top_p?: number;
+  stream?: boolean;
+  max_tokens?: number;
 };
-/**
- * The metadata of the model.
- */
-export type ModelMetadata = {
-  engine: string;
-  quantization: string;
-  size: number;
-  binaries: string[];
-  maxRamRequired: number;
-  author: string;
-  avatarUrl: string;
-};
-/**
- * Model type of the presentation object which will be presented to the user
- * @data_transfer_object
- */
-export interface ModelCatalog {
-  /** The unique id of the model.*/
-  id: string;
-  /** The name of the model.*/
-  name: string;
-  /** The avatar url of the model.*/
-  avatarUrl: string;
-  /** The short description of the model.*/
-  shortDescription: string;
-  /** The long description of the model.*/
-  longDescription: string;
-  /** The author name of the model.*/
-  author: string;
-  /** The version of the model.*/
-  version: string;
-  /** The origin url of the model repo.*/
-  modelUrl: string;
-  /** The timestamp indicating when this model was released.*/
-  releaseDate: number;
-  /** The tags attached to the model description **/
-  tags: string[];
-  /** The available versions of this model to download. */
-  availableVersions: Model[];
-}
 /**
  * Assistant type defines the shape of an assistant object.
  * @stored
@@ -327,13 +289,13 @@ export type Assistant = {
   /** Represents the name of the object. */
   name: string;
   /** Represents the description of the object. */
-  description: string;
+  description?: string;
   /** Represents the model of the object. */
   model: string;
   /** Represents the instructions for the object. */
-  instructions: string;
+  instructions?: string;
   /** Represents the tools associated with the object. */
-  tools: any;
+  tools?: any;
   /** Represents the file identifiers associated with the object. */
   file_ids: string[];
   /** Represents the metadata of the object. */
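To make the revised shape concrete, here is a hypothetical model.json entry restricted to the fields visible in this diff; it is typed as Partial<Model> because the full interface is not shown here, and every value is illustrative:

const exampleModelJson: Partial<Model> = {
  object: 'model',
  version: 1, // now a number, not a string
  source_url: 'https://example.com/example-7b.q4.gguf', // placeholder URL
  id: 'example-7b-q4',
  name: 'Example 7B Q4',
  created: 0,
  description: 'Illustrative entry only.',
  settings: { ctx_len: 2048, ngl: 100 }, // all setting fields now optional
  parameters: { temperature: 0.7, stream: true },
  metadata: { author: 'Example Author', tags: ['7B'], size: 4_000_000_000 },
}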

@@ -1,5 +1,7 @@
 ---
 title: About Jan
+description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
+keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
 ---
 Jan believes in the need for an open source AI ecosystem, and are building the infra and tooling to allow open source AIs to compete on a level playing field with proprietary ones.

The identical description and keywords front-matter lines are added to 35 further docs pages: Community, Build an Assistant, Extending Jan, Model Management, Build a Module, API Server, Build a Theme, Build a Tool, Engineering, Onboarding Checklist (slug: /handbook), Hardware Examples, From Source, Linux, Mac, Overview, Windows, How Jan Works, Introduction (slug: /docs), Quickstart, Architecture (slug: /specs), Assistants (slug: /specs/assistants), Chats (slug: /specs/chats), Files (slug: /specs/files), Fine-tuning (slug: /specs/finetuning), Messages (slug: /specs/messages), Models (slug: /specs/models), Prompts (slug: /specs/prompts), Threads (slug: /specs/threads), File-based Approach, Jan (Assistant), Chat (slug: /specs/chat), Hub (slug: /specs/hub), Settings (slug: /specs/settings), System Monitor (slug: /specs/system-monitor), and User Interface.

@@ -38,6 +38,8 @@ const config = {
     mermaid: true,
   },
+  noIndex: false,
   // Plugins we added
   plugins: [
     "docusaurus-plugin-sass",
@@ -140,15 +142,44 @@ const config = {
       metadata: [
         { name: 'description', content: 'Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.' },
         { name: 'keywords', content: 'Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee' },
+        { name: 'robots', content: 'index, follow' },
         { property: 'og:title', content: 'Run your own AI | Jan' },
         { property: 'og:description', content: 'Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.' },
         { property: 'og:image', content: 'https://jan.ai/img/jan-social-card.png' },
+        { property: 'og:type', content: 'website' },
         { property: 'twitter:card', content: 'summary_large_image' },
         { property: 'twitter:site', content: '@janhq_' },
         { property: 'twitter:title', content: 'Run your own AI | Jan' },
         { property: 'twitter:description', content: 'Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.' },
         { property: 'twitter:image', content: 'https://jan.ai/img/jan-social-card.png' },
       ],
+      headTags: [
+        // Declare a <link> preconnect tag
+        {
+          tagName: 'link',
+          attributes: {
+            rel: 'preconnect',
+            href: 'https://jan.ai/',
+          },
+        },
+        // Declare some json-ld structured data
+        {
+          tagName: 'script',
+          attributes: {
+            type: 'application/ld+json',
+          },
+          innerHTML: JSON.stringify({
+            '@context': 'https://schema.org/',
+            '@type': 'localAI',
+            name: 'Jan',
+            description: "Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.",
+            keywords: "Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee",
+            applicationCategory: "BusinessApplication",
+            operatingSystem: "Multiple",
+            url: 'https://jan.ai/',
+          }),
+        },
+      ],
       navbar: {
         title: "Jan",
         logo: {

@@ -19,7 +19,7 @@ export default function Home() {
     <AnnoncementBanner />
     <Layout
       title={`${siteConfig.tagline}`}
-      description="Jan runs Large Language Models locally on Windows, Mac and Linux. Available on Desktop and Cloud-Native."
+      description="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server."
     >
       <main className="bg-gray-50 dark:bg-gray-950/95 relative">
         <div className="relative">

docs/static/robots.txt (new file)
@@ -0,0 +1,2 @@
+User-Agent: *
+Allow: /

@@ -1,9 +1,9 @@
 import { app, ipcMain, shell } from 'electron'
-import { ModuleManager } from '../managers/module'
+import { ModuleManager } from './../managers/module'
 import { join } from 'path'
-import { ExtensionManager } from '../managers/extension'
-import { WindowManager } from '../managers/window'
-import { userSpacePath } from '../utils/path'
+import { ExtensionManager } from './../managers/extension'
+import { WindowManager } from './../managers/window'
+import { userSpacePath } from './../utils/path'
 export function handleAppIPCs() {
   /**

@@ -1,9 +1,10 @@
 import { app, ipcMain } from 'electron'
-import { DownloadManager } from '../managers/download'
+import { DownloadManager } from './../managers/download'
 import { resolve, join } from 'path'
-import { WindowManager } from '../managers/window'
+import { WindowManager } from './../managers/window'
 import request from 'request'
-import { createWriteStream, unlink } from 'fs'
+import { createWriteStream } from 'fs'
+import { getResourcePath } from './../utils/path'
 const progress = require('request-progress')
 export function handleDownloaderIPCs() {
@@ -37,6 +38,10 @@ export function handleDownloaderIPCs() {
     rq?.abort()
   })
+  ipcMain.handle('getResourcePath', async (_event) => {
+    return getResourcePath()
+  })
   /**
    * Downloads a file from a given URL.
    * @param _event - The IPC event object.

@@ -1,19 +1,16 @@
-import { app, ipcMain, webContents } from 'electron'
-import { readdirSync, rmdir, writeFileSync } from 'fs'
-import { ModuleManager } from '../managers/module'
+import { ipcMain, webContents } from 'electron'
+import { readdirSync } from 'fs'
+import { ModuleManager } from './../managers/module'
 import { join, extname } from 'path'
-import { ExtensionManager } from '../managers/extension'
-import { WindowManager } from '../managers/window'
-import { manifest, tarball } from 'pacote'
 import {
   getActiveExtensions,
   getAllExtensions,
   installExtensions,
-} from '../extension/store'
-import { getExtension } from '../extension/store'
-import { removeExtension } from '../extension/store'
-import Extension from '../extension/extension'
-import { userSpacePath } from '../utils/path'
+} from './../extension/store'
+import { getExtension } from './../extension/store'
+import { removeExtension } from './../extension/store'
+import Extension from './../extension/extension'
+import { getResourcePath, userSpacePath } from './../utils/path'
 export function handleExtensionIPCs() {
   /**MARK: General handlers */
@@ -48,11 +45,7 @@ export function handleExtensionIPCs() {
    * @returns An array of paths to the base extensions.
    */
   ipcMain.handle('extension:baseExtensions', async (_event) => {
-    const baseExtensionPath = join(
-      __dirname,
-      '../',
-      app.isPackaged ? '../../app.asar.unpacked/pre-install' : '../pre-install'
-    )
+    const baseExtensionPath = join(getResourcePath(), 'pre-install')
     return readdirSync(baseExtensionPath)
       .filter((file) => extname(file) === '.tgz')
       .map((file) => join(baseExtensionPath, file))

@@ -1,8 +1,9 @@
-import { app, ipcMain } from 'electron'
+import { ipcMain } from 'electron'
 import * as fs from 'fs'
+import fse from 'fs-extra'
 import { join } from 'path'
 import readline from 'readline'
-import { userSpacePath } from '../utils/path'
+import { userSpacePath } from './../utils/path'
 /**
  * Handles file system operations.
@@ -145,6 +146,12 @@ export function handleFsIPCs() {
     }
   })
+  ipcMain.handle('copyFile', async (_event, src: string, dest: string) => {
+    console.debug(`Copying file from ${src} to ${dest}`)
+    return fse.copySync(src, dest, { overwrite: false })
+  })
   /**
    * Reads a file line by line.
    * @param event - The event object.
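One design note on the handler above: fse.copySync blocks Electron's main process for the duration of the copy, which can freeze the UI while a large models folder is duplicated. A non-blocking sketch under the same fs-extra dependency would be:

ipcMain.handle('copyFile', async (_event, src: string, dest: string) => {
  console.debug(`Copying file from ${src} to ${dest}`)
  // fse.copy is the promise-based, asynchronous counterpart of copySync
  return fse.copy(src, dest, { overwrite: false })
})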

@@ -1,5 +1,5 @@
 import { app, dialog } from "electron";
-import { WindowManager } from "../managers/window";
+import { WindowManager } from "./../managers/window";
 import { autoUpdater } from "electron-updater";
 export function handleAppUpdates() {

@@ -67,6 +67,20 @@ export function fsInvokers() {
    * @param {string} path - The path of the directory to remove.
    */
   rmdir: (path: string) => ipcRenderer.invoke('rmdir', path),
+  /**
+   * Copies a file from the source path to the destination path.
+   * @param {string} src - The source path of the file to copy.
+   * @param {string} dest - The destination path where the file should be copied.
+   */
+  copyFile: (src: string, dest: string) => ipcRenderer.invoke('copyFile', src, dest),
+  /**
+   * Retrieves the resource path.
+   * @returns {Promise<string>} A promise that resolves to the resource path.
+   */
+  getResourcePath: () => ipcRenderer.invoke('getResourcePath'),
 }
 return interfaces

@@ -1,7 +1,7 @@
 import { app, BrowserWindow } from 'electron'
 import { join } from 'path'
 import { setupMenu } from './utils/menu'
-import { handleFsIPCs } from './handlers/fs'
+import { createUserSpace, getResourcePath } from './utils/path'
 /**
  * Managers
@@ -18,9 +18,11 @@ import { handleThemesIPCs } from './handlers/theme'
 import { handleExtensionIPCs } from './handlers/extension'
 import { handleAppIPCs } from './handlers/app'
 import { handleAppUpdates } from './handlers/update'
+import { handleFsIPCs } from './handlers/fs'
 app
   .whenReady()
+  .then(createUserSpace)
   .then(ExtensionManager.instance.migrateExtensions)
   .then(ExtensionManager.instance.setupExtensions)
   .then(setupMenu)
@@ -56,7 +58,7 @@ function createMainWindow() {
   })
   const startURL = app.isPackaged
-    ? `file://${join(__dirname, '../renderer/index.html')}`
+    ? `file://${join(__dirname, '..', 'renderer', 'index.html')}`
     : 'http://localhost:3000'
   /* Load frontend app to the window */

@@ -1,10 +1,10 @@
 import { app } from 'electron'
-import { init } from '../extension'
+import { init } from './../extension'
 import { join, resolve } from 'path'
 import { rmdir } from 'fs'
 import Store from 'electron-store'
 import { existsSync, mkdirSync, writeFileSync } from 'fs'
-import { userSpacePath } from '../utils/path'
+import { userSpacePath } from './../utils/path'
 /**
  * Manages extension installation and migration.
 */

@@ -1,4 +1,4 @@
-import { dispose } from "../utils/disposable";
+import { dispose } from "./../utils/disposable";
 /**
  * Manages imported modules.

@@ -1,15 +1,15 @@
-import { BrowserWindow } from "electron";
+import { BrowserWindow } from 'electron'
 /**
  * Manages the current window instance.
  */
 export class WindowManager {
-  public static instance: WindowManager = new WindowManager();
-  public currentWindow?: BrowserWindow;
+  public static instance: WindowManager = new WindowManager()
+  public currentWindow?: BrowserWindow
   constructor() {
     if (WindowManager.instance) {
-      return WindowManager.instance;
+      return WindowManager.instance
     }
   }
@@ -21,17 +21,17 @@ export class WindowManager {
   createWindow(options?: Electron.BrowserWindowConstructorOptions | undefined) {
     this.currentWindow = new BrowserWindow({
       width: 1200,
-      minWidth: 800,
+      minWidth: 1200,
       height: 800,
       show: false,
       trafficLightPosition: {
         x: 10,
         y: 15,
       },
-      titleBarStyle: "hidden",
-      vibrancy: "sidebar",
+      titleBarStyle: 'hidden',
+      vibrancy: 'sidebar',
       ...options,
-    });
-    return this.currentWindow;
+    })
+    return this.currentWindow
   }
 }

@@ -13,10 +13,12 @@
   "renderer/**/*",
   "build/*.{js,map}",
   "build/**/*.{js,map}",
-  "pre-install"
+  "pre-install",
+  "models/**/*"
 ],
 "asarUnpack": [
-  "pre-install"
+  "pre-install",
+  "models"
 ],
 "publish": [
   {
@@ -70,6 +72,7 @@
   "@uiball/loaders": "^1.3.0",
   "electron-store": "^8.1.0",
   "electron-updater": "^6.1.4",
+  "fs-extra": "^11.2.0",
   "pacote": "^17.0.4",
   "request": "^2.88.2",
   "request-progress": "^3.0.0",

@@ -1,41 +1,41 @@
-import { _electron as electron } from "playwright";
-import { ElectronApplication, Page, expect, test } from "@playwright/test";
+import { _electron as electron } from 'playwright'
+import { ElectronApplication, Page, expect, test } from '@playwright/test'
 import {
   findLatestBuild,
   parseElectronApp,
   stubDialog,
-} from "electron-playwright-helpers";
+} from 'electron-playwright-helpers'
-let electronApp: ElectronApplication;
-let page: Page;
+let electronApp: ElectronApplication
+let page: Page
 test.beforeAll(async () => {
-  process.env.CI = "e2e";
-  const latestBuild = findLatestBuild("dist");
-  expect(latestBuild).toBeTruthy();
+  process.env.CI = 'e2e'
+  const latestBuild = findLatestBuild('dist')
+  expect(latestBuild).toBeTruthy()
   // parse the packaged Electron app and find paths and other info
-  const appInfo = parseElectronApp(latestBuild);
-  expect(appInfo).toBeTruthy();
+  const appInfo = parseElectronApp(latestBuild)
+  expect(appInfo).toBeTruthy()
   electronApp = await electron.launch({
     args: [appInfo.main], // main file from package.json
     executablePath: appInfo.executable, // path to the Electron executable
-  });
-  await stubDialog(electronApp, "showMessageBox", { response: 1 });
-  page = await electronApp.firstWindow();
-});
+  })
+  await stubDialog(electronApp, 'showMessageBox', { response: 1 })
+  page = await electronApp.firstWindow()
+})
 test.afterAll(async () => {
-  await electronApp.close();
-  await page.close();
-});
+  await electronApp.close()
+  await page.close()
+})
-test("explores models", async () => {
-  await page.getByTestId("Explore Models").first().click();
-  await page.getByTestId("testid-explore-models").isVisible();
+test('explores models', async () => {
+  await page.getByTestId('Hub').first().click()
+  await page.getByTestId('testid-explore-models').isVisible()
   // More test cases here...
-});
+})

@@ -1,55 +1,55 @@
-import { _electron as electron } from "playwright";
-import { ElectronApplication, Page, expect, test } from "@playwright/test";
+import { _electron as electron } from 'playwright'
+import { ElectronApplication, Page, expect, test } from '@playwright/test'
 import {
   findLatestBuild,
   parseElectronApp,
   stubDialog,
-} from "electron-playwright-helpers";
+} from 'electron-playwright-helpers'
-let electronApp: ElectronApplication;
-let page: Page;
+let electronApp: ElectronApplication
+let page: Page
 test.beforeAll(async () => {
-  process.env.CI = "e2e";
-  const latestBuild = findLatestBuild("dist");
-  expect(latestBuild).toBeTruthy();
+  process.env.CI = 'e2e'
+  const latestBuild = findLatestBuild('dist')
+  expect(latestBuild).toBeTruthy()
   // parse the packaged Electron app and find paths and other info
-  const appInfo = parseElectronApp(latestBuild);
-  expect(appInfo).toBeTruthy();
-  expect(appInfo.asar).toBe(true);
-  expect(appInfo.executable).toBeTruthy();
-  expect(appInfo.main).toBeTruthy();
-  expect(appInfo.name).toBe("jan");
-  expect(appInfo.packageJson).toBeTruthy();
-  expect(appInfo.packageJson.name).toBe("jan");
-  expect(appInfo.platform).toBeTruthy();
-  expect(appInfo.platform).toBe(process.platform);
-  expect(appInfo.resourcesDir).toBeTruthy();
+  const appInfo = parseElectronApp(latestBuild)
+  expect(appInfo).toBeTruthy()
+  expect(appInfo.asar).toBe(true)
+  expect(appInfo.executable).toBeTruthy()
+  expect(appInfo.main).toBeTruthy()
+  expect(appInfo.name).toBe('jan')
+  expect(appInfo.packageJson).toBeTruthy()
+  expect(appInfo.packageJson.name).toBe('jan')
+  expect(appInfo.platform).toBeTruthy()
+  expect(appInfo.platform).toBe(process.platform)
+  expect(appInfo.resourcesDir).toBeTruthy()
   electronApp = await electron.launch({
     args: [appInfo.main], // main file from package.json
     executablePath: appInfo.executable, // path to the Electron executable
-  });
-  await stubDialog(electronApp, "showMessageBox", { response: 1 });
-  page = await electronApp.firstWindow();
-});
+  })
+  await stubDialog(electronApp, 'showMessageBox', { response: 1 })
+  page = await electronApp.firstWindow()
+})
 test.afterAll(async () => {
-  await electronApp.close();
-  await page.close();
-});
+  await electronApp.close()
+  await page.close()
+})
-test("renders the home page", async () => {
-  expect(page).toBeDefined();
+test('renders the home page', async () => {
+  expect(page).toBeDefined()
   // Welcome text is available
   const welcomeText = await page
-    .getByTestId("testid-welcome-title")
+    .getByTestId('testid-welcome-title')
     .first()
-    .isVisible();
-  expect(welcomeText).toBe(false);
-});
+    .isVisible()
+  expect(welcomeText).toBe(false)
+})

@@ -1,41 +0,0 @@
-import { _electron as electron } from "playwright";
-import { ElectronApplication, Page, expect, test } from "@playwright/test";
-import {
-  findLatestBuild,
-  parseElectronApp,
-  stubDialog,
-} from "electron-playwright-helpers";
-
-let electronApp: ElectronApplication;
-let page: Page;
-
-test.beforeAll(async () => {
-  process.env.CI = "e2e";
-  const latestBuild = findLatestBuild("dist");
-  expect(latestBuild).toBeTruthy();
-  // parse the packaged Electron app and find paths and other info
-  const appInfo = parseElectronApp(latestBuild);
-  expect(appInfo).toBeTruthy();
-  electronApp = await electron.launch({
-    args: [appInfo.main], // main file from package.json
-    executablePath: appInfo.executable, // path to the Electron executable
-  });
-  await stubDialog(electronApp, "showMessageBox", { response: 1 });
-  page = await electronApp.firstWindow();
-});
-
-test.afterAll(async () => {
-  await electronApp.close();
-  await page.close();
-});
-
-test("shows my models", async () => {
-  await page.getByTestId("My Models").first().click();
-  await page.getByTestId("testid-my-models").isVisible();
-  // More test cases here...
-});

@@ -1,43 +1,43 @@
-import { _electron as electron } from "playwright";
-import { ElectronApplication, Page, expect, test } from "@playwright/test";
+import { _electron as electron } from 'playwright'
+import { ElectronApplication, Page, expect, test } from '@playwright/test'
 import {
   findLatestBuild,
   parseElectronApp,
   stubDialog,
-} from "electron-playwright-helpers";
+} from 'electron-playwright-helpers'
-let electronApp: ElectronApplication;
-let page: Page;
+let electronApp: ElectronApplication
+let page: Page
 test.beforeAll(async () => {
-  process.env.CI = "e2e";
-  const latestBuild = findLatestBuild("dist");
-  expect(latestBuild).toBeTruthy();
+  process.env.CI = 'e2e'
+  const latestBuild = findLatestBuild('dist')
+  expect(latestBuild).toBeTruthy()
   // parse the packaged Electron app and find paths and other info
-  const appInfo = parseElectronApp(latestBuild);
-  expect(appInfo).toBeTruthy();
+  const appInfo = parseElectronApp(latestBuild)
+  expect(appInfo).toBeTruthy()
   electronApp = await electron.launch({
     args: [appInfo.main], // main file from package.json
     executablePath: appInfo.executable, // path to the Electron executable
-  });
-  await stubDialog(electronApp, "showMessageBox", { response: 1 });
-  page = await electronApp.firstWindow();
-});
+  })
+  await stubDialog(electronApp, 'showMessageBox', { response: 1 })
+  page = await electronApp.firstWindow()
+})
 test.afterAll(async () => {
-  await electronApp.close();
-  await page.close();
-});
+  await electronApp.close()
+  await page.close()
+})
-test("renders left navigation panel", async () => {
+test('renders left navigation panel', async () => {
   // Chat section should be there
-  const chatSection = await page.getByTestId("Chat").first().isVisible();
-  expect(chatSection).toBe(false);
+  const chatSection = await page.getByTestId('Chat').first().isVisible()
+  expect(chatSection).toBe(false)
   // Home actions
   /* Disable unstable feature tests
@@ -45,7 +45,10 @@ test("renders left navigation panel", async () => {
   ** Enable back when it is whitelisted
   */
-  const myModelsBtn = await page.getByTestId("My Models").first().isEnabled();
-  const settingsBtn = await page.getByTestId("Settings").first().isEnabled();
-  expect([myModelsBtn, settingsBtn].filter((e) => !e).length).toBe(0);
-});
+  const systemMonitorBtn = await page
+    .getByTestId('System Monitor')
+    .first()
+    .isEnabled()
+  const settingsBtn = await page.getByTestId('Settings').first().isEnabled()
+  expect([systemMonitorBtn, settingsBtn].filter((e) => !e).length).toBe(0)
+})

@@ -1,40 +1,40 @@
-import { _electron as electron } from "playwright";
-import { ElectronApplication, Page, expect, test } from "@playwright/test";
+import { _electron as electron } from 'playwright'
+import { ElectronApplication, Page, expect, test } from '@playwright/test'
 import {
   findLatestBuild,
   parseElectronApp,
   stubDialog,
-} from "electron-playwright-helpers";
+} from 'electron-playwright-helpers'
-let electronApp: ElectronApplication;
-let page: Page;
+let electronApp: ElectronApplication
+let page: Page
 test.beforeAll(async () => {
-  process.env.CI = "e2e";
-  const latestBuild = findLatestBuild("dist");
-  expect(latestBuild).toBeTruthy();
+  process.env.CI = 'e2e'
+  const latestBuild = findLatestBuild('dist')
+  expect(latestBuild).toBeTruthy()
   // parse the packaged Electron app and find paths and other info
-  const appInfo = parseElectronApp(latestBuild);
-  expect(appInfo).toBeTruthy();
+  const appInfo = parseElectronApp(latestBuild)
+  expect(appInfo).toBeTruthy()
   electronApp = await electron.launch({
     args: [appInfo.main], // main file from package.json
     executablePath: appInfo.executable, // path to the Electron executable
-  });
-  await stubDialog(electronApp, "showMessageBox", { response: 1 });
-  page = await electronApp.firstWindow();
-});
+  })
+  await stubDialog(electronApp, 'showMessageBox', { response: 1 })
+  page = await electronApp.firstWindow()
+})
 test.afterAll(async () => {
-  await electronApp.close();
-  await page.close();
-});
+  await electronApp.close()
+  await page.close()
+})
-test("shows settings", async () => {
-  await page.getByTestId("Settings").first().click();
-  await page.getByTestId("testid-setting-description").isVisible();
-});
+test('shows settings', async () => {
+  await page.getByTestId('Settings').first().click()
+  await page.getByTestId('testid-setting-description').isVisible()
+})

@@ -0,0 +1,41 @@
+import { _electron as electron } from 'playwright'
+import { ElectronApplication, Page, expect, test } from '@playwright/test'
+import {
+  findLatestBuild,
+  parseElectronApp,
+  stubDialog,
+} from 'electron-playwright-helpers'
+
+let electronApp: ElectronApplication
+let page: Page
+
+test.beforeAll(async () => {
+  process.env.CI = 'e2e'
+  const latestBuild = findLatestBuild('dist')
+  expect(latestBuild).toBeTruthy()
+  // parse the packaged Electron app and find paths and other info
+  const appInfo = parseElectronApp(latestBuild)
+  expect(appInfo).toBeTruthy()
+  electronApp = await electron.launch({
+    args: [appInfo.main], // main file from package.json
+    executablePath: appInfo.executable, // path to the Electron executable
+  })
+  await stubDialog(electronApp, 'showMessageBox', { response: 1 })
+  page = await electronApp.firstWindow()
+})
+
+test.afterAll(async () => {
+  await electronApp.close()
+  await page.close()
+})
+
+test('shows system monitor', async () => {
+  await page.getByTestId('System Monitor').first().click()
+  await page.getByTestId('testid-system-monitor').isVisible()
+  // More test cases here...
+})

@@ -1,4 +1,19 @@
 import { join } from 'path'
 import { app } from 'electron'
+import { mkdir } from 'fs-extra'
+
+export async function createUserSpace(): Promise<void> {
+  return mkdir(userSpacePath).catch(() => {})
+}
 export const userSpacePath = join(app.getPath('home'), 'jan')
+
+export function getResourcePath() {
+  let appPath = join(app.getAppPath(), '..', 'app.asar.unpacked')
+
+  if (!app.isPackaged) {
+    // for development mode
+    appPath = join(__dirname, '..', '..')
+  }
+  return appPath
+}
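Taken together with the electron/package.json changes above (models added to files and asarUnpack), a main-process caller can resolve the bundled models folder like this; the path shapes in the comments are assumptions based on electron-builder's usual layout:

import { join } from 'path'
import { getResourcePath } from './utils/path'

// Packaged:    <app>/Resources/app.asar.unpacked/models
// Development: <repo>/electron/models
const bundledModelsDir = join(getResourcePath(), 'models')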

@@ -89,12 +89,12 @@ export default class JanAssistantExtension implements AssistantExtension {
   private async createJanAssistant(): Promise<void> {
     const janAssistant: Assistant = {
       avatar: "",
-      thread_location: undefined, // TODO: make this property ?
+      thread_location: undefined,
       id: "jan",
       object: "assistant", // TODO: maybe we can set default value for this?
       created_at: Date.now(),
-      name: "Jan Assistant",
-      description: "Just Jan Assistant",
+      name: "Jan",
+      description: "A default assistant that can use all downloaded models",
       model: "*",
       instructions: "Your name is Jan.",
       tools: undefined,

@@ -1,4 +1,4 @@
 @echo off
 set /p NITRO_VERSION=<./nitro/version.txt
-.\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64-cuda.zip -e --strip 1 -o ./nitro/win-cuda
+.\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64-cuda.tar.gz -e --strip 1 -o ./nitro/win-cuda
-.\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64.zip -e --strip 1 -o ./nitro/win-cpu
+.\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64.tar.gz -e --strip 1 -o ./nitro/win-cpu

@@ -1 +1 @@
-0.1.17
+0.1.20

@@ -146,7 +146,6 @@ export default class JanInferenceExtension implements InferenceExtension {
     object: "thread.message",
   };
   events.emit(EventName.OnMessageResponse, message);
-  console.log(JSON.stringify(data, null, 2));
   instance.isCancelled = false;
   instance.controller = new AbortController();

@@ -1,3 +1,2 @@
-declare const PLUGIN_NAME: string;
-declare const MODULE_PATH: string;
-declare const MODEL_CATALOG_URL: string;
+declare const PLUGIN_NAME: string
+declare const MODULE_PATH: string

@@ -1,21 +0,0 @@
-interface Version {
-  name: string
-  quantMethod: string
-  bits: number
-  size: number
-  maxRamRequired: number
-  usecase: string
-  downloadLink: string
-}
-interface ModelSchema {
-  id: string
-  name: string
-  shortDescription: string
-  avatarUrl: string
-  longDescription: string
-  author: string
-  version: string
-  modelUrl: string
-  tags: string[]
-  versions: Version[]
-}

@@ -1,46 +0,0 @@
-import { ModelCatalog } from '@janhq/core'
-
-export const parseToModel = (modelGroup): ModelCatalog => {
-  const modelVersions = []
-  modelGroup.versions.forEach((v) => {
-    const model = {
-      object: 'model',
-      version: modelGroup.version,
-      source_url: v.downloadLink,
-      id: v.name,
-      name: v.name,
-      owned_by: 'you',
-      created: 0,
-      description: modelGroup.longDescription,
-      state: 'to_download',
-      settings: v.settings,
-      parameters: v.parameters,
-      metadata: {
-        engine: '',
-        quantization: v.quantMethod,
-        size: v.size,
-        binaries: [],
-        maxRamRequired: v.maxRamRequired,
-        author: modelGroup.author,
-        avatarUrl: modelGroup.avatarUrl,
-      },
-    }
-    modelVersions.push(model)
-  })
-
-  const modelCatalog: ModelCatalog = {
-    id: modelGroup.id,
-    name: modelGroup.name,
-    avatarUrl: modelGroup.avatarUrl,
-    shortDescription: modelGroup.shortDescription,
-    longDescription: modelGroup.longDescription,
-    author: modelGroup.author,
-    version: modelGroup.version,
-    modelUrl: modelGroup.modelUrl,
-    releaseDate: modelGroup.createdAt,
-    tags: modelGroup.tags,
-    availableVersions: modelVersions,
-  }
-
-  return modelCatalog
-}

View File

@ -1,6 +1,12 @@
-import { ExtensionType, fs, downloadFile, abortDownload } from '@janhq/core'
-import { ModelExtension, Model, ModelCatalog } from '@janhq/core'
-import { parseToModel } from './helpers/modelParser'
+import {
+  ExtensionType,
+  fs,
+  downloadFile,
+  abortDownload,
+  getResourcePath,
+  getUserSpace,
+} from '@janhq/core'
+import { ModelExtension, Model, ModelState } from '@janhq/core'
 import { join } from 'path'

 /**
@ -24,10 +30,7 @@ export default class JanModelExtension implements ModelExtension {
    * @override
    */
   onLoad(): void {
-    /** Cloud Native
-     * TODO: Fetch all downloading progresses?
-     **/
-    fs.mkdir(JanModelExtension._homeDir)
+    this.copyModelsToHomeDir()
   }

 /**
@ -36,6 +39,30 @@ export default class JanModelExtension implements ModelExtension {
    */
   onUnload(): void {}

+  private async copyModelsToHomeDir() {
+    try {
+      // list all of the files under the home directory
+      const files = await fs.listFiles('')
+
+      if (files.includes(JanModelExtension._homeDir)) {
+        // ignore if the model is already downloaded
+        console.debug('Model already downloaded')
+        return
+      }
+
+      // copy models folder from resources to home directory
+      const resourePath = await getResourcePath()
+      const srcPath = join(resourePath, 'models')
+
+      const userSpace = await getUserSpace()
+      const destPath = join(userSpace, JanModelExtension._homeDir)
+
+      await fs.copyFile(srcPath, destPath)
+    } catch (err) {
+      console.error(err)
+    }
+  }
+
   /**
    * Downloads a machine learning model.
    * @param model - The model to download.
@ -57,11 +84,11 @@ export default class JanModelExtension implements ModelExtension {
    * @returns {Promise<void>} A promise that resolves when the download has been cancelled.
    */
   async cancelModelDownload(modelId: string): Promise<void> {
-    return abortDownload(join(JanModelExtension._homeDir, modelId, modelId)).then(
-      () => {
-        fs.rmdir(join(JanModelExtension._homeDir, modelId))
-      }
-    )
+    return abortDownload(
+      join(JanModelExtension._homeDir, modelId, modelId)
+    ).then(() => {
+      fs.deleteFile(join(JanModelExtension._homeDir, modelId, modelId))
+    })
   }

 /**
@ -72,7 +99,26 @@ export default class JanModelExtension implements ModelExtension {
   async deleteModel(modelId: string): Promise<void> {
     try {
       const dirPath = join(JanModelExtension._homeDir, modelId)
-      await fs.rmdir(dirPath)
+
+      // remove all files under dirPath except model.json
+      const files = await fs.listFiles(dirPath)
+      const deletePromises = files.map((fileName: string) => {
+        if (fileName !== JanModelExtension._modelMetadataFileName) {
+          return fs.deleteFile(join(dirPath, fileName))
+        }
+      })
+      await Promise.allSettled(deletePromises)
+
+      // update the state as default
+      const jsonFilePath = join(
+        dirPath,
+        JanModelExtension._modelMetadataFileName
+      )
+      const json = await fs.readFile(jsonFilePath)
+      const model = JSON.parse(json) as Model
+      delete model.state
+      await fs.writeFile(jsonFilePath, JSON.stringify(model, null, 2))
     } catch (err) {
       console.error(err)
     }
@ -91,7 +137,17 @@ export default class JanModelExtension implements ModelExtension {
     )

     try {
-      await fs.writeFile(jsonFilePath, JSON.stringify(model, null, 2))
+      await fs.writeFile(
+        jsonFilePath,
+        JSON.stringify(
+          {
+            ...model,
+            state: ModelState.Ready,
+          },
+          null,
+          2
+        )
+      )
     } catch (err) {
       console.error(err)
     }
@ -102,39 +158,62 @@
    * @returns A Promise that resolves with an array of all models.
    */
   async getDownloadedModels(): Promise<Model[]> {
-    const results: Model[] = []
-    const allDirs: string[] = await fs.listFiles(JanModelExtension._homeDir)
-    for (const dir of allDirs) {
-      const modelDirPath = join(JanModelExtension._homeDir, dir)
-      const isModelDir = await fs.isDirectory(modelDirPath)
-      if (!isModelDir) {
-        // if not a directory, ignore
-        continue
-      }
-      const jsonFiles: string[] = (await fs.listFiles(modelDirPath)).filter(
-        (fileName: string) => fileName === JanModelExtension._modelMetadataFileName
-      )
-      for (const json of jsonFiles) {
-        const model: Model = JSON.parse(
-          await fs.readFile(join(modelDirPath, json))
-        )
-        results.push(model)
-      }
-    }
-    return results
+    const models = await this.getModelsMetadata()
+    return models.filter((model) => model.state === ModelState.Ready)
+  }
+
+  private async getModelsMetadata(): Promise<Model[]> {
+    try {
+      const filesUnderJanRoot = await fs.listFiles('')
+      if (!filesUnderJanRoot.includes(JanModelExtension._homeDir)) {
+        console.debug('model folder not found')
+        return []
+      }
+
+      const files: string[] = await fs.listFiles(JanModelExtension._homeDir)
+
+      const allDirectories: string[] = []
+      for (const file of files) {
+        const isDirectory = await fs.isDirectory(
+          join(JanModelExtension._homeDir, file)
+        )
+        if (isDirectory) {
+          allDirectories.push(file)
+        }
+      }
+
+      const readJsonPromises = allDirectories.map((dirName) => {
+        const jsonPath = join(
+          JanModelExtension._homeDir,
+          dirName,
+          JanModelExtension._modelMetadataFileName
+        )
+        return this.readModelMetadata(jsonPath)
+      })
+      const results = await Promise.allSettled(readJsonPromises)
+      const modelData = results.map((result) => {
+        if (result.status === 'fulfilled') {
+          return JSON.parse(result.value) as Model
+        } else {
+          console.error(result.reason)
+        }
+      })
+
+      return modelData
+    } catch (err) {
+      console.error(err)
+      return []
+    }
+  }
+
+  private readModelMetadata(path: string) {
+    return fs.readFile(join(path))
   }

   /**
    * Gets all available models.
    * @returns A Promise that resolves with an array of all models.
    */
-  getConfiguredModels(): Promise<ModelCatalog[]> {
-    // Add a timestamp to the URL to prevent caching
-    return import(
-      /* webpackIgnore: true */ MODEL_CATALOG_URL + `?t=${Date.now()}`
-    ).then((module) => module.default.map((e) => parseToModel(e)))
+  async getConfiguredModels(): Promise<Model[]> {
+    return this.getModelsMetadata()
   }
 }
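
The net effect of this change: a model's lifecycle state now lives inside its own model.json on disk instead of a remote catalog. Below is a minimal standalone sketch of that bookkeeping, using Node's fs/promises in place of the @janhq/core fs wrapper; markModelReady and resetModelState are illustrative names, not functions from the extension.

import { promises as fs } from 'fs'
import { join } from 'path'

const METADATA_FILE = 'model.json'

// After a download completes, stamp model.json with a ready state
// (the extension uses ModelState.Ready for this).
async function markModelReady(modelDir: string): Promise<void> {
  const jsonPath = join(modelDir, METADATA_FILE)
  const model = JSON.parse(await fs.readFile(jsonPath, 'utf-8'))
  await fs.writeFile(
    jsonPath,
    JSON.stringify({ ...model, state: 'ready' }, null, 2)
  )
}

// Deleting a model removes the weights but keeps model.json, clearing
// the state field so the model is listed as downloadable again.
async function resetModelState(modelDir: string): Promise<void> {
  for (const fileName of await fs.readdir(modelDir)) {
    if (fileName !== METADATA_FILE) {
      await fs.rm(join(modelDir, fileName))
    }
  }
  const jsonPath = join(modelDir, METADATA_FILE)
  const model = JSON.parse(await fs.readFile(jsonPath, 'utf-8'))
  delete model.state
  await fs.writeFile(jsonPath, JSON.stringify(model, null, 2))
}

Keeping model.json behind after deletion is what lets getConfiguredModels continue to list the model as available without re-fetching a catalog.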

View File

@ -19,9 +19,6 @@ module.exports = {
    new webpack.DefinePlugin({
      PLUGIN_NAME: JSON.stringify(packageJson.name),
      MODULE_PATH: JSON.stringify(`${packageJson.name}/${packageJson.module}`),
-      MODEL_CATALOG_URL: JSON.stringify(
-        'https://cdn.jsdelivr.net/npm/@janhq/models@latest/dist/index.js'
-      ),
    }),
  ],
  output: {

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Nous-Capybara-34B-GGUF/resolve/main/nous-capybara-34b.Q5_K_M.gguf",
"id": "capybara-34b",
"object": "model",
"name": "Capybara 200k 34B",
"version": 1.0,
"description": "Nous Capybara 34B, a variant of the Yi-34B model, is the first Nous model with a 200K context length, trained for three epochs on the innovative Capybara dataset.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "USER: ",
"ai_prompt": "ASSISTANT: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "NousResearch, The Bloke",
"tags": ["General", "Big Context Length"],
"size": 24320000000
}
}
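
The model.json entries added in this commit (this one and those below) all share the same shape. As a reference, here is a rough TypeScript interface inferred from these files; the interface name and comments are editorial, not part of the codebase, and the optional state field is written by the model extension after download.

interface ModelJson {
  source_url: string // direct download URL for the GGUF weights
  id: string // unique identifier; also the folder name under models/
  object: 'model'
  name: string // display name
  version: string
  description: string
  format: 'gguf'
  settings: {
    ctx_len: number // context window used when the engine loads the model
    system_prompt?: string // prompt-template fragments; empty when unused
    user_prompt?: string
    ai_prompt?: string
  }
  parameters: {
    max_tokens: number
  }
  metadata: {
    author: string
    tags: string[]
    size: number // download size in bytes
  }
  state?: string // e.g. ModelState.Ready once downloaded
}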

View File

@ -0,0 +1,23 @@
{
"source_url": "https://huggingface.co/TheBloke/deepseek-coder-1.3b-base-GGUF/resolve/main/deepseek-coder-1.3b-base.Q4_K_M.gguf",
"id": "deepseek-coder-1.3b",
"object": "model",
"name": "Deepseek Coder 1.3B",
"version": "1.0",
"description": "",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "",
"ai_prompt": ""
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "deepseek, The Bloke",
"tags": ["Code"],
"size": 870000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/dolphin-2_2-yi-34b-GGUF/resolve/main/dolphin-2_2-yi-34b.Q5_K_M.gguf",
"id": "dolphin-yi-34b",
"object": "model",
"name": "Dolphin Yi 34B",
"version": "1.0",
"description": "Dolphin, based on the Yi-34B model and enhanced with features like conversation and empathy, is trained on a unique dataset for advanced multi-turn conversations. Notably uncensored, it requires careful implementation of an alignment layer for ethical use.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "ehartford, The Bloke",
"tags": ["General Use", "Role-playing"],
"size": 24320000000
}
}

models/islm-3b/model.json
View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/UmbrellaCorp/IS-LM-3B_GGUF/resolve/main/IS-LM-Q4_K_M.gguf",
"id": "islm-3b",
"object": "model",
"name": "IS LM 3B",
"version": "1.0",
"description": "IS LM 3B, based on the StableLM 3B model is specifically finetuned for economic analysis using DataForge Economics and QLoRA over three epochs, enhancing its proficiency in economic forecasting and analysis.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "USER: ",
"ai_prompt": "ASSISTANT: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "UmbrellaCorp, The Bloke",
"tags": ["General Use", "Economics"],
"size": 1710000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/lzlv_70B-GGUF/resolve/main/lzlv_70b_fp16_hf.Q5_K_M.gguf",
"id": "lzlv-70b",
"object": "model",
"name": "Lzlv 70B",
"version": "1.0",
"description": "lzlv_70B is a sophisticated AI model designed for roleplaying and creative tasks. This merge aims to combine intelligence with creativity, seemingly outperforming its individual components in complex scenarios and creative outputs.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "USER: ",
"ai_prompt": "ASSISTANT: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "lizpreciatior, The Bloke",
"tags": ["General Use", "Role-playing"],
"size": 48750000000
}
}

models/marx-3b/model.json
View File

@ -0,0 +1,23 @@
{
"source_url": "https://huggingface.co/TheBloke/Marx-3B-v3-GGUF/resolve/main/marx-3b-v3.Q4_K_M.gguf",
"id": "marx-3b",
"object": "model",
"name": "Marx 3B",
"version": "1.0",
"description": "Marx 3B, based on the StableLM 3B model is specifically finetuned for chating using EverythingLM data and QLoRA over two epochs, enhancing its proficiency in understand general knowledege.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### HUMAN: ",
"ai_prompt": "### RESPONSE: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Bohan Du, The Bloke",
"tags": ["General Use"],
"size": 1620000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/MythoMax-L2-13B-GGUF/resolve/main/mythomax-l2-13b.Q5_K_M.gguf",
"id": "mythomax-13b",
"object": "model",
"name": "Mythomax L2 13B",
"version": "1.0",
"description": "Mythomax L2 13b, an advanced AI model derived from MythoMix, merges MythoLogic-L2's deep comprehension with Huginn's writing skills through a unique tensor merge technique, excelling in roleplaying and storytelling.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### Instruction: ",
"ai_prompt": "### Response: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Gryphe, The Bloke",
"tags": ["Role-playing"],
"size": 9230000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/neural-chat-7B-v3-1-GGUF/resolve/main/neural-chat-7b-v3-1.Q4_K_M.gguf",
"id": "neural-chat-7b",
"object": "model",
"name": "Neural Chat 7B",
"version": "1.0",
"description": "The Neural Chat 7B model, developed on the foundation of mistralai/Mistral-7B-v0.1, has been fine-tuned using the Open-Orca/SlimOrca dataset and aligned with the Direct Preference Optimization (DPO) algorithm. It has demonstrated substantial improvements in various AI tasks and performance well on the open_llm_leaderboard.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "### System: ",
"user_prompt": "### User: ",
"ai_prompt": "### Assistant: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Intel, The Bloke",
"tags": ["General Use", "Role-playing", "Big Context Length"],
"size": 4370000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/NeuralHermes-2.5-Mistral-7B-GGUF/resolve/main/neuralhermes-2.5-mistral-7b.Q4_K_M.gguf",
"id": "neuralhermes-7b",
"object": "model",
"name": "NeuralHermes 7B",
"version": "1.0",
"description": "NeuralHermes 2.5 has been enhanced using Direct Preference Optimization. This fine-tuning, inspired by the RLHF process of Neural-chat-7b and OpenHermes-2.5-Mistral-7B, has led to improved performance across several benchmarks.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Intel, The Bloke",
"tags": ["General Use", "Code", "Big Context Length"],
"size": 4370000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Noromaid-20B-v0.1.1-GGUF/resolve/main/noromaid-20b-v0.1.1.Q4_K_M.gguf",
"id": "noromaid-20b",
"object": "model",
"name": "Noromaid 20B",
"version": "1.0",
"description": "The Noromaid 20b model is designed for role-playing and general use, featuring a unique touch with the no_robots dataset that enhances human-like behavior.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### Instruction: ",
"ai_prompt": "### Response: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "NeverSleep, The Bloke",
"tags": ["Role-playing"],
"size": 12040000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/openchat_3.5-GGUF/resolve/main/openchat_3.5.Q4_K_M.gguf",
"id": "openchat-7b",
"object": "model",
"name": "Open Chat 3.5 7B",
"version": "1.0",
"description": "OpenChat represents a breakthrough in the realm of open-source language models. By implementing the C-RLFT fine-tuning strategy, inspired by offline reinforcement learning, this 7B model achieves results on par with ChatGPT (March).",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "GPT4 User: ",
"ai_prompt": "<|end_of_turn|>\nGPT4 Assistant: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "OpenChat, The Bloke",
"tags": ["General", "Code"],
"size": 4370000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-GGUF/resolve/main/openhermes-2.5-mistral-7b.Q4_K_M.gguf",
"id": "openhermes-mistral-7b",
"object": "model",
"name": "Openhermes 2.5 Mistral 7B",
"version": "1.0",
"description": "The OpenHermes 2.5 Mistral 7B incorporates additional code datasets, more than a million GPT-4 generated data examples, and other high-quality open datasets. This enhancement led to significant improvement in benchmarks, highlighting its improved skill in handling code-centric tasks.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Teknium, The Bloke",
"tags": ["General", "Roleplay"],
"size": 4370000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Orca-2-13B-GGUF/resolve/main/orca-2-13b.Q5_K_M.gguf",
"id": "openorca-13b",
"object": "model",
"name": "Orca 2 13B",
"version": "1.0",
"description": "Orca 2 is a finetuned version of LLAMA-2, designed primarily for single-turn responses in reasoning, reading comprehension, math problem solving, and text summarization.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Microsoft, The Bloke",
"tags": ["General Use"],
"size": 9230000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-GGUF/resolve/main/mistral-7b-openorca.Q4_K_M.gguf",
"id": "openorca-7b",
"object": "model",
"name": "OpenOrca 7B",
"version": "1.0",
"description": "OpenOrca 8k 7B is a model based on Mistral 7B, fine-tuned using the OpenOrca dataset. Notably ranked first on the HF Leaderboard for models under 30B, it excels in efficiency and accessibility.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "OpenOrca, The Bloke",
"tags": ["General", "Code"],
"size": 4370000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Phind-CodeLlama-34B-v2-GGUF/resolve/main/phind-codellama-34b-v2.Q5_K_M.gguf",
"id": "phind-34b",
"object": "model",
"name": "Phind 34B",
"version": "1.0",
"description": "Phind-CodeLlama-34B-v2 is an AI model fine-tuned on 1.5B tokens of high-quality programming data. It's a SOTA open-source model in coding. This multi-lingual model excels in various programming languages, including Python, C/C++, TypeScript, Java, and is designed to be steerable and user-friendly.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "### System Prompt\n",
"user_prompt": "### User Message\n",
"ai_prompt": "### Assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Phind, The Bloke",
"tags": ["Code", "Big Context Length"],
"size": 24320000000
}
}

View File

@ -0,0 +1,23 @@
{
"source_url": "https://huggingface.co/TheBloke/rocket-3B-GGUF/resolve/main/rocket-3b.Q4_K_M.gguf",
"id": "rocket-3b",
"object": "model",
"name": "Rocket 3B",
"version": "1.0",
"description": "Rocket-3B is a GPT-like model, primarily English, fine-tuned on diverse public datasets. It outperforms larger models in benchmarks, showcasing superior understanding and text generation, making it an effective chat model for its size.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "pansophic, The Bloke",
"tags": ["General Use"],
"size": 1710000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GGUF/resolve/main/starling-lm-7b-alpha.Q4_K_M.gguf",
"id": "starling-7b",
"object": "model",
"name": "Strarling alpha 7B",
"version": "1.0",
"description": "Starling-RM-7B-alpha is a language model finetuned with Reinforcement Learning from AI Feedback from Openchat 3.5. It stands out for its impressive performance using GPT-4 as a judge, making it one of the top-performing models in its category.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "GPT4 User: ",
"ai_prompt": "<|end_of_turn|>\nGPT4 Assistant: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Berkeley-nest, The Bloke",
"tags": ["General", "Code"],
"size": 4370000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/GOAT-70B-Storytelling-GGUF/resolve/main/goat-70b-storytelling.Q5_K_M.gguf",
"id": "storytelling-70b",
"object": "model",
"name": "Storytelling 70B",
"version": "1.0",
"description": "The GOAT-70B-Storytelling model is designed for autonomous story-writing, including crafting books and movie scripts. Based on the LLaMA 2 70B architecture, this model excels in generating cohesive and engaging narratives using inputs like plot outlines and character profiles.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### USER: ",
"ai_prompt": "\n### ASSISTANT: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "GOAT-AI, The Bloke",
"tags": ["General Use", "Writing"],
"size": 48750000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/LLaMA2-13B-Tiefighter-GGUF/resolve/main/llama2-13b-tiefighter.Q5_K_M.gguf",
"id": "tiefighter-13b",
"object": "model",
"name": "Tiefighter 13B",
"version": "1.0",
"description": "Tiefighter-13B is a highly creative, merged AI model achieved by combining various 'LORAs' on top of an existing merge, particularly focusing on storytelling and improvisation. This model excels in story writing, chatbots, and adventuring, and is designed to perform better with less detailed inputs, leveraging its inherent creativity.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### Instruction: ",
"ai_prompt": "\n### Response: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "KoboldAI, The Bloke",
"tags": ["General Use", "Role-playing", "Writing"],
"size": 9230000000
}
}

View File

@ -0,0 +1,23 @@
{
"source_url": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v0.6/resolve/main/ggml-model-q4_0.gguf",
"id": "tinyllama-1.1b",
"object": "model",
"name": "TinyLlama Chat 1.1B",
"version": "1.0",
"description": "The TinyLlama project, featuring a 1.1B parameter Llama model, is pretrained on an expansive 3 trillion token dataset. Its design ensures easy integration with various Llama-based open-source projects. Despite its smaller size, it efficiently utilizes lower computational and memory resources, drawing on GPT-4's analytical prowess to enhance its conversational abilities and versatility.",
"format": "gguf",
"settings": {
"ctx_len": 2048,
"system_prompt": "<|system|>\n",
"user_prompt": "<|user|>\n",
"ai_prompt": "<|assistant|>\n"
},
"parameters": {
"max_tokens": 2048
},
"metadata": {
"author": "TinyLlama",
"tags": ["General Use"],
"size": 637000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/WizardCoder-Python-13B-V1.0-GGUF/resolve/main/wizardcoder-python-13b-v1.0.Q5_K_M.gguf",
"id": "wizardcoder-13b",
"object": "model",
"name": "Wizard Coder Python 13B",
"version": "1.0",
"description": "WizardCoder-Python-13B is a Python coding model major models like ChatGPT-3.5. This model based on the Llama2 architecture, demonstrate high proficiency in specific domains like coding and mathematics.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### Instruction:\n",
"ai_prompt": "### Response:\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "WizardLM, The Bloke",
"tags": ["Code", "Big Context Length"],
"size": 9230000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/WizardCoder-Python-34B-V1.0-GGUF/resolve/main/wizardcoder-python-34b-v1.0.Q5_K_M.gguf",
"id": "wizardcoder-34b",
"object": "model",
"name": "Wizard Coder Python 34B",
"version": "1.0",
"description": "WizardCoder-Python-34B is a Python coding model major models like ChatGPT-3.5. This model based on the Llama2 architecture, demonstrate high proficiency in specific domains like coding and mathematics.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### Instruction:\n",
"ai_prompt": "### Response:\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "WizardLM, The Bloke",
"tags": ["Code", "Big Context Length"],
"size": 24320000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Xwin-LM-70B-V0.1-GGUF/resolve/main/xwin-lm-70b-v0.1.Q5_K_M.gguf",
"id": "xwin-70b",
"object": "model",
"name": "Xwin LM 70B",
"version": "1.0",
"description": "Xwin-LM, based on Llama2 models, emphasizes alignment and exhibits advanced language understanding, text generation, and role-playing abilities.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "USER: ",
"ai_prompt": "ASSISTANT: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Xwin-LM, The Bloke",
"tags": ["General Use", "Role-playing"],
"size": 48750000000
}
}

View File

@ -0,0 +1,21 @@
{
"source_url": "https://huggingface.co/TheBloke/Yarn-Llama-2-70B-32k-GGUF/resolve/main/yarn-llama-2-70b-32k.Q5_K_M.gguf",
"id": "yarn-70b",
"object": "model",
"name": "Yarn 32k 70B",
"version": "1,0",
"description": "Yarn-Llama-2-70b-32k is designed specifically for handling long contexts. It represents an extension of the Llama-2-70b-hf model, now supporting a 32k token context window.",
"format": "gguf",
"settings": {
"ctx_len": 4096
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "NousResearch, The Bloke",
"tags": ["General Use", "Big Context Length"],
"size": 48750000000
}
}

models/yi-34b/model.json
View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Yi-34B-Chat-GGUF/resolve/main/yi-34b-chat.Q5_K_M.gguf",
"id": "yi-34b",
"object": "model",
"name": "Yi 34B",
"version": "1.0",
"description": "Yi-34B, a specialized chat model, is known for its diverse and creative responses and excels across various NLP tasks and benchmarks.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "01-ai, The Bloke",
"tags": ["General", "Role-playing", "Writing"],
"size": 24320000000
}
}

View File

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/resolve/main/zephyr-7b-beta.Q4_K_M.gguf",
"id": "zephyr-beta-7b",
"object": "model",
"name": "Zephyr Beta 7B",
"version": "1.0",
"description": "The Zephyr-7B-β model marks the second iteration in the Zephyr series, designed to function as an effective assistant. It has been fine-tuned from the mistralai/Mistral-7B-v0.1 base model, utilizing a combination of public and synthetic datasets with the application of Direct Preference Optimization.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|system|>\n",
"user_prompt": "</s>\n<|user|>\n",
"ai_prompt": "</s>\n<|assistant|>\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "HuggingFaceH4, The Bloke",
"tags": ["General Use", "Big Context Length"],
"size": 4370000000
}
}
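
One note on how these files are consumed: the system_prompt, user_prompt, and ai_prompt settings are template fragments that get concatenated around the conversation text. The sketch below assumes straightforward concatenation; the actual assembly happens in the inference extension and is not part of this diff, and buildPrompt is an illustrative name, not a function from the codebase.

// buildPrompt is illustrative, not part of the codebase.
function buildPrompt(
  settings: { system_prompt: string; user_prompt: string; ai_prompt: string },
  systemMessage: string,
  userMessage: string
): string {
  return (
    settings.system_prompt +
    systemMessage +
    settings.user_prompt +
    userMessage +
    settings.ai_prompt
  )
}

// Using the Zephyr Beta 7B fragments from the entry above:
const zephyr = {
  system_prompt: '<|system|>\n',
  user_prompt: '</s>\n<|user|>\n',
  ai_prompt: '</s>\n<|assistant|>\n',
}
console.log(buildPrompt(zephyr, 'You are a helpful assistant.', 'Hello!'))
// <|system|>
// You are a helpful assistant.</s>
// <|user|>
// Hello!</s>
// <|assistant|>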

Some files were not shown because too many files have changed in this diff.