+
+Download the latest version of Jan at https://jan.ai/ or visit the **[GitHub Releases](https://github.com/janhq/jan/releases)** to download any previous release.
## Demo
-
-_Video: Jan v0.3.0 on Mac Air M2, 16GB Ventura_
+
+_Video: Jan v0.4.0 on a MacBook Air M2, 16GB RAM, macOS Ventura_
## Quicklinks
-
+#### Jan
+- [Jan website](https://jan.ai/)
+- [Jan Github](https://github.com/janhq/jan)
- [User Guides](https://jan.ai/docs)
-- [Developer docs](https://jan.ai/docs)
-- [API reference](https://jan.ai/api/overview)
-- [Nitro Github](https://nitro.jan.ai): Nitro is a C++ inference engine
+- [Developer docs](https://jan.ai/docs/extensions/)
+- [API reference](https://jan.ai/api-reference/)
+- [Specs](https://jan.ai/specs/)
+
+#### Nitro
+Nitro is a high-efficiency C++ inference engine for edge computing, powering Jan. It is lightweight and embeddable, ideal for product integration.
+- [Nitro Website](https://nitro.jan.ai)
+- [Nitro Github](https://github.com/janhq/nitro)
+- [Documentation](https://nitro.jan.ai/docs)
+- [API Reference](https://nitro.jan.ai/api-reference)
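
For a quick feel of what "powering Jan" means in practice, here is a minimal sketch of calling Nitro's OpenAI-compatible chat completion endpoint from TypeScript. The port, endpoint, and request body mirror the cURL sample in the API reference updated later in this changeset; the model path is a placeholder.

```typescript
// Minimal sketch (Node 18+ or a browser, where `fetch` is global).
// Endpoint and body fields follow the cURL example in docs/openapi/jan.yaml;
// '/path/to/your/model.gguf' is a placeholder, not a real file.
async function askNitro(prompt: string): Promise<unknown> {
  const response = await fetch('http://localhost:3982/inferences/llamacpp/chat_completion', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      llama_model_path: '/path/to/your/model.gguf',
      messages: [{ role: 'user', content: prompt }],
    }),
  })
  return response.json()
}

askNitro('Hello, who are you?').then(console.log).catch(console.error)
```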
## Troubleshooting
-As Jan is development mode, you might get stuck on a broken build.
+As Jan is under active development, you might get stuck on a broken build.
To reset your installation:
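
A rough sketch of one way to wipe local state before reinstalling, assuming Jan keeps its data in `~/jan` (this path is an assumption and may differ on your machine):

```typescript
// Hedged sketch: removes Jan's local data folder so the next launch starts clean.
// The ~/jan location is an assumption about the default install; verify it first.
import { rm } from 'node:fs/promises'
import { homedir } from 'node:os'
import { join } from 'node:path'

async function resetJan(): Promise<void> {
  // Deletes downloaded models, threads, and extension data.
  await rm(join(homedir(), 'jan'), { recursive: true, force: true })
  // Then remove the app itself and reinstall from https://jan.ai/.
}

resetJan().catch(console.error)
```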
diff --git a/core/.editorconfig b/core/.editorconfig
new file mode 100644
index 000000000..92b9bb1e5
--- /dev/null
+++ b/core/.editorconfig
@@ -0,0 +1,13 @@
+#root = true
+
+[*]
+indent_style = space
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+max_line_length = 100
+indent_size = 2
+
+[*.md]
+trim_trailing_whitespace = false
diff --git a/core/.gitignore b/core/.gitignore
new file mode 100644
index 000000000..d626d098e
--- /dev/null
+++ b/core/.gitignore
@@ -0,0 +1,12 @@
+node_modules
+coverage
+.nyc_output
+.DS_Store
+*.log
+.vscode
+.idea
+dist
+compiled
+.awcache
+.rpt2_cache
+docs
diff --git a/core/package.json b/core/package.json
index 69be586f3..a599fc3c2 100644
--- a/core/package.json
+++ b/core/package.json
@@ -8,28 +8,83 @@
],
"homepage": "https://jan.ai",
"license": "AGPL-3.0",
- "main": "lib/index.js",
- "types": "lib/index.d.ts",
- "directories": {
- "lib": "lib",
- "test": "__tests__"
- },
- "exports": {
- ".": "./lib/index.js"
- },
+ "main": "dist/core.umd.js",
+ "module": "dist/core.es5.js",
+ "typings": "dist/types/index.d.ts",
"files": [
- "lib",
- "README.md",
- "LICENSE.md",
- "package.json",
- "!.DS_Store"
+ "dist"
],
+ "author": "Jan ",
+ "repository": {
+ "type": "git",
+ "url": ""
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ },
"scripts": {
- "test": "echo \"Error: run tests from root\" && exit 1",
- "build": "tsc"
+ "lint": "tslint --project tsconfig.json -t codeFrame 'src/**/*.ts' 'test/**/*.ts'",
+ "prebuild": "rimraf dist",
+ "build": "tsc --module commonjs && rollup -c rollup.config.ts",
+ "start": "rollup -c rollup.config.ts -w"
+ },
+ "lint-staged": {
+ "{src,test}/**/*.ts": [
+ "prettier --write",
+ "git add"
+ ]
+ },
+ "config": {
+ "commitizen": {
+ "path": "node_modules/cz-conventional-changelog"
+ }
+ },
+ "jest": {
+ "transform": {
+ ".(ts|tsx)": "ts-jest"
+ },
+ "testEnvironment": "node",
+ "testRegex": "(/__tests__/.*|\\.(test|spec))\\.(ts|tsx|js)$",
+ "moduleFileExtensions": [
+ "ts",
+ "tsx",
+ "js"
+ ],
+ "coveragePathIgnorePatterns": [
+ "/node_modules/",
+ "/test/"
+ ],
+ "coverageThreshold": {
+ "global": {
+ "branches": 90,
+ "functions": 95,
+ "lines": 95,
+ "statements": 95
+ }
+ },
+ "collectCoverageFrom": [
+ "src/*.{js,ts}"
+ ]
+ },
+ "prettier": {
+ "semi": false,
+ "singleQuote": true
+ },
+ "commitlint": {
+ "extends": [
+ "@commitlint/config-conventional"
+ ]
},
"devDependencies": {
"@types/node": "^12.0.2",
+ "rollup": "^2.38.5",
+ "rollup-plugin-commonjs": "^9.1.8",
+ "rollup-plugin-json": "^3.1.0",
+ "rollup-plugin-node-resolve": "^5.2.0",
+ "rollup-plugin-sourcemaps": "^0.6.3",
+ "rollup-plugin-typescript2": "^0.36.0",
+ "ts-node": "^7.0.1",
+ "tslib": "^2.6.2",
"typescript": "^5.2.2"
}
}
diff --git a/core/rollup.config.ts b/core/rollup.config.ts
new file mode 100644
index 000000000..5e1762c96
--- /dev/null
+++ b/core/rollup.config.ts
@@ -0,0 +1,37 @@
+import resolve from 'rollup-plugin-node-resolve'
+import commonjs from 'rollup-plugin-commonjs'
+import sourceMaps from 'rollup-plugin-sourcemaps'
+import typescript from 'rollup-plugin-typescript2'
+import json from 'rollup-plugin-json'
+
+const pkg = require('./package.json')
+
+const libraryName = 'core'
+
+export default {
+ input: `src/index.ts`,
+ output: [
+ { file: pkg.main, name: libraryName, format: 'umd', sourcemap: true },
+ { file: pkg.module, format: 'es', sourcemap: true },
+ ],
+ // Indicate here external modules you don't want to include in your bundle (e.g. 'lodash')
+ external: [],
+ watch: {
+ include: 'src/**',
+ },
+ plugins: [
+ // Allow json resolution
+ json(),
+ // Compile TypeScript files
+ typescript({ useTsconfigDeclarationDir: true }),
+ // Allow bundling cjs modules (unlike webpack, rollup doesn't understand cjs)
+ commonjs(),
+ // Allow node_modules resolution, so you can use 'external' to control
+ // which external modules to include in the bundle
+ // https://github.com/rollup/rollup-plugin-node-resolve#usage
+ resolve(),
+
+ // Resolve source maps to the original source
+ sourceMaps(),
+ ],
+}
diff --git a/core/src/@global/index.d.ts b/core/src/@global/index.d.ts
index 0e52252e3..b2d55fc1c 100644
--- a/core/src/@global/index.d.ts
+++ b/core/src/@global/index.d.ts
@@ -1,7 +1,10 @@
-export {};
+export {}
declare global {
- interface Window {
- core?: any;
+ namespace NodeJS {
+ interface Global {
+ core: any
+ }
}
+ var core: any | undefined
}
diff --git a/core/src/api/index.ts b/core/src/api/index.ts
new file mode 100644
index 000000000..b45a41d0e
--- /dev/null
+++ b/core/src/api/index.ts
@@ -0,0 +1,100 @@
+/**
+ * App Route APIs
+ * @description Enum of all the routes exposed by the app
+ */
+export enum AppRoute {
+ setNativeThemeLight = 'setNativeThemeLight',
+ setNativeThemeDark = 'setNativeThemeDark',
+ setNativeThemeSystem = 'setNativeThemeSystem',
+ appDataPath = 'appDataPath',
+ appVersion = 'appVersion',
+ getResourcePath = 'getResourcePath',
+ openExternalUrl = 'openExternalUrl',
+ openAppDirectory = 'openAppDirectory',
+ openFileExplore = 'openFileExplorer',
+ relaunch = 'relaunch',
+}
+
+export enum AppEvent {
+ onAppUpdateDownloadUpdate = 'onAppUpdateDownloadUpdate',
+ onAppUpdateDownloadError = 'onAppUpdateDownloadError',
+ onAppUpdateDownloadSuccess = 'onAppUpdateDownloadSuccess',
+}
+
+export enum DownloadRoute {
+ abortDownload = 'abortDownload',
+ downloadFile = 'downloadFile',
+ pauseDownload = 'pauseDownload',
+ resumeDownload = 'resumeDownload',
+}
+
+export enum DownloadEvent {
+ onFileDownloadUpdate = 'onFileDownloadUpdate',
+ onFileDownloadError = 'onFileDownloadError',
+ onFileDownloadSuccess = 'onFileDownloadSuccess',
+}
+
+export enum ExtensionRoute {
+ baseExtensions = 'baseExtensions',
+ getActiveExtensions = 'getActiveExtensions',
+ installExtension = 'installExtension',
+ invokeExtensionFunc = 'invokeExtensionFunc',
+ updateExtension = 'updateExtension',
+ uninstallExtension = 'uninstallExtension',
+}
+export enum FileSystemRoute {
+ appendFile = 'appendFile',
+ copyFile = 'copyFile',
+ deleteFile = 'deleteFile',
+ exists = 'exists',
+ getResourcePath = 'getResourcePath',
+ getUserSpace = 'getUserSpace',
+ isDirectory = 'isDirectory',
+ listFiles = 'listFiles',
+ mkdir = 'mkdir',
+ readFile = 'readFile',
+ readLineByLine = 'readLineByLine',
+ rmdir = 'rmdir',
+ writeFile = 'writeFile',
+}
+
+export type ApiFunction = (...args: any[]) => any
+
+export type AppRouteFunctions = {
+ [K in AppRoute]: ApiFunction
+}
+
+export type AppEventFunctions = {
+ [K in AppEvent]: ApiFunction
+}
+
+export type DownloadRouteFunctions = {
+ [K in DownloadRoute]: ApiFunction
+}
+
+export type DownloadEventFunctions = {
+ [K in DownloadEvent]: ApiFunction
+}
+
+export type ExtensionRouteFunctions = {
+ [K in ExtensionRoute]: ApiFunction
+}
+
+export type FileSystemRouteFunctions = {
+ [K in FileSystemRoute]: ApiFunction
+}
+
+export type APIFunctions = AppRouteFunctions &
+ AppEventFunctions &
+ DownloadRouteFunctions &
+ DownloadEventFunctions &
+ ExtensionRouteFunctions &
+ FileSystemRouteFunctions
+
+export const APIRoutes = [
+ ...Object.values(AppRoute),
+ ...Object.values(DownloadRoute),
+ ...Object.values(ExtensionRoute),
+ ...Object.values(FileSystemRoute),
+]
+export const APIEvents = [...Object.values(AppEvent), ...Object.values(DownloadEvent)]
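
The arrays above make it possible to generate the `global.core.api` surface that `core.ts` and `fs.ts` call into. A minimal sketch of that wiring, assuming some `invoke` bridge (for example an Electron-style `ipcRenderer.invoke`) that is not part of this file:

```typescript
// Sketch only: turn the route list into promise-returning stubs so that calls
// like `global.core.api?.mkdir(path)` resolve. `invoke` is an assumed IPC bridge.
import { APIRoutes } from './api'

type Invoke = (route: string, ...args: any[]) => Promise<any>

export function buildCoreApi(invoke: Invoke): Record<string, (...args: any[]) => Promise<any>> {
  const api: Record<string, (...args: any[]) => Promise<any>> = {}
  for (const route of APIRoutes) {
    api[route] = (...args: any[]) => invoke(route, ...args)
  }
  return api
}

// Usage sketch: global.core = { api: buildCoreApi(myInvoke), events: myEventBus }
```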
diff --git a/core/src/core.ts b/core/src/core.ts
index 0e032f4d9..f268233b7 100644
--- a/core/src/core.ts
+++ b/core/src/core.ts
@@ -7,12 +7,11 @@
* @returns Promise
*
*/
-const executeOnMain: (
- extension: string,
- method: string,
- ...args: any[]
-) => Promise = (extension, method, ...args) =>
- window.core?.api?.invokeExtensionFunc(extension, method, ...args);
+const executeOnMain: (extension: string, method: string, ...args: any[]) => Promise<any> = (
+ extension,
+ method,
+ ...args
+) => global.core?.api?.invokeExtensionFunc(extension, method, ...args)
/**
* Downloads a file from a URL and saves it to the local file system.
@@ -20,10 +19,8 @@ const executeOnMain: (
* @param {string} fileName - The name to use for the downloaded file.
* @returns {Promise} A promise that resolves when the file is downloaded.
*/
-const downloadFile: (url: string, fileName: string) => Promise = (
- url,
- fileName
-) => window.core?.api?.downloadFile(url, fileName);
+const downloadFile: (url: string, fileName: string) => Promise<any> = (url, fileName) =>
+ global.core?.api?.downloadFile(url, fileName)
/**
* Aborts the download of a specific file.
@@ -31,20 +28,13 @@ const downloadFile: (url: string, fileName: string) => Promise = (
* @returns {Promise} A promise that resolves when the download has been aborted.
*/
const abortDownload: (fileName: string) => Promise = (fileName) =>
- window.core.api?.abortDownload(fileName);
-
-/**
- * Retrieves the path to the app data directory using the `coreAPI` object.
- * If the `coreAPI` object is not available, the function returns `undefined`.
- * @returns A Promise that resolves with the path to the app data directory, or `undefined` if the `coreAPI` object is not available.
- */
-const appDataPath: () => Promise = () => window.core.api?.appDataPath();
+ global.core.api?.abortDownload(fileName)
/**
* Gets the user space path.
* @returns {Promise} A Promise that resolves with the user space path.
*/
+const getUserSpace = (): Promise<string> => global.core.api?.getUserSpace()
+const getUserSpace = (): Promise => global.core.api?.getUserSpace()
/**
* Opens the file explorer at a specific path.
@@ -52,10 +42,9 @@ const getUserSpace = (): Promise => window.core.api?.getUserSpace();
* @returns {Promise} A promise that resolves when the file explorer is opened.
*/
const openFileExplorer: (path: string) => Promise = (path) =>
- window.core.api?.openFileExplorer(path);
+ global.core.api?.openFileExplorer(path)
-const getResourcePath: () => Promise = () =>
- window.core.api?.getResourcePath();
+const getResourcePath: () => Promise<string> = () => global.core.api?.getResourcePath()
/**
* Register extension point function type definition
@@ -64,8 +53,8 @@ export type RegisterExtensionPoint = (
extensionName: string,
extensionId: string,
method: Function,
- priority?: number
-) => void;
+ priority?: number,
+) => void
/**
* Functions exports
@@ -74,8 +63,7 @@ export {
executeOnMain,
downloadFile,
abortDownload,
- appDataPath,
getUserSpace,
openFileExplorer,
getResourcePath,
-};
+}
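
A short usage sketch of the helpers above from an extension's renderer-side code; the extension id, method name, and URL are illustrative, not APIs defined in this changeset:

```typescript
// Illustrative only: delegate heavy work to the main process, then download a file.
import { executeOnMain, downloadFile } from './core'

async function prepareModel(): Promise<void> {
  // Invokes `loadModel` in the main-process part of a hypothetical extension.
  await executeOnMain('my-extension', 'loadModel', { ngl: 32 })

  // Saves the file locally; progress is reported through the DownloadEvent events.
  await downloadFile('https://example.com/models/tiny.gguf', 'tiny.gguf')
}

prepareModel().catch(console.error)
```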
diff --git a/core/src/events.ts b/core/src/events.ts
index f62aa1113..1acbef918 100644
--- a/core/src/events.ts
+++ b/core/src/events.ts
@@ -8,6 +8,18 @@ export enum EventName {
OnMessageResponse = "OnMessageResponse",
/** The `OnMessageUpdate` event is emitted when a message is updated. */
OnMessageUpdate = "OnMessageUpdate",
+ /** The `OnModelInit` event is emitted when a model starts to initialize. */
+ OnModelInit = "OnModelInit",
+ /** The `OnModelReady` event is emitted when a model is ready. */
+ OnModelReady = "OnModelReady",
+ /** The `OnModelFail` event is emitted when a model fails loading. */
+ OnModelFail = "OnModelFail",
+ /** The `OnModelStop` event is emitted when a model starts to stop. */
+ OnModelStop = "OnModelStop",
+ /** The `OnModelStopped` event is emitted when a model has stopped successfully. */
+ OnModelStopped = "OnModelStopped",
+ /** The `OnInferenceStopped` event is emitted when an inference is stopped. */
+ OnInferenceStopped = "OnInferenceStopped",
}
/**
@@ -20,7 +32,7 @@ const on: (eventName: string, handler: Function) => void = (
eventName,
handler
) => {
- window.core?.events?.on(eventName, handler);
+ global.core?.events?.on(eventName, handler);
};
/**
@@ -33,7 +45,7 @@ const off: (eventName: string, handler: Function) => void = (
eventName,
handler
) => {
- window.core?.events?.off(eventName, handler);
+ global.core?.events?.off(eventName, handler);
};
/**
@@ -43,7 +55,7 @@ const off: (eventName: string, handler: Function) => void = (
* @param object The object to pass to the event callback.
*/
const emit: (eventName: string, object: any) => void = (eventName, object) => {
- window.core?.events?.emit(eventName, object);
+ global.core?.events?.emit(eventName, object);
};
export const events = {
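
A usage sketch for the new model lifecycle events, using the `events` helpers exported by this module; the handler bodies are illustrative:

```typescript
// Subscribe to model lifecycle events; handlers are illustrative.
import { events, EventName } from './events'

const onReady = (model: { id: string }) => console.log(`model ready: ${model.id}`)

events.on(EventName.OnModelReady, onReady)
events.on(EventName.OnModelFail, (err: unknown) => console.error('model failed to load', err))

// Unsubscribe when the listener is no longer needed.
events.off(EventName.OnModelReady, onReady)
```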
diff --git a/core/src/extensions/inference.ts b/core/src/extensions/inference.ts
index 483ba1339..9453a06d5 100644
--- a/core/src/extensions/inference.ts
+++ b/core/src/extensions/inference.ts
@@ -5,26 +5,10 @@ import { BaseExtension } from "../extension";
* Inference extension. Start, stop and inference models.
*/
export abstract class InferenceExtension extends BaseExtension {
- /**
- * Initializes the model for the extension.
- * @param modelId - The ID of the model to initialize.
- */
- abstract initModel(modelId: string, settings?: ModelSettingParams): Promise;
-
- /**
- * Stops the model for the extension.
- */
- abstract stopModel(): Promise;
-
- /**
- * Stops the streaming inference.
- */
- abstract stopInference(): Promise;
-
/**
* Processes an inference request.
* @param data - The data for the inference request.
* @returns The result of the inference request.
*/
- abstract inferenceRequest(data: MessageRequest): Promise;
+ abstract inference(data: MessageRequest): Promise<ThreadMessage>;
}
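
With the lifecycle methods removed, an inference extension only has to implement `inference`. A self-contained sketch of that contract, using simplified stand-in types rather than the real `MessageRequest`/`ThreadMessage` from `core/src/types`:

```typescript
// Simplified stand-ins; the real types live in core/src/types.
type SketchMessageRequest = { threadId: string; messages?: { role: string; content: string }[] }
type SketchThreadMessage = { thread_id: string; role: string; content: string }

abstract class SketchInferenceExtension {
  abstract inference(data: SketchMessageRequest): Promise<SketchThreadMessage>
}

// Toy engine: echoes the last user message back as the assistant reply.
class EchoInference extends SketchInferenceExtension {
  async inference(data: SketchMessageRequest): Promise<SketchThreadMessage> {
    const msgs = data.messages ?? []
    const last = msgs.length > 0 ? msgs[msgs.length - 1].content : ''
    return { thread_id: data.threadId, role: 'assistant', content: `echo: ${last}` }
  }
}

new EchoInference()
  .inference({ threadId: 'thread_abc123', messages: [{ role: 'user', content: 'hi' }] })
  .then((reply) => console.log(reply.content))
```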
diff --git a/core/src/fs.ts b/core/src/fs.ts
index d12b473bf..d15bf6230 100644
--- a/core/src/fs.ts
+++ b/core/src/fs.ts
@@ -5,54 +5,52 @@
* @returns {Promise} A Promise that resolves when the file is written successfully.
*/
const writeFile: (path: string, data: string) => Promise = (path, data) =>
- window.core.api?.writeFile(path, data);
+ global.core.api?.writeFile(path, data)
/**
* Checks whether the path is a directory.
* @param path - The path to check.
* @returns {boolean} A boolean indicating whether the path is a directory.
*/
-const isDirectory = (path: string): Promise =>
- window.core.api?.isDirectory(path);
+const isDirectory = (path: string): Promise<boolean> => global.core.api?.isDirectory(path)
/**
* Reads the contents of a file at the specified path.
* @param {string} path - The path of the file to read.
* @returns {Promise} A Promise that resolves with the contents of the file.
*/
-const readFile: (path: string) => Promise = (path) =>
- window.core.api?.readFile(path);
-
+const readFile: (path: string) => Promise<any> = (path) => global.core.api?.readFile(path)
+/**
+ * Checks whether the file exists at the specified path.
+ * @param {string} path - The path to check.
+ * @returns {Promise<boolean>} A Promise that resolves with whether the path exists.
+ */
+const exists = (path: string): Promise<boolean> => global.core.api?.exists(path)
/**
* List the directory files
* @param {string} path - The path of the directory to list files.
* @returns {Promise} A Promise that resolves with the contents of the directory.
*/
-const listFiles: (path: string) => Promise = (path) =>
- window.core.api?.listFiles(path);
-
+const listFiles: (path: string) => Promise<any> = (path) => global.core.api?.listFiles(path)
/**
* Creates a directory at the specified path.
* @param {string} path - The path of the directory to create.
* @returns {Promise} A Promise that resolves when the directory is created successfully.
*/
-const mkdir: (path: string) => Promise = (path) =>
- window.core.api?.mkdir(path);
+const mkdir: (path: string) => Promise<any> = (path) => global.core.api?.mkdir(path)
/**
* Removes a directory at the specified path.
* @param {string} path - The path of the directory to remove.
* @returns {Promise} A Promise that resolves when the directory is removed successfully.
*/
-const rmdir: (path: string) => Promise = (path) =>
- window.core.api?.rmdir(path);
+const rmdir: (path: string) => Promise<any> = (path) => global.core.api?.rmdir(path)
/**
* Deletes a file from the local file system.
* @param {string} path - The path of the file to delete.
* @returns {Promise} A Promise that resolves when the file is deleted.
*/
-const deleteFile: (path: string) => Promise = (path) =>
- window.core.api?.deleteFile(path);
+const deleteFile: (path: string) => Promise<any> = (path) => global.core.api?.deleteFile(path)
/**
* Appends data to a file at the specified path.
@@ -60,10 +58,10 @@ const deleteFile: (path: string) => Promise = (path) =>
* @param data data to append
*/
const appendFile: (path: string, data: string) => Promise = (path, data) =>
- window.core.api?.appendFile(path, data);
+ global.core.api?.appendFile(path, data)
const copyFile: (src: string, dest: string) => Promise = (src, dest) =>
- window.core.api?.copyFile(src, dest);
+ global.core.api?.copyFile(src, dest)
/**
* Reads a file line by line.
@@ -71,12 +69,13 @@ const copyFile: (src: string, dest: string) => Promise = (src, dest) =>
* @returns {Promise} A promise that resolves to the lines of the file.
*/
const readLineByLine: (path: string) => Promise = (path) =>
- window.core.api?.readLineByLine(path);
+ global.core.api?.readLineByLine(path)
export const fs = {
isDirectory,
writeFile,
readFile,
+ exists,
listFiles,
mkdir,
rmdir,
@@ -84,4 +83,4 @@ export const fs = {
appendFile,
readLineByLine,
copyFile,
-};
+}
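
A usage sketch tying the wrappers above together: create a folder inside the user space and persist a small JSON file. The folder and file names are illustrative.

```typescript
// Illustrative names only; relies on the fs wrappers above and getUserSpace from core.ts.
import { fs } from './fs'
import { getUserSpace } from './core'

async function saveSettings(settings: Record<string, unknown>): Promise<void> {
  const root = await getUserSpace()
  const dir = `${root}/my-extension`

  if (!(await fs.exists(dir))) {
    await fs.mkdir(dir)
  }
  await fs.writeFile(`${dir}/settings.json`, JSON.stringify(settings, null, 2))
}

saveSettings({ theme: 'dark' }).catch(console.error)
```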
diff --git a/core/src/index.ts b/core/src/index.ts
index ff233ffb3..a56b6f0e1 100644
--- a/core/src/index.ts
+++ b/core/src/index.ts
@@ -2,34 +2,39 @@
* Export all types.
* @module
*/
-export * from "./types/index";
+export * from './types/index'
+
+/**
+ * Export all routes
+ */
+export * from './api'
/**
* Export Core module
* @module
*/
-export * from "./core";
+export * from './core'
/**
* Export Event module.
* @module
*/
-export * from "./events";
+export * from './events'
/**
* Export Filesystem module.
* @module
*/
-export * from "./fs";
+export * from './fs'
/**
* Export Extension module.
* @module
*/
-export * from "./extension";
+export * from './extension'
/**
* Export all base extensions.
* @module
*/
-export * from "./extensions/index";
+export * from './extensions/index'
diff --git a/core/src/types/index.ts b/core/src/types/index.ts
index 7580c2432..2e19f61d8 100644
--- a/core/src/types/index.ts
+++ b/core/src/types/index.ts
@@ -41,8 +41,8 @@ export type MessageRequest = {
/** Messages for constructing a chat completion request **/
messages?: ChatCompletionMessage[];
- /** Runtime parameters for constructing a chat completion request **/
- parameters?: ModelRuntimeParam;
+ /** Settings for constructing a chat completion request **/
+ model?: ModelInfo;
};
/**
@@ -71,9 +71,9 @@ export type ThreadMessage = {
object: string;
/** Thread id, default is a ulid. **/
thread_id: string;
- /** The role of the author of this message. **/
+ /** The assistant id of this thread. **/
assistant_id?: string;
- // TODO: comment
+ /** The role of the author of this message. **/
role: ChatCompletionRole;
/** The content of this message. **/
content: ThreadContent[];
@@ -125,8 +125,6 @@ export interface Thread {
title: string;
/** Assistants in this thread. **/
assistants: ThreadAssistantInfo[];
- // if the thread has been init will full assistant info
- isFinishInit: boolean;
/** The timestamp indicating when this thread was created, represented in ISO 8601 format. **/
created: number;
/** The timestamp indicating when this thread was updated, represented in ISO 8601 format. **/
@@ -153,7 +151,8 @@ export type ThreadAssistantInfo = {
export type ModelInfo = {
id: string;
settings: ModelSettingParams;
- parameters: ModelRuntimeParam;
+ parameters: ModelRuntimeParams;
+ engine?: InferenceEngine;
};
/**
@@ -165,7 +164,19 @@ export type ThreadState = {
waitingForResponse: boolean;
error?: Error;
lastMessage?: string;
+ isFinishInit?: boolean;
};
+/**
+ * Represents the inference engine.
+ * @stored
+ */
+
+enum InferenceEngine {
+ nitro = "nitro",
+ openai = "openai",
+ triton_trtllm = "triton_trtllm",
+ hf_endpoint = "hf_endpoint",
+}
/**
* Model type defines the shape of a model object.
@@ -183,6 +194,11 @@ export interface Model {
*/
version: number;
+ /**
+ * The format of the model.
+ */
+ format: string;
+
/**
* The model download source. It can be an external url or a local filepath.
*/
@@ -213,7 +229,7 @@ export interface Model {
* Default: "to_download"
* Enum: "to_download" "downloading" "ready" "running"
*/
- state: ModelState;
+ state?: ModelState;
/**
* The model settings.
@@ -223,18 +239,23 @@ export interface Model {
/**
* The model runtime parameters.
*/
- parameters: ModelRuntimeParam;
+ parameters: ModelRuntimeParams;
/**
* Metadata of the model.
*/
metadata: ModelMetadata;
+ /**
+ * The model engine.
+ */
+ engine: InferenceEngine;
}
export type ModelMetadata = {
author: string;
tags: string[];
size: number;
+ cover?: string;
};
/**
@@ -254,6 +275,7 @@ export type ModelSettingParams = {
ngl?: number;
embedding?: boolean;
n_parallel?: number;
+ cpu_threads?: number;
system_prompt?: string;
user_prompt?: string;
ai_prompt?: string;
@@ -262,13 +284,16 @@ export type ModelSettingParams = {
/**
* The available model runtime parameters.
*/
-export type ModelRuntimeParam = {
+export type ModelRuntimeParams = {
temperature?: number;
token_limit?: number;
top_k?: number;
top_p?: number;
stream?: boolean;
max_tokens?: number;
+ stop?: string[];
+ frequency_penalty?: number;
+ presence_penalty?: number;
};
/**
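
To illustrate the renamed `ModelRuntimeParams` and the new fields, here is a `ModelInfo` built with values that are purely illustrative, not recommended defaults:

```typescript
// Illustrative values only.
import { ModelInfo, ModelRuntimeParams } from './types'

const parameters: ModelRuntimeParams = {
  temperature: 0.7,
  max_tokens: 2048,
  stream: true,
  stop: ['</s>'],       // new field
  frequency_penalty: 0, // new field
  presence_penalty: 0,  // new field
}

const model: ModelInfo = {
  id: 'zephyr-7b', // same example id used in the API reference
  settings: { ngl: 32, cpu_threads: 4 },
  parameters,
  // `engine` is optional; when set it takes one of the InferenceEngine values
  // (nitro, openai, triton_trtllm, hf_endpoint).
}
```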
diff --git a/core/tsconfig.json b/core/tsconfig.json
index 62caccdcb..19b2d29ad 100644
--- a/core/tsconfig.json
+++ b/core/tsconfig.json
@@ -1,15 +1,19 @@
{
"compilerOptions": {
- "target": "es2016",
- "module": "ES6",
- "outDir": "./lib",
- "esModuleInterop": true,
- "forceConsistentCasingInFileNames": true,
+ "moduleResolution": "node",
+ "target": "es5",
+ "module": "es2015",
+ "lib": ["es2015", "es2016", "es2017", "dom"],
"strict": true,
- "skipLibCheck": true,
+ "sourceMap": true,
"declaration": true,
- "rootDir": "./src"
+ "allowSyntheticDefaultImports": true,
+ "experimentalDecorators": true,
+ "emitDecoratorMetadata": true,
+ "declarationDir": "dist/types",
+ "outDir": "dist/lib",
+ "importHelpers": true,
+ "typeRoots": ["node_modules/@types"]
},
- "include": ["./src"],
- "exclude": ["lib", "node_modules", "**/*.test.ts", "**/__mocks__/*"]
+ "include": ["src"]
}
diff --git a/core/tslint.json b/core/tslint.json
new file mode 100644
index 000000000..398a41670
--- /dev/null
+++ b/core/tslint.json
@@ -0,0 +1,6 @@
+{
+ "extends": [
+ "tslint-config-standard",
+ "tslint-config-prettier"
+ ]
+}
\ No newline at end of file
diff --git a/docs/docs/about/about.md b/docs/docs/about/about.md
index 5fabb707e..4e55e0744 100644
--- a/docs/docs/about/about.md
+++ b/docs/docs/about/about.md
@@ -1,7 +1,7 @@
---
title: About Jan
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
Jan believes in the need for an open source AI ecosystem, and is building the infra and tooling to allow open source AIs to compete on a level playing field with proprietary ones.
diff --git a/docs/docs/community/community.md b/docs/docs/community/community.md
index 623cea8e8..5defbf30c 100644
--- a/docs/docs/community/community.md
+++ b/docs/docs/community/community.md
@@ -1,7 +1,7 @@
---
title: Community
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
- [ ] Social media links
\ No newline at end of file
diff --git a/docs/docs/docs/assistants.md b/docs/docs/docs/assistants.md
index 2f4b1f99f..612229af3 100644
--- a/docs/docs/docs/assistants.md
+++ b/docs/docs/docs/assistants.md
@@ -1,5 +1,5 @@
---
title: Build an Assistant
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
\ No newline at end of file
diff --git a/docs/docs/docs/extensions.md b/docs/docs/docs/extensions.md
index 56cfdfe51..11128ec41 100644
--- a/docs/docs/docs/extensions.md
+++ b/docs/docs/docs/extensions.md
@@ -1,7 +1,7 @@
---
title: Extending Jan
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
## Overview
diff --git a/docs/docs/docs/modules.md b/docs/docs/docs/modules.md
index cb7888f67..3cec1aa3b 100644
--- a/docs/docs/docs/modules.md
+++ b/docs/docs/docs/modules.md
@@ -1,5 +1,5 @@
---
title: Build a Module
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
\ No newline at end of file
diff --git a/docs/docs/docs/themes.md b/docs/docs/docs/themes.md
index 3edfaf490..8760d3ca6 100644
--- a/docs/docs/docs/themes.md
+++ b/docs/docs/docs/themes.md
@@ -1,5 +1,5 @@
---
title: Build a Theme
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
\ No newline at end of file
diff --git a/docs/docs/docs/tools.md b/docs/docs/docs/tools.md
index d8dd132a8..e7a0cb85d 100644
--- a/docs/docs/docs/tools.md
+++ b/docs/docs/docs/tools.md
@@ -1,5 +1,5 @@
---
title: Build a Tool
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
\ No newline at end of file
diff --git a/docs/docs/guides/how-jan-works.md b/docs/docs/guides/how-jan-works.md
index b8202224d..c26d060c8 100644
--- a/docs/docs/guides/how-jan-works.md
+++ b/docs/docs/guides/how-jan-works.md
@@ -1,7 +1,7 @@
---
title: How Jan Works
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
- Local Filesystem
diff --git a/docs/docs/guides/introduction.md b/docs/docs/guides/introduction.md
index 667b0b9d7..bc40a0765 100644
--- a/docs/docs/guides/introduction.md
+++ b/docs/docs/guides/introduction.md
@@ -4,18 +4,18 @@ slug: /guides
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
+ Jan AI,
Jan,
ChatGPT alternative,
- on-premises AI,
- local API server,
local AI,
- llm,
+ private AI,
conversational AI,
no-subscription fee,
+ large language model,
]
---
-Jan is a ChatGPT-alternative that runs on your own computer, with a [local API server](/api).
+Jan is a ChatGPT-alternative that runs on your own computer, with a [local API server](/api-reference/).
Jan uses [open-source AI models](/docs/models), stores data in [open file formats](/specs/data-structures), is highly customizable via [extensions](/docs/extensions).
diff --git a/docs/docs/guides/models.md b/docs/docs/guides/models.md
index 4e123e746..17de92183 100644
--- a/docs/docs/guides/models.md
+++ b/docs/docs/guides/models.md
@@ -1,5 +1,5 @@
---
title: Model Management
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
\ No newline at end of file
diff --git a/docs/docs/guides/quickstart.md b/docs/docs/guides/quickstart.md
index 606003be1..101f17585 100644
--- a/docs/docs/guides/quickstart.md
+++ b/docs/docs/guides/quickstart.md
@@ -1,7 +1,7 @@
---
title: Quickstart
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
- Write in the style of comics, explanation
diff --git a/docs/docs/guides/server.md b/docs/docs/guides/server.md
index d309d8817..245428106 100644
--- a/docs/docs/guides/server.md
+++ b/docs/docs/guides/server.md
@@ -1,7 +1,7 @@
---
title: API Server
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
:::warning
diff --git a/docs/docs/handbook/engineering/engineering.md b/docs/docs/handbook/engineering/engineering.md
index 3ca9952c4..ebd7e188c 100644
--- a/docs/docs/handbook/engineering/engineering.md
+++ b/docs/docs/handbook/engineering/engineering.md
@@ -1,7 +1,7 @@
---
title: Engineering
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
## Connecting to Rigs
diff --git a/docs/docs/handbook/handbook.md b/docs/docs/handbook/handbook.md
index a0485da61..7fc5a9138 100644
--- a/docs/docs/handbook/handbook.md
+++ b/docs/docs/handbook/handbook.md
@@ -2,7 +2,7 @@
title: Onboarding Checklist
slug: /handbook
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
# Welcome
diff --git a/docs/docs/hardware/community.md b/docs/docs/hardware/community.md
index a8c3ffee9..e1825b24b 100644
--- a/docs/docs/hardware/community.md
+++ b/docs/docs/hardware/community.md
@@ -1,7 +1,7 @@
---
title: Hardware Examples
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
## Add your own example
diff --git a/docs/docs/install/from-source.md b/docs/docs/install/from-source.md
index 5377e831c..f99228d80 100644
--- a/docs/docs/install/from-source.md
+++ b/docs/docs/install/from-source.md
@@ -1,7 +1,7 @@
---
title: From Source
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
# Install Jan from Source
diff --git a/docs/docs/install/linux.md b/docs/docs/install/linux.md
index 0b61f96d8..1ca22c97b 100644
--- a/docs/docs/install/linux.md
+++ b/docs/docs/install/linux.md
@@ -1,7 +1,7 @@
---
title: Linux
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
# Installing Jan on Linux
diff --git a/docs/docs/install/mac.md b/docs/docs/install/mac.md
index a618d05e3..d1c5f1fc9 100644
--- a/docs/docs/install/mac.md
+++ b/docs/docs/install/mac.md
@@ -1,7 +1,7 @@
---
title: Mac
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
# Installing Jan on MacOS
diff --git a/docs/docs/install/overview.md b/docs/docs/install/overview.md
index b41db64d7..3dce1b938 100644
--- a/docs/docs/install/overview.md
+++ b/docs/docs/install/overview.md
@@ -1,7 +1,7 @@
---
title: Overview
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
Getting open-source AI models up and running on your own computer with Jan is quick and easy. Jan is lightweight and can run on a variety of hardware and platform versions. Specific requirements tailored to your platform are outlined below.
diff --git a/docs/docs/install/windows.md b/docs/docs/install/windows.md
index f3de435ec..53db66ae7 100644
--- a/docs/docs/install/windows.md
+++ b/docs/docs/install/windows.md
@@ -1,7 +1,7 @@
---
title: Windows
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
# Installing Jan on Windows
diff --git a/docs/docs/specs/architecture.md b/docs/docs/specs/architecture.md
index 2557f6203..dd96d5d8b 100644
--- a/docs/docs/specs/architecture.md
+++ b/docs/docs/specs/architecture.md
@@ -2,7 +2,7 @@
title: Architecture
slug: /specs
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
:::warning
diff --git a/docs/docs/specs/engineering/assistants.md b/docs/docs/specs/engineering/assistants.md
index 8a96f6408..50646f9cb 100644
--- a/docs/docs/specs/engineering/assistants.md
+++ b/docs/docs/specs/engineering/assistants.md
@@ -2,7 +2,7 @@
title: "Assistants"
slug: /specs/assistants
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
:::caution
@@ -40,6 +40,10 @@ In Jan, assistants are `primary` entities with the following capabilities:
## `assistant.json`
+- Each `assistant` folder contains an `assistant.json` file, which is a representation of an assistant.
+- `assistant.json` contains metadata and model parameter overrides.
+- There are no required fields.
+
```js
{
"id": "asst_abc123", // Defaults to foldername
diff --git a/docs/docs/specs/engineering/chats.md b/docs/docs/specs/engineering/chats.md
index 7daac57b0..8ba467d2a 100644
--- a/docs/docs/specs/engineering/chats.md
+++ b/docs/docs/specs/engineering/chats.md
@@ -2,7 +2,7 @@
title: Chats
slug: /specs/chats
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
:::caution
diff --git a/docs/docs/specs/engineering/files.md b/docs/docs/specs/engineering/files.md
index b93054ef1..452a4645d 100644
--- a/docs/docs/specs/engineering/files.md
+++ b/docs/docs/specs/engineering/files.md
@@ -2,7 +2,7 @@
title: "Files"
slug: /specs/files
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
:::warning
diff --git a/docs/docs/specs/engineering/fine-tuning.md b/docs/docs/specs/engineering/fine-tuning.md
index 97c45d85b..111e28f6f 100644
--- a/docs/docs/specs/engineering/fine-tuning.md
+++ b/docs/docs/specs/engineering/fine-tuning.md
@@ -2,7 +2,7 @@
title: "Fine-tuning"
slug: /specs/finetuning
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
Todo: @hiro
diff --git a/docs/docs/specs/engineering/messages.md b/docs/docs/specs/engineering/messages.md
index 4032e61d4..4818378d9 100644
--- a/docs/docs/specs/engineering/messages.md
+++ b/docs/docs/specs/engineering/messages.md
@@ -2,7 +2,7 @@
title: Messages
slug: /specs/messages
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
:::caution
diff --git a/docs/docs/specs/engineering/models.md b/docs/docs/specs/engineering/models.md
index e10fbd088..4fc75122f 100644
--- a/docs/docs/specs/engineering/models.md
+++ b/docs/docs/specs/engineering/models.md
@@ -2,7 +2,7 @@
title: Models
slug: /specs/models
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
:::caution
diff --git a/docs/docs/specs/engineering/prompts.md b/docs/docs/specs/engineering/prompts.md
index 9d4fa4fd6..66e58bf77 100644
--- a/docs/docs/specs/engineering/prompts.md
+++ b/docs/docs/specs/engineering/prompts.md
@@ -2,7 +2,7 @@
title: Prompts
slug: /specs/prompts
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
- [ ] /prompts folder
diff --git a/docs/docs/specs/engineering/threads.md b/docs/docs/specs/engineering/threads.md
index c1421e4ae..b6181b2c2 100644
--- a/docs/docs/specs/engineering/threads.md
+++ b/docs/docs/specs/engineering/threads.md
@@ -2,7 +2,7 @@
title: Threads
slug: /specs/threads
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
:::caution
diff --git a/docs/docs/specs/file-based.md b/docs/docs/specs/file-based.md
index 26f3d8efb..6296199d9 100644
--- a/docs/docs/specs/file-based.md
+++ b/docs/docs/specs/file-based.md
@@ -1,7 +1,7 @@
---
title: File-based Approach
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
:::warning
diff --git a/docs/docs/specs/jan.md b/docs/docs/specs/jan.md
index e92dddf7a..8cd1db98e 100644
--- a/docs/docs/specs/jan.md
+++ b/docs/docs/specs/jan.md
@@ -1,7 +1,7 @@
---
title: Jan (Assistant)
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
## Jan: a "global" assistant
diff --git a/docs/docs/specs/product/chat.md b/docs/docs/specs/product/chat.md
index acbf57487..1f167a07b 100644
--- a/docs/docs/specs/product/chat.md
+++ b/docs/docs/specs/product/chat.md
@@ -2,7 +2,7 @@
title: Chat
slug: /specs/chat
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
## Overview
diff --git a/docs/docs/specs/product/hub.md b/docs/docs/specs/product/hub.md
index 1a9f6064a..14feab542 100644
--- a/docs/docs/specs/product/hub.md
+++ b/docs/docs/specs/product/hub.md
@@ -2,7 +2,7 @@
title: Hub
slug: /specs/hub
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
## Overview
diff --git a/docs/docs/specs/product/settings.md b/docs/docs/specs/product/settings.md
index d7e60e943..6d5b5939b 100644
--- a/docs/docs/specs/product/settings.md
+++ b/docs/docs/specs/product/settings.md
@@ -2,7 +2,7 @@
title: Settings
slug: /specs/settings
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
## Overview
diff --git a/docs/docs/specs/product/system-monitor.md b/docs/docs/specs/product/system-monitor.md
index f4c77c38c..3333eb9bd 100644
--- a/docs/docs/specs/product/system-monitor.md
+++ b/docs/docs/specs/product/system-monitor.md
@@ -2,7 +2,7 @@
title: System Monitor
slug: /specs/system-monitor
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
## Overview
diff --git a/docs/docs/specs/user-interface.md b/docs/docs/specs/user-interface.md
index 156eac5a6..bcf72f84c 100644
--- a/docs/docs/specs/user-interface.md
+++ b/docs/docs/specs/user-interface.md
@@ -1,7 +1,7 @@
---
title: User Interface
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords: [Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee]
+keywords: [Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ]
---
:::warning
diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index da62e3399..a2799552e 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -3,7 +3,6 @@
require("dotenv").config();
-const lightCodeTheme = require("prism-react-renderer/themes/github");
const darkCodeTheme = require("prism-react-renderer/themes/dracula");
/** @type {import('@docusaurus/types').Config} */
@@ -115,11 +114,10 @@ const config = {
primaryColor: "#1a73e8",
primaryColorDark: "#1a73e8",
options: {
- requiredPropsFirst: true,
- noAutoAuth: true,
- hideDownloadButton: true,
- disableSearch: true,
- },
+ requiredPropsFirst: true,
+ noAutoAuth: true,
+ hideDownloadButton: true,
+ },
},
},
],
@@ -127,7 +125,7 @@ const config = {
// Docs: https://docusaurus.io/docs/api/themes/configuration
themeConfig: {
- image: "img/jan-social-card.png",
+ image: "img/og-image.png",
// Only for react live
liveCodeBlock: {
playgroundPosition: "bottom",
@@ -135,48 +133,78 @@ const config = {
docs: {
sidebar: {
hideable: true,
- autoCollapseCategories: true,
+ autoCollapseCategories: false,
},
},
- // SEO Docusarus
+ // SEO Docusaurus
metadata: [
- { name: 'description', content: 'Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.' },
- { name: 'keywords', content: 'Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee' },
- { name: 'robots', content: 'index, follow' },
- { property: 'og:title', content: 'Run your own AI | Jan' },
- { property: 'og:description', content: 'Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.' },
- { property: 'og:image', content: 'https://jan.ai/img/jan-social-card.png' },
- { property: 'og:type', content: 'website' },
- { property: 'twitter:card', content: 'summary_large_image' },
- { property: 'twitter:site', content: '@janhq_' },
- { property: 'twitter:title', content: 'Run your own AI | Jan' },
- { property: 'twitter:description', content: 'Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.' },
- { property: 'twitter:image', content: 'https://jan.ai/img/jan-social-card.png' },
+ {
+ name: "description",
+ content:
+ "Jan runs 100% offline on your computer, utilizes open-source AI models, prioritizes privacy, and is highly customizable.",
+ },
+ {
+ name: "keywords",
+ content:
+ "Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ",
+ },
+ { name: "robots", content: "index, follow" },
+ {
+ property: "og:title",
+ content: "Jan | Open-source ChatGPT Alternative",
+ },
+ {
+ property: "og:description",
+ content:
+ "Jan runs 100% offline on your computer, utilizes open-source AI models, prioritizes privacy, and is highly customizable.",
+ },
+ {
+ property: "og:image",
+ content: "https://jan.ai/img/og-image.png",
+ },
+ { property: "og:type", content: "website" },
+ { property: "twitter:card", content: "summary_large_image" },
+ { property: "twitter:site", content: "@janhq_" },
+ {
+ property: "twitter:title",
+ content: "Jan | Open-source ChatGPT Alternative",
+ },
+ {
+ property: "twitter:description",
+ content:
+ "Jan runs 100% offline on your computer, utilizes open-source AI models, prioritizes privacy, and is highly customizable.",
+ },
+ {
+ property: "twitter:image",
+ content: "https://jan.ai/img/og-image.png",
+ },
],
headTags: [
// Declare a preconnect tag
{
- tagName: 'link',
+ tagName: "link",
attributes: {
- rel: 'preconnect',
- href: 'https://jan.ai/',
+ rel: "preconnect",
+ href: "https://jan.ai/",
},
},
// Declare some json-ld structured data
{
- tagName: 'script',
+ tagName: "script",
attributes: {
- type: 'application/ld+json',
+ type: "application/ld+json",
},
innerHTML: JSON.stringify({
- '@context': 'https://schema.org/',
- '@type': 'localAI',
- name: 'Jan',
- description: "Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.",
- keywords: "Jan, ChatGPT alternative, on-premises AI, local API server, local AI, llm, conversational AI, no-subscription fee",
+ "@context": "https://schema.org/",
+ "@type": "localAI",
+ name: "Jan",
+ description:
+ "Jan runs 100% offline on your computer, utilizes open-source AI models, prioritizes privacy, and is highly customizable.",
+ keywords:
+ "Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ",
applicationCategory: "BusinessApplication",
operatingSystem: "Multiple",
- url: 'https://jan.ai/',
+ url: "https://jan.ai/",
}),
},
],
@@ -234,10 +262,10 @@ const config = {
prism: {
theme: darkCodeTheme,
darkTheme: darkCodeTheme,
- additionalLanguages: ["python"],
+ additionalLanguages: ["python", "powershell", "bash"],
},
colorMode: {
- defaultMode: "dark",
+ defaultMode: "light",
disableSwitch: false,
respectPrefersColorScheme: false,
},
diff --git a/docs/openapi/jan.yaml b/docs/openapi/jan.yaml
index 43c07cb3c..6a59647e5 100644
--- a/docs/openapi/jan.yaml
+++ b/docs/openapi/jan.yaml
@@ -1,28 +1,32 @@
openapi: 3.0.0
info:
title: API Reference
- description: |
+ description: >
# Introduction
- Jan API is compatible with the [OpenAI API](https://platform.openai.com/docs/api-reference).
-version: "0.1.8"
+
+ Jan API is compatible with the [OpenAI
+ API](https://platform.openai.com/docs/api-reference).
+version: 0.1.8
contact:
name: Jan Discord
- url: https://discord.gg/7EcEz7MrvA
+ url: "https://discord.gg/7EcEz7MrvA"
license:
name: AGPLv3
- url: https://github.com/janhq/nitro/blob/main/LICENSE
+ url: "https://github.com/janhq/nitro/blob/main/LICENSE"
servers:
- - url: https://localhost:1337/v1/
+ - url: "https://localhost:1337/v1/"
tags:
- name: Models
description: List and describe the various models available in the API.
- - name: Chat
- description: |
- Given a list of messages comprising a conversation, the model will return a response.
-
+ - name: Chat
+ description: >
+ Given a list of messages comprising a conversation, the model will return
+ a response.
- name: Messages
- description: |
- Messages capture a conversation's content. This can include the content from LLM responses and other metadata from [chat completions](/specs/chats).
+ description: >
+ Messages capture a conversation's content. This can include the content
+ from LLM responses and other metadata from [chat
+ completions](/specs/chats).
- name: Threads
- name: Assistants
description: Configures and utilizes different AI assistants for varied tasks
@@ -42,10 +46,12 @@ paths:
operationId: createChatCompletion
tags:
- Chat
- summary: |
+ summary: |
Create chat completion
- description: |
- Creates a model response for the given chat conversation. Equivalent to OpenAI's create chat completion.
+ description: >
+ Creates a model response for the given chat conversation. Equivalent
+ to OpenAI's create chat completion.
requestBody:
content:
application/json:
@@ -59,9 +65,10 @@ paths:
schema:
$ref: "specs/chat.yaml#/components/schemas/ChatCompletionResponse"
x-codeSamples:
- - lang: "curl"
- source: |
- curl -X POST 'http://localhost:3982/inferences/llamacpp/chat_completion' \
+ - lang: cURL
+ source: >
+ curl -X POST
+ 'http://localhost:3982/inferences/llamacpp/chat_completion' \
-H "Content-Type: application/json" \
-d '{
"llama_model_path": "/path/to/your/model.gguf",
@@ -72,16 +79,17 @@ paths:
},
]
}'
-
- ### MODELS
/models:
get:
operationId: listModels
tags:
- Models
summary: List models
- description: |
- Lists the currently available models, and provides basic information about each one such as the owner and availability. Equivalent to OpenAI's list model.
+ description: >
+ Lists the currently available models, and provides basic information
+ about each one such as the owner and availability. Equivalent
+ to OpenAI's list model.
responses:
"200":
description: OK
@@ -90,7 +98,7 @@ paths:
schema:
$ref: "specs/models.yaml#/components/schemas/ListModelsResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
curl https://localhost:1337/v1/models
post:
@@ -108,27 +116,29 @@ paths:
schema:
$ref: "specs/models.yaml#/components/schemas/DownloadModelResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
curl -X POST https://localhost:1337/v1/models
- /models/{model_id}:
+ "/models/{model_id}":
get:
operationId: retrieveModel
tags:
- Models
summary: Retrieve model
- description: |
- Get a model instance, providing basic information about the model such as the owner and permissioning. Equivalent to OpenAI's retrieve model.
+ description: >
+ Get a model instance, providing basic information about the model such
+ as the owner and permissioning.
+ Equivalent to OpenAI's retrieve model.
parameters:
- in: path
name: model_id
required: true
schema:
type: string
- # ideally this will be an actual ID, so this will always work from browser
example: zephyr-7b
description: |
- The ID of the model to use for this request
+ The ID of the model to use for this request.
responses:
"200":
description: OK
@@ -137,16 +147,18 @@ paths:
schema:
$ref: "specs/models.yaml#/components/schemas/GetModelResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
- curl https://localhost:1337/v1/models/zephyr-7b
+ curl https://localhost:1337/v1/models/{model_id}
delete:
operationId: deleteModel
tags:
- Models
summary: Delete model
- description: |
- Delete a model. Equivalent to OpenAI's delete model.
+ description: >
+ Delete a model.
+ Equivalent to OpenAI's delete model.
parameters:
- in: path
name: model
@@ -164,10 +176,10 @@ paths:
schema:
$ref: "specs/models.yaml#/components/schemas/DeleteModelResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
- curl -X DELETE https://localhost:1337/v1/models/zephyr-7b
- /models/{model_id}/start:
+ curl -X DELETE https://localhost:1337/v1/models/{model_id}
+ "/models/{model_id}/start":
put:
operationId: startModel
tags:
@@ -181,7 +193,6 @@ paths:
required: true
schema:
type: string
- # ideally this will be an actual ID, so this will always work from browser
example: zephyr-7b
description: |
The ID of the model to use for this request
@@ -193,10 +204,10 @@ paths:
schema:
$ref: "specs/models.yaml#/components/schemas/StartModelResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
- curl -X PUT https://localhost:1337/v1/models/zephyr-7b/start
- /models/{model_id}/stop:
+ curl -X PUT https://localhost:1337/v1/models/{model_id}/start
+ "/models/{model_id}/stop":
put:
operationId: stopModel
tags:
@@ -210,7 +221,6 @@ paths:
required: true
schema:
type: string
- # ideally this will be an actual ID, so this will always work from browser
example: zephyr-7b
description: The ID of the model to use for this request
responses:
@@ -221,19 +231,19 @@ paths:
schema:
$ref: "specs/models.yaml#/components/schemas/StopModelResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
- curl -X PUT https://localhost:1337/v1/models/zephyr-7b/stop
-
- ### THREADS
+ curl -X PUT https://localhost:1337/v1/models/{model_id}/stop
/threads:
post:
operationId: createThread
tags:
- Threads
summary: Create thread
- description: |
- Create a thread. Equivalent to OpenAI's create thread.
+ description: >
+ Create a thread.
+ Equivalent to OpenAI's create thread.
requestBody:
required: false
content:
@@ -243,18 +253,18 @@ paths:
properties:
messages:
type: array
- description: "Initial set of messages for the thread."
+ description: Initial set of messages for the thread.
items:
- $ref: 'specs/threads.yaml#/components/schemas/ThreadMessageObject'
+ $ref: "specs/threads.yaml#/components/schemas/ThreadMessageObject"
responses:
"200":
description: Thread created successfully
content:
application/json:
schema:
- $ref: 'specs/threads.yaml#/components/schemas/CreateThreadResponse'
+ $ref: "specs/threads.yaml#/components/schemas/CreateThreadResponse"
x-codeSamples:
- - lang: "cURL"
+ - lang: cURL
source: |
curl -X POST http://localhost:1337/v1/threads \
-H "Content-Type: application/json" \
@@ -267,7 +277,7 @@ paths:
"role": "user",
"content": "How does AI work? Explain it in simple terms."
}]
- }'
+ }'
get:
operationId: listThreads
tags:
@@ -283,33 +293,38 @@ paths:
schema:
type: array
items:
- $ref: 'specs/threads.yaml#/components/schemas/ThreadObject'
+ $ref: "specs/threads.yaml#/components/schemas/ThreadObject"
example:
- - id: "thread_abc123"
- object: "thread"
+ - id: thread_abc123
+ object: thread
created_at: 1699014083
- assistants: ["assistant-001"]
+ assistants:
+ - assistant-001
metadata: {}
messages: []
- - id: "thread_abc456"
- object: "thread"
+ - id: thread_abc456
+ object: thread
created_at: 1699014083
- assistants: ["assistant-002", "assistant-003"]
+ assistants:
+ - assistant-002
+ - assistant-003
metadata: {}
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
- curl http://localhost:1337/v1/threads \
- -H "Content-Type: application/json" \
-
- /threads/{thread_id}:
+ curl http://localhost:1337/v1/threads \
+              -H "Content-Type: application/json"
+ "/threads/{thread_id}":
get:
operationId: getThread
tags:
- Threads
summary: Retrieve thread
- description: |
- Retrieves detailed information about a specific thread using its thread_id. Equivalent to OpenAI's retrieve thread.
+ description: >
+ Retrieves detailed information about a specific thread using its
+ thread_id.
+ Equivalent to OpenAI's retrieve thread.
parameters:
- in: path
name: thread_id
@@ -319,7 +334,6 @@ paths:
example: thread_abc123
description: |
The ID of the thread to retrieve.
-
responses:
"200":
description: Thread details retrieved successfully
@@ -328,7 +342,7 @@ paths:
schema:
$ref: "specs/threads.yaml#/components/schemas/GetThreadResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
curl http://localhost:1337/v1/threads/{thread_id}
post:
@@ -336,8 +350,10 @@ paths:
tags:
- Threads
summary: Modify thread
- description: |
- Modifies a thread. Equivalent to OpenAI's modify thread.
+ description: >
+ Modifies a thread.
+ Equivalent to OpenAI's modify thread.
parameters:
- in: path
name: thread_id
@@ -347,7 +363,6 @@ paths:
example: thread_abc123
description: |
The ID of the thread to be modified.
-
requestBody:
required: false
content:
@@ -357,38 +372,40 @@ paths:
properties:
messages:
type: array
- description: "Set of messages to update in the thread."
+ description: Set of messages to update in the thread.
items:
- $ref: 'specs/threads.yaml#/components/schemas/ThreadMessageObject'
+ $ref: "specs/threads.yaml#/components/schemas/ThreadMessageObject"
responses:
"200":
description: Thread modified successfully
content:
application/json:
schema:
- $ref: 'specs/threads.yaml#/components/schemas/ModifyThreadResponse'
+ $ref: "specs/threads.yaml#/components/schemas/ModifyThreadResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
- curl -X POST http://localhost:1337/v1/threads/{thread_id} \
- -H "Content-Type: application/json" \
- -d '{
- "messages": [{
- "role": "user",
- "content": "Hello, what is AI?",
- "file_ids": ["file-abc123"]
- }, {
- "role": "user",
- "content": "How does AI work? Explain it in simple terms."
- }]
- }'
+ curl -X POST http://localhost:1337/v1/threads/{thread_id} \
+ -H "Content-Type: application/json" \
+ -d '{
+ "messages": [{
+ "role": "user",
+ "content": "Hello, what is AI?",
+ "file_ids": ["file-abc123"]
+ }, {
+ "role": "user",
+ "content": "How does AI work? Explain it in simple terms."
+ }]
+ }'
delete:
operationId: deleteThread
tags:
- Threads
summary: Delete thread
- description: |
- Delete a thread. Equivalent to OpenAI's delete thread.
+ description: >
+ Delete a thread.
+ Equivalent to OpenAI's delete thread.
parameters:
- in: path
name: thread_id
@@ -404,20 +421,19 @@ paths:
content:
application/json:
schema:
- $ref: 'specs/threads.yaml#/components/schemas/DeleteThreadResponse'
+ $ref: "specs/threads.yaml#/components/schemas/DeleteThreadResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
curl -X DELETE http://localhost:1337/v1/threads/{thread_id}
-
- /threads/{thread_id}/assistants:
+ "/threads/{thread_id}/assistants":
get:
operationId: getThreadAssistants
tags:
- Threads
summary: Get Thread.Assistants
description: |
- Can achieve this goal by calling Get thread API
+        This can be achieved by calling the Get Thread API.
parameters:
- in: path
name: thread_id
@@ -434,29 +450,26 @@ paths:
schema:
$ref: "specs/threads.yaml#/components/schemas/GetThreadResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
curl http://localhost:1337/v1/threads/{thread_id}/assistants
-
- /threads/{thread_id}/assistants/{assistants_id}:
+ "/threads/{thread_id}/assistants/{assistants_id}":
post:
operationId: postThreadAssistants
tags:
- Threads
summary: Modify Thread.Assistants
- description: |
-
-
- Can achieve this goal by calling Modify Assistant API with thread.assistant[]
-
- /threads/{thread_id}/:
+ description: >
+        This can be achieved by calling the Modify Assistant API with
+        thread.assistant[].
+ "/threads/{thread_id}/":
get:
operationId: listThreadMessage
tags:
- Threads
summary: List Thread.Messages
- description: |
- Can achieve this goal by calling Get Thread API
+ description: |
+        This can be achieved by calling the Get Thread API.
parameters:
- in: path
name: thread_id
@@ -473,19 +486,246 @@ paths:
schema:
$ref: "specs/threads.yaml#/components/schemas/GetThreadResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
curl http://localhost:1337/v1/threads/{thread_id}
-
- ### MESSAGES
- /threads/{thread_id}/messages:
+ /assistants/:
+ get:
+ operationId: listAssistants
+ tags:
+ - Assistants
+ summary: List assistants
+ description: >
+ Return a list of assistants.
+ Equivalent to OpenAI's list assistants.
+ responses:
+ "200":
+          description: List of assistants retrieved successfully
+ content:
+ application/json:
+ schema:
+ type: array
+ example:
+ - id: asst_abc123
+ object: assistant
+ version: 1
+ created_at: 1698984975
+ name: Math Tutor
+ description: null
+ avatar: "https://pic.png"
+ models:
+ - model_id: model_0
+ instructions: Be concise
+ events:
+ in: []
+ out: []
+ metadata: {}
+ - id: asst_abc456
+ object: assistant
+ version: 1
+ created_at: 1698984975
+ name: Physics Tutor
+ description: null
+ avatar: "https://pic.png"
+ models:
+ - model_id: model_1
+ instructions: Be concise!
+ events:
+ in: []
+ out: []
+ metadata: {}
+ responses: null
+ x-codeSamples:
+ - lang: cURL
+ source: |
+ curl http://localhost:1337/v1/assistants \
+              -H "Content-Type: application/json"
+ post:
+ operationId: createAssistant
+ tags:
+ - Assistants
+ summary: Create assistant
+ description: >
+ Create an assistant with a model and instructions.
+        Equivalent to OpenAI's create assistant.
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ models:
+ type: array
+ description: >-
+ List of models associated with the assistant. Jan-specific
+ property.
+ items:
+ type: object
+ properties:
+ model_id:
+ type: string
+ example: model_0
+ responses:
+ "200":
+          description: OK
+ content:
+ application/json:
+ schema:
+ $ref: >-
+ specs/assistants.yaml#/components/schemas/CreateAssistantResponse
+ x-codeSamples:
+ - lang: cURL
+ source: |
+ curl http://localhost:1337/v1/assistants \
+ -H "Content-Type: application/json" \
+ -d '{
+ "models": [
+ {
+ "model_id": "model_0"
+ }
+ ]
+ }'
+ "/assistants/{assistant_id}":
+ get:
+ operationId: getAssistant
+ tags:
+ - Assistants
+ summary: Retrieve assistant
+ description: >
+ Retrieves an assistant.
+        Equivalent to OpenAI's retrieve assistant.
+ parameters:
+ - in: path
+ name: assistant_id
+ required: true
+ schema:
+ type: string
+ example: asst_abc123
+ description: |
+ The ID of the assistant to retrieve.
+ responses:
+ "200":
+          description: OK
+ content:
+ application/json:
+ schema:
+ $ref: >-
+ specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse
+ x-codeSamples:
+ - lang: cURL
+ source: |
+ curl http://localhost:1337/v1/assistants/{assistant_id} \
+              -H "Content-Type: application/json"
+ post:
+ operationId: modifyAssistant
+ tags:
+ - Assistants
+ summary: Modify assistant
+ description: >
+ Modifies an assistant.
+ Equivalent to OpenAI's modify assistant.
+ parameters:
+ - in: path
+ name: assistant_id
+ required: true
+ schema:
+ type: string
+ example: asst_abc123
+ description: |
+ The ID of the assistant to modify.
+ requestBody:
+ required: false
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ models:
+ type: array
+ description: >-
+ List of models associated with the assistant. Jan-specific
+ property.
+ items:
+ type: object
+ properties:
+ model_id:
+ type: string
+ example: model_0
+ name:
+ type: string
+ description: Name of the assistant.
+ example: Physics Tutor
+ instructions:
+ type: string
+ description: A system prompt for the assistant.
+ example: Be concise!
+ responses:
+ "200":
+          description: OK
+ content:
+ application/json:
+ schema:
+ $ref: >-
+ specs/assistants.yaml#/components/schemas/ModifyAssistantResponse
+ x-codeSamples:
+ - lang: cURL
+ source: |
+ curl http://localhost:1337/v1/assistants/{assistant_id} \
+ -H "Content-Type: application/json" \
+ -d '{
+ "models": [
+ {
+ "model_id": "model_0"
+ }
+ ],
+ "name": "Physics Tutor",
+                "instructions": "Be concise!"
+ }'
+ delete:
+ operationId: deleteAssistant
+ tags:
+ - Assistants
+ summary: Delete assistant
+ description: >
+ Delete an assistant.
+ Equivalent to OpenAI's delete assistant.
+ parameters:
+ - in: path
+ name: assistant_id
+ required: true
+ schema:
+ type: string
+ example: asst_abc123
+ description: |
+ The ID of the assistant to delete.
+ responses:
+ "200":
+ description: Deletion status
+ content:
+ application/json:
+ schema:
+ $ref: >-
+ specs/assistants.yaml#/components/schemas/DeleteAssistantResponse
+ x-codeSamples:
+ - lang: cURL
+ source: |
+ curl -X DELETE http://localhost:1337/v1/assistants/{assistant_id}
+ "/threads/{thread_id}/messages":
get:
operationId: listMessages
tags:
- Messages
summary: List messages
- description: |
- Retrieves all messages from the given thread. Equivalent to OpenAI's list messages.
+ description: >
+ Retrieves all messages from the given thread.
+ Equivalent to OpenAI's list messages.
parameters:
- in: path
name: thread_id
@@ -503,7 +743,7 @@ paths:
schema:
$ref: "specs/messages.yaml#/components/schemas/ListMessagesResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
curl http://localhost:1337/v1/threads/{thread_id}/messages \
-H "Content-Type: application/json"
@@ -512,8 +752,10 @@ paths:
tags:
- Messages
summary: Create message
- description: |
- Create a message. Equivalent to OpenAI's list messages.
+ description: >
+ Create a message.
+        Equivalent to OpenAI's create message.
parameters:
- in: path
name: thread_id
@@ -533,18 +775,16 @@ paths:
role:
type: string
description: |
- "Role of the sender, either 'user' or 'assistant'."
-
- OpenAI compatible
- example: "user"
- enum: ["user", "assistant"]
+ Role of the sender, either 'user' or 'assistant'.
+ example: user
+ enum:
+ - user
+ - assistant
content:
type: string
description: |
- "Text content of the message."
-
- OpenAI compatible
- example: "How does AI work? Explain it in simple terms."
+ Text content of the message.
+ example: How does AI work? Explain it in simple terms.
required:
- role
- content
@@ -556,23 +796,25 @@ paths:
schema:
$ref: "specs/messages.yaml#/components/schemas/CreateMessageResponse"
x-codeSamples:
- - lang: "curl"
+ - lang: cURL
source: |
- curl -X POST http://localhost:1337/v1/threads/{thread_id}/messages \
- -H "Content-Type: application/json" \
- -d '{
- "role": "user",
- "content": "How does AI work? Explain it in simple terms."
- }'
-
- /threads/{thread_id}/messages/{message_id}:
+ curl -X POST http://localhost:1337/v1/threads/{thread_id}/messages \
+ -H "Content-Type: application/json" \
+ -d '{
+ "role": "user",
+ "content": "How does AI work? Explain it in simple terms."
+ }'
+ "/threads/{thread_id}/messages/{message_id}":
get:
operationId: retrieveMessage
tags:
- Messages
summary: Retrieve message
- description: |
- Retrieve a specific message from a thread using its thread_id and message_id. Equivalent to OpenAI's retrieve messages.
+ description: >
+ Retrieve a specific message from a thread using its thread_id and
+ message_id.
+        Equivalent to OpenAI's retrieve message.
parameters:
- in: path
name: thread_id
@@ -582,7 +824,6 @@ paths:
example: thread_abc123
description: |
The ID of the thread containing the message.
-
- in: path
name: message_id
required: true
@@ -591,7 +832,6 @@ paths:
example: msg_abc123
description: |
The ID of the message to retrieve.
-
responses:
"200":
description: OK
@@ -600,20 +840,22 @@ paths:
schema:
$ref: "specs/messages.yaml#/components/schemas/GetMessageResponse"
x-codeSamples:
- - lang: "curl"
- source: |
- curl http://localhost:1337/v1/threads/{thread_id}/messages/{message_id} \
- -H "Content-Type: application/json"
-
- /threads/{thread_id}/messages/{message_id}/files:
+ - lang: cURL
+ source: >
+ curl
+ http://localhost:1337/v1/threads/{thread_id}/messages/{message_id} \
+ -H "Content-Type: application/json"
+ "/threads/{thread_id}/messages/{message_id}/files":
get:
operationId: listMessageFiles
tags:
- Messages
summary: |
- List message files
- description: |
- Returns a list of message files. Equivalent to OpenAI's list message files.
+ List message files
+ description: >
+ Returns a list of message files.
+ Equivalent to OpenAI's list message files.
parameters:
- in: path
name: thread_id
@@ -623,37 +865,40 @@ paths:
example: thread_abc123
description: |
The ID of the thread containing the message.
-
- in: path
name: message_id
required: true
schema:
type: string
example: msg_abc123
- description: |
- The ID of the message whose files are to be listed.
-
+ description: |
+ The ID of the message whose files are to be listed.
responses:
"200":
description: List of files retrieved successfully
content:
application/json:
schema:
- $ref: "specs/messages.yaml#/components/schemas/ListMessageFilesResponse"
+ $ref: >-
+ specs/messages.yaml#/components/schemas/ListMessageFilesResponse
x-codeSamples:
- - lang: "curl"
- source: |
- curl http://localhost:1337/v1/threads/{thread_id}/messages/{message_id}/files \
+ - lang: cURL
+ source: >
+ curl
+ http://localhost:1337/v1/threads/{thread_id}/messages/{message_id}/files
+ \
-H "Content-Type: application/json"
-
- /threads/{thread_id}/messages/{message_id}/files/{file_id}:
+ "/threads/{thread_id}/messages/{message_id}/files/{file_id}":
get:
operationId: retrieveMessageFile
tags:
- Messages
summary: Retrieve message file
- description: |
- Retrieves a file associated with a specific message in a thread. Equivalent to OpenAI's retrieve message file.
+ description: >
+ Retrieves a file associated with a specific message in a thread.
+ Equivalent to OpenAI's retrieve message file.
parameters:
- in: path
name: thread_id
@@ -663,7 +908,6 @@ paths:
example: thread_abc123
description: |
The ID of the thread containing the message.
-
- in: path
name: message_id
required: true
@@ -672,7 +916,6 @@ paths:
example: msg_abc123
description: |
The ID of the message associated with the file.
-
- in: path
name: file_id
required: true
@@ -681,7 +924,6 @@ paths:
example: file-abc123
description: |
The ID of the file to retrieve.
-
responses:
"200":
description: File retrieved successfully
@@ -690,17 +932,20 @@ paths:
schema:
$ref: "specs/messages.yaml#/components/schemas/MessageFileObject"
x-codeSamples:
- - lang: "curl"
- source: |
- curl http://localhost:1337/v1/threads/{thread_id}/messages/{message_id}/files/{file_id} \
+ - lang: cURL
+ source: >
+ curl
+ http://localhost:1337/v1/threads/{thread_id}/messages/{message_id}/files/{file_id}
+ \
-H "Content-Type: application/json"
-
x-webhooks:
ModelObject:
post:
summary: The model object
- description: |
- Describe a model offering that can be used with the API. Equivalent to OpenAI's model object.
+ description: >
+        Describes a model offering that can be used with the API.
+ Equivalent to OpenAI's model object.
operationId: ModelObject
tags:
- Models
@@ -708,12 +953,29 @@ x-webhooks:
content:
application/json:
schema:
- $ref: 'specs/models.yaml#/components/schemas/ModelObject'
+ $ref: "specs/models.yaml#/components/schemas/ModelObject"
+ AssistantObject:
+ post:
+ summary: The assistant object
+ description: >
+ Build assistants that can call models and use tools to perform tasks.
+
+        Equivalent to OpenAI's assistant object.
+ operationId: AssistantObjects
+ tags:
+ - Assistants
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: "specs/assistants.yaml#/components/schemas/AssistantObject"
MessageObject:
post:
summary: The message object
- description: |
- Information about a message in the thread. Equivalent to OpenAI's message object.
+ description: >
+ Information about a message in the thread.
+ Equivalent to OpenAI's message object.
operationId: MessageObject
tags:
- Messages
@@ -721,11 +983,14 @@ x-webhooks:
content:
application/json:
schema:
- $ref: 'specs/messages.yaml#/components/schemas/MessageObject'
+ $ref: "specs/messages.yaml#/components/schemas/MessageObject"
ThreadObject:
post:
summary: The thread object
- description: Represents a thread that contains messages. Equivalent to OpenAI's thread object.
+ description: >-
+ Represents a thread that contains messages.
+ Equivalent to OpenAI's thread object.
operationId: ThreadObject
tags:
- Threads
@@ -733,4 +998,4 @@ x-webhooks:
content:
application/json:
schema:
- $ref: 'specs/threads.yaml#/components/schemas/ThreadObject'
+ $ref: "specs/threads.yaml#/components/schemas/ThreadObject"
diff --git a/docs/openapi/specs/assistant.yaml b/docs/openapi/specs/assistant.yaml
deleted file mode 100644
index 95b4cd22d..000000000
--- a/docs/openapi/specs/assistant.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-AssistantObject:
- type: object
- properties:
- avatar:
- type: string
- description: "URL of the assistant's avatar. Jan-specific property."
- example: "https://lala.png"
- id:
- type: string
- description: "The identifier of the assistant."
- example: "asst_abc123"
- object:
- type: string
- description: "Type of the object, indicating it's an assistant."
- default: "assistant"
- version:
- type: integer
- description: "Version number of the assistant."
- example: 1
- created_at:
- type: integer
- format: int64
- description: "Unix timestamp representing the creation time of the assistant."
- name:
- type: string
- description: "Name of the assistant."
- example: "Math Tutor"
- description:
- type: string
- description: "Description of the assistant. Can be null."
- models:
- type: array
- description: "List of models associated with the assistant. Jan-specific property."
- items:
- type: object
- properties:
- model_id:
- type: string
- # Additional properties for models can be added here
- events:
- type: object
- description: "Event subscription settings for the assistant."
- properties:
- in:
- type: array
- items:
- type: string
- out:
- type: array
- items:
- type: string
- # If there are specific event types, they can be detailed here
- metadata:
- type: object
- description: "Metadata associated with the assistant."
- required:
- - name
- - models
- - events
\ No newline at end of file
diff --git a/docs/openapi/specs/assistants.yaml b/docs/openapi/specs/assistants.yaml
new file mode 100644
index 000000000..7579f35ae
--- /dev/null
+++ b/docs/openapi/specs/assistants.yaml
@@ -0,0 +1,318 @@
+components:
+ schemas:
+ AssistantObject:
+ type: object
+ properties:
+ id:
+ type: string
+ description: The identifier of the assistant.
+ example: asst_abc123
+ object:
+ type: string
+ description: "Type of the object, indicating it's an assistant."
+ default: assistant
+ version:
+ type: integer
+ description: Version number of the assistant.
+ example: 1
+ created_at:
+ type: integer
+ format: int64
+ description: Unix timestamp representing the creation time of the assistant.
+ example: 1698984975
+ name:
+ type: string
+ description: Name of the assistant.
+ example: Math Tutor
+ description:
+ type: string
+ description: Description of the assistant. Can be null.
+ example: null
+ avatar:
+ type: string
+ description: URL of the assistant's avatar. Jan-specific property.
+ example: "https://pic.png"
+ models:
+ type: array
+ description: List of models associated with the assistant. Jan-specific property.
+ items:
+ type: object
+ properties:
+ model_id:
+ type: string
+ example: model_0
+ instructions:
+ type: string
+ description: A system prompt for the assistant.
+ example: Be concise
+ events:
+ type: object
+ description: Event subscription settings for the assistant.
+ properties:
+ in:
+ type: array
+ items:
+ type: string
+ out:
+ type: array
+ items:
+ type: string
+ metadata:
+ type: object
+ description: Metadata associated with the assistant.
+ ListAssistantsResponse: null
+ CreateAssistantResponse:
+ type: object
+ properties:
+ id:
+ type: string
+ description: The identifier of the assistant.
+ example: asst_abc123
+ object:
+ type: string
+ description: "Type of the object, indicating it's an assistant."
+ default: assistant
+ version:
+ type: integer
+ description: Version number of the assistant.
+ example: 1
+ created_at:
+ type: integer
+ format: int64
+ description: Unix timestamp representing the creation time of the assistant.
+ example: 1698984975
+ name:
+ type: string
+ description: Name of the assistant.
+ example: Math Tutor
+ description:
+ type: string
+ description: Description of the assistant. Can be null.
+ example: null
+ avatar:
+ type: string
+ description: URL of the assistant's avatar. Jan-specific property.
+ example: "https://pic.png"
+ models:
+ type: array
+ description: List of models associated with the assistant. Jan-specific property.
+ items:
+ type: object
+ properties:
+ model_id:
+ type: string
+ example: model_0
+ instructions:
+ type: string
+ description: A system prompt for the assistant.
+ example: Be concise
+ events:
+ type: object
+ description: Event subscription settings for the assistant.
+ properties:
+ in:
+ type: array
+ items:
+ type: string
+ out:
+ type: array
+ items:
+ type: string
+ metadata:
+ type: object
+ description: Metadata associated with the assistant.
+ RetrieveAssistantResponse:
+ type: object
+ properties:
+ id:
+ type: string
+ description: The identifier of the assistant.
+ example: asst_abc123
+ object:
+ type: string
+ description: "Type of the object, indicating it's an assistant."
+ default: assistant
+ version:
+ type: integer
+ description: Version number of the assistant.
+ example: 1
+ created_at:
+ type: integer
+ format: int64
+ description: Unix timestamp representing the creation time of the assistant.
+ example: 1698984975
+ name:
+ type: string
+ description: Name of the assistant.
+ example: Math Tutor
+ description:
+ type: string
+ description: Description of the assistant. Can be null.
+ example: null
+ avatar:
+ type: string
+ description: URL of the assistant's avatar. Jan-specific property.
+ example: "https://pic.png"
+ models:
+ type: array
+ description: List of models associated with the assistant. Jan-specific property.
+ items:
+ type: object
+ properties:
+ model_id:
+ type: string
+ example: model_0
+ instructions:
+ type: string
+ description: A system prompt for the assistant.
+ example: Be concise
+ events:
+ type: object
+ description: Event subscription settings for the assistant.
+ properties:
+ in:
+ type: array
+ items:
+ type: string
+ out:
+ type: array
+ items:
+ type: string
+ metadata:
+ type: object
+ description: Metadata associated with the assistant.
+ ModifyAssistantObject:
+ type: object
+ properties:
+ id:
+ type: string
+ description: The identifier of the assistant.
+ example: asst_abc123
+ object:
+ type: string
+ description: "Type of the object, indicating it's an assistant."
+ default: assistant
+ version:
+ type: integer
+ description: Version number of the assistant.
+ example: 1
+ created_at:
+ type: integer
+ format: int64
+ description: Unix timestamp representing the creation time of the assistant.
+ example: 1698984975
+ name:
+ type: string
+ description: Name of the assistant.
+ example: Math Tutor
+ description:
+ type: string
+ description: Description of the assistant. Can be null.
+ example: null
+ avatar:
+ type: string
+ description: URL of the assistant's avatar. Jan-specific property.
+ example: "https://pic.png"
+ models:
+ type: array
+ description: List of models associated with the assistant. Jan-specific property.
+ items:
+ type: object
+ properties:
+ model_id:
+ type: string
+ example: model_0
+ instructions:
+ type: string
+ description: A system prompt for the assistant.
+ example: Be concise
+ events:
+ type: object
+ description: Event subscription settings for the assistant.
+ properties:
+ in:
+ type: array
+ items:
+ type: string
+ out:
+ type: array
+ items:
+ type: string
+ metadata:
+ type: object
+ description: Metadata associated with the assistant.
+ ModifyAssistantResponse:
+ type: object
+ properties:
+ id:
+ type: string
+ description: The identifier of the assistant.
+ example: asst_abc123
+ object:
+ type: string
+ description: "Type of the object, indicating it's an assistant."
+ default: assistant
+ version:
+ type: integer
+ description: Version number of the assistant.
+ example: 1
+ created_at:
+ type: integer
+ format: int64
+ description: Unix timestamp representing the creation time of the assistant.
+ example: 1698984975
+ name:
+ type: string
+ description: Name of the assistant.
+ example: Physics Tutor
+ description:
+ type: string
+ description: Description of the assistant. Can be null.
+ example: null
+ avatar:
+ type: string
+ description: URL of the assistant's avatar. Jan-specific property.
+ example: "https://pic.png"
+ models:
+ type: array
+ description: List of models associated with the assistant. Jan-specific property.
+ items:
+ type: object
+ properties:
+ model_id:
+ type: string
+ example: model_0
+ instructions:
+ type: string
+ description: A system prompt for the assistant.
+ example: Be concise!
+ events:
+ type: object
+ description: Event subscription settings for the assistant.
+ properties:
+ in:
+ type: array
+ items:
+ type: string
+ out:
+ type: array
+ items:
+ type: string
+ metadata:
+ type: object
+ description: Metadata associated with the assistant.
+ DeleteAssistantResponse:
+ type: object
+ properties:
+ id:
+ type: string
+ description: The identifier of the deleted assistant.
+ example: asst_abc123
+ object:
+ type: string
+ description: "Type of the object, indicating the assistant has been deleted."
+ example: assistant.deleted
+ deleted:
+ type: boolean
+ description: Indicates whether the assistant was successfully deleted.
+ example: true
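A payload matching the `AssistantObject` schema above would look roughly like the following. This is a sketch assembled from the schema's own example values, not a captured server response; `instructions` is shown at the assistant level, mirroring the modify-assistant request body in the main spec.

```json
{
  "id": "asst_abc123",
  "object": "assistant",
  "version": 1,
  "created_at": 1698984975,
  "name": "Math Tutor",
  "description": null,
  "avatar": "https://pic.png",
  "models": [
    { "model_id": "model_0" }
  ],
  "instructions": "Be concise",
  "events": { "in": [], "out": [] },
  "metadata": {}
}
```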
diff --git a/docs/openapi/specs/chat.yaml b/docs/openapi/specs/chat.yaml
index 05444c444..9303e9713 100644
--- a/docs/openapi/specs/chat.yaml
+++ b/docs/openapi/specs/chat.yaml
@@ -6,48 +6,59 @@ components:
messages:
type: arrays
description: |
- Contains input data or prompts for the model to process
+ Contains input data or prompts for the model to process.
example:
- [
- { "content": "Hello there :wave:", "role": "assistant" },
- { "content": "Can you write a long story", "role": "user" },
- ]
+ - content: "Hello there :wave:"
+ role: assistant
+ - content: Can you write a long story
+ role: user
stream:
type: boolean
default: true
- description: Enables continuous output generation, allowing for streaming of model responses
+ description: >-
+ Enables continuous output generation, allowing for streaming of
+ model responses.
model:
type: string
- example: "gpt-3.5-turbo"
- description: Specifies the model being used for inference or processing tasks
+ example: gpt-3.5-turbo
+ description: Specifies the model being used for inference or processing tasks.
max_tokens:
type: number
default: 2048
- description: The maximum number of tokens the model will generate in a single response
+ description: >-
+ The maximum number of tokens the model will generate in a single
+ response.
stop:
type: arrays
- example: ["hello"]
- description: Defines specific tokens or phrases at which the model will stop generating further output
+ example:
+ - hello
+ description: >-
+ Defines specific tokens or phrases at which the model will stop
+            generating further output.
frequency_penalty:
type: number
default: 0
- description: Adjusts the likelihood of the model repeating words or phrases in its output
+ description: >-
+ Adjusts the likelihood of the model repeating words or phrases in
+ its output.
presence_penalty:
type: number
default: 0
- description: Influences the generation of new and varied concepts in the model's output
+ description: >-
+ Influences the generation of new and varied concepts in the model's
+ output.
temperature:
type: number
default: 0.7
min: 0
max: 1
- description: Controls the randomness of the model's output
+ description: Controls the randomness of the model's output.
top_p:
type: number
default: 0.95
min: 0
max: 1
- description: Set probability threshold for more relevant outputs
+ description: Set probability threshold for more relevant outputs.
cache_prompt:
type: boolean
default: true
@@ -58,59 +69,62 @@ components:
messages:
type: arrays
description: |
- Contains input data or prompts for the model to process
+ Contains input data or prompts for the model to process.
example:
- [
- { "content": "Hello there :wave:", "role": "assistant" },
- { "content": "Can you write a long story", "role": "user" },
- ]
+ - content: "Hello there :wave:"
+ role: assistant
+ - content: Can you write a long story
+ role: user
model:
type: string
example: model-zephyr-7B
description: |
- Specifies the model being used for inference or processing tasks
+ Specifies the model being used for inference or processing tasks.
stream:
type: boolean
default: true
- description: |
- Enables continuous output generation, allowing for streaming of model responses
+ description: >
+ Enables continuous output generation, allowing for streaming of
+ model responses.
max_tokens:
type: number
default: 2048
- description: |
- The maximum number of tokens the model will generate in a single response
+ description: >
+ The maximum number of tokens the model will generate in a single
+ response.
stop:
type: arrays
- example: ["hello"]
- description: |
- Defines specific tokens or phrases at which the model will stop generating further output
+ example:
+ - hello
+ description: >
+ Defines specific tokens or phrases at which the model will stop
+ generating further output.
frequency_penalty:
type: number
default: 0
- description: |
- Adjusts the likelihood of the model repeating words or phrases in its output
+ description: >
+ Adjusts the likelihood of the model repeating words or phrases in
+ its output.
presence_penalty:
type: number
default: 0
- description: |
- Influences the generation of new and varied concepts in the model's output
+ description: >
+ Influences the generation of new and varied concepts in the model's
+ output.
temperature:
type: number
default: 0.7
min: 0
max: 1
description: |
- Controls the randomness of the model's output
+ Controls the randomness of the model's output.
top_p:
type: number
default: 0.95
min: 0
max: 1
description: |
- Set probability threshold for more relevant outputs
-
-
-
+ Set probability threshold for more relevant outputs.
ChatCompletionResponse:
type: object
description: Description of the response structure
@@ -125,7 +139,7 @@ components:
type: string
nullable: true
example: null
- description: Reason for finishing the response, if applicable
+ description: "Reason for finishing the response, if applicable"
index:
type: integer
example: 0
@@ -135,7 +149,7 @@ components:
properties:
content:
type: string
- example: "Hello user. What can I help you with?"
+ example: Hello user. What can I help you with?
description: Content of the message
role:
type: string
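Putting the request schema above together, a chat-completion body would look roughly like this. It is a sketch built from the schema's defaults and example values; the exact field set accepted depends on the engine behind the endpoint.

```json
{
  "messages": [
    { "content": "Hello there :wave:", "role": "assistant" },
    { "content": "Can you write a long story", "role": "user" }
  ],
  "model": "gpt-3.5-turbo",
  "stream": true,
  "max_tokens": 2048,
  "stop": ["hello"],
  "frequency_penalty": 0,
  "presence_penalty": 0,
  "temperature": 0.7,
  "top_p": 0.95
}
```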
diff --git a/docs/openapi/specs/messages.yaml b/docs/openapi/specs/messages.yaml
index b99fc4221..9a0799f6a 100644
--- a/docs/openapi/specs/messages.yaml
+++ b/docs/openapi/specs/messages.yaml
@@ -6,34 +6,37 @@ components:
id:
type: string
description: |
- "Sequential or UUID identifier of the message."
+ Sequential or UUID identifier of the message.
example: 0
object:
type: string
description: |
- "Type of the object, defaults to 'thread.message'."
+ Type of the object, defaults to 'thread.message'.
example: thread.message
created_at:
type: integer
format: int64
description: |
- "Unix timestamp representing the creation time of the message."
+ Unix timestamp representing the creation time of the message.
thread_id:
type: string
- description: |
- "Identifier of the thread to which this message belongs. Defaults to parent thread."
- example: "thread_asdf"
+ description: >
+ Identifier of the thread to which this message belongs. Defaults to
+ parent thread.
+ example: thread_asdf
assistant_id:
type: string
- description: |
- "Identifier of the assistant involved in the message. Defaults to parent thread."
+ description: >
+ Identifier of the assistant involved in the message. Defaults to
+ parent thread.
example: jan
role:
type: string
- enum: ["user", "assistant"]
+ enum:
+ - user
+ - assistant
description: |
- "Role of the sender, either 'user' or 'assistant'."
-
+ Role of the sender, either 'user' or 'assistant'.
content:
type: array
items:
@@ -42,37 +45,33 @@ components:
type:
type: string
description: |
- "Type of content, e.g., 'text'."
-
-
+ Type of content, e.g., 'text'.
text:
type: object
properties:
value:
type: string
description: |
- "Text content of the message."
- example: "Hi!?"
+ Text content of the message.
+ example: Hi!?
annotations:
type: array
items:
type: string
description: |
- "Annotations for the text content, if any."
+ Annotations for the text content, if any.
example: []
metadata:
type: object
description: |
- "Metadata associated with the message, defaults to an empty object."
-
+ Metadata associated with the message, defaults to an empty object.
example: {}
-
GetMessageResponse:
type: object
properties:
id:
type: string
- description: "The identifier of the message."
+ description: The identifier of the message.
example: msg_abc123
object:
type: string
@@ -81,11 +80,11 @@ components:
created_at:
type: integer
format: int64
- description: "Unix timestamp representing the creation time of the message."
+ description: Unix timestamp representing the creation time of the message.
example: 1699017614
thread_id:
type: string
- description: "Identifier of the thread to which this message belongs."
+ description: Identifier of the thread to which this message belongs.
example: thread_abc123
role:
type: string
@@ -105,8 +104,8 @@ components:
properties:
value:
type: string
- description: "Text content of the message."
- example: "How does AI work? Explain it in simple terms."
+ description: Text content of the message.
+ example: How does AI work? Explain it in simple terms.
annotations:
type: array
items:
@@ -129,15 +128,14 @@ components:
example: null
metadata:
type: object
- description: "Metadata associated with the message."
+ description: Metadata associated with the message.
example: {}
-
CreateMessageResponse:
type: object
properties:
id:
type: string
- description: "The identifier of the created message."
+ description: The identifier of the created message.
example: msg_abc123
object:
type: string
@@ -146,11 +144,11 @@ components:
created_at:
type: integer
format: int64
- description: "Unix timestamp representing the creation time of the message."
+ description: Unix timestamp representing the creation time of the message.
example: 1699017614
thread_id:
type: string
- description: "Identifier of the thread to which this message belongs."
+ description: Identifier of the thread to which this message belongs.
example: thread_abc123
role:
type: string
@@ -170,8 +168,8 @@ components:
properties:
value:
type: string
- description: "Text content of the message."
- example: "How does AI work? Explain it in simple terms."
+ description: Text content of the message.
+ example: How does AI work? Explain it in simple terms.
annotations:
type: array
items:
@@ -194,39 +192,37 @@ components:
example: null
metadata:
type: object
- description: "Metadata associated with the message."
+ description: Metadata associated with the message.
example: {}
-
ListMessagesResponse:
type: object
properties:
object:
type: string
description: "Type of the object, indicating it's a list."
- default: "list"
+ default: list
data:
type: array
items:
- $ref: '#/components/schemas/ListMessageObject'
+ $ref: "#/components/schemas/ListMessageObject"
first_id:
type: string
- description: "Identifier of the first message in the list."
- example: "msg_abc123"
+ description: Identifier of the first message in the list.
+ example: msg_abc123
last_id:
type: string
- description: "Identifier of the last message in the list."
- example: "msg_abc456"
+ description: Identifier of the last message in the list.
+ example: msg_abc456
has_more:
type: boolean
- description: "Indicates whether there are more messages to retrieve."
+ description: Indicates whether there are more messages to retrieve.
example: false
-
ListMessageObject:
type: object
properties:
id:
type: string
- description: "The identifier of the message."
+ description: The identifier of the message.
example: msg_abc123
object:
type: string
@@ -235,11 +231,11 @@ components:
created_at:
type: integer
format: int64
- description: "Unix timestamp representing the creation time of the message."
+ description: Unix timestamp representing the creation time of the message.
example: 1699017614
thread_id:
type: string
- description: "Identifier of the thread to which this message belongs."
+ description: Identifier of the thread to which this message belongs.
example: thread_abc123
role:
type: string
@@ -258,8 +254,8 @@ components:
properties:
value:
type: string
- description: "Text content of the message."
- example: "How does AI work? Explain it in simple terms."
+ description: Text content of the message.
+ example: How does AI work? Explain it in simple terms.
annotations:
type: array
items:
@@ -281,38 +277,36 @@ components:
example: null
metadata:
type: object
- description: "Metadata associated with the message."
+ description: Metadata associated with the message.
example: {}
-
MessageFileObject:
type: object
properties:
id:
type: string
- description: "The identifier of the file."
+ description: The identifier of the file.
example: file-abc123
object:
type: string
- description: "Type of the object, indicating it's a thread message file."
+ description: Type of the object, indicating it's a thread message file.
example: thread.message.file
created_at:
type: integer
format: int64
- description: "Unix timestamp representing the creation time of the file."
+ description: Unix timestamp representing the creation time of the file.
example: 1699061776
message_id:
type: string
- description: "Identifier of the message to which this file is associated."
+ description: Identifier of the message to which this file is associated.
example: msg_abc123
-
ListMessageFilesResponse:
type: object
properties:
object:
type: string
- description: "Type of the object, indicating it's a list."
- default: "list"
+ description: Type of the object, indicating it's a list.
+ default: list
data:
type: array
items:
- $ref: '#/components/schemas/MessageFileObject'
\ No newline at end of file
+ $ref: "#/components/schemas/MessageFileObject"
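For orientation, a `ListMessagesResponse` shaped by the schemas above would look roughly like this sketch; values are taken from the schema examples, and the single message shown follows the `ListMessageObject` definition.

```json
{
  "object": "list",
  "data": [
    {
      "id": "msg_abc123",
      "object": "thread.message",
      "created_at": 1699017614,
      "thread_id": "thread_abc123",
      "role": "user",
      "content": [
        {
          "type": "text",
          "text": {
            "value": "How does AI work? Explain it in simple terms.",
            "annotations": []
          }
        }
      ],
      "metadata": {}
    }
  ],
  "first_id": "msg_abc123",
  "last_id": "msg_abc456",
  "has_more": false
}
```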
diff --git a/docs/openapi/specs/models.yaml b/docs/openapi/specs/models.yaml
index aa5cc4155..374f73333 100644
--- a/docs/openapi/specs/models.yaml
+++ b/docs/openapi/specs/models.yaml
@@ -5,7 +5,8 @@ components:
properties:
object:
type: string
- enum: [list]
+ enum:
+ - list
data:
type: array
items:
@@ -13,68 +14,75 @@ components:
required:
- object
- data
-
Model:
type: object
properties:
type:
type: string
- default: "model"
- description: "The type of the object."
+ default: model
+ description: The type of the object.
version:
type: string
default: "1"
- description: "The version number of the model."
+ description: The version number of the model.
id:
type: string
- description: "Unique identifier used in chat-completions model_name, matches folder name."
- example: "zephyr-7b"
+ description: >-
+ Unique identifier used in chat-completions model_name, matches
+ folder name.
+ example: zephyr-7b
name:
type: string
- description: "Name of the model."
- example: "Zephyr 7B"
+ description: Name of the model.
+ example: Zephyr 7B
owned_by:
type: string
- description: "Compatibility field for OpenAI."
+ description: Compatibility field for OpenAI.
default: ""
created:
type: integer
format: int64
- description: "Unix timestamp representing the creation time."
+ description: Unix timestamp representing the creation time.
description:
type: string
- description: "Description of the model."
+ description: Description of the model.
state:
type: string
- enum: [null, "downloading", "ready", "starting", "stopping"]
- description: "Current state of the model."
+ enum:
+ - null
+ - downloading
+ - ready
+ - starting
+ - stopping
+ description: Current state of the model.
format:
type: string
description: "State format of the model, distinct from the engine."
- example: "ggufv3"
+ example: ggufv3
source_url:
type: string
format: uri
- description: "URL to the source of the model."
- example: "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf"
+ description: URL to the source of the model.
+ example: >-
+ https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf
settings:
type: object
properties:
ctx_len:
type: string
- description: "Context length."
+ description: Context length.
example: "2048"
ngl:
type: string
- description: "Number of layers."
+ description: Number of layers.
example: "100"
embedding:
type: string
- description: "Indicates if embedding is enabled."
+ description: Indicates if embedding is enabled.
example: "true"
n_parallel:
type: string
- description: "Number of parallel processes."
+ description: Number of parallel processes.
example: "4"
additionalProperties: false
parameters:
@@ -82,155 +90,237 @@ components:
properties:
temperature:
type: string
- description: "Temperature setting for the model."
+ description: Temperature setting for the model.
example: "0.7"
token_limit:
type: string
- description: "Token limit for the model."
+ description: Token limit for the model.
example: "2048"
top_k:
type: string
- description: "Top-k setting for the model."
+ description: Top-k setting for the model.
example: "0"
top_p:
type: string
- description: "Top-p setting for the model."
+ description: Top-p setting for the model.
example: "1"
stream:
type: string
- description: "Indicates if streaming is enabled."
+ description: Indicates if streaming is enabled.
example: "true"
additionalProperties: false
metadata:
type: object
- description: "Additional metadata."
+ description: Additional metadata.
assets:
type: array
items:
type: string
- description: "List of assets related to the model."
+ description: List of assets related to the model.
required:
- source_url
-
ModelObject:
type: object
properties:
id:
type: string
description: |
- "The identifier of the model."
-
- example: "zephyr-7b"
+ The identifier of the model.
+ example: zephyr-7b
object:
type: string
description: |
- "The type of the object, indicating it's a model."
-
- default: "model"
+ The type of the object, indicating it's a model.
+ default: model
created:
type: integer
format: int64
- description: |
- "Unix timestamp representing the creation time of the model."
-
- example: "1253935178"
+ description: |
+ Unix timestamp representing the creation time of the model.
+ example: 1253935178
owned_by:
type: string
description: |
- "The entity that owns the model."
-
- example: "_"
-
+ The entity that owns the model.
+ example: _
GetModelResponse:
type: object
properties:
id:
type: string
- description: "The identifier of the model."
- example: "zephyr-7b"
+ description: The identifier of the model.
+ example: zephyr-7b
object:
type: string
description: "Type of the object, indicating it's a model."
- default: "model"
+ default: model
created:
type: integer
format: int64
- description: "Unix timestamp representing the creation time of the model."
+ description: Unix timestamp representing the creation time of the model.
owned_by:
type: string
- description: "The entity that owns the model."
- example: "_"
+ description: The entity that owns the model.
+ example: _
state:
type: string
- enum: [not_downloaded, downloaded, running, stopped]
- description: "The current state of the model."
+ enum:
+ - not_downloaded
+ - downloaded
+ - running
+ - stopped
+ description: The current state of the model.
source_url:
type: string
format: uri
- description: "URL to the source of the model."
- example: "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf"
- parameters:
+ description: URL to the source of the model.
+ example: >-
+ https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf
+ engine_parameters:
+ type: object
+ properties:
+ pre_prompt:
+ type: string
+ description: Predefined prompt used for setting up internal configurations.
+ default: ""
+ example: Initial setup complete.
+ system_prompt:
+ type: string
+ description: Prefix used for system-level prompts.
+ default: "SYSTEM: "
+ user_prompt:
+ type: string
+ description: Prefix used for user prompts.
+ default: "USER: "
+ ai_prompt:
+ type: string
+ description: Prefix used for assistant prompts.
+ default: "ASSISTANT: "
+ ngl:
+ type: integer
+ description: >-
+ Number of neural network layers loaded onto the GPU for
+ acceleration.
+ minimum: 0
+ maximum: 100
+ default: 100
+ example: 100
+ ctx_len:
+ type: integer
+ description: >-
+ Context length for model operations, varies based on the
+ specific model.
+ minimum: 128
+ maximum: 4096
+ default: 2048
+ example: 2048
+ n_parallel:
+ type: integer
+ description: >-
+ Number of parallel operations, relevant when continuous batching
+ is enabled.
+ minimum: 1
+ maximum: 10
+ default: 1
+ example: 4
+ cont_batching:
+ type: boolean
+ description: Indicates if continuous batching is used for processing.
+ default: false
+ example: false
+ cpu_threads:
+ type: integer
+ description: Number of threads allocated for CPU-based inference.
+ minimum: 1
+ example: 8
+ embedding:
+ type: boolean
+ description: Indicates if embedding layers are enabled in the model.
+ default: true
+ example: true
+ model_parameters:
type: object
properties:
ctx_len:
type: integer
- description: "Context length."
+ description: Maximum context length the model can handle.
+ minimum: 0
+ maximum: 4096
+ default: 2048
example: 2048
ngl:
type: integer
- description: "Number of layers."
+ description: Number of layers in the neural network.
+ minimum: 1
+ maximum: 100
+ default: 100
example: 100
embedding:
type: boolean
- description: "Indicates if embedding is enabled."
+ description: Indicates if embedding layers are used.
+ default: true
example: true
n_parallel:
type: integer
- description: "Number of parallel processes."
+ description: Number of parallel processes the model can run.
+ minimum: 1
+ maximum: 10
+ default: 1
example: 4
- # pre_prompt:
- # type: string
- # description: "Predefined prompt for initiating the chat."
- # example: "A chat between a curious user and an artificial intelligence"
- # user_prompt:
- # type: string
- # description: "Format of user's prompt."
- # example: "USER: "
- # ai_prompt:
- # type: string
- # description: "Format of AI's response."
- # example: "ASSISTANT: "
temperature:
- type: string
- description: "Temperature setting for the model."
- example: "0.7"
+ type: number
+ description: >-
+ Controls randomness in model's responses. Higher values lead to
+ more random responses.
+ minimum: 0
+ maximum: 2
+ default: 0.7
+ example: 0.7
token_limit:
- type: string
- description: "Token limit for the model."
- example: "2048"
+ type: integer
+ description: >-
+ Maximum number of tokens the model can generate in a single
+ response.
+ minimum: 1
+ maximum: 4096
+ default: 2048
+ example: 2048
top_k:
- type: string
- description: "Top-k setting for the model."
- example: "0"
+ type: integer
+ description: >-
+ Limits the model to consider only the top k most likely next
+ tokens at each step.
+ minimum: 0
+ maximum: 100
+ default: 0
+ example: 0
top_p:
- type: string
- description: "Top-p setting for the model."
- example: "1"
+ type: number
+ description: >-
+ Nucleus sampling parameter. The model considers the smallest set
+ of tokens whose cumulative probability exceeds the top_p value.
+ minimum: 0
+ maximum: 1
+ default: 1
+ example: 1
metadata:
type: object
properties:
engine:
type: string
- description: "The engine used by the model."
- example: "llamacpp"
+ description: The engine used by the model.
+ enum:
+ - nitro
+ - openai
+ - hf_inference
quantization:
type: string
- description: "Quantization parameter of the model."
- example: "Q3_K_L"
+ description: Quantization parameter of the model.
+ example: Q3_K_L
size:
type: string
- description: "Size of the model."
- example: "7B"
+ description: Size of the model.
+ example: 7B
required:
- id
- object
@@ -240,77 +330,71 @@ components:
- source_url
- parameters
- metadata
-
DeleteModelResponse:
type: object
properties:
id:
type: string
- description: "The identifier of the model that was deleted."
- example: "model-zephyr-7B"
+ description: The identifier of the model that was deleted.
+ example: model-zephyr-7B
object:
type: string
- description: "Type of the object, indicating it's a model."
- default: "model"
+ description: Type of the object, indicating it's a model.
+ default: model
deleted:
type: boolean
- description: "Indicates whether the model was successfully deleted."
+ description: Indicates whether the model was successfully deleted.
example: true
-
-
StartModelResponse:
type: object
properties:
id:
type: string
- description: "The identifier of the model that was started."
- example: "model-zephyr-7B"
+ description: The identifier of the model that was started.
+ example: model-zephyr-7B
object:
type: string
- description: "Type of the object, indicating it's a model."
- default: "model"
+ description: Type of the object, indicating it's a model.
+ default: model
state:
type: string
- description: "The current state of the model after the start operation."
- example: "running"
+ description: The current state of the model after the start operation.
+ example: running
required:
- id
- object
- state
-
StopModelResponse:
type: object
properties:
id:
type: string
- description: "The identifier of the model that was started."
- example: "model-zephyr-7B"
+ description: The identifier of the model that was started.
+ example: model-zephyr-7B
object:
type: string
- description: "Type of the object, indicating it's a model."
- default: "model"
+ description: Type of the object, indicating it's a model.
+ default: model
state:
type: string
- description: "The current state of the model after the start operation."
- example: "stopped"
+ description: The current state of the model after the start operation.
+ example: stopped
required:
- id
- object
- state
-
DownloadModelResponse:
type: object
properties:
id:
type: string
- description: "The identifier of the model that was started."
- example: "model-zephyr-7B"
+ description: The identifier of the model that was started.
+ example: model-zephyr-7B
object:
type: string
- description: "Type of the object, indicating it's a model."
- default: "model"
+ description: Type of the object, indicating it's a model.
+ default: model
state:
type: string
- description: "The current state of the model after the start operation."
- example: "downloaded"
-
+ description: The current state of the model after the start operation.
+ example: downloaded
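The reworked `GetModelResponse` (with the new `engine_parameters` and `model_parameters` blocks) describes a payload roughly like the sketch below. Values are drawn from the defaults and examples in the schema; the prompt-prefix fields of `engine_parameters` are omitted for brevity.

```json
{
  "id": "zephyr-7b",
  "object": "model",
  "created": 1253935178,
  "owned_by": "_",
  "state": "downloaded",
  "source_url": "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf",
  "engine_parameters": {
    "ngl": 100,
    "ctx_len": 2048,
    "n_parallel": 4,
    "cont_batching": false,
    "cpu_threads": 8,
    "embedding": true
  },
  "model_parameters": {
    "ctx_len": 2048,
    "ngl": 100,
    "embedding": true,
    "n_parallel": 4,
    "temperature": 0.7,
    "token_limit": 2048,
    "top_k": 0,
    "top_p": 1
  },
  "metadata": {
    "engine": "nitro",
    "quantization": "Q3_K_L",
    "size": "7B"
  }
}
```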
diff --git a/docs/openapi/specs/threads.yaml b/docs/openapi/specs/threads.yaml
index c009381e3..f432b56ff 100644
--- a/docs/openapi/specs/threads.yaml
+++ b/docs/openapi/specs/threads.yaml
@@ -6,123 +6,111 @@ components:
id:
type: string
description: |
- "The identifier of the thread, defaults to foldername."
-
+ The identifier of the thread, defaults to foldername.
example: thread_....
object:
type: string
description: |
- "Type of the object, defaults to thread."
-
+ Type of the object, defaults to thread.
example: thread
title:
type: string
- description: |
- "A brief summary or description of the thread, defaults to an empty string."
-
-
- example: "funny physics joke"
+ description: >
+ A brief summary or description of the thread, defaults to an empty
+ string.
+ example: funny physics joke
assistants:
type: array
- description: |
-
+ description: ""
items:
properties:
assistant_id:
type: string
description: |
The identifier of assistant, defaults to "jan"
-
-
example: jan
model:
type: object
properties:
id:
type: string
- description: |
-
+ description: ""
example: ...
- settings:
+ settings:
type: object
- description: |
- Defaults to and overrides assistant.json's "settings" (and if none, then model.json "settings")
-
-
+ description: >
+ Defaults to and overrides assistant.json's "settings" (and
+ if none, then model.json "settings")
parameters:
type: object
- description: |
- Defaults to and overrides assistant.json's "parameters" (and if none, then model.json "parameters")
-
-
+ description: >
+ Defaults to and overrides assistant.json's "parameters"
+ (and if none, then model.json "parameters")
created:
type: integer
format: int64
- description: |
- "Unix timestamp representing the creation time of the thread, defaults to file creation time."
-
+ description: >
+ Unix timestamp representing the creation time of the thread,
+ defaults to file creation time.
example: 1231231
metadata:
type: object
description: |
- "Metadata associated with the thread, defaults to an empty object."
-
+ Metadata associated with the thread, defaults to an empty object.
example: {}
-
GetThreadResponse:
type: object
properties:
id:
type: string
- description: "The identifier of the thread."
+ description: The identifier of the thread.
example: thread_abc123
object:
type: string
- description: "Type of the object"
+ description: Type of the object
example: thread
created_at:
type: integer
format: int64
- description: "Unix timestamp representing the creation time of the thread."
+ description: Unix timestamp representing the creation time of the thread.
example: 1699014083
assistants:
type: array
items:
type: string
- description: "List of assistants involved in the thread."
- example: ["assistant-001"]
+ description: List of assistants involved in the thread.
+ example:
+ - assistant-001
metadata:
type: object
- description: "Metadata associated with the thread."
+ description: Metadata associated with the thread.
example: {}
messages:
type: array
items:
type: string
- description: "List of messages within the thread."
+ description: List of messages within the thread.
example: []
-
CreateThreadResponse:
type: object
properties:
id:
type: string
- description: "The identifier of the newly created thread."
+ description: The identifier of the newly created thread.
example: thread_abc123
object:
type: string
- description: "Type of the object, indicating it's a thread."
+ description: Type of the object, indicating it's a thread.
example: thread
created_at:
type: integer
format: int64
- description: "Unix timestamp representing the creation time of the thread."
+ description: Unix timestamp representing the creation time of the thread.
example: 1699014083
metadata:
type: object
- description: "Metadata associated with the newly created thread."
+ description: Metadata associated with the newly created thread.
example: {}
-
ThreadMessageObject:
type: object
properties:
@@ -130,21 +118,19 @@ components:
type: string
description: |
"Role of the sender, either 'user' or 'assistant'."
-
- enum: ["user", "assistant"]
+ enum:
+ - user
+ - assistant
content:
type: string
description: |
"Text content of the message."
-
file_ids:
type: array
items:
type: string
description: |
"Array of file IDs associated with the message, if any."
-
-
ModifyThreadResponse:
type: object
properties:
@@ -160,19 +146,18 @@ components:
created_at:
type: integer
format: int64
- description: "Unix timestamp representing the creation time of the thread."
+ description: Unix timestamp representing the creation time of the thread.
example: 1699014083
metadata:
type: object
- description: "Metadata associated with the modified thread."
+ description: Metadata associated with the modified thread.
example: {}
-
DeleteThreadResponse:
type: object
properties:
id:
type: string
- description: "The identifier of the deleted thread."
+ description: The identifier of the deleted thread.
example: thread_abc123
object:
type: string
@@ -180,5 +165,5 @@ components:
example: thread.deleted
deleted:
type: boolean
- description: "Indicates whether the thread was successfully deleted."
- example: true
\ No newline at end of file
+ description: Indicates whether the thread was successfully deleted.
+ example: true
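Read together, the `GetThreadResponse` above corresponds to a payload along these lines, sketched from the schema's example values.

```json
{
  "id": "thread_abc123",
  "object": "thread",
  "created_at": 1699014083,
  "assistants": ["assistant-001"],
  "metadata": {},
  "messages": []
}
```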
diff --git a/docs/package.json b/docs/package.json
index a28f6dbb2..246398992 100644
--- a/docs/package.json
+++ b/docs/package.json
@@ -38,6 +38,7 @@
"react-icons": "^4.11.0",
"redocusaurus": "^2.0.0",
"sass": "^1.69.3",
+ "tailwind-merge": "^2.1.0",
"tailwindcss": "^3.3.3"
},
"devDependencies": {
diff --git a/docs/src/components/Announcement/index.js b/docs/src/containers/Banner/index.js
similarity index 90%
rename from docs/src/components/Announcement/index.js
rename to docs/src/containers/Banner/index.js
index 922be62f7..ea00cf8b1 100644
--- a/docs/src/components/Announcement/index.js
+++ b/docs/src/containers/Banner/index.js
@@ -27,7 +27,7 @@ export default function AnnoncementBanner() {
return (