-
+ jan.AppImage
diff --git a/core/package.json b/core/package.json
index c4d0d475d..9e4d8d69a 100644
--- a/core/package.json
+++ b/core/package.json
@@ -8,8 +8,8 @@
],
"homepage": "https://jan.ai",
"license": "AGPL-3.0",
- "main": "dist/core.umd.js",
- "module": "dist/core.es5.js",
+ "main": "dist/core.es5.js",
+ "module": "dist/core.cjs.js",
"typings": "dist/types/index.d.ts",
"files": [
"dist",
@@ -17,8 +17,7 @@
],
"author": "Jan ",
"exports": {
- ".": "./dist/core.umd.js",
- "./sdk": "./dist/core.umd.js",
+ ".": "./dist/core.es5.js",
"./node": "./dist/node/index.cjs.js"
},
"typesVersions": {
@@ -27,10 +26,6 @@
"./dist/core.es5.js.map",
"./dist/types/index.d.ts"
],
- "sdk": [
- "./dist/core.es5.js.map",
- "./dist/types/index.d.ts"
- ],
"node": [
"./dist/node/index.cjs.js.map",
"./dist/types/node/index.d.ts"
@@ -38,13 +33,14 @@
}
},
"scripts": {
- "lint": "tslint --project tsconfig.json -t codeFrame 'src/**/*.ts' 'test/**/*.ts'",
+ "lint": "tslint --project tsconfig.json -t codeFrame 'src/**/*.ts' 'test/**/*.ts'",
"test": "jest",
"prebuild": "rimraf dist",
"build": "tsc --module commonjs && rollup -c rollup.config.ts",
"start": "rollup -c rollup.config.ts -w"
},
"devDependencies": {
+ "@rollup/plugin-replace": "^5.0.5",
"@types/jest": "^29.5.12",
"@types/node": "^20.11.4",
"eslint": "8.57.0",
@@ -63,6 +59,6 @@
},
"dependencies": {
"rxjs": "^7.8.1",
- "ulid": "^2.3.0"
+ "ulidx": "^2.3.0"
}
}
diff --git a/core/rollup.config.ts b/core/rollup.config.ts
index 95305bf25..865e86d5c 100644
--- a/core/rollup.config.ts
+++ b/core/rollup.config.ts
@@ -3,17 +3,16 @@ import commonjs from 'rollup-plugin-commonjs'
import sourceMaps from 'rollup-plugin-sourcemaps'
import typescript from 'rollup-plugin-typescript2'
import json from 'rollup-plugin-json'
+import replace from '@rollup/plugin-replace'
const pkg = require('./package.json')
-const libraryName = 'core'
-
export default [
{
input: `src/index.ts`,
output: [
- { file: pkg.main, name: libraryName, format: 'umd', sourcemap: true },
- { file: pkg.module, format: 'es', sourcemap: true },
+ // { file: pkg.main, name: libraryName, format: 'umd', sourcemap: true },
+ { file: pkg.main, format: 'es', sourcemap: true },
],
// Indicate here external modules you don't wanna include in your bundle (i.e.: 'lodash')
external: ['path'],
@@ -30,7 +29,13 @@ export default [
// Allow node_modules resolution, so you can use 'external' to control
// which external modules to include in the bundle
// https://github.com/rollup/rollup-plugin-node-resolve#usage
- resolve(),
+ replace({
+ 'node:crypto': 'crypto',
+ 'delimiters': ['"', '"'],
+ }),
+ resolve({
+ browser: true,
+ }),
// Resolve source maps to the original source
sourceMaps(),
@@ -46,7 +51,7 @@ export default [
'pacote',
'@types/pacote',
'@npmcli/arborist',
- 'ulid',
+ 'ulidx',
'node-fetch',
'fs',
'request',
@@ -64,7 +69,7 @@ export default [
// Allow json resolution
json(),
// Compile TypeScript files
- typescript({ useTsconfigDeclarationDir: true, exclude: ['src/*.ts', 'src/extensions/**'] }),
+ typescript({ useTsconfigDeclarationDir: true }),
// Allow bundling cjs modules (unlike webpack, rollup doesn't understand cjs)
commonjs(),
// Allow node_modules resolution, so you can use 'external' to control
diff --git a/core/src/core.ts b/core/src/core.ts
index 47c0fe6f2..3339759b2 100644
--- a/core/src/core.ts
+++ b/core/src/core.ts
@@ -13,7 +13,7 @@ const executeOnMain: (extension: string, method: string, ...args: any[]) => Prom
extension,
method,
...args
-) => global.core?.api?.invokeExtensionFunc(extension, method, ...args)
+) => globalThis.core?.api?.invokeExtensionFunc(extension, method, ...args)
/**
* Downloads a file from a URL and saves it to the local file system.
@@ -26,7 +26,7 @@ const executeOnMain: (extension: string, method: string, ...args: any[]) => Prom
const downloadFile: (downloadRequest: DownloadRequest, network?: NetworkConfig) => Promise = (
downloadRequest,
network
-) => global.core?.api?.downloadFile(downloadRequest, network)
+) => globalThis.core?.api?.downloadFile(downloadRequest, network)
/**
* Aborts the download of a specific file.
@@ -34,14 +34,14 @@ const downloadFile: (downloadRequest: DownloadRequest, network?: NetworkConfig)
* @returns {Promise} A promise that resolves when the download has been aborted.
*/
const abortDownload: (fileName: string) => Promise = (fileName) =>
- global.core.api?.abortDownload(fileName)
+ globalThis.core.api?.abortDownload(fileName)
/**
* Gets Jan's data folder path.
*
* @returns {Promise} A Promise that resolves with Jan's data folder path.
*/
-const getJanDataFolderPath = (): Promise => global.core.api?.getJanDataFolderPath()
+const getJanDataFolderPath = (): Promise => globalThis.core.api?.getJanDataFolderPath()
/**
* Opens the file explorer at a specific path.
@@ -49,21 +49,21 @@ const getJanDataFolderPath = (): Promise => global.core.api?.getJanDataF
* @returns {Promise} A promise that resolves when the file explorer is opened.
*/
const openFileExplorer: (path: string) => Promise = (path) =>
- global.core.api?.openFileExplorer(path)
+ globalThis.core.api?.openFileExplorer(path)
/**
* Joins multiple paths together.
* @param paths - The paths to join.
* @returns {Promise} A promise that resolves with the joined path.
*/
-const joinPath: (paths: string[]) => Promise = (paths) => global.core.api?.joinPath(paths)
+const joinPath: (paths: string[]) => Promise = (paths) => globalThis.core.api?.joinPath(paths)
/**
* Retrive the basename from an url.
* @param path - The path to retrieve.
* @returns {Promise} A promise that resolves with the basename.
*/
-const baseName: (paths: string) => Promise = (path) => global.core.api?.baseName(path)
+const baseName: (paths: string) => Promise = (path) => globalThis.core.api?.baseName(path)
/**
* Opens an external URL in the default web browser.
@@ -72,20 +72,20 @@ const baseName: (paths: string) => Promise = (path) => global.core.api?.
* @returns {Promise} - A promise that resolves when the URL has been successfully opened.
*/
const openExternalUrl: (url: string) => Promise = (url) =>
- global.core.api?.openExternalUrl(url)
+ globalThis.core.api?.openExternalUrl(url)
/**
* Gets the resource path of the application.
*
* @returns {Promise} - A promise that resolves with the resource path.
*/
-const getResourcePath: () => Promise = () => global.core.api?.getResourcePath()
+const getResourcePath: () => Promise = () => globalThis.core.api?.getResourcePath()
/**
* Gets the user's home path.
* @returns return user's home path
*/
-const getUserHomePath = (): Promise => global.core.api?.getUserHomePath()
+const getUserHomePath = (): Promise => globalThis.core.api?.getUserHomePath()
/**
* Log to file from browser processes.
@@ -93,7 +93,7 @@ const getUserHomePath = (): Promise => global.core.api?.getUserHomePath(
* @param message - Message to log.
*/
const log: (message: string, fileName?: string) => void = (message, fileName) =>
- global.core.api?.log(message, fileName)
+ globalThis.core.api?.log(message, fileName)
/**
* Check whether the path is a subdirectory of another path.
@@ -104,14 +104,14 @@ const log: (message: string, fileName?: string) => void = (message, fileName) =>
* @returns {Promise} - A promise that resolves with a boolean indicating whether the path is a subdirectory.
*/
const isSubdirectory: (from: string, to: string) => Promise = (from: string, to: string) =>
- global.core.api?.isSubdirectory(from, to)
+ globalThis.core.api?.isSubdirectory(from, to)
/**
* Get system information
* @returns {Promise} - A promise that resolves with the system information.
*/
const systemInformation: () => Promise = () =>
- global.core.api?.systemInformation()
+ globalThis.core.api?.systemInformation()
/**
* Show toast message from browser processes.
@@ -120,7 +120,7 @@ const systemInformation: () => Promise = () =>
* @returns
*/
const showToast: (title: string, message: string) => void = (title, message) =>
- global.core.api?.showToast(title, message)
+ globalThis.core.api?.showToast(title, message)
/**
* Register extension point function type definition
*/
diff --git a/core/src/events.ts b/core/src/events.ts
index 700807b0c..da85f7e3b 100644
--- a/core/src/events.ts
+++ b/core/src/events.ts
@@ -5,7 +5,7 @@
* @param handler The handler function to call when the event is observed.
*/
const on: (eventName: string, handler: Function) => void = (eventName, handler) => {
- global.core?.events?.on(eventName, handler)
+ globalThis.core?.events?.on(eventName, handler)
}
/**
@@ -15,7 +15,7 @@ const on: (eventName: string, handler: Function) => void = (eventName, handler)
* @param handler The handler function to call when the event is observed.
*/
const off: (eventName: string, handler: Function) => void = (eventName, handler) => {
- global.core?.events?.off(eventName, handler)
+ globalThis.core?.events?.off(eventName, handler)
}
/**
@@ -25,7 +25,7 @@ const off: (eventName: string, handler: Function) => void = (eventName, handler)
* @param object The object to pass to the event callback.
*/
const emit: (eventName: string, object: any) => void = (eventName, object) => {
- global.core?.events?.emit(eventName, object)
+ globalThis.core?.events?.emit(eventName, object)
}
export const events = {
diff --git a/core/src/extensions/ai-engines/OAIEngine.ts b/core/src/extensions/ai-engines/OAIEngine.ts
index 3e583c9b9..948de56ca 100644
--- a/core/src/extensions/ai-engines/OAIEngine.ts
+++ b/core/src/extensions/ai-engines/OAIEngine.ts
@@ -1,5 +1,5 @@
import { requestInference } from './helpers/sse'
-import { ulid } from 'ulid'
+import { ulid } from 'ulidx'
import { AIEngine } from './AIEngine'
import {
ChatCompletionRole,
diff --git a/core/src/fs.ts b/core/src/fs.ts
index dacdbb6d6..2eb22f9d6 100644
--- a/core/src/fs.ts
+++ b/core/src/fs.ts
@@ -4,7 +4,7 @@ import { FileStat } from './types'
* Writes data to a file at the specified path.
* @returns {Promise} A Promise that resolves when the file is written successfully.
*/
-const writeFileSync = (...args: any[]) => global.core.api?.writeFileSync(...args)
+const writeFileSync = (...args: any[]) => globalThis.core.api?.writeFileSync(...args)
/**
* Writes blob data to a file at the specified path.
@@ -13,52 +13,52 @@ const writeFileSync = (...args: any[]) => global.core.api?.writeFileSync(...args
* @returns
*/
const writeBlob: (path: string, data: string) => Promise = (path, data) =>
- global.core.api?.writeBlob(path, data)
+ globalThis.core.api?.writeBlob(path, data)
/**
* Reads the contents of a file at the specified path.
* @returns {Promise} A Promise that resolves with the contents of the file.
*/
-const readFileSync = (...args: any[]) => global.core.api?.readFileSync(...args)
+const readFileSync = (...args: any[]) => globalThis.core.api?.readFileSync(...args)
/**
* Check whether the file exists
* @param {string} path
* @returns {boolean} A boolean indicating whether the path is a file.
*/
-const existsSync = (...args: any[]) => global.core.api?.existsSync(...args)
+const existsSync = (...args: any[]) => globalThis.core.api?.existsSync(...args)
/**
* List the directory files
* @returns {Promise} A Promise that resolves with the contents of the directory.
*/
-const readdirSync = (...args: any[]) => global.core.api?.readdirSync(...args)
+const readdirSync = (...args: any[]) => globalThis.core.api?.readdirSync(...args)
/**
* Creates a directory at the specified path.
* @returns {Promise} A Promise that resolves when the directory is created successfully.
*/
-const mkdirSync = (...args: any[]) => global.core.api?.mkdirSync(...args)
+const mkdirSync = (...args: any[]) => globalThis.core.api?.mkdirSync(...args)
-const mkdir = (...args: any[]) => global.core.api?.mkdir(...args)
+const mkdir = (...args: any[]) => globalThis.core.api?.mkdir(...args)
/**
* Removes a directory at the specified path.
* @returns {Promise} A Promise that resolves when the directory is removed successfully.
*/
const rmdirSync = (...args: any[]) =>
- global.core.api?.rmdirSync(...args, { recursive: true, force: true })
+ globalThis.core.api?.rmdirSync(...args, { recursive: true, force: true })
-const rm = (path: string) => global.core.api?.rm(path)
+const rm = (path: string) => globalThis.core.api?.rm(path)
/**
* Deletes a file from the local file system.
* @param {string} path - The path of the file to delete.
* @returns {Promise} A Promise that resolves when the file is deleted.
*/
-const unlinkSync = (...args: any[]) => global.core.api?.unlinkSync(...args)
+const unlinkSync = (...args: any[]) => globalThis.core.api?.unlinkSync(...args)
/**
* Appends data to a file at the specified path.
*/
-const appendFileSync = (...args: any[]) => global.core.api?.appendFileSync(...args)
+const appendFileSync = (...args: any[]) => globalThis.core.api?.appendFileSync(...args)
/**
* Synchronizes a file from a source path to a destination path.
@@ -67,15 +67,15 @@ const appendFileSync = (...args: any[]) => global.core.api?.appendFileSync(...ar
* @returns {Promise} - A promise that resolves when the file has been successfully synchronized.
*/
const syncFile: (src: string, dest: string) => Promise = (src, dest) =>
- global.core.api?.syncFile(src, dest)
+ globalThis.core.api?.syncFile(src, dest)
/**
* Copy file sync.
*/
-const copyFileSync = (...args: any[]) => global.core.api?.copyFileSync(...args)
+const copyFileSync = (...args: any[]) => globalThis.core.api?.copyFileSync(...args)
const copyFile: (src: string, dest: string) => Promise = (src, dest) =>
- global.core.api?.copyFile(src, dest)
+ globalThis.core.api?.copyFile(src, dest)
/**
* Gets the file's stats.
@@ -87,7 +87,7 @@ const copyFile: (src: string, dest: string) => Promise = (src, dest) =>
const fileStat: (path: string, outsideJanDataFolder?: boolean) => Promise = (
path,
outsideJanDataFolder
-) => global.core.api?.fileStat(path, outsideJanDataFolder)
+) => globalThis.core.api?.fileStat(path, outsideJanDataFolder)
// TODO: Export `dummy` fs functions automatically
// Currently adding these manually
diff --git a/core/src/node/api/restful/helper/builder.ts b/core/src/node/api/restful/helper/builder.ts
index 6b9bbb3a8..1ef8d34a4 100644
--- a/core/src/node/api/restful/helper/builder.ts
+++ b/core/src/node/api/restful/helper/builder.ts
@@ -216,7 +216,7 @@ export const createMessage = async (threadId: string, message: any) => {
const threadMessagesFileName = 'messages.jsonl'
try {
- const { ulid } = require('ulid')
+ const { ulid } = require('ulidx')
const msgId = ulid()
const createdAt = Date.now()
const threadMessage: ThreadMessage = {
diff --git a/core/src/node/index.ts b/core/src/node/index.ts
index 31f2f076e..02d921fd6 100644
--- a/core/src/node/index.ts
+++ b/core/src/node/index.ts
@@ -4,3 +4,5 @@ export * from './extension/manager'
export * from './extension/store'
export * from './api'
export * from './helper'
+export * from './../types'
+export * from './../api'
diff --git a/docs/blog/2024-01-10-bitdefender-false-positive-flag.mdx b/docs/blog/2024-01-10-bitdefender-false-positive-flag.mdx
index 6c208764d..4e6c9b542 100644
--- a/docs/blog/2024-01-10-bitdefender-false-positive-flag.mdx
+++ b/docs/blog/2024-01-10-bitdefender-false-positive-flag.mdx
@@ -1,25 +1,29 @@
---
-title: "Post Mortem: Bitdefender False Positive Flag"
+title: 'Post Mortem: Bitdefender False Positive Flag'
description: "10th January 2024, Jan's 0.4.4 Release on Windows triggered Bitdefender to incorrectly flag it as infected with Gen:Variant.Tedy.258323, leading to automatic quarantine warnings on users' computers."
slug: /postmortems/january-10-2024-bitdefender-false-positive-flag
tags: [Postmortem]
+keywords:
+ [
+ postmortem,
+ bitdefender,
+ false positive,
+ antivirus,
+ jan,
+ nitro,
+ incident,
+ incident response,
+ supply chain security,
+ user communication,
+ documentation,
+ antivirus compatibility,
+ cross-platform testing,
+ proactive incident response,
+ user education,
+ lessons learned,
+ ]
---
-
- Jan 10, 2024 Incident Postmortem - Bitdefender False Positive Flag on Jan AI Resolved
-
-
-
-
-
-
-
-
-
-
-
-
-
Following the recent incident related to Jan version 0.4.4 triggering Bitdefender on Windows with Gen:Variant.Tedy.258323 on January 10, 2024, we wanted to provide a comprehensive postmortem and outline the necessary follow-up actions.
## Incident Overview
diff --git a/docs/docs/about/2035.mdx b/docs/docs/about/2035.mdx
index 2806e7ade..3af7a3197 100644
--- a/docs/docs/about/2035.mdx
+++ b/docs/docs/about/2035.mdx
@@ -2,20 +2,6 @@
title: Jan's Vision for 2035
---
-
- Jan's Vision for 2035
-
-
-
-
-
-
-
-
-
-
-
-
[Jan 2035: A Robotics Company](https://hackmd.io/QIWyYbNNQVWVbupuI3kjAA)
We only have 2 planning parameters:
diff --git a/docs/docs/about/about.md b/docs/docs/about/about.md
index 32f4a3e4f..1e755752c 100644
--- a/docs/docs/about/about.md
+++ b/docs/docs/about/about.md
@@ -4,34 +4,21 @@ slug: /about
description: Jan is a desktop application that turns computers into thinking machines.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
about Jan,
desktop application,
thinking machine,
]
---
-
- About Jan
-
-
-
-
-
-
-
-
-
-
-
-
Jan turns computers into thinking machines to change how we use them.
Jan is created and maintained by Jan Labs, a robotics company.
diff --git a/docs/docs/about/faq.md b/docs/docs/about/faq.md
index b4e05b3a6..12c08a244 100644
--- a/docs/docs/about/faq.md
+++ b/docs/docs/about/faq.md
@@ -2,20 +2,6 @@
title: Frequently Asked Questions (FAQ) - Jan
---
-
- Frequently Asked Questions (FAQ)
-
-
-
-
-
-
-
-
-
-
-
-
# Frequently Asked Questions (FAQ)
## What is Jan?
diff --git a/docs/docs/about/roadmap.md b/docs/docs/about/roadmap.md
index d818aa647..82b4fa34d 100644
--- a/docs/docs/about/roadmap.md
+++ b/docs/docs/about/roadmap.md
@@ -2,19 +2,5 @@
title: Roadmap
---
-
- Roadmap
-
-
-
-
-
-
-
-
-
-
-
-
- [ ] [Immediate Roadmap on Github](https://github.com/orgs/janhq/projects/5/views/16)
- [ ] [Longer-term Roadmap on Discord](https://discord.gg/Ey62mynnYr)
diff --git a/docs/docs/acknowledgements.md b/docs/docs/acknowledgements.md
index 46418fc79..0ec3176c0 100644
--- a/docs/docs/acknowledgements.md
+++ b/docs/docs/acknowledgements.md
@@ -4,30 +4,20 @@ description: Jan is a ChatGPT-alternative that runs on your own computer, with a
slug: /acknowledgements
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
acknowledgements,
third-party libraries,
]
---
-
-
-
-
-
-
-
-
-
-
-
# Acknowledgements
We would like to express our gratitude to the following third-party libraries that have made the development of Jan possible.
diff --git a/docs/docs/community/community.mdx b/docs/docs/community/community.mdx
index 7c5ad9367..f4ce0da87 100644
--- a/docs/docs/community/community.mdx
+++ b/docs/docs/community/community.mdx
@@ -4,31 +4,23 @@ slug: /community
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
+ about Jan,
+ desktop application,
+ thinking machine,
+ community,
+ socials,
]
---
-
- Jan's Community
-
-
-
-
-
-
-
-
-
-
-
-
## Socials
- [Discord](https://discord.gg/SH3DGmUs6b)
diff --git a/docs/docs/developer/01-overview/01-architecture.md b/docs/docs/developer/01-overview/01-architecture.md
index 218964bed..09fffed69 100644
--- a/docs/docs/developer/01-overview/01-architecture.md
+++ b/docs/docs/developer/01-overview/01-architecture.md
@@ -4,31 +4,19 @@ slug: /developer/architecture
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
+ architecture,
]
---
-
- Jan AI Architecture - Modular and Extensible Framework
-
-
-
-
-
-
-
-
-
-
-
-
:::warning
This page is still under construction, and should be read as a scratchpad
diff --git a/docs/docs/developer/01-overview/02-file-based.md b/docs/docs/developer/01-overview/02-file-based.md
index 2cd8a554c..2b0c15a68 100644
--- a/docs/docs/developer/01-overview/02-file-based.md
+++ b/docs/docs/developer/01-overview/02-file-based.md
@@ -4,31 +4,19 @@ slug: /developer/file-based
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
+ file based approach,
]
---
-
- Jan AI File-based Data Persistence Approach
-
-
-
-
-
-
-
-
-
-
-
-
:::warning
This page is still under construction, and should be read as a scratchpad
diff --git a/docs/docs/developer/01-overview/03-user-interface.md b/docs/docs/developer/01-overview/03-user-interface.md
index fa5a3de79..3454b2f61 100644
--- a/docs/docs/developer/01-overview/03-user-interface.md
+++ b/docs/docs/developer/01-overview/03-user-interface.md
@@ -4,31 +4,19 @@ slug: /developer/ui
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
+ UI kit,
]
---
-
- Jan AI User Interface - Customizable UI Kit
-
-
-
-
-
-
-
-
-
-
-
-
:::warning
This page is still under construction, and should be read as a scratchpad
diff --git a/docs/docs/developer/01-overview/04-install-and-prerequisites.md b/docs/docs/developer/01-overview/04-install-and-prerequisites.md
index a3e6ccfc1..efd7ebe76 100644
--- a/docs/docs/developer/01-overview/04-install-and-prerequisites.md
+++ b/docs/docs/developer/01-overview/04-install-and-prerequisites.md
@@ -4,34 +4,21 @@ slug: /developer/prereq
description: Guide to install and setup Jan for development.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
installation,
prerequisites,
developer setup,
]
---
-
- Jan AI Installation and Setup Guide - Developer Prerequisites
-
-
-
-
-
-
-
-
-
-
-
-
## Requirements
### Hardware Requirements
diff --git a/docs/docs/developer/01-overview/README.md b/docs/docs/developer/01-overview/README.md
index 4f094685a..b73c77aeb 100644
--- a/docs/docs/developer/01-overview/README.md
+++ b/docs/docs/developer/01-overview/README.md
@@ -4,31 +4,18 @@ slug: /developer
description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Jan AI Developer Documentation - Building Extensions and SDK Overview
-
-
-
-
-
-
-
-
-
-
-
-
The following docs are aimed at developers who want to build extensions on top of the Jan Framework.
:::tip
diff --git a/docs/docs/developer/02-build-assistant/01-your-first-assistant.md b/docs/docs/developer/02-build-assistant/01-your-first-assistant.md
index 863d275fe..f96dd2802 100644
--- a/docs/docs/developer/02-build-assistant/01-your-first-assistant.md
+++ b/docs/docs/developer/02-build-assistant/01-your-first-assistant.md
@@ -1,36 +1,23 @@
---
title: Your First Assistant
-slug: /developer/build-assistant/your-first-assistant/
+slug: /developer/assistant/your-first-assistant/
description: A quick start on how to build an assistant.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
quick start,
build assistant,
]
---
-
- Your First Assistant
-
-
-
-
-
-
-
-
-
-
-
-
:::caution
This is currently under development.
:::
diff --git a/docs/docs/developer/02-build-assistant/02-assistant-anatomy.md b/docs/docs/developer/02-build-assistant/02-assistant-anatomy.md
index e6951a05b..124817372 100644
--- a/docs/docs/developer/02-build-assistant/02-assistant-anatomy.md
+++ b/docs/docs/developer/02-build-assistant/02-assistant-anatomy.md
@@ -1,17 +1,18 @@
---
title: Anatomy of an Assistant
-slug: /developer/build-assistant/assistant-anatomy/
+slug: /developer/assistant/assistant-anatomy/
description: An overview of assistant.json
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
build assistant,
assistant anatomy,
]
diff --git a/docs/docs/developer/02-build-assistant/03-package-your-assistant.md b/docs/docs/developer/02-build-assistant/03-package-your-assistant.md
index 12fa1510c..e18bcc5f6 100644
--- a/docs/docs/developer/02-build-assistant/03-package-your-assistant.md
+++ b/docs/docs/developer/02-build-assistant/03-package-your-assistant.md
@@ -1,17 +1,18 @@
---
title: Package your Assistant
-slug: /developer/build-assistant/package-your-assistant/
+slug: /developer/assistant/package-your-assistant/
description: Package your assistant for sharing and publishing.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
quick start,
build assistant,
]
diff --git a/docs/docs/developer/02-build-assistant/README.mdx b/docs/docs/developer/02-build-assistant/README.mdx
index 29cf8b63d..35944a8cc 100644
--- a/docs/docs/developer/02-build-assistant/README.mdx
+++ b/docs/docs/developer/02-build-assistant/README.mdx
@@ -1,17 +1,10 @@
---
title: Build an Assistant
-slug: /developer/build-assistant
+slug: /developer/assistant
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
- Jan,
- ChatGPT alternative,
- local AI,
- private AI,
- conversational AI,
- no-subscription fee,
- large language model,
+ Jan, Rethink the Computer, local AI, privacy focus, free and open source, private and offline, conversational AI, no-subscription fee, large language models,
build assistant,
]
---
diff --git a/docs/docs/developer/03-build-engine/01-your-first-engine.md b/docs/docs/developer/03-build-engine/01-your-first-engine.md
index 0670d63c4..2c69b2199 100644
--- a/docs/docs/developer/03-build-engine/01-your-first-engine.md
+++ b/docs/docs/developer/03-build-engine/01-your-first-engine.md
@@ -1,17 +1,18 @@
---
title: Your First Engine
-slug: /developer/build-engine/your-first-engine/
+slug: /developer/engine/your-first-engine/
description: A quick start on how to build your first engine
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
quick start,
build engine,
]
@@ -21,4 +22,4 @@ keywords:
This is currently under development.
:::
-A quickstart on how to integrate tensorrt llm
\ No newline at end of file
+A quickstart on how to integrate tensorrt llm
diff --git a/docs/docs/developer/03-build-engine/02-engine-anatomy.md b/docs/docs/developer/03-build-engine/02-engine-anatomy.md
index 2f8c69a04..5f45339eb 100644
--- a/docs/docs/developer/03-build-engine/02-engine-anatomy.md
+++ b/docs/docs/developer/03-build-engine/02-engine-anatomy.md
@@ -1,17 +1,18 @@
---
title: Anatomy of an Engine
-slug: /developer/build-engine/engine-anatomy
+slug: /developer/engine/engine-anatomy
description: An overview of engine.json
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
build engine,
engine anatomy,
]
@@ -19,4 +20,4 @@ keywords:
:::caution
This is currently under development.
-:::
\ No newline at end of file
+:::
diff --git a/docs/docs/developer/03-build-engine/03-package-your-engine.md b/docs/docs/developer/03-build-engine/03-package-your-engine.md
index 794e1abb2..a44807c68 100644
--- a/docs/docs/developer/03-build-engine/03-package-your-engine.md
+++ b/docs/docs/developer/03-build-engine/03-package-your-engine.md
@@ -1,17 +1,18 @@
---
title: Package your Engine
-slug: /developer/build-engine/package-your-engine/
+slug: /developer/engine/package-your-engine/
description: Package your engine for sharing and publishing.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
build engine,
engine anatomy,
]
diff --git a/docs/docs/developer/03-build-engine/README.mdx b/docs/docs/developer/03-build-engine/README.mdx
index a2521ff54..e4c5980e0 100644
--- a/docs/docs/developer/03-build-engine/README.mdx
+++ b/docs/docs/developer/03-build-engine/README.mdx
@@ -1,17 +1,18 @@
---
title: Build an Inference Engine
-slug: /developer/build-engine/
+slug: /developer/engine/
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
build assistant,
]
---
diff --git a/docs/docs/developer/04-build-extension/01-your-first-extension.md b/docs/docs/developer/04-build-extension/01-your-first-extension.md
index 3fa9f5da5..4dd413ca6 100644
--- a/docs/docs/developer/04-build-extension/01-your-first-extension.md
+++ b/docs/docs/developer/04-build-extension/01-your-first-extension.md
@@ -1,36 +1,23 @@
---
title: Your First Extension
-slug: /developer/build-extension/your-first-extension/
+slug: /developer/extension/your-first-extension/
description: A quick start on how to build your first extension
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
quick start,
build extension,
]
---
-
- Building Your First Jan AI Extension - Quick Start Guide
-
-
-
-
-
-
-
-
-
-
-
-
:::caution
This is currently under development.
:::
diff --git a/docs/docs/developer/04-build-extension/02-extension-anatomy.md b/docs/docs/developer/04-build-extension/02-extension-anatomy.md
index 7c3cd1911..b41f1f0f4 100644
--- a/docs/docs/developer/04-build-extension/02-extension-anatomy.md
+++ b/docs/docs/developer/04-build-extension/02-extension-anatomy.md
@@ -1,17 +1,18 @@
---
title: Anatomy of an Extension
-slug: /developer/build-extension/extension-anatomy
+slug: /developer/extension/extension-anatomy
description: An overview of extensions.json
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
build extension,
extension anatomy,
]
diff --git a/docs/docs/developer/04-build-extension/03-package-your-extension.md b/docs/docs/developer/04-build-extension/03-package-your-extension.md
index cf7ffc6ba..a4c894695 100644
--- a/docs/docs/developer/04-build-extension/03-package-your-extension.md
+++ b/docs/docs/developer/04-build-extension/03-package-your-extension.md
@@ -1,17 +1,18 @@
---
title: Package your Engine
-slug: /developer/build-extension/package-your-extension/
+slug: /developer/extension/package-your-extension/
description: Package your extension for sharing and publishing.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
build extension,
extension anatomy,
]
diff --git a/docs/docs/developer/04-build-extension/README.mdx b/docs/docs/developer/04-build-extension/README.mdx
index a981281e7..ce7fce5b2 100644
--- a/docs/docs/developer/04-build-extension/README.mdx
+++ b/docs/docs/developer/04-build-extension/README.mdx
@@ -1,17 +1,10 @@
---
title: Build an Extension
-slug: /developer/build-extension/
+slug: /developer/extension/
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
- Jan,
- ChatGPT alternative,
- local AI,
- private AI,
- conversational AI,
- no-subscription fee,
- large language model,
+ Jan, Rethink the Computer, local AI, privacy focus, free and open source, private and offline, conversational AI, no-subscription fee, large language models,
build extension,
]
---
diff --git a/docs/docs/developer/05-framework/03-engineering/README.mdx b/docs/docs/developer/05-framework/03-engineering/README.mdx
index c3337ab2e..18c3904a4 100644
--- a/docs/docs/developer/05-framework/03-engineering/README.mdx
+++ b/docs/docs/developer/05-framework/03-engineering/README.mdx
@@ -4,14 +4,15 @@ slug: /developer/engineering
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
spec,
engineering,
]
diff --git a/docs/docs/developer/05-framework/03-engineering/assistants.md b/docs/docs/developer/05-framework/03-engineering/assistants.md
index 90b52ab38..2a2dc2681 100644
--- a/docs/docs/developer/05-framework/03-engineering/assistants.md
+++ b/docs/docs/developer/05-framework/03-engineering/assistants.md
@@ -3,30 +3,18 @@ title: 'Assistants'
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Assistants
-
-
-
-
-
-
-
-
-
-
-
:::caution
This is currently under development.
diff --git a/docs/docs/developer/05-framework/03-engineering/chats.md b/docs/docs/developer/05-framework/03-engineering/chats.md
index 654621e30..b13240e63 100644
--- a/docs/docs/developer/05-framework/03-engineering/chats.md
+++ b/docs/docs/developer/05-framework/03-engineering/chats.md
@@ -3,30 +3,18 @@ title: Chats
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Chats
-
-
-
-
-
-
-
-
-
-
-
:::caution
This is currently under development.
diff --git a/docs/docs/developer/05-framework/03-engineering/engine.md b/docs/docs/developer/05-framework/03-engineering/engine.md
index 8ebfff88d..653576f1b 100644
--- a/docs/docs/developer/05-framework/03-engineering/engine.md
+++ b/docs/docs/developer/05-framework/03-engineering/engine.md
@@ -2,19 +2,6 @@
title: Engine
---
-
- Engine
-
-
-
-
-
-
-
-
-
-
-
:::caution
Currently Under Development
diff --git a/docs/docs/developer/05-framework/03-engineering/files.md b/docs/docs/developer/05-framework/03-engineering/files.md
index 9f572af11..950d8729f 100644
--- a/docs/docs/developer/05-framework/03-engineering/files.md
+++ b/docs/docs/developer/05-framework/03-engineering/files.md
@@ -3,30 +3,18 @@ title: 'Files'
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Files
-
-
-
-
-
-
-
-
-
-
-
:::warning
Draft Specification: functionality has not been implemented yet.
diff --git a/docs/docs/developer/05-framework/03-engineering/fine-tuning.md b/docs/docs/developer/05-framework/03-engineering/fine-tuning.md
index 53ca2b206..dac7cf54a 100644
--- a/docs/docs/developer/05-framework/03-engineering/fine-tuning.md
+++ b/docs/docs/developer/05-framework/03-engineering/fine-tuning.md
@@ -1,16 +1,17 @@
---
-title: "Fine-tuning"
+title: 'Fine-tuning'
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
diff --git a/docs/docs/developer/05-framework/03-engineering/messages.md b/docs/docs/developer/05-framework/03-engineering/messages.md
index 6ddaba45d..37cd061c5 100644
--- a/docs/docs/developer/05-framework/03-engineering/messages.md
+++ b/docs/docs/developer/05-framework/03-engineering/messages.md
@@ -3,30 +3,18 @@ title: Messages
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Messages
-
-
-
-
-
-
-
-
-
-
-
:::caution
This is currently under development.
diff --git a/docs/docs/developer/05-framework/03-engineering/models.md b/docs/docs/developer/05-framework/03-engineering/models.md
index dbe134f07..4895954d7 100644
--- a/docs/docs/developer/05-framework/03-engineering/models.md
+++ b/docs/docs/developer/05-framework/03-engineering/models.md
@@ -3,30 +3,18 @@ title: Models
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Models
-
-
-
-
-
-
-
-
-
-
-
:::caution
This is currently under development.
diff --git a/docs/docs/developer/05-framework/03-engineering/prompts.md b/docs/docs/developer/05-framework/03-engineering/prompts.md
index 22fc578af..5897309d1 100644
--- a/docs/docs/developer/05-framework/03-engineering/prompts.md
+++ b/docs/docs/developer/05-framework/03-engineering/prompts.md
@@ -3,14 +3,15 @@ title: Prompts
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
diff --git a/docs/docs/developer/05-framework/03-engineering/threads.md b/docs/docs/developer/05-framework/03-engineering/threads.md
index f8ba018f8..161b93948 100644
--- a/docs/docs/developer/05-framework/03-engineering/threads.md
+++ b/docs/docs/developer/05-framework/03-engineering/threads.md
@@ -3,30 +3,18 @@ title: Threads
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Threads
-
-
-
-
-
-
-
-
-
-
-
:::caution
This is currently under development.
diff --git a/docs/docs/developer/05-framework/03-product/README.mdx b/docs/docs/developer/05-framework/03-product/README.mdx
index ca3a13b3a..5b3f35f5d 100644
--- a/docs/docs/developer/05-framework/03-product/README.mdx
+++ b/docs/docs/developer/05-framework/03-product/README.mdx
@@ -4,14 +4,15 @@ slug: /developer/product
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
spec,
product,
]
diff --git a/docs/docs/developer/05-framework/03-product/chat.md b/docs/docs/developer/05-framework/03-product/chat.md
index 3b98485b8..fdd87644b 100644
--- a/docs/docs/developer/05-framework/03-product/chat.md
+++ b/docs/docs/developer/05-framework/03-product/chat.md
@@ -3,30 +3,18 @@ title: Chat
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Chat
-
-
-
-
-
-
-
-
-
-
-
## Overview
A home screen for users to chat with [assistants](/docs/engineering/assistants) via conversation [threads](/docs/engineering/threads).
diff --git a/docs/docs/developer/05-framework/03-product/hub.md b/docs/docs/developer/05-framework/03-product/hub.md
index ea8dd81a5..ebbb24fd7 100644
--- a/docs/docs/developer/05-framework/03-product/hub.md
+++ b/docs/docs/developer/05-framework/03-product/hub.md
@@ -3,30 +3,18 @@ title: Hub
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Hub
-
-
-
-
-
-
-
-
-
-
-
## Overview
The Hub is like a store for everything, where users can discover and download models, assistants, and more.
diff --git a/docs/docs/developer/05-framework/03-product/jan.md b/docs/docs/developer/05-framework/03-product/jan.md
index b906be09d..f42e4643a 100644
--- a/docs/docs/developer/05-framework/03-product/jan.md
+++ b/docs/docs/developer/05-framework/03-product/jan.md
@@ -3,30 +3,18 @@ title: Jan (The Default Assistant)
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Jan (The Default Assistant)
-
-
-
-
-
-
-
-
-
-
-
Jan ships with a default assistant "Jan" that lets users chat with any open source model out-of-the-box.
This assistant is defined in `/jan`. It is a generic assistant to illustrate power of Jan. In the future, it will support additional features e.g. multi-assistant conversations
diff --git a/docs/docs/developer/05-framework/03-product/settings.md b/docs/docs/developer/05-framework/03-product/settings.md
index 515b5e802..327fa5c97 100644
--- a/docs/docs/developer/05-framework/03-product/settings.md
+++ b/docs/docs/developer/05-framework/03-product/settings.md
@@ -3,30 +3,18 @@ title: Settings
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Settings
-
-
-
-
-
-
-
-
-
-
-
## Overview
A settings page for users to add extensions, configure model settings, change app appearance, add keyboard shortcuts, and a plethora of other personalizations.
diff --git a/docs/docs/developer/05-framework/03-product/system-monitor.md b/docs/docs/developer/05-framework/03-product/system-monitor.md
index 15dae09ea..fc4a91751 100644
--- a/docs/docs/developer/05-framework/03-product/system-monitor.md
+++ b/docs/docs/developer/05-framework/03-product/system-monitor.md
@@ -3,30 +3,18 @@ title: System Monitor
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- System Monitor
-
-
-
-
-
-
-
-
-
-
-
## Overview
An activity screen to monitor system health and running models.
diff --git a/docs/docs/developer/05-framework/README.md b/docs/docs/developer/05-framework/README.md
index 770f5713a..c94ce9701 100644
--- a/docs/docs/developer/05-framework/README.md
+++ b/docs/docs/developer/05-framework/README.md
@@ -4,14 +4,15 @@ slug: /developer/framework/
description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
diff --git a/docs/docs/events/hcmc-oct23.md b/docs/docs/events/hcmc-oct23.md
index e70329b2d..182153263 100644
--- a/docs/docs/events/hcmc-oct23.md
+++ b/docs/docs/events/hcmc-oct23.md
@@ -3,21 +3,9 @@ title: "Jan's AI Hacker House (Ho Chi Minh City)"
description: '24-27 Oct 2023, District 3, HCMC. AI-focused talks, workshops and social events. Hosted by Jan.ai'
slug: /events/hcmc-oct23
image: /img/hcmc-launch-party.png
+keywords: [AI, Hacker House, Ho Chi Minh City, HCMC, Jan.ai]
---
-
- Jan's AI Hacker House (Ho Chi Minh City)
-
-
-
-
-
-
-
-
-
-
-

🎉 Join us at our Friday Launch Party for an evening of AI talks from other builders! [(RSVP here)](https://jan-launch-party.eventbrite.sg/) 🎉
diff --git a/docs/docs/events/nvidia-llm-day-nov-23.md b/docs/docs/events/nvidia-llm-day-nov-23.md
index f739fb4ff..a57776ef4 100644
--- a/docs/docs/events/nvidia-llm-day-nov-23.md
+++ b/docs/docs/events/nvidia-llm-day-nov-23.md
@@ -3,19 +3,6 @@ title: 'Nov 23: Nvidia GenAI Day'
description: Nvidia's LLM Day
---
-
- Nov 23: Nvidia GenAI Day
-
-
-
-
-
-
-
-
-
-
-

## Nvidia GenAI Innovation Day
diff --git a/docs/docs/guides/extensions/extensions.mdx b/docs/docs/guides/extensions/extensions.mdx
index c414cacd6..63b71b390 100644
--- a/docs/docs/guides/extensions/extensions.mdx
+++ b/docs/docs/guides/extensions/extensions.mdx
@@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 1
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Jan Extensions,
Extensions,
]
@@ -26,34 +27,35 @@ You can find the default extensions in the `Settings` > `Extensions`.
## List of Default Extensions
-| Extension Name | Version | Description | Source Code Link |
-| -------------- | ------- | ----------- | ---------------- |
-| Assistant Extension | `v1.0.0` | This extension enables assistants, including Jan, a default assistant that can call all downloaded models. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/assistant-extension ) |
-| Conversational Extension | `v1.0.0` | This extension enables conversations and state persistence via your filesystem. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/conversational-extension) |
-| Inference Nitro Extension | `v1.0.0` | This extension embeds Nitro, a lightweight (3 MB) inference engine in C++. See nitro.jan.ai. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/inference-nitro-extension) |
-| Inference Openai Extension | `v1.0.0` | This extension enables OpenAI chat completion API calls. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/inference-openai-extension) |
-| Inference Triton Trt Llm Extension | `v1.0.0` | This extension enables Nvidia's TensorRT-LLM as an inference engine option. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/inference-triton-trtllm-extension) |
-| Model Extension | `v1.0.22` | Model Management Extension provides model exploration and seamless downloads. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/model-extension) |
-| Monitoring Extension | `v1.0.9` | This extension offers system health and OS-level data. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/monitoring-extension) |
+| Extension Name | Version | Description | Source Code Link |
+| ---------------------------------- | --------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
+| Assistant Extension | `v1.0.0` | This extension enables assistants, including Jan, a default assistant that can call all downloaded models. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/assistant-extension) |
+| Conversational Extension | `v1.0.0` | This extension enables conversations and state persistence via your filesystem. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/conversational-extension) |
+| Inference Nitro Extension | `v1.0.0` | This extension embeds Nitro, a lightweight (3 MB) inference engine in C++. See nitro.jan.ai. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/inference-nitro-extension) |
+| Inference Openai Extension | `v1.0.0` | This extension enables OpenAI chat completion API calls. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/inference-openai-extension) |
+| Inference Triton Trt Llm Extension | `v1.0.0` | This extension enables Nvidia's TensorRT-LLM as an inference engine option. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/inference-triton-trtllm-extension) |
+| Model Extension | `v1.0.22` | Model Management Extension provides model exploration and seamless downloads. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/model-extension) |
+| Monitoring Extension | `v1.0.9` | This extension offers system health and OS-level data. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/monitoring-extension) |
## Configure Extension Settings
+
To configure extension settings:
+
1. Navigate to the `~/jan/extensions`.
2. Open the `extensions.json` file
3. Edit the file with options including:
-| Option | Description |
-|-----------------|-------------------------------------------------|
-| `_active` | Enable/disable the extension. |
-| `listeners` | Default listener setting. |
-| `origin` | Extension file path. |
-| `installOptions`| Version and metadata configuration. |
-| `name` | Extension name. |
-| `version` | Extension version. |
-| `main` | Main file path. |
-| `description` | Extension description. |
-| `url` | Extension URL. |
-
+| Option | Description |
+| ---------------- | ----------------------------------- |
+| `_active` | Enable/disable the extension. |
+| `listeners` | Default listener setting. |
+| `origin` | Extension file path. |
+| `installOptions` | Version and metadata configuration. |
+| `name` | Extension name. |
+| `version` | Extension version. |
+| `main` | Main file path. |
+| `description` | Extension description. |
+| `url` | Extension URL. |
```json title="~/jan/extensions/extensions.json"
{
@@ -145,18 +147,17 @@ Currently, Jan only supports official extensions, which can be directly download
For now you can always import a third party extension at your own risk by following the steps below:
-1. Navigate to **Settings** > **Extensions** > Click Select under **Manual Installation**.
+1. Navigate to **Settings** > **Extensions** > Click Select under **Manual Installation**.
2. Then, the ~/jan/extensions/extensions.json file will be updated automatically.
:::caution
-You need to prepare the extension file in .tgz format to install the **non-default** extension.
+You need to prepare the extension file in .tgz format to install the **non-default** extension.
:::
-
:::info[Assistance and Support]
If you have questions, please join our [Discord community](https://discord.gg/Dt7MxDyNNZ) for support, updates, and discussions.
-:::
\ No newline at end of file
+:::
diff --git a/docs/docs/guides/get-started/hardware-setup.mdx b/docs/docs/guides/get-started/hardware-setup.mdx
index b658ec8d8..7225708cf 100644
--- a/docs/docs/guides/get-started/hardware-setup.mdx
+++ b/docs/docs/guides/get-started/hardware-setup.mdx
@@ -5,20 +5,21 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 3
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
hardware requirements,
Nvidia,
AMD,
CPU,
- GPU
+ GPU,
]
---
-Coming Soon
\ No newline at end of file
+Coming Soon
diff --git a/docs/docs/guides/get-started/overview.mdx b/docs/docs/guides/get-started/overview.mdx
index 3137325c5..c979d53b9 100644
--- a/docs/docs/guides/get-started/overview.mdx
+++ b/docs/docs/guides/get-started/overview.mdx
@@ -5,15 +5,16 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 1
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-Coming Soon
\ No newline at end of file
+Coming Soon
diff --git a/docs/docs/guides/get-started/quickstart.mdx b/docs/docs/guides/get-started/quickstart.mdx
index 50b57fe39..243e4a59e 100644
--- a/docs/docs/guides/get-started/quickstart.mdx
+++ b/docs/docs/guides/get-started/quickstart.mdx
@@ -5,20 +5,22 @@ description: Get started quickly with Jan, a ChatGPT-alternative that runs on yo
sidebar_position: 2
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
quickstart,
getting started,
using AI model,
- installation
+ installation,
]
---
+
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import download from './asset/download.gif';
@@ -26,7 +28,9 @@ import gpt from './asset/gpt.gif';
import model from './asset/model.gif';
To get started quickly with Jan, follow the steps below:
+
## Step 1: Get Jan Desktop
+
@@ -38,7 +42,7 @@ To get started quickly with Jan, follow the steps below:
#### Stable Releases
- To download stable releases, go to [Jan.ai](https://jan.ai/) > select **Download for Mac**.
+ To download stable releases, go to [Jan](https://jan.ai/) > select **Download for Mac**.
The download should be available as a `.dmg`.
@@ -49,7 +53,7 @@ To get started quickly with Jan, follow the steps below:
You can download it from [Jan's Discord](https://discord.gg/FTk2MvZwJH) in the [`#nightly-builds`](https://discord.gg/q8szebnxZ7) channel.
#### Experimental Model
-
+
To enable the experimental mode, go to **Settings** > **Advanced Settings** and toggle the **Experimental Mode**
#### Install with Homebrew
@@ -71,15 +75,15 @@ Homebrew package installation is currently limited to **Apple Silicon Macs**, wi
#### Pre-requisites
Ensure that your system meets the following requirements:
- Windows 10 or higher is required to run Jan.
-
+
To enable GPU support, you will need:
- NVIDIA GPU with CUDA Toolkit 11.7 or higher
- NVIDIA driver 470.63.01 or higher
#### Stable Releases
- To download stable releases, go to [Jan.ai](https://jan.ai/) > select **Download for Windows**.
-
+ To download stable releases, go to [Jan](https://jan.ai/) > select **Download for Windows**.
+
The download should be available as a `.exe` file.
#### Nightly Releases
@@ -89,7 +93,7 @@ Homebrew package installation is currently limited to **Apple Silicon Macs**, wi
You can download it from [Jan's Discord](https://discord.gg/FTk2MvZwJH) in the [`#nightly-builds`](https://discord.gg/q8szebnxZ7) channel.
#### Experimental Model
-
+
To enable the experimental mode, go to **Settings** > **Advanced Settings** and toggle the **Experimental Mode**
#### Default Installation Directory
@@ -121,8 +125,8 @@ If you are stuck in a broken build, go to the [Broken Build](/guides/common-erro
#### Stable Releases
- To download stable releases, go to [Jan.ai](https://jan.ai/) > select **Download for Linux**.
-
+ To download stable releases, go to [Jan](https://jan.ai/) > select **Download for Linux**.
+
The download should be available as a `.AppImage` file or a `.deb` file.
#### Nightly Releases
@@ -132,17 +136,17 @@ If you are stuck in a broken build, go to the [Broken Build](/guides/common-erro
You can download it from [Jan's Discord](https://discord.gg/FTk2MvZwJH) in the [`#nightly-builds`](https://discord.gg/q8szebnxZ7) channel.
#### Experimental Model
-
+
To enable the experimental mode, go to **Settings** > **Advanced Settings** and toggle the **Experimental Mode**
-
+
To install Jan, you should use your package manager's install or `dpkg`.
-
+
-
+
To install Jan, run the following command:
```sh
@@ -176,9 +180,11 @@ If you are stuck in a broken build, go to the [Broken Build](/guides/common-erro
:::
+
## Step 2: Download a Model
+
Jan provides a variety of local AI models tailored to different needs, ready for download. These models are installed and run directly on the user's device.
1. Go to the **Hub**.
@@ -199,6 +205,7 @@ Ensure you select the appropriate model size by balancing performance, cost, and
:::
## Step 3: Connect to ChatGPT (Optional)
+
Jan also provides access to remote models hosted on external servers, requiring an API key for connectivity. For example, to use the ChatGPT model with Jan, you must input your API key by following these steps:
1. Go to the **Thread** tab.
@@ -214,6 +221,7 @@ Jan also provides access to remote models hosted on external servers, requiring
## Step 4: Chat with Models
+
After downloading and configuring your model, you can immediately use it in the **Thread** tab.
@@ -225,30 +233,39 @@ After downloading and configuring your model, you can immediately use it in the
## Best Practices
+
This section outlines best practices for developers, analysts, and AI enthusiasts to enhance their experience with Jan when adding AI locally to their computers. Implementing these practices will optimize the performance of AI models.
### Follow the Quickstart Guide
-The quickstart guide above is designed to facilitate a quick setup process. It provides a clear instruction and simple steps to get you up and running with Jan.ai quickly. Even, if you are inexperienced in AI.
+
+The quickstart guide above is designed to facilitate a quick setup process. It provides clear instructions and simple steps to get you up and running with Jan quickly, even if you are inexperienced in AI.
### Select the Right Models
+
+Jan offers a range of pre-configured AI models that are suited for different purposes. You should identify which one aligns with your objectives. There are several factors to consider:
+
- Capabilities
- Accuracy
- Processing Speed
:::note
+
- Some of these factors also depend on your hardware, please see Hardware Requirement.
- Choosing the right model is important to achieve the best performance.
-:::
+:::
### Setting up Jan
-Ensure that you familiarize yourself with the Jan application. Jan offers advanced settings that you can adjust. These settings may influence how your AI behaves locally. Please see the [Advanced Settings](./guides/advanced) article for a complete list of Jan's configurations and instructions on how to configure them.
+
+Ensure that you familiarize yourself with the Jan application. Jan offers advanced settings that you can adjust. These settings may influence how your AI behaves locally. Please see the [Advanced Settings](/guides/advanced) article for a complete list of Jan's configurations and instructions on how to configure them.
### Integrations
-Jan can work with many different systems and tools. Whether you are incorporating Jan.ai with any open-source LLM provider or other tools, it is important to understand the integration capabilities and limitations.
+
+Jan can work with many different systems and tools. Whether you are incorporating Jan with any open-source LLM provider or other tools, it is important to understand the integration capabilities and limitations.
### Mastering the Prompt Engineering
+
Prompt engineering is an important aspect when dealing with AI models to generate the desired outputs. Mastering this skill can significantly enhance the performance and the responses of the AI. Below are some tips that you can do for prompt engineering:
+
- Ask the model to adopt a persona
- Be specific and details get a more specific answers
- Provide examples or preference text or context at the beginning
@@ -256,4 +273,5 @@ Prompt engineering is an important aspect when dealing with AI models to generat
- Use certain keywords and phrases
## Pre-configured Models
+
To see the full list of Jan's pre-configured models, please see our official GitHub [here](https://github.com/janhq/jan).
diff --git a/docs/docs/guides/get-started/settingup-gpu.mdx b/docs/docs/guides/get-started/settingup-gpu.mdx
index 90a3743c4..cd8fb3556 100644
--- a/docs/docs/guides/get-started/settingup-gpu.mdx
+++ b/docs/docs/guides/get-started/settingup-gpu.mdx
@@ -5,15 +5,16 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 1
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-Coming Soon
\ No newline at end of file
+Coming Soon
diff --git a/docs/docs/guides/inference/overview-inference.mdx b/docs/docs/guides/inference/overview-inference.mdx
index cd51bb44e..79e306a67 100644
--- a/docs/docs/guides/inference/overview-inference.mdx
+++ b/docs/docs/guides/inference/overview-inference.mdx
@@ -5,15 +5,16 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 12
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-Coming Soon
\ No newline at end of file
+Coming Soon
diff --git a/docs/docs/guides/installation/README.mdx b/docs/docs/guides/installation/README.mdx
index 080fdaced..cd8ca4cc5 100644
--- a/docs/docs/guides/installation/README.mdx
+++ b/docs/docs/guides/installation/README.mdx
@@ -6,19 +6,21 @@ hide_table_of_contents: true
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
## Jan Device Compatibility
-Jan is compatible with macOS, Windows, and Linux, making it accessible for a wide range of users. This compatibility allows users to leverage Jan's AI tools effectively, regardless of their device or operating system.
+
+Jan is compatible with macOS, Windows, and Linux, making it accessible for a wide range of users. This compatibility allows users to leverage Jan's AI tools effectively, regardless of their device or operating system.
:::note
For detailed system requirements and setup instructions, refer to our [Hardware Setup](/guides/hardware/) guide.
@@ -28,18 +30,21 @@ import DocCardList from "@theme/DocCardList";
-
## Install Server-Side
+
To install Jan from source, follow the steps below:
### Pre-requisites
+
Before proceeding with the installation of Jan from source, ensure that the following software versions are installed on your system:
- Node.js version 20.0.0 or higher
- Yarn version 1.22.0 or higher
### Install Jan Development Build
+
1. Clone the Jan repository from GitHub by using the following command:
+
```bash
git clone https://github.com/janhq/jan
git checkout DESIRED_BRANCH
@@ -47,6 +52,7 @@ cd jan
```
2. Install the required dependencies by using the following Yarn command:
+
```bash
yarn install
@@ -61,19 +67,24 @@ yarn build:uikit
```
3. Run the development server.
+
```bash
yarn dev
```
+
This will start the development server and open the desktop app. During this step, you may encounter notifications about installing base plugins. Simply click **OK** and **Next** to continue.
### Install Jan Production Build
+
1. Clone the Jan repository from GitHub by using the following command:
+
```bash
git clone https://github.com/janhq/jan
cd jan
```
2. Install the required dependencies by using the following Yarn command:
+
```bash
yarn install
@@ -88,8 +99,9 @@ yarn build:uikit
```
3. Run the production server.
+
```bash
-yarn
+yarn
```
-This completes the installation process for Jan from source. The production-ready app for macOS can be found in the dist folder.
\ No newline at end of file
+This completes the installation process for Jan from source. The production-ready app for macOS can be found in the dist folder.
diff --git a/docs/docs/guides/installation/docker.mdx b/docs/docs/guides/installation/docker.mdx
index 86d9e7de2..55557058b 100644
--- a/docs/docs/guides/installation/docker.mdx
+++ b/docs/docs/guides/installation/docker.mdx
@@ -6,23 +6,26 @@ hide_table_of_contents: true
description: A step-by-step guide to install Jan using Docker.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Install on Docker,
Docker,
Helm,
]
---
+
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
- ### Pre-requisites
+### Pre-requisites
+
Ensure that your system meets the following requirements:
- Linux or WSL2 Docker
- Latest Docker Engine and Docker Compose
@@ -30,101 +33,102 @@ import TabItem from '@theme/TabItem';
To enable GPU support, you will need:
- `nvidia-driver`
- `nvidia-docker2`
-
+
+
:::note
+
- If you have not installed Docker, follow the instructions [here](https://docs.docker.com/engine/install/ubuntu/).
- If you have not installed the required file for GPU support, follow the instructions [here](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).
-:::
+ :::
- ### Run Jan in Docker
- You can run Jan in Docker with two methods:
- 1. Run Jan in CPU mode
- 2. Run Jan in GPU mode
-
-
-
- To run Jan in Docker CPU mode, by using the following code:
-
- ```bash
- # cpu mode with default file system
- docker compose --profile cpu-fs up -d
+ ### Run Jan in Docker
+ You can run Jan in Docker with two methods:
+ 1. Run Jan in CPU mode
+ 2. Run Jan in GPU mode
+
+
- # cpu mode with S3 file system
- docker compose --profile cpu-s3fs up -d
- ```
-
-
-
-
- To run Jan in Docker CPU mode, follow the steps below:
- 1. Check CUDA compatibility with your NVIDIA driver by running nvidia-smi and check the CUDA version in the output as shown below:
- ```sh
- nvidia-smi
+ To run Jan in Docker CPU mode, use the following command:
- # Output
- +---------------------------------------------------------------------------------------+
- | NVIDIA-SMI 531.18 Driver Version: 531.18 CUDA Version: 12.1 |
- |-----------------------------------------+----------------------+----------------------+
- | GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
- | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
- | | | MIG M. |
- |=========================================+======================+======================|
- | 0 NVIDIA GeForce RTX 4070 Ti WDDM | 00000000:01:00.0 On | N/A |
- | 0% 44C P8 16W / 285W| 1481MiB / 12282MiB | 2% Default |
- | | | N/A |
- +-----------------------------------------+----------------------+----------------------+
- | 1 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:02:00.0 Off | N/A |
- | 0% 49C P8 14W / 120W| 0MiB / 6144MiB | 0% Default |
- | | | N/A |
- +-----------------------------------------+----------------------+----------------------+
- | 2 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:05:00.0 Off | N/A |
- | 29% 38C P8 11W / 120W| 0MiB / 6144MiB | 0% Default |
- | | | N/A |
- +-----------------------------------------+----------------------+----------------------+
+ ```bash
+ # cpu mode with default file system
+ docker compose --profile cpu-fs up -d
- +---------------------------------------------------------------------------------------+
- | Processes: |
- | GPU GI CI PID Type Process name GPU Memory |
- | ID ID Usage |
- |=======================================================================================|
- ```
- 2. Visit [NVIDIA NGC Catalog](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda/tags) and find the smallest minor version of image tag that matches your CUDA version (e.g., 12.1 -> 12.1.0)
- 3. Update the `Dockerfile.gpu` line number 5 with the latest minor version of the image tag from step 2 (e.g. change `FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 AS base` to `FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 AS base`)
- 4. Run Jan in GPU mode by using the following command:
-
- ```bash
- # GPU mode with default file system
- docker compose --profile gpu-fs up -d
+ # cpu mode with S3 file system
+ docker compose --profile cpu-s3fs up -d
+ ```
- # GPU mode with S3 file system
- docker compose --profile gpu-s3fs up -d
- ```
+
+
-
-
- ### Docker Compose Profile and Environment
- The available Docker Compose profile and the environment variables listed below:
-
- #### Docker Compose Profile
+ To run Jan in Docker GPU mode, follow the steps below:
+ 1. Check CUDA compatibility with your NVIDIA driver by running nvidia-smi and check the CUDA version in the output as shown below:
+ ```sh
+ nvidia-smi
- | Profile | Description |
- |-----------|-------------------------------------------|
- | cpu-fs | Run Jan in CPU mode with default file system |
- | cpu-s3fs | Run Jan in CPU mode with S3 file system |
- | gpu-fs | Run Jan in GPU mode with default file system |
- | gpu-s3fs | Run Jan in GPU mode with S3 file system |
+ # Output
+ +---------------------------------------------------------------------------------------+
+ | NVIDIA-SMI 531.18 Driver Version: 531.18 CUDA Version: 12.1 |
+ |-----------------------------------------+----------------------+----------------------+
+ | GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
+ | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
+ | | | MIG M. |
+ |=========================================+======================+======================|
+ | 0 NVIDIA GeForce RTX 4070 Ti WDDM | 00000000:01:00.0 On | N/A |
+ | 0% 44C P8 16W / 285W| 1481MiB / 12282MiB | 2% Default |
+ | | | N/A |
+ +-----------------------------------------+----------------------+----------------------+
+ | 1 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:02:00.0 Off | N/A |
+ | 0% 49C P8 14W / 120W| 0MiB / 6144MiB | 0% Default |
+ | | | N/A |
+ +-----------------------------------------+----------------------+----------------------+
+ | 2 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:05:00.0 Off | N/A |
+ | 29% 38C P8 11W / 120W| 0MiB / 6144MiB | 0% Default |
+ | | | N/A |
+ +-----------------------------------------+----------------------+----------------------+
- #### Environment Variables
+ +---------------------------------------------------------------------------------------+
+ | Processes: |
+ | GPU GI CI PID Type Process name GPU Memory |
+ | ID ID Usage |
+ |=======================================================================================|
+ ```
+ 2. Visit [NVIDIA NGC Catalog](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda/tags) and find the smallest minor version of image tag that matches your CUDA version (e.g., 12.1 -> 12.1.0)
+ 3. Update the `Dockerfile.gpu` line number 5 with the latest minor version of the image tag from step 2 (e.g. change `FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 AS base` to `FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 AS base`)
+ 4. Run Jan in GPU mode by using the following command:
- | Environment Variable | Description |
- |--------------------------|------------------------------------------------------------|
- | S3_BUCKET_NAME | S3 bucket name - leave blank for default file system |
- | AWS_ACCESS_KEY_ID | AWS access key ID - leave blank for default file system |
- | AWS_SECRET_ACCESS_KEY | AWS secret access key - leave blank for default file system|
- | AWS_ENDPOINT | AWS endpoint URL - leave blank for default file system |
- | AWS_REGION | AWS region - leave blank for default file system |
- | API_BASE_URL | Jan Server URL, please modify it as your public ip address or domain name default http://localhost:1377 |
+ ```bash
+ # GPU mode with default file system
+ docker compose --profile gpu-fs up -d
+ # GPU mode with S3 file system
+ docker compose --profile gpu-s3fs up -d
+ ```
+
+
+
+ ### Docker Compose Profile and Environment
+ The available Docker Compose profiles and environment variables are listed below:
+
+ #### Docker Compose Profile
+
+ | Profile | Description |
+ |-----------|-------------------------------------------|
+ | cpu-fs | Run Jan in CPU mode with default file system |
+ | cpu-s3fs | Run Jan in CPU mode with S3 file system |
+ | gpu-fs | Run Jan in GPU mode with default file system |
+ | gpu-s3fs | Run Jan in GPU mode with S3 file system |
+
+ #### Environment Variables
+
+ | Environment Variable | Description |
+ |--------------------------|------------------------------------------------------------|
+ | S3_BUCKET_NAME | S3 bucket name - leave blank for default file system |
+ | AWS_ACCESS_KEY_ID | AWS access key ID - leave blank for default file system |
+ | AWS_SECRET_ACCESS_KEY | AWS secret access key - leave blank for default file system|
+ | AWS_ENDPOINT | AWS endpoint URL - leave blank for default file system |
+ | AWS_REGION | AWS region - leave blank for default file system |
+ | API_BASE_URL | Jan Server URL; set it to your public IP address or domain name (default: http://localhost:1377) |
:::warning
diff --git a/docs/docs/guides/installation/linux.mdx b/docs/docs/guides/installation/linux.mdx
index c2a6656cf..2e03fd6e7 100644
--- a/docs/docs/guides/installation/linux.mdx
+++ b/docs/docs/guides/installation/linux.mdx
@@ -6,17 +6,18 @@ hide_table_of_contents: true
description: A step-by-step guide to install Jan on your Linux.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Install on Linux,
Linux,
]
---
-Coming soon
\ No newline at end of file
+Coming soon
diff --git a/docs/docs/guides/installation/mac.mdx b/docs/docs/guides/installation/mac.mdx
index fb9586823..519fef644 100644
--- a/docs/docs/guides/installation/mac.mdx
+++ b/docs/docs/guides/installation/mac.mdx
@@ -6,18 +6,19 @@ hide_table_of_contents: true
description: A step-by-step guide to install Jan on your Mac.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
MacOs,
Install on Mac,
Apple devices,
]
---
-Coming soon
\ No newline at end of file
+Coming soon
diff --git a/docs/docs/guides/installation/windows.mdx b/docs/docs/guides/installation/windows.mdx
index f6e5f06b4..58bd1597f 100644
--- a/docs/docs/guides/installation/windows.mdx
+++ b/docs/docs/guides/installation/windows.mdx
@@ -6,14 +6,15 @@ hide_table_of_contents: true
description: A step-by-step guide to install Jan on your Windows.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Windows 10,
Windows 11,
Install on Windows,
@@ -21,4 +22,4 @@ keywords:
]
---
-Coming soon
\ No newline at end of file
+Coming soon
diff --git a/docs/docs/guides/integrations/README.mdx b/docs/docs/guides/integrations/README.mdx
index 6204df305..7ede1233b 100644
--- a/docs/docs/guides/integrations/README.mdx
+++ b/docs/docs/guides/integrations/README.mdx
@@ -5,14 +5,15 @@ sidebar_position: 1
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
build extension,
]
---
diff --git a/docs/docs/guides/integrations/crewai.mdx b/docs/docs/guides/integrations/crewai.mdx
index 4b564d437..bda409059 100644
--- a/docs/docs/guides/integrations/crewai.mdx
+++ b/docs/docs/guides/integrations/crewai.mdx
@@ -1,22 +1,23 @@
---
title: CrewAI
sidebar_position: 19
-slug: /integrations/crewai
description: A step-by-step guide on how to integrate Jan with CrewAI.
+slug: /integrations/crewai
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Continue integration,
CrewAI integration,
- CrewAI
+ CrewAI,
]
---
-Coming Soon
\ No newline at end of file
+Coming Soon
diff --git a/docs/docs/guides/integrations/discord.mdx b/docs/docs/guides/integrations/discord.mdx
index fa1c1c065..8fd676f02 100644
--- a/docs/docs/guides/integrations/discord.mdx
+++ b/docs/docs/guides/integrations/discord.mdx
@@ -5,14 +5,15 @@ sidebar_position: 5
description: A step-by-step guide on how to integrate Jan with a Discord bot.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Discord integration,
Discord,
bot,
@@ -38,36 +39,40 @@ pip install -r requirements.txt
```
### Step 3: Set the Environment
+
1. Create a copy of `.env.example`.
2. Change the name to `.env`.
3. Set the environment with the following options:
-| Setting | Instructions |
-| ------- | ------------ |
-| `DISCORD_BOT_TOKEN` | Generate a new Discord application at [discord.com/developers/applications](https://discord.com/developers/applications), obtain a token from the Bot tab, and enable MESSAGE CONTENT INTENT. |
-| `LLM` | For [Jan](https://jan.ai/), set to `local/openai/(MODEL_NAME)`, where `(MODEL_NAME)` is your loaded model's name. |
-| `CUSTOM_SYSTEM_PROMPT` | Adjust the bot's behavior as needed. |
-| `CUSTOM_DISCORD_STATUS` | Set a custom message for the bot's Discord profile. (Max 128 characters) |
-| `ALLOWED_CHANNEL_IDS` | Enter Discord channel IDs where the bot can send messages, separated by commas. Leave blank to allow all channels. |
-| `ALLOWED_ROLE_IDS` | Enter Discord role IDs allowed to use the bot, separated by commas. Leave blank to allow everyone. Including at least one role also disables DMs. |
-| `MAX_IMAGES` | Max number of image attachments allowed per message when using a vision model. (Default: `5`) |
-| `MAX_MESSAGES` | Max messages allowed in a reply chain. (Default: `20`) |
-| `LOCAL_SERVER_URL` | URL of your local API server for LLMs starting with `local/`. (Default: `http://localhost:5000/v1`) |
-| `LOCAL_API_KEY` | API key for your local API server with LLMs starting with `local/`. Usually safe to leave blank. |
+| Setting | Instructions |
+| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `DISCORD_BOT_TOKEN` | Generate a new Discord application at [discord.com/developers/applications](https://discord.com/developers/applications), obtain a token from the Bot tab, and enable MESSAGE CONTENT INTENT. |
+| `LLM` | For [Jan](https://jan.ai/), set to `local/openai/(MODEL_NAME)`, where `(MODEL_NAME)` is your loaded model's name. |
+| `CUSTOM_SYSTEM_PROMPT` | Adjust the bot's behavior as needed. |
+| `CUSTOM_DISCORD_STATUS` | Set a custom message for the bot's Discord profile. (Max 128 characters) |
+| `ALLOWED_CHANNEL_IDS` | Enter Discord channel IDs where the bot can send messages, separated by commas. Leave blank to allow all channels. |
+| `ALLOWED_ROLE_IDS` | Enter Discord role IDs allowed to use the bot, separated by commas. Leave blank to allow everyone. Including at least one role also disables DMs. |
+| `MAX_IMAGES` | Max number of image attachments allowed per message when using a vision model. (Default: `5`) |
+| `MAX_MESSAGES` | Max messages allowed in a reply chain. (Default: `20`) |
+| `LOCAL_SERVER_URL` | URL of your local API server for LLMs starting with `local/`. (Default: `http://localhost:5000/v1`) |
+| `LOCAL_API_KEY` | API key for your local API server with LLMs starting with `local/`. Usually safe to leave blank. |
### Step 4: Insert the Bot
+
Invite the bot to your Discord server using the following URL:
```
https://discord.com/api/oauth2/authorize?client_id=(CLIENT_ID)&permissions=412317273088&scope=bot
```
+
:::note
Replace `CLIENT_ID` with your Discord application's client ID from the OAuth2 tab
:::
+
### Step 5: Run the bot
Run the bot by using the following command in your command prompt:
```sh
python llmcord.py
-```
\ No newline at end of file
+```
diff --git a/docs/docs/guides/integrations/interpreter.mdx b/docs/docs/guides/integrations/interpreter.mdx
index bd2ffb8de..9acd0fa4b 100644
--- a/docs/docs/guides/integrations/interpreter.mdx
+++ b/docs/docs/guides/integrations/interpreter.mdx
@@ -5,31 +5,20 @@ sidebar_position: 6
description: A step-by-step guide on how to integrate Jan with Open Interpreter.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Open Interpreter integration,
Open Interpreter,
]
---
-
- Open Interpreter
-
-
-
-
-
-
-
-
-
-
## Integrate Open Interpreter with Jan
[Open Interpreter](https://github.com/KillianLucas/open-interpreter/) lets LLMs run code (Python, Javascript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running `interpreter` after installing. To integrate Open Interpreter with Jan, follow the steps below:
@@ -68,4 +57,4 @@ Before using Open Interpreter, configure the model in `Settings` > `My Model` fo
interpreter --api_base http://localhost:1337/v1 --model mistral-ins-7b-q4
```
-> **Open Interpreter is now ready for use!**
\ No newline at end of file
+> **Open Interpreter is now ready for use!**
diff --git a/docs/docs/guides/integrations/overview-integration.mdx b/docs/docs/guides/integrations/overview-integration.mdx
index e7ce1e3b3..344ebaa5f 100644
--- a/docs/docs/guides/integrations/overview-integration.mdx
+++ b/docs/docs/guides/integrations/overview-integration.mdx
@@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 1
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
diff --git a/docs/docs/guides/integrations/raycast.mdx b/docs/docs/guides/integrations/raycast.mdx
index 5823410b1..3611dcf00 100644
--- a/docs/docs/guides/integrations/raycast.mdx
+++ b/docs/docs/guides/integrations/raycast.mdx
@@ -4,33 +4,23 @@ slug: /integrations/raycast
sidebar_position: 17
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
raycast integration,
Raycast,
]
description: A step-by-step guide on how to integrate Jan with Raycast.
---
-
- Raycast
-
-
-
-
-
-
-
-
-
-
## Integrate Raycast with Jan
+
[Raycast](https://www.raycast.com/) is a productivity tool designed for macOS that enhances workflow efficiency by providing quick access to various tasks and functionalities through a keyboard-driven interface. To integrate Raycast with Jan, follow the steps below:
### Step 1: Download the TinyLlama Model
@@ -49,4 +39,4 @@ npm i && npm run dev
### Step 3: Search for Nitro and Run the Model
-Search for `Nitro` using the program and you can use the models from Jan in RayCast.
\ No newline at end of file
+Search for `Nitro` using the program and you can use the models from Jan in Raycast.
diff --git a/docs/docs/guides/integrations/router.mdx b/docs/docs/guides/integrations/router.mdx
index dfe44771f..42d1b1940 100644
--- a/docs/docs/guides/integrations/router.mdx
+++ b/docs/docs/guides/integrations/router.mdx
@@ -5,31 +5,20 @@ sidebar_position: 2
description: A step-by-step guide on how to integrate Jan with OpenRouter.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
OpenRouter integration,
- OpenRouter
+ OpenRouter,
]
---
-
- OpenRouter
-
-
-
-
-
-
-
-
-
-
## Integrate OpenRouter with Jan
[OpenRouter](https://openrouter.ai/docs#quick-start) is a tool that gathers AI models. Developers can utilize its API to engage with diverse large language models, generative image models, and generative 3D object models.
@@ -46,10 +35,11 @@ To connect Jan with OpenRouter for accessing remote Large Language Models (LLMs)
1. Go to the directory `~/jan/models`.
2. Make a new folder called `openrouter-(modelname)`, like `openrouter-dolphin-mixtral-8x7b`.
3. Inside the folder, create a `model.json` file with the following settings:
- - Set the `id` property to the model id obtained from OpenRouter.
- - Set the `format` property to `api`.
- - Set the `engine` property to `openai`.
- - Ensure the `state` property is set to `ready`.
+
+- Set the `id` property to the model id obtained from OpenRouter.
+- Set the `format` property to `api`.
+- Set the `engine` property to `openai`.
+- Ensure the `state` property is set to `ready`.
```json title="~/jan/models/openrouter-dolphin-mixtral-8x7b/model.json"
{
@@ -75,9 +65,10 @@ To connect Jan with OpenRouter for accessing remote Large Language Models (LLMs)
```
:::note
-For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
+For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
:::
-
+
### Step 3 : Start the Model
-1. Restart Jan and go to the **Hub**.
-2. Find your model and click on the **Use** button.
\ No newline at end of file
+
+1. Restart Jan and go to the **Hub**.
+2. Find your model and click on the **Use** button.
diff --git a/docs/docs/guides/integrations/unsloth.mdx b/docs/docs/guides/integrations/unsloth.mdx
index b9f7889cb..b99fa5ee7 100644
--- a/docs/docs/guides/integrations/unsloth.mdx
+++ b/docs/docs/guides/integrations/unsloth.mdx
@@ -5,17 +5,18 @@ slug: /integrations/unsloth
description: A step-by-step guide on how to integrate Jan with Unsloth.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Continue integration,
Unsloth integration,
]
---
-Coming Soon
\ No newline at end of file
+Coming Soon
diff --git a/docs/docs/guides/integrations/vscode.mdx b/docs/docs/guides/integrations/vscode.mdx
index 85e6df83f..943ba7968 100644
--- a/docs/docs/guides/integrations/vscode.mdx
+++ b/docs/docs/guides/integrations/vscode.mdx
@@ -5,20 +5,20 @@ slug: /integrations/continue
description: A step-by-step guide on how to integrate Jan with Continue and VS Code.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Continue integration,
VSCode integration,
]
---
-
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
@@ -43,6 +43,7 @@ To set up Continue for use with Jan's Local Server, you must activate the Jan AP
3. Press the **Start Server** button
### Step 3: Configure Continue to Use Jan's Local Server
+
1. Go to the `~/.continue` directory.
@@ -76,11 +77,12 @@ To set up Continue for use with Jan's Local Server, you must activate the Jan AP
]
}
```
+
2. Ensure the file has the following configurations:
- - Ensure `openai` is selected as the `provider`.
- - Match the `model` with the one enabled in the Jan API Server.
- - Set `apiBase` to `http://localhost:1337`.
- - Leave the `apiKey` field to `EMPTY`.
+ - Ensure `openai` is selected as the `provider`.
+ - Match the `model` with the one enabled in the Jan API Server.
+ - Set `apiBase` to `http://localhost:1337`.
+ - Leave the `apiKey` field to `EMPTY`.
### Step 4: Ensure the Using Model Is Activated in Jan
@@ -94,8 +96,7 @@ To set up Continue for use with Jan's Local Server, you must activate the Jan AP
1. Highlight a code snippet and press `Command + Shift + M` to open the Left Panel.
2. Select Jan at the bottom and ask a question about the code, for example, `Explain this code`.
- ### 2. Editing the code with the help of a large language model
+### 2. Editing the code with the help of a large language model
1. Select a code snippet and use `Command + Shift + L`.
2. Enter your editing request, such as `Add comments to this code`.
-
\ No newline at end of file
diff --git a/docs/docs/guides/local-providers/README.mdx b/docs/docs/guides/local-providers/README.mdx
index 914e2af38..36dbae13e 100644
--- a/docs/docs/guides/local-providers/README.mdx
+++ b/docs/docs/guides/local-providers/README.mdx
@@ -5,14 +5,15 @@ sidebar_position: 13
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
build extension,
]
---
diff --git a/docs/docs/guides/local-providers/llamacpp.mdx b/docs/docs/guides/local-providers/llamacpp.mdx
index 3661a3ca1..ca6285c4a 100644
--- a/docs/docs/guides/local-providers/llamacpp.mdx
+++ b/docs/docs/guides/local-providers/llamacpp.mdx
@@ -5,27 +5,29 @@ sidebar_position: 1
description: A step-by-step guide on how to customize the LlamaCPP extension.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Llama CPP integration,
LlamaCPP Extension,
]
---
-
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
## Overview
+
[Nitro](https://github.com/janhq/nitro) is an inference server on top of [llama.cpp](https://github.com/ggerganov/llama.cpp). It provides an OpenAI-compatible API, queue, & scaling.
## LlamaCPP Extension
+
:::note
Nitro is the default AI engine downloaded with Jan. There is no additional setup needed.
:::
@@ -66,18 +68,19 @@ In this guide, we'll walk you through the process of customizing your engine set
The table below describes the parameters in the `nitro.json` file.
-| Parameter | Type | Description |
-| --------- | ---- | ----------- |
-| `ctx_len` | **Integer** | Typically set at `2048`, `ctx_len` provides ample context for model operations like `GPT-3.5`. (*Maximum*: `4096`, *Minimum*: `1`) |
-| `ngl` | **Integer** | Defaulted at `100`, `ngl` determines GPU layer usage. |
-| `cpu_threads` | **Integer** | Determines CPU inference threads, limited by hardware and OS. (*Maximum* determined by system) |
-| `cont_batching` | **Integer** | Controls continuous batching, enhancing throughput for LLM inference. |
-| `embedding` | **Integer** | Enables embedding utilization for tasks like document-enhanced chat in RAG-based applications. |
+| Parameter | Type | Description |
+| --------------- | ----------- | ---------------------------------------------------------------------------------------------------------------------------------- |
+| `ctx_len` | **Integer** | Typically set at `2048`, `ctx_len` provides ample context for model operations like `GPT-3.5`. (_Maximum_: `4096`, _Minimum_: `1`) |
+| `ngl` | **Integer** | Defaulted at `100`, `ngl` determines GPU layer usage. |
+| `cpu_threads` | **Integer** | Determines CPU inference threads, limited by hardware and OS. (_Maximum_ determined by system) |
+| `cont_batching` | **Integer** | Controls continuous batching, enhancing throughput for LLM inference. |
+| `embedding` | **Integer** | Enables embedding utilization for tasks like document-enhanced chat in RAG-based applications. |
:::tip
- - By default, the value of `ngl` is set to 100, which indicates that it will offload all. If you wish to offload only 50% of the GPU, you can set `ngl` to 15 because most models on Mistral or Llama are around ~ 30 layers.
- - To utilize the embedding feature, include the JSON parameter `"embedding": true`. It will enable Nitro to process inferences with embedding capabilities. Please refer to the [Embedding in the Nitro documentation](https://nitro.jan.ai/features/embed) for a more detailed explanation.
- - To utilize the continuous batching feature for boosting throughput and minimizing latency in large language model (LLM) inference, include `cont_batching: true`. For details, please refer to the [Continuous Batching in the Nitro documentation](https://nitro.jan.ai/features/cont-batch).
+
+- By default, the value of `ngl` is set to 100, which indicates that it will offload all. If you wish to offload only 50% of the GPU, you can set `ngl` to 15 because most models on Mistral or Llama are around ~ 30 layers.
+- To utilize the embedding feature, include the JSON parameter `"embedding": true`. It will enable Nitro to process inferences with embedding capabilities. Please refer to the [Embedding in the Nitro documentation](https://nitro.jan.ai/features/embed) for a more detailed explanation.
+- To utilize the continuous batching feature for boosting throughput and minimizing latency in large language model (LLM) inference, include `cont_batching: true`. For details, please refer to the [Continuous Batching in the Nitro documentation](https://nitro.jan.ai/features/cont-batch).
:::
@@ -85,4 +88,4 @@ The table below describes the parameters in the `nitro.json` file.
If you have questions, please join our [Discord community](https://discord.gg/Dt7MxDyNNZ) for support, updates, and discussions.
-:::
\ No newline at end of file
+:::
diff --git a/docs/docs/guides/local-providers/lmstudio.mdx b/docs/docs/guides/local-providers/lmstudio.mdx
index c7d7a6236..db4234700 100644
--- a/docs/docs/guides/local-providers/lmstudio.mdx
+++ b/docs/docs/guides/local-providers/lmstudio.mdx
@@ -5,14 +5,15 @@ sidebar_position: 8
description: A step-by-step guide on how to integrate Jan with LM Studio.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
LM Studio integration,
]
---
@@ -20,6 +21,7 @@ keywords:
## Integrate LM Studio with Jan
[LM Studio](https://lmstudio.ai/) enables you to explore, download, and run local Large Language Models (LLMs). You can integrate Jan with LM Studio using two methods:
+
1. Integrate the LM Studio server with Jan UI
2. Migrate your downloaded model from LM Studio to Jan.
@@ -29,6 +31,7 @@ To integrate LM Studio with Jan follow the steps below:
In this guide, we're going to show you how to connect Jan to [LM Studio](https://lmstudio.ai/) using the second method. We'll use the [Phi 2 - GGUF](https://huggingface.co/TheBloke/phi-2-GGUF) model from Hugging Face as our example.
:::
+
### Step 1: Server Setup
1. Access the `Local Inference Server` within LM Studio.
@@ -57,7 +60,7 @@ Replace `(port)` with your chosen port number. The default is 1234.
- Set `format` to `api`.
- Specify `engine` as `openai`.
- Set `state` to `ready`.
-
+
```json title="~/jan/models/lmstudio-phi-2/model.json"
{
"sources": [
@@ -81,10 +84,10 @@ Replace `(port)` with your chosen port number. The default is 1234.
"engine": "openai"
}
```
-:::note
-For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
-:::
+:::note
+For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
+:::
### Step 3: Starting the Model
@@ -108,7 +111,6 @@ For more details regarding the `model.json` settings and parameters fields, ple
Starting from version 0.4.7, Jan enables direct import of LM Studio models using absolute file paths.
-
### Step 1: Locating the Model Path
1. Access `My Models` in LM Studio and locate your model folder.
@@ -171,4 +173,4 @@ For Windows users, ensure to include double backslashes in the URL property, suc
### Step 3: Starting the Model
1. Restart Jan and proceed to the **Hub**.
-2. Locate your model and click **Use** to activate it.
\ No newline at end of file
+2. Locate your model and click **Use** to activate it.
diff --git a/docs/docs/guides/local-providers/ollama.mdx b/docs/docs/guides/local-providers/ollama.mdx
index 49c8d4001..2f7a26227 100644
--- a/docs/docs/guides/local-providers/ollama.mdx
+++ b/docs/docs/guides/local-providers/ollama.mdx
@@ -5,14 +5,15 @@ sidebar_position: 4
description: A step-by-step guide on how to integrate Jan with Ollama.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Ollama integration,
]
---
@@ -20,6 +21,7 @@ keywords:
## Integrate Ollama with Jan
 Ollama provides you with large language models that you can run locally. There are two methods to integrate Ollama with Jan:
+
1. Integrate Ollama server with Jan.
2. Migrate the downloaded model from Ollama to Jan.
@@ -40,7 +42,6 @@ ollama run
3. According to the [Ollama documentation on OpenAI compatibility](https://github.com/ollama/ollama/blob/main/docs/openai.md), you can connect to the Ollama server using the web address `http://localhost:11434/v1/chat/completions`. To do this, change the `openai.json` file in the `~/jan/engines` folder to add the Ollama server's full web address:
-
```json title="~/jan/engines/openai.json"
{
"full_url": "http://localhost:11434/v1/chat/completions"
@@ -52,10 +53,11 @@ ollama run
1. Navigate to the `~/jan/models` folder.
 2. Create a folder named `(ollama-modelname)`, for example, `llama2`.
3. Create a `model.json` file inside the folder including the following configurations:
- - Set the `id` property to the model name as Ollama model name.
- - Set the `format` property to `api`.
- - Set the `engine` property to `openai`.
- - Set the `state` property to `ready`.
+
+- Set the `id` property to the model name as Ollama model name.
+- Set the `format` property to `api`.
+- Set the `engine` property to `openai`.
+- Set the `state` property to `ready`.
```json title="~/jan/models/llama2/model.json"
{
@@ -80,10 +82,12 @@ ollama run
"engine": "openai"
}
```
+
:::note
-For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
+For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
:::
### Step 3: Start the Model
-1. Restart Jan and navigate to the **Hub**.
-2. Locate your model and click the **Use** button.
\ No newline at end of file
+
+1. Restart Jan and navigate to the **Hub**.
+2. Locate your model and click the **Use** button.
diff --git a/docs/docs/guides/local-providers/tensorrt.mdx b/docs/docs/guides/local-providers/tensorrt.mdx
index 5cd529a49..46f4346c9 100644
--- a/docs/docs/guides/local-providers/tensorrt.mdx
+++ b/docs/docs/guides/local-providers/tensorrt.mdx
@@ -5,14 +5,15 @@ sidebar_position: 2
description: A step-by-step guide on how to customize the TensorRT-LLM extension.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
TensorRT-LLM Extension,
TensorRT,
tensorRT,
@@ -21,12 +22,15 @@ keywords:
---
## Overview
+
Users with Nvidia GPUs can get **20-40% faster token speeds** compared to using LlamaCPP engine on their laptop or desktops by using [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM). The greater implication is that you are running FP16, which is also more accurate than quantized models.
 ## TensorRT-LLM Extension
+
This guide walks you through how to install Jan's official [TensorRT-LLM Extension](https://github.com/janhq/nitro-tensorrt-llm). This extension uses [Nitro-TensorRT-LLM](https://github.com/janhq/nitro-tensorrt-llm) as the AI engine, instead of the default [Nitro-Llama-CPP](https://github.com/janhq/nitro). It includes an efficient C++ server to natively execute the [TRT-LLM C++ runtime](https://nvidia.github.io/TensorRT-LLM/gpt_runtime.html). It also comes with additional feature and performance improvements like OpenAI compatibility, tokenizer improvements, and queues.
:::warning
+
- This feature is only available for Windows users. Linux is coming soon.
- Additionally, we only prebuilt a few demo models. You can always build your desired models directly on your machine. For more information, please see [here](#build-your-own-tensorrt-models).
@@ -54,12 +58,13 @@ ls ~\jan\extensions\@janhq\tensorrt-llm-extension\dist\bin
```
### Step 2: Download a Compatible Model
+
TensorRT-LLM can only run models in `TensorRT` format. These models, aka "TensorRT Engines", are prebuilt specifically for each target OS+GPU architecture.
We offer a handful of precompiled models for Ampere and Ada cards that you can immediately download and play with:
1. Restart the application and go to the Hub.
-2. Look for models with the `TensorRT-LLM` label in the recommended models list > Click **Download**.
+2. Look for models with the `TensorRT-LLM` label in the recommended models list > Click **Download**.
:::note
This step might take some time. 🙏
@@ -68,7 +73,7 @@ This step might take some time. 🙏

3. Click use and start chatting!
-4. You may need to allow Nitro in your network
+4. You may need to allow Nitro in your network

@@ -78,7 +83,7 @@ If you are our nightly builds, you may have to reinstall the TensorRT-LLM extens
### Step 3: Configure Settings
-You can customize the default parameters for how Jan runs TensorRT-LLM.
+You can customize the default parameters for how Jan runs TensorRT-LLM.
:::info
coming soon
@@ -91,6 +96,7 @@ coming soon
For now, the model versions are pinned to the extension versions.
### Uninstall Extension
+
To uninstall the extension, follow the steps below:
1. Quit the app.
diff --git a/docs/docs/guides/providers/tensorrt-llm.md b/docs/docs/guides/providers/tensorrt-llm.md
index 3526ef25d..f73168873 100644
--- a/docs/docs/guides/providers/tensorrt-llm.md
+++ b/docs/docs/guides/providers/tensorrt-llm.md
@@ -1,35 +1,40 @@
---
title: TensorRT-LLM
slug: /guides/providers/tensorrt-llm
+keywords:
+ [
+ Jan,
+ Rethink the Computer,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ TensorRT-LLM Extension,
+ TensorRT,
+ tensorRT,
+ extension,
+ ]
---
-
- TensorRT-LLM - Jan Guides
-
-
-
-
-
-
-
-
-
-
:::info
-TensorRT-LLM support was launched in 0.4.9, and should be regarded as an Experimental feature.
+TensorRT-LLM support was launched in 0.4.9, and should be regarded as an Experimental feature.
- Only Windows is supported for now.
-- Please report bugs in our Discord's [#tensorrt-llm](https://discord.com/channels/1107178041848909847/1201832734704795688) channel.
+- Please report bugs in our Discord's [#tensorrt-llm](https://discord.com/channels/1107178041848909847/1201832734704795688) channel.
:::
-Jan supports [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) as an alternate Inference Engine, for users who have Nvidia GPUs with large VRAM. TensorRT-LLM allows for blazing fast inference, but requires Nvidia GPUs with [larger VRAM](https://nvidia.github.io/TensorRT-LLM/memory.html).
+Jan supports [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) as an alternate Inference Engine, for users who have Nvidia GPUs with large VRAM. TensorRT-LLM allows for blazing fast inference, but requires Nvidia GPUs with [larger VRAM](https://nvidia.github.io/TensorRT-LLM/memory.html).
## What is TensorRT-LLM?
-[TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) is an hardware-optimized LLM inference engine for Nvidia GPUs, that compiles models to run extremely fast on Nvidia GPUs.
-- Mainly used on Nvidia's Datacenter-grade GPUs like the H100s [to produce 10,000 tok/s](https://nvidia.github.io/TensorRT-LLM/blogs/H100vsA100.html).
+[TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) is a hardware-optimized LLM inference engine for Nvidia GPUs, that compiles models to run extremely fast on Nvidia GPUs.
+
+- Mainly used on Nvidia's Datacenter-grade GPUs like the H100s [to produce 10,000 tok/s](https://nvidia.github.io/TensorRT-LLM/blogs/H100vsA100.html).
- Can be used on Nvidia's workstation (e.g. [A6000](https://www.nvidia.com/en-us/design-visualization/rtx-6000/)) and consumer-grade GPUs (e.g. [RTX 4090](https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/))
:::tip[Benefits]
@@ -48,7 +53,6 @@ Jan supports [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) as an altern
:::
-
## Requirements
### Hardware
@@ -59,11 +63,11 @@ Jan supports [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) as an altern
**Compatible GPUs**
-| Architecture | Supported? | Consumer-grade | Workstation-grade |
-| ------------ | --- | -------------- | ----------------- |
-| Ada | ✅ | 4050 and above | RTX A2000 Ada |
-| Ampere | ✅ | 3050 and above | A100 |
-| Turing | ❌ | Not Supported | Not Supported |
+| Architecture | Supported? | Consumer-grade | Workstation-grade |
+| ------------ | ---------- | -------------- | ----------------- |
+| Ada | ✅ | 4050 and above | RTX A2000 Ada |
+| Ampere | ✅ | 3050 and above | A100 |
+| Turing | ❌ | Not Supported | Not Supported |
:::info
@@ -74,8 +78,8 @@ Please ping us in Discord's [#tensorrt-llm](https://discord.com/channels/1107178
### Software
- Jan v0.4.9+ or Jan v0.4.8-321+ (nightly)
-- [Nvidia Driver v535+](https://jan.ai/guides/common-error/not-using-gpu/#1-ensure-gpu-mode-requirements)
-- [CUDA Toolkit v12.2+](https://jan.ai/guides/common-error/not-using-gpu/#1-ensure-gpu-mode-requirements)
+- [Nvidia Driver v535+](https://jan.ai/guides/common-error/not-using-gpu/#1-ensure-gpu-mode-requirements)
+- [CUDA Toolkit v12.2+](https://jan.ai/guides/common-error/not-using-gpu/#1-ensure-gpu-mode-requirements)
## Getting Started
@@ -91,6 +95,7 @@ You can check if files have been correctly downloaded:
ls ~\jan\extensions\@janhq\tensorrt-llm-extension\dist\bin
# Your Extension Folder should now include `nitro.exe`, among other `.dll` files needed to run TRT-LLM
```
+
:::
### Download a TensorRT-LLM Model
@@ -100,12 +105,11 @@ Jan's Hub has a few pre-compiled TensorRT-LLM models that you can download, whic
- We automatically download the TensorRT-LLM Model Engine for your GPU architecture
- We have made a few 1.1b models available that can run even on Laptop GPUs with 8gb VRAM
-
| Model | OS | Ada (40XX) | Ampere (30XX) | Description |
| ------------------- | ------- | ---------- | ------------- | --------------------------------------------------- |
-| Llamacorn 1.1b | Windows | ✅ | ✅ | TinyLlama-1.1b, fine-tuned for usability |
-| TinyJensen 1.1b | Windows | ✅ | ✅ | TinyLlama-1.1b, fine-tuned on Jensen Huang speeches |
-| Mistral Instruct 7b | Windows | ✅ | ✅ | Mistral |
+| Llamacorn 1.1b | Windows | ✅ | ✅ | TinyLlama-1.1b, fine-tuned for usability |
+| TinyJensen 1.1b | Windows | ✅ | ✅ | TinyLlama-1.1b, fine-tuned on Jensen Huang speeches |
+| Mistral Instruct 7b | Windows | ✅ | ✅ | Mistral |
### Importing Pre-built Models
@@ -181,6 +185,7 @@ Note the `engine` is `nitro-tensorrt-llm`: this won't work without it!
### Using a TensorRT-LLM Model
You can just select and use a TensorRT-LLM model from Jan's Thread interface.
+
- Jan will automatically start the TensorRT-LLM model engine in the background
- You may encounter a pop-up from Windows Security, asking for Nitro to allow public and private network access
@@ -201,7 +206,7 @@ coming soon
## Extension Details
-Jan's TensorRT-LLM Extension is built on top of the open source [Nitro TensorRT-LLM Server](https://github.com/janhq/nitro-tensorrt-llm), a C++ inference server on top of TensorRT-LLM that provides an OpenAI-compatible API.
+Jan's TensorRT-LLM Extension is built on top of the open source [Nitro TensorRT-LLM Server](https://github.com/janhq/nitro-tensorrt-llm), a C++ inference server on top of TensorRT-LLM that provides an OpenAI-compatible API.
### Manual Build
@@ -214,7 +219,6 @@ To manually build the artifacts needed to run the server and TensorRT-LLM, you c
3. Delete the entire Extensions folder.
4. Reopen the app, only the default extensions should be restored.
-
## Build your own TensorRT models
:::info
diff --git a/docs/docs/guides/remote-providers/README.mdx b/docs/docs/guides/remote-providers/README.mdx
index 7523be940..0e3f6b747 100644
--- a/docs/docs/guides/remote-providers/README.mdx
+++ b/docs/docs/guides/remote-providers/README.mdx
@@ -5,14 +5,15 @@ sidebar_position: 14
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
build extension,
]
---
diff --git a/docs/docs/guides/remote-providers/claude.mdx b/docs/docs/guides/remote-providers/claude.mdx
index 778761cd4..968a469fe 100644
--- a/docs/docs/guides/remote-providers/claude.mdx
+++ b/docs/docs/guides/remote-providers/claude.mdx
@@ -5,17 +5,18 @@ slug: /guides/engines/claude
description: A step-by-step guide on how to integrate Jan with LM Studio.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Claude integration,
claude,
]
---
-Coming Soon
\ No newline at end of file
+Coming Soon
diff --git a/docs/docs/guides/remote-providers/groq.mdx b/docs/docs/guides/remote-providers/groq.mdx
index 2ae4027f9..c9837bcfc 100644
--- a/docs/docs/guides/remote-providers/groq.mdx
+++ b/docs/docs/guides/remote-providers/groq.mdx
@@ -17,18 +17,6 @@ keywords:
]
---
-
- Groq
-
-
-
-
-
-
-
-
-
-
 ## How to Integrate Groq API with Jan
This guide provides step-by-step instructions on integrating the Groq API with Jan, enabling users to leverage Groq's capabilities within Jan's conversational interface.
diff --git a/docs/docs/guides/remote-providers/mistral.mdx b/docs/docs/guides/remote-providers/mistral.mdx
index 88b98f880..e93a02183 100644
--- a/docs/docs/guides/remote-providers/mistral.mdx
+++ b/docs/docs/guides/remote-providers/mistral.mdx
@@ -5,35 +5,25 @@ slug: /guides/engines/mistral
description: A step-by-step guide on how to integrate Jan with Mistral AI.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Mistral integration,
]
---
-
- Mistral AI
-
-
-
-
-
-
-
-
-
-
## How to Integrate Mistral AI with Jan
-[Mistral AI](https://docs.mistral.ai/) provides two ways to use their Large Language Models (LLM):
+[Mistral AI](https://docs.mistral.ai/) provides two ways to use their Large Language Models (LLM):
+
1. API
-2. Open-source models on Hugging Face.
+2. Open-source models on Hugging Face.
To integrate Jan with Mistral AI, follow the steps below:
@@ -58,10 +48,10 @@ This tutorial demonstrates integrating Mistral AI with Jan using the API.
1. Navigate to `~/jan/models`.
2. Create a folder named `mistral-(modelname)` (e.g., `mistral-tiny`).
3. Inside, create a `model.json` file with these settings:
- - Set `id` to the Mistral AI model ID.
- - Set `format` to `api`.
- - Set `engine` to `openai`.
- - Set `state` to `ready`.
+ - Set `id` to the Mistral AI model ID.
+ - Set `format` to `api`.
+ - Set `engine` to `openai`.
+ - Set `state` to `ready`.
```json title="~/jan/models/mistral-tiny/model.json"
{
@@ -85,15 +75,15 @@ This tutorial demonstrates integrating Mistral AI with Jan using the API.
},
"engine": "openai"
}
-
```
:::note
-- For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
+
+- For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
- Mistral AI offers various endpoints. Refer to their [endpoint documentation](https://docs.mistral.ai/platform/endpoints/) to select the one that fits your requirements. Here, we use the `mistral-tiny` model as an example.
-:::
+ :::
### Step 3: Start the Model
-1. Restart Jan and navigate to the **Hub**.
-2. Locate your model and click the **Use** button.
\ No newline at end of file
+1. Restart Jan and navigate to the **Hub**.
+2. Locate your model and click the **Use** button.
diff --git a/docs/docs/guides/remote-providers/openai.mdx b/docs/docs/guides/remote-providers/openai.mdx
index 7b1bc4cfa..f0ac032c9 100644
--- a/docs/docs/guides/remote-providers/openai.mdx
+++ b/docs/docs/guides/remote-providers/openai.mdx
@@ -5,14 +5,15 @@ slug: /guides/engines/openai
description: A step-by-step guide on how to integrate Jan with Azure OpenAI.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
integration,
Azure OpenAI Service,
]
@@ -39,13 +40,14 @@ The [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/ai-services/o
### Step 2: Model Configuration
-1. Go to the `~/jan/models` directory.
+1. Go to the `~/jan/models` directory.
2. Make a new folder called `(your-deployment-name)`, for example `gpt-35-hieu-jan`.
3. Create a `model.json` file inside the folder with the specified configurations:
- - Match the `id` property with both the folder name and your deployment name.
- - Set the `format` property as `api`.
- - Choose `openai` for the `engine` property.
- - Set the `state` property as `ready`.
+
+- Match the `id` property with both the folder name and your deployment name.
+- Set the `format` property as `api`.
+- Choose `openai` for the `engine` property.
+- Set the `state` property as `ready`.
```json title="~/jan/models/gpt-35-hieu-jan/model.json"
{
@@ -72,10 +74,10 @@ The [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/ai-services/o
```
:::note
-For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
+For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
:::
### Step 3: Start the Model
-1. Restart Jan and go to the Hub.
-2. Find your model in Jan application and click on the Use button.
\ No newline at end of file
+1. Restart Jan and go to the Hub.
+2. Find your model in Jan application and click on the Use button.
diff --git a/docs/docs/guides/remote-providers/remote-server-integration.mdx b/docs/docs/guides/remote-providers/remote-server-integration.mdx
index 2205c3f85..e96a5ba3d 100644
--- a/docs/docs/guides/remote-providers/remote-server-integration.mdx
+++ b/docs/docs/guides/remote-providers/remote-server-integration.mdx
@@ -5,32 +5,21 @@ slug: /guides/engines/remote-server
description: A step-by-step guide on how to set up Jan to connect with any remote or local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
import-models-manually,
remote server,
OAI compatible,
]
---
-
- Remote Server Integration
-
-
-
-
-
-
-
-
-
-
This guide will show you how to configure Jan as a client and point it to any remote & local (self-hosted) API server.
## OpenAI Platform Configuration
@@ -41,7 +30,6 @@ This guide will show you how to configure Jan as a client and point it to any re
2. In this folder, add a `model.json` file with Filename as `model.json`, `id` matching folder name, `Format` as `api`, `Engine` as `openai`, and `State` as `ready`.
-
```json title="~/jan/models/gpt-3.5-turbo-16k/model.json"
{
"sources": [
@@ -68,28 +56,28 @@ This guide will show you how to configure Jan as a client and point it to any re
### `model.json`
-The `model.json` file is used to set up your local models.
+The `model.json` file is used to set up your local models.
:::note
-- If you've set up your model's configuration in `nitro.json`, please note that `model.json` can overwrite the settings.
+
+- If you've set up your model's configuration in `nitro.json`, please note that `model.json` can overwrite the settings.
- When using OpenAI models like GPT-3.5 and GPT-4, you can use the default settings in `model.json` file.
-:::
+ :::
There are two important fields in model.json that you need to setup:
#### Settings
+
This is the field where to set your engine configurations, there are two imporant field that you need to define for your local models:
-| Term | Description |
-|-------------------|---------------------------------------------------------|
-| `ctx_len` | Defined based on the model's context size. |
+| Term | Description |
+| ----------------- | --------------------------------------------------------------------- |
+| `ctx_len` | Defined based on the model's context size. |
| `prompt_template` | Defined based on the model's trained template (e.g., ChatML, Alpaca). |
-To set up the `prompt_template` based on your model, follow the steps below:
- 1. Visit [Hugging Face](https://huggingface.co/), an open-source machine learning platform.
- 2. Find the current model that you're using (e.g., [Gemma 7b it](https://huggingface.co/google/gemma-7b-it)).
- 3. Review the text and identify the template.
+To set up the `prompt_template` based on your model, follow the steps below: 1. Visit [Hugging Face](https://huggingface.co/), an open-source machine learning platform. 2. Find the current model that you're using (e.g., [Gemma 7b it](https://huggingface.co/google/gemma-7b-it)). 3. Review the text and identify the template.
#### Parameters
+
`parameters` is the adjustable settings that affect how your model operates or processes the data.
The fields in `parameters` are typically general and can be the same across models. An example is provided below:
@@ -104,12 +92,11 @@ The fields in `parameters` are typically general and can be the same across mode
}
```
-
:::tip
- - You can find the list of available models in the [OpenAI Platform](https://platform.openai.com/docs/models/overview).
- - The `id` property needs to match the model name in the list.
- - For example, if you want to use the [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo), you must set the `id` property to `gpt-4-1106-preview`.
+- You can find the list of available models in the [OpenAI Platform](https://platform.openai.com/docs/models/overview).
+- The `id` property needs to match the model name in the list.
+ - For example, if you want to use the [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo), you must set the `id` property to `gpt-4-1106-preview`.
:::
@@ -142,7 +129,7 @@ Currently, you can only connect to one OpenAI-compatible endpoint at a time.
### 1. Configure a Client Connection
1. Navigate to the `~/jan/engines` folder.
-2. Modify the `openai.json file`.
+2. Modify the `openai.json file`.
:::note
@@ -166,11 +153,11 @@ Please note that currently, the code that supports any OpenAI-compatible endpoin
1. In `~/jan/models`, create a folder named `mistral-ins-7b-q4`.
2. In this folder, add a `model.json` file with Filename as `model.json`, ensure the following configurations:
- - `id` matching folder name.
- - `Format` set to `api`.
- - `Engine` set to `openai`
- - `State` set to `ready`.
+- `id` matching folder name.
+- `Format` set to `api`.
+- `Engine` set to `openai`
+- `State` set to `ready`.
```json title="~/jan/models/mistral-ins-7b-q4/model.json"
{
@@ -194,8 +181,8 @@ Please note that currently, the code that supports any OpenAI-compatible endpoin
},
"engine": "openai"
}
-
```
+
### 3. Start the Model
1. Restart Jan and navigate to the **Hub**.
@@ -205,4 +192,4 @@ Please note that currently, the code that supports any OpenAI-compatible endpoin
If you have questions or want more preconfigured GGUF models, please join our [Discord community](https://discord.gg/Dt7MxDyNNZ) for support, updates, and discussions.
-:::
\ No newline at end of file
+:::
diff --git a/docs/docs/guides/troubleshooting.mdx b/docs/docs/guides/troubleshooting.mdx
index 227de161b..fda83bb49 100644
--- a/docs/docs/guides/troubleshooting.mdx
+++ b/docs/docs/guides/troubleshooting.mdx
@@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 21
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
troubleshooting,
error codes,
broken build,
@@ -27,6 +28,7 @@ import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
## Broken Build
+
To resolve the issue where your Jan is stuck in a broken build after installation.
@@ -96,21 +98,21 @@ To resolve the issue where your Jan is stuck in a broken build after installatio
-
+
#### 1. Uninstall Jan
- To uninstall Jan, you should use your package manager's uninstall or remove option.
+ To uninstall Jan, you should use your package manager's uninstall or remove option.
This will return your system to its state before the installation of Jan.
This method can also reset all settings if you are experiencing any issues with Jan.
-
+
-
+
To uninstall Jan, run the following command.MDXContent
```sh
@@ -124,13 +126,13 @@ To resolve the issue where your Jan is stuck in a broken build after installatio
-
+
To uninstall Jan, you can uninstall Jan by deleting the `.AppImage` file.
If you wish to completely remove all user data associated with Jan after uninstallation, you can delete the user data at `~/jan`.
This method can also reset all settings if you are experiencing any issues with Jan.
-
+
@@ -155,6 +157,7 @@ To resolve the issue where your Jan is stuck in a broken build after installatio
Download the latest version of Jan from our [homepage](https://jan.ai/).
+
By following these steps, you can cleanly uninstall and reinstall Jan, ensuring a smooth and error-free experience with the latest version.
@@ -166,6 +169,7 @@ Before reinstalling Jan, ensure it's completely removed from all shared spaces i
:::
## Troubleshooting NVIDIA GPU
+
To resolve issues when the Jan app does not utilize the NVIDIA GPU on Windows and Linux systems.
#### 1. Ensure GPU Mode Requirements
@@ -268,28 +272,33 @@ If GPU mode isn't enabled by default:
"gpu_highest_vram": "0"
}
```
+
#### 4. Restart Jan
+
Restart Jan application to make sure it works.
##### Troubleshooting Tips
- - Ensure `nvidia_driver` and `cuda` fields indicate installed software.
- - If `gpus` field is empty or lacks your GPU, check NVIDIA driver and CUDA toolkit installations.
- - For further assistance, share the `settings.json` file.
+- Ensure `nvidia_driver` and `cuda` fields indicate installed software.
+- If `gpus` field is empty or lacks your GPU, check NVIDIA driver and CUDA toolkit installations.
+- For further assistance, share the `settings.json` file.
#### Tested Configurations
- **Windows 11 Pro 64-bit:**
+
- GPU: NVIDIA GeForce RTX 4070ti
- CUDA: 12.2
- NVIDIA driver: 531.18 (Bare metal)
- **Ubuntu 22.04 LTS:**
+
- GPU: NVIDIA GeForce RTX 4070ti
- CUDA: 12.2
- NVIDIA driver: 545 (Bare metal)
- **Ubuntu 20.04 LTS:**
+
- GPU: NVIDIA GeForce GTX 1660ti
- CUDA: 12.1
- NVIDIA driver: 535 (Proxmox VM passthrough GPU)
@@ -306,8 +315,11 @@ Restart Jan application to make sure it works.
3. Seek assistance in [Jan Discord](https://discord.gg/mY69SZaMaC).
## How to Get Error Logs
+
To get the error logs of your Jan application, follow the steps below:
+
#### Jan Application
+
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
@@ -315,20 +327,25 @@ To get the error logs of your Jan application, follow the steps below:
5. Click the **logs** folder.
#### Jan UI
+
1. Open your Unix or Linux terminal.
2. Use the following commands to get the recent 50 lines of log files:
+
```bash
tail -n 50 ~/jan/logs/app.log
```
#### Jan API Server
+
1. Open your Unix or Linux terminal.
2. Use the following commands to get the recent 50 lines of log files:
+
```bash
tail -n 50 ~/jan/logs/server.log
```
+
:::warning
Ensure to redact any private or sensitive information when sharing logs or error details.
:::
@@ -338,6 +355,7 @@ If you have any questions or are looking for support, please don't hesitate to c
:::
## Permission Denied
+
When running Jan, you might encounter the following error message:
```
@@ -355,13 +373,17 @@ This error mainly caused by permission problem during installation. To resolve t
```sh
sudo chown -R $(whoami) ~/.npm
```
+
:::note
+
- This command ensures that the necessary permissions are granted for Jan installation, resolving the encountered error.
- If you have any questions or are looking for support, please don't hesitate to contact us via our [Discord community](https://discord.gg/Dt7MxDyNNZ) or create a new issue in our [GitHub repository](https://github.com/janhq/jan/issues/new/choose).
-:::
+ :::
## Something's Amiss
+
When you start a chat with a model and encounter with a Something's Amiss error, here's how to resolve it:
+
1. Ensure your OS is up to date.
2. Choose a model smaller than 80% of your hardware's V/RAM. For example, on an 8GB machine, opt for models smaller than 6GB.
3. Install the latest [Nightly release](/guides/quickstart/#nightly-releases) or [clear the application cache](/troubleshooting/#broken-build) when reinstalling Jan.
@@ -410,6 +432,7 @@ If you have any questions or are looking for support, please don't hesitate to c
:::
## Undefined Issue
+
Encountering an `undefined issue` in Jan is caused by errors related to the Nitro tool or other internal processes. It can be resolved through the following steps:
1. Clearing the Jan folder and then reopen the application to determine if the problem persists
@@ -423,6 +446,7 @@ If you have any questions or are looking for support, please don't hesitate to c
:::
## Unexpected Token
+
Encountering the `Unexpected token` error when initiating a chat with OpenAI models mainly caused by either your OpenAI key or where you access your OpenAI from. This issue can be solved through the following steps:
1. Obtain an OpenAI API key from [OpenAI's developer platform](https://platform.openai.com/) and integrate it into your application.
@@ -431,4 +455,4 @@ Encountering the `Unexpected token` error when initiating a chat with OpenAI mod
:::note
If you have any questions or are looking for support, please don't hesitate to contact us via our [Discord community](https://discord.gg/Dt7MxDyNNZ) or create a new issue in our [GitHub repository](https://github.com/janhq/jan/issues/new/choose).
-:::
\ No newline at end of file
+:::
diff --git a/docs/docs/guides/user-guides/advanced-settings.mdx b/docs/docs/guides/user-guides/advanced-settings.mdx
index 3edf1f905..9a26e952f 100644
--- a/docs/docs/guides/user-guides/advanced-settings.mdx
+++ b/docs/docs/guides/user-guides/advanced-settings.mdx
@@ -5,59 +5,46 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 11
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
Advanced Settings,
HTTPS Proxy,
SSL,
settings,
- Jan settings
+ Jan settings,
]
---
-
- Advanced Settings
-
-
-
-
-
-
-
-
-
-
-
-
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
This guide will show you how to use the advanced settings in Jan.
## Access the Advanced Settings
+
To access the Jan's advanced settings, follow the steps below:
+
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
4. You can configure the following settings:
-| Feature | Description |
-|---------------------------|-----------------------------------------------------------------------------------------------------------------------|
-| **Keyboard Shortcuts** | Keyboard shortcuts speed up your workflow. For a quick overview of useful keyboard shortcuts, refer to the list [below](advanced-settings.mdx#keyboard-shortcuts). |
-| **Experimental Mode** | Enables experimental features that may be unstable. |
-| **GPU Acceleration** | Enables the boosting of your model performance by using your GPU devices for acceleration. |
-| **Jan Data Folder** | Location for messages, model configurations, and user data. Changeable to a different location. |
-| **HTTPS Proxy & Ignore SSL Certificate** | Use a proxy server for internet connections and ignore SSL certificates for self-signed certificates. Please check out the guide on how to set up your own HTTPS proxy server [here](advanced-settings.mdx#https-proxy). |
-| **Clear Logs** | Removes all logs from the Jan application. |
-| **Reset To Factory Default** | Resets the application to its original state, deleting all data including model customizations and conversation history. |
-
-
+| Feature | Description |
+| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| **Keyboard Shortcuts** | Keyboard shortcuts speed up your workflow. For a quick overview of useful keyboard shortcuts, refer to the list [below](advanced-settings.mdx#keyboard-shortcuts). |
+| **Experimental Mode** | Enables experimental features that may be unstable. |
+| **GPU Acceleration** | Enables the boosting of your model performance by using your GPU devices for acceleration. |
+| **Jan Data Folder** | Location for messages, model configurations, and user data. Changeable to a different location. |
+| **HTTPS Proxy & Ignore SSL Certificate** | Use a proxy server for internet connections and ignore SSL certificates for self-signed certificates. Please check out the guide on how to set up your own HTTPS proxy server [here](advanced-settings.mdx#https-proxy). |
+| **Clear Logs** | Removes all logs from the Jan application. |
+| **Reset To Factory Default** | Resets the application to its original state, deleting all data including model customizations and conversation history. |
## Keyboard Shortcuts
@@ -78,62 +65,72 @@ Here are some of the keyboard shortcuts that you can use in Jan.
-| Combination | Description |
-| --------------- | -------------------------------------------------- |
-| `Ctrl E` | Show list your models |
-| `Ctrl K` | Show list navigation pages |
-| `Ctrl B` | Toggle collapsible left panel |
-| `Ctrl ,` | Navigate to setting page |
-| `Enter` | Send a message |
-| `Shift + Enter` | Insert new line in input box |
+
+| Combination | Description |
+| --------------- | ---------------------------------------------------------- |
+| `Ctrl E` | Show list your models |
+| `Ctrl K` | Show list navigation pages |
+| `Ctrl B` | Toggle collapsible left panel |
+| `Ctrl ,` | Navigate to setting page |
+| `Enter` | Send a message |
+| `Shift + Enter` | Insert new line in input box |
| `Arrow Up` | Navigate to the previous option (within the search dialog) |
| `Arrow Down` | Navigate to the next option (within the search dialog) |
-| Combination | Description |
-| --------------- | -------------------------------------------------- |
-| `Ctrl E` | Show list your models |
-| `Ctrl K` | Show list navigation pages |
-| `Ctrl B` | Toggle collapsible left panel |
-| `Ctrl ,` | Navigate to setting page |
-| `Enter` | Send a message |
-| `Shift + Enter` | Insert new line in input box |
+
+| Combination | Description |
+| --------------- | ---------------------------------------------------------- |
+| `Ctrl E` | Show list your models |
+| `Ctrl K` | Show list navigation pages |
+| `Ctrl B` | Toggle collapsible left panel |
+| `Ctrl ,` | Navigate to setting page |
+| `Enter` | Send a message |
+| `Shift + Enter` | Insert new line in input box |
| `Arrow Up` | Navigate to the previous option (within the search dialog) |
| `Arrow Down` | Navigate to the next option (within the search dialog) |
+
:::note
The keyboard shortcuts are customizable.
:::
## Enable the Experimental Mode
+
To try out new fetures that are still in testing phase, follow the steps below:
+
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
4. On the **Experimental Mode** click the slider to enable.
## Enable the GPU Acceleration
+
To enhance your model performance, follow the steps below:
:::warning
Ensure that you have read the [troubleshooting guide](/troubleshooting/#troubleshooting-nvidia-gpu) here for further assistance.
:::
+
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
4. On the **GPU Acceleration** click the slider to enable.
## Access the Jan Data Folder
+
To access the folder where messages, model configurations and user data are stored, follow the steps below:
+
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
4. On the **Jan Data Folder** click the **folder icon (📂)** to access the data or the **pencil icon (✏️)** to change the folder where you keep your data.
## HTTPS Proxy
+
HTTPS Proxy encrypts data between your browser and the internet, making it hard for outsiders to intercept or read. It also helps you to maintain your privacy and security while being able to bypass regional restrictions on internet.
:::note
@@ -142,7 +139,9 @@ HTTPS Proxy encrypts data between your browser and the internet, making it hard
- HTTPS Proxy does not affect the remote model usage.
:::
+
### Setting Up Your Own HTTPS Proxy Server
+
This guide provides a simple overview of setting up an HTTPS proxy server using **Squid**, a widely used open-source proxy software.
:::note
@@ -150,19 +149,23 @@ Other software options are also available depending on your requirements.
:::
#### Step 1: Choosing a Server
+
1. Firstly, you need to choose a server to host your proxy server.
-:::note
-We recommend using a well-known cloud provider service like:
+ :::note
+ We recommend using a well-known cloud provider service like:
+
- Amazon AWS
- Google Cloud
- Microsoft Azure
- Digital Ocean
-:::
+ :::
2. Ensure that your server has a public IP address and is accessible from the internet.
#### Step 2: Installing Squid
+
Instal **Squid** using the following command:
+
```bash
sudo apt-get update
sudo apt-get install squid
@@ -219,43 +222,50 @@ sudo systemctl restart squid
:::tip
Tips for Secure Your Proxy:
+
- **Firewall rules**: Ensure that only intended users or IP addresses can connect to your proxy server. This can be achieved by setting up appropriate firewall rules.
- **Regular updates**: Keep your server and proxy software updated to ensure that you are protected against known vulnerabilities.
- **Monitoring and logging**: Monitor your proxy server for unusual activity and enable logging to keep track of the traffic passing through your proxy.
-:::
+:::
### Setting Up Jan to Use Your HTTPS Proxy
Once you have your HTTPS proxy server set up, you can configure Jan to use it.
+
1. Navigate to **Settings** > **Advanced Settings**.
2. On the **HTTPS Proxy** click the slider to enable.
3. Input your domain in the blank field.
-
## Ignore SSL Certificate
+
To Allow self-signed or unverified certificates, follow the steps below:
+
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
4. On the **Ignore SSL Certificates** click the slider to enable.
## Clear Logs
+
To clear all logs on your Jan app, follow the steps below:
:::warning
This feature clears all the data in your **Jan Data Folder**.
:::
+
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
4. On the **Clear Logs** click the the **Clear** button.
## Reset To Factory Default
+
To reset the Jan app to its original state, follow the steps below:
:::danger[Remember!]
This irreversible action is only recommended if the application is corrupted.
:::
+
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
-4. On the **Reset To Factory Default** click the the **Reset** button.
\ No newline at end of file
+4. On the **Reset To Factory Default** click the **Reset** button.
diff --git a/docs/docs/guides/user-guides/jan-data-folder.mdx b/docs/docs/guides/user-guides/jan-data-folder.mdx
index 87b3ae974..b2bf14968 100644
--- a/docs/docs/guides/user-guides/jan-data-folder.mdx
+++ b/docs/docs/guides/user-guides/jan-data-folder.mdx
@@ -5,18 +5,19 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 6
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
data folder,
source folder,
Jan data,
]
---
-Coming Soon
\ No newline at end of file
+Coming Soon
diff --git a/docs/docs/guides/user-guides/local-server.mdx b/docs/docs/guides/user-guides/local-server.mdx
index 90dcd5eeb..421981503 100644
--- a/docs/docs/guides/user-guides/local-server.mdx
+++ b/docs/docs/guides/user-guides/local-server.mdx
@@ -5,14 +5,15 @@ description: A step-by-step guide to start Jan Local Server.
sidebar_position: 10
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
local server,
start server,
api endpoint,
@@ -22,21 +23,25 @@ keywords:
Jan provides a built-in API server that can be used as a drop-in for OpenAI's API local replacement. This guide will walk you through on how to start the local server and use it to make request to the local server.
## Step 1: Set the Local Server
+
To start the local server, follow the steps below:
+
1. Navigate to the Jan main menu dashboard.
2. Click the corresponding icon on the bottom left side of your screen.
3. Select the model you want to use under the Model Settings screen to set the LLM for your local server.
4. Configure the server settings as follows:
-| Feature | Description | Default Setting |
-|-------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------|
-| Local Server Address | By default, Jan is only accessible on the same computer it's running on, using the address 127.0.0.1. You can change this to 0.0.0.0 to let other devices on your local network access it. However, this is less secure than just allowing access from the same computer. | `localhost (127.0.0.1)` |
-| Port | Jan runs on port 1337 by default. The port can be changed to any other port number as needed. | `1337` |
-| Cross-Origin Resource Sharing (CORS) | Manages resource access from external domains. Enabled for security by default but can be disabled if needed. | Enabled |
-| Verbose Server Logs | Provides extensive details about server activities as the local server runs, displayed at the center of the screen. | Not specified (implied enabled) |
+| Feature | Description | Default Setting |
+| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------- |
+| Local Server Address | By default, Jan is only accessible on the same computer it's running on, using the address 127.0.0.1. You can change this to 0.0.0.0 to let other devices on your local network access it. However, this is less secure than just allowing access from the same computer. | `localhost (127.0.0.1)` |
+| Port | Jan runs on port 1337 by default. The port can be changed to any other port number as needed. | `1337` |
+| Cross-Origin Resource Sharing (CORS) | Manages resource access from external domains. Enabled for security by default but can be disabled if needed. | Enabled |
+| Verbose Server Logs | Provides extensive details about server activities as the local server runs, displayed at the center of the screen. | Not specified (implied enabled) |
## Step 2: Start and Use the Built-in API Server
+
Once you have set the server settings, you can start the server by following the steps below:
+
1. Click the **Start Server** button on the top left of your screen.
:::note
@@ -49,6 +54,7 @@ When the server starts, you'll see a message like `Server listening at http://12
4. In this example, we will show you how it works using the `Chat` endpoint.
5. Click the **Try it out** button.
6. The Chat endpoint has the following `cURL request example` when running using a `tinyllama-1.1b` model local server:
+
```json
{
"messages": [
@@ -74,7 +80,9 @@ When the server starts, you'll see a message like `Server listening at http://12
}
'
```
+
7. The endpoint returns the following `JSON response body`:
+
```json
{
"choices": [
@@ -98,4 +106,4 @@ When the server starts, you'll see a message like `Server listening at http://12
"total_tokens": 533
}
}
-```
\ No newline at end of file
+```
diff --git a/docs/docs/guides/user-guides/manage-assistants.mdx b/docs/docs/guides/user-guides/manage-assistants.mdx
index 1a7b4c4e1..27330f187 100644
--- a/docs/docs/guides/user-guides/manage-assistants.mdx
+++ b/docs/docs/guides/user-guides/manage-assistants.mdx
@@ -5,17 +5,18 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 8
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
manage assistants,
assistants,
]
---
-Coming Soon
\ No newline at end of file
+Coming Soon
diff --git a/docs/docs/guides/user-guides/manage-models.mdx b/docs/docs/guides/user-guides/manage-models.mdx
index afbadcd67..1fa2ed3f0 100644
--- a/docs/docs/guides/user-guides/manage-models.mdx
+++ b/docs/docs/guides/user-guides/manage-models.mdx
@@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 7
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
models,
remote models,
local models,
@@ -20,4 +21,4 @@ keywords:
]
---
-Coming Soon
\ No newline at end of file
+Coming Soon
diff --git a/docs/docs/guides/user-guides/manage-threads.mdx b/docs/docs/guides/user-guides/manage-threads.mdx
index adf7c0015..7344efb41 100644
--- a/docs/docs/guides/user-guides/manage-threads.mdx
+++ b/docs/docs/guides/user-guides/manage-threads.mdx
@@ -5,24 +5,24 @@ description: Manage your interaction with AI locally.
sidebar_position: 9
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
threads,
chat history,
thread history,
]
---
-Jan provides a straightforward and private solution for managing your threads with AI on your own device. As you interact with AI using Jan, you'll accumulate a history of threads.
+Jan provides a straightforward and private solution for managing your threads with AI on your own device. As you interact with AI using Jan, you'll accumulate a history of threads.
Jan offers easy tools to organize, delete, or review your past threads with AI. This guide will show you how to keep your threads private and well-organized.
-
### View Thread History
To view your thread history, follow the steps below:
1. Navigate to the main dashboard.
@@ -49,14 +49,12 @@ Jan offers easy tools to organize, delete, or review your past threads with AI.
This will delete all messages in the thread while keeping the thread settings.
:::
-
### Delete Threads History
To delete a thread, follow the steps below:
1. Navigate to the Thread that you want to delete.
2. Click on the **three dots (⋮)** in the Thread section.
3. Sleect the **Delete Thread** button.
-
:::note
This will delete all messages and the thread settings.
:::
diff --git a/docs/docs/guides/user-guides/overview-guides.mdx b/docs/docs/guides/user-guides/overview-guides.mdx
index 2004e14f1..bb522ccf4 100644
--- a/docs/docs/guides/user-guides/overview-guides.mdx
+++ b/docs/docs/guides/user-guides/overview-guides.mdx
@@ -5,15 +5,16 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 5
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-Coming Soon
\ No newline at end of file
+Coming Soon
diff --git a/docs/docs/hardware/community.md b/docs/docs/hardware/community.md
index 5ba920d89..da142b7c3 100644
--- a/docs/docs/hardware/community.md
+++ b/docs/docs/hardware/community.md
@@ -3,31 +3,18 @@ title: Hardware Examples
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Hardware Examples
-
-
-
-
-
-
-
-
-
-
-
-
## Add your own example
Add your own examples to this page by creating a new file in the `docs/docs/hardware/examples` directory.
diff --git a/docs/docs/hardware/concepts/gpu-and-vram.md b/docs/docs/hardware/concepts/gpu-and-vram.md
index 0a9316a90..57387e8d2 100644
--- a/docs/docs/hardware/concepts/gpu-and-vram.md
+++ b/docs/docs/hardware/concepts/gpu-and-vram.md
@@ -2,20 +2,6 @@
title: GPUs and VRAM
---
-
- Understanding GPUs and VRAM for AI and Gaming
-
-
-
-
-
-
-
-
-
-
-
-
## What Is a GPU?
A Graphics Card, or GPU (Graphics Processing Unit), is a fundamental component in modern computing. Think of it as the powerhouse behind rendering the stunning visuals you see on your screen. Similar to the motherboard in your computer, the graphics card is a printed circuit board. However, it's not just a passive piece of hardware; it's a sophisticated device equipped with essential components like fans, onboard RAM, a dedicated memory controller, BIOS, and various other features. If you want to learn more about GPUs then read here to [Understand the architecture of a GPU.](https://medium.com/codex/understanding-the-architecture-of-a-gpu-d5d2d2e8978b)
diff --git a/docs/docs/hardware/overview/cloud-vs-self-hosting.md b/docs/docs/hardware/overview/cloud-vs-self-hosting.md
index f4d4b2236..0d34bb1a9 100644
--- a/docs/docs/hardware/overview/cloud-vs-self-hosting.md
+++ b/docs/docs/hardware/overview/cloud-vs-self-hosting.md
@@ -2,18 +2,6 @@
title: Cloud vs. Self-hosting Your AI
---
-
- Cloud vs. Self-hosting Your AI
-
-
-
-
-
-
-
-
-
-
The choice of how to run your AI - on GPU cloud services, on-prem, or just using an API provider - involves various trade-offs. The following is a naive exploration of the pros and cons of renting vs self-hosting.
## Cost Comparison
diff --git a/docs/docs/hardware/overview/cpu-vs-gpu.md b/docs/docs/hardware/overview/cpu-vs-gpu.md
index b50655574..f0f20d8d6 100644
--- a/docs/docs/hardware/overview/cpu-vs-gpu.md
+++ b/docs/docs/hardware/overview/cpu-vs-gpu.md
@@ -2,20 +2,6 @@
title: GPU vs CPU What's the Difference?
---
-
- GPU vs CPU What's the Difference?
-
-
-
-
-
-
-
-
-
-
-
-
## CPU vs. GPU
| | CPU | GPU |
diff --git a/docs/docs/hardware/recommendations/by-budget.md b/docs/docs/hardware/recommendations/by-budget.md
index e556c77d3..9e640fbc9 100644
--- a/docs/docs/hardware/recommendations/by-budget.md
+++ b/docs/docs/hardware/recommendations/by-budget.md
@@ -2,20 +2,6 @@
title: Recommended AI Hardware by Budget
---
-
- Recommended AI Hardware Builds by Budget
-
-
-
-
-
-
-
-
-
-
-
-
> :warning: **Warning:** Do your own research before any purchase. Jan is not liable for compatibility, performance or other issues. Products can become outdated quickly.
## Entry-level PC Build at $1000
diff --git a/docs/docs/hardware/recommendations/by-hardware.md b/docs/docs/hardware/recommendations/by-hardware.md
index dcd744d8b..ee80a290c 100644
--- a/docs/docs/hardware/recommendations/by-hardware.md
+++ b/docs/docs/hardware/recommendations/by-hardware.md
@@ -2,20 +2,6 @@
title: Selecting AI Hardware
---
-
- Selecting AI Hardware
-
-
-
-
-
-
-
-
-
-
-
-
When selecting a GPU for LLMs, remember that it's not just about the GPU itself. Consider the synergy with other components in your PC:
- **CPU**: To ensure efficient processing, pair your GPU with a powerful CPU. LLMs benefit from fast processors, so having a capable CPU is essential.
diff --git a/docs/docs/hardware/recommendations/by-model.md b/docs/docs/hardware/recommendations/by-model.md
index e9fe1c3e8..99d1ca8a2 100644
--- a/docs/docs/hardware/recommendations/by-model.md
+++ b/docs/docs/hardware/recommendations/by-model.md
@@ -2,20 +2,6 @@
title: Recommended AI Hardware by Model
---
-
- Recommended AI Hardware by Model
-
-
-
-
-
-
-
-
-
-
-
-
## Codellama 34b
### System Requirements:
diff --git a/docs/docs/hardware/recommendations/by-usecase.md b/docs/docs/hardware/recommendations/by-usecase.md
index aa7a1bf75..2ae0cb906 100644
--- a/docs/docs/hardware/recommendations/by-usecase.md
+++ b/docs/docs/hardware/recommendations/by-usecase.md
@@ -2,20 +2,6 @@
title: Recommended AI Hardware by Use Case
---
-
- Recommended AI Hardware by Model
-
-
-
-
-
-
-
-
-
-
-
-
## Which AI Hardware to Choose Based on Your Use Case
Artificial intelligence (AI) is rapidly changing the world, and AI hardware is becoming increasingly important for businesses and individuals alike. Choosing the right hardware for your AI needs is crucial to get the best performance and results. Here are some tips for selecting AI hardware based on your specific use case and requirements.
diff --git a/docs/docs/how-we-work.md b/docs/docs/how-we-work.md
index 602f7c902..5ab1cc15e 100644
--- a/docs/docs/how-we-work.md
+++ b/docs/docs/how-we-work.md
@@ -1,19 +1,24 @@
---
title: How We Work
+slug: /how-we-work
+description: How we work at Jan
+keywords:
+ [
+ Jan,
+ Rethink the Computer,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ build in public,
+ remote team,
+ how we work,
+ ]
---
-
- How We Work - Jan
-
-
-
-
-
-
-
-
-
-
### Open Source
Jan is a startup with an open source business model. We believe in the need for an open source AI ecosystem, and are committed to building it.
diff --git a/docs/docs/how-we-work/analytics/analytics.md b/docs/docs/how-we-work/analytics/analytics.md
index 5991263cc..22957f01c 100644
--- a/docs/docs/how-we-work/analytics/analytics.md
+++ b/docs/docs/how-we-work/analytics/analytics.md
@@ -1,19 +1,22 @@
---
title: Analytics
+slug: /how-we-work/analytics
+description: Jan's Analytics philosophy and implementation
+keywords:
+ [
+ Jan,
+ Rethink the Computer,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ analytics,
+ ]
---
-
- Analytics
-
-
-
-
-
-
-
-
-
-
Adhering to Jan's privacy preserving philosophy, our analytics philosophy is to get "barely-enough-to-function'.
#### What is tracked
diff --git a/docs/docs/how-we-work/engineering/engineering.md b/docs/docs/how-we-work/engineering/engineering.md
index 1db5c3912..63e797629 100644
--- a/docs/docs/how-we-work/engineering/engineering.md
+++ b/docs/docs/how-we-work/engineering/engineering.md
@@ -4,14 +4,15 @@ description: Jan is a ChatGPT-alternative that runs on your own computer, with a
slug: /engineering
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
diff --git a/docs/docs/how-we-work/engineering/qa.mdx b/docs/docs/how-we-work/engineering/qa.mdx
index aa851dfa3..9957aaefa 100644
--- a/docs/docs/how-we-work/engineering/qa.mdx
+++ b/docs/docs/how-we-work/engineering/qa.mdx
@@ -4,29 +4,18 @@ description: Jan is a ChatGPT-alternative that runs on your own computer, with a
slug: /engineering/qa
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- QA
-
-
-
-
-
-
-
-
-
-
### Phase 1: Planning
#### Definition of Ready (DoR):
diff --git a/docs/docs/how-we-work/product-design/product-design.md b/docs/docs/how-we-work/product-design/product-design.md
index a2016b6b8..30c1e5b21 100644
--- a/docs/docs/how-we-work/product-design/product-design.md
+++ b/docs/docs/how-we-work/product-design/product-design.md
@@ -1,5 +1,20 @@
---
title: Product & Design
+slug: /how-we-work/product-design
+description: How we work on product design
+keywords:
+ [
+ Jan,
+ Rethink the Computer,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ product design,
+ ]
---
## Roadmap
@@ -8,4 +23,4 @@ title: Product & Design
- Discord's #roadmap channel
- Work with the community to turn conversations into Product Specs
- Future System?
- - Use Canny?
\ No newline at end of file
+ - Use Canny?
diff --git a/docs/docs/how-we-work/project-management/project-management.md b/docs/docs/how-we-work/project-management/project-management.md
index 85bbe0d75..b1e76757e 100644
--- a/docs/docs/how-we-work/project-management/project-management.md
+++ b/docs/docs/how-we-work/project-management/project-management.md
@@ -1,19 +1,22 @@
---
title: Project Management
+slug: /how-we-work/project-management
+description: Project management at Jan
+keywords:
+ [
+ Jan,
+ Rethink the Computer,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ project management,
+ ]
---
-
- Project Management
-
-
-
-
-
-
-
-
-
-
We use the [Jan Monorepo Project](https://github.com/orgs/janhq/projects/5) in Github to manage our roadmap and sprint Kanbans.
As much as possible, everyone owns their respective `epics` and `tasks`.
diff --git a/docs/docs/how-we-work/strategy/strategy.md b/docs/docs/how-we-work/strategy/strategy.md
index a448c090e..001a7c8a2 100644
--- a/docs/docs/how-we-work/strategy/strategy.md
+++ b/docs/docs/how-we-work/strategy/strategy.md
@@ -2,18 +2,6 @@
title: Strategy
---
-
- Strategy
-
-
-
-
-
-
-
-
-
-
We only have 2 planning parameters:
- 10 year vision
diff --git a/docs/docs/how-we-work/website-docs/website-docs.md b/docs/docs/how-we-work/website-docs/website-docs.md
index 9dcedb0b8..007cd16d7 100644
--- a/docs/docs/how-we-work/website-docs/website-docs.md
+++ b/docs/docs/how-we-work/website-docs/website-docs.md
@@ -1,19 +1,23 @@
---
title: Website & Docs
+slug: /how-we-work/website-docs/
+description: Information about the Jan website and documentation.
+keywords:
+ [
+ Jan,
+ Rethink the Computer,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ website,
+ documentation,
+ ]
---
-
- Website & Docs
-
-
-
-
-
-
-
-
-
-
This website is built using [Docusaurus 3.0](https://docusaurus.io/), a modern static website generator.
### Information Architecture
diff --git a/docs/docs/platforms/desktop.md b/docs/docs/platforms/desktop.md
index d8c8a38cc..1fed9274a 100644
--- a/docs/docs/platforms/desktop.md
+++ b/docs/docs/platforms/desktop.md
@@ -4,29 +4,18 @@ slug: /desktop
description: Turn your computer into an AI PC
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Jan Desktop
-
-
-
-
-
-
-
-
-
-
# Turn any computer into an AI computer

diff --git a/docs/docs/platforms/mobile.md b/docs/docs/platforms/mobile.md
index 827544201..d502cb99a 100644
--- a/docs/docs/platforms/mobile.md
+++ b/docs/docs/platforms/mobile.md
@@ -4,13 +4,14 @@ slug: /mobile
description: Jan Mobile allows you to bring your AI on the go
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
----
\ No newline at end of file
+---
diff --git a/docs/docs/pricing/pricing.md b/docs/docs/pricing/pricing.md
index 233610468..958c021ee 100644
--- a/docs/docs/pricing/pricing.md
+++ b/docs/docs/pricing/pricing.md
@@ -1,6 +1,20 @@
---
title: Pricing
slug: /pricing
+description: Pricing for Jan
+keywords:
+ [
+ Jan,
+ Rethink the Computer,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ pricing,
+ ]
---
| $0 | $1 | Enterprise |
diff --git a/docs/docs/privacy/privacy.md b/docs/docs/privacy/privacy.md
index e1f5d0f10..9bf408810 100644
--- a/docs/docs/privacy/privacy.md
+++ b/docs/docs/privacy/privacy.md
@@ -2,20 +2,6 @@
title: Privacy - Jan
---
-
- Privacy Policy - Jan
-
-
-
-
-
-
-
-
-
-
-
-
# Privacy Policy
Jan is committed to protecting your privacy and ensuring that your personal information is handled in a safe and responsible way. This policy outlines how we collect, store, and use your personal information when you use our mobile application.
diff --git a/docs/docs/releases/changelog/README.mdx b/docs/docs/releases/changelog/README.mdx
index 09e6d8222..8cd181cd3 100644
--- a/docs/docs/releases/changelog/README.mdx
+++ b/docs/docs/releases/changelog/README.mdx
@@ -5,15 +5,16 @@ slug: /changelog
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
- build extension,
+ large language models,
+ changelog,
]
---
diff --git a/docs/docs/server-suite/enterprise.md b/docs/docs/server-suite/enterprise.md
index e08356954..292911485 100644
--- a/docs/docs/server-suite/enterprise.md
+++ b/docs/docs/server-suite/enterprise.md
@@ -4,29 +4,18 @@ slug: /enterprise
description: Built for Enterprise Deployments
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Jan Enterprise
-
-
-
-
-
-
-
-
-
-
# Customize and run AI across your organization
Jan can professional backend to create, customize and run AIs at scale, for production-grade data centers.
diff --git a/docs/docs/server-suite/home-server.md b/docs/docs/server-suite/home-server.md
index 630d2b9d9..4f6a375a9 100644
--- a/docs/docs/server-suite/home-server.md
+++ b/docs/docs/server-suite/home-server.md
@@ -4,29 +4,18 @@ slug: /home-server
description: Built for Home Servers
keywords:
[
- Jan AI,
Jan,
- ChatGPT alternative,
+ Rethink the Computer,
local AI,
- private AI,
+ privacy focus,
+ free and open source,
+ private and offline,
conversational AI,
no-subscription fee,
- large language model,
+ large language models,
]
---
-
- Jan Home Server
-
-
-
-
-
-
-
-
-
-
# Customize and run AI across all of your devices
Self-host and access your AI from anywhere with Jan server suite.
diff --git a/docs/docs/support/support.md b/docs/docs/support/support.md
index 856041f86..36895017d 100644
--- a/docs/docs/support/support.md
+++ b/docs/docs/support/support.md
@@ -2,20 +2,6 @@
title: Support - Jan
---
-
- Support - Jan
-
-
-
-
-
-
-
-
-
-
-
-
# Support
- Bugs & requests: file a GitHub ticket [here](https://github.com/janhq/jan/issues)
diff --git a/docs/docs/team/team.md b/docs/docs/team/team.md
index 2b8b24d01..b18774dda 100644
--- a/docs/docs/team/team.md
+++ b/docs/docs/team/team.md
@@ -2,18 +2,6 @@
title: Who we are
---
-
- Who we are - Jan
-
-
-
-
-
-
-
-
-
-
What's Jan the company about?
We aim to build the cognitive framework for future robots
diff --git a/docs/docs/wall-of-love.md b/docs/docs/wall-of-love.md
index 41f68b0f2..2dda05770 100644
--- a/docs/docs/wall-of-love.md
+++ b/docs/docs/wall-of-love.md
@@ -1,19 +1,22 @@
---
title: Wall of Love ❤️
+slug: /wall-of-love
+description: Check out what our amazing users are saying about Jan!
+keywords:
+ [
+ Jan,
+ Rethink the Computer,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ wall of love,
+ ]
---
-
- Wall of Love ❤️ - Jan
-
-
-
-
-
-
-
-
-
-
## Twitter
Check out our amazing users and what they are saying about Jan!
diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index d77d59d9d..7b74337fb 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -7,7 +7,7 @@ const path = require('path')
/** @type {import('@docusaurus/types').Config} */
const config = {
- title: 'Jan',
+ title: 'Jan | Rethink the Computer',
tagline: 'Run your own AI',
favicon: 'img/favicon.ico',
@@ -81,10 +81,6 @@ const config = {
from: '/troubleshooting/stuck-on-broken-build/',
to: '/troubleshooting/',
},
- {
- from: '/guides/troubleshooting/',
- to: '/troubleshooting/',
- },
{
from: '/troubleshooting/somethings-amiss/',
to: '/troubleshooting/',
@@ -121,6 +117,334 @@ const config = {
from: '/integrations/tensorrt',
to: '/guides/providers/tensorrt-llm',
},
+ {
+ from: '/install/mac/',
+ to: '/guides/install/mac/',
+ },
+ {
+ from: '/guides/using-models/integrate-with-remote-server/',
+ to: '/guides/engines/remote-server/',
+ },
+ {
+ from: '/guides/chatting/manage-history/',
+ to: '/guides/threads/',
+ },
+ {
+ from: '/guides/using-server/',
+ to: '/guides/local-api/',
+ },
+ {
+ from: '/guides/using-models/customize-engine-settings/',
+ to: '/guides/engines/llamacpp/',
+ },
+ {
+ from: '/guides/integrations/openrouter/',
+ to: '/integrations/openrouter/',
+ },
+ {
+ from: '/docs/integrations/',
+ to: '/integrations/',
+ },
+ {
+ from: '/docs/product/chat/',
+ to: '/developer/framework/product/chat/',
+ },
+ {
+ from: '/install/windows/',
+ to: '/guides/install/windows/',
+ },
+ {
+ from: '/api/overview/',
+ to: '/api-reference/',
+ },
+ {
+ from: '/install/linux/',
+ to: '/guides/install/linux/',
+ },
+ {
+ from: '/install/from-source/',
+ to: '/guides/install/#install-server-side',
+ },
+ {
+ from: '/troubleshooting/gpu-not-used/',
+ to: '/troubleshooting/#troubleshooting-nvidia-gpu',
+ },
+ {
+ from: '/guides/using-server/server/',
+ to: '/guides/local-api/#step-2-start-and-use-the-built-in-api-server',
+ },
+ {
+ from: '/docs/integrations/openrouter/',
+ to: '/integrations/openrouter/',
+ },
+ {
+ from: '/docs/integrations/ollama/',
+ to: '/guides/engines/ollama/',
+ },
+ {
+ from: '/guides/using-models/install-from-hub/',
+ to: '/guides/models/',
+ },
+ {
+ from: '/guides/integrations/continue/',
+ to: '/integrations/continue/',
+ },
+ {
+ from: '/docs/engineering/assistants/',
+ to: '/developer/framework/engineering/assistants/',
+ },
+ {
+ from: '/guides/install/hardware/',
+ to: '/guides/hardware/',
+ },
+ {
+ from: '/docs/engineering/files/',
+ to: '/developer/framework/engineering/files/',
+ },
+ {
+ from: '/features/acceleration/',
+ to: '/guides/advanced/#enable-the-gpu-acceleration',
+ },
+ {
+ from: '/docs/extension-guides/',
+ to: '/extensions/',
+ },
+ {
+ from: '/specs/settings/',
+ to: '/developer/framework/product/settings/',
+ },
+ {
+ from: '/guides/using-models/import-models-using-absolute-filepath/',
+ to: '/guides/models/',
+ },
+ {
+ from: '/install/docker/',
+ to: '/guides/install/server/',
+ },
+ {
+ from: '/guides/using-models/import-manually/',
+ to: '/guides/models/',
+ },
+ {
+ from: '/v1/models',
+ to: '/guides/models/',
+ },
+ {
+ from: '/docs/team/contributor-program/',
+ to: '/team/contributor-program/',
+ },
+ {
+ from: '/guides/installation/hardware/',
+ to: '/guides/hardware/',
+ },
+ {
+ from: '/guides/chatting/start-thread/',
+ to: '/guides/threads/',
+ },
+ {
+ from: '/api/files/',
+ to: '/developer/framework/engineering/files/#file-api',
+ },
+ {
+ from: '/specs/threads/',
+ to: '/developer/framework/engineering/threads/',
+ },
+ {
+ from: '/guides/using-models/customize-models/',
+ to: '/guides/models/',
+ },
+ {
+ from: '/docs/modules/models/',
+ to: '/guides/models/',
+ },
+ {
+ from: '/developer/build-extension/package-your-assistant/',
+ to: '/developer/extension/package-your-extension/',
+ },
+ {
+ from: '/getting-started/install/linux/',
+ to: '/guides/install/linux/',
+ },
+ {
+ from: '/features/extensions',
+ to: '/extensions/',
+ },
+ {
+ from: '/specs/chats/',
+ to: '/developer/framework/engineering/chats/',
+ },
+ {
+ from: '/specs/engine/',
+ to: '/developer/framework/engineering/engine/',
+ },
+ {
+ from: '/docs/extension-capabilities/',
+ to: '/extensions/',
+ },
+ {
+ from: '/docs/get-started/use-local-server/',
+ to: '/guides/local-api/',
+ },
+ {
+ from: '/guides/how-jan-works/',
+ to: '/guides/',
+ },
+ {
+ from: '/guides/windows/',
+ to: '/guides/install/windows/',
+ },
+ {
+ from: '/specs/',
+ to: '/developer/framework/',
+ },
+ {
+ from: '/docs/get-started/build-extension/',
+ to: '/developer/extension/',
+ },
+ {
+ from: '/specs/files/',
+ to: '/developer/framework/engineering/files/',
+ },
+ {
+ from: '/guides/using-models/package-models/',
+ to: '/guides/models/',
+ },
+ {
+ from: '/install/overview/',
+ to: '/guides/install/',
+ },
+ {
+ from: '/docs/get-started/extension-anatomy/',
+ to: '/developer/extension/extension-anatomy/',
+ },
+ {
+ from: '/docs/get-started/',
+ to: '/guides/',
+ },
+ {
+ from: '/guides/mac/',
+ to: '/guides/install/mac/',
+ },
+ {
+ from: '/specs/fine-tuning/',
+ to: '/developer/framework/engineering/fine-tuning/',
+ },
+ {
+ from: '/guides/server/',
+ to: '/guides/local-api/',
+ },
+ {
+ from: '/specs/file-based/',
+ to: '/developer/file-based/',
+ },
+ {
+ from: '/developers/',
+ to: '/developer/',
+ },
+ {
+ from: '/api/',
+ to: '/api-reference/',
+ },
+ {
+ from: '/products/desktop',
+ to: '/desktop/',
+ },
+ {
+ from: '/developers/plugins/azure-openai',
+ to: '/guides/engines/openai/',
+ },
+ {
+ from: '/getting-started/install/mac',
+ to: '/guides/install/mac/',
+ },
+ {
+ from: '/guides/fine-tuning/what-models-can-be-fine-tuned',
+ to: '/developer/framework/engineering/fine-tuning/',
+ },
+ {
+ from: '/guides/linux/',
+ to: '/guides/install/linux/',
+ },
+ {
+ from: '/docs/specs/threads',
+ to: '/developer/framework/engineering/threads/',
+ },
+ {
+ from: '/docs/api-reference/models/list',
+ to: '/api-reference#tag/models/get/models',
+ },
+ {
+ from: '/docs/api-reference/threads',
+ to: '/api-reference/#tag/chat/post/chat/completions',
+ },
+ {
+ from: '/getting-started/troubleshooting',
+ to: '/troubleshooting/',
+ },
+ {
+ from: '/getting-started/install/windows',
+ to: '/guides/install/windows/',
+ },
+ {
+ from: '/docs/api-reference/messages',
+ to: '/api-reference#tag/messages/get/threads/{thread_id}/messages',
+ },
+ {
+ from: '/docs/modules/chats',
+ to: '/developer/framework/engineering/chats/',
+ },
+ {
+ from: '/docs/specs/chats',
+ to: '/developer/framework/engineering/chats/',
+ },
+ {
+ from: '/docs/api-reference/assistants',
+ to: '/api-reference/#tag/assistants/get/assistants',
+ },
+ {
+ from: '/docs/modules/files',
+ to: '/developer/framework/engineering/files/',
+ },
+ {
+ from: '/features/ai-models',
+ to: '/guides/models/',
+ },
+ {
+ from: '/docs/specs/models',
+ to: '/developer/framework/engineering/models/',
+ },
+ {
+ from: '/docs/models/overview',
+ to: '/developer/framework/engineering/models/',
+ },
+ {
+ from: '/docs/api-reference/models',
+ to: '/api-reference#tag/models/get/models',
+ },
+ {
+ from: '/docs/guides/fine-tuning',
+ to: '/developer/framework/engineering/fine-tuning/',
+ },
+ {
+ from: '/docs/specs/files',
+ to: '/developer/framework/engineering/files/',
+ },
+ {
+ from: '/docs/modules/threads',
+ to: '/developer/framework/engineering/threads/',
+ },
+ {
+ from: '/hardware/examples/3090x1-@dan-jan',
+ to: '/guides/hardware/',
+ },
+ {
+ from: '/chat',
+ to: '/guides/threads/',
+ },
+ {
+ from: '/docs/modules/assistants',
+ to: '/developer/assistant/',
+ },
],
},
],
@@ -231,12 +555,12 @@ const config = {
{
name: 'keywords',
content:
- 'Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ',
+ 'Jan, Rethink the Computer, local AI, privacy focus, free and open source, private and offline, conversational AI, no-subscription fee, large language models',
},
{ name: 'robots', content: 'index, follow' },
{
property: 'og:title',
- content: 'Jan AI | Rethink the Computer',
+ content: 'Jan | Rethink the Computer',
},
{
property: 'og:description',
@@ -251,7 +575,7 @@ const config = {
{ property: 'twitter:site', content: '@janframework' },
{
property: 'twitter:title',
- content: 'Jan AI | Rethink the Computer',
+ content: 'Jan | Rethink the Computer',
},
{
property: 'twitter:description',
@@ -279,14 +603,10 @@ const config = {
},
innerHTML: JSON.stringify({
'@context': 'https://schema.org/',
- '@type': 'localAI',
- 'name': 'Jan',
- 'description': `Jan turns your computer into an AI machine by running LLMs locally on your computer. It's a privacy-focus, local-first, open-source solution.`,
- 'keywords':
- 'Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model ',
- 'applicationCategory': 'BusinessApplication',
- 'operatingSystem': 'Multiple',
- 'url': 'https://jan.ai/',
+ '@type': 'Organization',
+ name: 'Jan',
+ url: 'https://jan.ai/',
+ logo: 'https://jan.ai/img/og-image.png',
}),
},
],
diff --git a/docs/src/pages/index.js b/docs/src/pages/index.js
index 6687eb06e..0311c8153 100644
--- a/docs/src/pages/index.js
+++ b/docs/src/pages/index.js
@@ -149,7 +149,6 @@ export default function Home() {
<>
diff --git a/electron/handlers/native.ts b/electron/handlers/native.ts
index 04e9b71af..06d9d2a6a 100644
--- a/electron/handlers/native.ts
+++ b/electron/handlers/native.ts
@@ -6,8 +6,8 @@ import {
getJanDataFolderPath,
getJanExtensionsPath,
init,
+ AppEvent, NativeRoute,
} from '@janhq/core/node'
-import { AppEvent, NativeRoute } from '@janhq/core'
export function handleAppIPCs() {
/**
diff --git a/electron/handlers/update.ts b/electron/handlers/update.ts
index 5ea261e54..49376c2a1 100644
--- a/electron/handlers/update.ts
+++ b/electron/handlers/update.ts
@@ -6,7 +6,7 @@ import {
UpdateInfo,
autoUpdater,
} from 'electron-updater'
-import { AppEvent } from '@janhq/core'
+import { AppEvent } from '@janhq/core/node'
import { trayManager } from '../managers/tray'
export let waitingToInstallVersion: string | undefined = undefined
diff --git a/electron/managers/window.ts b/electron/managers/window.ts
index da8dd4b17..f24d25c61 100644
--- a/electron/managers/window.ts
+++ b/electron/managers/window.ts
@@ -1,8 +1,7 @@
import { BrowserWindow, app, shell } from 'electron'
import { quickAskWindowConfig } from './quickAskWindowConfig'
-import { AppEvent } from '@janhq/core'
import { mainWindowConfig } from './mainWindowConfig'
-import { getAppConfigurations } from '@janhq/core/node'
+import { getAppConfigurations, AppEvent } from '@janhq/core/node'
/**
* Manages the current window instance.
diff --git a/electron/package.json b/electron/package.json
index e09e0daf2..49cbadf7a 100644
--- a/electron/package.json
+++ b/electron/package.json
@@ -90,7 +90,7 @@
"pacote": "^17.0.4",
"request": "^2.88.2",
"request-progress": "^3.0.0",
- "ulid": "^2.3.0",
+ "ulidx": "^2.3.0",
"@nut-tree/nut-js": "^4.0.0"
},
"devDependencies": {
diff --git a/electron/preload.ts b/electron/preload.ts
index c4a646c41..6ac259e0d 100644
--- a/electron/preload.ts
+++ b/electron/preload.ts
@@ -3,7 +3,7 @@
* @module preload
*/
-import { APIEvents, APIRoutes } from '@janhq/core'
+import { APIEvents, APIRoutes } from '@janhq/core/node'
import { contextBridge, ipcRenderer } from 'electron'
const interfaces: { [key: string]: (...args: any[]) => any } = {}
diff --git a/electron/tests/config/fixtures.ts b/electron/tests/config/fixtures.ts
index 680b09785..22d83b636 100644
--- a/electron/tests/config/fixtures.ts
+++ b/electron/tests/config/fixtures.ts
@@ -22,6 +22,8 @@ export let appInfo: ElectronAppInfo
export const TIMEOUT = parseInt(process.env.TEST_TIMEOUT || Constants.TIMEOUT)
export async function setupElectron() {
+ console.log(`TEST TIMEOUT: ${TIMEOUT}`)
+
process.env.CI = 'e2e'
const latestBuild = findLatestBuild('dist')
@@ -101,11 +103,11 @@ export const test = base.extend<
},
{ auto: true },
],
+
})
-test.setTimeout(TIMEOUT)
-
test.beforeAll(async () => {
+ test.setTimeout(TIMEOUT)
await setupElectron()
await page.waitForSelector('img[alt="Jan - Logo"]', {
state: 'visible',
diff --git a/extensions/assistant-extension/package.json b/extensions/assistant-extension/package.json
index e3860a1c1..f7fde7f78 100644
--- a/extensions/assistant-extension/package.json
+++ b/extensions/assistant-extension/package.json
@@ -9,9 +9,9 @@
"scripts": {
"clean:modules": "rimraf node_modules/pdf-parse/test && cd node_modules/pdf-parse/lib/pdf.js && rimraf v1.9.426 v1.10.88 v2.0.550",
"build": "yarn clean:modules && tsc --module commonjs && rollup -c rollup.config.ts",
- "build:publish:linux": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../pre-install",
- "build:publish:darwin": "rimraf *.tgz --glob && npm run build && ../../.github/scripts/auto-sign.sh && npm pack && cpx *.tgz ../../pre-install",
- "build:publish:win32": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../pre-install",
+ "build:publish:linux": "rimraf *.tgz --glob && yarn build && npm pack && cpx *.tgz ../../pre-install",
+ "build:publish:darwin": "rimraf *.tgz --glob && yarn build && ../../.github/scripts/auto-sign.sh && npm pack && cpx *.tgz ../../pre-install",
+ "build:publish:win32": "rimraf *.tgz --glob && yarn build && npm pack && cpx *.tgz ../../pre-install",
"build:publish": "run-script-os"
},
"devDependencies": {
diff --git a/extensions/assistant-extension/rollup.config.ts b/extensions/assistant-extension/rollup.config.ts
index d3c39cab2..0d1e4832c 100644
--- a/extensions/assistant-extension/rollup.config.ts
+++ b/extensions/assistant-extension/rollup.config.ts
@@ -36,6 +36,7 @@ export default [
// https://github.com/rollup/rollup-plugin-node-resolve#usage
resolve({
extensions: ['.js', '.ts', '.svelte'],
+ browser: true
}),
// Resolve source maps to the original source
diff --git a/extensions/assistant-extension/src/index.ts b/extensions/assistant-extension/src/index.ts
index a2c153295..de695e6d5 100644
--- a/extensions/assistant-extension/src/index.ts
+++ b/extensions/assistant-extension/src/index.ts
@@ -10,7 +10,6 @@ import {
executeOnMain,
AssistantExtension,
AssistantEvent,
- ChatCompletionMessageContentType,
} from '@janhq/core'
export default class JanAssistantExtension extends AssistantExtension {
diff --git a/extensions/conversational-extension/package.json b/extensions/conversational-extension/package.json
index 8a6da14e5..a803320f5 100644
--- a/extensions/conversational-extension/package.json
+++ b/extensions/conversational-extension/package.json
@@ -7,7 +7,7 @@
"license": "MIT",
"scripts": {
"build": "tsc -b . && webpack --config webpack.config.js",
- "build:publish": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../pre-install"
+ "build:publish": "rimraf *.tgz --glob && yarn build && npm pack && cpx *.tgz ../../pre-install"
},
"exports": {
".": "./dist/index.js",
diff --git a/extensions/conversational-extension/webpack.config.js b/extensions/conversational-extension/webpack.config.js
index a3eb873d7..b56a8f264 100644
--- a/extensions/conversational-extension/webpack.config.js
+++ b/extensions/conversational-extension/webpack.config.js
@@ -24,7 +24,7 @@ module.exports = {
extensions: ['.ts', '.js'],
fallback: {
path: require.resolve('path-browserify'),
- },
+ }
},
// Do not minify the output, otherwise it breaks the class registration
optimization: {
diff --git a/extensions/huggingface-extension/src/node/index.ts b/extensions/huggingface-extension/src/node/index.ts
index cd36c1ab9..d21255509 100644
--- a/extensions/huggingface-extension/src/node/index.ts
+++ b/extensions/huggingface-extension/src/node/index.ts
@@ -1,8 +1,7 @@
import { PythonShell } from 'python-shell'
import { spawn, ChildProcess } from 'child_process'
import { resolve as presolve, join as pjoin } from 'path'
-import type { Quantization } from '@janhq/core'
-import { log } from '@janhq/core/node'
+import { log, Quantization } from '@janhq/core/node'
import { statSync } from 'fs'
export { renameSync } from 'fs'
diff --git a/extensions/inference-nitro-extension/package.json b/extensions/inference-nitro-extension/package.json
index dd5798764..45bd8307a 100644
--- a/extensions/inference-nitro-extension/package.json
+++ b/extensions/inference-nitro-extension/package.json
@@ -12,9 +12,9 @@
"downloadnitro:darwin": "NITRO_VERSION=$(cat ./bin/version.txt) && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-mac-arm64.tar.gz -e --strip 1 -o ./bin/mac-arm64 && chmod +x ./bin/mac-arm64/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-mac-amd64.tar.gz -e --strip 1 -o ./bin/mac-x64 && chmod +x ./bin/mac-x64/nitro",
"downloadnitro:win32": "download.bat",
"downloadnitro": "run-script-os",
- "build:publish:darwin": "rimraf *.tgz --glob && npm run build && npm run downloadnitro && ../../.github/scripts/auto-sign.sh && cpx \"bin/**\" \"dist/bin\" && npm pack && cpx *.tgz ../../pre-install",
- "build:publish:win32": "rimraf *.tgz --glob && npm run build && npm run downloadnitro && cpx \"bin/**\" \"dist/bin\" && npm pack && cpx *.tgz ../../pre-install",
- "build:publish:linux": "rimraf *.tgz --glob && npm run build && npm run downloadnitro && cpx \"bin/**\" \"dist/bin\" && npm pack && cpx *.tgz ../../pre-install",
+ "build:publish:darwin": "rimraf *.tgz --glob && yarn build && npm run downloadnitro && ../../.github/scripts/auto-sign.sh && cpx \"bin/**\" \"dist/bin\" && npm pack && cpx *.tgz ../../pre-install",
+ "build:publish:win32": "rimraf *.tgz --glob && yarn build && npm run downloadnitro && cpx \"bin/**\" \"dist/bin\" && npm pack && cpx *.tgz ../../pre-install",
+ "build:publish:linux": "rimraf *.tgz --glob && yarn build && npm run downloadnitro && cpx \"bin/**\" \"dist/bin\" && npm pack && cpx *.tgz ../../pre-install",
"build:publish": "run-script-os"
},
"exports": {
@@ -45,7 +45,7 @@
"path-browserify": "^1.0.1",
"rxjs": "^7.8.1",
"tcp-port-used": "^1.0.2",
- "ulid": "^2.3.0"
+ "ulidx": "^2.3.0"
},
"engines": {
"node": ">=18.0.0"
diff --git a/extensions/inference-nitro-extension/rollup.config.ts b/extensions/inference-nitro-extension/rollup.config.ts
index ec8943f9c..396c40d08 100644
--- a/extensions/inference-nitro-extension/rollup.config.ts
+++ b/extensions/inference-nitro-extension/rollup.config.ts
@@ -43,6 +43,7 @@ export default [
// https://github.com/rollup/rollup-plugin-node-resolve#usage
resolve({
extensions: ['.js', '.ts', '.svelte'],
+ browser: true,
}),
// Resolve source maps to the original source
diff --git a/extensions/inference-nitro-extension/src/index.ts b/extensions/inference-nitro-extension/src/index.ts
index 70244a5d9..e398cb643 100644
--- a/extensions/inference-nitro-extension/src/index.ts
+++ b/extensions/inference-nitro-extension/src/index.ts
@@ -29,7 +29,7 @@ import {
getJanDataFolderPath,
} from '@janhq/core'
import { requestInference } from './helpers/sse'
-import { ulid } from 'ulid'
+import { ulid } from 'ulidx'
/**
* A class that implements the InferenceExtension interface from the @janhq/core package.
diff --git a/extensions/inference-nitro-extension/src/node/index.ts b/extensions/inference-nitro-extension/src/node/index.ts
index c57eb262d..5e8b97188 100644
--- a/extensions/inference-nitro-extension/src/node/index.ts
+++ b/extensions/inference-nitro-extension/src/node/index.ts
@@ -3,13 +3,14 @@ import path from 'path'
import { ChildProcessWithoutNullStreams, spawn } from 'child_process'
import tcpPortUsed from 'tcp-port-used'
import fetchRT from 'fetch-retry'
-import { log, getSystemResourceInfo } from '@janhq/core/node'
import {
+ log,
+ getSystemResourceInfo,
Model,
InferenceEngine,
ModelSettingParams,
PromptTemplate,
-} from '@janhq/core'
+} from '@janhq/core/node'
import { executableNitroFile } from './execute'
// Polyfill fetch with retry
diff --git a/extensions/inference-openai-extension/package.json b/extensions/inference-openai-extension/package.json
index 5efdbf874..9139661fd 100644
--- a/extensions/inference-openai-extension/package.json
+++ b/extensions/inference-openai-extension/package.json
@@ -8,7 +8,7 @@
"license": "AGPL-3.0",
"scripts": {
"build": "tsc -b . && webpack --config webpack.config.js",
- "build:publish": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../pre-install"
+ "build:publish": "rimraf *.tgz --glob && yarn build && npm pack && cpx *.tgz ../../pre-install"
},
"exports": {
".": "./dist/index.js",
@@ -25,7 +25,7 @@
"@janhq/core": "file:../../core",
"fetch-retry": "^5.0.6",
"path-browserify": "^1.0.1",
- "ulid": "^2.3.0"
+ "ulidx": "^2.3.0"
},
"engines": {
"node": ">=18.0.0"
diff --git a/extensions/inference-openai-extension/src/index.ts b/extensions/inference-openai-extension/src/index.ts
index e617b81e5..ab0c2bde6 100644
--- a/extensions/inference-openai-extension/src/index.ts
+++ b/extensions/inference-openai-extension/src/index.ts
@@ -25,7 +25,7 @@ import {
joinPath,
} from '@janhq/core'
import { requestInference } from './helpers/sse'
-import { ulid } from 'ulid'
+import { ulid } from 'ulidx'
import { join } from 'path'
/**
diff --git a/extensions/inference-triton-trtllm-extension/package.json b/extensions/inference-triton-trtllm-extension/package.json
index 455f8030e..bb97f15c5 100644
--- a/extensions/inference-triton-trtllm-extension/package.json
+++ b/extensions/inference-triton-trtllm-extension/package.json
@@ -8,7 +8,7 @@
"license": "AGPL-3.0",
"scripts": {
"build": "tsc -b . && webpack --config webpack.config.js",
- "build:publish": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../pre-install"
+ "build:publish": "rimraf *.tgz --glob && yarn build && npm pack && cpx *.tgz ../../pre-install"
},
"exports": {
".": "./dist/index.js",
@@ -17,16 +17,17 @@
"devDependencies": {
"cpx": "^1.5.0",
"rimraf": "^3.0.2",
+ "ts-loader": "^9.5.0",
+ "typescript": "5.3.3",
"webpack": "^5.88.2",
- "webpack-cli": "^5.1.4",
- "ts-loader": "^9.5.0"
+ "webpack-cli": "^5.1.4"
},
"dependencies": {
"@janhq/core": "file:../../core",
"fetch-retry": "^5.0.6",
"path-browserify": "^1.0.1",
- "ulid": "^2.3.0",
- "rxjs": "^7.8.1"
+ "rxjs": "^7.8.1",
+ "ulidx": "^2.3.0"
},
"engines": {
"node": ">=18.0.0"
diff --git a/extensions/inference-triton-trtllm-extension/src/index.ts b/extensions/inference-triton-trtllm-extension/src/index.ts
index 2ba23d6cb..ae1d9315f 100644
--- a/extensions/inference-triton-trtllm-extension/src/index.ts
+++ b/extensions/inference-triton-trtllm-extension/src/index.ts
@@ -22,7 +22,7 @@ import {
ModelEvent,
} from '@janhq/core'
import { requestInference } from './helpers/sse'
-import { ulid } from 'ulid'
+import { ulid } from 'ulidx'
import { join } from 'path'
import { EngineSettings } from './@types/global'
diff --git a/extensions/model-extension/package.json b/extensions/model-extension/package.json
index ef43ecadf..3805c92cf 100644
--- a/extensions/model-extension/package.json
+++ b/extensions/model-extension/package.json
@@ -7,15 +7,23 @@
"author": "Jan ",
"license": "AGPL-3.0",
"scripts": {
- "build": "tsc -b . && webpack --config webpack.config.js",
- "build:publish": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../pre-install"
+ "build": "rollup -c rollup.config.ts",
+ "build:publish": "rimraf *.tgz --glob && yarn build && npm pack && cpx *.tgz ../../pre-install"
},
"devDependencies": {
"cpx": "^1.5.0",
"rimraf": "^3.0.2",
- "webpack": "^5.88.2",
- "webpack-cli": "^5.1.4",
- "ts-loader": "^9.5.0"
+ "ts-loader": "^9.5.0",
+ "typescript": "5.3.3",
+ "@rollup/plugin-commonjs": "^25.0.7",
+ "@rollup/plugin-json": "^6.1.0",
+ "@rollup/plugin-node-resolve": "^15.2.3",
+ "@rollup/plugin-replace": "^5.0.5",
+ "@types/pdf-parse": "^1.1.4",
+ "rollup": "^2.38.5",
+ "rollup-plugin-define": "^1.0.1",
+ "rollup-plugin-sourcemaps": "^0.6.3",
+ "rollup-plugin-typescript2": "^0.36.0"
},
"files": [
"dist/*",
diff --git a/extensions/model-extension/rollup.config.ts b/extensions/model-extension/rollup.config.ts
new file mode 100644
index 000000000..722785aa3
--- /dev/null
+++ b/extensions/model-extension/rollup.config.ts
@@ -0,0 +1,48 @@
+import resolve from '@rollup/plugin-node-resolve'
+import commonjs from '@rollup/plugin-commonjs'
+import sourceMaps from 'rollup-plugin-sourcemaps'
+import typescript from 'rollup-plugin-typescript2'
+import json from '@rollup/plugin-json'
+import replace from '@rollup/plugin-replace'
+
+const packageJson = require('./package.json')
+
+const pkg = require('./package.json')
+
+export default [
+ {
+ input: `src/index.ts`,
+ output: [{ file: pkg.main, format: 'es', sourcemap: true }],
+ // Indicate here external modules you don't wanna include in your bundle (i.e.: 'lodash')
+ external: [],
+ watch: {
+ include: 'src/**',
+ },
+ plugins: [
+ replace({
+ EXTENSION_NAME: JSON.stringify(packageJson.name),
+ MODULE_PATH: JSON.stringify(
+ `${packageJson.name}/${packageJson.module}`
+ ),
+ VERSION: JSON.stringify(packageJson.version),
+ }),
+ // Allow json resolution
+ json(),
+ // Compile TypeScript files
+ typescript({ useTsconfigDeclarationDir: true }),
+ // Compile TypeScript files
+ // Allow bundling cjs modules (unlike webpack, rollup doesn't understand cjs)
+ // commonjs(),
+ // Allow node_modules resolution, so you can use 'external' to control
+ // which external modules to include in the bundle
+ // https://github.com/rollup/rollup-plugin-node-resolve#usage
+ resolve({
+ extensions: ['.js', '.ts', '.svelte'],
+ browser: true,
+ }),
+
+ // Resolve source maps to the original source
+ sourceMaps(),
+ ],
+ },
+]
diff --git a/extensions/model-extension/webpack.config.js b/extensions/model-extension/webpack.config.js
deleted file mode 100644
index 347719f91..000000000
--- a/extensions/model-extension/webpack.config.js
+++ /dev/null
@@ -1,40 +0,0 @@
-const path = require('path')
-const webpack = require('webpack')
-const packageJson = require('./package.json')
-
-module.exports = {
- experiments: { outputModule: true },
- entry: './src/index.ts', // Adjust the entry point to match your project's main file
- mode: 'production',
- module: {
- rules: [
- {
- test: /\.tsx?$/,
- use: 'ts-loader',
- exclude: /node_modules/,
- },
- ],
- },
- plugins: [
- new webpack.DefinePlugin({
- EXTENSION_NAME: JSON.stringify(packageJson.name),
- MODULE_PATH: JSON.stringify(`${packageJson.name}/${packageJson.module}`),
- VERSION: JSON.stringify(packageJson.version),
- }),
- ],
- output: {
- filename: 'index.js', // Adjust the output file name as needed
- path: path.resolve(__dirname, 'dist'),
- library: { type: 'module' }, // Specify ESM output format
- },
- resolve: {
- extensions: ['.ts', '.js'],
- fallback: {
- path: require.resolve('path-browserify'),
- },
- },
- optimization: {
- minimize: false,
- },
- // Add loaders and other configuration as needed for your project
-}
diff --git a/extensions/monitoring-extension/package.json b/extensions/monitoring-extension/package.json
index 73d28ab37..0268fb11c 100644
--- a/extensions/monitoring-extension/package.json
+++ b/extensions/monitoring-extension/package.json
@@ -12,7 +12,7 @@
"download-artifacts:darwin": "echo 'No artifacts to download for darwin'",
"download-artifacts:win32": "download.bat",
"download-artifacts:linux": "download https://delta.jan.ai/vulkaninfo -o ./bin && chmod +x ./bin/vulkaninfo",
- "build:publish": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../pre-install"
+ "build:publish": "rimraf *.tgz --glob && yarn build && npm pack && cpx *.tgz ../../pre-install"
},
"exports": {
".": "./dist/index.js",
diff --git a/extensions/monitoring-extension/src/node/index.ts b/extensions/monitoring-extension/src/node/index.ts
index ca767d348..51fd0f070 100644
--- a/extensions/monitoring-extension/src/node/index.ts
+++ b/extensions/monitoring-extension/src/node/index.ts
@@ -4,8 +4,9 @@ import {
OperatingSystemInfo,
ResourceInfo,
SupportedPlatforms,
-} from '@janhq/core'
-import { getJanDataFolderPath, log } from '@janhq/core/node'
+ getJanDataFolderPath,
+ log,
+} from '@janhq/core/node'
import { mem, cpu } from 'node-os-utils'
import { exec } from 'child_process'
import { writeFileSync, existsSync, readFileSync, mkdirSync } from 'fs'
diff --git a/extensions/tensorrt-llm-extension/package.json b/extensions/tensorrt-llm-extension/package.json
index d3521669e..d1cb93f58 100644
--- a/extensions/tensorrt-llm-extension/package.json
+++ b/extensions/tensorrt-llm-extension/package.json
@@ -22,9 +22,9 @@
"provider": "nitro-tensorrt-llm",
"scripts": {
"build": "tsc --module commonjs && rollup -c rollup.config.ts",
- "build:publish:win32": "rimraf *.tgz --glob && npm run build && cpx \"bin/**\" \"dist/bin\" && npm pack && cpx *.tgz ../../pre-install",
- "build:publish:linux": "rimraf *.tgz --glob && npm run build && cpx \"bin/**\" \"dist/bin\" && npm pack && cpx *.tgz ../../pre-install",
- "build:publish:darwin": "rimraf *.tgz --glob && npm run build && cpx \"bin/**\" \"dist/bin\" && npm pack && cpx *.tgz ../../pre-install",
+ "build:publish:win32": "rimraf *.tgz --glob && yarn build && cpx \"bin/**\" \"dist/bin\" && npm pack && cpx *.tgz ../../pre-install",
+ "build:publish:linux": "rimraf *.tgz --glob && yarn build && cpx \"bin/**\" \"dist/bin\" && npm pack && cpx *.tgz ../../pre-install",
+ "build:publish:darwin": "rimraf *.tgz --glob && yarn build && cpx \"bin/**\" \"dist/bin\" && npm pack && cpx *.tgz ../../pre-install",
"build:publish": "run-script-os"
},
"exports": {
@@ -57,7 +57,7 @@
"path-browserify": "^1.0.1",
"rxjs": "^7.8.1",
"tcp-port-used": "^1.0.2",
- "ulid": "^2.3.0"
+ "ulidx": "^2.3.0"
},
"engines": {
"node": ">=18.0.0"
diff --git a/extensions/tensorrt-llm-extension/src/node/index.ts b/extensions/tensorrt-llm-extension/src/node/index.ts
index 1afebb950..4e2a7b9fa 100644
--- a/extensions/tensorrt-llm-extension/src/node/index.ts
+++ b/extensions/tensorrt-llm-extension/src/node/index.ts
@@ -2,10 +2,13 @@ import path from 'path'
import { ChildProcessWithoutNullStreams, spawn } from 'child_process'
import tcpPortUsed from 'tcp-port-used'
import fetchRT from 'fetch-retry'
-import { log, getJanDataFolderPath } from '@janhq/core/node'
+import {
+ log,
+ getJanDataFolderPath,
+ SystemInformation,
+ PromptTemplate,
+} from '@janhq/core/node'
import decompress from 'decompress'
-import { SystemInformation } from '@janhq/core'
-import { PromptTemplate } from '@janhq/core'
// Polyfill fetch with retry
const fetchRetry = fetchRT(fetch)
diff --git a/package.json b/package.json
index 847e89d91..79ed81e7a 100644
--- a/package.json
+++ b/package.json
@@ -34,9 +34,9 @@
"build:web": "yarn workspace jan-web build && cpx \"web/out/**\" \"electron/renderer/\"",
"build:electron": "yarn copy:assets && yarn workspace jan build",
"build:electron:test": "yarn workspace jan build:test",
- "build:extensions:windows": "rimraf ./pre-install/*.tgz && powershell -command \"$jobs = Get-ChildItem -Path './extensions' -Directory | ForEach-Object { Start-Job -Name ($_.Name) -ScriptBlock { param($_dir); try { Set-Location $_dir; npm install; npm run build:publish; Write-Output 'Build successful in ' + $_dir } catch { Write-Error 'Error in ' + $_dir; throw } } -ArgumentList $_.FullName }; $jobs | Wait-Job; $jobs | ForEach-Object { Receive-Job -Job $_ -Keep } | ForEach-Object { Write-Host $_ }; $failed = $jobs | Where-Object { $_.State -ne 'Completed' -or $_.ChildJobs[0].JobStateInfo.State -ne 'Completed' }; if ($failed) { Exit 1 }\"",
- "build:extensions:linux": "rimraf ./pre-install/*.tgz && find ./extensions -mindepth 1 -maxdepth 1 -type d -print0 | xargs -0 -n 1 -P 4 -I {} sh -c 'cd {} && npm install && npm run build:publish'",
- "build:extensions:darwin": "rimraf ./pre-install/*.tgz && find ./extensions -mindepth 1 -maxdepth 1 -type d -print0 | xargs -0 -n 1 -P 4 -I {} sh -c 'cd {} && npm install && npm run build:publish'",
+ "build:extensions:windows": "rimraf ./pre-install/*.tgz && powershell -command \"$jobs = Get-ChildItem -Path './extensions' -Directory | ForEach-Object { Start-Job -Name ($_.Name) -ScriptBlock { param($_dir); try { Set-Location $_dir; yarn; yarn build:publish; Write-Output 'Build successful in ' + $_dir } catch { Write-Error 'Error in ' + $_dir; throw } } -ArgumentList $_.FullName }; $jobs | Wait-Job; $jobs | ForEach-Object { Receive-Job -Job $_ -Keep } | ForEach-Object { Write-Host $_ }; $failed = $jobs | Where-Object { $_.State -ne 'Completed' -or $_.ChildJobs[0].JobStateInfo.State -ne 'Completed' }; if ($failed) { Exit 1 }\"",
+ "build:extensions:linux": "rimraf ./pre-install/*.tgz && find ./extensions -mindepth 1 -maxdepth 1 -type d -print0 | xargs -0 -n 1 -P 4 -I {} sh -c 'cd {}; yarn; yarn build:publish'",
+ "build:extensions:darwin": "rimraf ./pre-install/*.tgz && find ./extensions -mindepth 1 -maxdepth 1 -type d -print0 | xargs -0 -n 1 -P 4 -I {} sh -c 'cd {}; yarn; yarn build:publish'",
"build:extensions:server": "yarn workspace build:extensions ",
"build:extensions": "run-script-os",
"build:test": "yarn copy:assets && yarn build:web && yarn workspace jan build:test",
diff --git a/web/containers/Providers/EventHandler.tsx b/web/containers/Providers/EventHandler.tsx
index f0020d311..d44c950e1 100644
--- a/web/containers/Providers/EventHandler.tsx
+++ b/web/containers/Providers/EventHandler.tsx
@@ -17,7 +17,7 @@ import {
ModelInitFailed,
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
-import { ulid } from 'ulid'
+import { ulid } from 'ulidx'
import {
activeModelAtom,
diff --git a/web/hooks/useSendChatMessage.ts b/web/hooks/useSendChatMessage.ts
index 0bbc779a6..d946adacd 100644
--- a/web/hooks/useSendChatMessage.ts
+++ b/web/hooks/useSendChatMessage.ts
@@ -21,7 +21,7 @@ import {
} from '@janhq/core'
import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
-import { ulid } from 'ulid'
+import { ulid } from 'ulidx'
import { selectedModelAtom } from '@/containers/DropdownListSidebar'
import {
diff --git a/web/package.json b/web/package.json
index e3301f68b..84d489c00 100644
--- a/web/package.json
+++ b/web/package.json
@@ -44,7 +44,7 @@
"sass": "^1.69.4",
"tailwind-merge": "^2.0.0",
"tailwindcss": "3.3.5",
- "ulid": "^2.3.0",
+ "ulidx": "^2.3.0",
"uuid": "^9.0.1",
"zod": "^3.22.4"
},