Merge changes from dev branch

Daniel 2024-03-21 20:13:41 +08:00
commit f0901a88b1
152 changed files with 1508 additions and 1588 deletions

View File

@ -53,15 +53,17 @@ build: check-file-counts
clean:
ifeq ($(OS),Windows_NT)
powershell -Command "Get-ChildItem -Path . -Include node_modules, .next, dist, build, out -Recurse -Directory | Remove-Item -Recurse -Force"
powershell -Command "Get-ChildItem -Path . -Include package-lock.json -Recurse -File | Remove-Item -Recurse -Force"
powershell -Command "Remove-Item -Recurse -Force ./pre-install/*.tgz"
powershell -Command "Remove-Item -Recurse -Force ./electron/pre-install/*.tgz"
rmdir /s /q "%USERPROFILE%\jan\extensions"
powershell -Command "if (Test-Path \"$($env:USERPROFILE)\jan\extensions\") { Remove-Item -Path \"$($env:USERPROFILE)\jan\extensions\" -Recurse -Force }"
else ifeq ($(shell uname -s),Linux)
find . -name "node_modules" -type d -prune -exec rm -rf '{}' +
find . -name ".next" -type d -exec rm -rf '{}' +
find . -name "dist" -type d -exec rm -rf '{}' +
find . -name "build" -type d -exec rm -rf '{}' +
find . -name "out" -type d -exec rm -rf '{}' +
find . -name "packake-lock.json" -type f -exec rm -rf '{}' +
rm -rf ./pre-install/*.tgz
rm -rf ./electron/pre-install/*.tgz
rm -rf "~/jan/extensions"
@ -72,6 +74,7 @@ else
find . -name "dist" -type d -exec rm -rf '{}' +
find . -name "build" -type d -exec rm -rf '{}' +
find . -name "out" -type d -exec rm -rf '{}' +
find . -name "packake-lock.json" -type f -exec rm -rf '{}' +
rm -rf ./pre-install/*.tgz
rm -rf ./electron/pre-install/*.tgz
rm -rf ~/jan/extensions

View File

@ -76,31 +76,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
<tr style="text-align:center">
<td style="text-align:center"><b>Experimental (Nightly Build)</b></td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-win-x64-0.4.9-336.exe'>
<a href='https://delta.jan.ai/latest/jan-win-x64-0.4.9-337.exe'>
<img src='./docs/static/img/windows.png' style="height:14px; width: 14px" />
<b>jan.exe</b>
</a>
</td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-mac-x64-0.4.9-336.dmg'>
<a href='https://delta.jan.ai/latest/jan-mac-x64-0.4.9-337.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>Intel</b>
</a>
</td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-mac-arm64-0.4.9-336.dmg'>
<a href='https://delta.jan.ai/latest/jan-mac-arm64-0.4.9-337.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>M1/M2</b>
</a>
</td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-linux-amd64-0.4.9-336.deb'>
<a href='https://delta.jan.ai/latest/jan-linux-amd64-0.4.9-337.deb'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.deb</b>
</a>
</td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-linux-x86_64-0.4.9-336.AppImage'>
<a href='https://delta.jan.ai/latest/jan-linux-x86_64-0.4.9-337.AppImage'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.AppImage</b>
</a>

View File

@ -8,8 +8,8 @@
],
"homepage": "https://jan.ai",
"license": "AGPL-3.0",
"main": "dist/core.umd.js",
"module": "dist/core.es5.js",
"main": "dist/core.es5.js",
"module": "dist/core.cjs.js",
"typings": "dist/types/index.d.ts",
"files": [
"dist",
@ -17,8 +17,7 @@
],
"author": "Jan <service@jan.ai>",
"exports": {
".": "./dist/core.umd.js",
"./sdk": "./dist/core.umd.js",
".": "./dist/core.es5.js",
"./node": "./dist/node/index.cjs.js"
},
"typesVersions": {
@ -27,10 +26,6 @@
"./dist/core.es5.js.map",
"./dist/types/index.d.ts"
],
"sdk": [
"./dist/core.es5.js.map",
"./dist/types/index.d.ts"
],
"node": [
"./dist/node/index.cjs.js.map",
"./dist/types/node/index.d.ts"
@ -38,13 +33,14 @@
}
},
"scripts": {
"lint": "tslint --project tsconfig.json -t codeFrame 'src/**/*.ts' 'test/**/*.ts'",
"lint": "tslint --project tsconfig.json -t codeFrame 'src/**/*.ts' 'test/**/*.ts'",
"test": "jest",
"prebuild": "rimraf dist",
"build": "tsc --module commonjs && rollup -c rollup.config.ts",
"start": "rollup -c rollup.config.ts -w"
},
"devDependencies": {
"@rollup/plugin-replace": "^5.0.5",
"@types/jest": "^29.5.12",
"@types/node": "^20.11.4",
"eslint": "8.57.0",
@ -63,6 +59,6 @@
},
"dependencies": {
"rxjs": "^7.8.1",
"ulid": "^2.3.0"
"ulidx": "^2.3.0"
}
}
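The entry-point changes above drop the UMD bundle and the `./sdk` subpath: the root export now resolves to `dist/core.es5.js`, and only `./node` remains as a subpath. A minimal sketch of how a consumer resolves the new exports map (the package name `@janhq/core` and the imported symbols are assumptions, not taken from this diff):

```ts
// Illustrative sketch only; the package name and symbols below are assumed.
import { events, fs } from '@janhq/core' // "." now resolves to dist/core.es5.js
import * as node from '@janhq/core/node' // "./node" still maps to dist/node/index.cjs.js

// The removed "./sdk" subpath no longer resolves:
// import { something } from '@janhq/core/sdk' // ERR_PACKAGE_PATH_NOT_EXPORTED
```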

View File

@ -3,17 +3,16 @@ import commonjs from 'rollup-plugin-commonjs'
import sourceMaps from 'rollup-plugin-sourcemaps'
import typescript from 'rollup-plugin-typescript2'
import json from 'rollup-plugin-json'
import replace from '@rollup/plugin-replace'
const pkg = require('./package.json')
const libraryName = 'core'
export default [
{
input: `src/index.ts`,
output: [
{ file: pkg.main, name: libraryName, format: 'umd', sourcemap: true },
{ file: pkg.module, format: 'es', sourcemap: true },
// { file: pkg.main, name: libraryName, format: 'umd', sourcemap: true },
{ file: pkg.main, format: 'es', sourcemap: true },
],
// Indicate here external modules you don't wanna include in your bundle (i.e.: 'lodash')
external: ['path'],
@ -30,7 +29,13 @@ export default [
// Allow node_modules resolution, so you can use 'external' to control
// which external modules to include in the bundle
// https://github.com/rollup/rollup-plugin-node-resolve#usage
resolve(),
replace({
'node:crypto': 'crypto',
'delimiters': ['"', '"'],
}),
resolve({
browser: true,
}),
// Resolve source maps to the original source
sourceMaps(),
@ -46,7 +51,7 @@ export default [
'pacote',
'@types/pacote',
'@npmcli/arborist',
'ulid',
'ulidx',
'node-fetch',
'fs',
'request',
@ -64,7 +69,7 @@ export default [
// Allow json resolution
json(),
// Compile TypeScript files
typescript({ useTsconfigDeclarationDir: true, exclude: ['src/*.ts', 'src/extensions/**'] }),
typescript({ useTsconfigDeclarationDir: true }),
// Allow bundling cjs modules (unlike webpack, rollup doesn't understand cjs)
commonjs(),
// Allow node_modules resolution, so you can use 'external' to control
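The added `replace` step rewrites `node:`-prefixed crypto imports before the browser-targeted `resolve` runs, since specifiers using the `node:` scheme cannot be resolved or shimmed in a browser bundle. A sketch of the effect, under the assumption that a dependency (such as `ulidx`) imports the prefixed module:

```ts
// Sketch of the effect only; the exact import a dependency emits is an assumption.
// Specifier seen by the bundler before the replace step:
//   import { randomBytes } from "node:crypto"
// After replace({ 'node:crypto': 'crypto', delimiters: ['"', '"'] }):
//   import { randomBytes } from "crypto"
// resolve({ browser: true }) can then substitute a browser shim for "crypto".
```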

View File

@ -13,7 +13,7 @@ const executeOnMain: (extension: string, method: string, ...args: any[]) => Prom
extension,
method,
...args
) => global.core?.api?.invokeExtensionFunc(extension, method, ...args)
) => globalThis.core?.api?.invokeExtensionFunc(extension, method, ...args)
/**
* Downloads a file from a URL and saves it to the local file system.
@ -26,7 +26,7 @@ const executeOnMain: (extension: string, method: string, ...args: any[]) => Prom
const downloadFile: (downloadRequest: DownloadRequest, network?: NetworkConfig) => Promise<any> = (
downloadRequest,
network
) => global.core?.api?.downloadFile(downloadRequest, network)
) => globalThis.core?.api?.downloadFile(downloadRequest, network)
/**
* Aborts the download of a specific file.
@ -34,14 +34,14 @@ const downloadFile: (downloadRequest: DownloadRequest, network?: NetworkConfig)
* @returns {Promise<any>} A promise that resolves when the download has been aborted.
*/
const abortDownload: (fileName: string) => Promise<any> = (fileName) =>
global.core.api?.abortDownload(fileName)
globalThis.core.api?.abortDownload(fileName)
/**
* Gets Jan's data folder path.
*
* @returns {Promise<string>} A Promise that resolves with Jan's data folder path.
*/
const getJanDataFolderPath = (): Promise<string> => global.core.api?.getJanDataFolderPath()
const getJanDataFolderPath = (): Promise<string> => globalThis.core.api?.getJanDataFolderPath()
/**
* Opens the file explorer at a specific path.
@ -49,21 +49,21 @@ const getJanDataFolderPath = (): Promise<string> => global.core.api?.getJanDataF
* @returns {Promise<any>} A promise that resolves when the file explorer is opened.
*/
const openFileExplorer: (path: string) => Promise<any> = (path) =>
global.core.api?.openFileExplorer(path)
globalThis.core.api?.openFileExplorer(path)
/**
* Joins multiple paths together.
* @param paths - The paths to join.
* @returns {Promise<string>} A promise that resolves with the joined path.
*/
const joinPath: (paths: string[]) => Promise<string> = (paths) => global.core.api?.joinPath(paths)
const joinPath: (paths: string[]) => Promise<string> = (paths) => globalThis.core.api?.joinPath(paths)
/**
* Retrieve the basename from a URL.
* @param path - The path to retrieve.
* @returns {Promise<string>} A promise that resolves with the basename.
*/
const baseName: (paths: string) => Promise<string> = (path) => global.core.api?.baseName(path)
const baseName: (paths: string) => Promise<string> = (path) => globalThis.core.api?.baseName(path)
/**
* Opens an external URL in the default web browser.
@ -72,20 +72,20 @@ const baseName: (paths: string) => Promise<string> = (path) => global.core.api?.
* @returns {Promise<any>} - A promise that resolves when the URL has been successfully opened.
*/
const openExternalUrl: (url: string) => Promise<any> = (url) =>
global.core.api?.openExternalUrl(url)
globalThis.core.api?.openExternalUrl(url)
/**
* Gets the resource path of the application.
*
* @returns {Promise<string>} - A promise that resolves with the resource path.
*/
const getResourcePath: () => Promise<string> = () => global.core.api?.getResourcePath()
const getResourcePath: () => Promise<string> = () => globalThis.core.api?.getResourcePath()
/**
* Gets the user's home path.
* @returns return user's home path
*/
const getUserHomePath = (): Promise<string> => global.core.api?.getUserHomePath()
const getUserHomePath = (): Promise<string> => globalThis.core.api?.getUserHomePath()
/**
* Log to file from browser processes.
@ -93,7 +93,7 @@ const getUserHomePath = (): Promise<string> => global.core.api?.getUserHomePath(
* @param message - Message to log.
*/
const log: (message: string, fileName?: string) => void = (message, fileName) =>
global.core.api?.log(message, fileName)
globalThis.core.api?.log(message, fileName)
/**
* Check whether the path is a subdirectory of another path.
@ -104,14 +104,14 @@ const log: (message: string, fileName?: string) => void = (message, fileName) =>
* @returns {Promise<boolean>} - A promise that resolves with a boolean indicating whether the path is a subdirectory.
*/
const isSubdirectory: (from: string, to: string) => Promise<boolean> = (from: string, to: string) =>
global.core.api?.isSubdirectory(from, to)
globalThis.core.api?.isSubdirectory(from, to)
/**
* Get system information
* @returns {Promise<any>} - A promise that resolves with the system information.
*/
const systemInformation: () => Promise<SystemInformation> = () =>
global.core.api?.systemInformation()
globalThis.core.api?.systemInformation()
/**
* Show toast message from browser processes.
@ -120,7 +120,7 @@ const systemInformation: () => Promise<SystemInformation> = () =>
* @returns
*/
const showToast: (title: string, message: string) => void = (title, message) =>
global.core.api?.showToast(title, message)
globalThis.core.api?.showToast(title, message)
/**
* Register extension point function type definition
*/
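The `global` to `globalThis` swaps throughout this file matter because `global` exists only in Node, while `globalThis` is standard in both Node and browsers, so the same bundle can run in either process. A minimal typing sketch that keeps these optional-chained calls type-safe (the shape of the injected `core` object is an assumption, not defined in this diff):

```ts
// Minimal sketch; the host injects `core` at runtime and its exact shape is assumed.
declare global {
  // eslint-disable-next-line no-var
  var core:
    | {
        api?: Record<string, (...args: any[]) => any>
        events?: {
          on: (event: string, handler: (...args: any[]) => void) => void
          off: (event: string, handler: (...args: any[]) => void) => void
          emit: (event: string, payload: any) => void
        }
      }
    | undefined
}

// With the declaration above, calls like this type-check in both browser and Node
// builds, and safely no-op when the host has not injected `core`.
const dataFolder = () => globalThis.core?.api?.getJanDataFolderPath?.()

export {}
```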

View File

@ -5,7 +5,7 @@
* @param handler The handler function to call when the event is observed.
*/
const on: (eventName: string, handler: Function) => void = (eventName, handler) => {
global.core?.events?.on(eventName, handler)
globalThis.core?.events?.on(eventName, handler)
}
/**
@ -15,7 +15,7 @@ const on: (eventName: string, handler: Function) => void = (eventName, handler)
* @param handler The handler function to call when the event is observed.
*/
const off: (eventName: string, handler: Function) => void = (eventName, handler) => {
global.core?.events?.off(eventName, handler)
globalThis.core?.events?.off(eventName, handler)
}
/**
@ -25,7 +25,7 @@ const off: (eventName: string, handler: Function) => void = (eventName, handler)
* @param object The object to pass to the event callback.
*/
const emit: (eventName: string, object: any) => void = (eventName, object) => {
global.core?.events?.emit(eventName, object)
globalThis.core?.events?.emit(eventName, object)
}
export const events = {

View File

@ -1,5 +1,5 @@
import { requestInference } from './helpers/sse'
import { ulid } from 'ulid'
import { ulid } from 'ulidx'
import { AIEngine } from './AIEngine'
import {
ChatCompletionRole,

View File

@ -4,7 +4,7 @@ import { FileStat } from './types'
* Writes data to a file at the specified path.
* @returns {Promise<any>} A Promise that resolves when the file is written successfully.
*/
const writeFileSync = (...args: any[]) => global.core.api?.writeFileSync(...args)
const writeFileSync = (...args: any[]) => globalThis.core.api?.writeFileSync(...args)
/**
* Writes blob data to a file at the specified path.
@ -13,52 +13,52 @@ const writeFileSync = (...args: any[]) => global.core.api?.writeFileSync(...args
* @returns
*/
const writeBlob: (path: string, data: string) => Promise<any> = (path, data) =>
global.core.api?.writeBlob(path, data)
globalThis.core.api?.writeBlob(path, data)
/**
* Reads the contents of a file at the specified path.
* @returns {Promise<any>} A Promise that resolves with the contents of the file.
*/
const readFileSync = (...args: any[]) => global.core.api?.readFileSync(...args)
const readFileSync = (...args: any[]) => globalThis.core.api?.readFileSync(...args)
/**
* Check whether the file exists
* @param {string} path
* @returns {boolean} A boolean indicating whether the path is a file.
*/
const existsSync = (...args: any[]) => global.core.api?.existsSync(...args)
const existsSync = (...args: any[]) => globalThis.core.api?.existsSync(...args)
/**
* List the directory files
* @returns {Promise<any>} A Promise that resolves with the contents of the directory.
*/
const readdirSync = (...args: any[]) => global.core.api?.readdirSync(...args)
const readdirSync = (...args: any[]) => globalThis.core.api?.readdirSync(...args)
/**
* Creates a directory at the specified path.
* @returns {Promise<any>} A Promise that resolves when the directory is created successfully.
*/
const mkdirSync = (...args: any[]) => global.core.api?.mkdirSync(...args)
const mkdirSync = (...args: any[]) => globalThis.core.api?.mkdirSync(...args)
const mkdir = (...args: any[]) => global.core.api?.mkdir(...args)
const mkdir = (...args: any[]) => globalThis.core.api?.mkdir(...args)
/**
* Removes a directory at the specified path.
* @returns {Promise<any>} A Promise that resolves when the directory is removed successfully.
*/
const rmdirSync = (...args: any[]) =>
global.core.api?.rmdirSync(...args, { recursive: true, force: true })
globalThis.core.api?.rmdirSync(...args, { recursive: true, force: true })
const rm = (path: string) => global.core.api?.rm(path)
const rm = (path: string) => globalThis.core.api?.rm(path)
/**
* Deletes a file from the local file system.
* @param {string} path - The path of the file to delete.
* @returns {Promise<any>} A Promise that resolves when the file is deleted.
*/
const unlinkSync = (...args: any[]) => global.core.api?.unlinkSync(...args)
const unlinkSync = (...args: any[]) => globalThis.core.api?.unlinkSync(...args)
/**
* Appends data to a file at the specified path.
*/
const appendFileSync = (...args: any[]) => global.core.api?.appendFileSync(...args)
const appendFileSync = (...args: any[]) => globalThis.core.api?.appendFileSync(...args)
/**
* Synchronizes a file from a source path to a destination path.
@ -67,15 +67,15 @@ const appendFileSync = (...args: any[]) => global.core.api?.appendFileSync(...ar
* @returns {Promise<any>} - A promise that resolves when the file has been successfully synchronized.
*/
const syncFile: (src: string, dest: string) => Promise<any> = (src, dest) =>
global.core.api?.syncFile(src, dest)
globalThis.core.api?.syncFile(src, dest)
/**
* Copy file sync.
*/
const copyFileSync = (...args: any[]) => global.core.api?.copyFileSync(...args)
const copyFileSync = (...args: any[]) => globalThis.core.api?.copyFileSync(...args)
const copyFile: (src: string, dest: string) => Promise<void> = (src, dest) =>
global.core.api?.copyFile(src, dest)
globalThis.core.api?.copyFile(src, dest)
/**
* Gets the file's stats.
@ -87,7 +87,7 @@ const copyFile: (src: string, dest: string) => Promise<void> = (src, dest) =>
const fileStat: (path: string, outsideJanDataFolder?: boolean) => Promise<FileStat | undefined> = (
path,
outsideJanDataFolder
) => global.core.api?.fileStat(path, outsideJanDataFolder)
) => globalThis.core.api?.fileStat(path, outsideJanDataFolder)
// TODO: Export `dummy` fs functions automatically
// Currently adding these manually

View File

@ -216,7 +216,7 @@ export const createMessage = async (threadId: string, message: any) => {
const threadMessagesFileName = 'messages.jsonl'
try {
const { ulid } = require('ulid')
const { ulid } = require('ulidx')
const msgId = ulid()
const createdAt = Date.now()
const threadMessage: ThreadMessage = {
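Only the module specifier changes here because `ulidx` is a maintained fork of `ulid` exposing the same `ulid()` factory, so the call site stays identical. A quick usage sketch:

```ts
import { ulid } from 'ulidx'

// 26-character, lexicographically sortable identifier (the value shown is illustrative)
const msgId = ulid() // e.g. '01HS1ZK8Q9GJ4X4V7W1N2M3P4R'
const createdAt = Date.now()
```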

View File

@ -4,3 +4,5 @@ export * from './extension/manager'
export * from './extension/store'
export * from './api'
export * from './helper'
export * from './../types'
export * from './../api'

View File

@ -1,25 +1,29 @@
---
title: "Post Mortem: Bitdefender False Positive Flag"
title: 'Post Mortem: Bitdefender False Positive Flag'
description: "10th January 2024, Jan's 0.4.4 Release on Windows triggered Bitdefender to incorrectly flag it as infected with Gen:Variant.Tedy.258323, leading to automatic quarantine warnings on users' computers."
slug: /postmortems/january-10-2024-bitdefender-false-positive-flag
tags: [Postmortem]
keywords:
[
postmortem,
bitdefender,
false positive,
antivirus,
jan,
nitro,
incident,
incident response,
supply chain security,
user communication,
documentation,
antivirus compatibility,
cross-platform testing,
proactive incident response,
user education,
lessons learned,
]
---
<head>
<title>Jan 10, 2024 Incident Postmortem - Bitdefender False Positive Flag on Jan AI Resolved</title>
<meta charSet="utf-8" />
<meta name="description" content="Comprehensive postmortem on the Jan AI v0.4.4 Bitdefender false positive incident on January 10, 2024. Learn about the investigation, solutions, and preventive measures." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, postmortem, incident, flagging issue" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/blog/postmortems/january-10-2024-bitdefender-false-positive-flag/" />
<meta property="og:title" content="Bitdefender False Positive Flag on Jan AI Resolved - Jan 10, 2024 Incident Postmortem" />
<meta property="og:description" content="Comprehensive postmortem on the Jan AI v0.4.4 Bitdefender false positive incident on January 10, 2024. Learn about the investigation, solutions, and preventive measures." />
<meta property="og:url" content="https://jan.ai/blog/postmortems/january-10-2024-bitdefender-false-positive-flag/" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image.png" />
</head>
Following the recent incident related to Jan version 0.4.4 triggering Bitdefender on Windows with Gen:Variant.Tedy.258323 on January 10, 2024, we wanted to provide a comprehensive postmortem and outline the necessary follow-up actions.
## Incident Overview

View File

@ -2,20 +2,6 @@
title: Jan's Vision for 2035
---
<head>
<title>Jan's Vision for 2035</title>
<meta charset="utf-8" />
<meta name="description" content="Discover Jan's vision for the year 2035, where it aims to transform into a robotics company. Learn about its planning parameters, 10-year vision, 2-week sprint, and success measurement through Quarterly OKRs." />
<meta name="keywords" content="Jan, vision, 2035, robotics company, planning parameters, 10-year vision, 2-week sprint, Quarterly OKRs" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/jans-vision-for-2035" />
<meta property="og:title" content="Jan's Vision for 2035" />
<meta property="og:description" content="Discover Jan's vision for the year 2035, where it aims to transform into a robotics company. Learn about its planning parameters, 10-year vision, 2-week sprint, and success measurement through Quarterly OKRs." />
<meta property="og:url" content="https://jan.ai/jans-vision-for-2035" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image-jans-vision.png" />
</head>
[Jan 2035: A Robotics Company](https://hackmd.io/QIWyYbNNQVWVbupuI3kjAA)
We only have 2 planning parameters:

View File

@ -4,34 +4,21 @@ slug: /about
description: Jan is a desktop application that turns computers into thinking machines.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
about Jan,
desktop application,
thinking machine,
]
---
<head>
<title>About Jan</title>
<meta charset="utf-8" />
<meta name="description" content="Discover Jan, a desktop application that turns computers into thinking machines. Learn about its features, principles, vision, and how to get involved with the Jan Labs team." />
<meta name="keywords" content="Jan, Jan AI, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, desktop application, thinking machine, about Jan" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/about" />
<meta property="og:title" content="About Jan" />
<meta property="og:description" content="Discover Jan, a desktop application that turns computers into thinking machines. Learn about its features, principles, vision, and how to get involved with the Jan Labs team." />
<meta property="og:url" content="https://jan.ai/about" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image-about-jan.png" />
</head>
Jan turns computers into thinking machines to change how we use them.
Jan is created and maintained by Jan Labs, a robotics company.

View File

@ -2,20 +2,6 @@
title: Frequently Asked Questions (FAQ) - Jan
---
<head>
<title>Frequently Asked Questions (FAQ)</title>
<meta charset="utf-8" />
<meta name="description" content="Explore frequently asked questions about Jan, including its features, compatibility, privacy policy, usage, and community involvement." />
<meta name="keywords" content="Jan, frequently asked questions, FAQ, about Jan, usage, compatibility, privacy, community, contribution, troubleshooting, self-hosting, hiring" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/faq" />
<meta property="og:title" content="Frequently Asked Questions (FAQ)" />
<meta property="og:description" content="Explore frequently asked questions about Jan, including its features, compatibility, privacy policy, usage, and community involvement." />
<meta property="og:url" content="https://jan.ai/faq" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image-faq.png" />
</head>
# Frequently Asked Questions (FAQ)
## What is Jan?

View File

@ -2,19 +2,5 @@
title: Roadmap
---
<head>
<title>Roadmap</title>
<meta charset="utf-8" />
<meta name="description" content="Explore Jan's roadmap to see the immediate and longer-term plans for development and features." />
<meta name="keywords" content="Jan, roadmap, development, features, immediate roadmap, longer-term roadmap, GitHub, Discord" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/roadmap" />
<meta property="og:title" content="Roadmap" />
<meta property="og:description" content="Explore Jan's roadmap to see the immediate and longer-term plans for development and features." />
<meta property="og:url" content="https://jan.ai/roadmap" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image-roadmap.png" />
</head>
- [ ] [Immediate Roadmap on Github](https://github.com/orgs/janhq/projects/5/views/16)
- [ ] [Longer-term Roadmap on Discord](https://discord.gg/Ey62mynnYr)

View File

@ -4,30 +4,20 @@ description: Jan is a ChatGPT-alternative that runs on your own computer, with a
slug: /acknowledgements
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
acknowledgements,
third-party libraries,
]
---
<head>
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server. Explore the third-party libraries that have contributed to the development of Jan."/>
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, acknowledgements, third-party libraries"/>
<meta property="og:title" content="Acknowledgements - Jan"/>
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server. Explore the third-party libraries that have contributed to the development of Jan."/>
<meta property="og:url" content="https://jan.ai/acknowledgements"/>
<meta name="twitter:card" content="summary"/>
<meta name="twitter:title" content="Acknowledgements - Jan"/>
<meta name="twitter:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server. Explore the third-party libraries that have contributed to the development of Jan."/>
</head>
# Acknowledgements
We would like to express our gratitude to the following third-party libraries that have made the development of Jan possible.

View File

@ -4,31 +4,23 @@ slug: /community
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
about Jan,
desktop application,
thinking machine,
community,
socials,
]
---
<head>
<title>Jan's Community</title>
<meta charset="utf-8" />
<meta name="description" content="Join Jan's community to connect with other users, stay updated, and explore career opportunities." />
<meta name="keywords" content="Jan, community, Discord, Twitter, HuggingFace, LinkedIn, Reddit, newsletter, careers" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/community" />
<meta property="og:title" content="Jan's Community" />
<meta property="og:description" content="Join Jan's community to connect with other users, stay updated, and explore career opportunities." />
<meta property="og:url" content="https://jan.ai/community" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image-community.png" />
</head>
## Socials
- [Discord](https://discord.gg/SH3DGmUs6b)

View File

@ -4,31 +4,19 @@ slug: /developer/architecture
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
architecture,
]
---
<head>
<title>Jan AI Architecture - Modular and Extensible Framework</title>
<meta charSet="utf-8" />
<meta name="description" content="Discover the modular architecture of Jan, a ChatGPT alternative that runs on your own computer. Learn about Jan's local API server, Desktop UI, and the Nitro inference engine." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, modular architecture, Extensions API" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/developer/architecture/" />
<meta property="og:title" content="Jan AI Architecture - Modular and Extensible Framework" />
<meta property="og:description" content="Discover the modular architecture of Jan, a ChatGPT alternative that runs on your own computer. Learn about Jan's local API server, Desktop UI, and the Nitro inference engine." />
<meta property="og:url" content="https://jan.ai/developer/architecture/" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image.png" />
</head>
:::warning
This page is still under construction, and should be read as a scratchpad

View File

@ -4,31 +4,19 @@ slug: /developer/file-based
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
file based approach,
]
---
<head>
<title>Jan AI File-based Data Persistence Approach</title>
<meta charSet="utf-8" />
<meta name="description" content="Learn how Jan, a ChatGPT alternative, leverages a local filesystem for data persistence, promoting composability and tinkerability similar to VSCode." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, file-based data storage, data persistence" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/developer/file-based/" />
<meta property="og:title" content="Jan AI File-based Data Persistence Approach" />
<meta property="og:description" content="Learn how Jan, a ChatGPT alternative, leverages a local filesystem for data persistence, promoting composability and tinkerability similar to VSCode." />
<meta property="og:url" content="https://jan.ai/developer/file-based/" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image.png" />
</head>
:::warning
This page is still under construction, and should be read as a scratchpad

View File

@ -4,31 +4,19 @@ slug: /developer/ui
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
UI kit,
]
---
<head>
<title>Jan AI User Interface - Customizable UI Kit</title>
<meta charSet="utf-8" />
<meta name="description" content="Explore Jan's UI Kit for customizing the user interface to fit your brand and style. Learn how to personalize your application with Jan's flexible UI components." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, UI Kit, customizable UI" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/developer/ui/" />
<meta property="og:title" content="Jan AI User Interface - Customizable UI Kit" />
<meta property="og:description" content="Explore Jan's UI Kit for customizing the user interface to fit your brand and style. Learn how to personalize your application with Jan's flexible UI components." />
<meta property="og:url" content="https://jan.ai/developer/ui/" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image.png" />
</head>
:::warning
This page is still under construction, and should be read as a scratchpad

View File

@ -4,34 +4,21 @@ slug: /developer/prereq
description: Guide to install and setup Jan for development.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
installation,
prerequisites,
developer setup,
]
---
<head>
<title>Jan AI Installation and Setup Guide - Developer Prerequisites</title>
<meta charSet="utf-8" />
<meta name="description" content="Comprehensive guide to installing and setting up Jan for development. Covers hardware, system requirements, and step-by-step instructions for developers." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, installation, prerequisites, developer setup" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/developer/prereq/" />
<meta property="og:title" content="Jan AI Installation and Setup Guide - Developer Prerequisites" />
<meta property="og:description" content="Comprehensive guide to installing and setting up Jan for development. Covers hardware, system requirements, and step-by-step instructions for developers." />
<meta property="og:url" content="https://jan.ai/developer/prereq/" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image.png" />
</head>
## Requirements
### Hardware Requirements

View File

@ -4,31 +4,18 @@ slug: /developer
description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>Jan AI Developer Documentation - Building Extensions and SDK Overview</title>
<meta charSet="utf-8" />
<meta name="description" content="Guide for developers on building extensions on top of the Jan Framework. Learn about Jan's extensible framework for AI applications, available on all platforms." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, extensible framework, SDK, building extensions" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/developer/" />
<meta property="og:title" content="Jan AI Developer Documentation - Building Extensions and SDK Overview" />
<meta property="og:description" content="Guide for developers on building extensions on top of the Jan Framework. Learn about Jan's extensible framework for AI applications, available on all platforms." />
<meta property="og:url" content="https://jan.ai/developer/" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image.png" />
</head>
The following docs are aimed at developers who want to build extensions on top of the Jan Framework.
:::tip

View File

@ -1,36 +1,23 @@
---
title: Your First Assistant
slug: /developer/build-assistant/your-first-assistant/
slug: /developer/assistant/your-first-assistant/
description: A quick start on how to build an assistant.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
quick start,
build assistant,
]
---
<head>
<title>Your First Assistant</title>
<meta charset="utf-8" />
<meta name="description" content="Get started quickly with building your first assistant using Jan. Learn the basics of creating conversational AI." />
<meta name="keywords" content="Jan, build assistant, quick start, conversational AI, local AI, private AI, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/developer/build-assistant/your-first-assistant/" />
<meta property="og:title" content="Your First Assistant" />
<meta property="og:description" content="Get started quickly with building your first assistant using Jan. Learn the basics of creating conversational AI." />
<meta property="og:url" content="https://jan.ai/developer/build-assistant/your-first-assistant/" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image-first-assistant.png" />
</head>
:::caution
This is currently under development.
:::

View File

@ -1,17 +1,18 @@
---
title: Anatomy of an Assistant
slug: /developer/build-assistant/assistant-anatomy/
slug: /developer/assistant/assistant-anatomy/
description: An overview of assistant.json
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
build assistant,
assistant anatomy,
]

View File

@ -1,17 +1,18 @@
---
title: Package your Assistant
slug: /developer/build-assistant/package-your-assistant/
slug: /developer/assistant/package-your-assistant/
description: Package your assistant for sharing and publishing.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
quick start,
build assistant,
]

View File

@ -1,17 +1,10 @@
---
title: Build an Assistant
slug: /developer/build-assistant
slug: /developer/assistant
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
Jan, Rethink the Computer, local AI, privacy focus, free and open source, private and offline, conversational AI, no-subscription fee, large language models,
build assistant,
]
---

View File

@ -1,17 +1,18 @@
---
title: Your First Engine
slug: /developer/build-engine/your-first-engine/
slug: /developer/engine/your-first-engine/
description: A quick start on how to build your first engine
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
quick start,
build engine,
]

View File

@ -1,17 +1,18 @@
---
title: Anatomy of an Engine
slug: /developer/build-engine/engine-anatomy
slug: /developer/engine/engine-anatomy
description: An overview of engine.json
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
build engine,
engine anatomy,
]

View File

@ -1,17 +1,18 @@
---
title: Package your Engine
slug: /developer/build-engine/package-your-engine/
slug: /developer/engine/package-your-engine/
description: Package your engine for sharing and publishing.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
build engine,
engine anatomy,
]

View File

@ -1,17 +1,18 @@
---
title: Build an Inference Engine
slug: /developer/build-engine/
slug: /developer/engine/
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
build assistant,
]
---

View File

@ -1,36 +1,23 @@
---
title: Your First Extension
slug: /developer/build-extension/your-first-extension/
slug: /developer/extension/your-first-extension/
description: A quick start on how to build your first extension
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
quick start,
build extension,
]
---
<head>
<title>Building Your First Jan AI Extension - Quick Start Guide</title>
<meta charSet="utf-8" />
<meta name="description" content="Step-by-step guide on how to build your first extension for Jan AI. Learn how to use the extension template and integrate your custom functionality into Jan." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, quick start, build extension" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/developer/build-extension/your-first-extension/" />
<meta property="og:title" content="Building Your First Jan AI Extension - Quick Start Guide" />
<meta property="og:description" content="Step-by-step guide on how to build your first extension for Jan AI. Learn how to use the extension template and integrate your custom functionality into Jan." />
<meta property="og:url" content="https://jan.ai/developer/build-extension/your-first-extension/" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image.png" />
</head>
:::caution
This is currently under development.
:::

View File

@ -1,17 +1,18 @@
---
title: Anatomy of an Extension
slug: /developer/build-extension/extension-anatomy
slug: /developer/extension/extension-anatomy
description: An overview of extensions.json
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
build extension,
extension anatomy,
]

View File

@ -1,17 +1,18 @@
---
title: Package your Extension
slug: /developer/build-extension/package-your-extension/
slug: /developer/extension/package-your-extension/
description: Package your extension for sharing and publishing.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
build extension,
extension anatomy,
]

View File

@ -1,17 +1,10 @@
---
title: Build an Extension
slug: /developer/build-extension/
slug: /developer/extension/
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
local AI,
private AI,
conversational AI,
no-subscription fee,
large language model,
Jan, Rethink the Computer, local AI, privacy focus, free and open source, private and offline, conversational AI, no-subscription fee, large language models,
build extension,
]
---

View File

@ -4,14 +4,15 @@ slug: /developer/engineering
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
spec,
engineering,
]

View File

@ -3,30 +3,18 @@ title: 'Assistants'
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>Assistants</title>
<meta charset="utf-8" />
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/assistants" />
<meta property="og:title" content="Assistants" />
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta property="og:url" content="https://jan.ai/assistants" />
<meta property="og:type" content="article" />
</head>
:::caution
This is currently under development.

View File

@ -3,30 +3,18 @@ title: Chats
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>Chats</title>
<meta charset="utf-8" />
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/chats" />
<meta property="og:title" content="Chats" />
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta property="og:url" content="https://jan.ai/chats" />
<meta property="og:type" content="article" />
</head>
:::caution
This is currently under development.

View File

@ -2,19 +2,6 @@
title: Engine
---
<head>
<title>Engine</title>
<meta charset="utf-8" />
<meta name="description" content="Currently Under Development" />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/engine" />
<meta property="og:title" content="Engine" />
<meta property="og:description" content="Currently Under Development" />
<meta property="og:url" content="https://jan.ai/engine" />
<meta property="og:type" content="article" />
</head>
:::caution
Currently Under Development

View File

@ -3,30 +3,18 @@ title: 'Files'
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>Files</title>
<meta charset="utf-8" />
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/files" />
<meta property="og:title" content="Files" />
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta property="og:url" content="https://jan.ai/files" />
<meta property="og:type" content="article" />
</head>
:::warning
Draft Specification: functionality has not been implemented yet.

View File

@ -1,16 +1,17 @@
---
title: "Fine-tuning"
title: 'Fine-tuning'
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---

View File

@ -3,30 +3,18 @@ title: Messages
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>Messages</title>
<meta charset="utf-8" />
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/messages" />
<meta property="og:title" content="Messages" />
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta property="og:url" content="https://jan.ai/messages" />
<meta property="og:type" content="article" />
</head>
:::caution
This is currently under development.

View File

@ -3,30 +3,18 @@ title: Models
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>Models</title>
<meta charset="utf-8" />
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/models" />
<meta property="og:title" content="Models" />
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta property="og:url" content="https://jan.ai/models" />
<meta property="og:type" content="article" />
</head>
:::caution
This is currently under development.

View File

@ -3,14 +3,15 @@ title: Prompts
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---

View File

@ -3,30 +3,18 @@ title: Threads
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>Threads</title>
<meta charset="utf-8" />
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/threads" />
<meta property="og:title" content="Threads" />
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta property="og:url" content="https://jan.ai/threads" />
<meta property="og:type" content="article" />
</head>
:::caution
This is currently under development.

View File

@ -4,14 +4,15 @@ slug: /developer/product
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
spec,
product,
]

View File

@ -3,30 +3,18 @@ title: Chat
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>Chat</title>
<meta charset="utf-8" />
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/chat" />
<meta property="og:title" content="Chat" />
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta property="og:url" content="https://jan.ai/chat" />
<meta property="og:type" content="article" />
</head>
## Overview
A home screen for users to chat with [assistants](/docs/engineering/assistants) via conversation [threads](/docs/engineering/threads).

View File

@ -3,30 +3,18 @@ title: Hub
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>Hub</title>
<meta charset="utf-8" />
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/hub" />
<meta property="og:title" content="Hub" />
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta property="og:url" content="https://jan.ai/hub" />
<meta property="og:type" content="article" />
</head>
## Overview
The Hub is like a store for everything, where users can discover and download models, assistants, and more.

View File

@ -3,30 +3,18 @@ title: Jan (The Default Assistant)
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>Jan (The Default Assistant)</title>
<meta charset="utf-8" />
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/jan" />
<meta property="og:title" content="Jan (The Default Assistant)" />
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta property="og:url" content="https://jan.ai/jan" />
<meta property="og:type" content="article" />
</head>
Jan ships with a default assistant "Jan" that lets users chat with any open source model out-of-the-box.
This assistant is defined in `/jan`. It is a generic assistant that illustrates the power of Jan. In the future, it will support additional features, e.g. multi-assistant conversations.

View File

@ -3,30 +3,18 @@ title: Settings
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>Settings</title>
<meta charset="utf-8" />
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/settings" />
<meta property="og:title" content="Settings" />
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta property="og:url" content="https://jan.ai/settings" />
<meta property="og:type" content="article" />
</head>
## Overview
A settings page for users to add extensions, configure model settings, change app appearance, add keyboard shortcuts, and a plethora of other personalizations.

View File

@ -3,30 +3,18 @@ title: System Monitor
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>System Monitor</title>
<meta charset="utf-8" />
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/system-monitor" />
<meta property="og:title" content="System Monitor" />
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server." />
<meta property="og:url" content="https://jan.ai/system-monitor" />
<meta property="og:type" content="article" />
</head>
## Overview
An activity screen to monitor system health and running models.

View File

@ -4,14 +4,15 @@ slug: /developer/framework/
description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---

View File

@ -3,21 +3,9 @@ title: "Jan's AI Hacker House (Ho Chi Minh City)"
description: '24-27 Oct 2023, District 3, HCMC. AI-focused talks, workshops and social events. Hosted by Jan.ai'
slug: /events/hcmc-oct23
image: /img/hcmc-launch-party.png
keywords: [AI, Hacker House, Ho Chi Minh City, HCMC, Jan.ai]
---
<head>
<title>Jan's AI Hacker House (Ho Chi Minh City)</title>
<meta charset="utf-8" />
<meta name="description" content="24-27 Oct 2023, District 3, HCMC. AI-focused talks, workshops and social events. Hosted by Jan.ai" />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/events/hcmc-oct23" />
<meta property="og:title" content="Jan's AI Hacker House (Ho Chi Minh City)" />
<meta property="og:description" content="24-27 Oct 2023, District 3, HCMC. AI-focused talks, workshops and social events. Hosted by Jan.ai" />
<meta property="og:url" content="https://jan.ai/events/hcmc-oct23" />
<meta property="og:type" content="article" />
</head>
![](/img/hcmc-launch-party.png)
🎉 Join us at our Friday Launch Party for an evening of AI talks from other builders! [(RSVP here)](https://jan-launch-party.eventbrite.sg/) 🎉

View File

@ -3,19 +3,6 @@ title: 'Nov 23: Nvidia GenAI Day'
description: Nvidia's LLM Day
---
<head>
<title>Nov 23: Nvidia GenAI Day</title>
<meta charset="utf-8" />
<meta name="description" content="Nvidia's LLM Day" />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/events/nvidia-genai-day" />
<meta property="og:title" content="Nov 23: Nvidia GenAI Day" />
<meta property="og:description" content="Nvidia's LLM Day" />
<meta property="og:url" content="https://jan.ai/events/nvidia-genai-day" />
<meta property="og:type" content="article" />
</head>
![](/img/nvidia-llm-day-header.png)
## Nvidia GenAI Innovation Day

View File

@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 1
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Jan Extensions,
Extensions,
]
@ -26,34 +27,35 @@ You can find the default extensions in the `Settings` > `Extensions`.
## List of Default Extensions
| Extension Name | Version | Description | Source Code Link |
| -------------- | ------- | ----------- | ---------------- |
| Assistant Extension | `v1.0.0` | This extension enables assistants, including Jan, a default assistant that can call all downloaded models. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/assistant-extension ) |
| Conversational Extension | `v1.0.0` | This extension enables conversations and state persistence via your filesystem. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/conversational-extension) |
| Inference Nitro Extension | `v1.0.0` | This extension embeds Nitro, a lightweight (3 MB) inference engine in C++. See nitro.jan.ai. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/inference-nitro-extension) |
| Inference Openai Extension | `v1.0.0` | This extension enables OpenAI chat completion API calls. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/inference-openai-extension) |
| Inference Triton Trt Llm Extension | `v1.0.0` | This extension enables Nvidia's TensorRT-LLM as an inference engine option. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/inference-triton-trtllm-extension) |
| Model Extension | `v1.0.22` | Model Management Extension provides model exploration and seamless downloads. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/model-extension) |
| Monitoring Extension | `v1.0.9` | This extension offers system health and OS-level data. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/monitoring-extension) |
| Extension Name | Version | Description | Source Code Link |
| ---------------------------------- | --------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
| Assistant Extension | `v1.0.0` | This extension enables assistants, including Jan, a default assistant that can call all downloaded models. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/assistant-extension) |
| Conversational Extension | `v1.0.0` | This extension enables conversations and state persistence via your filesystem. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/conversational-extension) |
| Inference Nitro Extension | `v1.0.0` | This extension embeds Nitro, a lightweight (3 MB) inference engine in C++. See nitro.jan.ai. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/inference-nitro-extension) |
| Inference Openai Extension | `v1.0.0` | This extension enables OpenAI chat completion API calls. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/inference-openai-extension) |
| Inference Triton Trt Llm Extension | `v1.0.0` | This extension enables Nvidia's TensorRT-LLM as an inference engine option. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/inference-triton-trtllm-extension) |
| Model Extension | `v1.0.22` | Model Management Extension provides model exploration and seamless downloads. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/model-extension) |
| Monitoring Extension | `v1.0.9` | This extension offers system health and OS-level data. | [Link to Source](https://github.com/janhq/jan/tree/dev/extensions/monitoring-extension) |
## Configure Extension Settings
To configure extension settings:
1. Navigate to the `~/jan/extensions`.
2. Open the `extensions.json` file
3. Edit the file with options including:
| Option | Description |
|-----------------|-------------------------------------------------|
| `_active` | Enable/disable the extension. |
| `listeners` | Default listener setting. |
| `origin` | Extension file path. |
| `installOptions`| Version and metadata configuration. |
| `name` | Extension name. |
| `version` | Extension version. |
| `main` | Main file path. |
| `description` | Extension description. |
| `url` | Extension URL. |
| Option | Description |
| ---------------- | ----------------------------------- |
| `_active` | Enable/disable the extension. |
| `listeners` | Default listener setting. |
| `origin` | Extension file path. |
| `installOptions` | Version and metadata configuration. |
| `name` | Extension name. |
| `version` | Extension version. |
| `main` | Main file path. |
| `description` | Extension description. |
| `url` | Extension URL. |
```json title="~/jan/extensions/extensions.json"
{
@ -145,7 +147,7 @@ Currently, Jan only supports official extensions, which can be directly download
For now, you can import a third-party extension at your own risk by following the steps below:
1. Navigate to **Settings** > **Extensions** > Click Select under **Manual Installation**.
1. Navigate to **Settings** > **Extensions** > Click Select under **Manual Installation**.
2. Then, the ~/jan/extensions/extensions.json file will be updated automatically.
:::caution
@ -154,7 +156,6 @@ You need to prepare the extension file in .tgz format to install the **non-defau
:::
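Since manual installation expects a `.tgz` archive, one way to produce it from a local extension folder is with `npm pack`. This is a hedged sketch: the folder name `my-jan-extension` is a placeholder, and it assumes the folder contains a valid `package.json`.
```bash
# Hypothetical example: package a local extension folder into a .tgz archive
# so it can be selected under Settings > Extensions > Manual Installation.
cd my-jan-extension
npm pack   # produces my-jan-extension-<version>.tgz in the current directory
```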
:::info[Assistance and Support]
If you have questions, please join our [Discord community](https://discord.gg/Dt7MxDyNNZ) for support, updates, and discussions.

View File

@ -5,19 +5,20 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 3
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
hardware requirements,
Nvidia,
AMD,
CPU,
GPU
GPU,
]
---

View File

@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 1
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---

View File

@ -5,20 +5,22 @@ description: Get started quickly with Jan, a ChatGPT-alternative that runs on yo
sidebar_position: 2
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
quickstart,
getting started,
using AI model,
installation
installation,
]
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import download from './asset/download.gif';
@ -26,7 +28,9 @@ import gpt from './asset/gpt.gif';
import model from './asset/model.gif';
To get started quickly with Jan, follow the steps below:
## Step 1: Get Jan Desktop
<Tabs>
<TabItem value="mac" label = "Mac" default>
@ -38,7 +42,7 @@ To get started quickly with Jan, follow the steps below:
#### Stable Releases
To download stable releases, go to [Jan.ai](https://jan.ai/) > select **Download for Mac**.
To download stable releases, go to [Jan](https://jan.ai/) > select **Download for Mac**.
The download should be available as a `.dmg`.
@ -78,7 +82,7 @@ Homebrew package installation is currently limited to **Apple Silicon Macs**, wi
#### Stable Releases
To download stable releases, go to [Jan.ai](https://jan.ai/) > select **Download for Windows**.
To download stable releases, go to [Jan](https://jan.ai/) > select **Download for Windows**.
The download should be available as a `.exe` file.
@ -121,7 +125,7 @@ If you are stuck in a broken build, go to the [Broken Build](/guides/common-erro
#### Stable Releases
To download stable releases, go to [Jan.ai](https://jan.ai/) > select **Download for Linux**.
To download stable releases, go to [Jan](https://jan.ai/) > select **Download for Linux**.
The download should be available as a `.AppImage` file or a `.deb` file.
@ -176,9 +180,11 @@ If you are stuck in a broken build, go to the [Broken Build](/guides/common-erro
:::
</TabItem>
</Tabs>
## Step 2: Download a Model
Jan provides a variety of local AI models tailored to different needs, ready for download. These models are installed and run directly on the user's device.
1. Go to the **Hub**.
@ -199,6 +205,7 @@ Ensure you select the appropriate model size by balancing performance, cost, and
:::
## Step 3: Connect to ChatGPT (Optional)
Jan also provides access to remote models hosted on external servers, requiring an API key for connectivity. For example, to use the ChatGPT model with Jan, you must input your API key by following these steps:
1. Go to the **Thread** tab.
@ -214,6 +221,7 @@ Jan also provides access to remote models hosted on external servers, requiring
<br/>
## Step 4: Chat with Models
After downloading and configuring your model, you can immediately use it in the **Thread** tab.
<br/>
@ -225,30 +233,39 @@ After downloading and configuring your model, you can immediately use it in the
<br/>
## Best Practices
This section outlines best practices for developers, analysts, and AI enthusiasts to enhance their experience with Jan when running AI locally on their computers. Implementing these practices will optimize the performance of your AI models.
### Follow the Quickstart Guide
The quickstart guide above is designed to facilitate a quick setup process. It provides a clear instruction and simple steps to get you up and running with Jan.ai quickly. Even, if you are inexperienced in AI.
The quickstart guide above is designed to facilitate a quick setup process. It provides clear instructions and simple steps to get you up and running with Jan quickly, even if you are inexperienced in AI.
### Select the Right Models
Jan offers a range of pre-configured AI models suited to different purposes. You should identify the one that aligns with your objectives. Factors to consider include:
- Capabilities
- Accuracy
- Processing Speed
:::note
- Some of these factors also depend on your hardware; please see the Hardware Requirements page.
- Choosing the right model is important to achieve the best performance.
:::
:::
### Setting up Jan
Ensure that you familiarize yourself with the Jan application. Jan offers advanced settings that you can adjust. These settings may influence how your AI behaves locally. Please see the [Advanced Settings](./guides/advanced) article for a complete list of Jan's configurations and instructions on how to configure them.
Ensure that you familiarize yourself with the Jan application. Jan offers advanced settings that you can adjust. These settings may influence how your AI behaves locally. Please see the [Advanced Settings](/guides/advanced) article for a complete list of Jan's configurations and instructions on how to configure them.
### Integrations
Jan can work with many different systems and tools. Whether you are incorporating Jan.ai with any open-source LLM provider or other tools, it is important to understand the integration capabilities and limitations.
Jan can work with many different systems and tools. Whether you are incorporating Jan with any open-source LLM provider or other tools, it is important to understand the integration capabilities and limitations.
### Mastering the Prompt Engineering
Prompt engineering is an important aspect of working with AI models to generate the desired outputs. Mastering this skill can significantly enhance the performance and responses of the AI. Below are some prompt engineering tips:
- Ask the model to adopt a persona
- Be specific and detailed to get more specific answers
- Provide examples, reference text, or context at the beginning
@ -256,4 +273,5 @@ Prompt engineering is an important aspect when dealing with AI models to generat
- Use certain keywords and phrases
## Pre-configured Models
To see the full list of Jan's pre-configured models, please see our official GitHub [here](https://github.com/janhq/jan).

View File

@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 1
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---

View File

@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 12
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---

View File

@ -6,18 +6,20 @@ hide_table_of_contents: true
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
## Jan Device Compatible
Jan is compatible with macOS, Windows, and Linux, making it accessible for a wide range of users. This compatibility allows users to leverage Jan's AI tools effectively, regardless of their device or operating system.
:::note
@ -28,18 +30,21 @@ import DocCardList from "@theme/DocCardList";
<DocCardList />
## Install Server-Side
To install Jan from source, follow the steps below:
### Pre-requisites
Before proceeding with the installation of Jan from source, ensure that the following software versions are installed on your system:
- Node.js version 20.0.0 or higher
- Yarn version 1.22.0 or higher
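You can quickly confirm these prerequisites from a terminal; the version numbers shown in the comments are simply the minimums stated above.
```bash
# Verify the prerequisites before building Jan from source.
node --version   # expect v20.0.0 or higher
yarn --version   # expect 1.22.0 or higher
```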
### Install Jan Development Build
1. Clone the Jan repository from GitHub by using the following command:
```bash
git clone https://github.com/janhq/jan
git checkout DESIRED_BRANCH
@ -47,6 +52,7 @@ cd jan
```
2. Install the required dependencies by using the following Yarn command:
```bash
yarn install
@ -61,19 +67,24 @@ yarn build:uikit
```
3. Run the development server.
```bash
yarn dev
```
This will start the development server and open the desktop app. During this step, you may encounter notifications about installing base plugins. Simply click **OK** and **Next** to continue.
### Install Jan Production Build
1. Clone the Jan repository from GitHub by using the following command:
```bash
git clone https://github.com/janhq/jan
cd jan
```
2. Install the required dependencies by using the following Yarn command:
```bash
yarn install
@ -88,6 +99,7 @@ yarn build:uikit
```
3. Run the production server.
```bash
yarn
```

View File

@ -6,23 +6,26 @@ hide_table_of_contents: true
description: A step-by-step guide to install Jan using Docker.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Install on Docker,
Docker,
Helm,
]
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
### Pre-requisites
### Pre-requisites
Ensure that your system meets the following requirements:
- Linux or WSL2 Docker
- Latest Docker Engine and Docker Compose
@ -31,100 +34,101 @@ import TabItem from '@theme/TabItem';
- `nvidia-driver`
- `nvidia-docker2`
:::note
- If you have not installed Docker, follow the instructions [here](https://docs.docker.com/engine/install/ubuntu/).
- If you have not installed the required file for GPU support, follow the instructions [here](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).
:::
:::
### Run Jan in Docker
You can run Jan in Docker with two methods:
1. Run Jan in CPU mode
2. Run Jan in GPU mode
<Tabs groupId = "ldocker_type">
<TabItem value="docker_cpu" label = "CPU">
### Run Jan in Docker
You can run Jan in Docker with two methods:
1. Run Jan in CPU mode
2. Run Jan in GPU mode
<Tabs groupId = "ldocker_type">
<TabItem value="docker_cpu" label = "CPU">
To run Jan in Docker CPU mode, by using the following code:
To run Jan in Docker CPU mode, use the following commands:
```bash
# cpu mode with default file system
docker compose --profile cpu-fs up -d
```bash
# cpu mode with default file system
docker compose --profile cpu-fs up -d
# cpu mode with S3 file system
docker compose --profile cpu-s3fs up -d
```
# cpu mode with S3 file system
docker compose --profile cpu-s3fs up -d
```
</TabItem>
<TabItem value="docker_gpu" label = "GPU">
</TabItem>
<TabItem value="docker_gpu" label = "GPU">
To run Jan in Docker CPU mode, follow the steps below:
1. Check CUDA compatibility with your NVIDIA driver by running nvidia-smi and check the CUDA version in the output as shown below:
```sh
nvidia-smi
To run Jan in Docker GPU mode, follow the steps below:
1. Check CUDA compatibility with your NVIDIA driver by running `nvidia-smi` and checking the CUDA version in the output, as shown below:
```sh
nvidia-smi
# Output
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 531.18 Driver Version: 531.18 CUDA Version: 12.1 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA GeForce RTX 4070 Ti WDDM | 00000000:01:00.0 On | N/A |
| 0% 44C P8 16W / 285W| 1481MiB / 12282MiB | 2% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 1 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:02:00.0 Off | N/A |
| 0% 49C P8 14W / 120W| 0MiB / 6144MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 2 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:05:00.0 Off | N/A |
| 29% 38C P8 11W / 120W| 0MiB / 6144MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
# Output
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 531.18 Driver Version: 531.18 CUDA Version: 12.1 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA GeForce RTX 4070 Ti WDDM | 00000000:01:00.0 On | N/A |
| 0% 44C P8 16W / 285W| 1481MiB / 12282MiB | 2% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 1 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:02:00.0 Off | N/A |
| 0% 49C P8 14W / 120W| 0MiB / 6144MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 2 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:05:00.0 Off | N/A |
| 29% 38C P8 11W / 120W| 0MiB / 6144MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
```
2. Visit [NVIDIA NGC Catalog](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda/tags) and find the smallest minor version of image tag that matches your CUDA version (e.g., 12.1 -> 12.1.0)
3. Update the `Dockerfile.gpu` line number 5 with the latest minor version of the image tag from step 2 (e.g. change `FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 AS base` to `FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 AS base`)
4. Run Jan in GPU mode by using the following command:
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
```
2. Visit [NVIDIA NGC Catalog](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda/tags) and find the smallest minor version of image tag that matches your CUDA version (e.g., 12.1 -> 12.1.0)
3. Update line 5 of `Dockerfile.gpu` with the image tag found in step 2 (e.g. change `FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 AS base` to `FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 AS base`)
4. Run Jan in GPU mode by using the following command:
```bash
# GPU mode with default file system
docker compose --profile gpu-fs up -d
```bash
# GPU mode with default file system
docker compose --profile gpu-fs up -d
# GPU mode with S3 file system
docker compose --profile gpu-s3fs up -d
```
# GPU mode with S3 file system
docker compose --profile gpu-s3fs up -d
```
</TabItem>
</Tabs>
### Docker Compose Profile and Environment
The available Docker Compose profile and the environment variables listed below:
</TabItem>
</Tabs>
### Docker Compose Profile and Environment
The available Docker Compose profiles and environment variables are listed below:
#### Docker Compose Profile
#### Docker Compose Profile
| Profile | Description |
|-----------|-------------------------------------------|
| cpu-fs | Run Jan in CPU mode with default file system |
| cpu-s3fs | Run Jan in CPU mode with S3 file system |
| gpu-fs | Run Jan in GPU mode with default file system |
| gpu-s3fs | Run Jan in GPU mode with S3 file system |
| Profile | Description |
|-----------|-------------------------------------------|
| cpu-fs | Run Jan in CPU mode with default file system |
| cpu-s3fs | Run Jan in CPU mode with S3 file system |
| gpu-fs | Run Jan in GPU mode with default file system |
| gpu-s3fs | Run Jan in GPU mode with S3 file system |
#### Environment Variables
| Environment Variable | Description |
|--------------------------|------------------------------------------------------------|
| S3_BUCKET_NAME | S3 bucket name - leave blank for default file system |
| AWS_ACCESS_KEY_ID | AWS access key ID - leave blank for default file system |
| AWS_SECRET_ACCESS_KEY | AWS secret access key - leave blank for default file system|
| AWS_ENDPOINT | AWS endpoint URL - leave blank for default file system |
| AWS_REGION | AWS region - leave blank for default file system |
| API_BASE_URL | Jan Server URL, please modify it as your public ip address or domain name default http://localhost:1377 |
#### Environment Variables
| Environment Variable | Description |
|--------------------------|------------------------------------------------------------|
| S3_BUCKET_NAME | S3 bucket name - leave blank for default file system |
| AWS_ACCESS_KEY_ID | AWS access key ID - leave blank for default file system |
| AWS_SECRET_ACCESS_KEY | AWS secret access key - leave blank for default file system|
| AWS_ENDPOINT | AWS endpoint URL - leave blank for default file system |
| AWS_REGION | AWS region - leave blank for default file system |
| API_BASE_URL | Jan Server URL; set this to your public IP address or domain name (default: http://localhost:1377) |
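For example, a GPU run with an S3 file system might look like the sketch below. This is a hedged illustration: the bucket name, credentials, endpoint, region, and URL are placeholders, and it assumes the Compose file substitutes these variables from your shell environment.
```bash
# Example sketch: GPU mode with an S3 file system (all values are placeholders).
export S3_BUCKET_NAME=my-jan-bucket
export AWS_ACCESS_KEY_ID=YOUR_ACCESS_KEY_ID
export AWS_SECRET_ACCESS_KEY=YOUR_SECRET_ACCESS_KEY
export AWS_ENDPOINT=https://s3.us-east-1.amazonaws.com
export AWS_REGION=us-east-1
export API_BASE_URL=http://localhost:1377

# Start Jan using the matching Compose profile from the table above.
docker compose --profile gpu-s3fs up -d
```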
:::warning

View File

@ -6,14 +6,15 @@ hide_table_of_contents: true
description: A step-by-step guide to install Jan on your Linux.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Install on Linux,
Linux,
]

View File

@ -6,14 +6,15 @@ hide_table_of_contents: true
description: A step-by-step guide to install Jan on your Mac.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
MacOs,
Install on Mac,
Apple devices,

View File

@ -6,14 +6,15 @@ hide_table_of_contents: true
description: A step-by-step guide to install Jan on your Windows.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Windows 10,
Windows 11,
Install on Windows,

View File

@ -5,14 +5,15 @@ sidebar_position: 1
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
build extension,
]
---

View File

@ -1,21 +1,22 @@
---
title: CrewAI
sidebar_position: 19
slug: /integrations/crewai
description: A step-by-step guide on how to integrate Jan with CrewAI.
slug: /integrations/crewai
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Continue integration,
CrewAI integration,
CrewAI
CrewAI,
]
---

View File

@ -5,14 +5,15 @@ sidebar_position: 5
description: A step-by-step guide on how to integrate Jan with a Discord bot.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Discord integration,
Discord,
bot,
@ -38,32 +39,36 @@ pip install -r requirements.txt
```
### Step 3: Set the Environment
1. Create a copy of `.env.example`.
2. Change the name to `.env`.
3. Set the environment with the following options:
| Setting | Instructions |
| ------- | ------------ |
| `DISCORD_BOT_TOKEN` | Generate a new Discord application at [discord.com/developers/applications](https://discord.com/developers/applications), obtain a token from the Bot tab, and enable MESSAGE CONTENT INTENT. |
| `LLM` | For [Jan](https://jan.ai/), set to `local/openai/(MODEL_NAME)`, where `(MODEL_NAME)` is your loaded model's name. |
| `CUSTOM_SYSTEM_PROMPT` | Adjust the bot's behavior as needed. |
| `CUSTOM_DISCORD_STATUS` | Set a custom message for the bot's Discord profile. (Max 128 characters) |
| `ALLOWED_CHANNEL_IDS` | Enter Discord channel IDs where the bot can send messages, separated by commas. Leave blank to allow all channels. |
| `ALLOWED_ROLE_IDS` | Enter Discord role IDs allowed to use the bot, separated by commas. Leave blank to allow everyone. Including at least one role also disables DMs. |
| `MAX_IMAGES` | Max number of image attachments allowed per message when using a vision model. (Default: `5`) |
| `MAX_MESSAGES` | Max messages allowed in a reply chain. (Default: `20`) |
| `LOCAL_SERVER_URL` | URL of your local API server for LLMs starting with `local/`. (Default: `http://localhost:5000/v1`) |
| `LOCAL_API_KEY` | API key for your local API server with LLMs starting with `local/`. Usually safe to leave blank. |
| Setting | Instructions |
| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `DISCORD_BOT_TOKEN` | Generate a new Discord application at [discord.com/developers/applications](https://discord.com/developers/applications), obtain a token from the Bot tab, and enable MESSAGE CONTENT INTENT. |
| `LLM` | For [Jan](https://jan.ai/), set to `local/openai/(MODEL_NAME)`, where `(MODEL_NAME)` is your loaded model's name. |
| `CUSTOM_SYSTEM_PROMPT` | Adjust the bot's behavior as needed. |
| `CUSTOM_DISCORD_STATUS` | Set a custom message for the bot's Discord profile. (Max 128 characters) |
| `ALLOWED_CHANNEL_IDS` | Enter Discord channel IDs where the bot can send messages, separated by commas. Leave blank to allow all channels. |
| `ALLOWED_ROLE_IDS` | Enter Discord role IDs allowed to use the bot, separated by commas. Leave blank to allow everyone. Including at least one role also disables DMs. |
| `MAX_IMAGES` | Max number of image attachments allowed per message when using a vision model. (Default: `5`) |
| `MAX_MESSAGES` | Max messages allowed in a reply chain. (Default: `20`) |
| `LOCAL_SERVER_URL` | URL of your local API server for LLMs starting with `local/`. (Default: `http://localhost:5000/v1`) |
| `LOCAL_API_KEY` | API key for your local API server with LLMs starting with `local/`. Usually safe to leave blank. |
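For illustration, a filled-in `.env` could look like the sketch below. The token, model name, status text, and IDs are placeholders, and the exact set of keys should follow your copy of `.env.example`.
```bash
# Hypothetical .env sketch based on the settings table above; values are placeholders.
DISCORD_BOT_TOKEN=your-discord-bot-token
LLM=local/openai/mistral-ins-7b-q4          # replace with the model name loaded in Jan
CUSTOM_SYSTEM_PROMPT="You are a helpful, concise assistant."
CUSTOM_DISCORD_STATUS="Chatting locally with Jan"
ALLOWED_CHANNEL_IDS=                        # blank allows all channels
ALLOWED_ROLE_IDS=                           # blank allows everyone
MAX_IMAGES=5
MAX_MESSAGES=20
LOCAL_SERVER_URL=http://localhost:5000/v1
LOCAL_API_KEY=
```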
### Step 4: Invite the Bot
Invite the bot to your Discord server using the following URL:
```
https://discord.com/api/oauth2/authorize?client_id=(CLIENT_ID)&permissions=412317273088&scope=bot
```
:::note
Replace `CLIENT_ID` with your Discord application's client ID from the OAuth2 tab
:::
### Step 5: Run the bot
Run the bot by using the following command in your command prompt:

View File

@ -5,31 +5,20 @@ sidebar_position: 6
description: A step-by-step guide on how to integrate Jan with Open Interpreter.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Open Interpreter integration,
Open Interpreter,
]
---
<head>
<title>Open Interpreter</title>
<meta name="description" content="A step-by-step guide on how to integrate Jan with Open Interpreter. Learn how to install Open Interpreter, configure Jan's local API server, and set up the Open Interpreter environment for seamless interaction with Jan."/>
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, Open Interpreter integration"/>
<meta property="og:title" content="Open Interpreter"/>
<meta property="og:description" content="A step-by-step guide on how to integrate Jan with Open Interpreter. Learn how to install Open Interpreter, configure Jan's local API server, and set up the Open Interpreter environment for seamless interaction with Jan."/>
<meta property="og:url" content="https://jan.ai/guides/integration/open-interpreter"/>
<meta name="twitter:card" content="summary"/>
<meta name="twitter:title" content="Open Interpreter"/>
<meta name="twitter:description" content="A step-by-step guide on how to integrate Jan with Open Interpreter. Learn how to install Open Interpreter, configure Jan's local API server, and set up the Open Interpreter environment for seamless interaction with Jan."/>
</head>
## Integrate Open Interpreter with Jan
[Open Interpreter](https://github.com/KillianLucas/open-interpreter/) lets LLMs run code (Python, JavaScript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running `interpreter` after installing. To integrate Open Interpreter with Jan, follow the steps below:
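As a minimal sketch, installation and first launch look like this; it assumes Open Interpreter is installed from PyPI under the package name `open-interpreter` and that Python and pip are already available.
```bash
# Install Open Interpreter and start its terminal chat interface.
pip install open-interpreter
interpreter
```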

View File

@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 1
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---

View File

@ -4,33 +4,23 @@ slug: /integrations/raycast
sidebar_position: 17
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
raycast integration,
Raycast,
]
description: A step-by-step guide on how to integrate Jan with Raycast.
---
<head>
<title>Raycast</title>
<meta name="description" content="A step-by-step guide on how to integrate Jan with Raycast. Learn how to download the TinyLlama model, clone and run the program, and use Jan models in Raycast for enhanced productivity."/>
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, Raycast integration"/>
<meta property="og:title" content="Raycast"/>
<meta property="og:description" content="A step-by-step guide on how to integrate Jan with Raycast. Learn how to download the TinyLlama model, clone and run the program, and use Jan models in Raycast for enhanced productivity."/>
<meta property="og:url" content="https://jan.ai/guides/integration/raycast"/>
<meta name="twitter:card" content="summary"/>
<meta name="twitter:title" content="Raycast"/>
<meta name="twitter:description" content="A step-by-step guide on how to integrate Jan with Raycast. Learn how to download the TinyLlama model, clone and run the program, and use Jan models in Raycast for enhanced productivity."/>
</head>
## Integrate Raycast with Jan
[Raycast](https://www.raycast.com/) is a productivity tool designed for macOS that enhances workflow efficiency by providing quick access to various tasks and functionalities through a keyboard-driven interface. To integrate Raycast with Jan, follow the steps below:
### Step 1: Download the TinyLlama Model

View File

@ -5,31 +5,20 @@ sidebar_position: 2
description: A step-by-step guide on how to integrate Jan with OpenRouter.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
OpenRouter integration,
OpenRouter
OpenRouter,
]
---
<head>
<title>OpenRouter</title>
<meta name="description" content="A step-by-step guide on how to integrate Jan with OpenRouter. Learn how to configure the OpenRouter API key, set up model configuration, and start using remote Large Language Models (LLMs) through OpenRouter with Jan."/>
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, OpenRouter integration"/>
<meta property="og:title" content="OpenRouter"/>
<meta property="og:description" content="A step-by-step guide on how to integrate Jan with OpenRouter. Learn how to configure the OpenRouter API key, set up model configuration, and start using remote Large Language Models (LLMs) through OpenRouter with Jan."/>
<meta property="og:url" content="https://jan.ai/guides/integration/openrouter"/>
<meta name="twitter:card" content="summary"/>
<meta name="twitter:title" content="OpenRouter"/>
<meta name="twitter:description" content="A step-by-step guide on how to integrate Jan with OpenRouter. Learn how to configure the OpenRouter API key, set up model configuration, and start using remote Large Language Models (LLMs) through OpenRouter with Jan."/>
</head>
## Integrate OpenRouter with Jan
[OpenRouter](https://openrouter.ai/docs#quick-start) is a tool that gathers AI models. Developers can utilize its API to engage with diverse large language models, generative image models, and generative 3D object models.
@ -46,10 +35,11 @@ To connect Jan with OpenRouter for accessing remote Large Language Models (LLMs)
1. Go to the directory `~/jan/models`.
2. Make a new folder called `openrouter-(modelname)`, like `openrouter-dolphin-mixtral-8x7b`.
3. Inside the folder, create a `model.json` file with the following settings:
- Set the `id` property to the model id obtained from OpenRouter.
- Set the `format` property to `api`.
- Set the `engine` property to `openai`.
- Ensure the `state` property is set to `ready`.
- Set the `id` property to the model id obtained from OpenRouter.
- Set the `format` property to `api`.
- Set the `engine` property to `openai`.
- Ensure the `state` property is set to `ready`.
```json title="~/jan/models/openrouter-dolphin-mixtral-8x7b/model.json"
{
@ -75,9 +65,10 @@ To connect Jan with OpenRouter for accessing remote Large Language Models (LLMs)
```
:::note
For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
:::
### Step 3 : Start the Model
1. Restart Jan and go to the **Hub**.
2. Find your model and click on the **Use** button.

View File

@ -5,14 +5,15 @@ slug: /integrations/unsloth
description: A step-by-step guide on how to integrate Jan with Unsloth.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Continue integration,
Unsloth integration,
]

View File

@ -5,20 +5,20 @@ slug: /integrations/continue
description: A step-by-step guide on how to integrate Jan with Continue and VS Code.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Continue integration,
VSCode integration,
]
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
@ -43,6 +43,7 @@ To set up Continue for use with Jan's Local Server, you must activate the Jan AP
3. Press the **Start Server** button
### Step 3: Configure Continue to Use Jan's Local Server
1. Go to the `~/.continue` directory.
<Tabs>
@ -76,11 +77,12 @@ To set up Continue for use with Jan's Local Server, you must activate the Jan AP
]
}
```
2. Ensure the file has the following configurations:
- Ensure `openai` is selected as the `provider`.
- Match the `model` with the one enabled in the Jan API Server.
- Set `apiBase` to `http://localhost:1337`.
- Leave the `apiKey` field to `EMPTY`.
- Ensure `openai` is selected as the `provider`.
- Match the `model` with the one enabled in the Jan API Server.
- Set `apiBase` to `http://localhost:1337`.
- Leave the `apiKey` field to `EMPTY`.
### Step 4: Ensure the Model in Use Is Activated in Jan
@ -94,8 +96,7 @@ To set up Continue for use with Jan's Local Server, you must activate the Jan AP
1. Highlight a code snippet and press `Command + Shift + M` to open the Left Panel.
2. Select Jan at the bottom and ask a question about the code, for example, `Explain this code`.
### 2. Editing the code with the help of a large language model
### 2. Editing the code with the help of a large language model
1. Select a code snippet and use `Command + Shift + L`.
2. Enter your editing request, such as `Add comments to this code`.

View File

@ -5,14 +5,15 @@ sidebar_position: 13
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
build extension,
]
---

View File

@ -5,27 +5,29 @@ sidebar_position: 1
description: A step-by-step guide on how to customize the LlamaCPP extension.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Llama CPP integration,
LlamaCPP Extension,
]
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
## Overview
[Nitro](https://github.com/janhq/nitro) is an inference server on top of [llama.cpp](https://github.com/ggerganov/llama.cpp). It provides an OpenAI-compatible API, queue, & scaling.
## LlamaCPP Extension
:::note
Nitro is the default AI engine downloaded with Jan. There is no additional setup needed.
:::
@ -66,18 +68,19 @@ In this guide, we'll walk you through the process of customizing your engine set
The table below describes the parameters in the `nitro.json` file.
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `ctx_len` | **Integer** | Typically set at `2048`, `ctx_len` provides ample context for model operations like `GPT-3.5`. (*Maximum*: `4096`, *Minimum*: `1`) |
| `ngl` | **Integer** | Defaulted at `100`, `ngl` determines GPU layer usage. |
| `cpu_threads` | **Integer** | Determines CPU inference threads, limited by hardware and OS. (*Maximum* determined by system) |
| `cont_batching` | **Integer** | Controls continuous batching, enhancing throughput for LLM inference. |
| `embedding` | **Integer** | Enables embedding utilization for tasks like document-enhanced chat in RAG-based applications. |
| Parameter | Type | Description |
| --------------- | ----------- | ---------------------------------------------------------------------------------------------------------------------------------- |
| `ctx_len` | **Integer** | Typically set at `2048`, `ctx_len` provides ample context for model operations like `GPT-3.5`. (_Maximum_: `4096`, _Minimum_: `1`) |
| `ngl` | **Integer** | Defaulted at `100`, `ngl` determines GPU layer usage. |
| `cpu_threads` | **Integer** | Determines CPU inference threads, limited by hardware and OS. (_Maximum_ determined by system) |
| `cont_batching` | **Integer** | Controls continuous batching, enhancing throughput for LLM inference. |
| `embedding` | **Integer** | Enables embedding utilization for tasks like document-enhanced chat in RAG-based applications. |
:::tip
- By default, the value of `ngl` is set to 100, which indicates that it will offload all. If you wish to offload only 50% of the GPU, you can set `ngl` to 15 because most models on Mistral or Llama are around ~ 30 layers.
- To utilize the embedding feature, include the JSON parameter `"embedding": true`. It will enable Nitro to process inferences with embedding capabilities. Please refer to the [Embedding in the Nitro documentation](https://nitro.jan.ai/features/embed) for a more detailed explanation.
- To utilize the continuous batching feature for boosting throughput and minimizing latency in large language model (LLM) inference, include `cont_batching: true`. For details, please refer to the [Continuous Batching in the Nitro documentation](https://nitro.jan.ai/features/cont-batch).
- By default, `ngl` is set to 100, which means all layers are offloaded to the GPU. If you wish to offload only about 50% of the layers, you can set `ngl` to 15, since most Mistral or Llama models have around 30 layers.
- To utilize the embedding feature, include the JSON parameter `"embedding": true`. It will enable Nitro to process inferences with embedding capabilities. Please refer to the [Embedding in the Nitro documentation](https://nitro.jan.ai/features/embed) for a more detailed explanation.
- To utilize the continuous batching feature for boosting throughput and minimizing latency in large language model (LLM) inference, include `cont_batching: true`. For details, please refer to the [Continuous Batching in the Nitro documentation](https://nitro.jan.ai/features/cont-batch).
:::
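Putting the parameters above together, a `nitro.json` might look like the hypothetical sketch below. The file path and the values shown are illustrative assumptions, not recommendations; adjust both for your own installation and hardware.
```bash
# Hypothetical sketch: write a nitro.json combining the parameters described above.
# The path and values are placeholders; tune ctx_len, ngl, and cpu_threads for your system.
cat > ~/jan/engines/nitro.json <<'EOF'
{
  "ctx_len": 2048,
  "ngl": 100,
  "cpu_threads": 4,
  "cont_batching": false,
  "embedding": true
}
EOF
```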

View File

@ -5,14 +5,15 @@ sidebar_position: 8
description: A step-by-step guide on how to integrate Jan with LM Studio.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
LM Studio integration,
]
---
@ -20,6 +21,7 @@ keywords:
## Integrate LM Studio with Jan
[LM Studio](https://lmstudio.ai/) enables you to explore, download, and run local Large Language Models (LLMs). You can integrate Jan with LM Studio using two methods:
1. Integrate the LM Studio server with Jan UI
2. Migrate your downloaded model from LM Studio to Jan.
@ -29,6 +31,7 @@ To integrate LM Studio with Jan follow the steps below:
In this guide, we're going to show you how to connect Jan to [LM Studio](https://lmstudio.ai/) using the second method. We'll use the [Phi 2 - GGUF](https://huggingface.co/TheBloke/phi-2-GGUF) model from Hugging Face as our example.
:::
### Step 1: Server Setup
1. Access the `Local Inference Server` within LM Studio.
@ -81,10 +84,10 @@ Replace `(port)` with your chosen port number. The default is 1234.
"engine": "openai"
}
```
:::note
For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
:::
:::note
For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
:::
### Step 3: Starting the Model
@ -108,7 +111,6 @@ For more details regarding the `model.json` settings and parameters fields, ple
Starting from version 0.4.7, Jan enables direct import of LM Studio models using absolute file paths.
### Step 1: Locating the Model Path
1. Access `My Models` in LM Studio and locate your model folder.

View File

@ -5,14 +5,15 @@ sidebar_position: 4
description: A step-by-step guide on how to integrate Jan with Ollama.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Ollama integration,
]
---
@ -20,6 +21,7 @@ keywords:
## Integrate Ollama with Jan
Ollama provides large language models that you can run locally. There are two methods to integrate Ollama with Jan:
1. Integrate Ollama server with Jan.
2. Migrate the downloaded model from Ollama to Jan.
@ -40,7 +42,6 @@ ollama run <model-name>
3. According to the [Ollama documentation on OpenAI compatibility](https://github.com/ollama/ollama/blob/main/docs/openai.md), you can connect to the Ollama server using the web address `http://localhost:11434/v1/chat/completions`. To do this, change the `openai.json` file in the `~/jan/engines` folder to add the Ollama server's full web address:
```json title="~/jan/engines/openai.json"
{
"full_url": "http://localhost:11434/v1/chat/completions"
@ -52,10 +53,11 @@ ollama run <model-name>
1. Navigate to the `~/jan/models` folder.
2. Create a folder named `(ollama-modelname)`, for example, `ollama-phi-2`.
3. Create a `model.json` file inside the folder including the following configurations:
- Set the `id` property to the model name as Ollama model name.
- Set the `format` property to `api`.
- Set the `engine` property to `openai`.
- Set the `state` property to `ready`.
- Set the `id` property to the Ollama model name.
- Set the `format` property to `api`.
- Set the `engine` property to `openai`.
- Set the `state` property to `ready`.
```json title="~/jan/models/llama2/model.json"
{
@ -80,10 +82,12 @@ ollama run <model-name>
"engine": "openai"
}
```
:::note
For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
:::
### Step 3: Start the Model
1. Restart Jan and navigate to the **Hub**.
2. Locate your model and click the **Use** button.

View File

@ -5,14 +5,15 @@ sidebar_position: 2
description: A step-by-step guide on how to customize the TensorRT-LLM extension.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
TensorRT-LLM Extension,
TensorRT,
tensorRT,
@ -21,12 +22,15 @@ keywords:
---
## Overview
Users with Nvidia GPUs can get **20-40% faster token speeds** than the LlamaCPP engine on their laptops or desktops by using [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM). The greater implication is that you are running FP16, which is also more accurate than quantized models.
## TensorRT-LLM Extension
This guide walks you through how to install Jan's official [TensorRT-LLM Extension](https://github.com/janhq/nitro-tensorrt-llm). This extension uses [Nitro-TensorRT-LLM](https://github.com/janhq/nitro-tensorrt-llm) as the AI engine, instead of the default [Nitro-Llama-CPP](https://github.com/janhq/nitro). It includes an efficient C++ server to natively execute the [TRT-LLM C++ runtime](https://nvidia.github.io/TensorRT-LLM/gpt_runtime.html). It also comes with additional features and performance improvements like OpenAI compatibility, tokenizer improvements, and queues.
:::warning
- This feature is only available for Windows users. Linux is coming soon.
- Additionally, we have only prebuilt a few demo models. You can always build your desired models directly on your machine. For more information, please see [here](#build-your-own-tensorrt-models).
@ -54,6 +58,7 @@ ls ~\jan\extensions\@janhq\tensorrt-llm-extension\dist\bin
```
### Step 2: Download a Compatible Model
TensorRT-LLM can only run models in `TensorRT` format. These models, aka "TensorRT Engines", are prebuilt specifically for each target OS+GPU architecture.
We offer a handful of precompiled models for Ampere and Ada cards that you can immediately download and play with:
@ -91,6 +96,7 @@ coming soon
For now, the model versions are pinned to the extension versions.
### Uninstall Extension
To uninstall the extension, follow the steps below:
1. Quit the app.

View File

@ -1,20 +1,24 @@
---
title: TensorRT-LLM
slug: /guides/providers/tensorrt-llm
keywords:
[
Jan,
Rethink the Computer,
local AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language models,
TensorRT-LLM Extension,
TensorRT,
tensorRT,
extension,
]
---
<head>
<title>TensorRT-LLM - Jan Guides</title>
<meta name="description" content="Learn how to install Jan's official TensorRT-LLM Extension, which offers 20-40% faster token speeds on Nvidia GPUs. Understand the requirements, installation steps, and troubleshooting tips."/>
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, TensorRT-LLM, Nvidia GPU, TensorRT, extension, installation, troubleshooting"/>
<meta property="og:title" content="TensorRT-LLM - Jan Guides"/>
<meta property="og:description" content="Learn how to install Jan's official TensorRT-LLM Extension, which offers 20-40% faster token speeds on Nvidia GPUs. Understand the requirements, installation steps, and troubleshooting tips."/>
<meta property="og:url" content="https://jan.ai/guides/providers/tensorrt-llm"/>
<meta name="twitter:card" content="summary"/>
<meta name="twitter:title" content="TensorRT-LLM - Jan Guides"/>
<meta name="twitter:description" content="Learn how to install Jan's official TensorRT-LLM Extension, which offers 20-40% faster token speeds on Nvidia GPUs. Understand the requirements, installation steps, and troubleshooting tips."/>
</head>
:::info
TensorRT-LLM support was launched in 0.4.9, and should be regarded as an Experimental feature.
@ -29,6 +33,7 @@ Jan supports [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) as an altern
## What is TensorRT-LLM?
[TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) is a hardware-optimized LLM inference engine that compiles models to run extremely fast on Nvidia GPUs.
- Mainly used on Nvidia's Datacenter-grade GPUs like the H100s [to produce 10,000 tok/s](https://nvidia.github.io/TensorRT-LLM/blogs/H100vsA100.html).
- Can be used on Nvidia's workstation (e.g. [A6000](https://www.nvidia.com/en-us/design-visualization/rtx-6000/)) and consumer-grade GPUs (e.g. [RTX 4090](https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/))
@ -48,7 +53,6 @@ Jan supports [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) as an altern
:::
## Requirements
### Hardware
@ -59,11 +63,11 @@ Jan supports [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) as an altern
**Compatible GPUs**
| Architecture | Supported? | Consumer-grade | Workstation-grade |
| ------------ | --- | -------------- | ----------------- |
| Ada | ✅ | 4050 and above | RTX A2000 Ada |
| Ampere | ✅ | 3050 and above | A100 |
| Turing | ❌ | Not Supported | Not Supported |
| Architecture | Supported? | Consumer-grade | Workstation-grade |
| ------------ | ---------- | -------------- | ----------------- |
| Ada | ✅ | 4050 and above | RTX A2000 Ada |
| Ampere | ✅ | 3050 and above | A100 |
| Turing | ❌ | Not Supported | Not Supported |
:::info
@ -74,8 +78,8 @@ Please ping us in Discord's [#tensorrt-llm](https://discord.com/channels/1107178
### Software
- Jan v0.4.9+ or Jan v0.4.8-321+ (nightly)
- [Nvidia Driver v535+](https://jan.ai/guides/common-error/not-using-gpu/#1-ensure-gpu-mode-requirements)
- [CUDA Toolkit v12.2+](https://jan.ai/guides/common-error/not-using-gpu/#1-ensure-gpu-mode-requirements)
- [Nvidia Driver v535+](https://jan.ai/guides/common-error/not-using-gpu/#1-ensure-gpu-mode-requirements)
- [CUDA Toolkit v12.2+](https://jan.ai/guides/common-error/not-using-gpu/#1-ensure-gpu-mode-requirements)
## Getting Started
@ -91,6 +95,7 @@ You can check if files have been correctly downloaded:
ls ~\jan\extensions\@janhq\tensorrt-llm-extension\dist\bin
# Your Extension Folder should now include `nitro.exe`, among other `.dll` files needed to run TRT-LLM
```
:::
### Download a TensorRT-LLM Model
@ -100,12 +105,11 @@ Jan's Hub has a few pre-compiled TensorRT-LLM models that you can download, whic
- We automatically download the TensorRT-LLM Model Engine for your GPU architecture
- We have made a few 1.1b models available that can run even on laptop GPUs with 8 GB of VRAM
| Model | OS | Ada (40XX) | Ampere (30XX) | Description |
| ------------------- | ------- | ---------- | ------------- | --------------------------------------------------- |
| Llamacorn 1.1b | Windows | ✅ | ✅ | TinyLlama-1.1b, fine-tuned for usability |
| TinyJensen 1.1b | Windows | ✅ | ✅ | TinyLlama-1.1b, fine-tuned on Jensen Huang speeches |
| Mistral Instruct 7b | Windows | ✅ | ✅ | Mistral |
| Llamacorn 1.1b | Windows | ✅ | ✅ | TinyLlama-1.1b, fine-tuned for usability |
| TinyJensen 1.1b | Windows | ✅ | ✅ | TinyLlama-1.1b, fine-tuned on Jensen Huang speeches |
| Mistral Instruct 7b | Windows | ✅ | ✅ | Mistral |
### Importing Pre-built Models
@ -181,6 +185,7 @@ Note the `engine` is `nitro-tensorrt-llm`: this won't work without it!
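Concretely, the `model.json` of an imported TensorRT engine must carry an `engine` entry like the fragment below (all other fields are omitted here; this is only an illustration of that one line):

```json
{
  "engine": "nitro-tensorrt-llm"
}
```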
### Using a TensorRT-LLM Model
You can just select and use a TensorRT-LLM model from Jan's Thread interface.
- Jan will automatically start the TensorRT-LLM model engine in the background
- You may encounter a pop-up from Windows Security, asking for Nitro to allow public and private network access
@ -214,7 +219,6 @@ To manually build the artifacts needed to run the server and TensorRT-LLM, you c
3. Delete the entire Extensions folder.
4. Reopen the app, only the default extensions should be restored.
## Build your own TensorRT models
:::info

View File

@ -5,14 +5,15 @@ sidebar_position: 14
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
build extension,
]
---

View File

@ -5,14 +5,15 @@ slug: /guides/engines/claude
description: A step-by-step guide on how to integrate Jan with Claude.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Claude integration,
claude,
]

View File

@ -17,18 +17,6 @@ keywords:
]
---
<head>
<title>Groq</title>
<meta name="description" content="Learn how to integrate Groq API with Jan for enhanced functionality. Follow step-by-step instructions to obtain Groq API credentials, configure Jan settings, enable Groq integration, and troubleshoot any issues."/>
<meta name="keywords" content="Groq API, Jan, Jan AI, ChatGPT alternative, conversational AI, large language model, integration, Groq integration, API integration"/>
<meta property="og:title" content="Groq"/>
<meta property="og:description" content="Learn how to integrate Groq API with Jan for enhanced functionality. Follow step-by-step instructions to obtain Groq API credentials, configure Jan settings, enable Groq integration, and troubleshoot any issues."/>
<meta property="og:url" content="https://jan.ai/guides/integration/groq"/>
<meta name="twitter:card" content="summary"/>
<meta name="twitter:title" content="Groq"/>
<meta name="twitter:description" content="Learn how to integrate Groq API with Jan for enhanced functionality. Follow step-by-step instructions to obtain Groq API credentials, configure Jan settings, enable Groq integration, and troubleshoot any issues."/>
</head>
## How to Integrate Groq API with Jan
This guide provides step-by-step instructions on integrating the Groq API with Jan, enabling users to leverage Groq's capabilities within Jan's conversational interface.

View File

@ -5,33 +5,23 @@ slug: /guides/engines/mistral
description: A step-by-step guide on how to integrate Jan with Mistral AI.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Mistral integration,
]
---
<head>
<title>Mistral AI</title>
<meta name="description" content="A step-by-step guide on how to integrate Jan with Mistral AI. Learn how to configure Mistral API keys, set up model configuration, and start the model in Jan for enhanced functionality."/>
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, Mistral integration"/>
<meta property="og:title" content="Mistral AI"/>
<meta property="og:description" content="A step-by-step guide on how to integrate Jan with Mistral AI. Learn how to configure Mistral API keys, set up model configuration, and start the model in Jan for enhanced functionality."/>
<meta property="og:url" content="https://jan.ai/guides/integration/mistral-ai"/>
<meta name="twitter:card" content="summary"/>
<meta name="twitter:title" content="Mistral AI"/>
<meta name="twitter:description" content="A step-by-step guide on how to integrate Jan with Mistral AI. Learn how to configure Mistral API keys, set up model configuration, and start the model in Jan for enhanced functionality."/>
</head>
## How to Integrate Mistral AI with Jan
[Mistral AI](https://docs.mistral.ai/) provides two ways to use their Large Language Models (LLM):
1. API
2. Open-source models on Hugging Face.
@ -58,10 +48,10 @@ This tutorial demonstrates integrating Mistral AI with Jan using the API.
1. Navigate to `~/jan/models`.
2. Create a folder named `mistral-(modelname)` (e.g., `mistral-tiny`).
3. Inside, create a `model.json` file with these settings:
- Set `id` to the Mistral AI model ID.
- Set `format` to `api`.
- Set `engine` to `openai`.
- Set `state` to `ready`.
- Set `id` to the Mistral AI model ID.
- Set `format` to `api`.
- Set `engine` to `openai`.
- Set `state` to `ready`.
```json title="~/jan/models/mistral-tiny/model.json"
{
@ -85,13 +75,13 @@ This tutorial demonstrates integrating Mistral AI with Jan using the API.
},
"engine": "openai"
}
```
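Reduced to just the fields called out in the steps above, a `model.json` for `mistral-tiny` has roughly the shape sketched below; the full example above also carries the `settings` and `parameters` fields, which you should keep:

```json
{
  "id": "mistral-tiny",
  "format": "api",
  "engine": "openai",
  "state": "ready"
}
```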
:::note
- For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
- For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
- Mistral AI offers various endpoints. Refer to their [endpoint documentation](https://docs.mistral.ai/platform/endpoints/) to select the one that fits your requirements. Here, we use the `mistral-tiny` model as an example.
:::
:::
### Step 3: Start the Model

View File

@ -5,14 +5,15 @@ slug: /guides/engines/openai
description: A step-by-step guide on how to integrate Jan with Azure OpenAI.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
integration,
Azure OpenAI Service,
]
@ -42,10 +43,11 @@ The [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/ai-services/o
1. Go to the `~/jan/models` directory.
2. Make a new folder called `(your-deployment-name)`, for example `gpt-35-hieu-jan`.
3. Create a `model.json` file inside the folder with the specified configurations:
- Match the `id` property with both the folder name and your deployment name.
- Set the `format` property as `api`.
- Choose `openai` for the `engine` property.
- Set the `state` property as `ready`.
- Match the `id` property with both the folder name and your deployment name.
- Set the `format` property as `api`.
- Choose `openai` for the `engine` property.
- Set the `state` property as `ready`.
```json title="~/jan/models/gpt-35-hieu-jan/model.json"
{
@ -72,7 +74,7 @@ The [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/ai-services/o
```
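For orientation, the minimal shape of that `model.json` is sketched below using the example deployment name from this guide; the complete file above also includes the `settings` and `parameters` fields:

```json
{
  "id": "gpt-35-hieu-jan",
  "format": "api",
  "engine": "openai",
  "state": "ready"
}
```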
:::note
For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
For more details regarding the `model.json` settings and parameters fields, please see [here](/guides/engines/remote-server/#modeljson).
:::
### Step 3: Start the Model

View File

@ -5,32 +5,21 @@ slug: /guides/engines/remote-server
description: A step-by-step guide on how to set up Jan to connect with any remote or local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
import-models-manually,
remote server,
OAI compatible,
]
---
<head>
<title>Remote Server Integration</title>
<meta name="description" content="A step-by-step guide on how to set up Jan to connect with any remote or local API server. Learn how to configure Jan as a client to connect with OpenAI Platform or any OpenAI-compatible endpoint, and how to start models using Jan's Hub."/>
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, import-models-manually, remote server, OAI compatible"/>
<meta property="og:title" content="Remote Server Integration"/>
<meta property="og:description" content="A step-by-step guide on how to set up Jan to connect with any remote or local API server. Learn how to configure Jan as a client to connect with OpenAI Platform or any OpenAI-compatible endpoint, and how to start models using Jan's Hub."/>
<meta property="og:url" content="https://jan.ai/guides/remote-server-integration"/>
<meta name="twitter:card" content="summary"/>
<meta name="twitter:title" content="Remote Server Integration"/>
<meta name="twitter:description" content="A step-by-step guide on how to set up Jan to connect with any remote or local API server. Learn how to configure Jan as a client to connect with OpenAI Platform or any OpenAI-compatible endpoint, and how to start models using Jan's Hub."/>
</head>
This guide will show you how to configure Jan as a client and point it to any remote or local (self-hosted) API server.
## OpenAI Platform Configuration
@ -41,7 +30,6 @@ This guide will show you how to configure Jan as a client and point it to any re
2. In this folder, add a `model.json` file whose `id` matches the folder name, with `format` set to `api`, `engine` set to `openai`, and `state` set to `ready`.
```json title="~/jan/models/gpt-3.5-turbo-16k/model.json"
{
"sources": [
@ -70,26 +58,26 @@ This guide will show you how to configure Jan as a client and point it to any re
The `model.json` file is used to set up your local models.
:::note
- If you've set up your model's configuration in `nitro.json`, please note that `model.json` can overwrite the settings.
- When using OpenAI models like GPT-3.5 and GPT-4, you can use the default settings in the `model.json` file.
:::
:::
There are two important fields in `model.json` that you need to set up:
#### Settings
This is where you set your engine configurations. There are two important fields that you need to define for your local models:
| Term | Description |
|-------------------|---------------------------------------------------------|
| `ctx_len` | Defined based on the model's context size. |
| Term | Description |
| ----------------- | --------------------------------------------------------------------- |
| `ctx_len` | Defined based on the model's context size. |
| `prompt_template` | Defined based on the model's trained template (e.g., ChatML, Alpaca). |
To set up the `prompt_template` based on your model, follow the steps below:
1. Visit [Hugging Face](https://huggingface.co/), an open-source machine learning platform.
2. Find the current model that you're using (e.g., [Gemma 7b it](https://huggingface.co/google/gemma-7b-it)).
3. Review the text and identify the template.
To set up the `prompt_template` based on your model, follow the steps below: 1. Visit [Hugging Face](https://huggingface.co/), an open-source machine learning platform. 2. Find the current model that you're using (e.g., [Gemma 7b it](https://huggingface.co/google/gemma-7b-it)). 3. Review the text and identify the template.
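As a rough sketch, a `settings` block for a model trained on the ChatML template might look like the fragment below; the `ctx_len` value and template string are illustrative and must be taken from your model's card, and the `{system_message}` and `{prompt}` placeholders are filled in at runtime:

```json
"settings": {
  "ctx_len": 4096,
  "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
}
```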
#### Parameters
`parameters` contains the adjustable settings that affect how your model operates or processes the data.
The fields in `parameters` are typically general and can be the same across models. An example is provided below:
@ -104,12 +92,11 @@ The fields in `parameters` are typically general and can be the same across mode
}
```
:::tip
- You can find the list of available models in the [OpenAI Platform](https://platform.openai.com/docs/models/overview).
- The `id` property needs to match the model name in the list.
- For example, if you want to use the [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo), you must set the `id` property to `gpt-4-1106-preview`.
- You can find the list of available models in the [OpenAI Platform](https://platform.openai.com/docs/models/overview).
- The `id` property needs to match the model name in the list.
- For example, if you want to use the [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo), you must set the `id` property to `gpt-4-1106-preview`.
:::
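In other words, the model folder for GPT-4 Turbo would contain a `model.json` whose `id` line reads as follows (a fragment, shown only to illustrate the mapping):

```json
{
  "id": "gpt-4-1106-preview"
}
```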
@ -166,11 +153,11 @@ Please note that currently, the code that supports any OpenAI-compatible endpoin
1. In `~/jan/models`, create a folder named `mistral-ins-7b-q4`.
2. In this folder, add a `model.json` file and ensure the following configuration:
- `id` matching folder name.
- `Format` set to `api`.
- `Engine` set to `openai`
- `State` set to `ready`.
- `id` matching folder name.
- `Format` set to `api`.
- `Engine` set to `openai`
- `State` set to `ready`.
```json title="~/jan/models/mistral-ins-7b-q4/model.json"
{
@ -194,8 +181,8 @@ Please note that currently, the code that supports any OpenAI-compatible endpoin
},
"engine": "openai"
}
```
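Stripped down to the fields listed above, the file has roughly this shape (the full example also defines the `settings` and `parameters` blocks):

```json
{
  "id": "mistral-ins-7b-q4",
  "format": "api",
  "engine": "openai",
  "state": "ready"
}
```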
### 3. Start the Model
1. Restart Jan and navigate to the **Hub**.

View File

@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 21
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
troubleshooting,
error codes,
broken build,
@ -27,6 +28,7 @@ import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
## Broken Build
To resolve the issue where Jan is stuck in a broken build after installation, follow the steps below.
<Tabs>
@ -155,6 +157,7 @@ To resolve the issue where your Jan is stuck in a broken build after installatio
Download the latest version of Jan from our [homepage](https://jan.ai/).
</TabItem>
</Tabs>
By following these steps, you can cleanly uninstall and reinstall Jan, ensuring a smooth and error-free experience with the latest version.
@ -166,6 +169,7 @@ Before reinstalling Jan, ensure it's completely removed from all shared spaces i
:::
## Troubleshooting NVIDIA GPU
To resolve issues where the Jan app does not utilize the NVIDIA GPU on Windows and Linux systems, follow the steps below.
#### 1. Ensure GPU Mode Requirements
@ -268,28 +272,33 @@ If GPU mode isn't enabled by default:
"gpu_highest_vram": "0"
}
```
#### 4. Restart Jan
Restart the Jan application to make sure it works.
##### Troubleshooting Tips
- Ensure `nvidia_driver` and `cuda` fields indicate installed software.
- If `gpus` field is empty or lacks your GPU, check NVIDIA driver and CUDA toolkit installations.
- For further assistance, share the `settings.json` file.
- Ensure `nvidia_driver` and `cuda` fields indicate installed software.
- If the `gpus` field is empty or lacks your GPU, check the NVIDIA driver and CUDA toolkit installations (see the commands after this list).
- For further assistance, share the `settings.json` file.
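The following standard NVIDIA commands (not Jan-specific) confirm what the system currently sees; the reported versions should meet the requirements above:

```bash
# Shows detected GPUs and the installed driver version
nvidia-smi
# Shows the CUDA toolkit (compiler) version, if the toolkit is installed
nvcc --version
```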
#### Tested Configurations
- **Windows 11 Pro 64-bit:**
- GPU: NVIDIA GeForce RTX 4070ti
- CUDA: 12.2
- NVIDIA driver: 531.18 (Bare metal)
- **Ubuntu 22.04 LTS:**
- GPU: NVIDIA GeForce RTX 4070ti
- CUDA: 12.2
- NVIDIA driver: 545 (Bare metal)
- **Ubuntu 20.04 LTS:**
- GPU: NVIDIA GeForce GTX 1660ti
- CUDA: 12.1
- NVIDIA driver: 535 (Proxmox VM passthrough GPU)
@ -306,8 +315,11 @@ Restart Jan application to make sure it works.
3. Seek assistance in [Jan Discord](https://discord.gg/mY69SZaMaC).
## How to Get Error Logs
To get the error logs of your Jan application, follow the steps below:
#### Jan Application
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
@ -315,20 +327,25 @@ To get the error logs of your Jan application, follow the steps below:
5. Click the **logs** folder.
#### Jan UI
1. Open your Unix or Linux terminal.
2. Use the following commands to get the recent 50 lines of log files:
```bash
tail -n 50 ~/jan/logs/app.log
```
#### Jan API Server
1. Open your Unix or Linux terminal.
2. Use the following commands to get the recent 50 lines of log files:
```bash
tail -n 50 ~/jan/logs/server.log
```
:::warning
Ensure to redact any private or sensitive information when sharing logs or error details.
:::
@ -338,6 +355,7 @@ If you have any questions or are looking for support, please don't hesitate to c
:::
## Permission Denied
When running Jan, you might encounter the following error message:
```
@ -355,13 +373,17 @@ This error mainly caused by permission problem during installation. To resolve t
```sh
sudo chown -R $(whoami) ~/.npm
```
:::note
- This command ensures that the necessary permissions are granted for Jan installation, resolving the encountered error.
- If you have any questions or are looking for support, please don't hesitate to contact us via our [Discord community](https://discord.gg/Dt7MxDyNNZ) or create a new issue in our [GitHub repository](https://github.com/janhq/jan/issues/new/choose).
:::
:::
## Something's Amiss
When you start a chat with a model and encounter a Something's Amiss error, here's how to resolve it:
1. Ensure your OS is up to date.
2. Choose a model smaller than 80% of your hardware's V/RAM. For example, on an 8GB machine, opt for models smaller than 6GB.
3. Install the latest [Nightly release](/guides/quickstart/#nightly-releases) or [clear the application cache](/troubleshooting/#broken-build) when reinstalling Jan.
@ -410,6 +432,7 @@ If you have any questions or are looking for support, please don't hesitate to c
:::
## Undefined Issue
An `undefined issue` in Jan is caused by errors related to the Nitro tool or other internal processes. It can be resolved through the following steps:
1. Clear the Jan folder, then reopen the application to determine if the problem persists
@ -423,6 +446,7 @@ If you have any questions or are looking for support, please don't hesitate to c
:::
## Unexpected Token
The `Unexpected token` error when initiating a chat with OpenAI models is mainly caused by either your OpenAI API key or the location from which you access OpenAI. This issue can be solved through the following steps:
1. Obtain an OpenAI API key from [OpenAI's developer platform](https://platform.openai.com/) and integrate it into your application.

View File

@ -5,59 +5,46 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 11
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
Advanced Settings,
HTTPS Proxy,
SSL,
settings,
Jan settings
Jan settings,
]
---
<head>
<title>Advanced Settings</title>
<meta name="description" content="This guide will show you how to use the advanced settings in Jan. Learn about keyboard shortcuts, experimental mode, GPU acceleration, Jan data folder access, HTTPS proxy configuration, SSL certificate handling, log clearing, and resetting to factory default settings."/>
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model, advanced-settings"/>
<meta property="og:title" content="Advanced Settings"/>
<meta property="og:description" content="This guide will show you how to use the advanced settings in Jan. Learn about keyboard shortcuts, experimental mode, GPU acceleration, Jan data folder access, HTTPS proxy configuration, SSL certificate handling, log clearing, and resetting to factory default settings."/>
<meta property="og:image" content="https://jan.ai/img/advanced-settings.png"/>
<meta property="og:url" content="https://jan.ai/advanced-settings"/>
<meta name="twitter:card" content="summary_large_image"/>
<meta name="twitter:title" content="Advanced Settings"/>
<meta name="twitter:description" content="This guide will show you how to use the advanced settings in Jan. Learn about keyboard shortcuts, experimental mode, GPU acceleration, Jan data folder access, HTTPS proxy configuration, SSL certificate handling, log clearing, and resetting to factory default settings."/>
<meta name="twitter:image" content="https://jan.ai/img/advanced-settings.png"/>
</head>
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
This guide will show you how to use the advanced settings in Jan.
## Access the Advanced Settings
To access Jan's advanced settings, follow the steps below:
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
4. You can configure the following settings:
| Feature | Description |
|---------------------------|-----------------------------------------------------------------------------------------------------------------------|
| **Keyboard Shortcuts** | Keyboard shortcuts speed up your workflow. For a quick overview of useful keyboard shortcuts, refer to the list [below](advanced-settings.mdx#keyboard-shortcuts). |
| **Experimental Mode** | Enables experimental features that may be unstable. |
| **GPU Acceleration** | Enables the boosting of your model performance by using your GPU devices for acceleration. |
| **Jan Data Folder** | Location for messages, model configurations, and user data. Changeable to a different location. |
| **HTTPS Proxy & Ignore SSL Certificate** | Use a proxy server for internet connections and ignore SSL certificates for self-signed certificates. Please check out the guide on how to set up your own HTTPS proxy server [here](advanced-settings.mdx#https-proxy). |
| **Clear Logs** | Removes all logs from the Jan application. |
| **Reset To Factory Default** | Resets the application to its original state, deleting all data including model customizations and conversation history. |
| Feature | Description |
| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| **Keyboard Shortcuts** | Keyboard shortcuts speed up your workflow. For a quick overview of useful keyboard shortcuts, refer to the list [below](advanced-settings.mdx#keyboard-shortcuts). |
| **Experimental Mode** | Enables experimental features that may be unstable. |
| **GPU Acceleration** | Enables the boosting of your model performance by using your GPU devices for acceleration. |
| **Jan Data Folder** | Location for messages, model configurations, and user data. Changeable to a different location. |
| **HTTPS Proxy & Ignore SSL Certificate** | Use a proxy server for internet connections and ignore SSL certificates for self-signed certificates. Please check out the guide on how to set up your own HTTPS proxy server [here](advanced-settings.mdx#https-proxy). |
| **Clear Logs** | Removes all logs from the Jan application. |
| **Reset To Factory Default** | Resets the application to its original state, deleting all data including model customizations and conversation history. |
## Keyboard Shortcuts
@ -78,62 +65,72 @@ Here are some of the keyboard shortcuts that you can use in Jan.
</TabItem>
<TabItem value="windows" label = "Windows" default>
| Combination | Description |
| --------------- | -------------------------------------------------- |
| `Ctrl E` | Show list your models |
| `Ctrl K` | Show list navigation pages |
| `Ctrl B` | Toggle collapsible left panel |
| `Ctrl ,` | Navigate to setting page |
| `Enter` | Send a message |
| `Shift + Enter` | Insert new line in input box |
| Combination | Description |
| --------------- | ---------------------------------------------------------- |
| `Ctrl E` | Show list your models |
| `Ctrl K` | Show list navigation pages |
| `Ctrl B` | Toggle collapsible left panel |
| `Ctrl ,` | Navigate to setting page |
| `Enter` | Send a message |
| `Shift + Enter` | Insert new line in input box |
| `Arrow Up` | Navigate to the previous option (within the search dialog) |
| `Arrow Down` | Navigate to the next option (within the search dialog) |
</TabItem>
<TabItem value="linux" label = "Linux" default>
| Combination | Description |
| --------------- | -------------------------------------------------- |
| `Ctrl E` | Show list your models |
| `Ctrl K` | Show list navigation pages |
| `Ctrl B` | Toggle collapsible left panel |
| `Ctrl ,` | Navigate to setting page |
| `Enter` | Send a message |
| `Shift + Enter` | Insert new line in input box |
| Combination | Description |
| --------------- | ---------------------------------------------------------- |
| `Ctrl E` | Show list your models |
| `Ctrl K` | Show list navigation pages |
| `Ctrl B` | Toggle collapsible left panel |
| `Ctrl ,` | Navigate to setting page |
| `Enter` | Send a message |
| `Shift + Enter` | Insert new line in input box |
| `Arrow Up` | Navigate to the previous option (within the search dialog) |
| `Arrow Down` | Navigate to the next option (within the search dialog) |
</TabItem>
</Tabs>
:::note
The keyboard shortcuts are customizable.
:::
## Enable the Experimental Mode
To try out new features that are still in the testing phase, follow the steps below:
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
4. On the **Experimental Mode** click the slider to enable.
## Enable the GPU Acceleration
To enhance your model performance, follow the steps below:
:::warning
Ensure that you have read the [troubleshooting guide](/troubleshooting/#troubleshooting-nvidia-gpu) for further assistance.
:::
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
4. On the **GPU Acceleration** click the slider to enable.
## Access the Jan Data Folder
To access the folder where messages, model configurations and user data are stored, follow the steps below:
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
4. On the **Jan Data Folder** click the **folder icon (📂)** to access the data or the **pencil icon (✏️)** to change the folder where you keep your data.
## HTTPS Proxy
An HTTPS proxy encrypts data between your browser and the internet, making it hard for outsiders to intercept or read it. It also helps you maintain your privacy and security while letting you bypass regional restrictions on the internet.
:::note
@ -142,7 +139,9 @@ HTTPS Proxy encrypts data between your browser and the internet, making it hard
- HTTPS Proxy does not affect the remote model usage.
:::
### Setting Up Your Own HTTPS Proxy Server
This guide provides a simple overview of setting up an HTTPS proxy server using **Squid**, a widely used open-source proxy software.
:::note
@ -150,19 +149,23 @@ Other software options are also available depending on your requirements.
:::
#### Step 1: Choosing a Server
1. First, you need to choose a server to host your proxy server.
:::note
We recommend using a well-known cloud provider service like:
:::note
We recommend using a well-known cloud provider service like:
- Amazon AWS
- Google Cloud
- Microsoft Azure
- Digital Ocean
:::
:::
2. Ensure that your server has a public IP address and is accessible from the internet.
#### Step 2: Installing Squid
Install **Squid** using the following commands:
```bash
sudo apt-get update
sudo apt-get install squid
@ -219,6 +222,7 @@ sudo systemctl restart squid
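For illustration only, the access-control portion of `/etc/squid/squid.conf` for a small private proxy might look like the sketch below; the port and client subnet are placeholders you must adapt to your own setup:

```
# Listen on Squid's default port
http_port 3128
# Only clients from this (placeholder) subnet may use the proxy
acl trusted_clients src 203.0.113.0/24
http_access allow trusted_clients
# Everyone else is refused
http_access deny all
```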
:::tip
Tips for securing your proxy:
- **Firewall rules**: Ensure that only intended users or IP addresses can connect to your proxy server. This can be achieved by setting up appropriate firewall rules (see the example after this list).
- **Regular updates**: Keep your server and proxy software updated to ensure that you are protected against known vulnerabilities.
- **Monitoring and logging**: Monitor your proxy server for unusual activity and enable logging to keep track of the traffic passing through your proxy.
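As an example of the firewall point above, on an Ubuntu host with `ufw` you could restrict Squid's port to a single trusted client (the IP address and port are placeholders):

```bash
# Allow a single trusted client IP to reach the proxy port
sudo ufw allow from 203.0.113.10 to any port 3128 proto tcp
# Reject the port for all other sources
sudo ufw deny 3128/tcp
```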
@ -228,33 +232,39 @@ Tips for Secure Your Proxy:
### Setting Up Jan to Use Your HTTPS Proxy
Once you have your HTTPS proxy server set up, you can configure Jan to use it.
1. Navigate to **Settings** > **Advanced Settings**.
2. On the **HTTPS Proxy** click the slider to enable.
3. Input your domain in the blank field.
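The value is the full address of your proxy; with a hypothetical host, port, and credentials it would look like:

```
http://username:password@proxy.example.com:3128
```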
## Ignore SSL Certificate
To allow self-signed or unverified certificates, follow the steps below:
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
4. On the **Ignore SSL Certificates** click the slider to enable.
## Clear Logs
To clear all logs on your Jan app, follow the steps below:
:::warning
This feature clears all the data in your **Jan Data Folder**.
:::
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.
4. On the **Clear Logs**, click the **Clear** button.
## Reset To Factory Default
To reset the Jan app to its original state, follow the steps below:
:::danger[Remember!]
This irreversible action is only recommended if the application is corrupted.
:::
1. Navigate to the main dashboard.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
3. Under the **Settings screen**, click the **Advanced Settings**.

View File

@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 6
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
data folder,
source folder,
Jan data,

View File

@ -5,14 +5,15 @@ description: A step-by-step guide to start Jan Local Server.
sidebar_position: 10
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
local server,
start server,
api endpoint,
@ -22,21 +23,25 @@ keywords:
Jan provides a built-in API server that can be used as a drop-in, local replacement for OpenAI's API. This guide will walk you through how to start the local server and use it to make requests to it.
## Step 1: Set the Local Server
To start the local server, follow the steps below:
1. Navigate to the Jan main menu dashboard.
2. Click the corresponding icon on the bottom left side of your screen.
3. Select the model you want to use under the Model Settings screen to set the LLM for your local server.
4. Configure the server settings as follows:
| Feature | Description | Default Setting |
|-------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------|
| Local Server Address | By default, Jan is only accessible on the same computer it's running on, using the address 127.0.0.1. You can change this to 0.0.0.0 to let other devices on your local network access it. However, this is less secure than just allowing access from the same computer. | `localhost (127.0.0.1)` |
| Port | Jan runs on port 1337 by default. The port can be changed to any other port number as needed. | `1337` |
| Cross-Origin Resource Sharing (CORS) | Manages resource access from external domains. Enabled for security by default but can be disabled if needed. | Enabled |
| Verbose Server Logs | Provides extensive details about server activities as the local server runs, displayed at the center of the screen. | Not specified (implied enabled) |
| Feature | Description | Default Setting |
| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------- |
| Local Server Address | By default, Jan is only accessible on the same computer it's running on, using the address 127.0.0.1. You can change this to 0.0.0.0 to let other devices on your local network access it. However, this is less secure than just allowing access from the same computer. | `localhost (127.0.0.1)` |
| Port | Jan runs on port 1337 by default. The port can be changed to any other port number as needed. | `1337` |
| Cross-Origin Resource Sharing (CORS) | Manages resource access from external domains. Enabled for security by default but can be disabled if needed. | Enabled |
| Verbose Server Logs | Provides extensive details about server activities as the local server runs, displayed at the center of the screen. | Not specified (implied enabled) |
## Step 2: Start and Use the Built-in API Server
Once you have set the server settings, you can start the server by following the steps below:
1. Click the **Start Server** button on the top left of your screen.
:::note
@ -49,6 +54,7 @@ When the server starts, you'll see a message like `Server listening at http://12
4. In this example, we will show you how it works using the `Chat` endpoint.
5. Click the **Try it out** button.
6. The Chat endpoint has the following `cURL request example` when the local server is running a `tinyllama-1.1b` model:
```json
{
"messages": [
@ -74,7 +80,9 @@ When the server starts, you'll see a message like `Server listening at http://12
}
'
```
7. The endpoint returns the following `JSON response body`:
```json
{
"choices": [

View File

@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 8
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
manage assistants,
assistants,
]

View File

@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 7
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
models,
remote models,
local models,

View File

@ -5,14 +5,15 @@ description: Manage your interaction with AI locally.
sidebar_position: 9
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
threads,
chat history,
thread history,
@ -22,7 +23,6 @@ keywords:
Jan provides a straightforward and private solution for managing your threads with AI on your own device. As you interact with AI using Jan, you'll accumulate a history of threads.
Jan offers easy tools to organize, delete, or review your past threads with AI. This guide will show you how to keep your threads private and well-organized.
### View Thread History
To view your thread history, follow the steps below:
1. Navigate to the main dashboard.
@ -49,14 +49,12 @@ Jan offers easy tools to organize, delete, or review your past threads with AI.
This will delete all messages in the thread while keeping the thread settings.
:::
### Delete Threads History
To delete a thread, follow the steps below:
1. Navigate to the Thread that you want to delete.
2. Click on the **three dots (⋮)** in the Thread section.
3. Select the **Delete Thread** button.
:::note
This will delete all messages and the thread settings.
:::

View File

@ -5,14 +5,15 @@ description: Jan Docs | Jan is a ChatGPT-alternative that runs on your own compu
sidebar_position: 5
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---

View File

@ -3,31 +3,18 @@ title: Hardware Examples
description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
keywords:
[
Jan AI,
Jan,
ChatGPT alternative,
Rethink the Computer,
local AI,
private AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language model,
large language models,
]
---
<head>
<title>Hardware Examples</title>
<meta charset="utf-8" />
<meta name="description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server. Add your own hardware examples to this page by creating a new file in the `docs/docs/hardware/examples` directory." />
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, local AI, private AI, conversational AI, no-subscription fee, large language model" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/guides/hardware-examples" />
<meta property="og:title" content="Hardware Examples" />
<meta property="og:description" content="Jan is a ChatGPT-alternative that runs on your own computer, with a local API server. Add your own hardware examples to this page by creating a new file in the `docs/docs/hardware/examples` directory." />
<meta property="og:url" content="https://jan.ai/guides/hardware-examples" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image-hardware-examples.png" />
</head>
## Add your own example
Add your own examples to this page by creating a new file in the `docs/docs/hardware/examples` directory.

View File

@ -2,20 +2,6 @@
title: GPUs and VRAM
---
<head>
<title>Understanding GPUs and VRAM for AI and Gaming</title>
<meta charSet="utf-8" />
<meta name="description" content="Explore the world of GPUs and VRAM, learn their importance in gaming, AI, machine learning, and more. Understand how to connect a GPU to a motherboard and choose the right graphics card for your needs." />
<meta name="keywords" content="GPU, VRAM, graphics card, gaming, AI, machine learning, CUDA, NVIDIA, AMD, PCIe, VRAM vs RAM, choosing GPU" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/guides/gpus-and-vram" />
<meta property="og:title" content="Understanding GPUs and VRAM for AI and Gaming" />
<meta property="og:description" content="Explore the world of GPUs and VRAM, learn their importance in gaming, AI, machine learning, and more. Understand how to connect a GPU to a motherboard and choose the right graphics card for your needs." />
<meta property="og:url" content="https://jan.ai/guides/gpus-and-vram" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image.png" />
</head>
## What Is a GPU?
A Graphics Card, or GPU (Graphics Processing Unit), is a fundamental component in modern computing. Think of it as the powerhouse behind rendering the stunning visuals you see on your screen. Similar to the motherboard in your computer, the graphics card is a printed circuit board. However, it's not just a passive piece of hardware; it's a sophisticated device equipped with essential components like fans, onboard RAM, a dedicated memory controller, BIOS, and various other features. If you want to learn more about GPUs then read here to [Understand the architecture of a GPU.](https://medium.com/codex/understanding-the-architecture-of-a-gpu-d5d2d2e8978b)

View File

@ -2,18 +2,6 @@
title: Cloud vs. Self-hosting Your AI
---
<head>
<title>Cloud vs. Self-hosting Your AI</title>
<meta name="description" content="Explore the pros and cons of renting AI services from the cloud versus self-hosting, including cost comparisons, business considerations, and conclusions about the best approach for different scenarios."/>
<meta name="keywords" content="Cloud AI, Self-hosted AI, AI cost comparison, AI business considerations, AI deployment, cloud vs self-hosting AI"/>
<meta property="og:title" content="Cloud vs. Self-hosting Your AI"/>
<meta property="og:description" content="Explore the pros and cons of renting AI services from the cloud versus self-hosting, including cost comparisons, business considerations, and conclusions about the best approach for different scenarios."/>
<meta property="og:url" content="https://jan.ai/articles/cloud-vs-self-hosting-your-ai"/>
<meta name="twitter:card" content="summary"/>
<meta name="twitter:title" content="Cloud vs. Self-hosting Your AI"/>
<meta name="twitter:description" content="Explore the pros and cons of renting AI services from the cloud versus self-hosting, including cost comparisons, business considerations, and conclusions about the best approach for different scenarios."/>
</head>
The choice of how to run your AI - on GPU cloud services, on-prem, or just using an API provider - involves various trade-offs. The following is a naive exploration of the pros and cons of renting vs self-hosting.
## Cost Comparison

View File

@ -2,20 +2,6 @@
title: GPU vs CPU What's the Difference?
---
<head>
<title>GPU vs CPU What's the Difference?</title>
<meta name="description" content="Explore the differences between CPU and GPU in terms of function, processing, design, and best-suited applications."/>
<meta name="keywords" content="CPU, GPU, CPU vs GPU, Central Processing Unit, Graphics Processing Unit, CPU vs GPU differences"/>
<meta property="og:title" content="GPU vs CPU What's the Difference?"/>
<meta property="og:description" content="Explore the differences between CPU and GPU in terms of function, processing, design, and best-suited applications."/>
<meta property="og:image" content="https://media.discordapp.net/attachments/964896173401976932/1157998193741660222/CPU-vs-GPU-rendering.png?ex=651aa55b&is=651953db&hm=a22c80ed108a0d25106a20aa25236f7d0fa74167a50788194470f57ce7f4a6ca&=&width=807&height=426"/>
<meta property="og:url" content="https://jan.ai/articles/gpu-vs-cpu-differences"/>
<meta name="twitter:card" content="summary_large_image"/>
<meta name="twitter:title" content="GPU vs CPU What's the Difference?"/>
<meta name="twitter:description" content="Explore the differences between CPU and GPU in terms of function, processing, design, and best-suited applications."/>
<meta name="twitter:image" content="https://media.discordapp.net/attachments/964896173401976932/1157998193741660222/CPU-vs-GPU-rendering.png?ex=651aa55b&is=651953db&hm=a22c80ed108a0d25106a20aa25236f7d0fa74167a50788194470f57ce7f4a6ca&=&width=807&height=426"/>
</head>
## CPU vs. GPU
| | CPU | GPU |

View File

@ -2,20 +2,6 @@
title: Recommended AI Hardware by Budget
---
<head>
<title>Recommended AI Hardware Builds by Budget</title>
<meta charSet="utf-8" />
<meta name="description" content="Explore recommended AI hardware builds for entry-level, mid-range, and high-end budgets. Find the perfect balance of performance and cost for your AI and machine learning projects." />
<meta name="keywords" content="AI hardware, budget PC build, entry-level AI PC, mid-range AI PC, high-end AI PC, GPU for AI, AI PC build, machine learning hardware" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/guides/recommended-ai-hardware-by-budget" />
<meta property="og:title" content="Recommended AI Hardware Builds by Budget" />
<meta property="og:description" content="Explore recommended AI hardware builds for entry-level, mid-range, and high-end budgets. Find the perfect balance of performance and cost for your AI and machine learning projects." />
<meta property="og:url" content="https://jan.ai/guides/recommended-ai-hardware-by-budget" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image.png" />
</head>
> :warning: **Warning:** Do your own research before any purchase. Jan is not liable for compatibility, performance or other issues. Products can become outdated quickly.
## Entry-level PC Build at $1000

View File

@ -2,20 +2,6 @@
title: Selecting AI Hardware
---
<head>
<title>Selecting AI Hardware</title>
<meta charset="utf-8" />
<meta name="description" content="Guidance on selecting optimal AI hardware, including GPUs, CPUs, RAM, and motherboards for running Large Language Models (LLMs) efficiently. Explore factors like VRAM, CUDA compatibility, and unified memory architecture to build a powerful AI setup." />
<meta name="keywords" content="AI hardware, GPU for LLM, CPU for AI, RAM for machine learning, motherboard for AI, CUDA, VRAM, Unified Memory Architecture, M1, M2 Pro/Max, VRAM calculation" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/guides/selecting-ai-hardware" />
<meta property="og:title" content="Selecting AI Hardware" />
<meta property="og:description" content="Guidance on selecting optimal AI hardware, including GPUs, CPUs, RAM, and motherboards for running Large Language Models (LLMs) efficiently. Explore factors like VRAM, CUDA compatibility, and unified memory architecture to build a powerful AI setup." />
<meta property="og:url" content="https://jan.ai/guides/selecting-ai-hardware" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image-ai-hardware.png" />
</head>
When selecting a GPU for LLMs, remember that it's not just about the GPU itself. Consider the synergy with other components in your PC:
- **CPU**: To ensure efficient processing, pair your GPU with a powerful CPU. LLMs benefit from fast processors, so having a capable CPU is essential.

View File

@ -2,20 +2,6 @@
title: Recommended AI Hardware by Model
---
<head>
<title>Recommended AI Hardware by Model</title>
<meta charset="utf-8" />
<meta name="description" content="Explore the recommended AI hardware specifications for running Codellama models efficiently. Find RAM and VRAM requirements for different parameter sizes and quantization formats, along with minimum recommended GPUs." />
<meta name="keywords" content="Codellama, AI hardware, recommended hardware, system requirements, RAM requirements, VRAM requirements, GPU recommendations, GPTQ, GGML, GGUF, parameter models, quantization formats" />
<meta name="twitter:card" content="summary" />
<link rel="canonical" href="https://jan.ai/recommended-ai-hardware-by-model" />
<meta property="og:title" content="Recommended AI Hardware by Model" />
<meta property="og:description" content="Explore the recommended AI hardware specifications for running Codellama models efficiently. Find RAM and VRAM requirements for different parameter sizes and quantization formats, along with minimum recommended GPUs." />
<meta property="og:url" content="https://jan.ai/recommended-ai-hardware-by-model" />
<meta property="og:type" content="article" />
<meta property="og:image" content="https://jan.ai/img/og-image-recommended-hardware.png" />
</head>
## Codellama 34b
### System Requirements:

Some files were not shown because too many files have changed in this diff.