[WIP] Updating Extensions & Engines guide
This commit is contained in:
parent
867a51cf61
commit
a90287b9c3
BIN
docs/src/pages/docs/_assets/extensions-01.png
Normal file
BIN
docs/src/pages/docs/_assets/extensions-01.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 172 KiB |
@ -24,17 +24,19 @@
|
||||
"threads": "Threads",
|
||||
"settings": "Settings",
|
||||
"inference-engines": {
|
||||
"title": "MODEL PROVIDER",
|
||||
"title": "ENGINES",
|
||||
"type": "separator"
|
||||
},
|
||||
"built-in": "Built-in Models",
|
||||
"remote-models": "Remote APIs",
|
||||
"built-in": "Local Engines",
|
||||
"remote-models": "Remote Engines",
|
||||
"extensions-separator": {
|
||||
"title": "EXTENSIONS",
|
||||
"type": "separator"
|
||||
},
|
||||
"extensions": "Overview",
|
||||
"installing-extension": "Installing an Extension",
|
||||
"extensions-settings": "Extensions Settings",
|
||||
"installing-extension": "Install Extension",
|
||||
"configure-extensions": "Configure Extensions",
|
||||
"troubleshooting-separator": {
|
||||
"title": "TROUBLESHOOTING",
|
||||
"type": "separator"
|
||||
|
||||
@ -42,9 +42,6 @@ This guide walks you through installing Jan's official [TensorRT-LLM Engine](htt
|
||||
### Step 1: Install TensorRT-Extension
|
||||
|
||||
1. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
|
||||
<br/>
|
||||

|
||||
<br/>
|
||||
2. Select the **TensorRT-LLM** under the **Model Provider** section.
|
||||
<br/>
|
||||

|
||||
|
||||
268
docs/src/pages/docs/configure-extensions.mdx
Normal file
268
docs/src/pages/docs/configure-extensions.mdx
Normal file
@ -0,0 +1,268 @@
|
||||
---
|
||||
title: Configure Extensions
|
||||
description: Learn about Jan's default extensions and explore how to configure them.
|
||||
[
|
||||
Jan,
|
||||
Customizable Intelligence, LLM,
|
||||
local AI,
|
||||
privacy focus,
|
||||
free and open source,
|
||||
private and offline,
|
||||
conversational AI,
|
||||
no-subscription fee,
|
||||
large language models,
|
||||
Jan Extensions,
|
||||
Extensions,
|
||||
]
|
||||
---
|
||||
|
||||
# Configure an Extension Settings
|
||||
|
||||
To configure an extension settings:
|
||||
|
||||
1. Navigate to the `~/jan/data/extensions`.
|
||||
2. Open the `extensions.json` file
|
||||
3. Edit the file with options including:
|
||||
|
||||
| Option | Description |
|
||||
| ---------------- | ----------------------------------- |
|
||||
| `_active` | Enable/disable the extension. |
|
||||
| `listeners` | Default listener setting. |
|
||||
| `origin` | Extension file path. |
|
||||
| `installOptions` | Version and metadata configuration. |
|
||||
| `name` | Extension name. |
|
||||
| `productName` | Extension display name. |
|
||||
| `version` | Extension version. |
|
||||
| `main` | Main file path. |
|
||||
| `description` | Extension description. |
|
||||
| `url` | Extension URL. |
|
||||
|
||||
```json title="~/jan/data/extensions/extensions.json"
|
||||
{
|
||||
"@janhq/conversational-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-conversational-extension-1.0.0.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/conversational-extension",
|
||||
"productName": "Conversational",
|
||||
"version": "1.0.0",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables conversations and state persistence via your filesystem",
|
||||
"url": "extension://@janhq/conversational-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-anthropic-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-anthropic-extension-1.0.2.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-anthropic-extension",
|
||||
"productName": "Anthropic Inference Engine",
|
||||
"version": "1.0.2",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Anthropic chat completion API calls",
|
||||
"url": "extension://@janhq/inference-anthropic-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-triton-trt-llm-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-triton-trt-llm-extension-1.0.0.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-triton-trt-llm-extension",
|
||||
"productName": "Triton-TRT-LLM Inference Engine",
|
||||
"version": "1.0.0",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Nvidia's TensorRT-LLM as an inference engine option",
|
||||
"url": "extension://@janhq/inference-triton-trt-llm-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-mistral-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-mistral-extension-1.0.1.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-mistral-extension",
|
||||
"productName": "MistralAI Inference Engine",
|
||||
"version": "1.0.1",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Mistral chat completion API calls",
|
||||
"url": "extension://@janhq/inference-mistral-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-martian-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-martian-extension-1.0.1.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-martian-extension",
|
||||
"productName": "Martian Inference Engine",
|
||||
"version": "1.0.1",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Martian chat completion API calls",
|
||||
"url": "extension://@janhq/inference-martian-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-openrouter-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-openrouter-extension-1.0.0.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-openrouter-extension",
|
||||
"productName": "OpenRouter Inference Engine",
|
||||
"version": "1.0.0",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Open Router chat completion API calls",
|
||||
"url": "extension://@janhq/inference-openrouter-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-nvidia-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-nvidia-extension-1.0.1.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-nvidia-extension",
|
||||
"productName": "NVIDIA NIM Inference Engine",
|
||||
"version": "1.0.1",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables NVIDIA chat completion API calls",
|
||||
"url": "extension://@janhq/inference-nvidia-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-groq-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-groq-extension-1.0.1.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-groq-extension",
|
||||
"productName": "Groq Inference Engine",
|
||||
"version": "1.0.1",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables fast Groq chat completion API calls",
|
||||
"url": "extension://@janhq/inference-groq-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-openai-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-openai-extension-1.0.2.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-openai-extension",
|
||||
"productName": "OpenAI Inference Engine",
|
||||
"version": "1.0.2",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables OpenAI chat completion API calls",
|
||||
"url": "extension://@janhq/inference-openai-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-cohere-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-cohere-extension-1.0.0.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-cohere-extension",
|
||||
"productName": "Cohere Inference Engine",
|
||||
"version": "1.0.0",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Cohere chat completion API calls",
|
||||
"url": "extension://@janhq/inference-cohere-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/model-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-model-extension-1.0.33.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/model-extension",
|
||||
"productName": "Model Management",
|
||||
"version": "1.0.33",
|
||||
"main": "dist/index.js",
|
||||
"description": "Model Management Extension provides model exploration and seamless downloads",
|
||||
"url": "extension://@janhq/model-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/monitoring-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-monitoring-extension-1.0.10.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/monitoring-extension",
|
||||
"productName": "System Monitoring",
|
||||
"version": "1.0.10",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension provides system health and OS level data",
|
||||
"url": "extension://@janhq/monitoring-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/assistant-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-assistant-extension-1.0.1.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/assistant-extension",
|
||||
"productName": "Jan Assistant",
|
||||
"version": "1.0.1",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables assistants, including Jan, a default assistant that can call all downloaded models",
|
||||
"url": "extension://@janhq/assistant-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/tensorrt-llm-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-tensorrt-llm-extension-0.0.3.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/tensorrt-llm-extension",
|
||||
"productName": "TensorRT-LLM Inference Engine",
|
||||
"version": "0.0.3",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Nvidia's TensorRT-LLM for the fastest GPU acceleration. See the [setup guide](https://jan.ai/guides/providers/tensorrt-llm/) for next steps.",
|
||||
"url": "extension://@janhq/tensorrt-llm-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-cortex-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-cortex-extension-1.0.15.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-cortex-extension",
|
||||
"productName": "Cortex Inference Engine",
|
||||
"version": "1.0.15",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension embeds cortex.cpp, a lightweight inference engine written in C++. See https://nitro.jan.ai.\nAdditional dependencies could be installed to run without Cuda Toolkit installation.",
|
||||
"url": "extension://@janhq/inference-cortex-extension/dist/index.js"
|
||||
}
|
||||
}
|
||||
```
|
||||
15
docs/src/pages/docs/extensions-settings/_meta.json
Normal file
15
docs/src/pages/docs/extensions-settings/_meta.json
Normal file
@ -0,0 +1,15 @@
|
||||
{
|
||||
"cortex": {
|
||||
"title": "Cortex",
|
||||
"href": "/docs/extensions-settings/cortex"
|
||||
},
|
||||
"model-management": {
|
||||
"title": "Model Management",
|
||||
"href": "/docs/extensions-settings/model-management"
|
||||
},
|
||||
"system-monitoring": {
|
||||
"title": "System Monitoring",
|
||||
"href": "/docs/extensions-settings/system-monitoring"
|
||||
}
|
||||
}
|
||||
|
||||
20
docs/src/pages/docs/extensions-settings/cortex.mdx
Normal file
20
docs/src/pages/docs/extensions-settings/cortex.mdx
Normal file
@ -0,0 +1,20 @@
|
||||
---
|
||||
title: Cortex
|
||||
description: Learn about Jan's default extensions and explore how to configure them.
|
||||
keywords:
|
||||
[
|
||||
Jan,
|
||||
Customizable Intelligence, LLM,
|
||||
local AI,
|
||||
privacy focus,
|
||||
free and open source,
|
||||
private and offline,
|
||||
conversational AI,
|
||||
no-subscription fee,
|
||||
large language models,
|
||||
Jan Extensions,
|
||||
Extensions,
|
||||
]
|
||||
---
|
||||
|
||||
|
||||
55
docs/src/pages/docs/extensions-settings/model-management.mdx
Normal file
55
docs/src/pages/docs/extensions-settings/model-management.mdx
Normal file
@ -0,0 +1,55 @@
|
||||
---
|
||||
title: Model Management
|
||||
description: Learn about Jan's default extensions and explore how to configure them.
|
||||
keywords:
|
||||
[
|
||||
Jan,
|
||||
Customizable Intelligence, LLM,
|
||||
local AI,
|
||||
privacy focus,
|
||||
free and open source,
|
||||
private and offline,
|
||||
conversational AI,
|
||||
no-subscription fee,
|
||||
large language models,
|
||||
Jan Extensions,
|
||||
Extensions,
|
||||
]
|
||||
---
|
||||
|
||||
import { Callout } from 'nextra/components'
|
||||
import { Settings, EllipsisVertical, Plus, FolderOpen, Pencil } from 'lucide-react'
|
||||
|
||||
# Model Management
|
||||
|
||||
Configure how Jan handles model downloads and management. Access these settings through **Settings** (<Settings width={16} height={16} style={{display:"inline"}}/>) > **Extensions** > **Model Management**:
|
||||
|
||||
## Hugging Face Access Token
|
||||
|
||||
Access tokens authenticate your identity to Hugging Face Hub for model downloads.
|
||||
|
||||
|
||||
|
||||
Enter your token in the format: `hf_************************`
|
||||
|
||||
<Callout type="info">
|
||||
Get your Hugging Face token from [Hugging Face Settings](https://huggingface.co/settings/tokens)
|
||||
</Callout>
|
||||
|
||||
## Log Management
|
||||
|
||||
### Enable App Logs
|
||||
Toggle to save logs locally on your computer for:
|
||||
- Debugging model issues
|
||||
- Crash reports
|
||||
- Download troubleshooting
|
||||
|
||||
### Log Cleaning Interval
|
||||
Set automatic log deletion interval in milliseconds:
|
||||
- Default: 120000 (2 minutes)
|
||||
- Controls disk space usage
|
||||
- Prevents log accumulation
|
||||
|
||||
<Callout type="warning">
|
||||
Keep your access tokens secure and never share them. Enable logs temporarily when needed for troubleshooting.
|
||||
</Callout>
|
||||
@ -0,0 +1,34 @@
|
||||
---
|
||||
title: System Monitoring
|
||||
description: Learn about Jan's default extensions and explore how to configure them.
|
||||
keywords:
|
||||
[
|
||||
Jan,
|
||||
Customizable Intelligence, LLM,
|
||||
local AI,
|
||||
privacy focus,
|
||||
free and open source,
|
||||
private and offline,
|
||||
conversational AI,
|
||||
no-subscription fee,
|
||||
large language models,
|
||||
Jan Extensions,
|
||||
Extensions,
|
||||
]
|
||||
---
|
||||
import { Callout } from 'nextra/components'
|
||||
import { Settings, EllipsisVertical } from 'lucide-react'
|
||||
|
||||
|
||||
## System Monitor
|
||||
The System Monitor extension now offers enhanced customization for app logging. Users can toggle the application logging feature on or off and set a custom interval for clearing the app logs. To configure the app log feature, follow these steps:
|
||||
1. Navigate to the main dashboard.
|
||||
2. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
|
||||
3. Under the **Core Extensions** section, select the **System Monitoring** extension.
|
||||
4. Use the **slider** to turn the app logging feature on or off.
|
||||
5. Specify the log cleaning interval in milliseconds.
|
||||
|
||||
<Callout type='info'>
|
||||
- You can clear the app logs manually by clicking the **Clear logs** button in the advanced settings.
|
||||
- There are no minimum or maximum intervals for setting the time. However, invalid inputs will default to `120000ms (2 minutes)`.
|
||||
</Callout>
|
||||
@ -17,346 +17,60 @@ description: Learn about Jan's default extensions and explore how to configure t
|
||||
---
|
||||
|
||||
import { Callout } from 'nextra/components'
|
||||
import { Settings, EllipsisVertical } from 'lucide-react'
|
||||
|
||||
|
||||
# Extensions Overview
|
||||
# Extensions
|
||||
|
||||
The current Jan Desktop Client has some default extensions built on this framework to enhance the user experience. There are two types of extensions:
|
||||
- Model Providers: These are extensions that enable Jan to support cloud-based models and provide the engine to support additional local models.
|
||||
- Core Extensions: These are built-in tools for managing and monitoring the Jan's system.
|
||||
## Overview
|
||||
Extensions are modular components that add functionality to Jan. Each extension is designed to handle specific features.
|
||||
|
||||
## List of Model Provider Extensions
|
||||
| Extension Name | Version | Description |
|
||||
|----------------|---------|-------------|
|
||||
| Anthropic | v1.0.2 | This extension enables Anthropic chat completion API calls |
|
||||
| Cohere | v1.0.0 | This extension enables Cohere chat completion API calls |
|
||||
| Groq | v1.0.1 | This extension enables fast Groq chat completion API calls |
|
||||
| Martian | v1.0.1 | This extension enables Martian chat completion API calls |
|
||||
| MistralAI | v1.0.1 | This extension enables Mistral chat completion API calls |
|
||||
| TensorRT-LLM | v0.0.3 | This extension enables Nvidia's TensorRT-LLM for the fastest GPU acceleration. See the setup guide for next steps |
|
||||
| NVIDIA NIM | v1.0.1 | This extension enables NVIDIA chat completion API calls |
|
||||
| OpenAI | v1.0.2 | This extension enables OpenAI chat completion API calls |
|
||||
| OpenRouter | v1.0.0 | This extension enables Open Router chat completion API calls |
|
||||
| Triton-TRT-LLM | v1.0.0 | This extension enables Nvidia's Triton-TRT-LLM as an inference engine option |
|
||||
Extensions can be managed through **Settings** (<Settings width={16} height={16} style={{display:"inline"}}/>) > **Extensions**:
|
||||
|
||||
## List of Core Extensions
|
||||
| Extension Name | Version | Description |
|
||||
|---------------------|----------|-----------------------------------------------------------------------------|
|
||||
| Jan Assistant | v1.0.1 | This extension enables assistants, including Jan, a default assistant that can call all downloaded models |
|
||||
| Conversational | v1.0.0 | This extension enables conversations and state persistence via your filesystem |
|
||||
| Model Management | v1.0.33 | Model Management Extension provides model exploration and seamless downloads |
|
||||
| System Monitoring | v1.0.10 | This extension provides system health and OS level data |
|
||||
|
||||
|
||||
## Configure an Extension Settings
|
||||
|
||||
To configure an extension settings:
|
||||
|
||||
1. Navigate to the `~/jan/data/extensions`.
|
||||
2. Open the `extensions.json` file
|
||||
3. Edit the file with options including:
|
||||
|
||||
| Option | Description |
|
||||
| ---------------- | ----------------------------------- |
|
||||
| `_active` | Enable/disable the extension. |
|
||||
| `listeners` | Default listener setting. |
|
||||
| `origin` | Extension file path. |
|
||||
| `installOptions` | Version and metadata configuration. |
|
||||
| `name` | Extension name. |
|
||||
| `productName` | Extension display name. |
|
||||
| `version` | Extension version. |
|
||||
| `main` | Main file path. |
|
||||
| `description` | Extension description. |
|
||||
| `url` | Extension URL. |
|
||||
|
||||
```json title="~/jan/data/extensions/extensions.json"
|
||||
{
|
||||
"@janhq/conversational-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-conversational-extension-1.0.0.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/conversational-extension",
|
||||
"productName": "Conversational",
|
||||
"version": "1.0.0",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables conversations and state persistence via your filesystem",
|
||||
"url": "extension://@janhq/conversational-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-anthropic-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-anthropic-extension-1.0.2.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-anthropic-extension",
|
||||
"productName": "Anthropic Inference Engine",
|
||||
"version": "1.0.2",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Anthropic chat completion API calls",
|
||||
"url": "extension://@janhq/inference-anthropic-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-triton-trt-llm-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-triton-trt-llm-extension-1.0.0.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-triton-trt-llm-extension",
|
||||
"productName": "Triton-TRT-LLM Inference Engine",
|
||||
"version": "1.0.0",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Nvidia's TensorRT-LLM as an inference engine option",
|
||||
"url": "extension://@janhq/inference-triton-trt-llm-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-mistral-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-mistral-extension-1.0.1.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-mistral-extension",
|
||||
"productName": "MistralAI Inference Engine",
|
||||
"version": "1.0.1",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Mistral chat completion API calls",
|
||||
"url": "extension://@janhq/inference-mistral-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-martian-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-martian-extension-1.0.1.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-martian-extension",
|
||||
"productName": "Martian Inference Engine",
|
||||
"version": "1.0.1",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Martian chat completion API calls",
|
||||
"url": "extension://@janhq/inference-martian-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-openrouter-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-openrouter-extension-1.0.0.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-openrouter-extension",
|
||||
"productName": "OpenRouter Inference Engine",
|
||||
"version": "1.0.0",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Open Router chat completion API calls",
|
||||
"url": "extension://@janhq/inference-openrouter-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-nvidia-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-nvidia-extension-1.0.1.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-nvidia-extension",
|
||||
"productName": "NVIDIA NIM Inference Engine",
|
||||
"version": "1.0.1",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables NVIDIA chat completion API calls",
|
||||
"url": "extension://@janhq/inference-nvidia-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-groq-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-groq-extension-1.0.1.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-groq-extension",
|
||||
"productName": "Groq Inference Engine",
|
||||
"version": "1.0.1",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables fast Groq chat completion API calls",
|
||||
"url": "extension://@janhq/inference-groq-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-openai-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-openai-extension-1.0.2.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-openai-extension",
|
||||
"productName": "OpenAI Inference Engine",
|
||||
"version": "1.0.2",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables OpenAI chat completion API calls",
|
||||
"url": "extension://@janhq/inference-openai-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-cohere-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-cohere-extension-1.0.0.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-cohere-extension",
|
||||
"productName": "Cohere Inference Engine",
|
||||
"version": "1.0.0",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Cohere chat completion API calls",
|
||||
"url": "extension://@janhq/inference-cohere-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/model-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-model-extension-1.0.33.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/model-extension",
|
||||
"productName": "Model Management",
|
||||
"version": "1.0.33",
|
||||
"main": "dist/index.js",
|
||||
"description": "Model Management Extension provides model exploration and seamless downloads",
|
||||
"url": "extension://@janhq/model-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/monitoring-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-monitoring-extension-1.0.10.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/monitoring-extension",
|
||||
"productName": "System Monitoring",
|
||||
"version": "1.0.10",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension provides system health and OS level data",
|
||||
"url": "extension://@janhq/monitoring-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/assistant-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-assistant-extension-1.0.1.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/assistant-extension",
|
||||
"productName": "Jan Assistant",
|
||||
"version": "1.0.1",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables assistants, including Jan, a default assistant that can call all downloaded models",
|
||||
"url": "extension://@janhq/assistant-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/tensorrt-llm-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-tensorrt-llm-extension-0.0.3.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/tensorrt-llm-extension",
|
||||
"productName": "TensorRT-LLM Inference Engine",
|
||||
"version": "0.0.3",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension enables Nvidia's TensorRT-LLM for the fastest GPU acceleration. See the [setup guide](https://jan.ai/guides/providers/tensorrt-llm/) for next steps.",
|
||||
"url": "extension://@janhq/tensorrt-llm-extension/dist/index.js"
|
||||
},
|
||||
"@janhq/inference-cortex-extension": {
|
||||
"_active": true,
|
||||
"listeners": {},
|
||||
"origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-cortex-extension-1.0.15.tgz",
|
||||
"installOptions": {
|
||||
"version": false,
|
||||
"fullMetadata": true
|
||||
},
|
||||
"name": "@janhq/inference-cortex-extension",
|
||||
"productName": "Cortex Inference Engine",
|
||||
"version": "1.0.15",
|
||||
"main": "dist/index.js",
|
||||
"description": "This extension embeds cortex.cpp, a lightweight inference engine written in C++. See https://nitro.jan.ai.\nAdditional dependencies could be installed to run without Cuda Toolkit installation.",
|
||||
"url": "extension://@janhq/inference-cortex-extension/dist/index.js"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Specific Extension Settings
|
||||
Jan offers an Extensions settings menu for configuring extensions that have registered their settings within the application. Here, you can directly integrate Remote Inference Engines with Jan without inserting the URL and API Key directly in the `JSON` file. Additionally, you can turn the available Logging extensions on or off in Jan. To access the Extension settings, follow the steps below:
|
||||
1. Navigate to the main dashboard.
|
||||
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
|
||||
<br/>
|
||||

|
||||

|
||||
<br/>
|
||||
3. Click **Extensions**.
|
||||
<br/>
|
||||

|
||||
|
||||
## Turn Off an Extension
|
||||
## Core Extensions
|
||||
|
||||
To turn off the extension, follow the steps below:
|
||||
### [Cortex.cpp](/docs/extensions-settings/cortex)
|
||||
The primary extension that manages both **local** and **remote engine** capabilities:
|
||||
|
||||
1. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
|
||||
<br/>
|
||||

|
||||
<br/>
|
||||
2. Click the **Extensions** button.
|
||||
<br/>
|
||||

|
||||
<br/>
|
||||
3. Click the slider button to turn off the extension.
|
||||
#### Local Engines
|
||||
- [llama.cpp](/docs/built-in/llama-cpp): Fast, efficient local inference engine that runs GGUF models directly on your device. Powers Jan's default local AI capabilities with support for multiple hardware configurations.
|
||||
- [TensorRT-LLM](/docs/built-in/tensorrt-llm): GPU-accelerated inference engine optimized for LLMs on NVIDIA GPUs, offering high-performance and low latency for transformer-based models.
|
||||
|
||||
4. Restart the app to see that the extension has been disabled.
|
||||
#### Remote Engines
|
||||
- [Anthropic](/docs/remote-models/anthropic): Access Claude models
|
||||
- [Cohere](/docs/remote-models/cohere): Access Cohere's models
|
||||
- [Groq](/docs/remote-models/groq): High-performance inference
|
||||
- [Martian](/docs/remote-models/martian): Specialized model access
|
||||
- [MistralAI](/docs/remote-models/mistralai): Access Mistral models
|
||||
- [NVIDIA NIM](/docs/remote-models/nvidia-nim) (NVIDIA Inference Microservices): Platform for deploying and serving GPU-accelerated AI models, providing enterprise-grade reliability and scalability.
|
||||
- [OpenAI](/docs/remote-models/openai): Access GPT models
|
||||
- [OpenRouter](/docs/remote-models/openrouter): Multi-provider model access
|
||||
- [Triton-TRT-LLM](/docs/remote-models/triton): High-performance inference backend using NVIDIA Triton Inference Server with TensorRT-LLM optimization, designed for large-scale model deployment.
|
||||
|
||||
## Model Management
|
||||
The Model Management extension allows Jan's app to download specific models from Hugging Face repositories that require an access token.
|
||||
1. Navigate to the main dashboard.
|
||||
2. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
|
||||
<br/>
|
||||

|
||||
<br/>
|
||||
3. Under the **Core Extensions** section, select the **Model Management** extension.
|
||||
|
||||
4. Enter the HuggingFace access token.
|
||||
|
||||
## System Monitor
|
||||
The System Monitor extension offers enhanced customization for app logging. You can toggle the application logging feature on or off and set a custom interval for clearing the app logs. To configure the app log feature, follow these steps:
|
||||
1. Navigate to the main dashboard.
|
||||
2. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
|
||||
<br/>
|
||||

|
||||
<br/>
|
||||
3. Under the **Core Extensions** section, select the **System Monitoring** extension.
|
||||
<br/>
|
||||

|
||||
<br/>
|
||||
4. Use the **slider** to turn the app logging feature on or off.
|
||||
<br/>
|
||||

|
||||
<br/>
|
||||
5. Specify the log cleaning interval in milliseconds.
|
||||
<br/>
|
||||

|
||||
<br/>
|
||||
<Callout type='info'>
|
||||
- You can clear the app logs manually by clicking the **Clear logs** button in the advanced settings.
|
||||
- There are no minimum or maximum intervals for setting the time. However, invalid inputs will default to `120000ms (2 minutes)`.
|
||||
</Callout>
|
||||
### Jan Assistant
|
||||
Enables assistants functionality, including Jan - the default assistant that can utilize all downloaded models. This extension manages:
|
||||
- Default assistant configurations
|
||||
- Model selection
|
||||
- Conversation management
|
||||
|
||||
### Conversational
|
||||
Manages all chat-related functionality and data persistence:
|
||||
|
||||
|
||||
### [Model Management](/docs/extensions-settings/model-management)
|
||||
Provides model exploration and seamless downloads:
|
||||
- Model discovery and browsing
|
||||
- Version control & configuration handling
|
||||
- Download management
|
||||
|
||||
### [System Monitoring](/docs/extensions-settings/system-monitoring)
|
||||
Provides system health and OS-level data:
|
||||
- Hardware utilization tracking
|
||||
- Performance monitoring
|
||||
- Error logging
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
---
|
||||
title: Installing an Extension
|
||||
title: Install Extension
|
||||
description: A step-by-step guide on installing an extension.
|
||||
keywords:
|
||||
[
|
||||
@ -20,7 +20,7 @@ keywords:
|
||||
import { Callout } from 'nextra/components'
|
||||
|
||||
|
||||
## Installing an Extension
|
||||
## Install Extension
|
||||
|
||||
Jan automatically installs 11 default extensions when you set up the application. You can also add a custom third-party extension at your own risk.
|
||||
|
||||
@ -30,9 +30,7 @@ Jan only accepts the `.tgz` file format for installing a custom extension.
|
||||
</Callout>
|
||||
|
||||
1. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
|
||||
<br/>
|
||||

|
||||
<br/>
|
||||
|
||||
2. Click the **Extensions** button.
|
||||
<br/>
|
||||

|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
{
|
||||
"openai": {
|
||||
"title": "OpenAI API",
|
||||
"title": "OpenAI",
|
||||
"href": "/docs/remote-models/openai"
|
||||
},
|
||||
"azure": {
|
||||
@ -8,27 +8,27 @@
|
||||
"href": "/docs/remote-models/azure",
|
||||
"display": "hidden"
|
||||
},
|
||||
"groq": { "title": "Groq API", "href": "/docs/remote-models/groq" },
|
||||
"groq": { "title": "Groq", "href": "/docs/remote-models/groq" },
|
||||
"mistralai": {
|
||||
"title": "Mistral AI API",
|
||||
"title": "Mistral AI",
|
||||
"href": "/docs/remote-models/mistralai"
|
||||
},
|
||||
"openrouter": { "title": "OpenRouter", "href": "/docs/remote-models/openrouter" },
|
||||
"generic-openai": { "title": "Any OpenAI Compatible API", "href": "/docs/remote-models/generic-openai", "display": "hidden"},
|
||||
"martian": {
|
||||
"title": "Martian API",
|
||||
"title": "Martian",
|
||||
"href": "/docs/remote-models/martian"
|
||||
},
|
||||
"cohere": {
|
||||
"title": "Cohere API",
|
||||
"title": "Cohere",
|
||||
"href": "/docs/remote-models/cohere"
|
||||
},
|
||||
"anthropic": {
|
||||
"title": "Anthropic API",
|
||||
"title": "Anthropic",
|
||||
"href": "/docs/remote-models/anthropic"
|
||||
},
|
||||
"nvidia-nim": {
|
||||
"title": "NVIDIA NIM API",
|
||||
"title": "NVIDIA NIM",
|
||||
"href": "/docs/remote-models/nvidia-nim"
|
||||
},
|
||||
"triton": {
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user