Merge branch 'dev' into docs-pena-team
This commit is contained in:
commit
86b6dd5657
10
README.md
10
README.md
@ -76,31 +76,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
|
||||
<tr style="text-align:center">
|
||||
<td style="text-align:center"><b>Experimental (Nightly Build)</b></td>
|
||||
<td style="text-align:center">
|
||||
<a href='https://delta.jan.ai/latest/jan-win-x64-0.4.9-335.exe'>
|
||||
<a href='https://delta.jan.ai/latest/jan-win-x64-0.4.9-336.exe'>
|
||||
<img src='./docs/static/img/windows.png' style="height:14px; width: 14px" />
|
||||
<b>jan.exe</b>
|
||||
</a>
|
||||
</td>
|
||||
<td style="text-align:center">
|
||||
<a href='https://delta.jan.ai/latest/jan-mac-x64-0.4.9-335.dmg'>
|
||||
<a href='https://delta.jan.ai/latest/jan-mac-x64-0.4.9-336.dmg'>
|
||||
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
|
||||
<b>Intel</b>
|
||||
</a>
|
||||
</td>
|
||||
<td style="text-align:center">
|
||||
<a href='https://delta.jan.ai/latest/jan-mac-arm64-0.4.9-335.dmg'>
|
||||
<a href='https://delta.jan.ai/latest/jan-mac-arm64-0.4.9-336.dmg'>
|
||||
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
|
||||
<b>M1/M2</b>
|
||||
</a>
|
||||
</td>
|
||||
<td style="text-align:center">
|
||||
<a href='https://delta.jan.ai/latest/jan-linux-amd64-0.4.9-335.deb'>
|
||||
<a href='https://delta.jan.ai/latest/jan-linux-amd64-0.4.9-336.deb'>
|
||||
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
|
||||
<b>jan.deb</b>
|
||||
</a>
|
||||
</td>
|
||||
<td style="text-align:center">
|
||||
<a href='https://delta.jan.ai/latest/jan-linux-x86_64-0.4.9-335.AppImage'>
|
||||
<a href='https://delta.jan.ai/latest/jan-linux-x86_64-0.4.9-336.AppImage'>
|
||||
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
|
||||
<b>jan.AppImage</b>
|
||||
</a>
|
||||
|
||||
@ -101,7 +101,7 @@ export class FSExt implements Processor {
|
||||
})
|
||||
}
|
||||
|
||||
rmdir(path: string): Promise<void> {
|
||||
rm(path: string): Promise<void> {
|
||||
return new Promise((resolve, reject) => {
|
||||
fs.rm(path, { recursive: true }, (err) => {
|
||||
if (err) {
|
||||
|
||||
@ -3,5 +3,4 @@ UMAMI_PROJECT_API_KEY=xxxx
|
||||
UMAMI_APP_URL=xxxx
|
||||
ALGOLIA_API_KEY=xxxx
|
||||
ALGOLIA_APP_ID=xxxx
|
||||
GITHUB_ACCESS_TOKEN=xxxx
|
||||
API_KEY_BREVO=xxxx
|
||||
GITHUB_ACCESS_TOKEN=xxxx
|
||||
222
docs/docs/guides/providers/tensorrt-llm.md
Normal file
222
docs/docs/guides/providers/tensorrt-llm.md
Normal file
@ -0,0 +1,222 @@
|
||||
---
|
||||
title: TensorRT-LLM
|
||||
slug: /guides/providers/tensorrt-llm
|
||||
---
|
||||
|
||||
<head>
|
||||
<title>TensorRT-LLM - Jan Guides</title>
|
||||
<meta name="description" content="Learn how to install Jan's official TensorRT-LLM Extension, which offers 20-40% faster token speeds on Nvidia GPUs. Understand the requirements, installation steps, and troubleshooting tips."/>
|
||||
<meta name="keywords" content="Jan AI, Jan, ChatGPT alternative, TensorRT-LLM, Nvidia GPU, TensorRT, extension, installation, troubleshooting"/>
|
||||
<meta property="og:title" content="TensorRT-LLM - Jan Guides"/>
|
||||
<meta property="og:description" content="Learn how to install Jan's official TensorRT-LLM Extension, which offers 20-40% faster token speeds on Nvidia GPUs. Understand the requirements, installation steps, and troubleshooting tips."/>
|
||||
<meta property="og:url" content="https://jan.ai/guides/providers/tensorrt-llm"/>
|
||||
<meta name="twitter:card" content="summary"/>
|
||||
<meta name="twitter:title" content="TensorRT-LLM - Jan Guides"/>
|
||||
<meta name="twitter:description" content="Learn how to install Jan's official TensorRT-LLM Extension, which offers 20-40% faster token speeds on Nvidia GPUs. Understand the requirements, installation steps, and troubleshooting tips."/>
|
||||
</head>
|
||||
|
||||
:::info
|
||||
|
||||
TensorRT-LLM support was launched in 0.4.9, and should be regarded as an Experimental feature.
|
||||
|
||||
- Only Windows is supported for now.
|
||||
- Please report bugs in our Discord's [#tensorrt-llm](https://discord.com/channels/1107178041848909847/1201832734704795688) channel.
|
||||
|
||||
:::
|
||||
|
||||
Jan supports [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) as an alternate Inference Engine, for users who have Nvidia GPUs with large VRAM. TensorRT-LLM allows for blazing fast inference, but requires Nvidia GPUs with [larger VRAM](https://nvidia.github.io/TensorRT-LLM/memory.html).
|
||||
|
||||
## What is TensorRT-LLM?
|
||||
|
||||
[TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) is a hardware-optimized LLM inference engine for Nvidia GPUs that compiles models to run extremely fast on Nvidia GPUs.
|
||||
- Mainly used on Nvidia's Datacenter-grade GPUs like the H100s [to produce 10,000 tok/s](https://nvidia.github.io/TensorRT-LLM/blogs/H100vsA100.html).
|
||||
- Can be used on Nvidia's workstation (e.g. [A6000](https://www.nvidia.com/en-us/design-visualization/rtx-6000/)) and consumer-grade GPUs (e.g. [RTX 4090](https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/))
|
||||
|
||||
:::tip[Benefits]
|
||||
|
||||
- Our performance testing shows 20-40% faster token/s speeds on consumer-grade GPUs
|
||||
- On datacenter-grade GPUs, TensorRT-LLM can go up to 10,000 tokens/s
|
||||
- TensorRT-LLM is a relatively new library that was [released in Sept 2023](https://github.com/NVIDIA/TensorRT-LLM/graphs/contributors). We anticipate performance and resource utilization improvements in the future.
|
||||
|
||||
:::
|
||||
|
||||
:::warning[Caveats]
|
||||
|
||||
- TensorRT-LLM requires models to be compiled into GPU and OS-specific "Model Engines" (vs. GGUF's "convert once, run anywhere" approach)
|
||||
- TensorRT-LLM Model Engines tend to utilize larger amount of VRAM and RAM in exchange for performance
|
||||
- This usually means only people with top-of-the-line Nvidia GPUs can use TensorRT-LLM
|
||||
|
||||
:::
|
||||
|
||||
|
||||
## Requirements
|
||||
|
||||
### Hardware
|
||||
|
||||
- Windows PC
|
||||
- Nvidia GPU(s): Ada or Ampere series (i.e. RTX 4000s & 3000s). More will be supported soon.
|
||||
- 3GB+ of disk space to download TRT-LLM artifacts and a Nitro binary
|
||||
|
||||
**Compatible GPUs**
|
||||
|
||||
| Architecture | Supported? | Consumer-grade | Workstation-grade |
|
||||
| ------------ | --- | -------------- | ----------------- |
|
||||
| Ada | ✅ | 4050 and above | RTX A2000 Ada |
|
||||
| Ampere | ✅ | 3050 and above | A100 |
|
||||
| Turing | ❌ | Not Supported | Not Supported |
|
||||
|
||||
:::info
|
||||
|
||||
Please ping us in Discord's [#tensorrt-llm](https://discord.com/channels/1107178041848909847/1201832734704795688) channel if you would like Turing support.
|
||||
|
||||
:::
|
||||
|
||||
### Software
|
||||
|
||||
- Jan v0.4.9+ or Jan v0.4.8-321+ (nightly)
|
||||
- [Nvidia Driver v535+](https://jan.ai/guides/common-error/not-using-gpu/#1-ensure-gpu-mode-requirements)
|
||||
- [CUDA Toolkit v12.2+](https://jan.ai/guides/common-error/not-using-gpu/#1-ensure-gpu-mode-requirements)
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Install TensorRT-Extension
|
||||
|
||||
1. Go to Settings > Extensions
|
||||
2. Install the TensorRT-LLM Extension
|
||||
|
||||
:::info
|
||||
You can check if files have been correctly downloaded:
|
||||
|
||||
```sh
|
||||
ls ~\jan\extensions\@janhq\tensorrt-llm-extension\dist\bin
|
||||
# Your Extension Folder should now include `nitro.exe`, among other `.dll` files needed to run TRT-LLM
|
||||
```
|
||||
:::
|
||||
|
||||
### Download a TensorRT-LLM Model
|
||||
|
||||
Jan's Hub has a few pre-compiled TensorRT-LLM models that you can download, which have a `TensorRT-LLM` label
|
||||
|
||||
- We automatically download the TensorRT-LLM Model Engine for your GPU architecture
|
||||
- We have made a few 1.1b models available that can run even on laptop GPUs with 8GB of VRAM
|
||||
|
||||
|
||||
| Model | OS | Ada (40XX) | Ampere (30XX) | Description |
|
||||
| ------------------- | ------- | ---------- | ------------- | --------------------------------------------------- |
|
||||
| Llamacorn 1.1b | Windows | ✅ | ✅ | TinyLlama-1.1b, fine-tuned for usability |
|
||||
| TinyJensen 1.1b | Windows | ✅ | ✅ | TinyLlama-1.1b, fine-tuned on Jensen Huang speeches |
|
||||
| Mistral Instruct 7b | Windows | ✅ | ✅ | Mistral |
|
||||
|
||||
### Importing Pre-built Models
|
||||
|
||||
You can import a pre-built model by creating a new folder in Jan's `/models` directory that includes:
|
||||
|
||||
- TensorRT-LLM Engine files (e.g. `tokenizer`, `.engine`, etc)
|
||||
- `model.json` that registers these files, and specifies `engine` as `nitro-tensorrt-llm`
|
||||
|
||||
:::note[Sample model.json]
|
||||
|
||||
Note the `engine` is `nitro-tensorrt-llm`: this won't work without it!
|
||||
|
||||
```js
|
||||
{
|
||||
"sources": [
|
||||
{
|
||||
"filename": "config.json",
|
||||
"url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/config.json"
|
||||
},
|
||||
{
|
||||
"filename": "mistral_float16_tp1_rank0.engine",
|
||||
"url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/mistral_float16_tp1_rank0.engine"
|
||||
},
|
||||
{
|
||||
"filename": "tokenizer.model",
|
||||
"url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/tokenizer.model"
|
||||
},
|
||||
{
|
||||
"filename": "special_tokens_map.json",
|
||||
"url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/special_tokens_map.json"
|
||||
},
|
||||
{
|
||||
"filename": "tokenizer.json",
|
||||
"url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/tokenizer.json"
|
||||
},
|
||||
{
|
||||
"filename": "tokenizer_config.json",
|
||||
"url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/tokenizer_config.json"
|
||||
},
|
||||
{
|
||||
"filename": "model.cache",
|
||||
"url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/model.cache"
|
||||
}
|
||||
],
|
||||
"id": "tinyjensen-1.1b-chat-fp16",
|
||||
"object": "model",
|
||||
"name": "TinyJensen 1.1B Chat FP16",
|
||||
"version": "1.0",
|
||||
"description": "Do you want to chat with Jensen Huan? Here you are",
|
||||
"format": "TensorRT-LLM",
|
||||
"settings": {
|
||||
"ctx_len": 2048,
|
||||
"text_model": false
|
||||
},
|
||||
"parameters": {
|
||||
"max_tokens": 4096
|
||||
},
|
||||
"metadata": {
|
||||
"author": "LLama",
|
||||
"tags": [
|
||||
"TensorRT-LLM",
|
||||
"1B",
|
||||
"Finetuned"
|
||||
],
|
||||
"size": 2151000000
|
||||
},
|
||||
"engine": "nitro-tensorrt-llm"
|
||||
}
|
||||
```
|
||||
|
||||
:::
|
||||
|
||||
### Using a TensorRT-LLM Model
|
||||
|
||||
You can just select and use a TensorRT-LLM model from Jan's Thread interface.
|
||||
- Jan will automatically start the TensorRT-LLM model engine in the background
|
||||
- You may encounter a pop-up from Windows Security, asking for Nitro to allow public and private network access
|
||||
|
||||
:::info[Why does Nitro need network access?]
|
||||
|
||||
- This is because Jan runs TensorRT-LLM using the [Nitro Server](https://github.com/janhq/nitro-tensorrt-llm/)
|
||||
- Jan makes network calls to the Nitro server running on your computer on a separate port
|
||||
|
||||
:::
|
||||
|
||||
### Configure Settings
|
||||
|
||||
:::note
|
||||
coming soon
|
||||
:::
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
## Extension Details
|
||||
|
||||
Jan's TensorRT-LLM Extension is built on top of the open source [Nitro TensorRT-LLM Server](https://github.com/janhq/nitro-tensorrt-llm), a C++ inference server on top of TensorRT-LLM that provides an OpenAI-compatible API.
|
||||
|
||||
### Manual Build
|
||||
|
||||
To manually build the artifacts needed to run the server and TensorRT-LLM, you can reference the source code. [Read here](https://github.com/janhq/nitro-tensorrt-llm?tab=readme-ov-file#quickstart).
|
||||
|
||||
### Uninstall Extension
|
||||
|
||||
1. Quit the app
|
||||
2. Go to Settings > Extensions
|
||||
3. Delete the entire Extensions folder.
|
||||
4. Reopen the app, only the default extensions should be restored.
|
||||
|
||||
|
||||
## Build your own TensorRT models
|
||||
|
||||
:::info
|
||||
coming soon
|
||||
:::
|
||||
@ -1,8 +0,0 @@
|
||||
---
|
||||
title: TensorRT-LLM
|
||||
---
|
||||
|
||||
## Quicklinks
|
||||
|
||||
- Jan Framework [Extension Code](https://github.com/janhq/jan/tree/main/extensions/inference-triton-trtllm-extension)
|
||||
- TensorRT [Source URL](https://github.com/NVIDIA/TensorRT-LLM)
|
||||
@ -117,6 +117,10 @@ const config = {
|
||||
from: '/guides/using-extensions/',
|
||||
to: '/extensions/',
|
||||
},
|
||||
{
|
||||
from: '/integrations/tensorrt',
|
||||
to: '/guides/providers/tensorrt-llm',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
@ -404,11 +408,6 @@ const config = {
|
||||
},
|
||||
},
|
||||
|
||||
// Put your custom environment here
|
||||
customFields: {
|
||||
apiKeyBrevo: process.env.API_KEY_BREVO,
|
||||
},
|
||||
|
||||
themes: ['@docusaurus/theme-live-codeblock', '@docusaurus/theme-mermaid'],
|
||||
}
|
||||
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
import React from 'react'
|
||||
import React, { useState } from 'react'
|
||||
|
||||
import useDocusaurusContext from '@docusaurus/useDocusaurusContext'
|
||||
import { AiOutlineGithub, AiOutlineTwitter } from 'react-icons/ai'
|
||||
@ -130,15 +130,13 @@ export default function Footer() {
|
||||
siteConfig: { customFields },
|
||||
} = useDocusaurusContext()
|
||||
|
||||
const [formMessage, setFormMessage] = useState('')
|
||||
|
||||
const onSubmit = (data) => {
|
||||
const { email } = data
|
||||
const options = {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'accept': 'application/json',
|
||||
'content-type': 'application/json',
|
||||
'api-key': customFields.apiKeyBrevo,
|
||||
},
|
||||
|
||||
body: JSON.stringify({
|
||||
updateEnabled: false,
|
||||
email,
|
||||
@ -147,12 +145,18 @@ export default function Footer() {
|
||||
}
|
||||
|
||||
if (email) {
|
||||
fetch('https://api.brevo.com/v3/contacts', options)
|
||||
fetch('https://brevo.jan.ai/', options)
|
||||
.then((response) => response.json())
|
||||
.then((response) => {
|
||||
if (response.id) {
|
||||
reset()
|
||||
setFormMessage('You have successfully joined our newsletter')
|
||||
} else {
|
||||
setFormMessage(response.message)
|
||||
}
|
||||
reset()
|
||||
setTimeout(() => {
|
||||
setFormMessage('')
|
||||
}, 5000)
|
||||
})
|
||||
.catch((err) => console.error(err))
|
||||
}
|
||||
@ -203,6 +207,7 @@ export default function Footer() {
|
||||
</svg>
|
||||
</button>
|
||||
</form>
|
||||
{formMessage && <p className="text-left mt-4">{formMessage}</p>}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
import React, { useState, useEffect } from 'react'
|
||||
import DownloadApp from '@site/src/containers/DownloadApp'
|
||||
import React, { useState } from 'react'
|
||||
|
||||
import { useForm } from 'react-hook-form'
|
||||
|
||||
import useBaseUrl from '@docusaurus/useBaseUrl'
|
||||
@ -86,6 +86,7 @@ export default function Home() {
|
||||
const isBrowser = useIsBrowser()
|
||||
const { stargazers } = useAppStars()
|
||||
const { data } = useDiscordWidget()
|
||||
const [formMessage, setFormMessage] = useState('')
|
||||
|
||||
const userAgent = isBrowser && navigator.userAgent
|
||||
const isBrowserChrome = isBrowser && userAgent.includes('Chrome')
|
||||
@ -104,11 +105,6 @@ export default function Home() {
|
||||
const { email } = data
|
||||
const options = {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'accept': 'application/json',
|
||||
'content-type': 'application/json',
|
||||
'api-key': customFields.apiKeyBrevo,
|
||||
},
|
||||
body: JSON.stringify({
|
||||
updateEnabled: false,
|
||||
email,
|
||||
@ -117,12 +113,18 @@ export default function Home() {
|
||||
}
|
||||
|
||||
if (email) {
|
||||
fetch('https://api.brevo.com/v3/contacts', options)
|
||||
fetch('https://brevo.jan.ai/', options)
|
||||
.then((response) => response.json())
|
||||
.then((response) => {
|
||||
if (response.id) {
|
||||
reset()
|
||||
setFormMessage('You have successfully joined our newsletter')
|
||||
} else {
|
||||
setFormMessage(response.message)
|
||||
}
|
||||
reset()
|
||||
setTimeout(() => {
|
||||
setFormMessage('')
|
||||
}, 5000)
|
||||
})
|
||||
.catch((err) => console.error(err))
|
||||
}
|
||||
@ -748,6 +750,9 @@ export default function Home() {
|
||||
Subscribe
|
||||
</button>
|
||||
</form>
|
||||
{formMessage && (
|
||||
<p className="text-left mt-4">{formMessage}</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@ -20,9 +20,9 @@ export default class JSONConversationalExtension extends ConversationalExtension
|
||||
* Called when the extension is loaded.
|
||||
*/
|
||||
async onLoad() {
|
||||
if (!(await fs.existsSync(JSONConversationalExtension._threadFolder)))
|
||||
if (!(await fs.existsSync(JSONConversationalExtension._threadFolder))) {
|
||||
await fs.mkdirSync(JSONConversationalExtension._threadFolder)
|
||||
console.debug('JSONConversationalExtension loaded')
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@ -2,7 +2,7 @@
|
||||
|
||||
import { Fragment, ReactNode, useEffect } from 'react'
|
||||
|
||||
import { AppConfiguration } from '@janhq/core'
|
||||
import { AppConfiguration, getUserHomePath, joinPath } from '@janhq/core'
|
||||
import { useSetAtom } from 'jotai'
|
||||
|
||||
import useAssistants from '@/hooks/useAssistants'
|
||||
@ -10,6 +10,7 @@ import useGetSystemResources from '@/hooks/useGetSystemResources'
|
||||
import useModels from '@/hooks/useModels'
|
||||
import useThreads from '@/hooks/useThreads'
|
||||
|
||||
import { defaultJanDataFolderAtom } from '@/helpers/atoms/App.atom'
|
||||
import {
|
||||
janDataFolderPathAtom,
|
||||
quickAskEnabledAtom,
|
||||
@ -22,6 +23,7 @@ type Props = {
|
||||
const DataLoader: React.FC<Props> = ({ children }) => {
|
||||
const setJanDataFolderPath = useSetAtom(janDataFolderPathAtom)
|
||||
const setQuickAskEnabled = useSetAtom(quickAskEnabledAtom)
|
||||
const setJanDefaultDataFolder = useSetAtom(defaultJanDataFolderAtom)
|
||||
|
||||
useModels()
|
||||
useThreads()
|
||||
@ -37,6 +39,16 @@ const DataLoader: React.FC<Props> = ({ children }) => {
|
||||
})
|
||||
}, [setJanDataFolderPath, setQuickAskEnabled])
|
||||
|
||||
useEffect(() => {
|
||||
async function getDefaultJanDataFolder() {
|
||||
const homePath = await getUserHomePath()
|
||||
const defaultJanDataFolder = await joinPath([homePath, 'jan'])
|
||||
|
||||
setJanDefaultDataFolder(defaultJanDataFolder)
|
||||
}
|
||||
getDefaultJanDataFolder()
|
||||
}, [setJanDefaultDataFolder])
|
||||
|
||||
console.debug('Load Data...')
|
||||
|
||||
return <Fragment>{children}</Fragment>
|
||||
|
||||
@ -3,3 +3,5 @@ import { atom } from 'jotai'
|
||||
import { MainViewState } from '@/constants/screens'
|
||||
|
||||
export const mainViewStateAtom = atom<MainViewState>(MainViewState.Thread)
|
||||
|
||||
export const defaultJanDataFolderAtom = atom<string>('')
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { useCallback, useEffect, useRef } from 'react'
|
||||
|
||||
import { events, Model, ModelEvent } from '@janhq/core'
|
||||
import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
|
||||
@ -86,13 +86,12 @@ export function useActiveModel() {
|
||||
events.emit(ModelEvent.OnModelInit, model)
|
||||
}
|
||||
|
||||
const stopModel = async () => {
|
||||
const stopModel = useCallback(async () => {
|
||||
if (activeModel) {
|
||||
setActiveModel(undefined)
|
||||
setStateModel({ state: 'stop', loading: true, model: activeModel.id })
|
||||
events.emit(ModelEvent.OnModelStop, activeModel)
|
||||
}
|
||||
}
|
||||
}, [activeModel, setStateModel])
|
||||
|
||||
return { activeModel, startModel, stopModel, stateModel }
|
||||
}
|
||||
|
||||
@ -1,49 +1,68 @@
|
||||
import { useEffect, useState } from 'react'
|
||||
import { useCallback } from 'react'
|
||||
|
||||
import { fs, AppConfiguration, joinPath, getUserHomePath } from '@janhq/core'
|
||||
import { fs, AppConfiguration } from '@janhq/core'
|
||||
import { atom, useAtomValue, useSetAtom } from 'jotai'
|
||||
|
||||
import { useActiveModel } from './useActiveModel'
|
||||
|
||||
import { defaultJanDataFolderAtom } from '@/helpers/atoms/App.atom'
|
||||
|
||||
export enum FactoryResetState {
|
||||
Idle = 'idle',
|
||||
Starting = 'starting',
|
||||
StoppingModel = 'stopping_model',
|
||||
DeletingData = 'deleting_data',
|
||||
ClearLocalStorage = 'clear_local_storage',
|
||||
}
|
||||
|
||||
export const factoryResetStateAtom = atom(FactoryResetState.Idle)
|
||||
|
||||
export default function useFactoryReset() {
|
||||
const [defaultJanDataFolder, setdefaultJanDataFolder] = useState('')
|
||||
const defaultJanDataFolder = useAtomValue(defaultJanDataFolderAtom)
|
||||
const { activeModel, stopModel } = useActiveModel()
|
||||
const setFactoryResetState = useSetAtom(factoryResetStateAtom)
|
||||
|
||||
useEffect(() => {
|
||||
async function getDefaultJanDataFolder() {
|
||||
const homePath = await getUserHomePath()
|
||||
const defaultJanDataFolder = await joinPath([homePath, 'jan'])
|
||||
setdefaultJanDataFolder(defaultJanDataFolder)
|
||||
}
|
||||
getDefaultJanDataFolder()
|
||||
}, [])
|
||||
const resetAll = useCallback(
|
||||
async (keepCurrentFolder?: boolean) => {
|
||||
setFactoryResetState(FactoryResetState.Starting)
|
||||
// read the place of jan data folder
|
||||
const appConfiguration: AppConfiguration | undefined =
|
||||
await window.core?.api?.getAppConfigurations()
|
||||
|
||||
const resetAll = async (keepCurrentFolder?: boolean) => {
|
||||
// read the place of jan data folder
|
||||
const appConfiguration: AppConfiguration | undefined =
|
||||
await window.core?.api?.getAppConfigurations()
|
||||
|
||||
if (!appConfiguration) {
|
||||
console.debug('Failed to get app configuration')
|
||||
}
|
||||
|
||||
console.debug('appConfiguration: ', appConfiguration)
|
||||
const janDataFolderPath = appConfiguration!.data_folder
|
||||
|
||||
if (!keepCurrentFolder) {
|
||||
// set the default jan data folder to user's home directory
|
||||
const configuration: AppConfiguration = {
|
||||
data_folder: defaultJanDataFolder,
|
||||
quick_ask: appConfiguration?.quick_ask ?? false,
|
||||
if (!appConfiguration) {
|
||||
console.debug('Failed to get app configuration')
|
||||
}
|
||||
await window.core?.api?.updateAppConfiguration(configuration)
|
||||
}
|
||||
await fs.rmdirSync(janDataFolderPath, { recursive: true })
|
||||
|
||||
// reset the localStorage
|
||||
localStorage.clear()
|
||||
const janDataFolderPath = appConfiguration!.data_folder
|
||||
|
||||
await window.core?.api?.relaunch()
|
||||
}
|
||||
if (!keepCurrentFolder) {
|
||||
// set the default jan data folder to user's home directory
|
||||
const configuration: AppConfiguration = {
|
||||
data_folder: defaultJanDataFolder,
|
||||
quick_ask: appConfiguration?.quick_ask ?? false,
|
||||
}
|
||||
await window.core?.api?.updateAppConfiguration(configuration)
|
||||
}
|
||||
|
||||
if (activeModel) {
|
||||
setFactoryResetState(FactoryResetState.StoppingModel)
|
||||
await stopModel()
|
||||
await new Promise((resolve) => setTimeout(resolve, 4000))
|
||||
}
|
||||
|
||||
setFactoryResetState(FactoryResetState.DeletingData)
|
||||
await fs.rm(janDataFolderPath)
|
||||
|
||||
setFactoryResetState(FactoryResetState.ClearLocalStorage)
|
||||
// reset the localStorage
|
||||
localStorage.clear()
|
||||
|
||||
await window.core?.api?.relaunch()
|
||||
},
|
||||
[defaultJanDataFolder, activeModel, stopModel, setFactoryResetState]
|
||||
)
|
||||
|
||||
return {
|
||||
defaultJanDataFolder,
|
||||
resetAll,
|
||||
}
|
||||
}
|
||||
|
||||
@ -11,21 +11,25 @@ import {
|
||||
Checkbox,
|
||||
Input,
|
||||
} from '@janhq/uikit'
|
||||
import { atom, useAtom } from 'jotai'
|
||||
import { atom, useAtom, useAtomValue } from 'jotai'
|
||||
|
||||
import useFactoryReset from '@/hooks/useFactoryReset'
|
||||
|
||||
import { defaultJanDataFolderAtom } from '@/helpers/atoms/App.atom'
|
||||
|
||||
export const modalValidationAtom = atom(false)
|
||||
|
||||
const ModalConfirmReset = () => {
|
||||
const [modalValidation, setModalValidation] = useAtom(modalValidationAtom)
|
||||
const { resetAll, defaultJanDataFolder } = useFactoryReset()
|
||||
const defaultJanDataFolder = useAtomValue(defaultJanDataFolderAtom)
|
||||
const { resetAll } = useFactoryReset()
|
||||
const [inputValue, setInputValue] = useState('')
|
||||
const [currentDirectoryChecked, setCurrentDirectoryChecked] = useState(true)
|
||||
const onFactoryResetClick = useCallback(
|
||||
() => resetAll(currentDirectoryChecked),
|
||||
[currentDirectoryChecked, resetAll]
|
||||
)
|
||||
|
||||
const onFactoryResetClick = useCallback(() => {
|
||||
setModalValidation(false)
|
||||
resetAll(currentDirectoryChecked)
|
||||
}, [currentDirectoryChecked, resetAll, setModalValidation])
|
||||
|
||||
return (
|
||||
<Modal
|
||||
@ -65,7 +69,6 @@ const ModalConfirmReset = () => {
|
||||
</label>
|
||||
<p className="mt-2 leading-relaxed">
|
||||
Otherwise it will reset back to its original location at:{' '}
|
||||
{/* TODO should be from system */}
|
||||
<span className="font-medium">{defaultJanDataFolder}</span>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
@ -0,0 +1,29 @@
|
||||
import { Modal, ModalContent, ModalHeader, ModalTitle } from '@janhq/uikit'
|
||||
import { atom, useAtomValue } from 'jotai'
|
||||
|
||||
import {
|
||||
FactoryResetState,
|
||||
factoryResetStateAtom,
|
||||
} from '@/hooks/useFactoryReset'
|
||||
|
||||
const resetModalVisibilityAtom = atom((get) => {
|
||||
const visible = get(factoryResetStateAtom) !== FactoryResetState.Idle
|
||||
return visible
|
||||
})
|
||||
|
||||
const ResettingModal: React.FC = () => {
|
||||
const visibility = useAtomValue(resetModalVisibilityAtom)
|
||||
|
||||
return (
|
||||
<Modal open={visibility}>
|
||||
<ModalContent>
|
||||
<ModalHeader>
|
||||
<ModalTitle>Factory reset in progress..</ModalTitle>
|
||||
</ModalHeader>
|
||||
<p className="text-muted-foreground">Resetting..</p>
|
||||
</ModalContent>
|
||||
</Modal>
|
||||
)
|
||||
}
|
||||
|
||||
export default ResettingModal
|
||||
@ -3,6 +3,7 @@ import { Button } from '@janhq/uikit'
|
||||
import { useSetAtom } from 'jotai'
|
||||
|
||||
import ModalValidation, { modalValidationAtom } from './ModalConfirmReset'
|
||||
import ResettingModal from './ResettingModal'
|
||||
|
||||
const FactoryReset = () => {
|
||||
const setModalValidation = useSetAtom(modalValidationAtom)
|
||||
@ -30,6 +31,7 @@ const FactoryReset = () => {
|
||||
Reset
|
||||
</Button>
|
||||
<ModalValidation />
|
||||
<ResettingModal />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user