diff --git a/README.md b/README.md
index 8c3ff307b..1eae89297 100644
--- a/README.md
+++ b/README.md
@@ -76,31 +76,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
| Experimental (Nightly Build) |
-
+
jan.exe
|
-
+
Intel
|
-
+
M1/M2
|
-
+
jan.deb
|
-
+
jan.AppImage
diff --git a/core/src/node/api/processors/fsExt.ts b/core/src/node/api/processors/fsExt.ts
index 9b88cfef9..7b08e24c9 100644
--- a/core/src/node/api/processors/fsExt.ts
+++ b/core/src/node/api/processors/fsExt.ts
@@ -101,7 +101,7 @@ export class FSExt implements Processor {
})
}
- rmdir(path: string): Promise {
+ rm(path: string): Promise {
return new Promise((resolve, reject) => {
fs.rm(path, { recursive: true }, (err) => {
if (err) {
diff --git a/docs/.env.example b/docs/.env.example
index 56b26dafb..22f6e715f 100644
--- a/docs/.env.example
+++ b/docs/.env.example
@@ -3,5 +3,4 @@ UMAMI_PROJECT_API_KEY=xxxx
UMAMI_APP_URL=xxxx
ALGOLIA_API_KEY=xxxx
ALGOLIA_APP_ID=xxxx
-GITHUB_ACCESS_TOKEN=xxxx
-API_KEY_BREVO=xxxx
\ No newline at end of file
+GITHUB_ACCESS_TOKEN=xxxx
\ No newline at end of file
diff --git a/docs/docs/guides/providers/tensorrt-llm.md b/docs/docs/guides/providers/tensorrt-llm.md
new file mode 100644
index 000000000..3526ef25d
--- /dev/null
+++ b/docs/docs/guides/providers/tensorrt-llm.md
@@ -0,0 +1,222 @@
+---
+title: TensorRT-LLM
+slug: /guides/providers/tensorrt-llm
+---
+
+
+ TensorRT-LLM - Jan Guides
+
+
+
+
+
+
+
+
+
+
+:::info
+
+TensorRT-LLM support was launched in 0.4.9, and should be regarded as an Experimental feature.
+
+- Only Windows is supported for now.
+- Please report bugs in our Discord's [#tensorrt-llm](https://discord.com/channels/1107178041848909847/1201832734704795688) channel.
+
+:::
+
+Jan supports [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) as an alternate Inference Engine, for users who have Nvidia GPUs with large VRAM. TensorRT-LLM allows for blazing fast inference, but requires Nvidia GPUs with [larger VRAM](https://nvidia.github.io/TensorRT-LLM/memory.html).
+
+## What is TensorRT-LLM?
+
+[TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) is a hardware-optimized LLM inference engine for Nvidia GPUs that compiles models to run extremely fast on Nvidia GPUs.
+- Mainly used on Nvidia's Datacenter-grade GPUs like the H100s [to produce 10,000 tok/s](https://nvidia.github.io/TensorRT-LLM/blogs/H100vsA100.html).
+- Can be used on Nvidia's workstation (e.g. [A6000](https://www.nvidia.com/en-us/design-visualization/rtx-6000/)) and consumer-grade GPUs (e.g. [RTX 4090](https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/))
+
+:::tip[Benefits]
+
+- Our performance testing shows 20-40% faster token/s speeds on consumer-grade GPUs
+- On datacenter-grade GPUs, TensorRT-LLM can go up to 10,000 tokens/s
+- TensorRT-LLM is a relatively new library that was [released in Sept 2023](https://github.com/NVIDIA/TensorRT-LLM/graphs/contributors). We anticipate performance and resource utilization improvements in the future.
+
+:::
+
+:::warning[Caveats]
+
+- TensorRT-LLM requires models to be compiled into GPU and OS-specific "Model Engines" (vs. GGUF's "convert once, run anywhere" approach)
+- TensorRT-LLM Model Engines tend to utilize larger amounts of VRAM and RAM in exchange for performance
+- This usually means only people with top-of-the-line Nvidia GPUs can use TensorRT-LLM
+
+:::
+
+
+## Requirements
+
+### Hardware
+
+- Windows PC
+- Nvidia GPU(s): Ada or Ampere series (i.e. RTX 4000s & 3000s). More will be supported soon.
+- 3GB+ of disk space to download TRT-LLM artifacts and a Nitro binary
+
+**Compatible GPUs**
+
+| Architecture | Supported? | Consumer-grade | Workstation-grade |
+| ------------ | --- | -------------- | ----------------- |
+| Ada | ✅ | 4050 and above | RTX A2000 Ada |
+| Ampere | ✅ | 3050 and above | A100 |
+| Turing | ❌ | Not Supported | Not Supported |
+
+:::info
+
+Please ping us in Discord's [#tensorrt-llm](https://discord.com/channels/1107178041848909847/1201832734704795688) channel if you would like Turing support.
+
+:::
+
+### Software
+
+- Jan v0.4.9+ or Jan v0.4.8-321+ (nightly)
+- [Nvidia Driver v535+](https://jan.ai/guides/common-error/not-using-gpu/#1-ensure-gpu-mode-requirements)
+- [CUDA Toolkit v12.2+](https://jan.ai/guides/common-error/not-using-gpu/#1-ensure-gpu-mode-requirements)
+
+## Getting Started
+
+### Install TensorRT-Extension
+
+1. Go to Settings > Extensions
+2. Install the TensorRT-LLM Extension
+
+:::info
+You can check if files have been correctly downloaded:
+
+```sh
+ls ~\jan\extensions\@janhq\tensorrt-llm-extension\dist\bin
+# Your Extension Folder should now include `nitro.exe`, among other `.dll` files needed to run TRT-LLM
+```
+:::
+
+### Download a TensorRT-LLM Model
+
+Jan's Hub has a few pre-compiled TensorRT-LLM models that you can download, which have a `TensorRT-LLM` label.
+
+- We automatically download the TensorRT-LLM Model Engine for your GPU architecture
+- We have made a few 1.1b models available that can run even on laptop GPUs with 8 GB VRAM
+
+
+| Model | OS | Ada (40XX) | Ampere (30XX) | Description |
+| ------------------- | ------- | ---------- | ------------- | --------------------------------------------------- |
+| Llamacorn 1.1b | Windows | ✅ | ✅ | TinyLlama-1.1b, fine-tuned for usability |
+| TinyJensen 1.1b | Windows | ✅ | ✅ | TinyLlama-1.1b, fine-tuned on Jensen Huang speeches |
+| Mistral Instruct 7b | Windows | ✅ | ✅ | Mistral |
+
+### Importing Pre-built Models
+
+You can import a pre-built model, by creating a new folder in Jan's `/models` directory that includes:
+
+- TensorRT-LLM Engine files (e.g. `tokenizer`, `.engine`, etc)
+- `model.json` that registers these files, and specifies `engine` as `nitro-tensorrt-llm`
+
+:::note[Sample model.json]
+
+Note the `engine` is `nitro-tensorrt-llm`: this won't work without it!
+
+```js
+{
+ "sources": [
+ {
+ "filename": "config.json",
+ "url": "https://delta.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/config.json"
+ },
+ {
+ "filename": "mistral_float16_tp1_rank0.engine",
+ "url": "https://delta.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/mistral_float16_tp1_rank0.engine"
+ },
+ {
+ "filename": "tokenizer.model",
+ "url": "https://delta.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/tokenizer.model"
+ },
+ {
+ "filename": "special_tokens_map.json",
+ "url": "https://delta.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/special_tokens_map.json"
+ },
+ {
+ "filename": "tokenizer.json",
+ "url": "https://delta.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/tokenizer.json"
+ },
+ {
+ "filename": "tokenizer_config.json",
+ "url": "https://delta.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/tokenizer_config.json"
+ },
+ {
+ "filename": "model.cache",
+ "url": "https://delta.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/model.cache"
+ }
+ ],
+ "id": "tinyjensen-1.1b-chat-fp16",
+ "object": "model",
+ "name": "TinyJensen 1.1B Chat FP16",
+ "version": "1.0",
+ "description": "Do you want to chat with Jensen Huan? Here you are",
+ "format": "TensorRT-LLM",
+ "settings": {
+ "ctx_len": 2048,
+ "text_model": false
+ },
+ "parameters": {
+ "max_tokens": 4096
+ },
+ "metadata": {
+ "author": "LLama",
+ "tags": [
+ "TensorRT-LLM",
+ "1B",
+ "Finetuned"
+ ],
+ "size": 2151000000
+ },
+ "engine": "nitro-tensorrt-llm"
+}
+```
+
+:::
+
+### Using a TensorRT-LLM Model
+
+You can just select and use a TensorRT-LLM model from Jan's Thread interface.
+- Jan will automatically start the TensorRT-LLM model engine in the background
+- You may encounter a pop-up from Windows Security, asking for Nitro to allow public and private network access
+
+:::info[Why does Nitro need network access?]
+
+- This is because Jan runs TensorRT-LLM using the [Nitro Server](https://github.com/janhq/nitro-tensorrt-llm/)
+- Jan makes network calls to the Nitro server running on your computer on a separate port
+
+:::
+
+### Configure Settings
+
+:::note
+coming soon
+:::
+
+## Troubleshooting
+
+## Extension Details
+
+Jan's TensorRT-LLM Extension is built on top of the open source [Nitro TensorRT-LLM Server](https://github.com/janhq/nitro-tensorrt-llm), a C++ inference server on top of TensorRT-LLM that provides an OpenAI-compatible API.
+
+### Manual Build
+
+To manually build the artifacts needed to run the server and TensorRT-LLM, you can reference the source code. [Read here](https://github.com/janhq/nitro-tensorrt-llm?tab=readme-ov-file#quickstart).
+
+### Uninstall Extension
+
+1. Quit the app
+2. Go to Settings > Extensions
+3. Delete the entire Extensions folder.
+4. Reopen the app; only the default extensions will be restored.
+
+
+## Build your own TensorRT models
+
+:::info
+coming soon
+:::
diff --git a/docs/docs/integrations/tensorrt.md b/docs/docs/integrations/tensorrt.md
deleted file mode 100644
index 8a77d1436..000000000
--- a/docs/docs/integrations/tensorrt.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: TensorRT-LLM
----
-
-## Quicklinks
-
-- Jan Framework [Extension Code](https://github.com/janhq/jan/tree/main/extensions/inference-triton-trtllm-extension)
-- TensorRT [Source URL](https://github.com/NVIDIA/TensorRT-LLM)
diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index 24bb39e8a..d77d59d9d 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -117,6 +117,10 @@ const config = {
from: '/guides/using-extensions/',
to: '/extensions/',
},
+ {
+ from: '/integrations/tensorrt',
+ to: '/guides/providers/tensorrt-llm',
+ },
],
},
],
@@ -404,11 +408,6 @@ const config = {
},
},
- // Put your custom environment here
- customFields: {
- apiKeyBrevo: process.env.API_KEY_BREVO,
- },
-
themes: ['@docusaurus/theme-live-codeblock', '@docusaurus/theme-mermaid'],
}
diff --git a/docs/src/containers/Footer/index.js b/docs/src/containers/Footer/index.js
index fa7bf83e7..a33caa2c8 100644
--- a/docs/src/containers/Footer/index.js
+++ b/docs/src/containers/Footer/index.js
@@ -1,4 +1,4 @@
-import React from 'react'
+import React, { useState } from 'react'
import useDocusaurusContext from '@docusaurus/useDocusaurusContext'
import { AiOutlineGithub, AiOutlineTwitter } from 'react-icons/ai'
@@ -130,15 +130,13 @@ export default function Footer() {
siteConfig: { customFields },
} = useDocusaurusContext()
+ const [formMessage, setFormMessage] = useState('')
+
const onSubmit = (data) => {
const { email } = data
const options = {
method: 'POST',
- headers: {
- 'accept': 'application/json',
- 'content-type': 'application/json',
- 'api-key': customFields.apiKeyBrevo,
- },
+
body: JSON.stringify({
updateEnabled: false,
email,
@@ -147,12 +145,18 @@ export default function Footer() {
}
if (email) {
- fetch('https://api.brevo.com/v3/contacts', options)
+ fetch('https://brevo.jan.ai/', options)
.then((response) => response.json())
.then((response) => {
if (response.id) {
- reset()
+ setFormMessage('You have successfully joined our newsletter')
+ } else {
+ setFormMessage(response.message)
}
+ reset()
+ setTimeout(() => {
+ setFormMessage('')
+ }, 5000)
})
.catch((err) => console.error(err))
}
@@ -203,6 +207,7 @@ export default function Footer() {
+ {formMessage && {formMessage} }
diff --git a/docs/src/pages/index.js b/docs/src/pages/index.js
index 7dc41ce3a..4f0f4bd6d 100644
--- a/docs/src/pages/index.js
+++ b/docs/src/pages/index.js
@@ -1,5 +1,5 @@
-import React, { useState, useEffect } from 'react'
-import DownloadApp from '@site/src/containers/DownloadApp'
+import React, { useState } from 'react'
+
import { useForm } from 'react-hook-form'
import useBaseUrl from '@docusaurus/useBaseUrl'
@@ -86,6 +86,7 @@ export default function Home() {
const isBrowser = useIsBrowser()
const { stargazers } = useAppStars()
const { data } = useDiscordWidget()
+ const [formMessage, setFormMessage] = useState('')
const userAgent = isBrowser && navigator.userAgent
const isBrowserChrome = isBrowser && userAgent.includes('Chrome')
@@ -104,11 +105,6 @@ export default function Home() {
const { email } = data
const options = {
method: 'POST',
- headers: {
- 'accept': 'application/json',
- 'content-type': 'application/json',
- 'api-key': customFields.apiKeyBrevo,
- },
body: JSON.stringify({
updateEnabled: false,
email,
@@ -117,12 +113,18 @@ export default function Home() {
}
if (email) {
- fetch('https://api.brevo.com/v3/contacts', options)
+ fetch('https://brevo.jan.ai/', options)
.then((response) => response.json())
.then((response) => {
if (response.id) {
- reset()
+ setFormMessage('You have successfully joined our newsletter')
+ } else {
+ setFormMessage(response.message)
}
+ reset()
+ setTimeout(() => {
+ setFormMessage('')
+ }, 5000)
})
.catch((err) => console.error(err))
}
@@ -748,6 +750,9 @@ export default function Home() {
Subscribe
+ {formMessage && (
+ {formMessage}
+ )}
diff --git a/extensions/conversational-extension/src/index.ts b/extensions/conversational-extension/src/index.ts
index bf8c213ad..1982d90c4 100644
--- a/extensions/conversational-extension/src/index.ts
+++ b/extensions/conversational-extension/src/index.ts
@@ -20,9 +20,9 @@ export default class JSONConversationalExtension extends ConversationalExtension
* Called when the extension is loaded.
*/
async onLoad() {
- if (!(await fs.existsSync(JSONConversationalExtension._threadFolder)))
+ if (!(await fs.existsSync(JSONConversationalExtension._threadFolder))) {
await fs.mkdirSync(JSONConversationalExtension._threadFolder)
- console.debug('JSONConversationalExtension loaded')
+ }
}
/**
diff --git a/web/containers/Providers/DataLoader.tsx b/web/containers/Providers/DataLoader.tsx
index 7d38a29d6..abd0e4889 100644
--- a/web/containers/Providers/DataLoader.tsx
+++ b/web/containers/Providers/DataLoader.tsx
@@ -2,7 +2,7 @@
import { Fragment, ReactNode, useEffect } from 'react'
-import { AppConfiguration } from '@janhq/core'
+import { AppConfiguration, getUserHomePath, joinPath } from '@janhq/core'
import { useSetAtom } from 'jotai'
import useAssistants from '@/hooks/useAssistants'
@@ -10,6 +10,7 @@ import useGetSystemResources from '@/hooks/useGetSystemResources'
import useModels from '@/hooks/useModels'
import useThreads from '@/hooks/useThreads'
+import { defaultJanDataFolderAtom } from '@/helpers/atoms/App.atom'
import {
janDataFolderPathAtom,
quickAskEnabledAtom,
@@ -22,6 +23,7 @@ type Props = {
const DataLoader: React.FC = ({ children }) => {
const setJanDataFolderPath = useSetAtom(janDataFolderPathAtom)
const setQuickAskEnabled = useSetAtom(quickAskEnabledAtom)
+ const setJanDefaultDataFolder = useSetAtom(defaultJanDataFolderAtom)
useModels()
useThreads()
@@ -37,6 +39,16 @@ const DataLoader: React.FC = ({ children }) => {
})
}, [setJanDataFolderPath, setQuickAskEnabled])
+ useEffect(() => {
+ async function getDefaultJanDataFolder() {
+ const homePath = await getUserHomePath()
+ const defaultJanDataFolder = await joinPath([homePath, 'jan'])
+
+ setJanDefaultDataFolder(defaultJanDataFolder)
+ }
+ getDefaultJanDataFolder()
+ }, [setJanDefaultDataFolder])
+
console.debug('Load Data...')
return {children}
diff --git a/web/helpers/atoms/App.atom.ts b/web/helpers/atoms/App.atom.ts
index 342c04819..b17d43db1 100644
--- a/web/helpers/atoms/App.atom.ts
+++ b/web/helpers/atoms/App.atom.ts
@@ -3,3 +3,5 @@ import { atom } from 'jotai'
import { MainViewState } from '@/constants/screens'
export const mainViewStateAtom = atom(MainViewState.Thread)
+
+export const defaultJanDataFolderAtom = atom('')
diff --git a/web/hooks/useActiveModel.ts b/web/hooks/useActiveModel.ts
index e6c519f9f..98433c2ea 100644
--- a/web/hooks/useActiveModel.ts
+++ b/web/hooks/useActiveModel.ts
@@ -1,4 +1,4 @@
-import { useEffect, useRef } from 'react'
+import { useCallback, useEffect, useRef } from 'react'
import { events, Model, ModelEvent } from '@janhq/core'
import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
@@ -86,13 +86,12 @@ export function useActiveModel() {
events.emit(ModelEvent.OnModelInit, model)
}
- const stopModel = async () => {
+ const stopModel = useCallback(async () => {
if (activeModel) {
- setActiveModel(undefined)
setStateModel({ state: 'stop', loading: true, model: activeModel.id })
events.emit(ModelEvent.OnModelStop, activeModel)
}
- }
+ }, [activeModel, setStateModel])
return { activeModel, startModel, stopModel, stateModel }
}
diff --git a/web/hooks/useFactoryReset.ts b/web/hooks/useFactoryReset.ts
index da0813060..878461ef1 100644
--- a/web/hooks/useFactoryReset.ts
+++ b/web/hooks/useFactoryReset.ts
@@ -1,49 +1,68 @@
-import { useEffect, useState } from 'react'
+import { useCallback } from 'react'
-import { fs, AppConfiguration, joinPath, getUserHomePath } from '@janhq/core'
+import { fs, AppConfiguration } from '@janhq/core'
+import { atom, useAtomValue, useSetAtom } from 'jotai'
+
+import { useActiveModel } from './useActiveModel'
+
+import { defaultJanDataFolderAtom } from '@/helpers/atoms/App.atom'
+
+export enum FactoryResetState {
+ Idle = 'idle',
+ Starting = 'starting',
+ StoppingModel = 'stopping_model',
+ DeletingData = 'deleting_data',
+ ClearLocalStorage = 'clear_local_storage',
+}
+
+export const factoryResetStateAtom = atom(FactoryResetState.Idle)
export default function useFactoryReset() {
- const [defaultJanDataFolder, setdefaultJanDataFolder] = useState('')
+ const defaultJanDataFolder = useAtomValue(defaultJanDataFolderAtom)
+ const { activeModel, stopModel } = useActiveModel()
+ const setFactoryResetState = useSetAtom(factoryResetStateAtom)
- useEffect(() => {
- async function getDefaultJanDataFolder() {
- const homePath = await getUserHomePath()
- const defaultJanDataFolder = await joinPath([homePath, 'jan'])
- setdefaultJanDataFolder(defaultJanDataFolder)
- }
- getDefaultJanDataFolder()
- }, [])
+ const resetAll = useCallback(
+ async (keepCurrentFolder?: boolean) => {
+ setFactoryResetState(FactoryResetState.Starting)
+ // read the place of jan data folder
+ const appConfiguration: AppConfiguration | undefined =
+ await window.core?.api?.getAppConfigurations()
- const resetAll = async (keepCurrentFolder?: boolean) => {
- // read the place of jan data folder
- const appConfiguration: AppConfiguration | undefined =
- await window.core?.api?.getAppConfigurations()
-
- if (!appConfiguration) {
- console.debug('Failed to get app configuration')
- }
-
- console.debug('appConfiguration: ', appConfiguration)
- const janDataFolderPath = appConfiguration!.data_folder
-
- if (!keepCurrentFolder) {
- // set the default jan data folder to user's home directory
- const configuration: AppConfiguration = {
- data_folder: defaultJanDataFolder,
- quick_ask: appConfiguration?.quick_ask ?? false,
+ if (!appConfiguration) {
+ console.debug('Failed to get app configuration')
}
- await window.core?.api?.updateAppConfiguration(configuration)
- }
- await fs.rmdirSync(janDataFolderPath, { recursive: true })
- // reset the localStorage
- localStorage.clear()
+ const janDataFolderPath = appConfiguration!.data_folder
- await window.core?.api?.relaunch()
- }
+ if (!keepCurrentFolder) {
+ // set the default jan data folder to user's home directory
+ const configuration: AppConfiguration = {
+ data_folder: defaultJanDataFolder,
+ quick_ask: appConfiguration?.quick_ask ?? false,
+ }
+ await window.core?.api?.updateAppConfiguration(configuration)
+ }
+
+ if (activeModel) {
+ setFactoryResetState(FactoryResetState.StoppingModel)
+ await stopModel()
+ await new Promise((resolve) => setTimeout(resolve, 4000))
+ }
+
+ setFactoryResetState(FactoryResetState.DeletingData)
+ await fs.rm(janDataFolderPath)
+
+ setFactoryResetState(FactoryResetState.ClearLocalStorage)
+ // reset the localStorage
+ localStorage.clear()
+
+ await window.core?.api?.relaunch()
+ },
+ [defaultJanDataFolder, activeModel, stopModel, setFactoryResetState]
+ )
return {
- defaultJanDataFolder,
resetAll,
}
}
diff --git a/web/screens/Settings/Advanced/FactoryReset/ModalConfirmReset.tsx b/web/screens/Settings/Advanced/FactoryReset/ModalConfirmReset.tsx
index 4560ac1ad..a50b17895 100644
--- a/web/screens/Settings/Advanced/FactoryReset/ModalConfirmReset.tsx
+++ b/web/screens/Settings/Advanced/FactoryReset/ModalConfirmReset.tsx
@@ -11,21 +11,25 @@ import {
Checkbox,
Input,
} from '@janhq/uikit'
-import { atom, useAtom } from 'jotai'
+import { atom, useAtom, useAtomValue } from 'jotai'
import useFactoryReset from '@/hooks/useFactoryReset'
+import { defaultJanDataFolderAtom } from '@/helpers/atoms/App.atom'
+
export const modalValidationAtom = atom(false)
const ModalConfirmReset = () => {
const [modalValidation, setModalValidation] = useAtom(modalValidationAtom)
- const { resetAll, defaultJanDataFolder } = useFactoryReset()
+ const defaultJanDataFolder = useAtomValue(defaultJanDataFolderAtom)
+ const { resetAll } = useFactoryReset()
const [inputValue, setInputValue] = useState('')
const [currentDirectoryChecked, setCurrentDirectoryChecked] = useState(true)
- const onFactoryResetClick = useCallback(
- () => resetAll(currentDirectoryChecked),
- [currentDirectoryChecked, resetAll]
- )
+
+ const onFactoryResetClick = useCallback(() => {
+ setModalValidation(false)
+ resetAll(currentDirectoryChecked)
+ }, [currentDirectoryChecked, resetAll, setModalValidation])
return (
{
Otherwise it will reset back to its original location at:{' '}
- {/* TODO should be from system */}
{defaultJanDataFolder}
diff --git a/web/screens/Settings/Advanced/FactoryReset/ResettingModal.tsx b/web/screens/Settings/Advanced/FactoryReset/ResettingModal.tsx
new file mode 100644
index 000000000..6469f6416
--- /dev/null
+++ b/web/screens/Settings/Advanced/FactoryReset/ResettingModal.tsx
@@ -0,0 +1,29 @@
+import { Modal, ModalContent, ModalHeader, ModalTitle } from '@janhq/uikit'
+import { atom, useAtomValue } from 'jotai'
+
+import {
+ FactoryResetState,
+ factoryResetStateAtom,
+} from '@/hooks/useFactoryReset'
+
+const resetModalVisibilityAtom = atom((get) => {
+ const visible = get(factoryResetStateAtom) !== FactoryResetState.Idle
+ return visible
+})
+
+const ResettingModal: React.FC = () => {
+ const visibility = useAtomValue(resetModalVisibilityAtom)
+
+ return (
+
+
+
+ Factory reset in progress..
+
+ Resetting..
+
+
+ )
+}
+
+export default ResettingModal
diff --git a/web/screens/Settings/Advanced/FactoryReset/index.tsx b/web/screens/Settings/Advanced/FactoryReset/index.tsx
index e7b1e2995..fb6bf8b6f 100644
--- a/web/screens/Settings/Advanced/FactoryReset/index.tsx
+++ b/web/screens/Settings/Advanced/FactoryReset/index.tsx
@@ -3,6 +3,7 @@ import { Button } from '@janhq/uikit'
import { useSetAtom } from 'jotai'
import ModalValidation, { modalValidationAtom } from './ModalConfirmReset'
+import ResettingModal from './ResettingModal'
const FactoryReset = () => {
const setModalValidation = useSetAtom(modalValidationAtom)
@@ -30,6 +31,7 @@ const FactoryReset = () => {
Reset
+
)
}
|