fix: race condition issue - reading settings.json file (#2683)
* fix: race condition issue - reading settings.json file
* fix: cannot reset data while starting model
* chore: remove extension suffix
parent 9e7bdc7f2a
commit 02c49e796d
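The race being fixed: on startup, the monitoring extension read its GPU settings file with an unconditional readFileSync while another code path could still be creating it, so the reader could hit a missing file and throw. A minimal sketch of the failure mode (file name illustrative, not the actual Jan path):

    import { readFileSync } from 'fs'

    try {
      // If the writer has not created the file yet, readFileSync does not
      // return empty data -- it throws ENOENT and crashes the caller.
      JSON.parse(readFileSync('settings.json', 'utf-8'))
    } catch (err) {
      console.error('lost the race, file not written yet:', err)
    }

The getGpuConfig change further down is the actual fix: check for the file first and fall back to defaults.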
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/assistant-extension",
-  "productName": "Jan Assistant Extension",
+  "productName": "Jan Assistant",
   "version": "1.0.1",
   "description": "This extension enables assistants, including Jan, a default assistant that can call all downloaded models",
   "main": "dist/index.js",
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/conversational-extension",
-  "productName": "Conversational Extension",
+  "productName": "Conversational",
   "version": "1.0.0",
   "description": "This extension enables conversations and state persistence via your filesystem",
   "main": "dist/index.js",
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/huggingface-extension",
-  "productName": "HuggingFace Extension",
+  "productName": "HuggingFace",
   "version": "1.0.0",
   "description": "Hugging Face extension for converting HF models to GGUF",
   "main": "dist/index.js",
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/inference-groq-extension",
-  "productName": "Groq Inference Engine Extension",
+  "productName": "Groq Inference Engine",
   "version": "1.0.0",
   "description": "This extension enables fast Groq chat completion API calls",
   "main": "dist/index.js",
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/inference-mistral-extension",
-  "productName": "Mistral AI Inference Engine Extension",
+  "productName": "MistralAI Inference Engine",
   "version": "1.0.0",
   "description": "This extension enables Mistral chat completion API calls",
   "main": "dist/index.js",
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/inference-nitro-extension",
-  "productName": "Nitro Inference Engine Extension",
+  "productName": "Nitro Inference Engine",
   "version": "1.0.0",
   "description": "This extension embeds Nitro, a lightweight (3mb) inference engine written in C++. See https://nitro.jan.ai.\nUse this setting if you encounter errors related to **CUDA toolkit** during application execution.",
   "main": "dist/index.js",
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/inference-openai-extension",
-  "productName": "OpenAI Inference Engine Extension",
+  "productName": "OpenAI Inference Engine",
   "version": "1.0.0",
   "description": "This extension enables OpenAI chat completion API calls",
   "main": "dist/index.js",
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/inference-triton-trt-llm-extension",
-  "productName": "Triton-TRT-LLM Inference Engine Extension",
+  "productName": "Triton-TRT-LLM Inference Engine",
   "version": "1.0.0",
   "description": "This extension enables Nvidia's TensorRT-LLM as an inference engine option",
   "main": "dist/index.js",
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/model-extension",
-  "productName": "Model Management Extension",
+  "productName": "Model Management",
   "version": "1.0.30",
   "description": "Model Management Extension provides model exploration and seamless downloads",
   "main": "dist/index.js",
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/monitoring-extension",
-  "productName": "System Monitoring Extension",
+  "productName": "System Monitoring",
   "version": "1.0.10",
   "description": "This extension provides system health and OS level data",
   "main": "dist/index.js",
@@ -49,7 +49,9 @@ const DEFAULT_SETTINGS: GpuSetting = {

 export const getGpuConfig = async (): Promise<GpuSetting | undefined> => {
   if (process.platform === 'darwin') return undefined
-  return JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
+  if (existsSync(GPU_INFO_FILE))
+    return JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
+  return DEFAULT_SETTINGS
 }

 export const getResourcesInfo = async (): Promise<ResourceInfo> => {
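Checking existsSync before readFileSync removes the startup crash, though strictly speaking a check-then-read still has a small window: the file could disappear (or be half-written, making JSON.parse throw) between the two calls. A more defensive variant, purely as a sketch and not what this commit ships:

    import { existsSync, readFileSync } from 'fs'

    // Hypothetical helper with the same fallback behavior as getGpuConfig,
    // but also tolerant of losing the race after the existence check.
    const readJsonWithFallback = <T>(path: string, defaults: T): T => {
      if (!existsSync(path)) return defaults
      try {
        return JSON.parse(readFileSync(path, 'utf-8')) as T
      } catch {
        // ENOENT from a lost race, or a syntax error from a partial
        // write -- either way, behave as if the file were absent.
        return defaults
      }
    }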
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/tensorrt-llm-extension",
-  "productName": "TensorRT-LLM Inference Engine Extension",
+  "productName": "TensorRT-LLM Inference Engine",
   "version": "0.0.3",
   "description": "This extension enables Nvidia's TensorRT-LLM for the fastest GPU acceleration. See the [setup guide](https://jan.ai/guides/providers/tensorrt-llm/) for next steps.",
   "main": "dist/index.js",
@@ -126,33 +126,27 @@ export function useActiveModel() {
     })
   }

-  const stopModel = useCallback(
-    async (model?: Model) => {
-      const stoppingModel = activeModel || model
-      if (
-        !stoppingModel ||
-        (!model && stateModel.state === 'stop' && stateModel.loading)
-      )
-        return
+  const stopModel = useCallback(async () => {
+    const stoppingModel = activeModel || stateModel.model
+    if (!stoppingModel || (stateModel.state === 'stop' && stateModel.loading))
+      return

-      setStateModel({ state: 'stop', loading: true, model: stoppingModel })
-      const engine = EngineManager.instance().get(stoppingModel.engine)
-      return engine
-        ?.unloadModel(stoppingModel)
-        .catch()
-        .then(() => {
-          setActiveModel(undefined)
-          setStateModel({ state: 'start', loading: false, model: undefined })
-          loadModelController?.abort()
-        })
-    },
-    [activeModel, setActiveModel, setStateModel, stateModel]
-  )
+    setStateModel({ state: 'stop', loading: true, model: stoppingModel })
+    const engine = EngineManager.instance().get(stoppingModel.engine)
+    return engine
+      ?.unloadModel(stoppingModel)
+      .catch()
+      .then(() => {
+        setActiveModel(undefined)
+        setStateModel({ state: 'start', loading: false, model: undefined })
+        loadModelController?.abort()
+      })
+  }, [activeModel, setActiveModel, setStateModel, stateModel])

   const stopInference = useCallback(async () => {
     // Loading model
     if (stateModel.loading) {
-      stopModel(stateModel.model)
+      stopModel()
       return
     }
     if (!activeModel) return
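With this refactor, stopModel no longer takes an optional Model argument: it resolves its own target from activeModel (a model that finished loading) or stateModel.model (a model still loading), which is what lets stopInference cancel a startup in progress with a bare call. A usage sketch (component invented for illustration):

    // Hypothetical consumer -- shows only the new zero-argument call.
    const StopButton = () => {
      const { stopModel } = useActiveModel()
      return <button onClick={() => stopModel()}>Stop</button>
    }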
@@ -19,7 +19,7 @@ export const factoryResetStateAtom = atom(FactoryResetState.Idle)

 export default function useFactoryReset() {
   const defaultJanDataFolder = useAtomValue(defaultJanDataFolderAtom)
-  const { activeModel, stopModel } = useActiveModel()
+  const { stopModel } = useActiveModel()
   const setFactoryResetState = useSetAtom(factoryResetStateAtom)

   const resetAll = useCallback(
@@ -44,11 +44,9 @@ export default function useFactoryReset() {
         await window.core?.api?.updateAppConfiguration(configuration)
       }

-      if (activeModel) {
-        setFactoryResetState(FactoryResetState.StoppingModel)
-        await stopModel()
-        await new Promise((resolve) => setTimeout(resolve, 4000))
-      }
+      setFactoryResetState(FactoryResetState.StoppingModel)
+      await stopModel()
+      await new Promise((resolve) => setTimeout(resolve, 4000))

       setFactoryResetState(FactoryResetState.DeletingData)
       await fs.rm(janDataFolderPath)
@@ -59,7 +57,7 @@ export default function useFactoryReset() {

       await window.core?.api?.relaunch()
     },
-    [defaultJanDataFolder, activeModel, stopModel, setFactoryResetState]
+    [defaultJanDataFolder, stopModel, setFactoryResetState]
   )

   return {
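This is the "cannot reset data while starting model" fix: while a model is still loading, activeModel is undefined, so the old `if (activeModel)` guard skipped the stop entirely and the reset raced a starting engine. Calling stopModel() unconditionally is safe because, after the refactor above, it no-ops when nothing is running or loading. The fixed 4-second sleep remains a heuristic wait for the engine to shut down; a state-based wait would look roughly like this (predicate invented, not part of the commit):

    // Hypothetical replacement for the fixed sleep: poll until the model
    // reports stopped, bounded by the same 4s budget.
    const waitForStop = async (isStopped: () => boolean, timeoutMs = 4000) => {
      const start = Date.now()
      while (!isStopped() && Date.now() - start < timeoutMs) {
        await new Promise((resolve) => setTimeout(resolve, 100))
      }
    }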