jan/web/hooks/useSettings.ts
NamH 3a3bceb0c0
Release/v0.4.9 (#2421)
* fix: turn off experimental settings should also turn off quick ask (#2411)

* fix: app glitches 1s generating response before starting model (#2412)

* fix: disable experimental feature should also disable vulkan (#2414)

* fix: model load stuck on windows when can't get CPU core count (#2413)

Signed-off-by: James <james@jan.ai>
Co-authored-by: James <james@jan.ai>

* feat: TensorRT-LLM engine update support (#2415)

* fix: engine update

* chore: add remove prepopulated models

Signed-off-by: James <james@jan.ai>

* update tinyjensen url

Signed-off-by: James <james@jan.ai>

* update llamacorn

Signed-off-by: James <james@jan.ai>

* update Mistral 7B Instruct v0.1 int4

Signed-off-by: James <james@jan.ai>

* update tensorrt

Signed-off-by: James <james@jan.ai>

* update

Signed-off-by: hiro <hiro@jan.ai>

* update

Signed-off-by: James <james@jan.ai>

* prettier

Signed-off-by: James <james@jan.ai>

* update mistral config

Signed-off-by: James <james@jan.ai>

* fix some lint

Signed-off-by: James <james@jan.ai>

---------

Signed-off-by: James <james@jan.ai>
Signed-off-by: hiro <hiro@jan.ai>
Co-authored-by: James <james@jan.ai>
Co-authored-by: hiro <hiro@jan.ai>

* Tensorrt LLM disable turing support (#2418)

Co-authored-by: Hien To <tominhhien97@gmail.com>

* chore: add prompt template tensorrtllm (#2375)

* chore: add prompt template tensorrtllm

* Add Prompt template for mistral and correct model metadata

---------

Co-authored-by: Hien To <tominhhien97@gmail.com>

* fix: correct tensorrt mistral model.json (#2419)

---------

Signed-off-by: James <james@jan.ai>
Signed-off-by: hiro <hiro@jan.ai>
Co-authored-by: Louis <louis@jan.ai>
Co-authored-by: James <james@jan.ai>
Co-authored-by: hiro <hiro@jan.ai>
Co-authored-by: hiento09 <136591877+hiento09@users.noreply.github.com>
Co-authored-by: Hien To <tominhhien97@gmail.com>
2024-03-19 10:06:47 +07:00


import { useCallback, useEffect, useState } from 'react'
import { fs, joinPath } from '@janhq/core'
import { atom, useAtom } from 'jotai'
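
// Shared flag controlling whether the settings notification is shown
// (toggled by validateSettings below).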
export const isShowNotificationAtom = atom<boolean>(false)
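
/**
 * Hook for reading, validating, and persisting Jan's hardware settings
 * (run mode, notification preference, GPUs in use, Vulkan) stored in
 * settings.json.
 */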
export const useSettings = () => {
  const [isGPUModeEnabled, setIsGPUModeEnabled] = useState(false) // New state for GPU mode
  const [showNotification, setShowNotification] = useAtom(
    isShowNotificationAtom
  )

  useEffect(() => {
    // Run validation once, shortly after mount.
    setTimeout(() => validateSettings(), 3000)
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [])

  const validateSettings = async () => {
    readSettings().then((settings) => {
      // Keep the notification hidden when notifications are enabled but the
      // NVIDIA driver or the CUDA toolkit is missing.
      if (
        settings &&
        settings.notify &&
        ((settings.nvidia_driver?.exist && !settings.cuda?.exist) ||
          !settings.nvidia_driver?.exist)
      ) {
        setShowNotification(false)
      }

      // Check if run_mode is 'gpu' or 'cpu' and update state accordingly
      setIsGPUModeEnabled(settings?.run_mode === 'gpu')
    })
  }

  const readSettings = useCallback(async () => {
    if (!window?.core?.api) {
      return
    }
    const settingsFile = await joinPath(['file://settings', 'settings.json'])
    if (await fs.existsSync(settingsFile)) {
      const settings = await fs.readFileSync(settingsFile, 'utf-8')
      return typeof settings === 'object' ? settings : JSON.parse(settings)
    }
    return {}
  }, [])

  const saveSettings = async ({
    runMode,
    notify,
    gpusInUse,
    vulkan,
  }: {
    runMode?: string | undefined
    notify?: boolean | undefined
    gpusInUse?: string[] | undefined
    vulkan?: boolean | undefined
  }) => {
    const settingsFile = await joinPath(['file://settings', 'settings.json'])
    const settings = await readSettings()
    // Nothing to persist when the core API (and thus settings.json) is unavailable.
    if (!settings) return
    if (runMode != null) settings.run_mode = runMode
    if (notify != null) settings.notify = notify
    if (gpusInUse != null) settings.gpus_in_use = gpusInUse
    if (vulkan != null) {
      settings.vulkan = vulkan
      // GPU enabled, set run_mode to 'gpu'
      if (settings.vulkan) {
        settings.run_mode = 'gpu'
      } else {
        settings.run_mode = settings.gpus?.length > 0 ? 'gpu' : 'cpu'
      }
    }
    await fs.writeFileSync(settingsFile, JSON.stringify(settings))
  }

  return {
    showNotification,
    isGPUModeEnabled,
    readSettings,
    saveSettings,
    setShowNotification,
    validateSettings,
  }
}
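
// Usage sketch (illustrative only; `SettingsToggle` is a hypothetical consumer,
// not part of this file): flip the run mode via saveSettings.
//
//   const SettingsToggle = () => {
//     const { saveSettings, isGPUModeEnabled } = useSettings()
//     return (
//       <button
//         onClick={() =>
//           saveSettings({ runMode: isGPUModeEnabled ? 'cpu' : 'gpu' })
//         }
//       >
//         Switch to {isGPUModeEnabled ? 'CPU' : 'GPU'} mode
//       </button>
//     )
//   }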