feat: Add nitro vulkan to support AMD GPU/APU and Intel Arc GPU (#2056)

* feat: add vulkan support on windows and linux
* fix: correct vulkan settings
* fix: gpu settings and enable Vulkan support
* fix: vulkan supports only 1 device at a time
* inference-nitro-extension: add download of vulkaninfo

Co-authored-by: Louis <louis@jan.ai>
Co-authored-by: Hien To <tominhhien97@gmail.com>
Parent: f60d42a3d3
Commit: 926f19bd9b
.gitignore (vendored): 3 changes
@@ -29,4 +29,5 @@ extensions/inference-nitro-extension/bin/*/*.exp
 extensions/inference-nitro-extension/bin/*/*.lib
 extensions/inference-nitro-extension/bin/saved-*
 extensions/inference-nitro-extension/bin/*.tar.gz
+extensions/inference-nitro-extension/bin/vulkaninfoSDK.exe
+extensions/inference-nitro-extension/bin/vulkaninfo
@@ -1,3 +1,3 @@
 @echo off
 set /p NITRO_VERSION=<./bin/version.txt
-.\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64-cuda-12-0.tar.gz -e --strip 1 -o ./bin/win-cuda-12-0 && .\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64-cuda-11-7.tar.gz -e --strip 1 -o ./bin/win-cuda-11-7 && .\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64.tar.gz -e --strip 1 -o ./bin/win-cpu
+.\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64-cuda-12-0.tar.gz -e --strip 1 -o ./bin/win-cuda-12-0 && .\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64-cuda-11-7.tar.gz -e --strip 1 -o ./bin/win-cuda-11-7 && .\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64.tar.gz -e --strip 1 -o ./bin/win-cpu && .\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64-vulkan.tar.gz -e --strip 1 -o ./bin/win-vulkan && .\node_modules\.bin\download https://delta.jan.ai/vulkaninfoSDK.exe -o ./bin
@@ -8,7 +8,7 @@
   "license": "AGPL-3.0",
   "scripts": {
     "build": "tsc --module commonjs && rollup -c rollup.config.ts",
-    "downloadnitro:linux": "NITRO_VERSION=$(cat ./bin/version.txt) && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64.tar.gz -e --strip 1 -o ./bin/linux-cpu && chmod +x ./bin/linux-cpu/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64-cuda-12-0.tar.gz -e --strip 1 -o ./bin/linux-cuda-12-0 && chmod +x ./bin/linux-cuda-12-0/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64-cuda-11-7.tar.gz -e --strip 1 -o ./bin/linux-cuda-11-7 && chmod +x ./bin/linux-cuda-11-7/nitro",
+    "downloadnitro:linux": "NITRO_VERSION=$(cat ./bin/version.txt) && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64.tar.gz -e --strip 1 -o ./bin/linux-cpu && chmod +x ./bin/linux-cpu/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64-cuda-12-0.tar.gz -e --strip 1 -o ./bin/linux-cuda-12-0 && chmod +x ./bin/linux-cuda-12-0/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64-cuda-11-7.tar.gz -e --strip 1 -o ./bin/linux-cuda-11-7 && chmod +x ./bin/linux-cuda-11-7/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64-vulkan.tar.gz -e --strip 1 -o ./bin/linux-vulkan && chmod +x ./bin/linux-vulkan/nitro && download https://delta.jan.ai/vulkaninfo -o ./bin && chmod +x ./bin/vulkaninfo",
     "downloadnitro:darwin": "NITRO_VERSION=$(cat ./bin/version.txt) && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-mac-arm64.tar.gz -e --strip 1 -o ./bin/mac-arm64 && chmod +x ./bin/mac-arm64/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-mac-amd64.tar.gz -e --strip 1 -o ./bin/mac-x64 && chmod +x ./bin/mac-x64/nitro",
     "downloadnitro:win32": "download.bat",
     "downloadnitro": "run-script-os",
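Taken together, the -o targets in the batch file and the package.json scripts above imply roughly the following bin/ layout once the downloads finish (a sketch only; the exact archive contents may differ per release):

bin/
  version.txt
  vulkaninfo              (Linux helper from https://delta.jan.ai/vulkaninfo)
  vulkaninfoSDK.exe       (Windows helper from https://delta.jan.ai/vulkaninfoSDK.exe)
  linux-cpu/nitro
  linux-cuda-11-7/nitro
  linux-cuda-12-0/nitro
  linux-vulkan/nitro
  mac-arm64/nitro
  mac-x64/nitro
  win-cpu/nitro.exe
  win-cuda-11-7/nitro.exe
  win-cuda-12-0/nitro.exe
  win-vulkan/nitro.exe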
@@ -1,10 +1,11 @@
 import { writeFileSync, existsSync, readFileSync } from 'fs'
-import { exec } from 'child_process'
+import { exec, spawn } from 'child_process'
 import path from 'path'
-import { getJanDataFolderPath } from '@janhq/core/node'
+import { getJanDataFolderPath, log } from '@janhq/core/node'

 /**
  * Default GPU settings
+ * TODO: This needs to be refactored to support multiple accelerators
  **/
 const DEFALT_SETTINGS = {
   notify: true,
@@ -21,12 +22,17 @@ const DEFALT_SETTINGS = {
   gpu_highest_vram: '',
   gpus_in_use: [],
   is_initial: true,
+  // TODO: This needs to be set based on user toggle in settings
+  vulkan: {
+    enabled: true,
+    gpu_in_use: '1',
+  },
 }

 /**
  * Path to the settings file
  **/
-export const NVIDIA_INFO_FILE = path.join(
+export const GPU_INFO_FILE = path.join(
   getJanDataFolderPath(),
   'settings',
   'settings.json'
@@ -52,10 +58,10 @@ export async function updateNvidiaInfo() {
   if (process.platform !== 'darwin') {
     let data
     try {
-      data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, 'utf-8'))
+      data = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
     } catch (error) {
       data = DEFALT_SETTINGS
-      writeFileSync(NVIDIA_INFO_FILE, JSON.stringify(data, null, 2))
+      writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2))
     }
     updateNvidiaDriverInfo()
     updateGpuInfo()
@@ -79,7 +85,7 @@ export async function updateNvidiaDriverInfo(): Promise<void> {
   exec(
     'nvidia-smi --query-gpu=driver_version --format=csv,noheader',
     (error, stdout) => {
-      let data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, 'utf-8'))
+      let data = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))

       if (!error) {
         const firstLine = stdout.split('\n')[0].trim()
@@ -89,7 +95,7 @@ export async function updateNvidiaDriverInfo(): Promise<void> {
         data['nvidia_driver'].exist = false
       }

-      writeFileSync(NVIDIA_INFO_FILE, JSON.stringify(data, null, 2))
+      writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2))
       Promise.resolve()
     }
   )
@@ -158,42 +164,77 @@ export function updateCudaExistence(
  * Get GPU information
  */
 export async function updateGpuInfo(): Promise<void> {
-  exec(
-    'nvidia-smi --query-gpu=index,memory.total,name --format=csv,noheader,nounits',
-    (error, stdout) => {
-      let data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, 'utf-8'))
-
-      if (!error) {
-        // Get GPU info and gpu has higher memory first
-        let highestVram = 0
-        let highestVramId = '0'
-        let gpus = stdout
-          .trim()
-          .split('\n')
-          .map((line) => {
-            let [id, vram, name] = line.split(', ')
-            vram = vram.replace(/\r/g, '')
-            if (parseFloat(vram) > highestVram) {
-              highestVram = parseFloat(vram)
-              highestVramId = id
-            }
-            return { id, vram, name }
-          })
-
-        data.gpus = gpus
-        data.gpu_highest_vram = highestVramId
-      } else {
-        data.gpus = []
-        data.gpu_highest_vram = ''
-      }
-
-      if (!data['gpus_in_use'] || data['gpus_in_use'].length === 0) {
-        data.gpus_in_use = [data['gpu_highest_vram']]
-      }
-
-      data = updateCudaExistence(data)
-      writeFileSync(NVIDIA_INFO_FILE, JSON.stringify(data, null, 2))
-      Promise.resolve()
-    }
-  )
+  let data = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
+
+  // Cuda
+  if (data['vulkan'] === true) {
+    // Vulkan
+    exec(
+      process.platform === 'win32'
+        ? `${__dirname}\\..\\bin\\vulkaninfoSDK.exe --summary`
+        : `${__dirname}/../bin/vulkaninfo --summary`,
+      (error, stdout) => {
+        if (!error) {
+          const output = stdout.toString()
+          log(output)
+          const gpuRegex = /GPU(\d+):(?:[\s\S]*?)deviceName\s*=\s*(.*)/g
+
+          let gpus = []
+          let match
+          while ((match = gpuRegex.exec(output)) !== null) {
+            const id = match[1]
+            const name = match[2]
+            gpus.push({ id, vram: 0, name })
+          }
+          data.gpus = gpus
+
+          if (!data['gpus_in_use'] || data['gpus_in_use'].length === 0) {
+            data.gpus_in_use = [data.gpus.length > 1 ? '1' : '0']
+          }
+
+          data = updateCudaExistence(data)
+          writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2))
+        }
+        Promise.resolve()
+      }
+    )
+  } else {
+    exec(
+      'nvidia-smi --query-gpu=index,memory.total,name --format=csv,noheader,nounits',
+      (error, stdout) => {
+        if (!error) {
+          log(stdout)
+          // Get GPU info and gpu has higher memory first
+          let highestVram = 0
+          let highestVramId = '0'
+          let gpus = stdout
+            .trim()
+            .split('\n')
+            .map((line) => {
+              let [id, vram, name] = line.split(', ')
+              vram = vram.replace(/\r/g, '')
+              if (parseFloat(vram) > highestVram) {
+                highestVram = parseFloat(vram)
+                highestVramId = id
+              }
+              return { id, vram, name }
+            })
+
+          data.gpus = gpus
+          data.gpu_highest_vram = highestVramId
+        } else {
+          data.gpus = []
+          data.gpu_highest_vram = ''
+        }
+
+        if (!data['gpus_in_use'] || data['gpus_in_use'].length === 0) {
+          data.gpus_in_use = [data['gpu_highest_vram']]
+        }
+
+        data = updateCudaExistence(data)
+        writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2))
+        Promise.resolve()
+      }
+    )
+  }
 }
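To make the Vulkan detection path above concrete, here is a small, self-contained TypeScript sketch of what gpuRegex does to a `vulkaninfo --summary` style dump. The sample text is illustrative only (real output contains many more fields and different device names); the regex and the `{ id, vram: 0, name }` shape are taken from the hunk above.

// Sketch: parsing `vulkaninfo --summary` output with the gpuRegex from updateGpuInfo()
const sampleSummary = [
  'GPU0:',
  '    deviceType    = PHYSICAL_DEVICE_TYPE_DISCRETE_GPU',
  '    deviceName    = AMD Radeon RX 7900 XTX', // illustrative device names
  'GPU1:',
  '    deviceName    = Intel(R) Arc(tm) A770 Graphics',
].join('\n')

const gpuRegex = /GPU(\d+):(?:[\s\S]*?)deviceName\s*=\s*(.*)/g

const gpus: { id: string; vram: number; name: string }[] = []
let match: RegExpExecArray | null
while ((match = gpuRegex.exec(sampleSummary)) !== null) {
  // The Vulkan path reports no per-device VRAM figure here, so vram stays 0, mirroring the diff
  gpus.push({ id: match[1], vram: 0, name: match[2] })
}
// gpus -> [{ id: '0', vram: 0, name: 'AMD Radeon RX 7900 XTX' },
//          { id: '1', vram: 0, name: 'Intel(R) Arc(tm) A770 Graphics' }]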
@@ -1,10 +1,11 @@
 import { readFileSync } from 'fs'
 import * as path from 'path'
-import { NVIDIA_INFO_FILE } from './nvidia'
+import { GPU_INFO_FILE } from './accelerator'

 export interface NitroExecutableOptions {
   executablePath: string
   cudaVisibleDevices: string
+  vkVisibleDevices: string
 }
 /**
  * Find which executable file to run based on the current platform.
@@ -13,24 +14,30 @@ export interface NitroExecutableOptions {
 export const executableNitroFile = (): NitroExecutableOptions => {
   let binaryFolder = path.join(__dirname, '..', 'bin') // Current directory by default
   let cudaVisibleDevices = ''
+  let vkVisibleDevices = ''
   let binaryName = 'nitro'
   /**
    * The binary folder is different for each platform.
    */
   if (process.platform === 'win32') {
     /**
-     * For Windows: win-cpu, win-cuda-11-7, win-cuda-12-0
+     * For Windows: win-cpu, win-vulkan, win-cuda-11-7, win-cuda-12-0
      */
-    let nvidiaInfo = JSON.parse(readFileSync(NVIDIA_INFO_FILE, 'utf-8'))
-    if (nvidiaInfo['run_mode'] === 'cpu') {
+    let gpuInfo = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
+    if (gpuInfo['run_mode'] === 'cpu') {
       binaryFolder = path.join(binaryFolder, 'win-cpu')
     } else {
-      if (nvidiaInfo['cuda'].version === '11') {
+      if (gpuInfo['cuda']?.version === '11') {
        binaryFolder = path.join(binaryFolder, 'win-cuda-11-7')
      } else {
        binaryFolder = path.join(binaryFolder, 'win-cuda-12-0')
      }
-      cudaVisibleDevices = nvidiaInfo['gpus_in_use'].join(',')
+      cudaVisibleDevices = gpuInfo['gpus_in_use'].join(',')
     }
+    if (gpuInfo['vulkan'] === true) {
+      binaryFolder = path.join(__dirname, '..', 'bin')
+      binaryFolder = path.join(binaryFolder, 'win-vulkan')
+      vkVisibleDevices = gpuInfo['gpus_in_use'].toString()
+    }
     binaryName = 'nitro.exe'
   } else if (process.platform === 'darwin') {
@@ -44,22 +51,29 @@ export const executableNitroFile = (): NitroExecutableOptions => {
     }
   } else {
     /**
-     * For Linux: linux-cpu, linux-cuda-11-7, linux-cuda-12-0
+     * For Linux: linux-cpu, linux-vulkan, linux-cuda-11-7, linux-cuda-12-0
      */
-    let nvidiaInfo = JSON.parse(readFileSync(NVIDIA_INFO_FILE, 'utf-8'))
-    if (nvidiaInfo['run_mode'] === 'cpu') {
+    let gpuInfo = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
+    if (gpuInfo['run_mode'] === 'cpu') {
       binaryFolder = path.join(binaryFolder, 'linux-cpu')
     } else {
-      if (nvidiaInfo['cuda'].version === '11') {
+      if (gpuInfo['cuda']?.version === '11') {
        binaryFolder = path.join(binaryFolder, 'linux-cuda-11-7')
      } else {
        binaryFolder = path.join(binaryFolder, 'linux-cuda-12-0')
      }
-      cudaVisibleDevices = nvidiaInfo['gpus_in_use'].join(',')
+      cudaVisibleDevices = gpuInfo['gpus_in_use'].join(',')
     }
+
+    if (gpuInfo['vulkan'] === true) {
+      binaryFolder = path.join(__dirname, '..', 'bin')
+      binaryFolder = path.join(binaryFolder, 'win-vulkan')
+      vkVisibleDevices = gpuInfo['gpus_in_use'].toString()
+    }
   }
   return {
     executablePath: path.join(binaryFolder, binaryName),
     cudaVisibleDevices,
+    vkVisibleDevices,
   }
 }
@@ -4,7 +4,7 @@ import { ChildProcessWithoutNullStreams, spawn } from 'child_process'
 import tcpPortUsed from 'tcp-port-used'
 import fetchRT from 'fetch-retry'
 import { log, getSystemResourceInfo } from '@janhq/core/node'
-import { getNitroProcessInfo, updateNvidiaInfo } from './nvidia'
+import { getNitroProcessInfo, updateNvidiaInfo } from './accelerator'
 import {
   Model,
   InferenceEngine,
@@ -345,6 +345,10 @@ function spawnNitroProcess(): Promise<any> {
       env: {
         ...process.env,
         CUDA_VISIBLE_DEVICES: executableOptions.cudaVisibleDevices,
+        // Vulkan - Support 1 device at a time for now
+        ...(executableOptions.vkVisibleDevices?.length > 0 && {
+          GGML_VULKAN_DEVICE: executableOptions.vkVisibleDevices[0],
+        }),
       },
     }
   )
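The env block in the hunk above relies on a conditional object spread: when vkVisibleDevices is empty, the expression short-circuits to false, and spreading false contributes nothing, so GGML_VULKAN_DEVICE is only defined when a Vulkan device was actually selected. A minimal sketch of the same pattern in isolation (the values are illustrative):

// Sketch: the conditional-spread pattern used for GGML_VULKAN_DEVICE above
const vkVisibleDevices = '0' // a string here, so [0] is its first character: one device at a time

const env = {
  ...process.env,
  CUDA_VISIBLE_DEVICES: '',
  // `false` spreads to nothing, so the key is simply absent when no device is selected
  ...(vkVisibleDevices.length > 0 && {
    GGML_VULKAN_DEVICE: vkVisibleDevices[0],
  }),
}
// env.GGML_VULKAN_DEVICE === '0' here; with an empty vkVisibleDevices the key disappears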
@@ -37,10 +37,10 @@ const getCurrentLoad = () =>
     }
     if (data.run_mode === 'gpu' && data.gpus_in_use.length > 0) {
       const gpuIds = data['gpus_in_use'].join(',')
-      if (gpuIds !== '') {
+      if (gpuIds !== '' && data['vulkan'] !== true) {
         exec(
           `nvidia-smi --query-gpu=index,name,temperature.gpu,utilization.gpu,memory.total,memory.free,utilization.memory --format=csv,noheader,nounits --id=${gpuIds}`,
-          (error, stdout, stderr) => {
+          (error, stdout, _) => {
             if (error) {
               console.error(`exec error: ${error}`)
               reject(error)
@@ -5,7 +5,9 @@ interface FeatureToggleContextType {
   ignoreSSL: boolean
   proxy: string
   proxyEnabled: boolean
+  vulkanEnabled: boolean
   setExperimentalFeature: (on: boolean) => void
+  setVulkanEnabled: (on: boolean) => void
   setIgnoreSSL: (on: boolean) => void
   setProxy: (value: string) => void
   setProxyEnabled: (on: boolean) => void
@@ -16,7 +18,9 @@ const initialContext: FeatureToggleContextType = {
   ignoreSSL: false,
   proxy: '',
   proxyEnabled: false,
+  vulkanEnabled: false,
   setExperimentalFeature: () => {},
+  setVulkanEnabled: () => {},
   setIgnoreSSL: () => {},
   setProxy: () => {},
   setProxyEnabled: () => {},
@@ -31,6 +35,7 @@ export default function FeatureToggleWrapper({
   children: ReactNode
 }) {
   const EXPERIMENTAL_FEATURE = 'experimentalFeature'
+  const VULKAN_ENABLED = 'vulkanEnabled'
   const IGNORE_SSL = 'ignoreSSLFeature'
   const HTTPS_PROXY_FEATURE = 'httpsProxyFeature'
   const PROXY_FEATURE_ENABLED = 'proxyFeatureEnabled'
@@ -38,6 +43,7 @@ export default function FeatureToggleWrapper({
   const [experimentalFeature, directSetExperimentalFeature] =
     useState<boolean>(false)
   const [proxyEnabled, directSetProxyEnabled] = useState<boolean>(false)
+  const [vulkanEnabled, directEnableVulkan] = useState<boolean>(false)
   const [ignoreSSL, directSetIgnoreSSL] = useState<boolean>(false)
   const [proxy, directSetProxy] = useState<string>('')

@@ -57,6 +63,11 @@
     directSetExperimentalFeature(on)
   }

+  const setVulkanEnabled = (on: boolean) => {
+    localStorage.setItem(VULKAN_ENABLED, on ? 'true' : 'false')
+    directEnableVulkan(on)
+  }
+
   const setIgnoreSSL = (on: boolean) => {
     localStorage.setItem(IGNORE_SSL, on ? 'true' : 'false')
     directSetIgnoreSSL(on)
@@ -79,7 +90,9 @@
     ignoreSSL,
     proxy,
     proxyEnabled,
+    vulkanEnabled,
     setExperimentalFeature,
+    setVulkanEnabled,
     setIgnoreSSL,
     setProxy,
     setProxyEnabled,
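Components read the new flag straight from the context, the same way the Advanced settings screen does further down. A minimal sketch (the import path is an assumption for illustration):

// Sketch: consuming vulkanEnabled/setVulkanEnabled from FeatureToggleContext
import { useContext } from 'react'
import { FeatureToggleContext } from '@/context/FeatureToggle' // path assumed for illustration

const VulkanToggle = () => {
  const { vulkanEnabled, setVulkanEnabled } = useContext(FeatureToggleContext)
  return (
    <button onClick={() => setVulkanEnabled(!vulkanEnabled)}>
      Vulkan: {vulkanEnabled ? 'on' : 'off'}
    </button>
  )
}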
@@ -48,17 +48,33 @@ export const useSettings = () => {
     runMode,
     notify,
     gpusInUse,
+    vulkan,
   }: {
     runMode?: string | undefined
     notify?: boolean | undefined
     gpusInUse?: string[] | undefined
+    vulkan?: boolean | undefined
   }) => {
     const settingsFile = await joinPath(['file://settings', 'settings.json'])
     const settings = await readSettings()
     if (runMode != null) settings.run_mode = runMode
     if (notify != null) settings.notify = notify
     if (gpusInUse != null) settings.gpus_in_use = gpusInUse
+    if (vulkan != null) {
+      settings.vulkan = vulkan
+      // GPU enabled, set run_mode to 'gpu'
+      if (settings.vulkan) {
+        settings.run_mode = 'gpu'
+      } else {
+        settings.run_mode = settings.gpus?.length > 0 ? 'gpu' : 'cpu'
+      }
+    }
     await fs.writeFileSync(settingsFile, JSON.stringify(settings))
+
+    // Relaunch to apply settings
+    if (vulkan != null) {
+      window.location.reload()
+    }
   }

   return {
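The Advanced settings screen further down drives this path when the Vulkan switch is toggled; a trimmed sketch of that call (the real handler also stops the running model and shows a toast first):

// Sketch: toggling Vulkan through the extended saveSettings (see the Switch handler below)
const { saveSettings } = useSettings()

// Clears the GPU selection, forces run_mode to 'gpu', and, because `vulkan` is
// non-null, triggers the window.location.reload() added above.
saveSettings({ vulkan: true, gpusInUse: [] })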
@@ -83,10 +83,7 @@ const DataFolder = () => {
       await window.core?.api?.getAppConfigurations()
     const currentJanDataFolder = appConfiguration.data_folder
     appConfiguration.data_folder = destinationPath
-    const { _, err } = await fs.syncFile(
-      currentJanDataFolder,
-      destinationPath
-    )
+    const { err } = await fs.syncFile(currentJanDataFolder, destinationPath)
     if (err) throw err
     await window.core?.api?.updateAppConfiguration(appConfiguration)
     console.debug(
@@ -58,6 +58,8 @@ const Advanced = () => {
     setProxy,
     proxyEnabled,
     setProxyEnabled,
+    vulkanEnabled,
+    setVulkanEnabled,
   } = useContext(FeatureToggleContext)
   const [partialProxy, setPartialProxy] = useState<string>(proxy)
   const [gpuEnabled, setGpuEnabled] = useState<boolean>(false)
@@ -91,12 +93,13 @@ const Advanced = () => {
       const settings = await readSettings()
       setGpuEnabled(settings.run_mode === 'gpu' && settings.gpus?.length > 0)
       setGpusInUse(settings.gpus_in_use || [])
+      setVulkanEnabled(settings.vulkan || false)
       if (settings.gpus) {
         setGpuList(settings.gpus)
       }
     }
     setUseGpuIfPossible()
-  }, [readSettings])
+  }, [readSettings, setGpuList, setGpuEnabled, setGpusInUse, setVulkanEnabled])

   const clearLogs = async () => {
     if (await fs.existsSync(`file://logs`)) {
@@ -110,14 +113,21 @@ const Advanced = () => {
   }

   const handleGPUChange = (gpuId: string) => {
+    // TODO detect current use GPU nvidia or AMD
     let updatedGpusInUse = [...gpusInUse]
     if (updatedGpusInUse.includes(gpuId)) {
       updatedGpusInUse = updatedGpusInUse.filter((id) => id !== gpuId)
       if (gpuEnabled && updatedGpusInUse.length === 0) {
+        // Vulkan support only allow 1 active device at a time
+        if (vulkanEnabled) {
+          updatedGpusInUse = []
+        }
         updatedGpusInUse.push(gpuId)
       }
     } else {
+      // Vulkan support only allow 1 active device at a time
+      if (vulkanEnabled) {
+        updatedGpusInUse = []
+      }
       updatedGpusInUse.push(gpuId)
     }
     setGpusInUse(updatedGpusInUse)
@@ -173,8 +183,8 @@ const Advanced = () => {
             </h6>
           </div>
           <p className="pr-8 leading-relaxed">
-            Enable to enhance model performance by utilizing your devices
-            GPU for acceleration. Read{' '}
+            Enable to enhance model performance by utilizing your GPU
+            devices for acceleration. Read{' '}
             <span>
               {' '}
               <span
@@ -202,7 +212,7 @@ const Advanced = () => {
                   className="max-w-[240px]"
                 >
                   <span>
-                    Disabling GPU Acceleration may result in reduced
+                    Disabling NVIDIA GPU Acceleration may result in reduced
                     performance. It is recommended to keep this enabled for
                     optimal user experience.
                   </span>
@@ -214,7 +224,7 @@ const Advanced = () => {
             <Tooltip>
               <TooltipTrigger>
                 <Switch
-                  disabled={gpuList.length === 0}
+                  disabled={gpuList.length === 0 || vulkanEnabled}
                   checked={gpuEnabled}
                   onCheckedChange={(e) => {
                     if (e === true) {
@@ -259,7 +269,9 @@ const Advanced = () => {
             </Tooltip>
           </div>
           <div className="mt-2 w-full rounded-lg bg-secondary p-4">
-            <label className="mb-1 inline-block font-medium">Choose GPU</label>
+            <label className="mb-1 inline-block font-medium">
+              Choose device(s)
+            </label>
             <Select
               disabled={gpuList.length === 0 || !gpuEnabled}
               value={selectedGpu.join()}
@@ -274,12 +286,16 @@ const Advanced = () => {
               <SelectPortal>
                 <SelectContent className="w-[400px] px-1 pb-2">
                   <SelectGroup>
-                    <SelectLabel>Nvidia</SelectLabel>
+                    <SelectLabel>
+                      {vulkanEnabled ? 'Vulkan Supported GPUs' : 'Nvidia'}
+                    </SelectLabel>
                     <div className="px-4 pb-2">
                       <div className="rounded-lg bg-secondary p-3">
                         {gpuList
                           .filter((gpu) =>
-                            gpu.name?.toLowerCase().includes('nvidia')
+                            vulkanEnabled
+                              ? gpu.name
+                              : gpu.name?.toLowerCase().includes('nvidia')
                           )
                           .map((gpu) => (
                             <div
@@ -299,7 +315,9 @@ const Advanced = () => {
                               htmlFor={`gpu-${gpu.id}`}
                             >
                               <span>{gpu.name}</span>
-                              <span>{gpu.vram}MB VRAM</span>
+                              {!vulkanEnabled && (
+                                <span>{gpu.vram}MB VRAM</span>
+                              )}
                             </label>
                           </div>
                         ))}
@@ -328,6 +346,37 @@ const Advanced = () => {
           </div>
         )}

+        {/* Vulkan for AMD GPU/ APU and Intel Arc GPU */}
+        {!isMac && experimentalFeature && (
+          <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
+            <div className="flex-shrink-0 space-y-1.5">
+              <div className="flex gap-x-2">
+                <h6 className="text-sm font-semibold capitalize">
+                  Vulkan Support
+                </h6>
+              </div>
+              <p className="text-xs leading-relaxed">
+                Enable Vulkan with AMD GPU/APU and Intel Arc GPU for better model
+                performance (reload needed).
+              </p>
+            </div>
+
+            <Switch
+              checked={vulkanEnabled}
+              onCheckedChange={(e) => {
+                toaster({
+                  title: 'Reload',
+                  description:
+                    'Vulkan settings updated. Reload now to apply the changes.',
+                })
+                stopModel()
+                saveSettings({ vulkan: e, gpusInUse: [] })
+                setVulkanEnabled(e)
+              }}
+            />
+          </div>
+        )}
+
         <DataFolder />
         {/* Proxy */}
         <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">