diff --git a/.gitignore b/.gitignore
index 24748e08c..62878011e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,4 +29,5 @@ extensions/inference-nitro-extension/bin/*/*.exp
 extensions/inference-nitro-extension/bin/*/*.lib
 extensions/inference-nitro-extension/bin/saved-*
 extensions/inference-nitro-extension/bin/*.tar.gz
-
+extensions/inference-nitro-extension/bin/vulkaninfoSDK.exe
+extensions/inference-nitro-extension/bin/vulkaninfo
diff --git a/extensions/inference-nitro-extension/download.bat b/extensions/inference-nitro-extension/download.bat
index 22e1c85b3..2ef3165c1 100644
--- a/extensions/inference-nitro-extension/download.bat
+++ b/extensions/inference-nitro-extension/download.bat
@@ -1,3 +1,3 @@
 @echo off
 set /p NITRO_VERSION=<./bin/version.txt
-.\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64-cuda-12-0.tar.gz -e --strip 1 -o ./bin/win-cuda-12-0 && .\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64-cuda-11-7.tar.gz -e --strip 1 -o ./bin/win-cuda-11-7 && .\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64.tar.gz -e --strip 1 -o ./bin/win-cpu
+.\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64-cuda-12-0.tar.gz -e --strip 1 -o ./bin/win-cuda-12-0 && .\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64-cuda-11-7.tar.gz -e --strip 1 -o ./bin/win-cuda-11-7 && .\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64.tar.gz -e --strip 1 -o ./bin/win-cpu && .\node_modules\.bin\download https://github.com/janhq/nitro/releases/download/v%NITRO_VERSION%/nitro-%NITRO_VERSION%-win-amd64-vulkan.tar.gz -e --strip 1 -o ./bin/win-vulkan && .\node_modules\.bin\download https://delta.jan.ai/vulkaninfoSDK.exe -o ./bin
diff --git a/extensions/inference-nitro-extension/package.json b/extensions/inference-nitro-extension/package.json
index b65cf445f..ba6b473eb 100644
--- a/extensions/inference-nitro-extension/package.json
+++ b/extensions/inference-nitro-extension/package.json
@@ -8,7 +8,7 @@
   "license": "AGPL-3.0",
   "scripts": {
     "build": "tsc --module commonjs && rollup -c rollup.config.ts",
-    "downloadnitro:linux": "NITRO_VERSION=$(cat ./bin/version.txt) && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64.tar.gz -e --strip 1 -o ./bin/linux-cpu && chmod +x ./bin/linux-cpu/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64-cuda-12-0.tar.gz -e --strip 1 -o ./bin/linux-cuda-12-0 && chmod +x ./bin/linux-cuda-12-0/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64-cuda-11-7.tar.gz -e --strip 1 -o ./bin/linux-cuda-11-7 && chmod +x ./bin/linux-cuda-11-7/nitro",
+    "downloadnitro:linux": "NITRO_VERSION=$(cat ./bin/version.txt) && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64.tar.gz -e --strip 1 -o ./bin/linux-cpu && chmod +x ./bin/linux-cpu/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64-cuda-12-0.tar.gz -e --strip 1 -o ./bin/linux-cuda-12-0 && chmod +x ./bin/linux-cuda-12-0/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64-cuda-11-7.tar.gz -e --strip 1 -o ./bin/linux-cuda-11-7 && chmod +x ./bin/linux-cuda-11-7/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-linux-amd64-vulkan.tar.gz -e --strip 1 -o ./bin/linux-vulkan && chmod +x ./bin/linux-vulkan/nitro && download https://delta.jan.ai/vulkaninfo -o ./bin && chmod +x ./bin/vulkaninfo",
     "downloadnitro:darwin": "NITRO_VERSION=$(cat ./bin/version.txt) && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-mac-arm64.tar.gz -e --strip 1 -o ./bin/mac-arm64 && chmod +x ./bin/mac-arm64/nitro && download https://github.com/janhq/nitro/releases/download/v${NITRO_VERSION}/nitro-${NITRO_VERSION}-mac-amd64.tar.gz -e --strip 1 -o ./bin/mac-x64 && chmod +x ./bin/mac-x64/nitro",
     "downloadnitro:win32": "download.bat",
     "downloadnitro": "run-script-os",
diff --git a/extensions/inference-nitro-extension/src/node/nvidia.ts b/extensions/inference-nitro-extension/src/node/accelerator.ts
similarity index 55%
rename from extensions/inference-nitro-extension/src/node/nvidia.ts
rename to extensions/inference-nitro-extension/src/node/accelerator.ts
index 60468f0c9..972f88681 100644
--- a/extensions/inference-nitro-extension/src/node/nvidia.ts
+++ b/extensions/inference-nitro-extension/src/node/accelerator.ts
@@ -1,10 +1,11 @@
 import { writeFileSync, existsSync, readFileSync } from 'fs'
-import { exec } from 'child_process'
+import { exec, spawn } from 'child_process'
 import path from 'path'
-import { getJanDataFolderPath } from '@janhq/core/node'
+import { getJanDataFolderPath, log } from '@janhq/core/node'
 
 /**
  * Default GPU settings
+ * TODO: This needs to be refactored to support multiple accelerators
  **/
 const DEFALT_SETTINGS = {
   notify: true,
@@ -21,12 +22,17 @@ const DEFALT_SETTINGS = {
   gpu_highest_vram: '',
   gpus_in_use: [],
   is_initial: true,
+  // TODO: This needs to be set based on user toggle in settings
+  vulkan: {
+    enabled: true,
+    gpu_in_use: '1',
+  },
 }
 /**
  * Path to the settings file
  **/
-export const NVIDIA_INFO_FILE = path.join(
+export const GPU_INFO_FILE = path.join(
   getJanDataFolderPath(),
   'settings',
   'settings.json'
 )
@@ -52,10 +58,10 @@ export async function updateNvidiaInfo() {
   if (process.platform !== 'darwin') {
     let data
     try {
-      data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, 'utf-8'))
+      data = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
     } catch (error) {
       data = DEFALT_SETTINGS
-      writeFileSync(NVIDIA_INFO_FILE, JSON.stringify(data, null, 2))
+      writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2))
     }
     updateNvidiaDriverInfo()
     updateGpuInfo()
@@ -79,7 +85,7 @@ export async function updateNvidiaDriverInfo(): Promise<void> {
   exec(
     'nvidia-smi --query-gpu=driver_version --format=csv,noheader',
     (error, stdout) => {
-      let data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, 'utf-8'))
+      let data = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
 
       if (!error) {
         const firstLine = stdout.split('\n')[0].trim()
@@ -89,7 +95,7 @@ export async function updateNvidiaDriverInfo(): Promise<void> {
         data['nvidia_driver'].exist = false
       }
 
-      writeFileSync(NVIDIA_INFO_FILE, JSON.stringify(data, null, 2))
+      writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2))
       Promise.resolve()
     }
   )
@@ -158,42 +164,77 @@ export function updateCudaExistence(
  * Get GPU information
  */
 export async function updateGpuInfo(): Promise<void> {
-  exec(
-    'nvidia-smi --query-gpu=index,memory.total,name --format=csv,noheader,nounits',
-    (error, stdout) => {
-      let data = JSON.parse(readFileSync(NVIDIA_INFO_FILE, 'utf-8'))
-
-      if (!error) {
-        // Get GPU info and gpu has higher memory first
-        let highestVram = 0
-        let highestVramId = '0'
-        let gpus = stdout
-          .trim()
-          .split('\n')
-          .map((line) => {
-            let [id, vram, name] = line.split(', ')
-            vram = vram.replace(/\r/g, '')
-            if (parseFloat(vram) > highestVram) {
-              highestVram = parseFloat(vram)
-              highestVramId = id
-            }
-            return { id, vram, name }
-          })
-
-        data.gpus = gpus
-        data.gpu_highest_vram = highestVramId
-      } else {
-        data.gpus = []
-        data.gpu_highest_vram = ''
-      }
-
-      if (!data['gpus_in_use'] || data['gpus_in_use'].length === 0) {
-        data.gpus_in_use = [data['gpu_highest_vram']]
-      }
-
-      data = updateCudaExistence(data)
-      writeFileSync(NVIDIA_INFO_FILE, JSON.stringify(data, null, 2))
-      Promise.resolve()
-    }
-  )
+  let data = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
+
+  // Cuda
+  if (data['vulkan'] === true) {
+    // Vulkan
+    exec(
+      process.platform === 'win32'
+        ? `${__dirname}\\..\\bin\\vulkaninfoSDK.exe --summary`
+        : `${__dirname}/../bin/vulkaninfo --summary`,
+      (error, stdout) => {
+        if (!error) {
+          const output = stdout.toString()
+          log(output)
+          const gpuRegex = /GPU(\d+):(?:[\s\S]*?)deviceName\s*=\s*(.*)/g
+
+          let gpus = []
+          let match
+          while ((match = gpuRegex.exec(output)) !== null) {
+            const id = match[1]
+            const name = match[2]
+            gpus.push({ id, vram: 0, name })
+          }
+          data.gpus = gpus
+
+          if (!data['gpus_in_use'] || data['gpus_in_use'].length === 0) {
+            data.gpus_in_use = [data.gpus.length > 1 ? '1' : '0']
+          }
+
+          data = updateCudaExistence(data)
+          writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2))
+        }
+        Promise.resolve()
+      }
+    )
+  } else {
+    exec(
+      'nvidia-smi --query-gpu=index,memory.total,name --format=csv,noheader,nounits',
+      (error, stdout) => {
+        if (!error) {
+          log(stdout)
+          // Get GPU info and gpu has higher memory first
+          let highestVram = 0
+          let highestVramId = '0'
+          let gpus = stdout
+            .trim()
+            .split('\n')
+            .map((line) => {
+              let [id, vram, name] = line.split(', ')
+              vram = vram.replace(/\r/g, '')
+              if (parseFloat(vram) > highestVram) {
+                highestVram = parseFloat(vram)
+                highestVramId = id
+              }
+              return { id, vram, name }
+            })
+
+          data.gpus = gpus
+          data.gpu_highest_vram = highestVramId
+        } else {
+          data.gpus = []
+          data.gpu_highest_vram = ''
+        }
+
+        if (!data['gpus_in_use'] || data['gpus_in_use'].length === 0) {
+          data.gpus_in_use = [data['gpu_highest_vram']]
+        }
+
+        data = updateCudaExistence(data)
+        writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2))
+        Promise.resolve()
+      }
+    )
+  }
 }
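Note: the Vulkan branch above discovers GPUs by running the bundled vulkaninfo binary and scraping its --summary output with gpuRegex. The following standalone TypeScript sketch is not part of the patch and uses an invented sample output; it only illustrates what that parsing step yields.

    // Illustrative only: how the gpuRegex from accelerator.ts extracts device
    // ids and names from `vulkaninfo --summary` text. Sample output is made up.
    const sampleSummary = `
    GPU0:
            apiVersion    = 1.3.260
            deviceName    = NVIDIA GeForce RTX 3060
    GPU1:
            apiVersion    = 1.3.260
            deviceName    = AMD Radeon RX 6800
    `

    const gpuRegex = /GPU(\d+):(?:[\s\S]*?)deviceName\s*=\s*(.*)/g

    const gpus: { id: string; vram: number; name: string }[] = []
    let match: RegExpExecArray | null
    while ((match = gpuRegex.exec(sampleSummary)) !== null) {
      // vulkaninfo reports no VRAM figure here, so the patch stores vram: 0
      gpus.push({ id: match[1], vram: 0, name: match[2].trim() })
    }

    console.log(gpus)
    // e.g. [ { id: '0', vram: 0, name: 'NVIDIA GeForce RTX 3060' },
    //        { id: '1', vram: 0, name: 'AMD Radeon RX 6800' } ]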
diff --git a/extensions/inference-nitro-extension/src/node/execute.ts b/extensions/inference-nitro-extension/src/node/execute.ts
index 795c83ded..08baba0d5 100644
--- a/extensions/inference-nitro-extension/src/node/execute.ts
+++ b/extensions/inference-nitro-extension/src/node/execute.ts
@@ -1,10 +1,11 @@
 import { readFileSync } from 'fs'
 import * as path from 'path'
-import { NVIDIA_INFO_FILE } from './nvidia'
+import { GPU_INFO_FILE } from './accelerator'
 
 export interface NitroExecutableOptions {
   executablePath: string
   cudaVisibleDevices: string
+  vkVisibleDevices: string
 }
 /**
  * Find which executable file to run based on the current platform.
@@ -13,24 +14,30 @@ export interface NitroExecutableOptions {
 export const executableNitroFile = (): NitroExecutableOptions => {
   let binaryFolder = path.join(__dirname, '..', 'bin') // Current directory by default
   let cudaVisibleDevices = ''
+  let vkVisibleDevices = ''
   let binaryName = 'nitro'
   /**
    * The binary folder is different for each platform.
    */
   if (process.platform === 'win32') {
     /**
-     * For Windows: win-cpu, win-cuda-11-7, win-cuda-12-0
+     * For Windows: win-cpu, win-vulkan, win-cuda-11-7, win-cuda-12-0
      */
-    let nvidiaInfo = JSON.parse(readFileSync(NVIDIA_INFO_FILE, 'utf-8'))
-    if (nvidiaInfo['run_mode'] === 'cpu') {
+    let gpuInfo = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
+    if (gpuInfo['run_mode'] === 'cpu') {
       binaryFolder = path.join(binaryFolder, 'win-cpu')
     } else {
-      if (nvidiaInfo['cuda'].version === '11') {
+      if (gpuInfo['cuda']?.version === '11') {
         binaryFolder = path.join(binaryFolder, 'win-cuda-11-7')
       } else {
         binaryFolder = path.join(binaryFolder, 'win-cuda-12-0')
       }
-      cudaVisibleDevices = nvidiaInfo['gpus_in_use'].join(',')
+      cudaVisibleDevices = gpuInfo['gpus_in_use'].join(',')
+    }
+    if (gpuInfo['vulkan'] === true) {
+      binaryFolder = path.join(__dirname, '..', 'bin')
+      binaryFolder = path.join(binaryFolder, 'win-vulkan')
+      vkVisibleDevices = gpuInfo['gpus_in_use'].toString()
     }
     binaryName = 'nitro.exe'
   } else if (process.platform === 'darwin') {
@@ -44,22 +51,29 @@ export const executableNitroFile = (): NitroExecutableOptions => {
     }
   } else {
     /**
-     * For Linux: linux-cpu, linux-cuda-11-7, linux-cuda-12-0
+     * For Linux: linux-cpu, linux-vulkan, linux-cuda-11-7, linux-cuda-12-0
      */
-    let nvidiaInfo = JSON.parse(readFileSync(NVIDIA_INFO_FILE, 'utf-8'))
-    if (nvidiaInfo['run_mode'] === 'cpu') {
+    let gpuInfo = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
+    if (gpuInfo['run_mode'] === 'cpu') {
       binaryFolder = path.join(binaryFolder, 'linux-cpu')
     } else {
-      if (nvidiaInfo['cuda'].version === '11') {
+      if (gpuInfo['cuda']?.version === '11') {
        binaryFolder = path.join(binaryFolder, 'linux-cuda-11-7')
       } else {
        binaryFolder = path.join(binaryFolder, 'linux-cuda-12-0')
       }
-      cudaVisibleDevices = nvidiaInfo['gpus_in_use'].join(',')
+      cudaVisibleDevices = gpuInfo['gpus_in_use'].join(',')
+    }
+
+    if (gpuInfo['vulkan'] === true) {
+      binaryFolder = path.join(__dirname, '..', 'bin')
+      binaryFolder = path.join(binaryFolder, 'win-vulkan')
+      vkVisibleDevices = gpuInfo['gpus_in_use'].toString()
     }
   }
   return {
     executablePath: path.join(binaryFolder, binaryName),
     cudaVisibleDevices,
+    vkVisibleDevices,
   }
 }
diff --git a/extensions/inference-nitro-extension/src/node/index.ts b/extensions/inference-nitro-extension/src/node/index.ts
index 32a12cf8a..9b2684a6c 100644
--- a/extensions/inference-nitro-extension/src/node/index.ts
+++ b/extensions/inference-nitro-extension/src/node/index.ts
@@ -4,7 +4,7 @@ import { ChildProcessWithoutNullStreams, spawn } from 'child_process'
 import tcpPortUsed from 'tcp-port-used'
 import fetchRT from 'fetch-retry'
 import { log, getSystemResourceInfo } from '@janhq/core/node'
-import { getNitroProcessInfo, updateNvidiaInfo } from './nvidia'
+import { getNitroProcessInfo, updateNvidiaInfo } from './accelerator'
 import {
   Model,
   InferenceEngine,
@@ -345,6 +345,10 @@ function spawnNitroProcess(): Promise<any> {
       env: {
         ...process.env,
         CUDA_VISIBLE_DEVICES: executableOptions.cudaVisibleDevices,
+        // Vulkan - Support 1 device at a time for now
+        ...(executableOptions.vkVisibleDevices?.length > 0 && {
+          GGML_VULKAN_DEVICE: executableOptions.vkVisibleDevices[0],
+        }),
       },
     }
   )
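Note: the vkVisibleDevices value produced by executableNitroFile() reaches the nitro child process through the environment block added above, and only its first character is forwarded, matching the one-device-at-a-time comment. A rough, self-contained sketch of the resulting spawn call follows; the executable path, device list and empty argument array are placeholders rather than the extension's real values.

    // Illustrative only: how the options above translate into the child
    // process environment. All concrete values here are hypothetical.
    import { spawn } from 'child_process'

    const executableOptions = {
      executablePath: '/opt/jan/extensions/bin/linux-vulkan/nitro', // placeholder
      cudaVisibleDevices: '',
      vkVisibleDevices: '1', // gpus_in_use.toString(), e.g. '1' or '0,1'
    }

    const child = spawn(executableOptions.executablePath, [], {
      env: {
        ...process.env,
        CUDA_VISIBLE_DEVICES: executableOptions.cudaVisibleDevices,
        // Only the first listed Vulkan device is used for now
        ...(executableOptions.vkVisibleDevices?.length > 0 && {
          GGML_VULKAN_DEVICE: executableOptions.vkVisibleDevices[0],
        }),
      },
    })

    child.stdout.on('data', (chunk) => console.log(chunk.toString()))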
diff --git a/extensions/monitoring-extension/src/module.ts b/extensions/monitoring-extension/src/module.ts
index ea7319b47..27781a5d6 100644
--- a/extensions/monitoring-extension/src/module.ts
+++ b/extensions/monitoring-extension/src/module.ts
@@ -37,10 +37,10 @@ const getCurrentLoad = () =>
     }
     if (data.run_mode === 'gpu' && data.gpus_in_use.length > 0) {
       const gpuIds = data['gpus_in_use'].join(',')
-      if (gpuIds !== '') {
+      if (gpuIds !== '' && data['vulkan'] !== true) {
         exec(
           `nvidia-smi --query-gpu=index,name,temperature.gpu,utilization.gpu,memory.total,memory.free,utilization.memory --format=csv,noheader,nounits --id=${gpuIds}`,
-          (error, stdout, stderr) => {
+          (error, stdout, _) => {
            if (error) {
              console.error(`exec error: ${error}`)
              reject(error)
diff --git a/web/context/FeatureToggle.tsx b/web/context/FeatureToggle.tsx
index d8632f700..5a63eb66e 100644
--- a/web/context/FeatureToggle.tsx
+++ b/web/context/FeatureToggle.tsx
@@ -5,7 +5,9 @@ interface FeatureToggleContextType {
   ignoreSSL: boolean
   proxy: string
   proxyEnabled: boolean
+  vulkanEnabled: boolean
   setExperimentalFeature: (on: boolean) => void
+  setVulkanEnabled: (on: boolean) => void
   setIgnoreSSL: (on: boolean) => void
   setProxy: (value: string) => void
   setProxyEnabled: (on: boolean) => void
@@ -16,7 +18,9 @@ const initialContext: FeatureToggleContextType = {
   ignoreSSL: false,
   proxy: '',
   proxyEnabled: false,
+  vulkanEnabled: false,
   setExperimentalFeature: () => {},
+  setVulkanEnabled: () => {},
   setIgnoreSSL: () => {},
   setProxy: () => {},
   setProxyEnabled: () => {},
@@ -31,6 +35,7 @@ export default function FeatureToggleWrapper({
   children: ReactNode
 }) {
   const EXPERIMENTAL_FEATURE = 'experimentalFeature'
+  const VULKAN_ENABLED = 'vulkanEnabled'
   const IGNORE_SSL = 'ignoreSSLFeature'
   const HTTPS_PROXY_FEATURE = 'httpsProxyFeature'
   const PROXY_FEATURE_ENABLED = 'proxyFeatureEnabled'
@@ -38,6 +43,7 @@ export default function FeatureToggleWrapper({
   const [experimentalFeature, directSetExperimentalFeature] =
     useState(false)
   const [proxyEnabled, directSetProxyEnabled] = useState(false)
+  const [vulkanEnabled, directEnableVulkan] = useState(false)
   const [ignoreSSL, directSetIgnoreSSL] = useState(false)
   const [proxy, directSetProxy] = useState('')
@@ -57,6 +63,11 @@ export default function FeatureToggleWrapper({
     directSetExperimentalFeature(on)
   }
 
+  const setVulkanEnabled = (on: boolean) => {
+    localStorage.setItem(VULKAN_ENABLED, on ? 'true' : 'false')
+    directEnableVulkan(on)
+  }
+
   const setIgnoreSSL = (on: boolean) => {
     localStorage.setItem(IGNORE_SSL, on ? 'true' : 'false')
     directSetIgnoreSSL(on)
@@ -79,7 +90,9 @@ export default function FeatureToggleWrapper({
         ignoreSSL,
         proxy,
         proxyEnabled,
+        vulkanEnabled,
         setExperimentalFeature,
+        setVulkanEnabled,
         setIgnoreSSL,
         setProxy,
         setProxyEnabled,
diff --git a/web/hooks/useSettings.ts b/web/hooks/useSettings.ts
index ca84f6b79..9ff89827e 100644
--- a/web/hooks/useSettings.ts
+++ b/web/hooks/useSettings.ts
@@ -48,17 +48,33 @@ export const useSettings = () => {
     runMode,
     notify,
     gpusInUse,
+    vulkan,
   }: {
     runMode?: string | undefined
     notify?: boolean | undefined
     gpusInUse?: string[] | undefined
+    vulkan?: boolean | undefined
   }) => {
     const settingsFile = await joinPath(['file://settings', 'settings.json'])
     const settings = await readSettings()
     if (runMode != null) settings.run_mode = runMode
     if (notify != null) settings.notify = notify
     if (gpusInUse != null) settings.gpus_in_use = gpusInUse
+    if (vulkan != null) {
+      settings.vulkan = vulkan
+      // GPU enabled, set run_mode to 'gpu'
+      if (settings.vulkan) {
+        settings.run_mode = 'gpu'
+      } else {
+        settings.run_mode = settings.gpus?.length > 0 ? 'gpu' : 'cpu'
+      }
+    }
     await fs.writeFileSync(settingsFile, JSON.stringify(settings))
+
+    // Relaunch to apply settings
+    if (vulkan != null) {
+      window.location.reload()
+    }
   }
 
   return {
diff --git a/web/screens/Settings/Advanced/DataFolder/index.tsx b/web/screens/Settings/Advanced/DataFolder/index.tsx
index f9c2f440a..c11c49fa4 100644
--- a/web/screens/Settings/Advanced/DataFolder/index.tsx
+++ b/web/screens/Settings/Advanced/DataFolder/index.tsx
@@ -83,10 +83,7 @@ const DataFolder = () => {
       await window.core?.api?.getAppConfigurations()
     const currentJanDataFolder = appConfiguration.data_folder
     appConfiguration.data_folder = destinationPath
-    const { _, err } = await fs.syncFile(
-      currentJanDataFolder,
-      destinationPath
-    )
+    const { err } = await fs.syncFile(currentJanDataFolder, destinationPath)
     if (err) throw err
     await window.core?.api?.updateAppConfiguration(appConfiguration)
     console.debug(
diff --git a/web/screens/Settings/Advanced/index.tsx b/web/screens/Settings/Advanced/index.tsx
index 49718120e..60812a350 100644
--- a/web/screens/Settings/Advanced/index.tsx
+++ b/web/screens/Settings/Advanced/index.tsx
@@ -58,6 +58,8 @@ const Advanced = () => {
     setProxy,
     proxyEnabled,
     setProxyEnabled,
+    vulkanEnabled,
+    setVulkanEnabled,
   } = useContext(FeatureToggleContext)
   const [partialProxy, setPartialProxy] = useState(proxy)
   const [gpuEnabled, setGpuEnabled] = useState(false)
@@ -91,12 +93,13 @@ const Advanced = () => {
       const settings = await readSettings()
       setGpuEnabled(settings.run_mode === 'gpu' && settings.gpus?.length > 0)
       setGpusInUse(settings.gpus_in_use || [])
+      setVulkanEnabled(settings.vulkan || false)
       if (settings.gpus) {
         setGpuList(settings.gpus)
       }
     }
     setUseGpuIfPossible()
-  }, [readSettings])
+  }, [readSettings, setGpuList, setGpuEnabled, setGpusInUse, setVulkanEnabled])
 
   const clearLogs = async () => {
     if (await fs.existsSync(`file://logs`)) {
@@ -110,14 +113,21 @@ const Advanced = () => {
   }
 
   const handleGPUChange = (gpuId: string) => {
-    // TODO detect current use GPU nvidia or AMD
     let updatedGpusInUse = [...gpusInUse]
     if (updatedGpusInUse.includes(gpuId)) {
      updatedGpusInUse = updatedGpusInUse.filter((id) => id !== gpuId)
      if (gpuEnabled && updatedGpusInUse.length === 0) {
+        // Vulkan support only allow 1 active device at a time
+        if (vulkanEnabled) {
+          updatedGpusInUse = []
+        }
        updatedGpusInUse.push(gpuId)
      }
     } else {
+      // Vulkan support only allow 1 active device at a time
+      if (vulkanEnabled) {
+        updatedGpusInUse = []
+      }
      updatedGpusInUse.push(gpuId)
     }
     setGpusInUse(updatedGpusInUse)
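Note: the handler above keeps Vulkan limited to a single active device by clearing the current selection before pushing the newly chosen id. The same rule written as an illustrative pure function, not part of the patch:

    // Illustrative only: the GPU selection rule as a pure function.
    function nextGpusInUse(
      current: string[],
      gpuId: string,
      gpuEnabled: boolean,
      vulkanEnabled: boolean
    ): string[] {
      let updated = [...current]
      if (updated.includes(gpuId)) {
        updated = updated.filter((id) => id !== gpuId)
        if (gpuEnabled && updated.length === 0) {
          // GPU mode stays on, so keep at least one device selected
          if (vulkanEnabled) updated = []
          updated.push(gpuId)
        }
      } else {
        // Vulkan supports only one active device at a time
        if (vulkanEnabled) updated = []
        updated.push(gpuId)
      }
      return updated
    }

    // nextGpusInUse(['0'], '1', true, true)  -> ['1']
    // nextGpusInUse(['0'], '1', true, false) -> ['0', '1']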

-                  Enable to enhance model performance by utilizing your devices
-                  GPU for acceleration. Read{' '}
+                  Enable to enhance model performance by utilizing your GPU
+                  devices for acceleration. Read{' '}
                   {' '}
                 {
                   className="max-w-[240px]"
                 >
-                  Disabling GPU Acceleration may result in reduced
+                  Disabling NVIDIA GPU Acceleration may result in reduced
                   performance. It is recommended to keep this enabled for
                   optimal user experience.
@@ -214,7 +224,7 @@ const Advanced = () => {
                 {
                   if (e === true) {
@@ -259,7 +269,9 @@ const Advanced = () => {
-
+
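Note: taken together, these changes extend the settings/settings.json file that accelerator.ts, useSettings.ts and module.ts all read. A sketch of that file's shape after the patch, based on the DEFALT_SETTINGS object and the fields the code references; every value below is an invented example, not real data.

    // Illustrative only: approximate shape of <Jan data folder>/settings/settings.json.
    const exampleSettings = {
      notify: true,
      run_mode: 'gpu',
      nvidia_driver: { exist: true, version: '545.29' },
      cuda: { exist: true, version: '12' },
      gpus: [{ id: '0', vram: '8192', name: 'NVIDIA GeForce RTX 3060' }],
      gpu_highest_vram: '0',
      gpus_in_use: ['0'],
      is_initial: false,
      // Added by this patch; the web-side saveSettings stores a boolean under
      // the same key when the user toggles Vulkan in Advanced settings.
      vulkan: {
        enabled: true,
        gpu_in_use: '1',
      },
    }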