feat: better hardware setting (#4471)

* feat: better hardware setting

* chore: update layout

* feat: better hardware setting

* chore: fix title section

* chore: added hardware engine management

* chore: integrate gpus and enable setting active gpus

* chore: update ram and vram calculation

* chore: update calculation of used vram and ram

* fix: set active gpus

* chore: fix progress bar spacing

* chore: always update cache vram gpu

* chore: update cpu usage percentage

* chore: fix cpu usage type

* chore: update ram and cpu usage in getSystemMonitor using the new hardware management api

* test: update test case data using hardware management extension

* chore: resolve conflict lock json

* chore: cleanup app services

* chore: update OperatingSystemInfo type

* chore: update app service

* chore: show gpu list on system monitor

* chore: remove monitoring extension

* chore: update test case app service

* chore: remove unused hooks useGpusSetting

* chore: remove monitoring from source index

* chore: fix test core

* chore: update gpu and cpu info on engine management ext

* chore: fix app service test

* chore: update appService test to include cpu info

* chore: show or hide gpus on system monitor based on activated gpus

* chore: remove unused run_mode

* chore: remove tensorrt

* chore: update check gpu run_mode

* chore: handle undefined gpus

* chore: cleanup PR

* chore: cleanup process node error

* chore: fix type
Faisal Amir 2025-02-03 22:01:08 +07:00 committed by GitHub
parent 72b9aaeba1
commit daa7c0ca21
63 changed files with 1095 additions and 1580 deletions

View File

@ -12,6 +12,7 @@ export enum ExtensionTypeEnum {
SystemMonitoring = 'systemMonitoring',
HuggingFace = 'huggingFace',
Engine = 'engine',
Hardware = 'hardware',
}
export interface ExtensionType {

View File

@ -0,0 +1,26 @@
import { HardwareInformation } from '../../types'
import { BaseExtension, ExtensionTypeEnum } from '../extension'
/**
* Hardware management extension. Provides hardware information and GPU activation.
* @abstract
* @extends BaseExtension
*/
export abstract class HardwareManagementExtension extends BaseExtension {
type(): ExtensionTypeEnum | undefined {
return ExtensionTypeEnum.Hardware
}
/**
* @returns A Promise that resolves to the system hardware information.
*/
abstract getHardware(): Promise<HardwareInformation>
/**
* @returns A Promise that resolves to the result of setting the active GPUs.
*/
abstract setAvtiveGpu(data: { gpus: number[] }): Promise<{
message: string
activated_gpus: number[]
}>
}
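
A minimal sketch of a concrete subclass, to illustrate the contract the abstract class above defines. The stub values are placeholders, not part of this commit; the bundled implementation further down in this diff fills them in by querying the local API.

```typescript
import { HardwareManagementExtension, HardwareInformation } from '@janhq/core'

// Sketch only: a real extension would return live data instead of these stubs.
class StubHardwareExtension extends HardwareManagementExtension {
  async onLoad() {}
  onUnload() {}

  async getHardware(): Promise<HardwareInformation> {
    return {
      cpu: { arch: 'x64', cores: 8, instructions: ['avx2'], model: 'stub', usage: 0 },
      gpus: [],
      os: { name: 'stub', version: '0' },
      power: { battery_life: 100, charging_status: 'charging', is_power_saving: false },
      ram: { available: 0, total: 0, type: 'stub' },
      storage: { available: 0, total: 0, type: 'stub' },
    }
  }

  async setAvtiveGpu(data: { gpus: number[] }) {
    // Echo back the requested GPU indexes as "activated".
    return { message: 'ok', activated_gpus: data.gpus }
  }
}
```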

View File

@ -1,6 +1,5 @@
import { ConversationalExtension } from './index';
import { InferenceExtension } from './index';
import { MonitoringExtension } from './index';
import { AssistantExtension } from './index';
import { ModelExtension } from './index';
import * as Engines from './index';
@ -14,10 +13,6 @@ describe('index.ts exports', () => {
expect(InferenceExtension).toBeDefined();
});
test('should export MonitoringExtension', () => {
expect(MonitoringExtension).toBeDefined();
});
test('should export AssistantExtension', () => {
expect(AssistantExtension).toBeDefined();
});
@ -29,4 +24,4 @@ describe('index.ts exports', () => {
test('should export Engines', () => {
expect(Engines).toBeDefined();
});
});
});

View File

@ -9,10 +9,7 @@ export { ConversationalExtension } from './conversational'
*/
export { InferenceExtension } from './inference'
/**
* Monitoring extension for system monitoring.
*/
export { MonitoringExtension } from './monitoring'
/**
* Assistant extension for managing assistants.
@ -33,3 +30,8 @@ export * from './engines'
* Engines Management
*/
export * from './enginesManagement'
/**
* Hardware Management
*/
export * from './hardwareManagement'

View File

@ -1,42 +0,0 @@
import { ExtensionTypeEnum } from '../extension';
import { MonitoringExtension } from './monitoring';
it('should have the correct type', () => {
class TestMonitoringExtension extends MonitoringExtension {
getGpuSetting(): Promise<GpuSetting | undefined> {
throw new Error('Method not implemented.');
}
getResourcesInfo(): Promise<any> {
throw new Error('Method not implemented.');
}
getCurrentLoad(): Promise<any> {
throw new Error('Method not implemented.');
}
getOsInfo(): Promise<OperatingSystemInfo> {
throw new Error('Method not implemented.');
}
}
const monitoringExtension = new TestMonitoringExtension();
expect(monitoringExtension.type()).toBe(ExtensionTypeEnum.SystemMonitoring);
});
it('should create an instance of MonitoringExtension', () => {
class TestMonitoringExtension extends MonitoringExtension {
getGpuSetting(): Promise<GpuSetting | undefined> {
throw new Error('Method not implemented.');
}
getResourcesInfo(): Promise<any> {
throw new Error('Method not implemented.');
}
getCurrentLoad(): Promise<any> {
throw new Error('Method not implemented.');
}
getOsInfo(): Promise<OperatingSystemInfo> {
throw new Error('Method not implemented.');
}
}
const monitoringExtension = new TestMonitoringExtension();
expect(monitoringExtension).toBeInstanceOf(MonitoringExtension);
});

View File

@ -1,20 +0,0 @@
import { BaseExtension, ExtensionTypeEnum } from '../extension'
import { GpuSetting, MonitoringInterface, OperatingSystemInfo } from '../../types'
/**
* Monitoring extension for system monitoring.
* @extends BaseExtension
*/
export abstract class MonitoringExtension extends BaseExtension implements MonitoringInterface {
/**
* Monitoring extension type.
*/
type(): ExtensionTypeEnum | undefined {
return ExtensionTypeEnum.SystemMonitoring
}
abstract getGpuSetting(): Promise<GpuSetting | undefined>
abstract getResourcesInfo(): Promise<any>
abstract getCurrentLoad(): Promise<any>
abstract getOsInfo(): Promise<OperatingSystemInfo>
}

View File

@ -0,0 +1,55 @@
export type Cpu = {
arch: string
cores: number
instructions: string[]
model: string
usage: number
}
export type GpuAdditionalInformation = {
compute_cap: string
driver_version: string
}
export type Gpu = {
activated: boolean
additional_information: GpuAdditionalInformation
free_vram: number
id: string
name: string
total_vram: number
uuid: string
version: string
}
export type Os = {
name: string
version: string
}
export type Power = {
battery_life: number
charging_status: string
is_power_saving: boolean
}
export type Ram = {
available: number
total: number
type: string
}
export type Storage = {
available: number
total: number
type: string
}
export type HardwareInformation = {
cpu: Cpu
gpus: Gpu[]
os: Os
power: Power
ram: Ram
storage: Storage
}
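
The commit's "calculate ram and vram" changes derive usage figures from these shapes. A small sketch of how that arithmetic could look; the helper names below are illustrative and not part of this commit.

```typescript
import { HardwareInformation } from '@janhq/core'

// Illustrative helpers: usage percentages computed from the types defined above.
export const ramUsagePercent = (hw: HardwareInformation): number =>
  hw.ram.total > 0 ? ((hw.ram.total - hw.ram.available) / hw.ram.total) * 100 : 0

export const vramUsagePercent = (hw: HardwareInformation): number => {
  // Only activated GPUs contribute to the VRAM figures shown in the UI.
  const active = hw.gpus.filter((gpu) => gpu.activated)
  const total = active.reduce((sum, gpu) => sum + gpu.total_vram, 0)
  const free = active.reduce((sum, gpu) => sum + gpu.free_vram, 0)
  return total > 0 ? ((total - free) / total) * 100 : 0
}
```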

View File

@ -4,7 +4,6 @@ import * as model from './model';
import * as thread from './thread';
import * as message from './message';
import * as inference from './inference';
import * as monitoring from './monitoring';
import * as file from './file';
import * as config from './config';
import * as huggingface from './huggingface';
@ -18,7 +17,6 @@ import * as setting from './setting';
expect(thread).toBeDefined();
expect(message).toBeDefined();
expect(inference).toBeDefined();
expect(monitoring).toBeDefined();
expect(file).toBeDefined();
expect(config).toBeDefined();
expect(huggingface).toBeDefined();

View File

@ -3,7 +3,6 @@ export * from './model'
export * from './thread'
export * from './message'
export * from './inference'
export * from './monitoring'
export * from './file'
export * from './config'
export * from './huggingface'
@ -11,3 +10,4 @@ export * from './miscellaneous'
export * from './api'
export * from './setting'
export * from './engine'
export * from './hardware'

View File

@ -1,33 +1,24 @@
export type SystemResourceInfo = {
memAvailable: number
}
export type RunMode = 'cpu' | 'gpu'
export type GpuSetting = {
notify: boolean
run_mode: RunMode
nvidia_driver: {
exist: boolean
version: string
}
cuda: {
exist: boolean
version: string
}
gpus: GpuSettingInfo[]
gpu_highest_vram: string
gpus_in_use: string[]
is_initial: boolean
// TODO: This needs to be set based on user toggle in settings
vulkan: boolean
cpu?: any
}
export type GpuSettingInfo = {
id: string
vram: string
name: string
arch?: string
activated: boolean;
free_vram: number;
id: string;
name: string;
total_vram: number;
uuid: string;
version: string;
}
export type SystemInformation = {
@ -42,9 +33,6 @@ export type SupportedPlatform = SupportedPlatformTuple[number]
export type OperatingSystemInfo = {
platform: SupportedPlatform | 'unknown'
arch: string
release: string
machine: string
version: string
totalMem: number
freeMem: number
}

View File

@ -1,13 +0,0 @@
import * as monitoringInterface from './monitoringInterface'
import * as resourceInfo from './resourceInfo'
import * as index from './index'
it('should re-export all symbols from monitoringInterface and resourceInfo', () => {
for (const key in monitoringInterface) {
expect(index[key]).toBe(monitoringInterface[key])
}
for (const key in resourceInfo) {
expect(index[key]).toBe(resourceInfo[key])
}
})

View File

@ -1,2 +0,0 @@
export * from './monitoringInterface'
export * from './resourceInfo'

View File

@ -1,29 +0,0 @@
import { GpuSetting, OperatingSystemInfo } from '../miscellaneous'
/**
* Monitoring extension for system monitoring.
* @extends BaseExtension
*/
export interface MonitoringInterface {
/**
* Returns information about the system resources.
* @returns {Promise<any>} A promise that resolves with the system resources information.
*/
getResourcesInfo(): Promise<any>
/**
* Returns the current system load.
* @returns {Promise<any>} A promise that resolves with the current system load.
*/
getCurrentLoad(): Promise<any>
/**
* Returns the GPU configuration.
*/
getGpuSetting(): Promise<GpuSetting | undefined>
/**
* Returns information about the operating system.
*/
getOsInfo(): Promise<OperatingSystemInfo>
}

View File

@ -1,6 +0,0 @@
export type ResourceInfo = {
mem: {
totalMemory: number
usedMemory: number
}
}

View File

@ -29,12 +29,10 @@
},
"dependencies": {
"@janhq/core": "../../core/package.tgz",
"cpu-instructions": "^0.0.13",
"ky": "^1.7.2",
"p-queue": "^8.0.1"
},
"bundledDependencies": [
"cpu-instructions",
"@janhq/core"
],
"engines": {

View File

@ -13,6 +13,7 @@ export default defineConfig([
NODE: JSON.stringify(`${pkgJson.name}/${pkgJson.node}`),
API_URL: JSON.stringify('http://127.0.0.1:39291'),
SOCKET_URL: JSON.stringify('ws://127.0.0.1:39291'),
PLATFORM: JSON.stringify(process.platform),
CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.49'),
DEFAULT_REMOTE_ENGINES: JSON.stringify(engines),
DEFAULT_REMOTE_MODELS: JSON.stringify(models),
@ -36,15 +37,4 @@ export default defineConfig([
CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.49'),
},
},
{
input: 'src/node/cpuInfo.ts',
output: {
format: 'cjs',
file: 'dist/node/cpuInfo.js',
},
external: ['cpu-instructions'],
resolve: {
extensions: ['.ts', '.js', '.svg'],
},
},
])

View File

@ -1,5 +1,6 @@
declare const API_URL: string
declare const CORTEX_ENGINE_VERSION: string
declare const PLATFORM: string
declare const SOCKET_URL: string
declare const NODE: string
declare const DEFAULT_REQUEST_PAYLOAD_TRANSFORM: string

View File

@ -19,6 +19,7 @@ import ky, { HTTPError } from 'ky'
import PQueue from 'p-queue'
import { EngineError } from './error'
import { getJanDataFolderPath } from '@janhq/core'
import { engineVariant } from './utils'
interface ModelList {
data: Model[]
@ -276,11 +277,7 @@ export default class JSONEngineManagementExtension extends EngineManagementExten
error instanceof EngineError
) {
const systemInfo = await systemInformation()
const variant = await executeOnMain(
NODE,
'engineVariant',
systemInfo.gpuSetting
)
const variant = await engineVariant(systemInfo.gpuSetting)
await this.setDefaultEngineVariant(InferenceEngine.cortex_llamacpp, {
variant: variant,
version: `${CORTEX_ENGINE_VERSION}`,

View File

@ -1,27 +0,0 @@
import { cpuInfo } from 'cpu-instructions'
// Check the CPU info and determine the supported instruction set
const info = cpuInfo.cpuInfo().some((e) => e.toUpperCase() === 'AVX512')
? 'avx512'
: cpuInfo.cpuInfo().some((e) => e.toUpperCase() === 'AVX2')
? 'avx2'
: cpuInfo.cpuInfo().some((e) => e.toUpperCase() === 'AVX')
? 'avx'
: 'noavx'
// Send the result and wait for confirmation before exiting
new Promise<void>((resolve, reject) => {
// @ts-ignore
process.send(info, (error: Error | null) => {
if (error) {
reject(error)
} else {
resolve()
}
})
})
.then(() => process.exit(0))
.catch((error) => {
console.error('Failed to send info:', error)
process.exit(1)
})

View File

@ -1,7 +1,6 @@
import { describe, expect, it } from '@jest/globals'
import engine from './index'
import { GpuSetting } from '@janhq/core/node'
import { cpuInfo } from 'cpu-instructions'
import { GpuSetting } from '@janhq/core'
import { fork } from 'child_process'
let testSettings: GpuSetting = {
@ -23,22 +22,12 @@ let testSettings: GpuSetting = {
}
const originalPlatform = process.platform
jest.mock('cpu-instructions', () => ({
cpuInfo: {
cpuInfo: jest.fn(),
},
}))
let mockCpuInfo = cpuInfo.cpuInfo as jest.Mock
mockCpuInfo.mockReturnValue([])
jest.mock('@janhq/core/node', () => ({
jest.mock('@janhq/core', () => ({
appResourcePath: () => '.',
log: jest.fn(),
}))
jest.mock('child_process', () => ({
fork: jest.fn(),
}))
const mockFork = fork as jest.Mock
describe('test executable cortex file', () => {
afterAll(function () {
@ -48,14 +37,7 @@ describe('test executable cortex file', () => {
})
it('executes on MacOS', () => {
const mockProcess = {
on: jest.fn((event, callback) => {
if (event === 'message') {
callback('noavx')
}
}),
send: jest.fn(),
}
Object.defineProperty(process, 'platform', {
value: 'darwin',
})
@ -63,7 +45,7 @@ describe('test executable cortex file', () => {
value: 'arm64',
})
mockFork.mockReturnValue(mockProcess)
expect(engine.engineVariant(testSettings)).resolves.toEqual('mac-arm64')
})
@ -83,7 +65,7 @@ describe('test executable cortex file', () => {
}),
send: jest.fn(),
}
mockFork.mockReturnValue(mockProcess)
Object.defineProperty(process, 'arch', {
value: 'x64',
})
@ -107,7 +89,6 @@ describe('test executable cortex file', () => {
}),
send: jest.fn(),
}
mockFork.mockReturnValue(mockProcess)
expect(engine.engineVariant()).resolves.toEqual('windows-amd64-avx')
})
@ -145,7 +126,6 @@ describe('test executable cortex file', () => {
}),
send: jest.fn(),
}
mockFork.mockReturnValue(mockProcess)
expect(engine.engineVariant(settings)).resolves.toEqual(
'windows-amd64-avx2-cuda-11-7'
@ -176,26 +156,11 @@ describe('test executable cortex file', () => {
},
],
}
mockFork.mockReturnValue({
on: jest.fn((event, callback) => {
if (event === 'message') {
callback('noavx')
}
}),
send: jest.fn(),
})
expect(engine.engineVariant(settings)).resolves.toEqual(
'windows-amd64-noavx-cuda-12-0'
)
mockFork.mockReturnValue({
on: jest.fn((event, callback) => {
if (event === 'message') {
callback('avx512')
}
}),
send: jest.fn(),
})
expect(engine.engineVariant(settings)).resolves.toEqual(
'windows-amd64-avx2-cuda-12-0'
)
@ -209,14 +174,6 @@ describe('test executable cortex file', () => {
...testSettings,
run_mode: 'cpu',
}
mockFork.mockReturnValue({
on: jest.fn((event, callback) => {
if (event === 'message') {
callback('noavx')
}
}),
send: jest.fn(),
})
expect(engine.engineVariant()).resolves.toEqual('linux-amd64-noavx')
})
@ -245,16 +202,6 @@ describe('test executable cortex file', () => {
},
],
}
mockFork.mockReturnValue({
on: jest.fn((event, callback) => {
if (event === 'message') {
callback('avx512')
}
}),
send: jest.fn(),
})
expect(engine.engineVariant(settings)).resolves.toBe(
'linux-amd64-avx2-cuda-11-7'
)
@ -284,14 +231,7 @@ describe('test executable cortex file', () => {
},
],
}
mockFork.mockReturnValue({
on: jest.fn((event, callback) => {
if (event === 'message') {
callback('avx2')
}
}),
send: jest.fn(),
})
expect(engine.engineVariant(settings)).resolves.toEqual(
'linux-amd64-avx2-cuda-12-0'
@ -310,15 +250,6 @@ describe('test executable cortex file', () => {
const cpuInstructions = ['avx512', 'avx2', 'avx', 'noavx']
cpuInstructions.forEach((instruction) => {
mockFork.mockReturnValue({
on: jest.fn((event, callback) => {
if (event === 'message') {
callback(instruction)
}
}),
send: jest.fn(),
})
expect(engine.engineVariant(settings)).resolves.toEqual(
`linux-amd64-${instruction}`
)
@ -335,14 +266,7 @@ describe('test executable cortex file', () => {
}
const cpuInstructions = ['avx512', 'avx2', 'avx', 'noavx']
cpuInstructions.forEach((instruction) => {
mockFork.mockReturnValue({
on: jest.fn((event, callback) => {
if (event === 'message') {
callback(instruction)
}
}),
send: jest.fn(),
})
expect(engine.engineVariant(settings)).resolves.toEqual(
`windows-amd64-${instruction}`
)
@ -376,14 +300,7 @@ describe('test executable cortex file', () => {
}
const cpuInstructions = ['avx512', 'avx2', 'avx', 'noavx']
cpuInstructions.forEach((instruction) => {
mockFork.mockReturnValue({
on: jest.fn((event, callback) => {
if (event === 'message') {
callback(instruction)
}
}),
send: jest.fn(),
})
expect(engine.engineVariant(settings)).resolves.toEqual(
`windows-amd64-${instruction === 'avx512' || instruction === 'avx2' ? 'avx2' : 'noavx'}-cuda-12-0`
)
@ -417,14 +334,7 @@ describe('test executable cortex file', () => {
],
}
cpuInstructions.forEach((instruction) => {
mockFork.mockReturnValue({
on: jest.fn((event, callback) => {
if (event === 'message') {
callback(instruction)
}
}),
send: jest.fn(),
})
expect(engine.engineVariant(settings)).resolves.toEqual(
`linux-amd64-${instruction === 'avx512' || instruction === 'avx2' ? 'avx2' : 'noavx'}-cuda-12-0`
)
@ -459,14 +369,7 @@ describe('test executable cortex file', () => {
],
}
cpuInstructions.forEach((instruction) => {
mockFork.mockReturnValue({
on: jest.fn((event, callback) => {
if (event === 'message') {
callback(instruction)
}
}),
send: jest.fn(),
})
expect(engine.engineVariant(settings)).resolves.toEqual(
`linux-amd64-vulkan`
)

View File

@ -2,111 +2,10 @@ import * as path from 'path'
import {
appResourcePath,
getJanDataFolderPath,
GpuSetting,
log,
} from '@janhq/core/node'
import { fork } from 'child_process'
import { mkdir, readdir, symlink } from 'fs/promises'
/**
* The GPU runMode that will be set - either 'vulkan', 'cuda', or empty for cpu.
* @param settings
* @returns
*/
const gpuRunMode = (settings?: GpuSetting): string => {
if (process.platform === 'darwin')
// MacOS now has universal binaries
return ''
if (!settings) return ''
return settings.vulkan === true || settings.run_mode === 'cpu' ? '' : 'cuda'
}
/**
* The OS & architecture that the current process is running on.
* @returns win, mac-x64, mac-arm64, or linux
*/
const os = (): string => {
return process.platform === 'win32'
? 'windows-amd64'
: process.platform === 'darwin'
? process.arch === 'arm64'
? 'mac-arm64'
: 'mac-amd64'
: 'linux-amd64'
}
/**
* The CUDA version that will be set - either '11-7' or '12-0'.
* @param settings
* @returns
*/
const cudaVersion = (settings?: GpuSetting): '11-7' | '12-0' | undefined => {
const isUsingCuda =
settings?.vulkan !== true &&
settings?.run_mode === 'gpu' &&
!os().includes('mac')
if (!isUsingCuda) return undefined
return settings?.cuda?.version === '11' ? '11-7' : '12-0'
}
/**
* The CPU instructions that will be set - either 'avx512', 'avx2', 'avx', or 'noavx'.
* @returns
*/
const cpuInstructions = async (): Promise<string> => {
if (process.platform === 'darwin') return ''
const child = fork(path.join(__dirname, './cpuInfo.js')) // Path to the child process file
return new Promise((resolve, reject) => {
child.on('message', (cpuInfo?: string) => {
resolve(cpuInfo ?? 'noavx')
child.kill() // Kill the child process after receiving the result
})
child.on('error', (err) => {
resolve('noavx')
child.kill()
})
child.on('exit', (code) => {
if (code !== 0) {
resolve('noavx')
child.kill()
}
})
})
}
/**
* Find which variant to run based on the current platform.
*/
const engineVariant = async (gpuSetting?: GpuSetting): Promise<string> => {
const cpuInstruction = await cpuInstructions()
log(`[CORTEX]: CPU instruction: ${cpuInstruction}`)
let engineVariant = [
os(),
gpuSetting?.vulkan
? 'vulkan'
: gpuRunMode(gpuSetting) !== 'cuda'
? // CPU mode - support all variants
cpuInstruction
: // GPU mode - packaged CUDA variants of avx2 and noavx
cpuInstruction === 'avx2' || cpuInstruction === 'avx512'
? 'avx2'
: 'noavx',
gpuRunMode(gpuSetting),
cudaVersion(gpuSetting),
]
.filter((e) => !!e)
.join('-')
log(`[CORTEX]: Engine variant: ${engineVariant}`)
return engineVariant
}
/**
* Create symlink to each variant for the default bundled version
@ -148,6 +47,5 @@ const symlinkEngines = async () => {
}
export default {
engineVariant,
symlinkEngines,
}

View File

@ -0,0 +1,81 @@
import {
GpuSetting,
log,
} from '@janhq/core'
/**
* The GPU runMode that will be set - either 'vulkan', 'cuda', or empty for cpu.
* @param settings
* @returns
*/
const gpuRunMode = (settings?: GpuSetting): string => {
if (!settings) return ''
return settings.vulkan === true ||
settings.gpus?.some((gpu) => gpu.activated !== true)
? ''
: 'cuda'
}
/**
* The OS & architecture that the current process is running on.
* @returns windows-amd64, mac-arm64, mac-amd64, or linux-amd64
*/
const os = (settings?: GpuSetting): string => {
return PLATFORM === 'win32'
? 'windows-amd64'
: PLATFORM === 'darwin'
? settings?.cpu?.arch === 'arm64'
? 'mac-arm64'
: 'mac-amd64'
: 'linux-amd64'
}
/**
* The CUDA version that will be set - either '11-7' or '12-0'.
* @param settings
* @returns
*/
const cudaVersion = (settings?: GpuSetting): '12-0' | '11-7' | undefined => {
const isUsingCuda =
settings?.vulkan !== true &&
settings?.gpus?.some((gpu) => (gpu.activated === true ? 'gpu' : 'cpu')) &&
!os().includes('mac')
if (!isUsingCuda) return undefined
// return settings?.cuda?.version === '11' ? '11-7' : '12-0'
return settings.gpus?.some((gpu) => gpu.version.includes('12'))
? '12-0'
: '11-7'
}
/**
* Find which variant to run based on the current platform.
*/
export const engineVariant = async (gpuSetting?: GpuSetting): Promise<string> => {
let engineVariant = [
os(gpuSetting),
gpuSetting?.vulkan
? 'vulkan'
: (gpuRunMode(gpuSetting) === 'cuda' && // GPU mode - packaged CUDA variants of avx2 and noavx
gpuSetting.cpu.instructions.some((inst) => inst === 'avx2')) ||
gpuSetting.cpu.instructions.some((inst) => inst === 'avx512')
? 'avx2'
: 'noavx',
gpuRunMode(gpuSetting),
cudaVersion(gpuSetting),
]
.filter((e) => !!e)
.join('-')
log(`[CORTEX]: Engine variant: ${engineVariant}`)
return engineVariant
}
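
A usage sketch for the new engineVariant helper from within the engine-management extension. The GPU and CPU values below are sample data, and the resolved string also depends on the PLATFORM constant injected at build time; the cast is used because the sketch omits GpuSetting fields that engineVariant does not read.

```typescript
import { GpuSetting } from '@janhq/core'
import { engineVariant } from './utils'

// Sample setting: one activated NVIDIA GPU reporting a CUDA 12 driver and an AVX2-capable CPU.
const sample = {
  vulkan: false,
  gpus: [
    {
      activated: true,
      free_vram: 20000,
      id: '0',
      name: 'NVIDIA GeForce RTX 3090',
      total_vram: 24576,
      uuid: 'GPU-sample',
      version: '12.3',
    },
  ],
  cpu: { arch: 'x64', cores: 16, instructions: ['avx', 'avx2'], model: 'sample', usage: 0 },
} as unknown as GpuSetting

engineVariant(sample).then((variant) => {
  // e.g. 'windows-amd64-avx2-cuda-12-0' when PLATFORM is 'win32',
  // or 'linux-amd64-avx2-cuda-12-0' when PLATFORM is 'linux'.
  console.log(variant)
})
```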

View File

@ -0,0 +1,5 @@
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
preset: 'ts-jest',
testEnvironment: 'node',
}

View File

@ -0,0 +1,48 @@
{
"name": "@janhq/hardware-management-extension",
"productName": "Hardware Management",
"version": "1.0.0",
"description": "Manages Better Hardware settings.",
"main": "dist/index.js",
"node": "dist/node/index.cjs.js",
"author": "Jan <service@jan.ai>",
"license": "MIT",
"scripts": {
"test": "jest",
"build": "rolldown -c rolldown.config.mjs",
"codesign:darwin": "../../.github/scripts/auto-sign.sh",
"codesign:win32:linux": "echo 'No codesigning required'",
"codesign": "run-script-os",
"build:publish": "rimraf *.tgz --glob || true && yarn build && yarn codesign && npm pack && cpx *.tgz ../../pre-install"
},
"exports": {
".": "./dist/index.js",
"./main": "./dist/module.js"
},
"devDependencies": {
"cpx": "^1.5.0",
"rimraf": "^3.0.2",
"rolldown": "^1.0.0-beta.1",
"run-script-os": "^1.1.6",
"ts-loader": "^9.5.0",
"typescript": "^5.3.3"
},
"dependencies": {
"@janhq/core": "../../core/package.tgz",
"cpu-instructions": "^0.0.13",
"ky": "^1.7.2",
"p-queue": "^8.0.1"
},
"bundledDependencies": [
"cpu-instructions",
"@janhq/core"
],
"hardwares": {
"node": ">=18.0.0"
},
"files": [
"dist/*",
"package.json",
"README.md"
]
}

View File

@ -0,0 +1,17 @@
import { defineConfig } from 'rolldown'
import pkgJson from './package.json' with { type: 'json' }
export default defineConfig([
{
input: 'src/index.ts',
output: {
format: 'esm',
file: 'dist/index.js',
},
define: {
NODE: JSON.stringify(`${pkgJson.name}/${pkgJson.node}`),
API_URL: JSON.stringify('http://127.0.0.1:39291'),
SOCKET_URL: JSON.stringify('ws://127.0.0.1:39291'),
},
},
])

View File

@ -0,0 +1,12 @@
declare const API_URL: string
declare const SOCKET_URL: string
declare const NODE: string
interface Core {
api: APIFunctions
events: EventEmitter
}
interface Window {
core?: Core | undefined
electronAPI?: any | undefined
}

View File

@ -0,0 +1,67 @@
import {
executeOnMain,
HardwareManagementExtension,
HardwareInformation,
} from '@janhq/core'
import ky from 'ky'
import PQueue from 'p-queue'
/**
* JSONHardwareManagementExtension is a HardwareManagementExtension implementation that provides
* functionality for retrieving hardware information and managing GPU activation.
*/
export default class JSONHardwareManagementExtension extends HardwareManagementExtension {
queue = new PQueue({ concurrency: 1 })
/**
* Called when the extension is loaded.
*/
async onLoad() {
// Run Healthcheck
this.queue.add(() => this.healthz())
}
/**
* Called when the extension is unloaded.
*/
onUnload() {}
/**
* Do health check on cortex.cpp
* @returns
*/
async healthz(): Promise<void> {
return ky
.get(`${API_URL}/healthz`, {
retry: { limit: 20, delay: () => 500, methods: ['get'] },
})
.then(() => {})
}
/**
* @returns A Promise that resolves to the system hardware information.
*/
async getHardware(): Promise<HardwareInformation> {
return this.queue.add(() =>
ky
.get(`${API_URL}/v1/hardware`)
.json<HardwareInformation>()
.then((e) => e)
) as Promise<HardwareInformation>
}
/**
* @returns A Promise that resolves to the result of GPU activation.
*/
async setAvtiveGpu(data: { gpus: number[] }): Promise<{
message: string
activated_gpus: number[]
}> {
return this.queue.add(() =>
ky.post(`${API_URL}/v1/hardware/activate`, { json: data }).then((e) => e)
) as Promise<{
message: string
activated_gpus: number[]
}>
}
}
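
How a caller on the web side might use this extension. Retrieval through `extensionManager.get` and the `@/extension` import path are assumptions about the app's extension manager, not something shown in this diff; the GPU index passed to setAvtiveGpu is likewise only an example.

```typescript
import { ExtensionTypeEnum, HardwareManagementExtension } from '@janhq/core'
import { extensionManager } from '@/extension' // assumed path to the web app's ExtensionManager

async function logHardwareAndActivateFirstGpu() {
  const hardware = extensionManager.get<HardwareManagementExtension>(
    ExtensionTypeEnum.Hardware
  )
  if (!hardware) return

  const info = await hardware.getHardware()
  console.log(`CPU usage: ${info.cpu.usage}%, free RAM: ${info.ram.available}`)

  // Activate only the first GPU index reported by the backend (illustrative choice).
  if (info.gpus.length > 0) {
    const result = await hardware.setAvtiveGpu({ gpus: [0] })
    console.log(result.message, result.activated_gpus)
  }
}
```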

View File

@ -8,7 +8,9 @@
"forceConsistentCasingInFileNames": true,
"strict": false,
"skipLibCheck": true,
"rootDir": "./src"
"rootDir": "./src",
"resolveJsonModule": true
},
"include": ["./src"]
"include": ["./src"],
"exclude": ["src/**/*.test.ts", "rolldown.config.mjs"]
}

View File

@ -112,8 +112,8 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine {
if (!Number.isNaN(threads_number)) this.cpu_threads = threads_number
// Run the process watchdog
const systemInfo = await systemInformation()
this.queue.add(() => executeOnMain(NODE, 'run', systemInfo))
// const systemInfo = await systemInformation()
this.queue.add(() => executeOnMain(NODE, 'run'))
this.queue.add(() => this.healthz())
this.subscribeToEvents()

View File

@ -16,12 +16,14 @@ let watchdog: ProcessWatchdog | undefined = undefined
* Spawns a Nitro subprocess.
* @returns A promise that resolves when the Nitro subprocess is started.
*/
function run(systemInfo?: SystemInformation): Promise<any> {
function run(): Promise<any> {
log(`[CORTEX]:: Spawning cortex subprocess...`)
return new Promise<void>(async (resolve, reject) => {
let gpuVisibleDevices = systemInfo?.gpuSetting?.gpus_in_use.join(',') ?? ''
let binaryName = `cortex-server${process.platform === 'win32' ? '.exe' : ''}`
// let gpuVisibleDevices = systemInfo?.gpuSetting?.gpus_in_use.join(',') ?? ''
let binaryName = `cortex-server${
process.platform === 'win32' ? '.exe' : ''
}`
const binPath = path.join(__dirname, '..', 'bin')
const executablePath = path.join(binPath, binaryName)
@ -48,11 +50,11 @@ function run(systemInfo?: SystemInformation): Promise<any> {
{
env: {
...process.env,
CUDA_VISIBLE_DEVICES: gpuVisibleDevices,
// Vulkan - Support 1 device at a time for now
...(gpuVisibleDevices?.length > 0 && {
GGML_VK_VISIBLE_DEVICES: gpuVisibleDevices,
}),
// CUDA_VISIBLE_DEVICES: gpuVisibleDevices,
// // Vulkan - Support 1 device at a time for now
// ...(gpuVisibleDevices?.length > 0 && {
// GGML_VK_VISIBLE_DEVICES: gpuVisibleDevices,
// }),
},
cwd: sharedPath,
}

View File

@ -14,8 +14,6 @@ import {
} from '@janhq/core'
import { CortexAPI } from './cortex'
import { scanModelsFolder } from './legacy/model-json'
import { downloadModel } from './legacy/download'
import { systemInformation } from '@janhq/core'
import { deleteModelFiles } from './legacy/delete'
export enum Settings {
@ -70,18 +68,6 @@ export default class JanModelExtension extends ModelExtension {
* @returns A Promise that resolves when the model is downloaded.
*/
async pullModel(model: string, id?: string, name?: string): Promise<void> {
if (id) {
const model: Model = ModelManager.instance().get(id)
// Clip vision model - should not be handled by cortex.cpp
// TensorRT model - should not be handled by cortex.cpp
if (
model &&
(model.engine === InferenceEngine.nitro_tensorrt_llm ||
model.settings.vision_model)
) {
return downloadModel(model, (await systemInformation()).gpuSetting)
}
}
/**
* Sending POST to /models/pull/{id} endpoint to pull the model
*/

View File

@ -2,15 +2,12 @@ import {
downloadFile,
DownloadRequest,
fs,
GpuSetting,
InferenceEngine,
joinPath,
Model,
} from '@janhq/core'
export const downloadModel = async (
model: Model,
gpuSettings?: GpuSetting,
network?: { ignoreSSL?: boolean; proxy?: string }
): Promise<void> => {
const homedir = 'file://models'
@ -27,41 +24,6 @@ export const downloadModel = async (
JSON.stringify(model, null, 2)
)
if (model.engine === InferenceEngine.nitro_tensorrt_llm) {
if (!gpuSettings || gpuSettings.gpus.length === 0) {
console.error('No GPU found. Please check your GPU setting.')
return
}
const firstGpu = gpuSettings.gpus[0]
if (!firstGpu.name.toLowerCase().includes('nvidia')) {
console.error('No Nvidia GPU found. Please check your GPU setting.')
return
}
const gpuArch = firstGpu.arch
if (gpuArch === undefined) {
console.error('No GPU architecture found. Please check your GPU setting.')
return
}
if (!supportedGpuArch.includes(gpuArch)) {
console.debug(
`Your GPU: ${JSON.stringify(firstGpu)} is not supported. Only 30xx, 40xx series are supported.`
)
return
}
const os = 'windows' // TODO: remove this hard coded value
const newSources = model.sources.map((source) => {
const newSource = { ...source }
newSource.url = newSource.url
.replace(/<os>/g, os)
.replace(/<gpuarch>/g, gpuArch)
return newSource
})
model.sources = newSources
}
console.debug(`Download sources: ${JSON.stringify(model.sources)}`)
if (model.sources.length > 1) {

View File

@ -1,75 +0,0 @@
# Create a Jan Extension using Typescript
Use this template to bootstrap the creation of a TypeScript Jan extension. 🚀
## Create Your Own Extension
To create your own extension, you can use this repository as a template! Just follow the below instructions:
1. Click the Use this template button at the top of the repository
2. Select Create a new repository
3. Select an owner and name for your new repository
4. Click Create repository
5. Clone your new repository
## Initial Setup
After you've cloned the repository to your local machine or codespace, you'll need to perform some initial setup steps before you can develop your extension.
> [!NOTE]
>
> You'll need to have a reasonably modern version of
> [Node.js](https://nodejs.org) handy. If you are using a version manager like
> [`nodenv`](https://github.com/nodenv/nodenv) or
> [`nvm`](https://github.com/nvm-sh/nvm), you can run `nodenv install` in the
> root of your repository to install the version specified in
> [`package.json`](./package.json). Otherwise, 20.x or later should work!
1. :hammer_and_wrench: Install the dependencies
```bash
npm install
```
1. :building_construction: Package the TypeScript for distribution
```bash
npm run bundle
```
1. :white_check_mark: Check your artifact
There will be a tgz file in your extension directory now
## Update the Extension Metadata
The [`package.json`](package.json) file defines metadata about your extension, such as
extension name, main entry, description and version.
When you copy this repository, update `package.json` with the name, description for your extension.
## Update the Extension Code
The [`src/`](./src/) directory is the heart of your extension! This contains the
source code that will be run when your extension functions are invoked. You can replace the
contents of this directory with your own code.
There are a few things to keep in mind when writing your extension code:
- Most Jan Extension functions are processed asynchronously.
In `index.ts`, you will see that the extension function will return a `Promise<any>`.
```typescript
import { events, MessageEvent, MessageRequest } from '@janhq/core'
function onStart(): Promise<any> {
return events.on(MessageEvent.OnMessageSent, (data: MessageRequest) =>
this.inference(data)
)
}
```
For more information about the Jan Extension Core module, see the
[documentation](https://github.com/janhq/jan/blob/main/core/README.md).
So, what are you waiting for? Go ahead and start customizing your extension!

View File

@ -1,2 +0,0 @@
@echo off
.\node_modules\.bin\download https://catalog.jan.ai/vulkaninfoSDK.exe -o ./bin

View File

@ -1,49 +0,0 @@
{
"name": "@janhq/monitoring-extension",
"productName": "System Monitoring",
"version": "1.0.10",
"description": "Provides system health and OS level data.",
"main": "dist/index.js",
"node": "dist/node/index.cjs.js",
"author": "Jan <service@jan.ai>",
"license": "AGPL-3.0",
"scripts": {
"build": "rolldown -c rolldown.config.mjs && yarn download-artifacts",
"download-artifacts": "run-script-os && cpx \"bin/**\" \"dist/bin\"",
"download-artifacts:darwin": "echo 'No artifacts to download for darwin'",
"download-artifacts:win32": "download.bat",
"download-artifacts:linux": "download https://catalog.jan.ai/vulkaninfo -o ./bin && chmod +x ./bin/vulkaninfo",
"build:publish": "rimraf *.tgz --glob || true && yarn build && npm pack && cpx *.tgz ../../pre-install"
},
"exports": {
".": "./dist/index.js",
"./main": "./dist/node/index.cjs.js"
},
"devDependencies": {
"@types/node": "^20.11.4",
"@types/node-os-utils": "^1.3.4",
"cpx": "^1.5.0",
"download-cli": "^1.1.1",
"rimraf": "^3.0.2",
"rolldown": "1.0.0-beta.1",
"run-script-os": "^1.1.6",
"typescript": "^5.3.3"
},
"dependencies": {
"@janhq/core": "../../core/package.tgz",
"node-os-utils": "^1.3.7"
},
"files": [
"dist/*",
"package.json",
"README.md"
],
"bundleDependencies": [
"node-os-utils",
"@janhq/core"
],
"installConfig": {
"hoistingLimits": "workspaces"
},
"packageManager": "yarn@4.5.3"
}

View File

@ -1,22 +0,0 @@
[
{
"key": "log-enabled",
"title": "Enable App Logs",
"description": "Saves app logs locally on your computer. This enables you to send us crash reports.",
"controllerType": "checkbox",
"controllerProps": {
"value": true
}
},
{
"key": "log-cleaning-interval",
"title": "Log Cleaning Interval",
"description": "Automatically delete local logs after a certain time interval (in milliseconds).",
"controllerType": "input",
"controllerProps": {
"value": "120000",
"placeholder": "Interval in milliseconds. E.g. 120000",
"textAlign": "right"
}
}
]

View File

@ -1,32 +0,0 @@
import { defineConfig } from 'rolldown'
import packageJson from './package.json' with { type: 'json' }
import settingJson from './resources/settings.json' with { type: 'json' }
export default defineConfig([
{
input: 'src/index.ts',
output: {
format: 'esm',
file: 'dist/index.js',
},
platform: 'browser',
define: {
NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`),
SETTINGS: JSON.stringify(settingJson),
},
},
{
input: 'src/node/index.ts',
external: ['@janhq/core/node'],
output: {
format: 'cjs',
file: 'dist/node/index.cjs.js',
sourcemap: false,
inlineDynamicImports: true,
},
resolve: {
extensions: ['.js', '.ts', '.json'],
},
platform: 'node',
},
])

View File

@ -1,19 +0,0 @@
declare const NODE: string
declare const SETTINGS: SettingComponentProps[]
type CpuGpuInfo = {
cpu: {
usage: number
}
gpu: GpuInfo[]
}
type GpuInfo = {
id: string
name: string
temperature: string
utilization: string
memoryTotal: string
memoryFree: string
memoryUtilization: string
}

View File

@ -1,90 +0,0 @@
import {
AppConfigurationEventName,
GpuSetting,
MonitoringExtension,
OperatingSystemInfo,
events,
executeOnMain,
} from '@janhq/core'
enum Settings {
logEnabled = 'log-enabled',
logCleaningInterval = 'log-cleaning-interval',
}
/**
* JanMonitoringExtension is a extension that provides system monitoring functionality.
* It implements the MonitoringExtension interface from the @janhq/core package.
*/
export default class JanMonitoringExtension extends MonitoringExtension {
/**
* Called when the extension is loaded.
*/
async onLoad() {
// Register extension settings
this.registerSettings(SETTINGS)
const logEnabled = await this.getSetting<boolean>(Settings.logEnabled, true)
const logCleaningInterval = parseInt(
await this.getSetting<string>(Settings.logCleaningInterval, '120000')
)
// Register File Logger provided by this extension
await executeOnMain(NODE, 'registerLogger', {
logEnabled,
logCleaningInterval: isNaN(logCleaningInterval)
? 120000
: logCleaningInterval,
})
// Attempt to fetch nvidia info
await executeOnMain(NODE, 'updateNvidiaInfo')
events.emit(AppConfigurationEventName.OnConfigurationUpdate, {})
}
onSettingUpdate<T>(key: string, value: T): void {
if (key === Settings.logEnabled) {
executeOnMain(NODE, 'updateLogger', { logEnabled: value })
} else if (key === Settings.logCleaningInterval) {
executeOnMain(NODE, 'updateLogger', { logCleaningInterval: value })
}
}
/**
* Called when the extension is unloaded.
*/
onUnload(): void {
// Register File Logger provided by this extension
executeOnMain(NODE, 'unregisterLogger')
}
/**
* Returns the GPU configuration.
* @returns A Promise that resolves to an object containing the GPU configuration.
*/
async getGpuSetting(): Promise<GpuSetting | undefined> {
return executeOnMain(NODE, 'getGpuConfig')
}
/**
* Returns information about the system resources.
* @returns A Promise that resolves to an object containing information about the system resources.
*/
getResourcesInfo(): Promise<any> {
return executeOnMain(NODE, 'getResourcesInfo')
}
/**
* Returns information about the current system load.
* @returns A Promise that resolves to an object containing information about the current system load.
*/
getCurrentLoad(): Promise<any> {
return executeOnMain(NODE, 'getCurrentLoad')
}
/**
* Returns information about the OS
* @returns
*/
getOsInfo(): Promise<OperatingSystemInfo> {
return executeOnMain(NODE, 'getOsInfo')
}
}

View File

@ -1,389 +0,0 @@
import {
GpuSetting,
GpuSettingInfo,
LoggerManager,
OperatingSystemInfo,
ResourceInfo,
SupportedPlatforms,
getJanDataFolderPath,
log,
} from '@janhq/core/node'
import { mem, cpu } from 'node-os-utils'
import { exec } from 'child_process'
import { writeFileSync, existsSync, readFileSync, mkdirSync } from 'fs'
import path from 'path'
import os from 'os'
import { FileLogger } from './logger'
/**
* Path to the settings directory
**/
export const SETTINGS_DIR = path.join(getJanDataFolderPath(), 'settings')
/**
* Path to the settings file
**/
export const GPU_INFO_FILE = path.join(SETTINGS_DIR, 'settings.json')
/**
* Default GPU settings
* TODO: This needs to be refactored to support multiple accelerators
**/
const DEFAULT_SETTINGS: GpuSetting = {
notify: true,
run_mode: 'cpu',
nvidia_driver: {
exist: false,
version: '',
},
cuda: {
exist: false,
version: '',
},
gpus: [],
gpu_highest_vram: '',
gpus_in_use: [],
is_initial: true,
// TODO: This needs to be set based on user toggle in settings
vulkan: false,
}
export const getGpuConfig = async (): Promise<GpuSetting | undefined> => {
if (process.platform === 'darwin') return undefined
if (existsSync(GPU_INFO_FILE))
return JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
return DEFAULT_SETTINGS
}
export const getResourcesInfo = async (): Promise<ResourceInfo> => {
const ramUsedInfo = await mem.used()
const totalMemory = ramUsedInfo.totalMemMb * 1024 * 1024
const usedMemory = ramUsedInfo.usedMemMb * 1024 * 1024
const resourceInfo: ResourceInfo = {
mem: {
totalMemory,
usedMemory,
},
}
return resourceInfo
}
export const getCurrentLoad = () =>
new Promise<CpuGpuInfo>(async (resolve, reject) => {
const cpuPercentage = await cpu.usage()
let data = {
run_mode: 'cpu',
gpus_in_use: [],
}
if (process.platform !== 'darwin') {
data = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
}
if (data.run_mode === 'gpu' && data.gpus_in_use.length > 0) {
const gpuIds = data.gpus_in_use.join(',')
if (gpuIds !== '' && data['vulkan'] !== true) {
exec(
`nvidia-smi --query-gpu=index,name,temperature.gpu,utilization.gpu,memory.total,memory.free,utilization.memory --format=csv,noheader,nounits --id=${gpuIds}`,
(error, stdout, _) => {
if (error) {
console.error(`exec error: ${error}`)
throw new Error(error.message)
}
const gpuInfo: GpuInfo[] = stdout
.trim()
.split('\n')
.map((line) => {
const [
id,
name,
temperature,
utilization,
memoryTotal,
memoryFree,
memoryUtilization,
] = line.split(', ').map((item) => item.replace(/\r/g, ''))
return {
id,
name,
temperature,
utilization,
memoryTotal,
memoryFree,
memoryUtilization,
}
})
resolve({
cpu: { usage: cpuPercentage },
gpu: gpuInfo,
})
}
)
} else {
// Handle the case where gpuIds is empty
resolve({
cpu: { usage: cpuPercentage },
gpu: [],
})
}
} else {
// Handle the case where run_mode is not 'gpu' or no GPUs are in use
resolve({
cpu: { usage: cpuPercentage },
gpu: [],
})
}
})
/**
* This will retrieve GPU information and persist settings.json
* Will be called when the extension is loaded to turn on GPU acceleration if supported
*/
export const updateNvidiaInfo = async () => {
// ignore if macos
if (process.platform === 'darwin') return
try {
JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
} catch (error) {
if (!existsSync(SETTINGS_DIR)) {
mkdirSync(SETTINGS_DIR, {
recursive: true,
})
}
writeFileSync(GPU_INFO_FILE, JSON.stringify(DEFAULT_SETTINGS, null, 2))
}
await updateNvidiaDriverInfo()
await updateGpuInfo()
}
const updateNvidiaDriverInfo = async () =>
new Promise((resolve, reject) => {
exec(
'nvidia-smi --query-gpu=driver_version --format=csv,noheader',
(error, stdout) => {
const data: GpuSetting = JSON.parse(
readFileSync(GPU_INFO_FILE, 'utf-8')
)
if (!error) {
const firstLine = stdout.split('\n')[0].trim()
data.nvidia_driver.exist = true
data.nvidia_driver.version = firstLine
} else {
data.nvidia_driver.exist = false
}
writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2))
resolve({})
}
)
})
const getGpuArch = (gpuName: string): string => {
if (!gpuName.toLowerCase().includes('nvidia')) return 'unknown'
if (gpuName.includes('30')) return 'ampere'
else if (gpuName.includes('40')) return 'ada'
else return 'unknown'
}
const updateGpuInfo = async () =>
new Promise((resolve, reject) => {
let data: GpuSetting = JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
// Cuda
if (data.vulkan === true) {
// Vulkan
exec(
process.platform === 'win32'
? `${__dirname}\\..\\bin\\vulkaninfoSDK.exe --summary`
: `${__dirname}/../bin/vulkaninfo --summary`,
async (error, stdout) => {
if (!error) {
const output = stdout.toString()
log(output)
const gpuRegex = /GPU(\d+):(?:[\s\S]*?)deviceName\s*=\s*(.*)/g
const gpus: GpuSettingInfo[] = []
let match
while ((match = gpuRegex.exec(output)) !== null) {
const id = match[1]
const name = match[2]
const arch = getGpuArch(name)
gpus.push({ id, vram: '0', name, arch })
}
data.gpus = gpus
if (!data.gpus_in_use || data.gpus_in_use.length === 0) {
data.gpus_in_use = [data.gpus.length > 1 ? '1' : '0']
}
data = await updateCudaExistence(data)
writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2))
log(`[APP]::${JSON.stringify(data)}`)
resolve({})
} else {
reject(error)
}
}
)
} else {
exec(
'nvidia-smi --query-gpu=index,memory.total,name --format=csv,noheader,nounits',
async (error, stdout) => {
if (!error) {
log(`[SPECS]::${stdout}`)
// Get GPU info and gpu has higher memory first
let highestVram = 0
let highestVramId = '0'
const gpus: GpuSettingInfo[] = stdout
.trim()
.split('\n')
.map((line) => {
let [id, vram, name] = line.split(', ')
const arch = getGpuArch(name)
vram = vram.replace(/\r/g, '')
if (parseFloat(vram) > highestVram) {
highestVram = parseFloat(vram)
highestVramId = id
}
return { id, vram, name, arch }
})
data.gpus = gpus
data.gpu_highest_vram = highestVramId
} else {
data.gpus = []
data.gpu_highest_vram = undefined
}
if (!data.gpus_in_use || data.gpus_in_use.length === 0) {
data.gpus_in_use = data.gpu_highest_vram ? [data.gpu_highest_vram].filter(e => !!e) : []
}
data = await updateCudaExistence(data)
console.log('[MONITORING]::Cuda info: ', data)
writeFileSync(GPU_INFO_FILE, JSON.stringify(data, null, 2))
log(`[APP]::${JSON.stringify(data)}`)
resolve({})
}
)
}
})
/**
* Check if file exists in paths
*/
const checkFileExistenceInPaths = (file: string, paths: string[]): boolean => {
return paths.some((p) => existsSync(path.join(p, file)))
}
/**
* Validate cuda for linux and windows
*/
const updateCudaExistence = async (
data: GpuSetting = DEFAULT_SETTINGS
): Promise<GpuSetting> => {
let filesCuda12: string[]
let filesCuda11: string[]
let paths: string[]
let cudaVersion: string = ''
if (process.platform === 'win32') {
filesCuda12 = ['cublas64_12.dll', 'cudart64_12.dll', 'cublasLt64_12.dll']
filesCuda11 = ['cublas64_11.dll', 'cudart64_110.dll', 'cublasLt64_11.dll']
paths = process.env.PATH ? process.env.PATH.split(path.delimiter) : []
} else {
filesCuda12 = ['libcudart.so.12', 'libcublas.so.12', 'libcublasLt.so.12']
filesCuda11 = ['libcudart.so.11.0', 'libcublas.so.11', 'libcublasLt.so.11']
paths = process.env.LD_LIBRARY_PATH
? process.env.LD_LIBRARY_PATH.split(path.delimiter)
: []
paths.push('/usr/lib/x86_64-linux-gnu/')
}
let cudaExists = filesCuda12.every(
(file) => existsSync(file) || checkFileExistenceInPaths(file, paths)
)
if (!cudaExists) {
cudaExists = filesCuda11.every(
(file) => existsSync(file) || checkFileExistenceInPaths(file, paths)
)
if (cudaExists) {
cudaVersion = '11'
}
} else {
cudaVersion = '12'
}
data.cuda.exist = cudaExists
data.cuda.version = cudaVersion
console.debug(data.is_initial, data.gpus_in_use)
if (cudaExists && data.is_initial && data.gpus_in_use.length > 0) {
data.run_mode = 'gpu'
}
data.is_initial = false
// Attempt to query CUDA using NVIDIA SMI
if (!cudaExists) {
await new Promise<void>((resolve) => {
exec('nvidia-smi', (error, stdout) => {
if (!error) {
const regex = /CUDA\s*Version:\s*(\d+\.\d+)/g
const match = regex.exec(stdout)
if (match && match[1]) {
data.cuda.version = match[1]
}
}
console.log('[MONITORING]::Finalized cuda info update: ', data)
resolve()
})
})
}
return data
}
export const getOsInfo = (): OperatingSystemInfo => {
const platform =
SupportedPlatforms.find((p) => p === process.platform) || 'unknown'
const osInfo: OperatingSystemInfo = {
platform: platform,
arch: process.arch,
release: os.release(),
machine: os.machine(),
version: os.version(),
totalMem: os.totalmem(),
freeMem: os.freemem(),
}
return osInfo
}
export const registerLogger = ({ logEnabled, logCleaningInterval }) => {
const logger = new FileLogger(logEnabled, logCleaningInterval)
LoggerManager.instance().register(logger)
logger.cleanLogs()
}
export const unregisterLogger = () => {
LoggerManager.instance().unregister('file')
}
export const updateLogger = ({ logEnabled, logCleaningInterval }) => {
const logger = LoggerManager.instance().loggers.get('file') as FileLogger
if (logger && logEnabled !== undefined) logger.logEnabled = logEnabled
if (logger && logCleaningInterval)
logger.logCleaningInterval = logCleaningInterval
// Rerun
logger && logger.cleanLogs()
}

View File

@ -1,142 +0,0 @@
import fs from 'fs'
import util from 'util'
import {
getAppConfigurations,
getJanDataFolderPath,
Logger,
} from '@janhq/core/node'
import path, { join } from 'path'
export class FileLogger extends Logger {
name = 'file'
logCleaningInterval: number = 120000
timeout: NodeJS.Timeout | null = null
appLogPath: string = './'
logEnabled: boolean = true
constructor(
logEnabled: boolean = true,
logCleaningInterval: number = 120000
) {
super()
this.logEnabled = logEnabled
if (logCleaningInterval) this.logCleaningInterval = logCleaningInterval
const appConfigurations = getAppConfigurations()
const logFolderPath = join(appConfigurations.data_folder, 'logs')
if (!fs.existsSync(logFolderPath)) {
fs.mkdirSync(logFolderPath, { recursive: true })
}
this.appLogPath = join(logFolderPath, 'app.log')
}
log(args: any) {
if (!this.logEnabled) return
let message = args[0]
const scope = args[1]
if (!message) return
const path = this.appLogPath
if (!scope && !message.startsWith('[')) {
message = `[APP]::${message}`
} else if (scope) {
message = `${scope}::${message}`
}
message = `${new Date().toISOString()} ${message}`
writeLog(message, path)
}
cleanLogs(
maxFileSizeBytes?: number | undefined,
daysToKeep?: number | undefined
): void {
// clear existing timeout
// in case we rerun it with different values
if (this.timeout) clearTimeout(this.timeout)
this.timeout = undefined
if (!this.logEnabled) return
console.log(
'Validating app logs. Next attempt in ',
this.logCleaningInterval
)
const size = maxFileSizeBytes ?? 1 * 1024 * 1024 // 1 MB
const days = daysToKeep ?? 7 // 7 days
const logDirectory = path.join(getJanDataFolderPath(), 'logs')
// Perform log cleaning
const currentDate = new Date()
if (fs.existsSync(logDirectory))
fs.readdir(logDirectory, (err, files) => {
if (err) {
console.error('Error reading log directory:', err)
return
}
files.forEach((file) => {
const filePath = path.join(logDirectory, file)
fs.stat(filePath, (err, stats) => {
if (err) {
console.error('Error getting file stats:', err)
return
}
// Check size
if (stats.size > size) {
fs.unlink(filePath, (err) => {
if (err) {
console.error('Error deleting log file:', err)
return
}
console.debug(
`Deleted log file due to exceeding size limit: ${filePath}`
)
})
} else {
// Check age
const creationDate = new Date(stats.ctime)
const daysDifference = Math.floor(
(currentDate.getTime() - creationDate.getTime()) /
(1000 * 3600 * 24)
)
if (daysDifference > days) {
fs.unlink(filePath, (err) => {
if (err) {
console.error('Error deleting log file:', err)
return
}
console.debug(`Deleted old log file: ${filePath}`)
})
}
}
})
})
})
// Schedule the next execution with doubled delays
this.timeout = setTimeout(
() => this.cleanLogs(maxFileSizeBytes, daysToKeep),
this.logCleaningInterval
)
}
}
const writeLog = (message: string, logPath: string) => {
if (!fs.existsSync(logPath)) {
const logDirectory = path.join(getJanDataFolderPath(), 'logs')
if (!fs.existsSync(logDirectory)) {
fs.mkdirSync(logDirectory)
}
fs.writeFileSync(logPath, message)
} else {
const logFile = fs.createWriteStream(logPath, {
flags: 'a',
})
logFile.write(util.format(message) + '\n')
logFile.close()
console.debug(message)
}
}

View File

@ -509,67 +509,83 @@ __metadata:
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=c5ae26&locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension"
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=8a4445&locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/8d0da05e8a691b55c4de0578f80b134695789a01e2b8e197846318467afa391ae68675fdc9bf2aa3f173563f0a01efb2cf1008564b1df8631355f0f4ae9c2772
checksum: 10c0/74cb4d1126dd504b81b31a3a1da89b1f332e327e980a99d645a9088cba0ccd5fafdb94e1c31d40991cbfc7e18615cf6644f4882ff8df29293ea6adc6a2977d65
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=c5ae26&locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension"
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=8a4445&locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/8d0da05e8a691b55c4de0578f80b134695789a01e2b8e197846318467afa391ae68675fdc9bf2aa3f173563f0a01efb2cf1008564b1df8631355f0f4ae9c2772
checksum: 10c0/74cb4d1126dd504b81b31a3a1da89b1f332e327e980a99d645a9088cba0ccd5fafdb94e1c31d40991cbfc7e18615cf6644f4882ff8df29293ea6adc6a2977d65
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fengine-management-extension%40workspace%3Aengine-management-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=c5ae26&locator=%40janhq%2Fengine-management-extension%40workspace%3Aengine-management-extension"
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=8a4445&locator=%40janhq%2Fengine-management-extension%40workspace%3Aengine-management-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/8d0da05e8a691b55c4de0578f80b134695789a01e2b8e197846318467afa391ae68675fdc9bf2aa3f173563f0a01efb2cf1008564b1df8631355f0f4ae9c2772
checksum: 10c0/74cb4d1126dd504b81b31a3a1da89b1f332e327e980a99d645a9088cba0ccd5fafdb94e1c31d40991cbfc7e18615cf6644f4882ff8df29293ea6adc6a2977d65
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fhardware-management-extension%40workspace%3Ahardware-management-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=8a4445&locator=%40janhq%2Fhardware-management-extension%40workspace%3Ahardware-management-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/74cb4d1126dd504b81b31a3a1da89b1f332e327e980a99d645a9088cba0ccd5fafdb94e1c31d40991cbfc7e18615cf6644f4882ff8df29293ea6adc6a2977d65
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-cortex-extension%40workspace%3Ainference-cortex-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=c5ae26&locator=%40janhq%2Finference-cortex-extension%40workspace%3Ainference-cortex-extension"
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=8a4445&locator=%40janhq%2Finference-cortex-extension%40workspace%3Ainference-cortex-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/8d0da05e8a691b55c4de0578f80b134695789a01e2b8e197846318467afa391ae68675fdc9bf2aa3f173563f0a01efb2cf1008564b1df8631355f0f4ae9c2772
checksum: 10c0/74cb4d1126dd504b81b31a3a1da89b1f332e327e980a99d645a9088cba0ccd5fafdb94e1c31d40991cbfc7e18615cf6644f4882ff8df29293ea6adc6a2977d65
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fmodel-extension%40workspace%3Amodel-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=c5ae26&locator=%40janhq%2Fmodel-extension%40workspace%3Amodel-extension"
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=8a4445&locator=%40janhq%2Fmodel-extension%40workspace%3Amodel-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/8d0da05e8a691b55c4de0578f80b134695789a01e2b8e197846318467afa391ae68675fdc9bf2aa3f173563f0a01efb2cf1008564b1df8631355f0f4ae9c2772
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fmonitoring-extension%40workspace%3Amonitoring-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=c5ae26&locator=%40janhq%2Fmonitoring-extension%40workspace%3Amonitoring-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/8d0da05e8a691b55c4de0578f80b134695789a01e2b8e197846318467afa391ae68675fdc9bf2aa3f173563f0a01efb2cf1008564b1df8631355f0f4ae9c2772
checksum: 10c0/74cb4d1126dd504b81b31a3a1da89b1f332e327e980a99d645a9088cba0ccd5fafdb94e1c31d40991cbfc7e18615cf6644f4882ff8df29293ea6adc6a2977d65
languageName: node
linkType: hard
"@janhq/engine-management-extension@workspace:engine-management-extension":
version: 0.0.0-use.local
resolution: "@janhq/engine-management-extension@workspace:engine-management-extension"
dependencies:
"@janhq/core": ../../core/package.tgz
cpx: "npm:^1.5.0"
ky: "npm:^1.7.2"
p-queue: "npm:^8.0.1"
rimraf: "npm:^3.0.2"
rolldown: "npm:^1.0.0-beta.1"
run-script-os: "npm:^1.1.6"
ts-loader: "npm:^9.5.0"
typescript: "npm:^5.3.3"
languageName: unknown
linkType: soft
"@janhq/hardware-management-extension@workspace:hardware-management-extension":
version: 0.0.0-use.local
resolution: "@janhq/hardware-management-extension@workspace:hardware-management-extension"
dependencies:
"@janhq/core": ../../core/package.tgz
cpu-instructions: "npm:^0.0.13"
@ -630,23 +646,6 @@ __metadata:
languageName: unknown
linkType: soft
"@janhq/monitoring-extension@workspace:monitoring-extension":
version: 0.0.0-use.local
resolution: "@janhq/monitoring-extension@workspace:monitoring-extension"
dependencies:
"@janhq/core": ../../core/package.tgz
"@types/node": "npm:^20.11.4"
"@types/node-os-utils": "npm:^1.3.4"
cpx: "npm:^1.5.0"
download-cli: "npm:^1.1.1"
node-os-utils: "npm:^1.3.7"
rimraf: "npm:^3.0.2"
rolldown: "npm:1.0.0-beta.1"
run-script-os: "npm:^1.1.6"
typescript: "npm:^5.3.3"
languageName: unknown
linkType: soft
"@jest/console@npm:^29.7.0":
version: 29.7.0
resolution: "@jest/console@npm:29.7.0"
@ -1877,13 +1876,6 @@ __metadata:
languageName: node
linkType: hard
"@types/node-os-utils@npm:^1.3.4":
version: 1.3.4
resolution: "@types/node-os-utils@npm:1.3.4"
checksum: 10c0/d57bfa84862ee388f538e2bf38b5a6e6a555aebf6e50573ad5700f5858f657ee72388833aa7ed6c9d0b68ce0a6763802366326617b0d5f4d56cc3fe61dd617e1
languageName: node
linkType: hard
"@types/node@npm:*":
version: 22.10.2
resolution: "@types/node@npm:22.10.2"
@ -5999,13 +5991,6 @@ __metadata:
languageName: node
linkType: hard
"node-os-utils@npm:^1.3.7":
version: 1.3.7
resolution: "node-os-utils@npm:1.3.7"
checksum: 10c0/88b8a4c7ed99ca0ca8f077f4f4672026e732605d5afb125e856de9ba1880b842facefa4c38f732f5cce20a34f9f471ce18a20c677dcdb702b4b68c17bacf9584
languageName: node
linkType: hard
"node-releases@npm:^2.0.19":
version: 2.0.19
resolution: "node-releases@npm:2.0.19"

View File

@ -87,7 +87,7 @@ describe('SystemMonitor', () => {
expect(screen.getByText('Running Models')).toBeInTheDocument()
expect(screen.getByText('App Log')).toBeInTheDocument()
expect(screen.getByText('7.45/14.90 GB')).toBeInTheDocument()
expect(screen.getByText('7.45GB / 14.90GB')).toBeInTheDocument()
expect(screen.getByText('30%')).toBeInTheDocument()
})

View File

@ -134,8 +134,8 @@ const SystemMonitor = () => {
<div className="flex items-center justify-between gap-2">
<h6 className="font-bold">Memory</h6>
<span>
{toGibibytes(usedRam, { hideUnit: true })}/
{toGibibytes(totalRam, { hideUnit: true })} GB
{toGibibytes(usedRam, { hideUnit: true })}GB /{' '}
{toGibibytes(totalRam, { hideUnit: true })}GB
</span>
</div>
<div className="flex items-center gap-x-4">
@ -149,41 +149,43 @@ const SystemMonitor = () => {
</div>
{gpus.length > 0 && (
<div className="mb-4 border-b border-[hsla(var(--app-border))] pb-4 last:border-none">
{gpus.map((gpu, index) => {
const gpuUtilization = utilizedMemory(
gpu.memoryFree,
gpu.memoryTotal
)
return (
<div key={index} className="mt-4 flex flex-col gap-x-2">
<div className="flex w-full items-start justify-between">
<span className="line-clamp-1 w-1/2 font-bold">
{gpu.name}
</span>
<div className="flex gap-x-2">
<div className="">
<span>
{gpu.memoryTotal - gpu.memoryFree}/
{gpu.memoryTotal}
</span>
<span> MB</span>
{gpus
.filter((gpu) => gpu.activated === true)
.map((gpu, index) => {
const gpuUtilization = utilizedMemory(
gpu.free_vram,
gpu.total_vram
)
return (
<div key={index} className="mt-4 flex flex-col gap-x-2">
<div className="flex w-full items-start justify-between">
<span className="line-clamp-1 w-1/2 font-bold">
{gpu.name}
</span>
<div className="flex gap-x-2">
<div className="">
<span>
{gpu.total_vram - gpu.free_vram}/
{gpu.total_vram}
</span>
<span> MB</span>
</div>
</div>
</div>
</div>
<div className="flex items-center gap-x-4">
<Progress
value={gpuUtilization}
className="w-full"
size="small"
/>
<span className="flex-shrink-0 ">
{gpuUtilization}%
</span>
<div className="flex items-center gap-x-4">
<Progress
value={gpuUtilization}
className="w-full"
size="small"
/>
<span className="flex-shrink-0 ">
{gpuUtilization}%
</span>
</div>
</div>
</div>
)
})}
)
})}
</div>
)}
</div>

View File

@ -39,14 +39,13 @@ const ModelLabel = ({ metadata, compact }: Props) => {
const getLabel = (size: number) => {
const minimumRamModel = size * 1.25
const availableRam =
settings?.run_mode === 'gpu'
? availableVram * 1000000 // MB to bytes
: totalRam - usedRam + (activeModel?.metadata?.size ?? 0)
const availableRam = settings?.gpus?.some((gpu) => gpu.activated)
? availableVram * 1000000 // MB to bytes
: totalRam - usedRam + (activeModel?.metadata?.size ?? 0)
if (minimumRamModel > totalRam) {
return (
<NotEnoughMemoryLabel
unit={settings?.run_mode === 'gpu' ? 'VRAM' : 'RAM'}
unit={settings?.gpus?.some((gpu) => gpu.activated) ? 'VRAM' : 'RAM'}
compact={compact}
/>
)

View File

@ -8,6 +8,8 @@ export const mainViewStateAtom = atom<MainViewState>(MainViewState.Thread)
export const defaultJanDataFolderAtom = atom<string>('')
export const LocalEngineDefaultVariantAtom = atom<string>('')
const SHOW_RIGHT_PANEL = 'showRightPanel'
// Store panel atom

View File

@ -21,7 +21,7 @@ jest.mock('jotai', () => ({
describe('useGetSystemResources', () => {
const mockMonitoringExtension = {
getResourcesInfo: jest.fn(),
getHardware: jest.fn(),
getCurrentLoad: jest.fn(),
}
@ -38,17 +38,17 @@ describe('useGetSystemResources', () => {
})
it('should fetch system resources on initial render', async () => {
mockMonitoringExtension.getResourcesInfo.mockResolvedValue({
mem: { usedMemory: 4000, totalMemory: 8000 },
mockMonitoringExtension.getHardware.mockResolvedValue({
cpu: { usage: 50 },
ram: { available: 4000, total: 8000 },
})
mockMonitoringExtension.getCurrentLoad.mockResolvedValue({
cpu: { usage: 50 },
gpu: [],
})
const { result } = renderHook(() => useGetSystemResources())
expect(mockMonitoringExtension.getResourcesInfo).toHaveBeenCalledTimes(1)
expect(mockMonitoringExtension.getHardware).toHaveBeenCalledTimes(1)
})
it('should start watching system resources when watch is called', () => {
@ -58,14 +58,14 @@ describe('useGetSystemResources', () => {
result.current.watch()
})
expect(mockMonitoringExtension.getResourcesInfo).toHaveBeenCalled()
expect(mockMonitoringExtension.getHardware).toHaveBeenCalled()
// Fast-forward time by 2 seconds
act(() => {
jest.advanceTimersByTime(2000)
})
expect(mockMonitoringExtension.getResourcesInfo).toHaveBeenCalled()
expect(mockMonitoringExtension.getHardware).toHaveBeenCalled()
})
it('should stop watching when stopWatching is called', () => {
@ -85,7 +85,7 @@ describe('useGetSystemResources', () => {
})
// Expect no additional calls after stopping
expect(mockMonitoringExtension.getResourcesInfo).toHaveBeenCalled()
expect(mockMonitoringExtension.getHardware).toHaveBeenCalled()
})
it('should not fetch resources if monitoring extension is not available', async () => {
@ -97,7 +97,7 @@ describe('useGetSystemResources', () => {
result.current.getSystemResources()
})
expect(mockMonitoringExtension.getResourcesInfo).not.toHaveBeenCalled()
expect(mockMonitoringExtension.getHardware).not.toHaveBeenCalled()
expect(mockMonitoringExtension.getCurrentLoad).not.toHaveBeenCalled()
})
})

View File

@ -1,6 +1,7 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { useCallback, useEffect, useState } from 'react'
import { ExtensionTypeEnum, MonitoringExtension } from '@janhq/core'
import { ExtensionTypeEnum, HardwareManagementExtension } from '@janhq/core'
import { useSetAtom } from 'jotai'
@ -20,58 +21,62 @@ export default function useGetSystemResources() {
NodeJS.Timeout | number | undefined
>(undefined)
const setTotalRam = useSetAtom(totalRamAtom)
const setGpus = useSetAtom(gpusAtom)
const setUsedRam = useSetAtom(usedRamAtom)
const setCpuUsage = useSetAtom(cpuUsageAtom)
const setTotalNvidiaVram = useSetAtom(nvidiaTotalVramAtom)
const setAvailableVram = useSetAtom(availableVramAtom)
const setUsedRam = useSetAtom(usedRamAtom)
const setTotalRam = useSetAtom(totalRamAtom)
const setRamUtilitized = useSetAtom(ramUtilitizedAtom)
const getSystemResources = useCallback(async () => {
if (
!extensionManager.get<MonitoringExtension>(
ExtensionTypeEnum.SystemMonitoring
!extensionManager.get<HardwareManagementExtension>(
ExtensionTypeEnum.Hardware
)
) {
return
}
const monitoring = extensionManager.get<MonitoringExtension>(
ExtensionTypeEnum.SystemMonitoring
)
const resourceInfor = await monitoring?.getResourcesInfo()
const currentLoadInfor = await monitoring?.getCurrentLoad()
if (resourceInfor?.mem?.usedMemory) setUsedRam(resourceInfor.mem.usedMemory)
if (resourceInfor?.mem?.totalMemory)
setTotalRam(resourceInfor.mem.totalMemory)
const hardwareExtension = extensionManager.get<HardwareManagementExtension>(
ExtensionTypeEnum.Hardware
)
const hardwareInfo = await hardwareExtension?.getHardware()
const usedMemory =
Number(hardwareInfo?.ram.total) - Number(hardwareInfo?.ram.available)
if (hardwareInfo?.ram?.total && hardwareInfo?.ram?.available)
setUsedRam(Number(usedMemory))
if (hardwareInfo?.ram?.total) setTotalRam(hardwareInfo.ram.total)
const ramUtilitized =
((resourceInfor?.mem?.usedMemory ?? 0) /
(resourceInfor?.mem?.totalMemory ?? 1)) *
100
((Number(usedMemory) ?? 0) / (hardwareInfo?.ram.total ?? 1)) * 100
setRamUtilitized(Math.round(ramUtilitized))
setCpuUsage(Math.round(currentLoadInfor?.cpu?.usage ?? 0))
setCpuUsage(Math.round(hardwareInfo?.cpu.usage ?? 0))
const gpus = currentLoadInfor?.gpu ?? []
setGpus(gpus)
const gpus = hardwareInfo?.gpus ?? []
setGpus(gpus as any)
let totalNvidiaVram = 0
if (gpus.length > 0) {
totalNvidiaVram = gpus.reduce(
(total: number, gpu: { memoryTotal: string }) =>
total + Number(gpu.memoryTotal),
(total: number, gpu: { total_vram: number }) =>
total + Number(gpu.total_vram),
0
)
}
setTotalNvidiaVram(totalNvidiaVram)
setAvailableVram(
gpus.reduce(
(total: number, gpu: { memoryFree: string }) =>
total + Number(gpu.memoryFree),
0
)
gpus.reduce((total, gpu) => {
return total + Number(gpu.free_vram || 0)
}, 0)
)
}, [
setUsedRam,

View File

@ -1,87 +0,0 @@
// useGpuSetting.test.ts
import { renderHook, act } from '@testing-library/react'
import { ExtensionTypeEnum, MonitoringExtension } from '@janhq/core'
// Mock dependencies
jest.mock('@/extension')
import useGpuSetting from './useGpuSetting'
import { extensionManager } from '@/extension'
describe('useGpuSetting', () => {
beforeEach(() => {
jest.clearAllMocks()
})
it('should return GPU settings when available', async () => {
const mockGpuSettings = {
gpuCount: 2,
gpuNames: ['NVIDIA GeForce RTX 3080', 'NVIDIA GeForce RTX 3070'],
totalMemory: 20000,
freeMemory: 15000,
}
const mockMonitoringExtension: Partial<MonitoringExtension> = {
getGpuSetting: jest.fn().mockResolvedValue(mockGpuSettings),
}
jest
.spyOn(extensionManager, 'get')
.mockReturnValue(mockMonitoringExtension as MonitoringExtension)
const { result } = renderHook(() => useGpuSetting())
let gpuSettings
await act(async () => {
gpuSettings = await result.current.getGpuSettings()
})
expect(gpuSettings).toEqual(mockGpuSettings)
expect(extensionManager.get).toHaveBeenCalledWith(
ExtensionTypeEnum.SystemMonitoring
)
expect(mockMonitoringExtension.getGpuSetting).toHaveBeenCalled()
})
it('should return undefined when no GPU settings are found', async () => {
const mockMonitoringExtension: Partial<MonitoringExtension> = {
getGpuSetting: jest.fn().mockResolvedValue(undefined),
}
jest
.spyOn(extensionManager, 'get')
.mockReturnValue(mockMonitoringExtension as MonitoringExtension)
const { result } = renderHook(() => useGpuSetting())
let gpuSettings
await act(async () => {
gpuSettings = await result.current.getGpuSettings()
})
expect(gpuSettings).toBeUndefined()
expect(extensionManager.get).toHaveBeenCalledWith(
ExtensionTypeEnum.SystemMonitoring
)
expect(mockMonitoringExtension.getGpuSetting).toHaveBeenCalled()
})
it('should handle missing MonitoringExtension', async () => {
jest.spyOn(extensionManager, 'get').mockReturnValue(undefined)
jest.spyOn(console, 'debug').mockImplementation(() => {})
const { result } = renderHook(() => useGpuSetting())
let gpuSettings
await act(async () => {
gpuSettings = await result.current.getGpuSettings()
})
expect(gpuSettings).toBeUndefined()
expect(extensionManager.get).toHaveBeenCalledWith(
ExtensionTypeEnum.SystemMonitoring
)
expect(console.debug).toHaveBeenCalledWith('No GPU setting found')
})
})

View File

@ -1,21 +0,0 @@
import { useCallback } from 'react'
import { ExtensionTypeEnum, MonitoringExtension } from '@janhq/core'
import { extensionManager } from '@/extension'
export default function useGpuSetting() {
const getGpuSettings = useCallback(async () => {
const gpuSetting = await extensionManager
?.get<MonitoringExtension>(ExtensionTypeEnum.SystemMonitoring)
?.getGpuSetting()
if (!gpuSetting) {
console.debug('No GPU setting found')
return undefined
}
return gpuSetting
}, [])
return { getGpuSettings }
}

View File

@ -0,0 +1,99 @@
import { useMemo } from 'react'
import { ExtensionTypeEnum, HardwareManagementExtension } from '@janhq/core'
import { useSetAtom } from 'jotai'
import useSWR from 'swr'
import { extensionManager } from '@/extension/ExtensionManager'
import {
cpuUsageAtom,
ramUtilitizedAtom,
totalRamAtom,
usedRamAtom,
} from '@/helpers/atoms/SystemBar.atom'
// fetcher function
async function fetchExtensionData<T>(
extension: HardwareManagementExtension | null,
method: (extension: HardwareManagementExtension) => Promise<T>
): Promise<T> {
if (!extension) {
throw new Error('Extension not found')
}
return method(extension)
}
const getExtension = () =>
extensionManager.get<HardwareManagementExtension>(
ExtensionTypeEnum.Hardware
) ?? null
/**
 * Hook that fetches hardware information via SWR and keeps the system resource atoms in sync.
 * @returns The hardware information, any fetch error, and the SWR mutate function.
*/
export function useGetHardwareInfo() {
const setCpuUsage = useSetAtom(cpuUsageAtom)
const setUsedRam = useSetAtom(usedRamAtom)
const setTotalRam = useSetAtom(totalRamAtom)
const setRamUtilitized = useSetAtom(ramUtilitizedAtom)
const extension = useMemo(
() =>
extensionManager.get<HardwareManagementExtension>(
ExtensionTypeEnum.Hardware
) ?? null,
[]
)
const {
data: hardware,
error,
mutate,
} = useSWR(
extension ? 'hardware' : null,
() => fetchExtensionData(extension, (ext) => ext.getHardware()),
{
revalidateOnFocus: false,
revalidateOnReconnect: false,
refreshInterval: 2000,
}
)
const usedMemory =
Number(hardware?.ram.total) - Number(hardware?.ram.available)
if (hardware?.ram?.total && hardware?.ram?.available)
setUsedRam(Number(usedMemory))
if (hardware?.ram?.total) setTotalRam(hardware.ram.total)
const ramUtilitized =
((Number(usedMemory) ?? 0) / (hardware?.ram.total ?? 1)) * 100
setRamUtilitized(Math.round(ramUtilitized))
setCpuUsage(Math.round(hardware?.cpu.usage ?? 0))
return { hardware, error, mutate }
}
/**
 * Sets the active GPUs via the hardware management extension.
 * @returns A Promise that resolves with the activation result.
*/
export const setActiveGpus = async (data: { gpus: number[] }) => {
const extension = getExtension()
if (!extension) {
throw new Error('Extension is not available')
}
try {
const response = await extension.setAvtiveGpu(data)
return response
} catch (error) {
console.error('Failed to set active GPUs:', error)
throw error
}
}
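For reference, a minimal sketch of a component consuming the hook and setter above. It is illustrative only: the component name GpuQuickToggle and the assumption that this file is exported from '@/hooks/useHardwareManagement' are not part of the PR.
/* Illustrative usage sketch (not part of the PR): a minimal consumer of
   useGetHardwareInfo and setActiveGpus. Assumes this file is exported from
   '@/hooks/useHardwareManagement'; the component name is hypothetical. */
import React from 'react'

import {
  setActiveGpus,
  useGetHardwareInfo,
} from '@/hooks/useHardwareManagement'

const GpuQuickToggle = () => {
  const { hardware, error, mutate } = useGetHardwareInfo()

  if (error) return <span>Failed to read hardware information</span>
  if (!hardware) return <span>Loading hardware information...</span>

  // Activate every detected GPU, then revalidate the cached hardware info.
  const enableAllGpus = async () => {
    const ids = (hardware.gpus ?? []).map((gpu) => Number(gpu.id))
    await setActiveGpus({ gpus: ids })
    mutate()
  }

  return (
    <button onClick={enableAllGpus}>
      Enable {hardware.gpus?.length ?? 0} GPU(s)
    </button>
  )
}

export default GpuQuickToggle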

View File

@ -1,20 +1,10 @@
import { useCallback, useEffect, useState } from 'react'
import { fs, joinPath } from '@janhq/core'
type NvidiaDriver = {
exist: boolean
version: string
}
import { fs, GpuSettingInfo, joinPath } from '@janhq/core'
export type AppSettings = {
run_mode: 'cpu' | 'gpu' | undefined
notify: boolean
gpus_in_use: string[]
vulkan: boolean
gpus: string[]
nvidia_driver: NvidiaDriver
cuda: NvidiaDriver
gpus: GpuSettingInfo[]
}
export const useSettings = () => {
@ -38,29 +28,16 @@ export const useSettings = () => {
return {}
}, [])
const saveSettings = async ({
runMode,
notify,
gpusInUse,
vulkan,
}: {
runMode?: string | undefined
notify?: boolean | undefined
gpusInUse?: string[] | undefined
vulkan?: boolean | undefined
}) => {
const saveSettings = async ({ vulkan }: { vulkan?: boolean | undefined }) => {
const settingsFile = await joinPath(['file://settings', 'settings.json'])
const settings = await readSettings()
if (runMode != null) settings.run_mode = runMode
if (notify != null) settings.notify = notify
if (gpusInUse != null) settings.gpus_in_use = gpusInUse.filter((e) => !!e)
if (vulkan != null) {
settings.vulkan = vulkan
// GPU enabled, set run_mode to 'gpu'
if (settings.vulkan === true) {
settings.run_mode = 'gpu'
} else {
settings.run_mode = 'cpu'
settings?.gpus?.some((gpu: { activated: boolean }) =>
gpu.activated === true ? 'gpu' : 'cpu'
)
}
}
await fs.writeFileSync(settingsFile, JSON.stringify(settings))

View File

@ -14,6 +14,7 @@
"test": "jest"
},
"dependencies": {
"@hello-pangea/dnd": "17.0.0",
"@hookform/resolvers": "^3.9.1",
"@janhq/core": "link:../core",
"@janhq/joi": "link:../joi",

View File

@ -57,7 +57,7 @@ const ModelItemHeader = ({ model, onClick, open }: Props) => {
// Default nvidia returns vram in MB, need to convert to bytes to match the unit of totalRamW
let ram = nvidiaTotalVram * 1024 * 1024
if (ram === 0 || settings?.run_mode === 'cpu') {
if (ram === 0 || !settings?.gpus?.some((gpu) => gpu.activated)) {
ram = totalRam
}
const serverEnabled = useAtomValue(serverEnabledAtom)

View File

@ -123,7 +123,7 @@ const Advanced = ({ setSubdir }: { setSubdir: (subdir: string) => void }) => {
})
stopModel()
setVulkanEnabled(e)
await saveSettings({ vulkan: e, gpusInUse: [] })
await saveSettings({ vulkan: e })
// Relaunch to apply settings
if (relaunch) window.location.reload()
}
@ -155,7 +155,11 @@ const Advanced = ({ setSubdir }: { setSubdir: (subdir: string) => void }) => {
useEffect(() => {
const setUseGpuIfPossible = async () => {
const settings = await readSettings()
setGpuEnabled(settings.run_mode === 'gpu' && settings.gpus?.length > 0)
setGpuEnabled(
settings.gpus?.some(
(gpu: { activated: boolean }) => gpu.activated === true
) && settings.gpus?.length > 0
)
setGpusInUse(settings.gpus_in_use || [])
setVulkanEnabled(settings.vulkan || false)
if (settings.gpus) {
@ -194,7 +198,6 @@ const Advanced = ({ setSubdir }: { setSubdir: (subdir: string) => void }) => {
if (gpuId && gpuId.trim()) updatedGpusInUse.push(gpuId)
}
setGpusInUse(updatedGpusInUse)
await saveSettings({ gpusInUse: updatedGpusInUse.filter((e) => !!e) })
// Reload window to apply changes
// This will trigger engine servers to restart
window.location.reload()
@ -280,7 +283,6 @@ const Advanced = ({ setSubdir }: { setSubdir: (subdir: string) => void }) => {
checked={gpuEnabled}
onChange={(e) => {
if (e.target.checked === true) {
saveSettings({ runMode: 'gpu' })
setGpuEnabled(true)
snackbar({
description:
@ -288,7 +290,6 @@ const Advanced = ({ setSubdir }: { setSubdir: (subdir: string) => void }) => {
type: 'success',
})
} else {
saveSettings({ runMode: 'cpu' })
setGpuEnabled(false)
snackbar({
description:

View File

@ -9,6 +9,7 @@ import {
} from '@janhq/core'
import { Button, ScrollArea, Badge, Select, Progress } from '@janhq/joi'
import { useAtom } from 'jotai'
import { twMerge } from 'tailwind-merge'
import { useActiveModel } from '@/hooks/useActiveModel'
@ -27,6 +28,8 @@ import { formatDownloadPercentage } from '@/utils/converter'
import ExtensionSetting from '../ExtensionSetting'
import DeleteEngineVariant from './DeleteEngineVariant'
import { LocalEngineDefaultVariantAtom } from '@/helpers/atoms/App.atom'
const os = () => {
switch (PLATFORM) {
case 'win32':
@ -86,8 +89,8 @@ const LocalEngineSettings = ({ engine }: { engine: InferenceEngine }) => {
(x: any) => x.version === defaultEngineVariant?.version
)
const [selectedVariants, setSelectedVariants] = useState(
defaultEngineVariant?.variant
const [selectedVariants, setSelectedVariants] = useAtom(
LocalEngineDefaultVariantAtom
)
const selectedVariant = useMemo(
@ -102,7 +105,7 @@ const LocalEngineSettings = ({ engine }: { engine: InferenceEngine }) => {
if (defaultEngineVariant?.variant) {
setSelectedVariants(defaultEngineVariant.variant || '')
}
}, [defaultEngineVariant])
}, [defaultEngineVariant, setSelectedVariants])
const handleEngineUpdate = useCallback(
async (event: { id: string; type: DownloadEvent; percent: number }) => {

View File

@ -0,0 +1,342 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import * as React from 'react'
import { useState } from 'react'
import { DragDropContext, Draggable, Droppable } from '@hello-pangea/dnd'
import { Gpu } from '@janhq/core'
import { Progress, ScrollArea, Switch } from '@janhq/joi'
import { useAtom, useAtomValue } from 'jotai'
import { atomWithStorage } from 'jotai/utils'
import { ChevronDownIcon, GripVerticalIcon } from 'lucide-react'
import { twMerge } from 'tailwind-merge'
import {
useGetHardwareInfo,
setActiveGpus,
} from '@/hooks/useHardwareManagement'
import { toGibibytes } from '@/utils/converter'
import {
cpuUsageAtom,
ramUtilitizedAtom,
totalRamAtom,
usedRamAtom,
} from '@/helpers/atoms/SystemBar.atom'
const gpusAtom = atomWithStorage<Gpu[]>('gpus', [], undefined, {
getOnInit: true,
})
const Hardware = () => {
const { hardware } = useGetHardwareInfo()
const [openPanels, setOpenPanels] = useState<Record<number, boolean>>({})
const cpuUsage = useAtomValue(cpuUsageAtom)
const totalRam = useAtomValue(totalRamAtom)
const usedRam = useAtomValue(usedRamAtom)
const ramUtilitized = useAtomValue(ramUtilitizedAtom)
const [gpus, setGpus] = useAtom<Gpu[]>(gpusAtom)
const togglePanel = (index: number) => {
setOpenPanels((prev) => ({
...prev,
[index]: !prev[index], // Toggle the specific panel
}))
}
// Handle switch toggle for GPU activation
const handleSwitchChange = async (index: number, isActive: boolean) => {
const updatedGpus = gpus.map((gpu, i) =>
i === index ? { ...gpu, activated: isActive } : gpu
)
setGpus(updatedGpus)
// Call the API to update the active GPUs
try {
const activeGpuIds = updatedGpus
.filter((gpu) => gpu.activated)
.map((gpu) => Number(gpu.id))
await setActiveGpus({ gpus: activeGpuIds })
} catch (error) {
console.error('Failed to update active GPUs:', error)
}
}
const handleDragEnd = (result: any) => {
if (!result.destination) return
const reorderedGpus = Array.from(gpus)
const [movedGpu] = reorderedGpus.splice(result.source.index, 1)
reorderedGpus.splice(result.destination.index, 0, movedGpu)
setGpus(reorderedGpus) // Update the atom, which persists to localStorage
}
React.useEffect(() => {
if (hardware?.gpus) {
setGpus((prevGpus) => {
// Create a map of existing GPUs by UUID for quick lookup
const gpuMap = new Map(prevGpus.map((gpu) => [gpu.uuid, gpu]))
// Update existing GPUs or add new ones
const updatedGpus = hardware.gpus.map((newGpu) => {
const existingGpu = gpuMap.get(newGpu.uuid)
if (existingGpu) {
// Update the GPU properties while keeping the original order
return {
...existingGpu,
free_vram: newGpu.free_vram,
total_vram: newGpu.total_vram,
}
}
// Return the new GPU if not already in the state
return newGpu
})
// Append GPUs from the previous state that are not present in hardware.gpus
// This preserves user-reordered GPUs that aren't present in the new data
const remainingGpus = prevGpus.filter(
(prevGpu) => !hardware.gpus?.some((gpu) => gpu.uuid === prevGpu.uuid)
)
return [...updatedGpus, ...remainingGpus]
})
}
}, [hardware?.gpus, setGpus])
return (
<ScrollArea className="h-full w-full px-4">
<div className="block w-full py-4">
{/* CPU */}
<div className="flex w-full flex-col items-start justify-between gap-4 border-b border-[hsla(var(--app-border))] py-4 first:pt-0 last:border-none sm:flex-row">
<div className="flex-shrink-0 space-y-1">
<div className="flex gap-x-2">
<h6 className="font-semibold capitalize">CPU</h6>
</div>
</div>
<div className="w-full md:w-2/3">
<div className="flex flex-col items-end gap-2">
<div className="flex w-full justify-end gap-2 text-xs text-[hsla(var(--text-secondary))]">
<span>{hardware?.cpu.model}</span>
<span>|</span>
<span>Cores: {hardware?.cpu.cores}</span>
<span>|</span>
<span>Architecture: {hardware?.cpu.arch}</span>
</div>
<div className="flex w-2/3 items-center gap-3">
<Progress value={cpuUsage} size="small" className="w-full" />
<span className="font-medium">{cpuUsage}%</span>
</div>
</div>
</div>
</div>
{/* RAM */}
<div className="flex w-full flex-col items-start justify-between gap-4 border-b border-[hsla(var(--app-border))] py-4 first:pt-0 last:border-none sm:flex-row">
<div className="flex-shrink-0 space-y-1">
<div className="flex gap-x-2">
<h6 className="font-semibold capitalize">RAM</h6>
</div>
</div>
<div className="w-full md:w-2/3">
<div className="flex flex-col items-end gap-2">
<div className="flex w-full justify-end gap-2 text-xs text-[hsla(var(--text-secondary))]">
<span>
{toGibibytes(usedRam, { hideUnit: true })}GB /{' '}
{toGibibytes(totalRam, { hideUnit: true })}GB
</span>
{hardware?.ram.type && (
<>
<span>|</span>
<span>Type: {hardware?.ram.type}</span>
</>
)}
</div>
<div className="flex w-2/3 items-center gap-3">
<Progress
value={Math.round((usedRam / totalRam) * 100)}
size="small"
className="w-full"
/>
<span className="font-medium">{ramUtilitized}%</span>
</div>
</div>
</div>
</div>
{/* OS */}
<div className="flex w-full flex-col items-start justify-between gap-4 border-b border-[hsla(var(--app-border))] py-4 first:pt-0 last:border-none sm:flex-row">
<div className="flex-shrink-0 space-y-1">
<div className="flex gap-x-2">
<h6 className="font-semibold capitalize">OS</h6>
</div>
</div>
<div className="w-full md:w-2/3">
<div className="flex flex-col items-end gap-2">
<div className="flex w-full justify-end gap-2 text-xs text-[hsla(var(--text-secondary))]">
<span>{hardware?.os.name}</span>
<span>|</span>
<span>{hardware?.os.version}</span>
</div>
</div>
</div>
</div>
{/* GPUs */}
{!isMac && gpus.length > 0 && (
<div className="flex w-full flex-col items-start justify-between gap-4 border-b border-[hsla(var(--app-border))] py-4 first:pt-0 last:border-none sm:flex-row">
<div className="w-full flex-shrink-0">
<div className="flex gap-x-2">
<h6 className="font-semibold capitalize">GPUs</h6>
</div>
<p className="mt-1 font-medium leading-relaxed text-[hsla(var(--text-secondary))]">
{`Enhance model performance by utilizing your device's GPU for
acceleration.`}
</p>
<DragDropContext onDragEnd={handleDragEnd}>
<Droppable droppableId="gpu-list">
{(provided) => (
<div
{...provided.droppableProps}
ref={provided.innerRef}
className="mt-4"
>
{gpus.map((item, i) => (
<Draggable key={i} draggableId={String(i)} index={i}>
{(provided, snapshot) => (
<div
ref={provided.innerRef}
{...provided.draggableProps}
{...provided.dragHandleProps}
className={twMerge(
'cursor-pointer border border-[hsla(var(--app-border))] bg-[hsla(var(--tertiary-bg))] p-4 first:rounded-t-lg last:rounded-b-lg',
gpus.length > 1 && 'last:rounded-t-none',
snapshot.isDragging
? 'border-b'
: 'border-b-0 last:border-b'
)}
onClick={() => togglePanel(i)}
>
<div className="flex flex-col items-start justify-start gap-4 sm:flex-row sm:items-center sm:justify-between">
<div className="flex w-full items-center justify-between">
<div className="flex h-full flex-shrink-0 items-center gap-2">
<GripVerticalIcon
size={14}
className="text-[hsla(var(--text-tertiary))]"
/>
<div
className={twMerge(
'h-2 w-2 rounded-full',
item.activated
? 'bg-green-400'
: 'bg-neutral-300'
)}
/>
<h6 title={item.name}>{item.name}</h6>
</div>
<div className="flex flex-shrink-0 items-end gap-4">
{item.activated && (
<div className="flex w-40 items-center gap-3">
<Progress
value={Math.round(
((Number(item.total_vram) -
Number(item.free_vram)) /
Number(item.total_vram)) *
100
)}
size="small"
className="w-full"
/>
<span className="font-medium">
{Math.round(
((Number(item.total_vram) -
Number(item.free_vram)) /
Number(item.total_vram)) *
100
).toFixed()}
%
</span>
</div>
)}
<div className="flex justify-end gap-2 text-xs text-[hsla(var(--text-secondary))]">
{item.activated && (
<span>
{(
(Number(item.total_vram) -
Number(item.free_vram)) /
1024
).toFixed(2)}
GB /{' '}
</span>
)}
<span>
{(
Number(item.total_vram) / 1024
).toFixed(2)}
GB
</span>
</div>
<Switch
checked={item.activated}
onChange={(e) =>
handleSwitchChange(i, e.target.checked)
}
/>
<ChevronDownIcon
size={14}
className={twMerge(
'relative z-10 transform cursor-pointer transition-transform',
openPanels[i]
? 'rotate-180'
: 'rotate-0'
)}
/>
</div>
</div>
</div>
{openPanels[i] && (
<div className="space-y-4 p-4 pb-0 text-[hsla(var(--text-secondary))]">
<div className="flex">
<div className="w-[200px]">
Driver Version
</div>
<span>
{
item.additional_information
?.driver_version
}
</span>
</div>
<div className="flex">
<div className="w-[200px]">
Compute Capability
</div>
<span>
{item.additional_information?.compute_cap}
</span>
</div>
</div>
)}
</div>
)}
</Draggable>
))}
{provided.placeholder}
</div>
)}
</Droppable>
</DragDropContext>
</div>
</div>
)}
</div>
</ScrollArea>
)
}
export default Hardware
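As a side note, the useEffect above boils down to a by-uuid merge: refresh the VRAM figures for GPUs the latest poll reports, and keep previously persisted GPUs it does not. A standalone sketch of that merge as a pure function follows; the helper name mergeGpus is illustrative and not code from the PR.
/* Illustrative sketch (not part of the PR) of the merge performed in the
   useEffect above: GPUs reported by the latest poll keep their persisted
   fields (such as `activated`) and refresh only their VRAM figures, while
   previously known GPUs missing from the poll are appended so they survive. */
import { Gpu } from '@janhq/core'

export function mergeGpus(prevGpus: Gpu[], polledGpus: Gpu[]): Gpu[] {
  const updated = polledGpus.map((polled) => {
    const existing = prevGpus.find((prev) => prev.uuid === polled.uuid)
    return existing
      ? {
          ...existing,
          free_vram: polled.free_vram,
          total_vram: polled.total_vram,
        }
      : polled
  })

  // GPUs the poll no longer reports are appended so they are not dropped.
  const missing = prevGpus.filter(
    (prev) => !polledGpus.some((polled) => polled.uuid === prev.uuid)
  )

  return [...updated, ...missing]
}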

View File

@ -13,6 +13,7 @@ import Engines from '@/screens/Settings/Engines'
import LocalEngineSettings from '@/screens/Settings/Engines/LocalEngineSettings'
import RemoteEngineSettings from '@/screens/Settings/Engines/RemoteEngineSettings'
import ExtensionSetting from '@/screens/Settings/ExtensionSetting'
import Hardware from '@/screens/Settings/Hardware'
import Hotkeys from '@/screens/Settings/Hotkeys'
import MyModels from '@/screens/Settings/MyModels'
import Privacy from '@/screens/Settings/Privacy'
@ -39,6 +40,9 @@ const SettingDetail = () => {
case 'Keyboard Shortcuts':
return <Hotkeys />
case 'Hardware':
return <Hardware />
case 'Privacy':
return <Privacy />

View File

@ -15,6 +15,7 @@ export const SettingScreenList = [
'My Models',
'Preferences',
'Keyboard Shortcuts',
'Hardware',
'Privacy',
'Advanced Settings',
'Engines',

View File

@ -1,23 +1,34 @@
import { extensionManager } from '@/extension'
import { appService } from './appService'
test('should return correct system information when monitoring extension is found', async () => {
const mockGpuSetting = { name: 'NVIDIA GeForce GTX 1080', memory: 8192 }
const mockOsInfo = { platform: 'win32', release: '10.0.19041' }
const mockMonitoringExtension = {
getGpuSetting: jest.fn().mockResolvedValue(mockGpuSetting),
getOsInfo: jest.fn().mockResolvedValue(mockOsInfo),
test('should return correct system information when hardware extension is found', async () => {
(global as any).isMac = false;
(global as any).PLATFORM = "win32";
const mock = { cpu: { arch: 'arc' }, ram: { available: 4000, total: 8000 }, gpus: [{name: 'NVIDIA GeForce GTX 1080', total_vram: 8192}] }
const mockHardwareExtension = {
getHardware: jest.fn().mockResolvedValue(mock),
}
extensionManager.get = jest.fn().mockReturnValue(mockMonitoringExtension)
extensionManager.get = jest.fn().mockReturnValue(mockHardwareExtension)
const result = await appService.systemInformation()
expect(mockMonitoringExtension.getGpuSetting).toHaveBeenCalled()
expect(mockMonitoringExtension.getOsInfo).toHaveBeenCalled()
expect(result).toEqual({ gpuSetting: mockGpuSetting, osInfo: mockOsInfo })
expect(mockHardwareExtension.getHardware).toHaveBeenCalled()
expect(result).toEqual({
gpuSetting: {gpus: mock.gpus, vulkan: false, cpu: {arch: mock.cpu.arch},},
osInfo: {
platform: 'win32',
arch: mock.cpu.arch,
freeMem: mock.ram.available,
totalMem: mock.ram.total,
},
})
})
test('should log a warning when monitoring extension is not found', async () => {
test('should log a warning when hardware extension is not found', async () => {
const consoleWarnMock = jest
.spyOn(console, 'warn')
.mockImplementation(() => {})
@ -26,7 +37,7 @@ test('should log a warning when monitoring extension is not found', async () =>
await appService.systemInformation()
expect(consoleWarnMock).toHaveBeenCalledWith(
'System monitoring extension not found'
'Hardware extension not found'
)
consoleWarnMock.mockRestore()
})

View File

@ -1,29 +1,54 @@
import {
ExtensionTypeEnum,
MonitoringExtension,
HardwareManagementExtension,
SupportedPlatform,
SystemInformation,
GpuSetting,
GpuSettingInfo,
} from '@janhq/core'
import { getDefaultStore } from 'jotai'
import { toaster } from '@/containers/Toast'
import { extensionManager } from '@/extension'
import { LocalEngineDefaultVariantAtom } from '@/helpers/atoms/App.atom'
export const appService = {
systemInformation: async (): Promise<SystemInformation | undefined> => {
const monitorExtension = extensionManager?.get<MonitoringExtension>(
ExtensionTypeEnum.SystemMonitoring
const selectedVariants = getDefaultStore().get(
LocalEngineDefaultVariantAtom
)
if (!monitorExtension) {
console.warn('System monitoring extension not found')
const hardwareExtension =
extensionManager?.get<HardwareManagementExtension>(
ExtensionTypeEnum.Hardware
)
if (!hardwareExtension) {
console.warn('Hardware extension not found')
return undefined
}
const gpuSetting = await monitorExtension.getGpuSetting()
const osInfo = await monitorExtension.getOsInfo()
const hardwareInfo = await hardwareExtension?.getHardware()
const gpuSettingInfo: GpuSetting | undefined = {
gpus: hardwareInfo.gpus as GpuSettingInfo[],
vulkan: isMac ? false : selectedVariants.includes('vulkan'),
cpu: hardwareInfo.cpu,
}
const updateOsInfo = {
platform: PLATFORM as SupportedPlatform,
arch: hardwareInfo.cpu.arch,
freeMem: hardwareInfo.ram.available,
totalMem: hardwareInfo.ram.total,
}
return {
gpuSetting,
osInfo,
gpuSetting: gpuSettingInfo,
osInfo: updateOsInfo,
}
},
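For illustration, a minimal caller of the reshaped service might look like the sketch below; the helper name logSystemSummary is hypothetical and not part of the PR.
/* Illustrative only (not part of the PR): reading the hardware-backed
   system information assembled above. */
import { appService } from './appService'

export const logSystemSummary = async (): Promise<void> => {
  const info = await appService.systemInformation()
  if (!info) return // hardware extension not installed

  console.log(
    `Platform: ${info.osInfo?.platform} (${info.osInfo?.arch}), ` +
      `free/total RAM: ${info.osInfo?.freeMem}/${info.osInfo?.totalMem}, ` +
      `GPUs detected: ${info.gpuSetting?.gpus?.length ?? 0}`
  )
}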

View File

@ -369,7 +369,7 @@ __metadata:
languageName: node
linkType: hard
"@babel/runtime@npm:^7.12.5":
"@babel/runtime@npm:^7.12.5, @babel/runtime@npm:^7.25.6":
version: 7.26.0
resolution: "@babel/runtime@npm:7.26.0"
dependencies:
@ -844,6 +844,24 @@ __metadata:
languageName: node
linkType: hard
"@hello-pangea/dnd@npm:17.0.0":
version: 17.0.0
resolution: "@hello-pangea/dnd@npm:17.0.0"
dependencies:
"@babel/runtime": "npm:^7.25.6"
css-box-model: "npm:^1.2.1"
memoize-one: "npm:^6.0.0"
raf-schd: "npm:^4.0.3"
react-redux: "npm:^9.1.2"
redux: "npm:^5.0.1"
use-memo-one: "npm:^1.1.3"
peerDependencies:
react: ^18.0.0
react-dom: ^18.0.0
checksum: 10c0/93417c055267f6f12a37a1cdb08d9db85ab021b102315e1e5a70a79d7de6c2ffaeff211e3ec40441c110f39e60688cfcea85ab86c21820041d974415c1ca715e
languageName: node
linkType: hard
"@hookform/resolvers@npm:^3.9.1":
version: 3.9.1
resolution: "@hookform/resolvers@npm:3.9.1"
@ -1067,6 +1085,7 @@ __metadata:
version: 0.0.0-use.local
resolution: "@janhq/web@workspace:web"
dependencies:
"@hello-pangea/dnd": "npm:17.0.0"
"@hookform/resolvers": "npm:^3.9.1"
"@janhq/core": "link:../core"
"@janhq/joi": "link:../joi"
@ -4408,6 +4427,13 @@ __metadata:
languageName: node
linkType: hard
"@types/use-sync-external-store@npm:^0.0.6":
version: 0.0.6
resolution: "@types/use-sync-external-store@npm:0.0.6"
checksum: 10c0/77c045a98f57488201f678b181cccd042279aff3da34540ad242f893acc52b358bd0a8207a321b8ac09adbcef36e3236944390e2df4fcedb556ce7bb2a88f2a8
languageName: node
linkType: hard
"@types/uuid@npm:^9.0.6":
version: 9.0.8
resolution: "@types/uuid@npm:9.0.8"
@ -6681,6 +6707,15 @@ __metadata:
languageName: node
linkType: hard
"css-box-model@npm:^1.2.1":
version: 1.2.1
resolution: "css-box-model@npm:1.2.1"
dependencies:
tiny-invariant: "npm:^1.0.6"
checksum: 10c0/611e56d76b16e4e21956ed9fa53f1936fbbfaccd378659587e9c929f342037fc6c062f8af9447226e11fe7c95e31e6c007a37e592f9bff4c2d40e6915553104a
languageName: node
linkType: hard
"css-declaration-sorter@npm:^6.3.1":
version: 6.4.1
resolution: "css-declaration-sorter@npm:6.4.1"
@ -12482,6 +12517,13 @@ __metadata:
languageName: node
linkType: hard
"memoize-one@npm:^6.0.0":
version: 6.0.0
resolution: "memoize-one@npm:6.0.0"
checksum: 10c0/45c88e064fd715166619af72e8cf8a7a17224d6edf61f7a8633d740ed8c8c0558a4373876c9b8ffc5518c2b65a960266adf403cc215cb1e90f7e262b58991f54
languageName: node
linkType: hard
"merge-stream@npm:^2.0.0":
version: 2.0.0
resolution: "merge-stream@npm:2.0.0"
@ -15331,6 +15373,13 @@ __metadata:
languageName: node
linkType: hard
"raf-schd@npm:^4.0.3":
version: 4.0.3
resolution: "raf-schd@npm:4.0.3"
checksum: 10c0/ecabf0957c05fad059779bddcd992f1a9d3a35dfea439a6f0935c382fcf4f7f7fa60489e467b4c2db357a3665167d2a379782586b59712bb36c766e02824709b
languageName: node
linkType: hard
"randomatic@npm:^3.0.0":
version: 3.1.1
resolution: "randomatic@npm:3.1.1"
@ -15477,6 +15526,25 @@ __metadata:
languageName: node
linkType: hard
"react-redux@npm:^9.1.2":
version: 9.2.0
resolution: "react-redux@npm:9.2.0"
dependencies:
"@types/use-sync-external-store": "npm:^0.0.6"
use-sync-external-store: "npm:^1.4.0"
peerDependencies:
"@types/react": ^18.2.25 || ^19
react: ^18.0 || ^19
redux: ^5.0.0
peerDependenciesMeta:
"@types/react":
optional: true
redux:
optional: true
checksum: 10c0/00d485f9d9219ca1507b4d30dde5f6ff8fb68ba642458f742e0ec83af052f89e65cd668249b99299e1053cc6ad3d2d8ac6cb89e2f70d2ac5585ae0d7fa0ef259
languageName: node
linkType: hard
"react-remove-scroll-bar@npm:^2.3.7":
version: 2.3.8
resolution: "react-remove-scroll-bar@npm:2.3.8"
@ -15684,6 +15752,13 @@ __metadata:
languageName: node
linkType: hard
"redux@npm:^5.0.1":
version: 5.0.1
resolution: "redux@npm:5.0.1"
checksum: 10c0/b10c28357194f38e7d53b760ed5e64faa317cc63de1fb95bc5d9e127fab956392344368c357b8e7a9bedb0c35b111e7efa522210cfdc3b3c75e5074718e9069c
languageName: node
linkType: hard
"reflect.getprototypeof@npm:^1.0.6, reflect.getprototypeof@npm:^1.0.8, reflect.getprototypeof@npm:^1.0.9":
version: 1.0.9
resolution: "reflect.getprototypeof@npm:1.0.9"
@ -17742,6 +17817,13 @@ __metadata:
languageName: node
linkType: hard
"tiny-invariant@npm:^1.0.6":
version: 1.3.3
resolution: "tiny-invariant@npm:1.3.3"
checksum: 10c0/65af4a07324b591a059b35269cd696aba21bef2107f29b9f5894d83cc143159a204b299553435b03874ebb5b94d019afa8b8eff241c8a4cfee95872c2e1c1c4a
languageName: node
linkType: hard
"tiny-typed-emitter@npm:^2.1.0":
version: 2.1.0
resolution: "tiny-typed-emitter@npm:2.1.0"
@ -18465,6 +18547,15 @@ __metadata:
languageName: node
linkType: hard
"use-memo-one@npm:^1.1.3":
version: 1.1.3
resolution: "use-memo-one@npm:1.1.3"
peerDependencies:
react: ^16.8.0 || ^17.0.0 || ^18.0.0
checksum: 10c0/3d596e65a6b47b2f1818061599738e00daad1f9a9bb4e5ce1f014b20a35b297e50fe4bf1d8c1699ab43ea97f01f84649a736c15ceff96de83bfa696925f6cc6b
languageName: node
linkType: hard
"use-sidecar@npm:^1.1.2":
version: 1.1.3
resolution: "use-sidecar@npm:1.1.3"