diff --git a/.gitignore b/.gitignore
index d2f46cc8f..7242f0763 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,6 +40,7 @@ docs/yarn.lock
 electron/.version.bak
 src-tauri/binaries/engines/cortex.llamacpp
 src-tauri/resources/themes
+src-tauri/resources/lib
 src-tauri/Cargo.lock
 src-tauri/icons
 !src-tauri/icons/icon.png
diff --git a/Makefile b/Makefile
index 6ed4429c0..513d0f8f2 100644
--- a/Makefile
+++ b/Makefile
@@ -41,6 +41,7 @@ dev: check-file-counts
 dev-tauri: check-file-counts
 	yarn install:cortex
 	yarn download:bin
+	yarn copy:lib
 	CLEAN=true yarn dev:tauri
 
 # Linting
@@ -121,6 +122,7 @@ build: check-file-counts
 	yarn build
 
 build-tauri: check-file-counts
+	yarn copy:lib
 	yarn build-tauri
 
 clean:
diff --git a/lib/linux/libvulkan.so b/lib/linux/libvulkan.so
new file mode 100644
index 000000000..241557479
Binary files /dev/null and b/lib/linux/libvulkan.so differ
diff --git a/lib/windows/vulkan-1.dll b/lib/windows/vulkan-1.dll
new file mode 100644
index 000000000..e0039bc4e
Binary files /dev/null and b/lib/windows/vulkan-1.dll differ
diff --git a/package.json b/package.json
index 79c64dfa8..ab3e9e0f8 100644
--- a/package.json
+++ b/package.json
@@ -28,6 +28,10 @@
     "install:cortex:win32": "cd src-tauri/binaries && download.bat",
     "install:cortex": "run-script-os",
     "download:bin": "node ./scripts/download-bin.mjs",
+    "copy:lib": "run-script-os",
+    "copy:lib:linux": "cpx \"./lib/linux/*.so\" \"./src-tauri/resources/lib/\"",
+    "copy:lib:win32": "cpx \"./lib/windows/*.dll\" \"./src-tauri/resources/lib/\"",
+    "copy:lib:darwin": "mkdir \"./src-tauri/resources/lib/\"",
     "dev:tauri": "yarn build:icon && yarn copy:assets:tauri && tauri dev",
     "build:tauri:linux:win32": "yarn download:bin && yarn install:cortex && yarn build:icon && yarn copy:assets:tauri && yarn tauri build",
     "build:tauri:darwin": "yarn install:cortex && yarn build:icon && yarn copy:assets:tauri && yarn tauri build --target universal-apple-darwin",
diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml
index bb0cf1a99..342e22c66 100644
--- a/src-tauri/Cargo.toml
+++ b/src-tauri/Cargo.toml
@@ -46,6 +46,13 @@ futures-util = "0.3.31"
 tokio-util = "0.7.14"
 tauri-plugin-dialog = "2.2.1"
 dirs = "6.0.0"
+sysinfo = "0.34.2"
+ash = "0.38.0"
+nvml-wrapper = "0.10.0"
+
+[target.'cfg(windows)'.dependencies]
+libloading = "0.8.7"
+libc = "0.2.172"
 
 [target.'cfg(not(any(target_os = "android", target_os = "ios")))'.dependencies]
 tauri-plugin-updater = "2"
diff --git a/src-tauri/src/core/hardware/amd.rs b/src-tauri/src/core/hardware/amd.rs
new file mode 100644
index 000000000..c87aef8c7
--- /dev/null
+++ b/src-tauri/src/core/hardware/amd.rs
@@ -0,0 +1,192 @@
+use super::{GpuInfo, GpuUsage};
+
+impl GpuInfo {
+    #[cfg(not(target_os = "linux"))]
+    #[cfg(not(target_os = "windows"))]
+    pub fn get_usage_amd(&self) -> GpuUsage {
+        self.get_usage_unsupported()
+    }
+
+    #[cfg(target_os = "linux")]
+    pub fn get_usage_amd(&self) -> GpuUsage {
+        use std::fs;
+        use std::path::Path;
+
+        let device_id = match &self.vulkan_info {
+            Some(vulkan_info) => vulkan_info.device_id,
+            None => {
+                log::error!("get_usage_amd called without Vulkan info");
+                return self.get_usage_unsupported();
+            }
+        };
+
+        for card_idx in 0.. {
+            let device_path = format!("/sys/class/drm/card{}/device", card_idx);
+            if !Path::new(&device_path).exists() {
+                break;
+            }
+
+            // Check if this is an AMD GPU by looking for the amdgpu directory
+            if !Path::new(&format!("{}/driver/module/drivers/pci:amdgpu", device_path)).exists() {
+                continue;
+            }
+
+            // match device_id from Vulkan info
+            let this_device_id = fs::read_to_string(format!("{}/device", device_path))
+                .map(|s| u32::from_str_radix(s.trim(), 16).unwrap_or(0))
+                .unwrap_or(0);
+            if this_device_id != device_id {
+                continue;
+            }
+
+            let read_mem = |path: &str| -> u64 {
+                fs::read_to_string(path)
+                    .map(|content| content.trim().parse::<u64>().unwrap_or(0))
+                    .unwrap_or(0)
+                    / 1024
+                    / 1024 // Convert bytes to MiB
+            };
+            return GpuUsage {
+                uuid: self.uuid.clone(),
+                total_memory: read_mem(&format!("{}/mem_info_vram_total", device_path)),
+                used_memory: read_mem(&format!("{}/mem_info_vram_used", device_path)),
+            };
+        }
+
+        self.get_usage_unsupported()
+    }
+
+    #[cfg(target_os = "windows")]
+    pub fn get_usage_amd(&self) -> GpuUsage {
+        use std::collections::HashMap;
+
+        let memory_usage_map = windows_impl::get_gpu_usage().unwrap_or_else(|_| {
+            log::error!("Failed to get AMD GPU memory usage");
+            HashMap::new()
+        });
+
+        match memory_usage_map.get(&self.name) {
+            Some(&used_memory) => GpuUsage {
+                uuid: self.uuid.clone(),
+                used_memory: used_memory as u64,
+                total_memory: self.total_memory,
+            },
+            None => self.get_usage_unsupported(),
+        }
+    }
+}
+
+// TODO: refactor this into a more ergonomic API
+#[cfg(target_os = "windows")]
+mod windows_impl {
+    use libc;
+    use libloading::{Library, Symbol};
+    use std::collections::HashMap;
+    use std::ffi::{c_char, c_int, c_void, CStr};
+    use std::mem::{self, MaybeUninit};
+    use std::ptr;
+
+    // === FFI Struct Definitions ===
+    #[repr(C)]
+    #[allow(non_snake_case)]
+    #[derive(Debug, Copy, Clone)]
+    pub struct AdapterInfo {
+        pub iSize: c_int,
+        pub iAdapterIndex: c_int,
+        pub strUDID: [c_char; 256],
+        pub iBusNumber: c_int,
+        pub iDeviceNumber: c_int,
+        pub iFunctionNumber: c_int,
+        pub iVendorID: c_int,
+        pub strAdapterName: [c_char; 256],
+        pub strDisplayName: [c_char; 256],
+        pub iPresent: c_int,
+        pub iExist: c_int,
+        pub strDriverPath: [c_char; 256],
+        pub strDriverPathExt: [c_char; 256],
+        pub strPNPString: [c_char; 256],
+        pub iOSDisplayIndex: c_int,
+    }
+
+    type ADL_MAIN_MALLOC_CALLBACK = Option<unsafe extern "C" fn(c_int) -> *mut c_void>;
+    type ADL_MAIN_CONTROL_CREATE = unsafe extern "C" fn(ADL_MAIN_MALLOC_CALLBACK, c_int) -> c_int;
+    type ADL_MAIN_CONTROL_DESTROY = unsafe extern "C" fn() -> c_int;
+    type ADL_ADAPTER_NUMBEROFADAPTERS_GET = unsafe extern "C" fn(*mut c_int) -> c_int;
+    type ADL_ADAPTER_ADAPTERINFO_GET = unsafe extern "C" fn(*mut AdapterInfo, c_int) -> c_int;
+    type ADL_ADAPTER_ACTIVE_GET = unsafe extern "C" fn(c_int, *mut c_int) -> c_int;
+    type ADL_GET_DEDICATED_VRAM_USAGE =
+        unsafe extern "C" fn(*mut c_void, c_int, *mut c_int) -> c_int;
+
+    // === ADL Memory Allocator ===
+    unsafe extern "C" fn adl_malloc(i_size: i32) -> *mut c_void {
+        libc::malloc(i_size as usize)
+    }
+
+    pub fn get_gpu_usage() -> Result<HashMap<String, i32>, Box<dyn std::error::Error>> {
+        unsafe {
+            let lib = Library::new("atiadlxx.dll").or_else(|_| Library::new("atiadlxy.dll"))?;
+
+            let adl_main_control_create: Symbol<ADL_MAIN_CONTROL_CREATE> =
+                lib.get(b"ADL_Main_Control_Create")?;
+            let adl_main_control_destroy: Symbol<ADL_MAIN_CONTROL_DESTROY> =
+                lib.get(b"ADL_Main_Control_Destroy")?;
+            let adl_adapter_number_of_adapters_get: Symbol<ADL_ADAPTER_NUMBEROFADAPTERS_GET> =
+                lib.get(b"ADL_Adapter_NumberOfAdapters_Get")?;
+            let adl_adapter_adapter_info_get: Symbol<ADL_ADAPTER_ADAPTERINFO_GET> =
+                lib.get(b"ADL_Adapter_AdapterInfo_Get")?;
+            let adl_adapter_active_get: Symbol<ADL_ADAPTER_ACTIVE_GET> =
+                lib.get(b"ADL_Adapter_Active_Get")?;
+            let adl_get_dedicated_vram_usage: Symbol<ADL_GET_DEDICATED_VRAM_USAGE> =
+                lib.get(b"ADL2_Adapter_DedicatedVRAMUsage_Get")?;
+
+            // TODO: try to put nullptr here. then we don't need direct libc dep
+            if adl_main_control_create(Some(adl_malloc), 1) != 0 {
+                return Err("ADL initialization error!".into());
+            }
+            // NOTE: after this call, we must call ADL_Main_Control_Destroy
+            // whenever we encounter an error
+
+            let mut num_adapters: c_int = 0;
+            if adl_adapter_number_of_adapters_get(&mut num_adapters as *mut _) != 0 {
+                return Err("Cannot get number of adapters".into());
+            }
+
+            let mut vram_usages = HashMap::new();
+
+            if num_adapters > 0 {
+                let mut adapter_info: Vec<AdapterInfo> =
+                    vec![MaybeUninit::zeroed().assume_init(); num_adapters as usize];
+                let ret = adl_adapter_adapter_info_get(
+                    adapter_info.as_mut_ptr(),
+                    mem::size_of::<AdapterInfo>() as i32 * num_adapters,
+                );
+                if ret != 0 {
+                    return Err("Cannot get adapter info".into());
+                }
+
+                for adapter in adapter_info.iter() {
+                    let mut is_active = 0;
+                    adl_adapter_active_get(adapter.iAdapterIndex, &mut is_active);
+
+                    if is_active != 0 {
+                        let mut vram_mb = 0;
+                        let _ = adl_get_dedicated_vram_usage(
+                            ptr::null_mut(),
+                            adapter.iAdapterIndex,
+                            &mut vram_mb,
+                        );
+                        // NOTE: adapter name might not be unique?
+                        let name = CStr::from_ptr(adapter.strAdapterName.as_ptr())
+                            .to_string_lossy()
+                            .into_owned();
+                        vram_usages.insert(name, vram_mb);
+                    }
+                }
+            }
+
+            adl_main_control_destroy();
+
+            Ok(vram_usages)
+        }
+    }
+}
diff --git a/src-tauri/src/core/hardware/mod.rs b/src-tauri/src/core/hardware/mod.rs
new file mode 100644
index 000000000..5d1dc47d8
--- /dev/null
+++ b/src-tauri/src/core/hardware/mod.rs
@@ -0,0 +1,350 @@
+pub mod amd;
+pub mod nvidia;
+pub mod vulkan;
+
+use std::sync::OnceLock;
+use sysinfo::System;
+use tauri::{path::BaseDirectory, Manager};
+
+static SYSTEM_INFO: OnceLock<SystemInfo> = OnceLock::new();
+
+#[derive(Clone, serde::Serialize, Debug)]
+struct CpuStaticInfo {
+    name: String,
+    core_count: usize,
+    arch: String,
+    extensions: Vec<String>,
+}
+
+impl CpuStaticInfo {
+    fn new() -> Self {
+        let mut system = System::new();
+        system.refresh_cpu_all();
+
+        let name = system
+            .cpus()
+            .first()
+            .map(|cpu| cpu.brand())
+            .unwrap_or("unknown")
+            .to_string();
+
+        // cortex only returns amd64, arm64, or Unsupported
+        // TODO: find how Jan uses this value, if we can use
+        // std::env::consts::ARCH directly
+        let arch = match std::env::consts::ARCH {
+            "x86" => "amd64",
+            "x86_64" => "amd64",
+            "arm" => "arm64",
+            "aarch64" => "arm64",
+            _ => "Unsupported",
+        };
+
+        CpuStaticInfo {
+            name,
+            core_count: System::physical_core_count().unwrap_or(0),
+            arch: arch.to_string(),
+            extensions: CpuStaticInfo::get_extensions(),
+        }
+    }
+
+    // TODO: see if we need to check for all CPU extensions
+    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+    fn get_extensions() -> Vec<String> {
+        let mut exts = vec![];
+
+        // fpu is always present on modern x86 processors,
+        // but is_x86_feature_detected doesn't support it
+        exts.push("fpu".to_string());
+        if is_x86_feature_detected!("mmx") {
+            exts.push("mmx".to_string());
+        }
+        if is_x86_feature_detected!("sse") {
+            exts.push("sse".to_string());
+        }
+        if is_x86_feature_detected!("sse2") {
+            exts.push("sse2".to_string());
+        }
+        if is_x86_feature_detected!("sse3") {
+            exts.push("sse3".to_string());
+        }
+        if is_x86_feature_detected!("ssse3") {
+            exts.push("ssse3".to_string());
+        }
+        if is_x86_feature_detected!("sse4.1") {
+            exts.push("sse4_1".to_string());
+        }
+        if is_x86_feature_detected!("sse4.2") {
is_x86_feature_detected!("sse4.2") { + exts.push("sse4_2".to_string()); + } + if is_x86_feature_detected!("pclmulqdq") { + exts.push("pclmulqdq".to_string()); + } + if is_x86_feature_detected!("avx") { + exts.push("avx".to_string()); + } + if is_x86_feature_detected!("avx2") { + exts.push("avx2".to_string()); + } + if is_x86_feature_detected!("avx512f") { + exts.push("avx512_f".to_string()); + } + if is_x86_feature_detected!("avx512dq") { + exts.push("avx512_dq".to_string()); + } + if is_x86_feature_detected!("avx512ifma") { + exts.push("avx512_ifma".to_string()); + } + if is_x86_feature_detected!("avx512pf") { + exts.push("avx512_pf".to_string()); + } + if is_x86_feature_detected!("avx512er") { + exts.push("avx512_er".to_string()); + } + if is_x86_feature_detected!("avx512cd") { + exts.push("avx512_cd".to_string()); + } + if is_x86_feature_detected!("avx512bw") { + exts.push("avx512_bw".to_string()); + } + if is_x86_feature_detected!("avx512vl") { + exts.push("avx512_vl".to_string()); + } + if is_x86_feature_detected!("avx512vbmi") { + exts.push("avx512_vbmi".to_string()); + } + if is_x86_feature_detected!("avx512vbmi2") { + exts.push("avx512_vbmi2".to_string()); + } + if is_x86_feature_detected!("avx512vnni") { + exts.push("avx512_vnni".to_string()); + } + if is_x86_feature_detected!("avx512bitalg") { + exts.push("avx512_bitalg".to_string()); + } + if is_x86_feature_detected!("avx512vpopcntdq") { + exts.push("avx512_vpopcntdq".to_string()); + } + // avx512_4vnniw and avx512_4fmaps are only available on Intel Knights Mill, which are + // very rare. https://en.wikipedia.org/wiki/AVX-512 + // is_x86_feature_detected doesn't support them + if is_x86_feature_detected!("avx512vp2intersect") { + exts.push("avx512_vp2intersect".to_string()); + } + if is_x86_feature_detected!("aes") { + exts.push("aes".to_string()); + } + if is_x86_feature_detected!("f16c") { + exts.push("f16c".to_string()); + } + + exts + } + + // Cortex always returns empty list for non-x86 + #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] + fn get_extensions() -> Vec { + vec![] + } +} + +// https://devicehunt.com/all-pci-vendors +pub const VENDOR_ID_AMD: u32 = 0x1002; +pub const VENDOR_ID_NVIDIA: u32 = 0x10DE; +pub const VENDOR_ID_INTEL: u32 = 0x8086; + +#[derive(Debug, Clone)] +pub enum Vendor { + AMD, + NVIDIA, + Intel, + Unknown(u32), +} + +impl serde::Serialize for Vendor { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + match self { + Vendor::AMD => "AMD".serialize(serializer), + Vendor::NVIDIA => "NVIDIA".serialize(serializer), + Vendor::Intel => "Intel".serialize(serializer), + Vendor::Unknown(vendor_id) => { + let formatted = format!("Unknown (vendor_id: {})", vendor_id); + serializer.serialize_str(&formatted) + } + } + } +} + +impl Vendor { + pub fn from_vendor_id(vendor_id: u32) -> Self { + match vendor_id { + VENDOR_ID_AMD => Vendor::AMD, + VENDOR_ID_NVIDIA => Vendor::NVIDIA, + VENDOR_ID_INTEL => Vendor::Intel, + _ => Vendor::Unknown(vendor_id), + } + } +} + +#[derive(Clone, Debug, serde::Serialize)] +pub struct GpuInfo { + pub name: String, + pub total_memory: u64, + pub vendor: Vendor, + pub uuid: String, + pub driver_version: String, + pub nvidia_info: Option, + pub vulkan_info: Option, +} + +impl GpuInfo { + pub fn get_usage(&self) -> GpuUsage { + match self.vendor { + Vendor::NVIDIA => self.get_usage_nvidia(), + Vendor::AMD => self.get_usage_amd(), + _ => self.get_usage_unsupported(), + } + } + + pub fn get_usage_unsupported(&self) -> GpuUsage { + GpuUsage { + 
+            uuid: self.uuid.clone(),
+            used_memory: 0,
+            total_memory: 0,
+        }
+    }
+}
+
+#[derive(serde::Serialize, Clone, Debug)]
+pub struct SystemInfo {
+    cpu: CpuStaticInfo,
+    os: String,
+    total_memory: u64,
+    gpus: Vec<GpuInfo>,
+}
+
+#[derive(serde::Serialize, Clone, Debug)]
+pub struct GpuUsage {
+    uuid: String,
+    used_memory: u64,
+    total_memory: u64,
+}
+
+#[derive(serde::Serialize, Clone, Debug)]
+pub struct SystemUsage {
+    cpu: f32,
+    used_memory: u64,
+    total_memory: u64,
+    gpus: Vec<GpuUsage>,
+}
+
+fn get_jan_libvulkan_path(app: tauri::AppHandle) -> String {
+    let lib_name = if cfg!(target_os = "windows") {
+        "vulkan-1.dll"
+    } else if cfg!(target_os = "linux") {
+        "libvulkan.so"
+    } else {
+        return "".to_string();
+    };
+
+    // NOTE: this does not work in test mode (mock app)
+    match app.path().resolve(
+        format!("resources/lib/{}", lib_name),
+        BaseDirectory::Resource,
+    ) {
+        Ok(lib_path) => lib_path.to_string_lossy().to_string(),
+        Err(_) => "".to_string(),
+    }
+}
+
+#[tauri::command]
+pub fn get_system_info(app: tauri::AppHandle) -> SystemInfo {
+    SYSTEM_INFO
+        .get_or_init(|| {
+            let mut system = System::new();
+            system.refresh_memory();
+
+            let mut gpu_map = std::collections::HashMap::new();
+            for gpu in nvidia::get_nvidia_gpus() {
+                gpu_map.insert(gpu.uuid.clone(), gpu);
+            }
+
+            // try system vulkan first
+            let paths = vec!["".to_string(), get_jan_libvulkan_path(app.clone())];
+            let mut vulkan_gpus = vec![];
+            for path in paths {
+                vulkan_gpus = vulkan::get_vulkan_gpus(&path);
+                if !vulkan_gpus.is_empty() {
+                    break;
+                }
+            }
+
+            for gpu in vulkan_gpus {
+                match gpu_map.get_mut(&gpu.uuid) {
+                    // for existing NVIDIA GPUs, add Vulkan info
+                    Some(nvidia_gpu) => {
+                        nvidia_gpu.vulkan_info = gpu.vulkan_info;
+                    }
+                    None => {
+                        gpu_map.insert(gpu.uuid.clone(), gpu);
+                    }
+                }
+            }
+
+            SystemInfo {
+                cpu: CpuStaticInfo::new(),
+                os: System::long_os_version().unwrap_or("Unknown".to_string()),
+                total_memory: system.total_memory() / 1024 / 1024, // bytes to MiB
+                gpus: gpu_map.into_values().collect(),
+            }
+        })
+        .clone()
+}
+
+#[tauri::command]
+pub fn get_system_usage(app: tauri::AppHandle) -> SystemUsage {
+    let mut system = System::new();
+    system.refresh_memory();
+
+    // need to refresh 2 times to get CPU usage
+    system.refresh_cpu_all();
+    std::thread::sleep(sysinfo::MINIMUM_CPU_UPDATE_INTERVAL);
+    system.refresh_cpu_all();
+
+    let cpus = system.cpus();
+    let cpu_usage =
+        cpus.iter().map(|cpu| cpu.cpu_usage()).sum::<f32>() / (cpus.len().max(1) as f32);
+
+    SystemUsage {
+        cpu: cpu_usage,
+        used_memory: system.used_memory() / 1024 / 1024, // bytes to MiB
+        total_memory: system.total_memory() / 1024 / 1024, // bytes to MiB
+        gpus: get_system_info(app.clone())
+            .gpus
+            .iter()
+            .map(|gpu| gpu.get_usage())
+            .collect(),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use tauri::test::mock_app;
+
+    #[test]
+    fn test_system_info() {
+        let app = mock_app();
+        let info = get_system_info(app.handle().clone());
+        println!("System Static Info: {:?}", info);
+    }
+
+    #[test]
+    fn test_system_usage() {
+        let app = mock_app();
+        let usage = get_system_usage(app.handle().clone());
+        println!("System Usage Info: {:?}", usage);
+    }
+}
diff --git a/src-tauri/src/core/hardware/nvidia.rs b/src-tauri/src/core/hardware/nvidia.rs
new file mode 100644
index 000000000..c703947dc
--- /dev/null
+++ b/src-tauri/src/core/hardware/nvidia.rs
@@ -0,0 +1,101 @@
+use super::{GpuInfo, GpuUsage, Vendor};
+use nvml_wrapper::{error::NvmlError, Nvml};
+use std::sync::OnceLock;
+
+static NVML: OnceLock<Option<Nvml>> = OnceLock::new();
+
+#[derive(Debug, Clone, serde::Serialize)]
+pub struct NvidiaInfo {
+    pub index: u32,
+    pub compute_capability: String,
+}
+
+// NvmlError doesn't implement Copy, so we have to store an Option in OnceLock
+fn get_nvml() -> Option<&'static Nvml> {
+    NVML.get_or_init(|| Nvml::init().ok()).as_ref()
+}
+
+impl GpuInfo {
+    pub fn get_usage_nvidia(&self) -> GpuUsage {
+        let index = match self.nvidia_info {
+            Some(ref nvidia_info) => nvidia_info.index,
+            None => {
+                log::error!("get_usage_nvidia() called on non-NVIDIA GPU");
+                return self.get_usage_unsupported();
+            }
+        };
+        let closure = || -> Result<GpuUsage, NvmlError> {
+            let nvml = get_nvml().ok_or(NvmlError::Unknown)?;
+            let device = nvml.device_by_index(index)?;
+            let mem_info = device.memory_info()?;
+            Ok(GpuUsage {
+                uuid: self.uuid.clone(),
+                used_memory: mem_info.used / 1024 / 1024, // bytes to MiB
+                total_memory: mem_info.total / 1024 / 1024, // bytes to MiB
+            })
+        };
+        closure().unwrap_or_else(|e| {
+            log::error!("Failed to get memory usage for NVIDIA GPU {}: {}", index, e);
+            self.get_usage_unsupported()
+        })
+    }
+}
+
+pub fn get_nvidia_gpus() -> Vec<GpuInfo> {
+    let closure = || -> Result<Vec<GpuInfo>, NvmlError> {
+        let nvml = get_nvml().ok_or(NvmlError::Unknown)?;
+        let num_gpus = nvml.device_count()?;
+        let driver_version = nvml.sys_driver_version()?;
+
+        let mut gpus = Vec::with_capacity(num_gpus as usize);
+        for i in 0..num_gpus {
+            let device = nvml.device_by_index(i)?;
+            gpus.push(GpuInfo {
+                name: device.name()?,
+                total_memory: device.memory_info()?.total / 1024 / 1024, // bytes to MiB
+                vendor: Vendor::NVIDIA,
+                uuid: {
+                    let mut uuid = device.uuid()?;
+                    if uuid.starts_with("GPU-") {
+                        uuid = uuid[4..].to_string();
+                    }
+                    uuid
+                },
+                driver_version: driver_version.clone(),
+                nvidia_info: Some(NvidiaInfo {
+                    index: i,
+                    compute_capability: {
+                        let cc = device.cuda_compute_capability()?;
+                        format!("{}.{}", cc.major, cc.minor)
+                    },
+                }),
+                vulkan_info: None,
+            });
+        }
+
+        Ok(gpus)
+    };
+
+    match closure() {
+        Ok(gpus) => gpus,
+        Err(e) => {
+            log::error!("Failed to get NVIDIA GPUs: {}", e);
+            vec![]
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_get_nvidia_gpus() {
+        let gpus = get_nvidia_gpus();
+        for (i, gpu) in gpus.iter().enumerate() {
+            println!("GPU {}:", i);
+            println!("  {:?}", gpu);
+            println!("  {:?}", gpu.get_usage());
+        }
+    }
+}
diff --git a/src-tauri/src/core/hardware/vulkan.rs b/src-tauri/src/core/hardware/vulkan.rs
new file mode 100644
index 000000000..cba3ed391
--- /dev/null
+++ b/src-tauri/src/core/hardware/vulkan.rs
@@ -0,0 +1,145 @@
+use super::{GpuInfo, Vendor};
+use ash::{vk, Entry};
+
+#[derive(Debug, Clone, serde::Serialize)]
+pub struct VulkanInfo {
+    pub index: u64,
+    pub device_type: String,
+    pub api_version: String,
+    pub device_id: u32,
+}
+
+fn parse_uuid(bytes: &[u8; 16]) -> String {
+    format!(
+        "{:02x}{:02x}{:02x}{:02x}-\
+        {:02x}{:02x}-\
+        {:02x}{:02x}-\
+        {:02x}{:02x}-\
+        {:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
+        bytes[0],
+        bytes[1],
+        bytes[2],
+        bytes[3],
+        bytes[4],
+        bytes[5],
+        bytes[6],
+        bytes[7],
+        bytes[8],
+        bytes[9],
+        bytes[10],
+        bytes[11],
+        bytes[12],
+        bytes[13],
+        bytes[14],
+        bytes[15],
+    )
+}
+
+pub fn get_vulkan_gpus(lib_path: &str) -> Vec<GpuInfo> {
+    match get_vulkan_gpus_internal(lib_path) {
+        Ok(gpus) => gpus,
+        Err(e) => {
+            log::error!("Failed to get Vulkan GPUs: {:?}", e);
+            vec![]
+        }
+    }
+}
+
+fn parse_c_string(buf: &[i8]) -> String {
+    unsafe { std::ffi::CStr::from_ptr(buf.as_ptr()) }
+        .to_str()
+        .unwrap_or_default()
+        .to_string()
+}
+
+fn get_vulkan_gpus_internal(lib_path: &str) -> Result<Vec<GpuInfo>, Box<dyn std::error::Error>> {
+    let entry = if lib_path.is_empty() {
+        unsafe { Entry::load()? }
+    } else {
+        unsafe { Entry::load_from(lib_path)? }
+    };
+    let app_info = vk::ApplicationInfo {
+        api_version: vk::make_api_version(0, 1, 1, 0),
+        ..Default::default()
+    };
+    let create_info = vk::InstanceCreateInfo {
+        p_application_info: &app_info,
+        ..Default::default()
+    };
+    let instance = unsafe { entry.create_instance(&create_info, None)? };
+
+    let mut device_info_list = vec![];
+
+    for (i, device) in unsafe { instance.enumerate_physical_devices()? }
+        .iter()
+        .enumerate()
+    {
+        // create a chain of properties struct for VkPhysicalDeviceProperties2(3)
+        // https://registry.khronos.org/vulkan/specs/latest/man/html/VkPhysicalDeviceProperties2.html
+        // props2 -> driver_props -> id_props
+        let mut id_props = vk::PhysicalDeviceIDProperties::default();
+        let mut driver_props = vk::PhysicalDeviceDriverProperties {
+            p_next: &mut id_props as *mut _ as *mut std::ffi::c_void,
+            ..Default::default()
+        };
+        let mut props2 = vk::PhysicalDeviceProperties2 {
+            p_next: &mut driver_props as *mut _ as *mut std::ffi::c_void,
+            ..Default::default()
+        };
+        unsafe {
+            instance.get_physical_device_properties2(*device, &mut props2);
+        }
+
+        let props = props2.properties;
+        if props.device_type == vk::PhysicalDeviceType::CPU {
+            continue;
+        }
+
+        let device_info = GpuInfo {
+            name: parse_c_string(&props.device_name),
+            total_memory: unsafe { instance.get_physical_device_memory_properties(*device) }
+                .memory_heaps
+                .iter()
+                .filter(|heap| heap.flags.contains(vk::MemoryHeapFlags::DEVICE_LOCAL))
+                .map(|heap| heap.size / (1024 * 1024))
+                .sum(),
+            vendor: Vendor::from_vendor_id(props.vendor_id),
+            uuid: parse_uuid(&id_props.device_uuid),
+            driver_version: parse_c_string(&driver_props.driver_info),
+            nvidia_info: None,
+            vulkan_info: Some(VulkanInfo {
+                index: i as u64,
+                device_type: format!("{:?}", props.device_type),
+                api_version: format!(
+                    "{}.{}.{}",
+                    vk::api_version_major(props.api_version),
+                    vk::api_version_minor(props.api_version),
+                    vk::api_version_patch(props.api_version)
+                ),
+                device_id: props.device_id,
+            }),
+        };
+        device_info_list.push(device_info);
+    }
+
+    unsafe {
+        instance.destroy_instance(None);
+    }
+
+    Ok(device_info_list)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_get_vulkan_gpus() {
+        let gpus = get_vulkan_gpus("");
+        for (i, gpu) in gpus.iter().enumerate() {
+            println!("GPU {}:", i);
+            println!("  {:?}", gpu);
+            println!("  {:?}", gpu.get_usage());
+        }
+    }
+}
diff --git a/src-tauri/src/core/mod.rs b/src-tauri/src/core/mod.rs
index 36f84f627..d18a8d6cc 100644
--- a/src-tauri/src/core/mod.rs
+++ b/src-tauri/src/core/mod.rs
@@ -6,3 +6,4 @@ pub mod setup;
 pub mod state;
 pub mod threads;
 pub mod utils;
+pub mod hardware;
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index d4f50fc78..cc689d97c 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -73,6 +73,9 @@ pub fn run() {
             core::utils::download::download_file,
             core::utils::download::download_hf_repo,
             core::utils::download::cancel_download_task,
+            // hardware
+            core::hardware::get_system_info,
+            core::hardware::get_system_usage,
         ])
         .manage(AppState {
             app_token: Some(generate_app_token()),
diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json
index 4a81ad8c8..4a45b6982 100644
--- a/src-tauri/tauri.conf.json
+++ b/src-tauri/tauri.conf.json
@@ -92,7 +92,8 @@
     "resources": [
       "binaries/engines/**/*",
       "resources/themes/**/*",
-      "resources/pre-install/**/*"
+      "resources/pre-install/**/*",
+      "resources/lib/"
     ],
"externalBin": [ "binaries/cortex-server", diff --git a/web-app/src/lib/service.ts b/web-app/src/lib/service.ts index defe55b64..ce1a45cdf 100644 --- a/web-app/src/lib/service.ts +++ b/web-app/src/lib/service.ts @@ -28,6 +28,8 @@ export const AppRoutes = [ 'getConnectedServers', 'readLogs', 'changeAppDataFolder', + 'getSystemInfo', + 'getSystemUsage', ] // Define API routes based on different route types export const Routes = [...CoreRoutes, ...APIRoutes, ...AppRoutes].map((r) => ({