refactor: clean up unused hardware apis

Louis 2025-08-07 19:59:09 +07:00
parent 469d787888
commit c1668a4e4a
No known key found for this signature in database
GPG Key ID: 44FA9F4D33C37DE2
15 changed files with 10 additions and 819 deletions

View File

@@ -105,8 +105,7 @@ jobs:
jq --arg version "${{ inputs.new_version }}" '.version = $version | .bundle.createUpdaterArtifacts = true' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
if [ "${{ inputs.channel }}" != "stable" ]; then
jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun",
"usr/lib/Jan-${{ inputs.channel }}/resources/lib/libvulkan.so": "resources/lib/libvulkan.so"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json
jq '.bundle.linux.deb.files = {"usr/bin/bun": "resources/bin/bun"}' ./src-tauri/tauri.linux.conf.json > /tmp/tauri.linux.conf.json
mv /tmp/tauri.linux.conf.json ./src-tauri/tauri.linux.conf.json
fi
jq --arg version "${{ inputs.new_version }}" '.version = $version' web-app/package.json > /tmp/package.json

View File

@@ -31,7 +31,6 @@ endif
dev: install-and-build
yarn download:bin
yarn download:lib
yarn dev
# Linting
@@ -41,7 +40,6 @@ lint: install-and-build
# Testing
test: lint
yarn download:bin
yarn download:lib
yarn test
# Builds and publishes the app
@@ -50,7 +48,6 @@ build-and-publish: install-and-build
# Build
build: install-and-build
yarn download:lib
yarn build
clean:

View File

@@ -19,7 +19,6 @@
"dev:web": "yarn workspace @janhq/web-app dev",
"dev:tauri": "yarn build:icon && yarn copy:assets:tauri && cross-env IS_CLEAN=true tauri dev",
"copy:assets:tauri": "cpx \"pre-install/*.tgz\" \"src-tauri/resources/pre-install/\"",
"download:lib": "node ./scripts/download-lib.mjs",
"download:bin": "node ./scripts/download-bin.mjs",
"build:tauri:win32": "yarn download:bin && yarn tauri build",
"build:tauri:linux": "yarn download:bin && ./src-tauri/build-utils/shim-linuxdeploy.sh yarn tauri build && ./src-tauri/build-utils/buildAppImage.sh",

View File

@@ -1,86 +0,0 @@
console.log('Script is running')
// scripts/download-lib.mjs
import https from 'https'
import fs, { mkdirSync } from 'fs'
import os from 'os'
import path from 'path'
import { copySync } from 'cpx'
function download(url, dest) {
return new Promise((resolve, reject) => {
console.log(`Downloading ${url} to ${dest}`)
const file = fs.createWriteStream(dest)
https
.get(url, (response) => {
console.log(`Response status code: ${response.statusCode}`)
if (
response.statusCode >= 300 &&
response.statusCode < 400 &&
response.headers.location
) {
// Handle redirect
const redirectURL = response.headers.location
console.log(`Redirecting to ${redirectURL}`)
download(redirectURL, dest).then(resolve, reject) // Recursive call
return
} else if (response.statusCode !== 200) {
reject(`Failed to get '${url}' (${response.statusCode})`)
return
}
response.pipe(file)
file.on('finish', () => {
file.close(resolve)
})
})
.on('error', (err) => {
fs.unlink(dest, () => reject(err.message))
})
})
}
async function main() {
console.log('Starting main function')
const platform = os.platform() // 'darwin', 'linux', 'win32'
const arch = os.arch() // 'x64', 'arm64', etc.
if (arch != 'x64') return
let filename
if (platform == 'linux')
filename = 'libvulkan.so'
else if (platform == 'win32')
filename = 'vulkan-1.dll'
else
return
const url = `https://catalog.jan.ai/${filename}`
const libDir = 'src-tauri/resources/lib'
const tempDir = 'scripts/dist'
try {
mkdirSync('scripts/dist')
} catch (err) {
// Expect EEXIST error if the directory already exists
}
console.log(`Downloading libvulkan...`)
const savePath = path.join(tempDir, filename)
if (!fs.existsSync(savePath)) {
await download(url, savePath)
}
// copy to tauri resources
try {
copySync(savePath, libDir)
} catch (err) {
// Expect EEXIST error
}
console.log('Downloads completed.')
}
main().catch((err) => {
console.error('Error:', err)
process.exit(1)
})

Binary file not shown. (Image size: 38 KiB before, 36 KiB after.)

View File

@@ -1,10 +1,6 @@
pub mod amd;
pub mod nvidia;
pub mod vulkan;
use std::sync::OnceLock;
use sysinfo::System;
use tauri::{path::BaseDirectory, Manager};
use tauri;
static SYSTEM_INFO: OnceLock<SystemInfo> = OnceLock::new();
@@ -143,90 +139,12 @@ impl CpuStaticInfo {
}
}
// https://devicehunt.com/all-pci-vendors
pub const VENDOR_ID_AMD: u32 = 0x1002;
pub const VENDOR_ID_NVIDIA: u32 = 0x10DE;
pub const VENDOR_ID_INTEL: u32 = 0x8086;
#[derive(Debug, Clone)]
pub enum Vendor {
AMD,
NVIDIA,
Intel,
Unknown(u32),
}
impl serde::Serialize for Vendor {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Vendor::AMD => "AMD".serialize(serializer),
Vendor::NVIDIA => "NVIDIA".serialize(serializer),
Vendor::Intel => "Intel".serialize(serializer),
Vendor::Unknown(vendor_id) => {
let formatted = format!("Unknown (vendor_id: {})", vendor_id);
serializer.serialize_str(&formatted)
}
}
}
}
impl Vendor {
pub fn from_vendor_id(vendor_id: u32) -> Self {
match vendor_id {
VENDOR_ID_AMD => Vendor::AMD,
VENDOR_ID_NVIDIA => Vendor::NVIDIA,
VENDOR_ID_INTEL => Vendor::Intel,
_ => Vendor::Unknown(vendor_id),
}
}
}
#[derive(Clone, Debug, serde::Serialize)]
pub struct GpuInfo {
pub name: String,
pub total_memory: u64,
pub vendor: Vendor,
pub uuid: String,
pub driver_version: String,
pub nvidia_info: Option<nvidia::NvidiaInfo>,
pub vulkan_info: Option<vulkan::VulkanInfo>,
}
impl GpuInfo {
pub fn get_usage(&self) -> GpuUsage {
match self.vendor {
Vendor::NVIDIA => self.get_usage_nvidia(),
Vendor::AMD => self.get_usage_amd(),
_ => self.get_usage_unsupported(),
}
}
pub fn get_usage_unsupported(&self) -> GpuUsage {
GpuUsage {
uuid: self.uuid.clone(),
used_memory: 0,
total_memory: 0,
}
}
}
#[derive(serde::Serialize, Clone, Debug)]
pub struct SystemInfo {
cpu: CpuStaticInfo,
os_type: String,
os_name: String,
total_memory: u64,
gpus: Vec<GpuInfo>,
}
#[derive(serde::Serialize, Clone, Debug)]
pub struct GpuUsage {
uuid: String,
used_memory: u64,
total_memory: u64,
}
#[derive(serde::Serialize, Clone, Debug)]
@@ -234,62 +152,15 @@ pub struct SystemUsage {
cpu: f32,
used_memory: u64,
total_memory: u64,
gpus: Vec<GpuUsage>,
}
fn get_jan_libvulkan_path<R: tauri::Runtime>(app: tauri::AppHandle<R>) -> String {
let lib_name = if cfg!(target_os = "windows") {
"vulkan-1.dll"
} else if cfg!(target_os = "linux") {
"libvulkan.so"
} else {
return "".to_string();
};
// NOTE: this does not work in test mode (mock app)
match app.path().resolve(
format!("resources/lib/{}", lib_name),
BaseDirectory::Resource,
) {
Ok(lib_path) => lib_path.to_string_lossy().to_string(),
Err(_) => "".to_string(),
}
}
#[tauri::command]
pub fn get_system_info<R: tauri::Runtime>(app: tauri::AppHandle<R>) -> SystemInfo {
pub fn get_system_info() -> SystemInfo {
SYSTEM_INFO
.get_or_init(|| {
let mut system = System::new();
system.refresh_memory();
let mut gpu_map = std::collections::HashMap::new();
for gpu in nvidia::get_nvidia_gpus() {
gpu_map.insert(gpu.uuid.clone(), gpu);
}
// try system vulkan first
let paths = vec!["".to_string(), get_jan_libvulkan_path(app.clone())];
let mut vulkan_gpus = vec![];
for path in paths {
vulkan_gpus = vulkan::get_vulkan_gpus(&path);
if !vulkan_gpus.is_empty() {
break;
}
}
for gpu in vulkan_gpus {
match gpu_map.get_mut(&gpu.uuid) {
// for existing NVIDIA GPUs, add Vulkan info
Some(nvidia_gpu) => {
nvidia_gpu.vulkan_info = gpu.vulkan_info;
}
None => {
gpu_map.insert(gpu.uuid.clone(), gpu);
}
}
}
let os_type = if cfg!(target_os = "windows") {
"windows"
} else if cfg!(target_os = "macos") {
@@ -306,14 +177,13 @@ pub fn get_system_info<R: tauri::Runtime>(app: tauri::AppHandle<R>) -> SystemInf
os_type: os_type.to_string(),
os_name,
total_memory: system.total_memory() / 1024 / 1024, // bytes to MiB
gpus: gpu_map.into_values().collect(),
}
})
.clone()
}
#[tauri::command]
pub fn get_system_usage<R: tauri::Runtime>(app: tauri::AppHandle<R>) -> SystemUsage {
pub fn get_system_usage() -> SystemUsage {
let mut system = System::new();
system.refresh_memory();
@@ -330,30 +200,22 @@ pub fn get_system_usage<R: tauri::Runtime>(app: tauri::AppHandle<R>) -> SystemUs
cpu: cpu_usage,
used_memory: system.used_memory() / 1024 / 1024, // bytes to MiB,
total_memory: system.total_memory() / 1024 / 1024, // bytes to MiB,
gpus: get_system_info(app.clone())
.gpus
.iter()
.map(|gpu| gpu.get_usage())
.collect(),
}
}
#[cfg(test)]
mod tests {
use super::*;
use tauri::test::mock_app;
#[test]
fn test_system_info() {
let app = mock_app();
let info = get_system_info(app.handle().clone());
let info = get_system_info();
println!("System Static Info: {:?}", info);
}
#[test]
fn test_system_usage() {
let app = mock_app();
let usage = get_system_usage(app.handle().clone());
let usage = get_system_usage();
println!("System Usage Info: {:?}", usage);
}
}

View File

@@ -1,210 +0,0 @@
use super::{GpuInfo, GpuUsage};
impl GpuInfo {
#[cfg(not(target_os = "linux"))]
#[cfg(not(target_os = "windows"))]
pub fn get_usage_amd(&self) -> GpuUsage {
self.get_usage_unsupported()
}
#[cfg(target_os = "linux")]
pub fn get_usage_amd(&self) -> GpuUsage {
use std::fs;
use std::path::Path;
let device_id = match &self.vulkan_info {
Some(vulkan_info) => vulkan_info.device_id,
None => {
log::error!("get_usage_amd called without Vulkan info");
return self.get_usage_unsupported();
}
};
let closure = || -> Result<GpuUsage, Box<dyn std::error::Error>> {
for subdir in fs::read_dir("/sys/class/drm")? {
let device_path = subdir?.path().join("device");
// Check if this is an AMD GPU by looking for amdgpu directory
if !device_path
.join("driver/module/drivers/pci:amdgpu")
.exists()
{
continue;
}
// match device_id from Vulkan info
let this_device_id_str = fs::read_to_string(device_path.join("device"))?;
let this_device_id = u32::from_str_radix(
this_device_id_str
.strip_prefix("0x")
.unwrap_or(&this_device_id_str)
.trim(),
16,
)?;
if this_device_id != device_id {
continue;
}
let read_mem = |path: &Path| -> u64 {
fs::read_to_string(path)
.map(|content| content.trim().parse::<u64>().unwrap_or(0))
.unwrap_or(0)
/ 1024
/ 1024 // Convert bytes to MiB
};
return Ok(GpuUsage {
uuid: self.uuid.clone(),
total_memory: read_mem(&device_path.join("mem_info_vram_total")),
used_memory: read_mem(&device_path.join("mem_info_vram_used")),
});
}
Err(format!("GPU not found").into())
};
match closure() {
Ok(usage) => usage,
Err(e) => {
log::error!(
"Failed to get memory usage for AMD GPU {:#x}: {}",
device_id,
e
);
self.get_usage_unsupported()
}
}
}
#[cfg(target_os = "windows")]
pub fn get_usage_amd(&self) -> GpuUsage {
use std::collections::HashMap;
let memory_usage_map = windows_impl::get_gpu_usage().unwrap_or_else(|_| {
log::error!("Failed to get AMD GPU memory usage");
HashMap::new()
});
match memory_usage_map.get(&self.name) {
Some(&used_memory) => GpuUsage {
uuid: self.uuid.clone(),
used_memory: used_memory as u64,
total_memory: self.total_memory,
},
None => self.get_usage_unsupported(),
}
}
}
// TODO: refactor this into a more ergonomic API
#[cfg(target_os = "windows")]
mod windows_impl {
use libc;
use libloading::{Library, Symbol};
use std::collections::HashMap;
use std::ffi::{c_char, c_int, c_void, CStr};
use std::mem::{self, MaybeUninit};
use std::ptr;
// === FFI Struct Definitions ===
#[repr(C)]
#[allow(non_snake_case)]
#[derive(Debug, Copy, Clone)]
pub struct AdapterInfo {
pub iSize: c_int,
pub iAdapterIndex: c_int,
pub strUDID: [c_char; 256],
pub iBusNumber: c_int,
pub iDeviceNumber: c_int,
pub iFunctionNumber: c_int,
pub iVendorID: c_int,
pub strAdapterName: [c_char; 256],
pub strDisplayName: [c_char; 256],
pub iPresent: c_int,
pub iExist: c_int,
pub strDriverPath: [c_char; 256],
pub strDriverPathExt: [c_char; 256],
pub strPNPString: [c_char; 256],
pub iOSDisplayIndex: c_int,
}
type ADL_MAIN_MALLOC_CALLBACK = Option<unsafe extern "C" fn(i32) -> *mut c_void>;
type ADL_MAIN_CONTROL_CREATE = unsafe extern "C" fn(ADL_MAIN_MALLOC_CALLBACK, c_int) -> c_int;
type ADL_MAIN_CONTROL_DESTROY = unsafe extern "C" fn() -> c_int;
type ADL_ADAPTER_NUMBEROFADAPTERS_GET = unsafe extern "C" fn(*mut c_int) -> c_int;
type ADL_ADAPTER_ADAPTERINFO_GET = unsafe extern "C" fn(*mut AdapterInfo, c_int) -> c_int;
type ADL_ADAPTER_ACTIVE_GET = unsafe extern "C" fn(c_int, *mut c_int) -> c_int;
type ADL_GET_DEDICATED_VRAM_USAGE =
unsafe extern "C" fn(*mut c_void, c_int, *mut c_int) -> c_int;
// === ADL Memory Allocator ===
unsafe extern "C" fn adl_malloc(i_size: i32) -> *mut c_void {
libc::malloc(i_size as usize)
}
pub fn get_gpu_usage() -> Result<HashMap<String, i32>, Box<dyn std::error::Error>> {
unsafe {
let lib = Library::new("atiadlxx.dll").or_else(|_| Library::new("atiadlxy.dll"))?;
let adl_main_control_create: Symbol<ADL_MAIN_CONTROL_CREATE> =
lib.get(b"ADL_Main_Control_Create")?;
let adl_main_control_destroy: Symbol<ADL_MAIN_CONTROL_DESTROY> =
lib.get(b"ADL_Main_Control_Destroy")?;
let adl_adapter_number_of_adapters_get: Symbol<ADL_ADAPTER_NUMBEROFADAPTERS_GET> =
lib.get(b"ADL_Adapter_NumberOfAdapters_Get")?;
let adl_adapter_adapter_info_get: Symbol<ADL_ADAPTER_ADAPTERINFO_GET> =
lib.get(b"ADL_Adapter_AdapterInfo_Get")?;
let adl_adapter_active_get: Symbol<ADL_ADAPTER_ACTIVE_GET> =
lib.get(b"ADL_Adapter_Active_Get")?;
let adl_get_dedicated_vram_usage: Symbol<ADL_GET_DEDICATED_VRAM_USAGE> =
lib.get(b"ADL2_Adapter_DedicatedVRAMUsage_Get")?;
// TODO: try to put nullptr here. then we don't need direct libc dep
if adl_main_control_create(Some(adl_malloc), 1) != 0 {
return Err("ADL initialization error!".into());
}
// NOTE: after this call, we must call ADL_Main_Control_Destroy
// whenever we encounter an error
let mut num_adapters: c_int = 0;
if adl_adapter_number_of_adapters_get(&mut num_adapters as *mut _) != 0 {
return Err("Cannot get number of adapters".into());
}
let mut vram_usages = HashMap::new();
if num_adapters > 0 {
let mut adapter_info: Vec<AdapterInfo> =
vec![MaybeUninit::zeroed().assume_init(); num_adapters as usize];
let ret = adl_adapter_adapter_info_get(
adapter_info.as_mut_ptr(),
mem::size_of::<AdapterInfo>() as i32 * num_adapters,
);
if ret != 0 {
return Err("Cannot get adapter info".into());
}
for adapter in adapter_info.iter() {
let mut is_active = 0;
adl_adapter_active_get(adapter.iAdapterIndex, &mut is_active);
if is_active != 0 {
let mut vram_mb = 0;
let _ = adl_get_dedicated_vram_usage(
ptr::null_mut(),
adapter.iAdapterIndex,
&mut vram_mb,
);
// NOTE: adapter name might not be unique?
let name = CStr::from_ptr(adapter.strAdapterName.as_ptr())
.to_string_lossy()
.into_owned();
vram_usages.insert(name, vram_mb);
}
}
}
adl_main_control_destroy();
Ok(vram_usages)
}
}
}

View File

@@ -1,120 +0,0 @@
use super::{GpuInfo, GpuUsage, Vendor};
use nvml_wrapper::{error::NvmlError, Nvml};
use std::sync::OnceLock;
static NVML: OnceLock<Option<Nvml>> = OnceLock::new();
#[derive(Debug, Clone, serde::Serialize)]
pub struct NvidiaInfo {
pub index: u32,
pub compute_capability: String,
}
fn get_nvml() -> Option<&'static Nvml> {
NVML.get_or_init(|| {
let result = Nvml::init().or_else(|e| {
// fallback
if cfg!(target_os = "linux") {
let lib_path = std::ffi::OsStr::new("libnvidia-ml.so.1");
Nvml::builder().lib_path(lib_path).init()
} else {
Err(e)
}
});
// NvmlError doesn't implement Copy, so we have to store an Option in OnceLock
match result {
Ok(nvml) => Some(nvml),
Err(e) => {
log::error!("Unable to initialize NVML: {}", e);
None
}
}
})
.as_ref()
}
impl GpuInfo {
pub fn get_usage_nvidia(&self) -> GpuUsage {
let index = match self.nvidia_info {
Some(ref nvidia_info) => nvidia_info.index,
None => {
log::error!("get_usage_nvidia() called on non-NVIDIA GPU");
return self.get_usage_unsupported();
}
};
let closure = || -> Result<GpuUsage, NvmlError> {
let nvml = get_nvml().ok_or(NvmlError::Unknown)?;
let device = nvml.device_by_index(index)?;
let mem_info = device.memory_info()?;
Ok(GpuUsage {
uuid: self.uuid.clone(),
used_memory: mem_info.used / 1024 / 1024, // bytes to MiB
total_memory: mem_info.total / 1024 / 1024, // bytes to MiB
})
};
closure().unwrap_or_else(|e| {
log::error!("Failed to get memory usage for NVIDIA GPU {}: {}", index, e);
self.get_usage_unsupported()
})
}
}
pub fn get_nvidia_gpus() -> Vec<GpuInfo> {
let closure = || -> Result<Vec<GpuInfo>, NvmlError> {
let nvml = get_nvml().ok_or(NvmlError::Unknown)?;
let num_gpus = nvml.device_count()?;
let driver_version = nvml.sys_driver_version()?;
let mut gpus = Vec::with_capacity(num_gpus as usize);
for i in 0..num_gpus {
let device = nvml.device_by_index(i)?;
gpus.push(GpuInfo {
name: device.name()?,
total_memory: device.memory_info()?.total / 1024 / 1024, // bytes to MiB
vendor: Vendor::NVIDIA,
uuid: {
let mut uuid = device.uuid()?;
if uuid.starts_with("GPU-") {
uuid = uuid[4..].to_string();
}
uuid
},
driver_version: driver_version.clone(),
nvidia_info: Some(NvidiaInfo {
index: i,
compute_capability: {
let cc = device.cuda_compute_capability()?;
format!("{}.{}", cc.major, cc.minor)
},
}),
vulkan_info: None,
});
}
Ok(gpus)
};
match closure() {
Ok(gpus) => gpus,
Err(e) => {
log::error!("Failed to get NVIDIA GPUs: {}", e);
vec![]
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_nvidia_gpus() {
let gpus = get_nvidia_gpus();
for (i, gpu) in gpus.iter().enumerate() {
println!("GPU {}:", i);
println!(" {:?}", gpu);
println!(" {:?}", gpu.get_usage());
}
}
}

View File

@@ -1,145 +0,0 @@
use super::{GpuInfo, Vendor};
use ash::{vk, Entry};
#[derive(Debug, Clone, serde::Serialize)]
pub struct VulkanInfo {
pub index: u64,
pub device_type: String,
pub api_version: String,
pub device_id: u32,
}
fn parse_uuid(bytes: &[u8; 16]) -> String {
format!(
"{:02x}{:02x}{:02x}{:02x}-\
{:02x}{:02x}-\
{:02x}{:02x}-\
{:02x}{:02x}-\
{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
bytes[0],
bytes[1],
bytes[2],
bytes[3],
bytes[4],
bytes[5],
bytes[6],
bytes[7],
bytes[8],
bytes[9],
bytes[10],
bytes[11],
bytes[12],
bytes[13],
bytes[14],
bytes[15],
)
}
pub fn get_vulkan_gpus(lib_path: &str) -> Vec<GpuInfo> {
match get_vulkan_gpus_internal(lib_path) {
Ok(gpus) => gpus,
Err(e) => {
log::error!("Failed to get Vulkan GPUs: {:?}", e);
vec![]
}
}
}
fn parse_c_string(buf: &[i8]) -> String {
unsafe { std::ffi::CStr::from_ptr(buf.as_ptr()) }
.to_str()
.unwrap_or_default()
.to_string()
}
fn get_vulkan_gpus_internal(lib_path: &str) -> Result<Vec<GpuInfo>, Box<dyn std::error::Error>> {
let entry = if lib_path.is_empty() {
unsafe { Entry::load()? }
} else {
unsafe { Entry::load_from(lib_path)? }
};
let app_info = vk::ApplicationInfo {
api_version: vk::make_api_version(0, 1, 1, 0),
..Default::default()
};
let create_info = vk::InstanceCreateInfo {
p_application_info: &app_info,
..Default::default()
};
let instance = unsafe { entry.create_instance(&create_info, None)? };
let mut device_info_list = vec![];
for (i, device) in unsafe { instance.enumerate_physical_devices()? }
.iter()
.enumerate()
{
// create a chain of properties struct for VkPhysicalDeviceProperties2(3)
// https://registry.khronos.org/vulkan/specs/latest/man/html/VkPhysicalDeviceProperties2.html
// props2 -> driver_props -> id_props
let mut id_props = vk::PhysicalDeviceIDProperties::default();
let mut driver_props = vk::PhysicalDeviceDriverProperties {
p_next: &mut id_props as *mut _ as *mut std::ffi::c_void,
..Default::default()
};
let mut props2 = vk::PhysicalDeviceProperties2 {
p_next: &mut driver_props as *mut _ as *mut std::ffi::c_void,
..Default::default()
};
unsafe {
instance.get_physical_device_properties2(*device, &mut props2);
}
let props = props2.properties;
if props.device_type == vk::PhysicalDeviceType::CPU {
continue;
}
let device_info = GpuInfo {
name: parse_c_string(&props.device_name),
total_memory: unsafe { instance.get_physical_device_memory_properties(*device) }
.memory_heaps
.iter()
.filter(|heap| heap.flags.contains(vk::MemoryHeapFlags::DEVICE_LOCAL))
.map(|heap| heap.size / (1024 * 1024))
.sum(),
vendor: Vendor::from_vendor_id(props.vendor_id),
uuid: parse_uuid(&id_props.device_uuid),
driver_version: parse_c_string(&driver_props.driver_info),
nvidia_info: None,
vulkan_info: Some(VulkanInfo {
index: i as u64,
device_type: format!("{:?}", props.device_type),
api_version: format!(
"{}.{}.{}",
vk::api_version_major(props.api_version),
vk::api_version_minor(props.api_version),
vk::api_version_patch(props.api_version)
),
device_id: props.device_id,
}),
};
device_info_list.push(device_info);
}
unsafe {
instance.destroy_instance(None);
}
Ok(device_info_list)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_vulkan_gpus() {
let gpus = get_vulkan_gpus("");
for (i, gpu) in gpus.iter().enumerate() {
println!("GPU {}:", i);
println!(" {:?}", gpu);
println!(" {:?}", gpu.get_usage());
}
}
}

View File

@@ -696,8 +696,6 @@ Section Install
; Copy resources
CreateDirectory "$INSTDIR\resources"
CreateDirectory "$INSTDIR\resources\pre-install"
SetOutPath $INSTDIR
File /a "/oname=vulkan-1.dll" "D:\a\jan\jan\src-tauri\resources\lib\vulkan-1.dll"
SetOutPath "$INSTDIR\resources\pre-install"
File /nonfatal /a /r "D:\a\jan\jan\src-tauri\resources\pre-install\"
SetOutPath $INSTDIR

View File

@@ -10,8 +10,7 @@
},
"deb": {
"files": {
"usr/bin/bun": "resources/bin/bun",
"usr/lib/Jan/resources/lib/libvulkan.so": "resources/lib/libvulkan.so"
"usr/bin/bun": "resources/bin/bun"
}
}
}

View File

@@ -1,19 +0,0 @@
const jestRunner = require('jest-runner')
class EmptyTestFileRunner extends jestRunner.default {
async runTests(tests, watcher, onStart, onResult, onFailure, options) {
const nonEmptyTests = tests.filter(
(test) => test.context.hasteFS.getSize(test.path) > 0
)
return super.runTests(
nonEmptyTests,
watcher,
onStart,
onResult,
onFailure,
options
)
}
}
module.exports = EmptyTestFileRunner

View File

@@ -12,30 +12,6 @@ export interface CPU {
instructions?: string[] // Cortex migration: ensure instructions data ready
}
export interface GPUAdditionalInfo {
compute_cap: string
driver_version: string
}
export interface GPU {
name: string
total_memory: number
vendor: string
uuid: string
driver_version: string
activated?: boolean
nvidia_info: {
index: number
compute_capability: string
}
vulkan_info: {
index: number
device_id: number
device_type: string
api_version: string
}
}
export interface OS {
name: string
version: string
@@ -48,7 +24,6 @@ export interface RAM {
export interface HardwareData {
cpu: CPU
gpus: GPU[]
os_type: string
os_name: string
total_memory: number
@@ -60,11 +35,6 @@ export interface SystemUsage {
cpu: number
used_memory: number
total_memory: number
gpus: {
uuid: string
used_memory: number
total_memory: number
}[]
}
// Default values
@@ -76,7 +46,6 @@ const defaultHardwareData: HardwareData = {
name: '',
usage: 0,
},
gpus: [],
os_type: '',
os_name: '',
total_memory: 0,
@@ -86,7 +55,6 @@ const defaultSystemUsage: SystemUsage = {
cpu: 0,
used_memory: 0,
total_memory: 0,
gpus: [],
}
interface HardwareStore {
@@ -96,22 +64,17 @@ interface HardwareStore {
// Update functions
setCPU: (cpu: CPU) => void
setGPUs: (gpus: GPU[]) => void
setOS: (os: OS) => void
setRAM: (ram: RAM) => void
// Update entire hardware data at once
setHardwareData: (data: HardwareData) => void
// Update individual GPU
updateGPU: (index: number, gpu: GPU) => void
// Update RAM available
updateSystemUsage: (usage: SystemUsage) => void
// GPU loading state
gpuLoading: { [index: number]: boolean }
setGpuLoading: (index: number, loading: boolean) => void
// Polling control
pollingPaused: boolean
@@ -126,13 +89,6 @@ export const useHardware = create<HardwareStore>()(
systemUsage: defaultSystemUsage,
gpuLoading: {},
pollingPaused: false,
setGpuLoading: (index, loading) =>
set((state) => ({
gpuLoading: {
...state.gpuLoading,
[state.hardwareData.gpus[index].uuid]: loading,
},
})),
pausePolling: () => set({ pollingPaused: true }),
resumePolling: () => set({ pollingPaused: false }),
@@ -144,14 +100,6 @@ export const useHardware = create<HardwareStore>()(
},
})),
setGPUs: (gpus) =>
set((state) => ({
hardwareData: {
...state.hardwareData,
gpus,
},
})),
setOS: (os) =>
set((state) => ({
hardwareData: {
@@ -181,27 +129,9 @@ export const useHardware = create<HardwareStore>()(
available: 0,
total: 0,
},
gpus: data.gpus.map((gpu) => ({
...gpu,
activated: gpu.activated ?? false,
})),
},
}),
updateGPU: (index, gpu) =>
set((state) => {
const newGPUs = [...state.hardwareData.gpus]
if (index >= 0 && index < newGPUs.length) {
newGPUs[index] = gpu
}
return {
hardwareData: {
...state.hardwareData,
gpus: newGPUs,
},
}
}),
updateSystemUsage: (systemUsage) =>
set(() => ({
systemUsage,

View File

@@ -501,7 +501,7 @@ function Hub() {
</HeaderPage>
<div className="p-4 w-full h-[calc(100%-32px)] !overflow-y-auto first-step-setup-local-provider">
<div className="flex flex-col h-full justify-between gap-4 gap-y-3 w-full md:w-4/5 mx-auto">
{loading ? (
{loading && !filteredModels.length ? (
<div className="flex items-center justify-center">
<div className="text-center text-muted-foreground">
{t('hub:loadingModels')}

View File

@@ -1,6 +1,6 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { createFileRoute } from '@tanstack/react-router'
import { useEffect, useState } from 'react'
import { useEffect } from 'react'
import { useHardware } from '@/hooks/useHardware'
import { Progress } from '@/components/ui/progress'
import { route } from '@/constants/routes'
@@ -19,12 +19,7 @@ function SystemMonitor() {
const { t } = useTranslation()
const { hardwareData, systemUsage, updateSystemUsage } = useHardware()
const {
devices: llamacppDevices,
fetchDevices,
} = useLlamacppDevices()
const [isInitialized, setIsInitialized] = useState(false)
const { devices: llamacppDevices, fetchDevices } = useLlamacppDevices()
useEffect(() => {
// Fetch llamacpp devices
@@ -46,14 +41,6 @@
return () => clearInterval(intervalId)
}, [updateSystemUsage])
// Initialize when hardware data and llamacpp devices are available
useEffect(() => {
if (hardwareData.gpus.length > 0 && !isInitialized) {
setIsInitialized(true)
}
}, [hardwareData.gpus.length, isInitialized])
// Calculate RAM usage percentage
const ramUsagePercentage =
toNumber(systemUsage.used_memory / hardwareData.total_memory) * 100