Backend Architecture Refactoring (#6094) (#6162)

* add llamacpp plugin

* Refactor llamacpp plugin

* add utils plugin

* remove utils folder

* add hardware implementation

* add utils folder + move utils function

* organize cargo files

* refactor utils src

* refactor util

* apply fmt

* fmt

* Update gguf + reformat

* add permission for gguf commands

* fix cargo test windows

* revert yarn lock

* remove cargo.lock for hardware plugin

* ignore cargo.lock file

* Fix hardware invoke + refactor hardware + refactor tests, constants

* use api wrapper in extension to invoke hardware call + api wrapper build integration

* add newline at EOF (per Akarshan)

* add vi mock for getSystemInfo
Dinh Long Nguyen authored 2025-08-15 08:59:01 +07:00, committed by GitHub
parent 9732a9b8b9
commit e1c8d98bf2
GPG Key ID: B5690EEEBB952194
138 changed files with 6998 additions and 4634 deletions

.gitignore
View File

@ -53,6 +53,7 @@ docs/.next/
## cargo
target
Cargo.lock
## test
test-data

View File

@ -26,6 +26,7 @@ else ifeq ($(shell uname -s),Linux)
chmod +x src-tauri/build-utils/*
endif
yarn install
yarn build:tauri:plugin:api
yarn build:core
yarn build:extensions

View File

@ -120,6 +120,7 @@ mise dev # runs the full development setup
```bash
yarn install
yarn build:tauri:plugin:api
yarn build:core
yarn build:extensions
yarn dev

View File

@ -28,6 +28,8 @@
},
"dependencies": {
"@janhq/core": "../../core/package.tgz",
"@janhq/tauri-plugin-hardware-api": "link:../../src-tauri/plugins/tauri-plugin-hardware",
"@janhq/tauri-plugin-llamacpp-api": "link:../../src-tauri/plugins/tauri-plugin-llamacpp",
"@tauri-apps/api": "^2.5.0",
"@tauri-apps/plugin-log": "^2.6.0",
"fetch-retry": "^5.0.6",

View File

@ -2,6 +2,7 @@ import { getJanDataFolderPath, fs, joinPath, events } from '@janhq/core'
import { invoke } from '@tauri-apps/api/core'
import { getProxyConfig } from './util'
import { dirname } from '@tauri-apps/api/path'
import { getSystemInfo } from '@janhq/tauri-plugin-hardware-api'
// folder structure
// <Jan's data folder>/llamacpp/backends/<backend_version>/<backend_type>
@ -10,7 +11,7 @@ import { dirname } from '@tauri-apps/api/path'
export async function listSupportedBackends(): Promise<
{ version: string; backend: string }[]
> {
const sysInfo = await window.core.api.getSystemInfo()
const sysInfo = await getSystemInfo()
const os_type = sysInfo.os_type
const arch = sysInfo.cpu.arch
@ -229,7 +230,7 @@ export async function downloadBackend(
}
async function _getSupportedFeatures() {
const sysInfo = await window.core.api.getSystemInfo()
const sysInfo = await getSystemInfo()
const features = {
avx: sysInfo.cpu.extensions.includes('avx'),
avx2: sysInfo.cpu.extensions.includes('avx2'),
@ -289,7 +290,7 @@ async function _fetchGithubReleases(
}
async function _isCudaInstalled(version: string): Promise<boolean> {
const sysInfo = await window.core.api.getSystemInfo()
const sysInfo = await getSystemInfo()
const os_type = sysInfo.os_type
// not sure the reason behind this naming convention
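
To make the migration above concrete, here is a minimal sketch of how backend detection now reads system information through the plugin API instead of `window.core.api.getSystemInfo()`. The helper name and the logging are illustrative only; the field names (`os_type`, `cpu.arch`, `cpu.extensions`) come from the diff itself.

```typescript
import { getSystemInfo } from '@janhq/tauri-plugin-hardware-api'

// Illustrative helper: mirrors how listSupportedBackends() and
// _getSupportedFeatures() consume the plugin API in this change.
export async function describeHost(): Promise<string> {
  const sysInfo = await getSystemInfo()
  const simd = ['avx', 'avx2', 'avx512_f'].filter((ext) =>
    sysInfo.cpu.extensions.includes(ext)
  )
  return `${sysInfo.os_type}/${sysInfo.cpu.arch} (${simd.join(', ') || 'no AVX'})`
}
```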

View File

@ -808,7 +808,7 @@ export default class llamacpp_extension extends AIEngine {
}
private async generateApiKey(modelId: string, port: string): Promise<string> {
const hash = await invoke<string>('generate_api_key', {
const hash = await invoke<string>('plugin:llamacpp|generate_api_key', {
modelId: modelId + port,
apiSecret: this.apiSecret,
})
@ -1101,7 +1101,7 @@ export default class llamacpp_extension extends AIEngine {
*/
private async getRandomPort(): Promise<number> {
try {
const port = await invoke<number>('get_random_port')
const port = await invoke<number>('plugin:llamacpp|get_random_port')
return port
} catch {
logger.error('Unable to find a suitable port')
@ -1279,7 +1279,7 @@ export default class llamacpp_extension extends AIEngine {
try {
// TODO: add LIBRARY_PATH
const sInfo = await invoke<SessionInfo>('load_llama_model', {
const sInfo = await invoke<SessionInfo>('plugin:llamacpp|load_llama_model', {
backendPath,
libraryPath,
args,
@ -1299,7 +1299,7 @@ export default class llamacpp_extension extends AIEngine {
const pid = sInfo.pid
try {
// Pass the PID as the session_id
const result = await invoke<UnloadResult>('unload_llama_model', {
const result = await invoke<UnloadResult>('plugin:llamacpp|unload_llama_model', {
pid: pid,
})
@ -1437,7 +1437,7 @@ export default class llamacpp_extension extends AIEngine {
private async findSessionByModel(modelId: string): Promise<SessionInfo> {
try {
let sInfo = await invoke<SessionInfo>('find_session_by_model', {
let sInfo = await invoke<SessionInfo>('plugin:llamacpp|find_session_by_model', {
modelId,
})
return sInfo
@ -1456,7 +1456,7 @@ export default class llamacpp_extension extends AIEngine {
throw new Error(`No active session found for model: ${opts.model}`)
}
// check if the process is alive
const result = await invoke<boolean>('is_process_running', {
const result = await invoke<boolean>('plugin:llamacpp|is_process_running', {
pid: sessionInfo.pid,
})
if (result) {
@ -1516,7 +1516,7 @@ export default class llamacpp_extension extends AIEngine {
override async getLoadedModels(): Promise<string[]> {
try {
let models: string[] = await invoke<string[]>('get_loaded_models')
let models: string[] = await invoke<string[]>('plugin:llamacpp|get_loaded_models')
return models
} catch (e) {
logger.error(e)
@ -1539,7 +1539,7 @@ export default class llamacpp_extension extends AIEngine {
const backendPath = await getBackendExePath(backend, version)
const libraryPath = await joinPath([await this.getProviderPath(), 'lib'])
try {
const dList = await invoke<DeviceList[]>('get_devices', {
const dList = await invoke<DeviceList[]>('plugin:llamacpp|get_devices', {
backendPath,
libraryPath,
})
@ -1601,7 +1601,7 @@ export default class llamacpp_extension extends AIEngine {
private async loadMetadata(path: string): Promise<GgufMetadata> {
try {
const data = await invoke<GgufMetadata>('read_gguf_metadata', {
const data = await invoke<GgufMetadata>('plugin:llamacpp|read_gguf_metadata', {
path: path,
})
return data
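
Each raw `invoke` above now routes through Tauri's `plugin:<name>|<command>` handler naming. Where the extension does not need custom error handling, the typed wrappers published from the plugin's guest-js package (shown later in this diff) express the same calls more directly; the model path below is a placeholder.

```typescript
import { getRandomPort, readGgufMetadata } from '@janhq/tauri-plugin-llamacpp-api'

// Equivalent to invoke('plugin:llamacpp|get_random_port') and
// invoke('plugin:llamacpp|read_gguf_metadata', { path }) used above.
async function probeModel(path: string): Promise<void> {
  const port = await getRandomPort()
  const meta = await readGgufMetadata(path) // e.g. '/models/example.gguf' (placeholder)
  console.log(`gguf v${meta.version}, ${meta.tensor_count} tensors, free port ${port}`)
}
```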

View File

@ -18,7 +18,7 @@ describe('Backend functions', () => {
describe('listSupportedBackends', () => {
it('should return supported backends for Windows x64', async () => {
// Mock system info
window.core.api.getSystemInfo = vi.fn().mockResolvedValue({
const getSystemInfo = vi.fn().mockResolvedValue({
os_type: 'windows',
cpu: {
arch: 'x86_64',
@ -53,7 +53,7 @@ describe('Backend functions', () => {
it('should return CUDA backends with proper CPU instruction detection for Windows', async () => {
// Mock system info with CUDA support and AVX512
window.core.api.getSystemInfo = vi.fn().mockResolvedValue({
const getSystemInfo = vi.fn().mockResolvedValue({
os_type: 'windows',
cpu: {
arch: 'x86_64',
@ -92,7 +92,7 @@ describe('Backend functions', () => {
it('should select appropriate CUDA backend based on CPU features - AVX2 only', async () => {
// Mock system info with CUDA support but only AVX2
window.core.api.getSystemInfo = vi.fn().mockResolvedValue({
const getSystemInfo = vi.fn().mockResolvedValue({
os_type: 'windows',
cpu: {
arch: 'x86_64',
@ -131,7 +131,7 @@ describe('Backend functions', () => {
it('should select appropriate CUDA backend based on CPU features - no AVX', async () => {
// Mock system info with CUDA support but no AVX
window.core.api.getSystemInfo = vi.fn().mockResolvedValue({
const getSystemInfo = vi.fn().mockResolvedValue({
os_type: 'windows',
cpu: {
arch: 'x86_64',
@ -171,7 +171,7 @@ describe('Backend functions', () => {
it('should return CUDA backends with proper CPU instruction detection for Linux', async () => {
// Mock system info with CUDA support and AVX support
window.core.api.getSystemInfo = vi.fn().mockResolvedValue({
const getSystemInfo = vi.fn().mockResolvedValue({
os_type: 'linux',
cpu: {
arch: 'x86_64',
@ -210,7 +210,7 @@ describe('Backend functions', () => {
})
it('should return supported backends for macOS arm64', async () => {
window.core.api.getSystemInfo = vi.fn().mockResolvedValue({
const getSystemInfo = vi.fn().mockResolvedValue({
os_type: 'macos',
cpu: {
arch: 'aarch64',
@ -261,7 +261,7 @@ describe('Backend functions', () => {
describe('getBackendExePath', () => {
it('should return correct exe path for Windows', async () => {
window.core.api.getSystemInfo = vi.fn().mockResolvedValue({
const getSystemInfo = vi.fn().mockResolvedValue({
os_type: 'windows',
})
@ -289,7 +289,7 @@ describe('Backend functions', () => {
})
it('should return correct exe path for Linux/macOS', async () => {
window.core.api.getSystemInfo = vi.fn().mockResolvedValue({
const getSystemInfo = vi.fn().mockResolvedValue({
os_type: 'linux',
})

View File

@ -175,7 +175,7 @@ describe('llamacpp_extension', () => {
const { invoke } = await import('@tauri-apps/api/core')
// Mock system info for getBackendExePath
window.core.api.getSystemInfo = vi.fn().mockResolvedValue({
const getSystemInfo = vi.fn().mockResolvedValue({
os_type: 'linux'
})

View File

@ -19,7 +19,7 @@ Object.defineProperty(globalThis, 'window', {
localStorage: localStorageMock,
core: {
api: {
getSystemInfo: vi.fn(),
// getSystemInfo: vi.fn(),
},
extensionManager: {
getByName: vi.fn().mockReturnValue({
@ -31,6 +31,10 @@ Object.defineProperty(globalThis, 'window', {
},
})
vi.mock('@janhq/tauri-plugin-hardware-api', () => ({
getSystemInfo: vi.fn(),
}));
// Mock Tauri invoke function
vi.mock('@tauri-apps/api/core', () => ({
invoke: vi.fn(),
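
With the module mocked, individual tests can drive `getSystemInfo` via vitest's `vi.mocked` helper instead of reassigning `window.core.api`. A sketch with placeholder values, assuming the code under test imports the function from `@janhq/tauri-plugin-hardware-api`:

```typescript
import { vi } from 'vitest'
import { getSystemInfo } from '@janhq/tauri-plugin-hardware-api'

// vi.mock() above replaces the module, so this import resolves to a vi.fn().
vi.mocked(getSystemInfo).mockResolvedValue({
  os_type: 'windows',
  os_name: 'Windows 11', // placeholder
  total_memory: 32768, // MiB, placeholder
  cpu: { name: 'cpu', core_count: 8, arch: 'x86_64', extensions: ['avx', 'avx2'] },
  gpus: [],
})
```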

View File

@ -28,9 +28,19 @@ run = "yarn install"
sources = ['package.json', 'yarn.lock']
outputs = ['node_modules']
[tasks.build-tauri-plugin-api]
description = "Build Tauri plugin API"
depends = ["install"]
run = "yarn build:tauri:plugin:api"
sources = ['src-tauri/plugins/**/*']
outputs = [
'src-tauri/plugins/tauri-plugin-hardware/dist-js',
'src-tauri/plugins/tauri-plugin-llamacpp/dist-js',
]
[tasks.build-core]
description = "Build core package"
depends = ["install"]
depends = ["build-tauri-plugin-api"]
run = "yarn build:core"
sources = ['core/**/*']
outputs = ['core/dist']

View File

@ -25,6 +25,7 @@
"build:tauri:linux": "yarn download:bin && ./src-tauri/build-utils/shim-linuxdeploy.sh yarn tauri build && ./src-tauri/build-utils/buildAppImage.sh",
"build:tauri:darwin": "yarn tauri build --target universal-apple-darwin",
"build:tauri": "yarn build:icon && yarn copy:assets:tauri && run-script-os",
"build:tauri:plugin:api": "cd src-tauri/plugins && yarn install && yarn workspaces foreach -Apt run build",
"build:icon": "tauri icon ./src-tauri/icons/icon.png",
"build:core": "cd core && yarn build && yarn pack",
"build:web": "yarn workspace @janhq/web-app build",

src-tauri/Cargo.lock
View File

@ -6,36 +6,31 @@ version = 3
name = "Jan"
version = "0.6.599"
dependencies = [
"ash",
"base64 0.22.1",
"byteorder",
"dirs",
"env",
"fix-path-env",
"flate2",
"futures-util",
"hmac",
"hyper 0.14.32",
"jan-utils",
"libc",
"libloading 0.8.8",
"log",
"nix",
"nvml-wrapper",
"once_cell",
"rand 0.8.5",
"reqwest 0.11.27",
"rmcp",
"serde",
"serde_json",
"serde_yaml",
"sha2",
"sysinfo",
"tar",
"tauri",
"tauri-build",
"tauri-plugin-deep-link",
"tauri-plugin-dialog",
"tauri-plugin-hardware",
"tauri-plugin-http",
"tauri-plugin-llamacpp",
"tauri-plugin-log",
"tauri-plugin-opener",
"tauri-plugin-os",
@ -2281,6 +2276,21 @@ version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "jan-utils"
version = "0.1.0"
dependencies = [
"base64 0.22.1",
"hmac",
"rand 0.8.5",
"reqwest 0.11.27",
"serde",
"serde_json",
"sha2",
"tokio",
"url",
]
[[package]]
name = "javascriptcore-rs"
version = "1.1.2"
@ -5081,6 +5091,22 @@ dependencies = [
"url",
]
[[package]]
name = "tauri-plugin-hardware"
version = "0.6.599"
dependencies = [
"ash",
"libc",
"libloading 0.8.8",
"log",
"nvml-wrapper",
"serde",
"serde_json",
"sysinfo",
"tauri",
"tauri-plugin",
]
[[package]]
name = "tauri-plugin-http"
version = "2.5.1"
@ -5105,6 +5131,27 @@ dependencies = [
"urlpattern",
]
[[package]]
name = "tauri-plugin-llamacpp"
version = "0.6.599"
dependencies = [
"base64 0.22.1",
"byteorder",
"hmac",
"jan-utils",
"log",
"nix",
"rand 0.8.5",
"serde",
"sha2",
"sysinfo",
"tauri",
"tauri-plugin",
"thiserror 2.0.12",
"tokio",
"windows-sys 0.60.2",
]
[[package]]
name = "tauri-plugin-log"
version = "2.6.0"

View File

@ -34,21 +34,16 @@ test-tauri = [
tauri-build = { version = "2.0.2", features = [] }
[dependencies]
serde_json = "1.0"
serde = { version = "1.0", features = ["derive"] }
log = "0.4"
tauri-plugin-log = "2.0.0-rc"
tauri-plugin-shell = "2.2.0"
tauri-plugin-os = "2.2.1"
tauri-plugin-opener = "2.2.7"
dirs = "6.0.0"
env = "1.0.1"
fix-path-env = { git = "https://github.com/tauri-apps/fix-path-env-rs" }
flate2 = "1.0"
tar = "0.4"
rand = "0.8"
tauri-plugin-http = { version = "2", features = ["unsafe-headers"] }
tauri-plugin-store = "2"
futures-util = "0.3.31"
hyper = { version = "0.14", features = ["server"] }
jan-utils = { path = "./utils" }
libloading = "0.8.7"
log = "0.4"
reqwest = { version = "0.11", features = ["json", "blocking", "stream"] }
tokio = { version = "1", features = ["full"] }
rmcp = { git = "https://github.com/modelcontextprotocol/rust-sdk", rev = "3196c95f1dfafbffbdcdd6d365c94969ac975e6a", features = [
"client",
"transport-sse-client",
@ -56,34 +51,30 @@ rmcp = { git = "https://github.com/modelcontextprotocol/rust-sdk", rev = "3196c9
"tower",
"reqwest",
] }
uuid = { version = "1.7", features = ["v4"] }
env = "1.0.1"
futures-util = "0.3.31"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_yaml = "0.9.34"
tar = "0.4"
tauri-plugin-deep-link = "2"
tauri-plugin-dialog = "2.2.1"
tauri-plugin-hardware = { path = "./plugins/tauri-plugin-hardware" }
tauri-plugin-http = { version = "2", features = ["unsafe-headers"] }
tauri-plugin-llamacpp = { path = "./plugins/tauri-plugin-llamacpp" }
tauri-plugin-log = "2.0.0-rc"
tauri-plugin-opener = "2.2.7"
tauri-plugin-os = "2.2.1"
tauri-plugin-shell = "2.2.0"
tauri-plugin-store = "2"
thiserror = "2.0.12"
tokio = { version = "1", features = ["full"] }
tokio-util = "0.7.14"
url = "2.5"
tauri-plugin-dialog = "2.2.1"
dirs = "6.0.0"
sysinfo = "0.34.2"
ash = "0.38.0"
nvml-wrapper = "0.10.0"
tauri-plugin-deep-link = "2"
fix-path-env = { git = "https://github.com/tauri-apps/fix-path-env-rs" }
serde_yaml = "0.9.34"
hmac = "0.12.1"
sha2 = "0.10.9"
base64 = "0.22.1"
libloading = "0.8.7"
thiserror = "2.0.12"
byteorder = "1.5.0"
uuid = { version = "1.7", features = ["v4"] }
[dependencies.tauri]
version = "2.5.0"
default-features = false
features = [
"protocol-asset",
"macos-private-api",
"test",
]
features = ["protocol-asset", "macos-private-api", "test"]
[target.'cfg(windows)'.dev-dependencies]
tempfile = "3.20.0"

View File

@ -55,6 +55,8 @@
}
]
},
"store:default"
"store:default",
"llamacpp:default",
"hardware:default"
]
}

View File

@ -0,0 +1,3 @@
nmHoistingLimits: workspaces
nodeLinker: node-modules
checksumBehavior: update

View File

@ -0,0 +1,12 @@
{
"private": true,
"workspaces": {
"packages": [
"**"
]
},
"installConfig": {
"hoistingLimits": "workspaces"
},
"packageManager": "yarn@4.5.3"
}

View File

@ -0,0 +1,17 @@
/.vs
.DS_Store
.Thumbs.db
*.sublime*
.idea/
debug.log
package-lock.json
.vscode/settings.json
yarn.lock
/.tauri
/target
Cargo.lock
node_modules/
dist-js
dist

View File

@ -0,0 +1,28 @@
[package]
name = "tauri-plugin-hardware"
version = "0.6.599"
authors = ["Jan <service@jan.ai>"]
description = "Tauri plugin for hardware information and GPU monitoring"
license = "MIT"
repository = "https://github.com/menloresearch/jan"
edition = "2021"
rust-version = "1.77.2"
exclude = ["/examples", "/dist-js", "/guest-js", "/node_modules"]
links = "tauri-plugin-hardware"
[dependencies]
ash = "0.38.0"
libc = "0.2"
log = "0.4"
nvml-wrapper = "0.10.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
sysinfo = "0.34.2"
tauri = { version = "2.5.0", default-features = false, features = ["test"] }
# Windows-specific dependencies
[target.'cfg(windows)'.dependencies]
libloading = "0.8"
[build-dependencies]
tauri-plugin = { version = "2.3.1", features = ["build"] }

View File

@ -0,0 +1,5 @@
const COMMANDS: &[&str] = &["get_system_info", "get_system_usage"];
fn main() {
tauri_plugin::Builder::new(COMMANDS).build();
}

View File

@ -0,0 +1,49 @@
import { invoke } from '@tauri-apps/api/core'
// Types
export interface CpuStaticInfo {
name: string;
core_count: number;
arch: string;
extensions: string[];
}
export interface GpuInfo {
name: string;
total_memory: number;
vendor: string;
uuid: string;
driver_version: string;
nvidia_info?: any;
vulkan_info?: any;
}
export interface SystemInfo {
cpu: CpuStaticInfo;
os_type: string;
os_name: string;
total_memory: number;
gpus: GpuInfo[];
}
export interface GpuUsage {
uuid: string;
used_memory: number;
total_memory: number;
}
export interface SystemUsage {
cpu: number;
used_memory: number;
total_memory: number;
gpus: GpuUsage[];
}
// Hardware commands
export async function getSystemInfo(): Promise<SystemInfo> {
return await invoke('plugin:hardware|get_system_info');
}
export async function getSystemUsage(): Promise<SystemUsage> {
return await invoke('plugin:hardware|get_system_usage');
}
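
As a usage sketch, a caller could log the static info once and then poll usage. The function name and interval are illustrative; the memory figures are MiB, matching the bytes-to-MiB conversion in the plugin's Rust commands further down.

```typescript
import { getSystemInfo, getSystemUsage } from '@janhq/tauri-plugin-hardware-api'

// Illustrative polling loop built on the two commands exported above.
export async function logHardware(intervalMs = 5000): Promise<void> {
  const info = await getSystemInfo()
  console.log(`${info.os_name}: ${info.gpus.length} GPU(s), ${info.total_memory} MiB RAM`)
  setInterval(async () => {
    const usage = await getSystemUsage()
    console.log(`CPU ${usage.cpu.toFixed(1)}%, ${usage.used_memory}/${usage.total_memory} MiB used`)
  }, intervalMs)
}
```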

View File

@ -0,0 +1,33 @@
{
"name": "@janhq/tauri-plugin-hardware-api",
"version": "0.6.6",
"private": true,
"description": "Hardware monitoring plugin API for Tauri",
"type": "module",
"types": "./dist-js/index.d.ts",
"main": "./dist-js/index.cjs",
"module": "./dist-js/index.js",
"exports": {
"types": "./dist-js/index.d.ts",
"import": "./dist-js/index.js",
"require": "./dist-js/index.cjs"
},
"files": [
"dist-js",
"README.md"
],
"scripts": {
"build": "rollup -c",
"prepublishOnly": "yarn build",
"pretest": "yarn build"
},
"dependencies": {
"@tauri-apps/api": ">=2.0.0-beta.6"
},
"devDependencies": {
"@rollup/plugin-typescript": "^12.0.0",
"rollup": "^4.9.6",
"tslib": "^2.6.2",
"typescript": "^5.3.3"
}
}

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-get-system-info"
description = "Enables the get_system_info command without any pre-configured scope."
commands.allow = ["get_system_info"]
[[permission]]
identifier = "deny-get-system-info"
description = "Denies the get_system_info command without any pre-configured scope."
commands.deny = ["get_system_info"]

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-get-system-usage"
description = "Enables the get_system_usage command without any pre-configured scope."
commands.allow = ["get_system_usage"]
[[permission]]
identifier = "deny-get-system-usage"
description = "Denies the get_system_usage command without any pre-configured scope."
commands.deny = ["get_system_usage"]

View File

@ -0,0 +1,70 @@
## Default Permission
Default permissions for the hardware plugin
#### This default permission set includes the following:
- `allow-get-system-info`
- `allow-get-system-usage`
## Permission Table
<table>
<tr>
<th>Identifier</th>
<th>Description</th>
</tr>
<tr>
<td>
`hardware:allow-get-system-info`
</td>
<td>
Enables the get_system_info command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`hardware:deny-get-system-info`
</td>
<td>
Denies the get_system_info command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`hardware:allow-get-system-usage`
</td>
<td>
Enables the get_system_usage command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`hardware:deny-get-system-usage`
</td>
<td>
Denies the get_system_usage command without any pre-configured scope.
</td>
</tr>
</table>

View File

@ -0,0 +1,6 @@
[default]
description = "Default permissions for the hardware plugin"
permissions = [
"allow-get-system-info",
"allow-get-system-usage"
]

View File

@ -0,0 +1,330 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "PermissionFile",
"description": "Permission file that can define a default permission, a set of permissions or a list of inlined permissions.",
"type": "object",
"properties": {
"default": {
"description": "The default permission set for the plugin",
"anyOf": [
{
"$ref": "#/definitions/DefaultPermission"
},
{
"type": "null"
}
]
},
"set": {
"description": "A list of permissions sets defined",
"type": "array",
"items": {
"$ref": "#/definitions/PermissionSet"
}
},
"permission": {
"description": "A list of inlined permissions",
"default": [],
"type": "array",
"items": {
"$ref": "#/definitions/Permission"
}
}
},
"definitions": {
"DefaultPermission": {
"description": "The default permission set of the plugin.\n\nWorks similarly to a permission with the \"default\" identifier.",
"type": "object",
"required": [
"permissions"
],
"properties": {
"version": {
"description": "The version of the permission.",
"type": [
"integer",
"null"
],
"format": "uint64",
"minimum": 1.0
},
"description": {
"description": "Human-readable description of what the permission does. Tauri convention is to use `<h4>` headings in markdown content for Tauri documentation generation purposes.",
"type": [
"string",
"null"
]
},
"permissions": {
"description": "All permissions this set contains.",
"type": "array",
"items": {
"type": "string"
}
}
}
},
"PermissionSet": {
"description": "A set of direct permissions grouped together under a new name.",
"type": "object",
"required": [
"description",
"identifier",
"permissions"
],
"properties": {
"identifier": {
"description": "A unique identifier for the permission.",
"type": "string"
},
"description": {
"description": "Human-readable description of what the permission does.",
"type": "string"
},
"permissions": {
"description": "All permissions this set contains.",
"type": "array",
"items": {
"$ref": "#/definitions/PermissionKind"
}
}
}
},
"Permission": {
"description": "Descriptions of explicit privileges of commands.\n\nIt can enable commands to be accessible in the frontend of the application.\n\nIf the scope is defined it can be used to fine grain control the access of individual or multiple commands.",
"type": "object",
"required": [
"identifier"
],
"properties": {
"version": {
"description": "The version of the permission.",
"type": [
"integer",
"null"
],
"format": "uint64",
"minimum": 1.0
},
"identifier": {
"description": "A unique identifier for the permission.",
"type": "string"
},
"description": {
"description": "Human-readable description of what the permission does. Tauri internal convention is to use `<h4>` headings in markdown content for Tauri documentation generation purposes.",
"type": [
"string",
"null"
]
},
"commands": {
"description": "Allowed or denied commands when using this permission.",
"default": {
"allow": [],
"deny": []
},
"allOf": [
{
"$ref": "#/definitions/Commands"
}
]
},
"scope": {
"description": "Allowed or denied scoped when using this permission.",
"allOf": [
{
"$ref": "#/definitions/Scopes"
}
]
},
"platforms": {
"description": "Target platforms this permission applies. By default all platforms are affected by this permission.",
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/Target"
}
}
}
},
"Commands": {
"description": "Allowed and denied commands inside a permission.\n\nIf two commands clash inside of `allow` and `deny`, it should be denied by default.",
"type": "object",
"properties": {
"allow": {
"description": "Allowed command.",
"default": [],
"type": "array",
"items": {
"type": "string"
}
},
"deny": {
"description": "Denied command, which takes priority.",
"default": [],
"type": "array",
"items": {
"type": "string"
}
}
}
},
"Scopes": {
"description": "An argument for fine grained behavior control of Tauri commands.\n\nIt can be of any serde serializable type and is used to allow or prevent certain actions inside a Tauri command. The configured scope is passed to the command and will be enforced by the command implementation.\n\n## Example\n\n```json { \"allow\": [{ \"path\": \"$HOME/**\" }], \"deny\": [{ \"path\": \"$HOME/secret.txt\" }] } ```",
"type": "object",
"properties": {
"allow": {
"description": "Data that defines what is allowed by the scope.",
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/Value"
}
},
"deny": {
"description": "Data that defines what is denied by the scope. This should be prioritized by validation logic.",
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/Value"
}
}
}
},
"Value": {
"description": "All supported ACL values.",
"anyOf": [
{
"description": "Represents a null JSON value.",
"type": "null"
},
{
"description": "Represents a [`bool`].",
"type": "boolean"
},
{
"description": "Represents a valid ACL [`Number`].",
"allOf": [
{
"$ref": "#/definitions/Number"
}
]
},
{
"description": "Represents a [`String`].",
"type": "string"
},
{
"description": "Represents a list of other [`Value`]s.",
"type": "array",
"items": {
"$ref": "#/definitions/Value"
}
},
{
"description": "Represents a map of [`String`] keys to [`Value`]s.",
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/Value"
}
}
]
},
"Number": {
"description": "A valid ACL number.",
"anyOf": [
{
"description": "Represents an [`i64`].",
"type": "integer",
"format": "int64"
},
{
"description": "Represents a [`f64`].",
"type": "number",
"format": "double"
}
]
},
"Target": {
"description": "Platform target.",
"oneOf": [
{
"description": "MacOS.",
"type": "string",
"enum": [
"macOS"
]
},
{
"description": "Windows.",
"type": "string",
"enum": [
"windows"
]
},
{
"description": "Linux.",
"type": "string",
"enum": [
"linux"
]
},
{
"description": "Android.",
"type": "string",
"enum": [
"android"
]
},
{
"description": "iOS.",
"type": "string",
"enum": [
"iOS"
]
}
]
},
"PermissionKind": {
"type": "string",
"oneOf": [
{
"description": "Enables the get_system_info command without any pre-configured scope.",
"type": "string",
"const": "allow-get-system-info",
"markdownDescription": "Enables the get_system_info command without any pre-configured scope."
},
{
"description": "Denies the get_system_info command without any pre-configured scope.",
"type": "string",
"const": "deny-get-system-info",
"markdownDescription": "Denies the get_system_info command without any pre-configured scope."
},
{
"description": "Enables the get_system_usage command without any pre-configured scope.",
"type": "string",
"const": "allow-get-system-usage",
"markdownDescription": "Enables the get_system_usage command without any pre-configured scope."
},
{
"description": "Denies the get_system_usage command without any pre-configured scope.",
"type": "string",
"const": "deny-get-system-usage",
"markdownDescription": "Denies the get_system_usage command without any pre-configured scope."
},
{
"description": "Default permissions for the hardware plugin\n#### This default permission set includes:\n\n- `allow-get-system-info`\n- `allow-get-system-usage`",
"type": "string",
"const": "default",
"markdownDescription": "Default permissions for the hardware plugin\n#### This default permission set includes:\n\n- `allow-get-system-info`\n- `allow-get-system-usage`"
}
]
}
}
}

View File

@ -0,0 +1,31 @@
import { readFileSync } from 'node:fs'
import { dirname, join } from 'node:path'
import { cwd } from 'node:process'
import typescript from '@rollup/plugin-typescript'
const pkg = JSON.parse(readFileSync(join(cwd(), 'package.json'), 'utf8'))
export default {
input: 'guest-js/index.ts',
output: [
{
file: pkg.exports.import,
format: 'esm'
},
{
file: pkg.exports.require,
format: 'cjs'
}
],
plugins: [
typescript({
declaration: true,
declarationDir: dirname(pkg.exports.import)
})
],
external: [
/^@tauri-apps\/api/,
...Object.keys(pkg.dependencies || {}),
...Object.keys(pkg.peerDependencies || {})
]
}

View File

@ -0,0 +1,90 @@
use crate::{
helpers::get_jan_libvulkan_path,
types::{CpuStaticInfo, SystemInfo, SystemUsage},
vendor::{nvidia, vulkan},
SYSTEM_INFO,
};
use sysinfo::System;
use tauri::Runtime;
#[tauri::command]
pub fn get_system_info<R: Runtime>(app: tauri::AppHandle<R>) -> SystemInfo {
SYSTEM_INFO
.get_or_init(|| {
let mut system = System::new();
system.refresh_memory();
let mut gpu_map = std::collections::HashMap::new();
for gpu in nvidia::get_nvidia_gpus() {
gpu_map.insert(gpu.uuid.clone(), gpu);
}
// try system vulkan first
let paths = vec!["".to_string(), get_jan_libvulkan_path(app.clone())];
let mut vulkan_gpus = vec![];
for path in paths {
vulkan_gpus = vulkan::get_vulkan_gpus(&path);
if !vulkan_gpus.is_empty() {
break;
}
}
for gpu in vulkan_gpus {
match gpu_map.get_mut(&gpu.uuid) {
// for existing NVIDIA GPUs, add Vulkan info
Some(nvidia_gpu) => {
nvidia_gpu.vulkan_info = gpu.vulkan_info;
}
None => {
gpu_map.insert(gpu.uuid.clone(), gpu);
}
}
}
let os_type = if cfg!(target_os = "windows") {
"windows"
} else if cfg!(target_os = "macos") {
"macos"
} else if cfg!(target_os = "linux") {
"linux"
} else {
"unknown"
};
let os_name = System::long_os_version().unwrap_or("Unknown".to_string());
SystemInfo {
cpu: CpuStaticInfo::new(),
os_type: os_type.to_string(),
os_name,
total_memory: system.total_memory() / 1024 / 1024, // bytes to MiB
gpus: gpu_map.into_values().collect(),
}
})
.clone()
}
#[tauri::command]
pub fn get_system_usage<R: Runtime>(app: tauri::AppHandle<R>) -> SystemUsage {
let mut system = System::new();
system.refresh_memory();
// need to refresh 2 times to get CPU usage
system.refresh_cpu_all();
std::thread::sleep(sysinfo::MINIMUM_CPU_UPDATE_INTERVAL);
system.refresh_cpu_all();
let cpus = system.cpus();
let cpu_usage =
cpus.iter().map(|cpu| cpu.cpu_usage()).sum::<f32>() / (cpus.len().max(1) as f32);
SystemUsage {
cpu: cpu_usage,
used_memory: system.used_memory() / 1024 / 1024, // bytes to MiB,
total_memory: system.total_memory() / 1024 / 1024, // bytes to MiB,
gpus: get_system_info(app.clone())
.gpus
.iter()
.map(|gpu| gpu.get_usage())
.collect(),
}
}

View File

@ -0,0 +1,4 @@
// https://devicehunt.com/all-pci-vendors
pub const VENDOR_ID_AMD: u32 = 0x1002;
pub const VENDOR_ID_NVIDIA: u32 = 0x10DE;
pub const VENDOR_ID_INTEL: u32 = 0x8086;

View File

@ -0,0 +1,130 @@
use sysinfo::System;
use crate::types::CpuStaticInfo;
impl CpuStaticInfo {
pub fn new() -> Self {
let mut system = System::new();
system.refresh_cpu_all();
let name = system
.cpus()
.first()
.map(|cpu| {
let brand = cpu.brand();
if brand.is_empty() {
cpu.name()
} else {
brand
}
})
.unwrap_or("unknown")
.to_string();
CpuStaticInfo {
name,
core_count: System::physical_core_count().unwrap_or(0),
arch: std::env::consts::ARCH.to_string(),
extensions: CpuStaticInfo::get_extensions(),
}
}
// TODO: see if we need to check for all CPU extensions
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn get_extensions() -> Vec<String> {
let mut exts = vec![];
// fpu is always present on modern x86 processors,
// but is_x86_feature_detected doesn't support it
exts.push("fpu".to_string());
if is_x86_feature_detected!("mmx") {
exts.push("mmx".to_string());
}
if is_x86_feature_detected!("sse") {
exts.push("sse".to_string());
}
if is_x86_feature_detected!("sse2") {
exts.push("sse2".to_string());
}
if is_x86_feature_detected!("sse3") {
exts.push("sse3".to_string());
}
if is_x86_feature_detected!("ssse3") {
exts.push("ssse3".to_string());
}
if is_x86_feature_detected!("sse4.1") {
exts.push("sse4_1".to_string());
}
if is_x86_feature_detected!("sse4.2") {
exts.push("sse4_2".to_string());
}
if is_x86_feature_detected!("pclmulqdq") {
exts.push("pclmulqdq".to_string());
}
if is_x86_feature_detected!("avx") {
exts.push("avx".to_string());
}
if is_x86_feature_detected!("avx2") {
exts.push("avx2".to_string());
}
if is_x86_feature_detected!("avx512f") {
exts.push("avx512_f".to_string());
}
if is_x86_feature_detected!("avx512dq") {
exts.push("avx512_dq".to_string());
}
if is_x86_feature_detected!("avx512ifma") {
exts.push("avx512_ifma".to_string());
}
if is_x86_feature_detected!("avx512pf") {
exts.push("avx512_pf".to_string());
}
if is_x86_feature_detected!("avx512er") {
exts.push("avx512_er".to_string());
}
if is_x86_feature_detected!("avx512cd") {
exts.push("avx512_cd".to_string());
}
if is_x86_feature_detected!("avx512bw") {
exts.push("avx512_bw".to_string());
}
if is_x86_feature_detected!("avx512vl") {
exts.push("avx512_vl".to_string());
}
if is_x86_feature_detected!("avx512vbmi") {
exts.push("avx512_vbmi".to_string());
}
if is_x86_feature_detected!("avx512vbmi2") {
exts.push("avx512_vbmi2".to_string());
}
if is_x86_feature_detected!("avx512vnni") {
exts.push("avx512_vnni".to_string());
}
if is_x86_feature_detected!("avx512bitalg") {
exts.push("avx512_bitalg".to_string());
}
if is_x86_feature_detected!("avx512vpopcntdq") {
exts.push("avx512_vpopcntdq".to_string());
}
// avx512_4vnniw and avx512_4fmaps are only available on Intel Knights Mill, which are
// very rare. https://en.wikipedia.org/wiki/AVX-512
// is_x86_feature_detected doesn't support them
if is_x86_feature_detected!("avx512vp2intersect") {
exts.push("avx512_vp2intersect".to_string());
}
if is_x86_feature_detected!("aes") {
exts.push("aes".to_string());
}
if is_x86_feature_detected!("f16c") {
exts.push("f16c".to_string());
}
exts
}
// Cortex always returns empty list for non-x86
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
fn get_extensions() -> Vec<String> {
vec![]
}
}

View File

@ -0,0 +1,33 @@
use crate::{
constants::{VENDOR_ID_AMD, VENDOR_ID_INTEL, VENDOR_ID_NVIDIA},
types::{GpuInfo, GpuUsage, Vendor},
};
impl Vendor {
pub fn from_vendor_id(vendor_id: u32) -> Self {
match vendor_id {
VENDOR_ID_AMD => Vendor::AMD,
VENDOR_ID_NVIDIA => Vendor::NVIDIA,
VENDOR_ID_INTEL => Vendor::Intel,
_ => Vendor::Unknown(vendor_id),
}
}
}
impl GpuInfo {
pub fn get_usage(&self) -> GpuUsage {
match self.vendor {
Vendor::NVIDIA => self.get_usage_nvidia(),
Vendor::AMD => self.get_usage_amd(),
_ => self.get_usage_unsupported(),
}
}
pub fn get_usage_unsupported(&self) -> GpuUsage {
GpuUsage {
uuid: self.uuid.clone(),
used_memory: 0,
total_memory: 0,
}
}
}

View File

@ -0,0 +1,20 @@
use tauri::{path::BaseDirectory, Manager, Runtime};
pub fn get_jan_libvulkan_path<R: Runtime>(app: tauri::AppHandle<R>) -> String {
let lib_name = if cfg!(target_os = "windows") {
"vulkan-1.dll"
} else if cfg!(target_os = "linux") {
"libvulkan.so"
} else {
return "".to_string();
};
// NOTE: this does not work in test mode (mock app)
match app.path().resolve(
format!("resources/lib/{}", lib_name),
BaseDirectory::Resource,
) {
Ok(lib_path) => lib_path.to_string_lossy().to_string(),
Err(_) => "".to_string(),
}
}

View File

@ -0,0 +1,29 @@
mod commands;
mod constants;
pub mod cpu;
pub mod gpu;
mod helpers;
mod types;
pub mod vendor;
pub use constants::*;
pub use helpers::*;
pub use types::*;
use std::sync::OnceLock;
use tauri::Runtime;
static SYSTEM_INFO: OnceLock<SystemInfo> = OnceLock::new();
/// Initialize the hardware plugin
pub fn init<R: Runtime>() -> tauri::plugin::TauriPlugin<R> {
tauri::plugin::Builder::new("hardware")
.invoke_handler(tauri::generate_handler![
commands::get_system_info,
commands::get_system_usage
])
.build()
}
#[cfg(test)]
mod tests;

View File

@ -0,0 +1,16 @@
use crate::commands::*;
use tauri::test::mock_app;
#[test]
fn test_system_info() {
let app = mock_app();
let info = get_system_info(app.handle().clone());
println!("System Static Info: {:?}", info);
}
#[test]
fn test_system_usage() {
let app = mock_app();
let usage = get_system_usage(app.handle().clone());
println!("System Usage Info: {:?}", usage);
}

View File

@ -0,0 +1,71 @@
use serde::Serialize;
use crate::vendor::{nvidia::NvidiaInfo, vulkan::VulkanInfo};
#[derive(Clone, Serialize, Debug)]
pub struct CpuStaticInfo {
pub name: String,
pub core_count: usize,
pub arch: String,
pub extensions: Vec<String>,
}
#[derive(Debug, Clone)]
pub enum Vendor {
AMD,
NVIDIA,
Intel,
Unknown(u32),
}
impl Serialize for Vendor {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Vendor::AMD => "AMD".serialize(serializer),
Vendor::NVIDIA => "NVIDIA".serialize(serializer),
Vendor::Intel => "Intel".serialize(serializer),
Vendor::Unknown(vendor_id) => {
let formatted = format!("Unknown (vendor_id: {})", vendor_id);
serializer.serialize_str(&formatted)
}
}
}
}
#[derive(Clone, Debug, Serialize)]
pub struct GpuInfo {
pub name: String,
pub total_memory: u64,
pub vendor: Vendor,
pub uuid: String,
pub driver_version: String,
pub nvidia_info: Option<NvidiaInfo>,
pub vulkan_info: Option<VulkanInfo>,
}
#[derive(Serialize, Clone, Debug)]
pub struct SystemInfo {
pub cpu: CpuStaticInfo,
pub os_type: String,
pub os_name: String,
pub total_memory: u64,
pub gpus: Vec<GpuInfo>,
}
#[derive(Serialize, Clone, Debug)]
pub struct GpuUsage {
pub uuid: String,
pub used_memory: u64,
pub total_memory: u64,
}
#[derive(Serialize, Clone, Debug)]
pub struct SystemUsage {
pub cpu: f32,
pub used_memory: u64,
pub total_memory: u64,
pub gpus: Vec<GpuUsage>,
}

View File

@ -1,4 +1,4 @@
use super::{GpuInfo, GpuUsage};
use crate::types::{GpuInfo, GpuUsage};
impl GpuInfo {
#[cfg(not(target_os = "linux"))]

View File

@ -0,0 +1,6 @@
pub mod amd;
pub mod nvidia;
pub mod vulkan;
#[cfg(test)]
mod tests;

View File

@ -1,4 +1,4 @@
use super::{GpuInfo, GpuUsage, Vendor};
use crate::types::{GpuInfo, GpuUsage, Vendor};
use nvml_wrapper::{error::NvmlError, Nvml};
use std::sync::OnceLock;
@ -103,18 +103,3 @@ pub fn get_nvidia_gpus() -> Vec<GpuInfo> {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_nvidia_gpus() {
let gpus = get_nvidia_gpus();
for (i, gpu) in gpus.iter().enumerate() {
println!("GPU {}:", i);
println!(" {:?}", gpu);
println!(" {:?}", gpu.get_usage());
}
}
}

View File

@ -0,0 +1,21 @@
use crate::vendor::{nvidia, vulkan};
#[test]
fn test_get_nvidia_gpus() {
let gpus = nvidia::get_nvidia_gpus();
for (i, gpu) in gpus.iter().enumerate() {
println!("GPU {}:", i);
println!(" {:?}", gpu);
println!(" {:?}", gpu.get_usage());
}
}
#[test]
fn test_get_vulkan_gpus() {
let gpus = vulkan::get_vulkan_gpus("");
for (i, gpu) in gpus.iter().enumerate() {
println!("GPU {}:", i);
println!(" {:?}", gpu);
println!(" {:?}", gpu.get_usage());
}
}

View File

@ -1,4 +1,4 @@
use super::{GpuInfo, Vendor};
use crate::types::{GpuInfo, Vendor};
use ash::{vk, Entry};
#[derive(Debug, Clone, serde::Serialize)]
@ -128,18 +128,3 @@ fn get_vulkan_gpus_internal(lib_path: &str) -> Result<Vec<GpuInfo>, Box<dyn std:
Ok(device_info_list)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_vulkan_gpus() {
let gpus = get_vulkan_gpus("");
for (i, gpu) in gpus.iter().enumerate() {
println!("GPU {}:", i);
println!(" {:?}", gpu);
println!(" {:?}", gpu.get_usage());
}
}
}

View File

@ -0,0 +1,14 @@
{
"compilerOptions": {
"target": "es2021",
"module": "esnext",
"moduleResolution": "bundler",
"skipLibCheck": true,
"strict": true,
"noUnusedLocals": true,
"noImplicitAny": true,
"noEmit": true
},
"include": ["guest-js/*.ts"],
"exclude": ["dist-js", "node_modules"]
}

View File

@ -0,0 +1,17 @@
/.vs
.DS_Store
.Thumbs.db
*.sublime*
.idea/
debug.log
package-lock.json
.vscode/settings.json
yarn.lock
/.tauri
/target
Cargo.lock
node_modules/
dist-js
dist

View File

@ -0,0 +1,36 @@
[package]
name = "tauri-plugin-llamacpp"
version = "0.6.599"
authors = ["Jan <service@jan.ai>"]
description = "Tauri plugin for managing Jan LlamaCpp server processes and model loading"
license = "MIT"
repository = "https://github.com/menloresearch/jan"
edition = "2021"
rust-version = "1.77.2"
exclude = ["/examples", "/dist-js", "/guest-js", "/node_modules"]
links = "tauri-plugin-llamacpp"
[dependencies]
base64 = "0.22.1"
byteorder = "1.5.0"
hmac = "0.12.1"
jan-utils = { path = "../../utils" }
log = "0.4"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
sha2 = "0.10.9"
sysinfo = "0.34.2"
tauri = { version = "2.5.0", default-features = false, features = [] }
thiserror = "2.0.12"
tokio = { version = "1", features = ["full"] }
# Windows-specific dependencies
[target.'cfg(windows)'.dependencies]
windows-sys = { version = "0.60.2", features = ["Win32_Storage_FileSystem"] }
# Unix-specific dependencies
[target.'cfg(unix)'.dependencies]
nix = { version = "=0.30.1", features = ["signal", "process"] }
[build-dependencies]
tauri-plugin = { version = "2.3.1", features = ["build"] }

View File

@ -0,0 +1,21 @@
const COMMANDS: &[&str] = &[
// Cleanup command
"cleanup_llama_processes",
// LlamaCpp server commands
"load_llama_model",
"unload_llama_model",
"get_devices",
"generate_api_key",
"is_process_running",
"get_random_port",
"find_session_by_model",
"get_loaded_models",
"get_all_sessions",
"get_session_by_model",
// GGUF commands
"read_gguf_metadata",
];
fn main() {
tauri_plugin::Builder::new(COMMANDS).build();
}

View File

@ -0,0 +1,93 @@
import { invoke } from '@tauri-apps/api/core'
// Types
export interface SessionInfo {
pid: number;
port: number;
model_id: string;
model_path: string;
api_key: string;
}
export interface DeviceInfo {
id: string;
name: string;
memory: number;
}
export interface GgufMetadata {
version: number;
tensor_count: number;
metadata: Record<string, string>;
}
// Cleanup commands
export async function cleanupLlamaProcesses(): Promise<void> {
return await invoke('plugin:llamacpp|cleanup_llama_processes');
}
// LlamaCpp server commands
export async function loadLlamaModel(
backendPath: string,
libraryPath?: string,
args: string[] = []
): Promise<SessionInfo> {
return await invoke('plugin:llamacpp|load_llama_model', {
backendPath,
libraryPath,
args
});
}
export async function unloadLlamaModel(pid: number): Promise<void> {
return await invoke('plugin:llamacpp|unload_llama_model', { pid });
}
export async function getDevices(
backendPath: string,
libraryPath?: string
): Promise<DeviceInfo[]> {
return await invoke('plugin:llamacpp|get_devices', {
backendPath,
libraryPath
});
}
export async function generateApiKey(
modelId: string,
apiSecret: string
): Promise<string> {
return await invoke('plugin:llamacpp|generate_api_key', {
modelId,
apiSecret
});
}
export async function isProcessRunning(pid: number): Promise<boolean> {
return await invoke('plugin:llamacpp|is_process_running', { pid });
}
export async function getRandomPort(): Promise<number> {
return await invoke('plugin:llamacpp|get_random_port');
}
export async function findSessionByModel(modelId: string): Promise<SessionInfo | null> {
return await invoke('plugin:llamacpp|find_session_by_model', { modelId });
}
export async function getLoadedModels(): Promise<string[]> {
return await invoke('plugin:llamacpp|get_loaded_models');
}
export async function getAllSessions(): Promise<SessionInfo[]> {
return await invoke('plugin:llamacpp|get_all_sessions');
}
export async function getSessionByModel(modelId: string): Promise<SessionInfo | null> {
return await invoke('plugin:llamacpp|get_session_by_model', { modelId });
}
// GGUF commands
export async function readGgufMetadata(path: string): Promise<GgufMetadata> {
return await invoke('plugin:llamacpp|read_gguf_metadata', { path });
}
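
Taken together, these wrappers cover the whole server lifecycle. A hedged end-to-end sketch follows; the backend path, library path, and server arguments are placeholders, and error handling is left to the caller as in the extension code above.

```typescript
import {
  loadLlamaModel,
  isProcessRunning,
  unloadLlamaModel,
} from '@janhq/tauri-plugin-llamacpp-api'

// Placeholder paths and args; the extension derives these from the
// downloaded backend and the model's GGUF file.
async function runOnce(): Promise<void> {
  const session = await loadLlamaModel(
    '/path/to/llama-server', // backendPath (placeholder)
    '/path/to/lib',          // libraryPath (placeholder)
    ['--model', '/path/to/model.gguf', '--port', '8080']
  )
  if (await isProcessRunning(session.pid)) {
    // ...talk to the local server on session.port using session.api_key...
    await unloadLlamaModel(session.pid)
  }
}
```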

View File

@ -0,0 +1,33 @@
{
"name": "@janhq/tauri-plugin-llamacpp-api",
"version": "0.6.6",
"private": true,
"description": "",
"type": "module",
"types": "./dist-js/index.d.ts",
"main": "./dist-js/index.cjs",
"module": "./dist-js/index.js",
"exports": {
"types": "./dist-js/index.d.ts",
"import": "./dist-js/index.js",
"require": "./dist-js/index.cjs"
},
"files": [
"dist-js",
"README.md"
],
"scripts": {
"build": "rollup -c",
"prepublishOnly": "yarn build",
"pretest": "yarn build"
},
"dependencies": {
"@tauri-apps/api": ">=2.0.0-beta.6"
},
"devDependencies": {
"@rollup/plugin-typescript": "^12.0.0",
"rollup": "^4.9.6",
"tslib": "^2.6.2",
"typescript": "^5.3.3"
}
}

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-cleanup-llama-processes"
description = "Enables the cleanup_llama_processes command without any pre-configured scope."
commands.allow = ["cleanup_llama_processes"]
[[permission]]
identifier = "deny-cleanup-llama-processes"
description = "Denies the cleanup_llama_processes command without any pre-configured scope."
commands.deny = ["cleanup_llama_processes"]

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-find-session-by-model"
description = "Enables the find_session_by_model command without any pre-configured scope."
commands.allow = ["find_session_by_model"]
[[permission]]
identifier = "deny-find-session-by-model"
description = "Denies the find_session_by_model command without any pre-configured scope."
commands.deny = ["find_session_by_model"]

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-generate-api-key"
description = "Enables the generate_api_key command without any pre-configured scope."
commands.allow = ["generate_api_key"]
[[permission]]
identifier = "deny-generate-api-key"
description = "Denies the generate_api_key command without any pre-configured scope."
commands.deny = ["generate_api_key"]

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-get-all-sessions"
description = "Enables the get_all_sessions command without any pre-configured scope."
commands.allow = ["get_all_sessions"]
[[permission]]
identifier = "deny-get-all-sessions"
description = "Denies the get_all_sessions command without any pre-configured scope."
commands.deny = ["get_all_sessions"]

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-get-devices"
description = "Enables the get_devices command without any pre-configured scope."
commands.allow = ["get_devices"]
[[permission]]
identifier = "deny-get-devices"
description = "Denies the get_devices command without any pre-configured scope."
commands.deny = ["get_devices"]

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-get-loaded-models"
description = "Enables the get_loaded_models command without any pre-configured scope."
commands.allow = ["get_loaded_models"]
[[permission]]
identifier = "deny-get-loaded-models"
description = "Denies the get_loaded_models command without any pre-configured scope."
commands.deny = ["get_loaded_models"]

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-get-random-port"
description = "Enables the get_random_port command without any pre-configured scope."
commands.allow = ["get_random_port"]
[[permission]]
identifier = "deny-get-random-port"
description = "Denies the get_random_port command without any pre-configured scope."
commands.deny = ["get_random_port"]

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-get-session-by-model"
description = "Enables the get_session_by_model command without any pre-configured scope."
commands.allow = ["get_session_by_model"]
[[permission]]
identifier = "deny-get-session-by-model"
description = "Denies the get_session_by_model command without any pre-configured scope."
commands.deny = ["get_session_by_model"]

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-is-process-running"
description = "Enables the is_process_running command without any pre-configured scope."
commands.allow = ["is_process_running"]
[[permission]]
identifier = "deny-is-process-running"
description = "Denies the is_process_running command without any pre-configured scope."
commands.deny = ["is_process_running"]

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-load-llama-model"
description = "Enables the load_llama_model command without any pre-configured scope."
commands.allow = ["load_llama_model"]
[[permission]]
identifier = "deny-load-llama-model"
description = "Denies the load_llama_model command without any pre-configured scope."
commands.deny = ["load_llama_model"]

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-read-gguf-metadata"
description = "Enables the read_gguf_metadata command without any pre-configured scope."
commands.allow = ["read_gguf_metadata"]
[[permission]]
identifier = "deny-read-gguf-metadata"
description = "Denies the read_gguf_metadata command without any pre-configured scope."
commands.deny = ["read_gguf_metadata"]

View File

@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-unload-llama-model"
description = "Enables the unload_llama_model command without any pre-configured scope."
commands.allow = ["unload_llama_model"]
[[permission]]
identifier = "deny-unload-llama-model"
description = "Denies the unload_llama_model command without any pre-configured scope."
commands.deny = ["unload_llama_model"]

View File

@ -0,0 +1,340 @@
## Default Permission
Default permissions for the llamacpp plugin
#### This default permission set includes the following:
- `allow-cleanup-llama-processes`
- `allow-load-llama-model`
- `allow-unload-llama-model`
- `allow-get-devices`
- `allow-generate-api-key`
- `allow-is-process-running`
- `allow-get-random-port`
- `allow-find-session-by-model`
- `allow-get-loaded-models`
- `allow-get-all-sessions`
- `allow-get-session-by-model`
- `allow-read-gguf-metadata`
## Permission Table
<table>
<tr>
<th>Identifier</th>
<th>Description</th>
</tr>
<tr>
<td>
`llamacpp:allow-cleanup-llama-processes`
</td>
<td>
Enables the cleanup_llama_processes command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:deny-cleanup-llama-processes`
</td>
<td>
Denies the cleanup_llama_processes command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:allow-find-session-by-model`
</td>
<td>
Enables the find_session_by_model command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:deny-find-session-by-model`
</td>
<td>
Denies the find_session_by_model command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:allow-generate-api-key`
</td>
<td>
Enables the generate_api_key command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:deny-generate-api-key`
</td>
<td>
Denies the generate_api_key command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:allow-get-all-sessions`
</td>
<td>
Enables the get_all_sessions command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:deny-get-all-sessions`
</td>
<td>
Denies the get_all_sessions command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:allow-get-devices`
</td>
<td>
Enables the get_devices command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:deny-get-devices`
</td>
<td>
Denies the get_devices command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:allow-get-loaded-models`
</td>
<td>
Enables the get_loaded_models command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:deny-get-loaded-models`
</td>
<td>
Denies the get_loaded_models command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:allow-get-random-port`
</td>
<td>
Enables the get_random_port command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:deny-get-random-port`
</td>
<td>
Denies the get_random_port command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:allow-get-session-by-model`
</td>
<td>
Enables the get_session_by_model command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:deny-get-session-by-model`
</td>
<td>
Denies the get_session_by_model command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:allow-is-process-running`
</td>
<td>
Enables the is_process_running command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:deny-is-process-running`
</td>
<td>
Denies the is_process_running command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:allow-load-llama-model`
</td>
<td>
Enables the load_llama_model command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:deny-load-llama-model`
</td>
<td>
Denies the load_llama_model command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:allow-read-gguf-metadata`
</td>
<td>
Enables the read_gguf_metadata command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:deny-read-gguf-metadata`
</td>
<td>
Denies the read_gguf_metadata command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:allow-unload-llama-model`
</td>
<td>
Enables the unload_llama_model command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`llamacpp:deny-unload-llama-model`
</td>
<td>
Denies the unload_llama_model command without any pre-configured scope.
</td>
</tr>
</table>

View File

@ -0,0 +1,21 @@
[default]
description = "Default permissions for the llamacpp plugin"
permissions = [
# Cleanup commands
"allow-cleanup-llama-processes",
# LlamaCpp server commands
"allow-load-llama-model",
"allow-unload-llama-model",
"allow-get-devices",
"allow-generate-api-key",
"allow-is-process-running",
"allow-get-random-port",
"allow-find-session-by-model",
"allow-get-loaded-models",
"allow-get-all-sessions",
"allow-get-session-by-model",
# GGUF commands
"allow-read-gguf-metadata"
]

View File

@ -0,0 +1,450 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "PermissionFile",
"description": "Permission file that can define a default permission, a set of permissions or a list of inlined permissions.",
"type": "object",
"properties": {
"default": {
"description": "The default permission set for the plugin",
"anyOf": [
{
"$ref": "#/definitions/DefaultPermission"
},
{
"type": "null"
}
]
},
"set": {
"description": "A list of permissions sets defined",
"type": "array",
"items": {
"$ref": "#/definitions/PermissionSet"
}
},
"permission": {
"description": "A list of inlined permissions",
"default": [],
"type": "array",
"items": {
"$ref": "#/definitions/Permission"
}
}
},
"definitions": {
"DefaultPermission": {
"description": "The default permission set of the plugin.\n\nWorks similarly to a permission with the \"default\" identifier.",
"type": "object",
"required": [
"permissions"
],
"properties": {
"version": {
"description": "The version of the permission.",
"type": [
"integer",
"null"
],
"format": "uint64",
"minimum": 1.0
},
"description": {
"description": "Human-readable description of what the permission does. Tauri convention is to use `<h4>` headings in markdown content for Tauri documentation generation purposes.",
"type": [
"string",
"null"
]
},
"permissions": {
"description": "All permissions this set contains.",
"type": "array",
"items": {
"type": "string"
}
}
}
},
"PermissionSet": {
"description": "A set of direct permissions grouped together under a new name.",
"type": "object",
"required": [
"description",
"identifier",
"permissions"
],
"properties": {
"identifier": {
"description": "A unique identifier for the permission.",
"type": "string"
},
"description": {
"description": "Human-readable description of what the permission does.",
"type": "string"
},
"permissions": {
"description": "All permissions this set contains.",
"type": "array",
"items": {
"$ref": "#/definitions/PermissionKind"
}
}
}
},
"Permission": {
"description": "Descriptions of explicit privileges of commands.\n\nIt can enable commands to be accessible in the frontend of the application.\n\nIf the scope is defined it can be used to fine grain control the access of individual or multiple commands.",
"type": "object",
"required": [
"identifier"
],
"properties": {
"version": {
"description": "The version of the permission.",
"type": [
"integer",
"null"
],
"format": "uint64",
"minimum": 1.0
},
"identifier": {
"description": "A unique identifier for the permission.",
"type": "string"
},
"description": {
"description": "Human-readable description of what the permission does. Tauri internal convention is to use `<h4>` headings in markdown content for Tauri documentation generation purposes.",
"type": [
"string",
"null"
]
},
"commands": {
"description": "Allowed or denied commands when using this permission.",
"default": {
"allow": [],
"deny": []
},
"allOf": [
{
"$ref": "#/definitions/Commands"
}
]
},
"scope": {
"description": "Allowed or denied scoped when using this permission.",
"allOf": [
{
"$ref": "#/definitions/Scopes"
}
]
},
"platforms": {
"description": "Target platforms this permission applies. By default all platforms are affected by this permission.",
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/Target"
}
}
}
},
"Commands": {
"description": "Allowed and denied commands inside a permission.\n\nIf two commands clash inside of `allow` and `deny`, it should be denied by default.",
"type": "object",
"properties": {
"allow": {
"description": "Allowed command.",
"default": [],
"type": "array",
"items": {
"type": "string"
}
},
"deny": {
"description": "Denied command, which takes priority.",
"default": [],
"type": "array",
"items": {
"type": "string"
}
}
}
},
"Scopes": {
"description": "An argument for fine grained behavior control of Tauri commands.\n\nIt can be of any serde serializable type and is used to allow or prevent certain actions inside a Tauri command. The configured scope is passed to the command and will be enforced by the command implementation.\n\n## Example\n\n```json { \"allow\": [{ \"path\": \"$HOME/**\" }], \"deny\": [{ \"path\": \"$HOME/secret.txt\" }] } ```",
"type": "object",
"properties": {
"allow": {
"description": "Data that defines what is allowed by the scope.",
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/Value"
}
},
"deny": {
"description": "Data that defines what is denied by the scope. This should be prioritized by validation logic.",
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/Value"
}
}
}
},
"Value": {
"description": "All supported ACL values.",
"anyOf": [
{
"description": "Represents a null JSON value.",
"type": "null"
},
{
"description": "Represents a [`bool`].",
"type": "boolean"
},
{
"description": "Represents a valid ACL [`Number`].",
"allOf": [
{
"$ref": "#/definitions/Number"
}
]
},
{
"description": "Represents a [`String`].",
"type": "string"
},
{
"description": "Represents a list of other [`Value`]s.",
"type": "array",
"items": {
"$ref": "#/definitions/Value"
}
},
{
"description": "Represents a map of [`String`] keys to [`Value`]s.",
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/Value"
}
}
]
},
"Number": {
"description": "A valid ACL number.",
"anyOf": [
{
"description": "Represents an [`i64`].",
"type": "integer",
"format": "int64"
},
{
"description": "Represents a [`f64`].",
"type": "number",
"format": "double"
}
]
},
"Target": {
"description": "Platform target.",
"oneOf": [
{
"description": "MacOS.",
"type": "string",
"enum": [
"macOS"
]
},
{
"description": "Windows.",
"type": "string",
"enum": [
"windows"
]
},
{
"description": "Linux.",
"type": "string",
"enum": [
"linux"
]
},
{
"description": "Android.",
"type": "string",
"enum": [
"android"
]
},
{
"description": "iOS.",
"type": "string",
"enum": [
"iOS"
]
}
]
},
"PermissionKind": {
"type": "string",
"oneOf": [
{
"description": "Enables the cleanup_llama_processes command without any pre-configured scope.",
"type": "string",
"const": "allow-cleanup-llama-processes",
"markdownDescription": "Enables the cleanup_llama_processes command without any pre-configured scope."
},
{
"description": "Denies the cleanup_llama_processes command without any pre-configured scope.",
"type": "string",
"const": "deny-cleanup-llama-processes",
"markdownDescription": "Denies the cleanup_llama_processes command without any pre-configured scope."
},
{
"description": "Enables the find_session_by_model command without any pre-configured scope.",
"type": "string",
"const": "allow-find-session-by-model",
"markdownDescription": "Enables the find_session_by_model command without any pre-configured scope."
},
{
"description": "Denies the find_session_by_model command without any pre-configured scope.",
"type": "string",
"const": "deny-find-session-by-model",
"markdownDescription": "Denies the find_session_by_model command without any pre-configured scope."
},
{
"description": "Enables the generate_api_key command without any pre-configured scope.",
"type": "string",
"const": "allow-generate-api-key",
"markdownDescription": "Enables the generate_api_key command without any pre-configured scope."
},
{
"description": "Denies the generate_api_key command without any pre-configured scope.",
"type": "string",
"const": "deny-generate-api-key",
"markdownDescription": "Denies the generate_api_key command without any pre-configured scope."
},
{
"description": "Enables the get_all_sessions command without any pre-configured scope.",
"type": "string",
"const": "allow-get-all-sessions",
"markdownDescription": "Enables the get_all_sessions command without any pre-configured scope."
},
{
"description": "Denies the get_all_sessions command without any pre-configured scope.",
"type": "string",
"const": "deny-get-all-sessions",
"markdownDescription": "Denies the get_all_sessions command without any pre-configured scope."
},
{
"description": "Enables the get_devices command without any pre-configured scope.",
"type": "string",
"const": "allow-get-devices",
"markdownDescription": "Enables the get_devices command without any pre-configured scope."
},
{
"description": "Denies the get_devices command without any pre-configured scope.",
"type": "string",
"const": "deny-get-devices",
"markdownDescription": "Denies the get_devices command without any pre-configured scope."
},
{
"description": "Enables the get_loaded_models command without any pre-configured scope.",
"type": "string",
"const": "allow-get-loaded-models",
"markdownDescription": "Enables the get_loaded_models command without any pre-configured scope."
},
{
"description": "Denies the get_loaded_models command without any pre-configured scope.",
"type": "string",
"const": "deny-get-loaded-models",
"markdownDescription": "Denies the get_loaded_models command without any pre-configured scope."
},
{
"description": "Enables the get_random_port command without any pre-configured scope.",
"type": "string",
"const": "allow-get-random-port",
"markdownDescription": "Enables the get_random_port command without any pre-configured scope."
},
{
"description": "Denies the get_random_port command without any pre-configured scope.",
"type": "string",
"const": "deny-get-random-port",
"markdownDescription": "Denies the get_random_port command without any pre-configured scope."
},
{
"description": "Enables the get_session_by_model command without any pre-configured scope.",
"type": "string",
"const": "allow-get-session-by-model",
"markdownDescription": "Enables the get_session_by_model command without any pre-configured scope."
},
{
"description": "Denies the get_session_by_model command without any pre-configured scope.",
"type": "string",
"const": "deny-get-session-by-model",
"markdownDescription": "Denies the get_session_by_model command without any pre-configured scope."
},
{
"description": "Enables the is_process_running command without any pre-configured scope.",
"type": "string",
"const": "allow-is-process-running",
"markdownDescription": "Enables the is_process_running command without any pre-configured scope."
},
{
"description": "Denies the is_process_running command without any pre-configured scope.",
"type": "string",
"const": "deny-is-process-running",
"markdownDescription": "Denies the is_process_running command without any pre-configured scope."
},
{
"description": "Enables the load_llama_model command without any pre-configured scope.",
"type": "string",
"const": "allow-load-llama-model",
"markdownDescription": "Enables the load_llama_model command without any pre-configured scope."
},
{
"description": "Denies the load_llama_model command without any pre-configured scope.",
"type": "string",
"const": "deny-load-llama-model",
"markdownDescription": "Denies the load_llama_model command without any pre-configured scope."
},
{
"description": "Enables the read_gguf_metadata command without any pre-configured scope.",
"type": "string",
"const": "allow-read-gguf-metadata",
"markdownDescription": "Enables the read_gguf_metadata command without any pre-configured scope."
},
{
"description": "Denies the read_gguf_metadata command without any pre-configured scope.",
"type": "string",
"const": "deny-read-gguf-metadata",
"markdownDescription": "Denies the read_gguf_metadata command without any pre-configured scope."
},
{
"description": "Enables the unload_llama_model command without any pre-configured scope.",
"type": "string",
"const": "allow-unload-llama-model",
"markdownDescription": "Enables the unload_llama_model command without any pre-configured scope."
},
{
"description": "Denies the unload_llama_model command without any pre-configured scope.",
"type": "string",
"const": "deny-unload-llama-model",
"markdownDescription": "Denies the unload_llama_model command without any pre-configured scope."
},
{
"description": "Default permissions for the llamacpp plugin\n#### This default permission set includes:\n\n- `allow-cleanup-llama-processes`\n- `allow-load-llama-model`\n- `allow-unload-llama-model`\n- `allow-get-devices`\n- `allow-generate-api-key`\n- `allow-is-process-running`\n- `allow-get-random-port`\n- `allow-find-session-by-model`\n- `allow-get-loaded-models`\n- `allow-get-all-sessions`\n- `allow-get-session-by-model`\n- `allow-read-gguf-metadata`",
"type": "string",
"const": "default",
"markdownDescription": "Default permissions for the llamacpp plugin\n#### This default permission set includes:\n\n- `allow-cleanup-llama-processes`\n- `allow-load-llama-model`\n- `allow-unload-llama-model`\n- `allow-get-devices`\n- `allow-generate-api-key`\n- `allow-is-process-running`\n- `allow-get-random-port`\n- `allow-find-session-by-model`\n- `allow-get-loaded-models`\n- `allow-get-all-sessions`\n- `allow-get-session-by-model`\n- `allow-read-gguf-metadata`"
}
]
}
}
}

View File

@ -0,0 +1,31 @@
import { readFileSync } from 'node:fs'
import { dirname, join } from 'node:path'
import { cwd } from 'node:process'
import typescript from '@rollup/plugin-typescript'
const pkg = JSON.parse(readFileSync(join(cwd(), 'package.json'), 'utf8'))
export default {
input: 'guest-js/index.ts',
output: [
{
file: pkg.exports.import,
format: 'esm'
},
{
file: pkg.exports.require,
format: 'cjs'
}
],
plugins: [
typescript({
declaration: true,
declarationDir: dirname(pkg.exports.import)
})
],
external: [
/^@tauri-apps\/api/,
...Object.keys(pkg.dependencies || {}),
...Object.keys(pkg.peerDependencies || {})
]
}
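This Rollup config bundles `guest-js/index.ts` into the ESM and CJS entry points declared under the package's `exports`, emitting type declarations alongside the ESM build. A hedged sketch of what such a guest-js entry might contain (the wrapper name below is illustrative, not necessarily the package's actual API surface):

```typescript
// guest-js/index.ts (illustrative sketch only)
import { invoke } from '@tauri-apps/api/core'

// Thin typed wrapper around one plugin command; the bundler above emits it
// as both ESM and CJS together with a .d.ts declaration.
export async function isProcessRunning(pid: number): Promise<boolean> {
  return await invoke<boolean>('plugin:llamacpp|is_process_running', { pid })
}
```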

View File

@ -1,8 +1,15 @@
use crate::core::state::AppState;
use tauri::State;
use tauri::{Manager, Runtime};
pub async fn cleanup_processes(state: State<'_, AppState>) {
let mut map = state.llama_server_process.lock().await;
pub async fn cleanup_processes<R: Runtime>(app_handle: &tauri::AppHandle<R>) {
// Access the plugin-managed LlamacppState from the app handle
let app_state = match app_handle.try_state::<crate::state::LlamacppState>() {
Some(state) => state,
None => {
log::warn!("LlamacppState not found in app_handle");
return;
}
};
let mut map = app_state.llama_server_process.lock().await;
let pids: Vec<i32> = map.keys().cloned().collect();
for pid in pids {
if let Some(session) = map.remove(&pid) {
@ -64,3 +71,11 @@ pub async fn cleanup_processes(state: State<'_, AppState>) {
}
}
}
#[tauri::command]
pub async fn cleanup_llama_processes<R: Runtime>(
app_handle: tauri::AppHandle<R>,
) -> Result<(), String> {
cleanup_processes(&app_handle).await;
Ok(())
}
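Exposing cleanup as a `#[tauri::command]` also lets the webview trigger it directly, for example before the window closes; a small hedged sketch:

```typescript
// Sketch: invoking the cleanup command from the webview (e.g. on shutdown).
import { invoke } from '@tauri-apps/api/core'

export async function cleanupLlamaProcesses(): Promise<void> {
  await invoke('plugin:llamacpp|cleanup_llama_processes')
}
```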

View File

@ -0,0 +1,319 @@
use base64::{engine::general_purpose, Engine as _};
use hmac::{Hmac, Mac};
use sha2::Sha256;
use std::process::Stdio;
use std::time::Duration;
use tauri::{Manager, Runtime, State};
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::Command;
use tokio::sync::mpsc;
use tokio::time::Instant;
use crate::device::{get_devices_from_backend, DeviceInfo};
use crate::error::{ErrorCode, LlamacppError, ServerError, ServerResult};
use crate::path::{validate_binary_path, validate_model_path};
use crate::process::{
find_session_by_model_id, get_all_active_sessions, get_all_loaded_model_ids,
get_random_available_port, is_process_running_by_pid,
};
use crate::state::{LLamaBackendSession, LlamacppState, SessionInfo};
use jan_utils::{
extract_arg_value, parse_port_from_args, setup_library_path, setup_windows_process_flags,
};
#[cfg(unix)]
use crate::process::graceful_terminate_process;
#[cfg(all(windows, target_arch = "x86_64"))]
use crate::process::force_terminate_process;
type HmacSha256 = Hmac<Sha256>;
#[derive(serde::Serialize, serde::Deserialize)]
pub struct UnloadResult {
success: bool,
error: Option<String>,
}
/// Load a llama model and start the server
#[tauri::command]
pub async fn load_llama_model<R: Runtime>(
app_handle: tauri::AppHandle<R>,
backend_path: &str,
library_path: Option<&str>,
mut args: Vec<String>,
) -> ServerResult<SessionInfo> {
let state: State<LlamacppState> = app_handle.state();
let mut process_map = state.llama_server_process.lock().await;
log::info!("Attempting to launch server at path: {:?}", backend_path);
log::info!("Using arguments: {:?}", args);
validate_binary_path(backend_path)?;
let port = parse_port_from_args(&args);
let model_path_pb = validate_model_path(&mut args)?;
let api_key = extract_arg_value(&args, "--api-key");
let model_id = extract_arg_value(&args, "-a");
// Configure the command to run the server
let mut command = Command::new(backend_path);
command.args(args);
setup_library_path(library_path, &mut command);
command.stdout(Stdio::piped());
command.stderr(Stdio::piped());
setup_windows_process_flags(&mut command);
// Spawn the child process
let mut child = command.spawn().map_err(ServerError::Io)?;
let stderr = child.stderr.take().expect("stderr was piped");
let stdout = child.stdout.take().expect("stdout was piped");
// Create channels for communication between tasks
let (ready_tx, mut ready_rx) = mpsc::channel::<bool>(1);
// Spawn task to monitor stdout for readiness
let _stdout_task = tokio::spawn(async move {
let mut reader = BufReader::new(stdout);
let mut byte_buffer = Vec::new();
loop {
byte_buffer.clear();
match reader.read_until(b'\n', &mut byte_buffer).await {
Ok(0) => break, // EOF
Ok(_) => {
let line = String::from_utf8_lossy(&byte_buffer);
let line = line.trim_end();
if !line.is_empty() {
log::info!("[llamacpp stdout] {}", line);
}
}
Err(e) => {
log::error!("Error reading stdout: {}", e);
break;
}
}
}
});
// Spawn task to capture stderr and monitor for errors
let stderr_task = tokio::spawn(async move {
let mut reader = BufReader::new(stderr);
let mut byte_buffer = Vec::new();
let mut stderr_buffer = String::new();
loop {
byte_buffer.clear();
match reader.read_until(b'\n', &mut byte_buffer).await {
Ok(0) => break, // EOF
Ok(_) => {
let line = String::from_utf8_lossy(&byte_buffer);
let line = line.trim_end();
if !line.is_empty() {
stderr_buffer.push_str(line);
stderr_buffer.push('\n');
log::info!("[llamacpp] {}", line);
// Check for readiness indicator - llama-server outputs this when ready
let line_lower = line.to_string().to_lowercase();
if line_lower.contains("server is listening on")
|| line_lower.contains("starting the main loop")
|| line_lower.contains("server listening on")
{
log::info!("Model appears to be ready based on logs: '{}'", line);
let _ = ready_tx.send(true).await;
}
}
}
Err(e) => {
log::error!("Error reading logs: {}", e);
break;
}
}
}
stderr_buffer
});
// Check if process exited early
if let Some(status) = child.try_wait()? {
if !status.success() {
let stderr_output = stderr_task.await.unwrap_or_default();
log::error!("llama.cpp failed early with code {:?}", status);
log::error!("{}", stderr_output);
return Err(LlamacppError::from_stderr(&stderr_output).into());
}
}
// Wait for server to be ready or timeout
let timeout_duration = Duration::from_secs(180); // 3 minutes timeout
let start_time = Instant::now();
log::info!("Waiting for model session to be ready...");
loop {
tokio::select! {
// Server is ready
Some(true) = ready_rx.recv() => {
log::info!("Model is ready to accept requests!");
break;
}
// Check for process exit more frequently
_ = tokio::time::sleep(Duration::from_millis(50)) => {
// Check if process exited
if let Some(status) = child.try_wait()? {
let stderr_output = stderr_task.await.unwrap_or_default();
if !status.success() {
log::error!("llama.cpp exited with error code {:?}", status);
return Err(LlamacppError::from_stderr(&stderr_output).into());
} else {
log::error!("llama.cpp exited successfully but without ready signal");
return Err(LlamacppError::from_stderr(&stderr_output).into());
}
}
// Timeout check
if start_time.elapsed() > timeout_duration {
log::error!("Timeout waiting for server to be ready");
let _ = child.kill().await;
let stderr_output = stderr_task.await.unwrap_or_default();
return Err(LlamacppError::new(
ErrorCode::ModelLoadTimedOut,
"The model took too long to load and timed out.".into(),
Some(format!("Timeout: {}s\n\nStderr:\n{}", timeout_duration.as_secs(), stderr_output)),
).into());
}
}
}
}
// Get the PID to use as session ID
let pid = child.id().map(|id| id as i32).unwrap_or(-1);
log::info!("Server process started with PID: {} and is ready", pid);
let session_info = SessionInfo {
pid: pid.clone(),
port: port,
model_id: model_id,
model_path: model_path_pb.display().to_string(),
api_key: api_key,
};
// Insert session info to process_map
process_map.insert(
pid.clone(),
LLamaBackendSession {
child,
info: session_info.clone(),
},
);
Ok(session_info)
}
/// Unload a llama model by terminating its process
#[tauri::command]
pub async fn unload_llama_model<R: Runtime>(
app_handle: tauri::AppHandle<R>,
pid: i32,
) -> ServerResult<UnloadResult> {
let state: State<LlamacppState> = app_handle.state();
let mut map = state.llama_server_process.lock().await;
if let Some(session) = map.remove(&pid) {
let mut child = session.child;
#[cfg(unix)]
{
graceful_terminate_process(&mut child).await;
}
#[cfg(all(windows, target_arch = "x86_64"))]
{
force_terminate_process(&mut child).await;
}
Ok(UnloadResult {
success: true,
error: None,
})
} else {
log::warn!("No server with PID '{}' found", pid);
Ok(UnloadResult {
success: true,
error: None,
})
}
}
/// Get available devices from the llama.cpp backend
#[tauri::command]
pub async fn get_devices(
backend_path: &str,
library_path: Option<&str>,
) -> ServerResult<Vec<DeviceInfo>> {
get_devices_from_backend(backend_path, library_path).await
}
/// Generate API key using HMAC-SHA256
#[tauri::command]
pub fn generate_api_key(model_id: String, api_secret: String) -> Result<String, String> {
let mut mac = HmacSha256::new_from_slice(api_secret.as_bytes())
.map_err(|e| format!("Invalid key length: {}", e))?;
mac.update(model_id.as_bytes());
let result = mac.finalize();
let code_bytes = result.into_bytes();
let hash = general_purpose::STANDARD.encode(code_bytes);
Ok(hash)
}
/// Check if a process is still running
#[tauri::command]
pub async fn is_process_running<R: Runtime>(
app_handle: tauri::AppHandle<R>,
pid: i32,
) -> Result<bool, String> {
is_process_running_by_pid(app_handle, pid).await
}
/// Get a random available port
#[tauri::command]
pub async fn get_random_port<R: Runtime>(app_handle: tauri::AppHandle<R>) -> Result<u16, String> {
get_random_available_port(app_handle).await
}
/// Find session information by model ID
#[tauri::command]
pub async fn find_session_by_model<R: Runtime>(
app_handle: tauri::AppHandle<R>,
model_id: String,
) -> Result<Option<SessionInfo>, String> {
find_session_by_model_id(app_handle, &model_id).await
}
/// Get all loaded model IDs
#[tauri::command]
pub async fn get_loaded_models<R: Runtime>(
app_handle: tauri::AppHandle<R>,
) -> Result<Vec<String>, String> {
get_all_loaded_model_ids(app_handle).await
}
/// Get all active sessions
#[tauri::command]
pub async fn get_all_sessions<R: Runtime>(
app_handle: tauri::AppHandle<R>,
) -> Result<Vec<SessionInfo>, String> {
get_all_active_sessions(app_handle).await
}
/// Get session information by model ID
#[tauri::command]
pub async fn get_session_by_model<R: Runtime>(
app_handle: tauri::AppHandle<R>,
model_id: String,
) -> Result<Option<SessionInfo>, String> {
find_session_by_model_id(app_handle, &model_id).await
}
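Taken together, these commands give the extension a full load/unload round trip. A hedged sketch of that flow from the webview, assuming the argument vector mirrors llama-server CLI flags (the binary and model paths below are placeholders):

```typescript
// Sketch of a load/unload round trip; paths are placeholders.
import { invoke } from '@tauri-apps/api/core'

interface SessionInfo {
  pid: number
  port: number
  model_id: string
  model_path: string
  api_key: string
}

export async function loadThenUnload(): Promise<void> {
  const session = await invoke<SessionInfo>('plugin:llamacpp|load_llama_model', {
    backendPath: '/path/to/llama-server', // placeholder
    libraryPath: null,
    args: ['-m', '/path/to/model.gguf', '-a', 'my-model', '--port', '3930'],
  })
  console.log(`model ${session.model_id} ready on port ${session.port} (pid ${session.pid})`)

  const result = await invoke<{ success: boolean; error: string | null }>(
    'plugin:llamacpp|unload_llama_model',
    { pid: session.pid }
  )
  if (!result.success) console.error('unload failed:', result.error)
}
```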

View File

@ -0,0 +1,227 @@
use serde::{Deserialize, Serialize};
use std::process::Stdio;
use std::time::Duration;
use tokio::process::Command;
use tokio::time::timeout;
use crate::error::{ErrorCode, LlamacppError, ServerError, ServerResult};
use crate::path::validate_binary_path;
use jan_utils::{setup_library_path, setup_windows_process_flags};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeviceInfo {
pub id: String,
pub name: String,
pub mem: i32,
pub free: i32,
}
pub async fn get_devices_from_backend(
backend_path: &str,
library_path: Option<&str>,
) -> ServerResult<Vec<DeviceInfo>> {
log::info!("Getting devices from server at path: {:?}", backend_path);
validate_binary_path(backend_path)?;
// Configure the command to run the server with --list-devices
let mut command = Command::new(backend_path);
command.arg("--list-devices");
// Set up library path
setup_library_path(library_path, &mut command);
command.stdout(Stdio::piped());
command.stderr(Stdio::piped());
setup_windows_process_flags(&mut command);
// Execute the command and wait for completion
let output = timeout(Duration::from_secs(30), command.output())
.await
.map_err(|_| {
LlamacppError::new(
ErrorCode::InternalError,
"Timeout waiting for device list".into(),
None,
)
})?
.map_err(ServerError::Io)?;
// Check if command executed successfully
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
log::error!("llama-server --list-devices failed: {}", stderr);
return Err(LlamacppError::from_stderr(&stderr).into());
}
// Parse the output
let stdout = String::from_utf8_lossy(&output.stdout);
log::info!("Device list output:\n{}", stdout);
parse_device_output(&stdout)
}
fn parse_device_output(output: &str) -> ServerResult<Vec<DeviceInfo>> {
let mut devices = Vec::new();
let mut found_devices_section = false;
for raw in output.lines() {
// detect header (ignoring whitespace)
if raw.trim() == "Available devices:" {
found_devices_section = true;
continue;
}
if !found_devices_section {
continue;
}
// skip blank lines
if raw.trim().is_empty() {
continue;
}
// now parse any non-blank line after the header
let line = raw.trim();
if let Some(device) = parse_device_line(line)? {
devices.push(device);
}
}
if devices.is_empty() && found_devices_section {
log::warn!("No devices found in output");
} else if !found_devices_section {
return Err(LlamacppError::new(
ErrorCode::DeviceListParseFailed,
"Could not find 'Available devices:' section in the backend output.".into(),
Some(output.to_string()),
)
.into());
}
Ok(devices)
}
fn parse_device_line(line: &str) -> ServerResult<Option<DeviceInfo>> {
let line = line.trim();
log::info!("Parsing device line: '{}'", line);
// Expected formats:
// "Vulkan0: Intel(R) Arc(tm) A750 Graphics (DG2) (8128 MiB, 8128 MiB free)"
// "CUDA0: NVIDIA GeForce RTX 4090 (24576 MiB, 24000 MiB free)"
// "SYCL0: Intel(R) Arc(TM) A750 Graphics (8000 MiB, 7721 MiB free)"
// Split by colon to get ID and rest
let parts: Vec<&str> = line.splitn(2, ':').collect();
if parts.len() != 2 {
log::warn!("Skipping malformed device line: {}", line);
return Ok(None);
}
let id = parts[0].trim().to_string();
let rest = parts[1].trim();
// Use regex-like approach to find the memory pattern at the end
// Look for pattern: (number MiB, number MiB free) at the end
if let Some(memory_match) = find_memory_pattern(rest) {
let (memory_start, memory_content) = memory_match;
let name = rest[..memory_start].trim().to_string();
// Parse memory info: "8128 MiB, 8128 MiB free"
let memory_parts: Vec<&str> = memory_content.split(',').collect();
if memory_parts.len() >= 2 {
if let (Ok(total_mem), Ok(free_mem)) = (
parse_memory_value(memory_parts[0].trim()),
parse_memory_value(memory_parts[1].trim()),
) {
log::info!(
"Parsed device - ID: '{}', Name: '{}', Mem: {}, Free: {}",
id,
name,
total_mem,
free_mem
);
return Ok(Some(DeviceInfo {
id,
name,
mem: total_mem,
free: free_mem,
}));
}
}
}
log::warn!("Could not parse device line: {}", line);
Ok(None)
}
fn find_memory_pattern(text: &str) -> Option<(usize, &str)> {
// Find the last parenthesis that contains the memory pattern
let mut last_match = None;
let mut chars = text.char_indices().peekable();
while let Some((start_idx, ch)) = chars.next() {
if ch == '(' {
// Find the closing parenthesis
let remaining = &text[start_idx + 1..];
if let Some(close_pos) = remaining.find(')') {
let content = &remaining[..close_pos];
// Check if this looks like memory info
if is_memory_pattern(content) {
last_match = Some((start_idx, content));
}
}
}
}
last_match
}
fn is_memory_pattern(content: &str) -> bool {
// Check if content matches pattern like "8128 MiB, 8128 MiB free"
// Must contain: numbers, "MiB", comma, "free"
if !(content.contains("MiB") && content.contains("free") && content.contains(',')) {
return false;
}
let parts: Vec<&str> = content.split(',').collect();
if parts.len() != 2 {
return false;
}
parts.iter().all(|part| {
let part = part.trim();
// Each part should start with a number and contain "MiB"
part.split_whitespace()
.next()
.map_or(false, |first_word| first_word.parse::<i32>().is_ok())
&& part.contains("MiB")
})
}
fn parse_memory_value(mem_str: &str) -> ServerResult<i32> {
// Handle formats like "8000 MiB" or "7721 MiB free"
let parts: Vec<&str> = mem_str.split_whitespace().collect();
if parts.is_empty() {
return Err(LlamacppError::new(
ErrorCode::DeviceListParseFailed,
format!("empty memory value: {}", mem_str),
None,
)
.into());
}
// Take the first part which should be the number
let number_str = parts[0];
number_str.parse::<i32>().map_err(|_| {
LlamacppError::new(
ErrorCode::DeviceListParseFailed,
format!("Could not parse memory value: '{}'", number_str),
None,
)
.into()
})
}
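From the webview side this parsing is hidden behind `get_devices`, which resolves to an array shaped like `DeviceInfo`. A hedged usage sketch (the backend path argument is a placeholder):

```typescript
// Sketch: listing devices; field names mirror the DeviceInfo struct above.
import { invoke } from '@tauri-apps/api/core'

interface DeviceInfo {
  id: string    // e.g. "Vulkan0"
  name: string  // e.g. "Intel(R) Arc(tm) A750 Graphics (DG2)"
  mem: number   // total memory in MiB
  free: number  // free memory in MiB
}

export async function listDevices(backendPath: string): Promise<DeviceInfo[]> {
  return await invoke<DeviceInfo[]>('plugin:llamacpp|get_devices', {
    backendPath,
    libraryPath: null,
  })
}
```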

View File

@ -0,0 +1,115 @@
use serde::{Deserialize, Serialize};
use thiserror;
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum ErrorCode {
BinaryNotFound,
ModelFileNotFound,
LibraryPathInvalid,
// --- Model Loading Errors ---
ModelLoadFailed,
DraftModelLoadFailed,
MultimodalProjectorLoadFailed,
ModelArchNotSupported,
ModelLoadTimedOut,
LlamaCppProcessError,
// --- Memory Errors ---
OutOfMemory,
// --- Internal Application Errors ---
DeviceListParseFailed,
IoError,
InternalError,
}
#[derive(Debug, Clone, Serialize, thiserror::Error)]
#[error("LlamacppError {{ code: {code:?}, message: \"{message}\" }}")]
pub struct LlamacppError {
pub code: ErrorCode,
pub message: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub details: Option<String>,
}
impl LlamacppError {
pub fn new(code: ErrorCode, message: String, details: Option<String>) -> Self {
Self {
code,
message,
details,
}
}
/// Parses stderr from llama.cpp and creates a specific LlamacppError.
pub fn from_stderr(stderr: &str) -> Self {
let lower_stderr = stderr.to_lowercase();
// TODO: add others
let is_out_of_memory = lower_stderr.contains("out of memory")
|| lower_stderr.contains("insufficient memory")
|| lower_stderr.contains("erroroutofdevicememory") // vulkan specific
|| lower_stderr.contains("kiogpucommandbuffercallbackerroroutofmemory") // Metal-specific error code
|| lower_stderr.contains("cuda_error_out_of_memory"); // CUDA-specific
if is_out_of_memory {
return Self::new(
ErrorCode::OutOfMemory,
"Out of memory. The model requires more RAM or VRAM than available.".into(),
Some(stderr.into()),
);
}
if lower_stderr.contains("error loading model architecture") {
return Self::new(
ErrorCode::ModelArchNotSupported,
"The model's architecture is not supported by this version of the backend.".into(),
Some(stderr.into()),
);
}
Self::new(
ErrorCode::LlamaCppProcessError,
"The model process encountered an unexpected error.".into(),
Some(stderr.into()),
)
}
}
// Error type for server commands
#[derive(Debug, thiserror::Error)]
pub enum ServerError {
#[error(transparent)]
Llamacpp(#[from] LlamacppError),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Tauri error: {0}")]
Tauri(#[from] tauri::Error),
}
// impl serialization for tauri
impl serde::Serialize for ServerError {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let error_to_serialize: LlamacppError = match self {
ServerError::Llamacpp(err) => err.clone(),
ServerError::Io(e) => LlamacppError::new(
ErrorCode::IoError,
"An input/output error occurred.".into(),
Some(e.to_string()),
),
ServerError::Tauri(e) => LlamacppError::new(
ErrorCode::InternalError,
"An internal application error occurred.".into(),
Some(e.to_string()),
),
};
error_to_serialize.serialize(serializer)
}
}
pub type ServerResult<T> = Result<T, ServerError>;
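Because `ServerError` always serializes down to the `LlamacppError` shape, a caller can branch on `code` when an invoke rejects. A hedged sketch of frontend-side handling (error codes follow the SCREAMING_SNAKE_CASE serialization of `ErrorCode` above):

```typescript
// Sketch: branching on the serialized error shape when a plugin command rejects.
import { invoke } from '@tauri-apps/api/core'

interface LlamacppError {
  code: string   // e.g. "OUT_OF_MEMORY", "MODEL_LOAD_TIMED_OUT"
  message: string
  details?: string
}

export async function loadWithErrorHandling(args: Record<string, unknown>) {
  try {
    return await invoke('plugin:llamacpp|load_llama_model', args)
  } catch (e) {
    const err = e as LlamacppError
    if (err.code === 'OUT_OF_MEMORY') {
      console.error('Model needs more RAM/VRAM than is available:', err.message)
    } else {
      console.error(`llama.cpp error ${err.code}:`, err.details ?? err.message)
    }
    throw e
  }
}
```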

View File

@ -0,0 +1,8 @@
use super::helpers;
use super::types::GgufMetadata;
/// Read GGUF metadata from a model file
#[tauri::command]
pub async fn read_gguf_metadata(path: String) -> Result<GgufMetadata, String> {
helpers::read_gguf_metadata(&path).map_err(|e| format!("Failed to read GGUF metadata: {}", e))
}

View File

@ -1,77 +1,28 @@
use byteorder::{LittleEndian, ReadBytesExt};
use serde::Serialize;
use std::{
collections::HashMap,
convert::TryFrom,
fs::File,
io::{self, Read, Seek, BufReader},
path::Path,
};
use std::convert::TryFrom;
use std::fs::File;
use std::io::{self, BufReader, Read, Seek};
use std::path::Path;
#[derive(Debug, Clone, Copy)]
#[repr(u32)]
enum GgufValueType {
Uint8 = 0,
Int8 = 1,
Uint16 = 2,
Int16 = 3,
Uint32 = 4,
Int32 = 5,
Float32 = 6,
Bool = 7,
String = 8,
Array = 9,
Uint64 = 10,
Int64 = 11,
Float64 = 12,
}
use super::types::{GgufMetadata, GgufValueType};
impl TryFrom<u32> for GgufValueType {
type Error = io::Error;
fn try_from(value: u32) -> Result<Self, Self::Error> {
match value {
0 => Ok(Self::Uint8),
1 => Ok(Self::Int8),
2 => Ok(Self::Uint16),
3 => Ok(Self::Int16),
4 => Ok(Self::Uint32),
5 => Ok(Self::Int32),
6 => Ok(Self::Float32),
7 => Ok(Self::Bool),
8 => Ok(Self::String),
9 => Ok(Self::Array),
10 => Ok(Self::Uint64),
11 => Ok(Self::Int64),
12 => Ok(Self::Float64),
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Unknown GGUF value type: {}", value),
)),
}
}
}
#[derive(Serialize)]
pub struct GgufMetadata {
version: u32,
tensor_count: u64,
metadata: HashMap<String, String>,
}
fn read_gguf_metadata_internal<P: AsRef<Path>>(path: P) -> io::Result<GgufMetadata> {
pub fn read_gguf_metadata<P: AsRef<Path>>(path: P) -> io::Result<GgufMetadata> {
let mut file = BufReader::new(File::open(path)?);
let mut magic = [0u8; 4];
file.read_exact(&mut magic)?;
if &magic != b"GGUF" {
return Err(io::Error::new(io::ErrorKind::InvalidData, "Not a GGUF file"));
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Not a GGUF file",
));
}
let version = file.read_u32::<LittleEndian>()?;
let tensor_count = file.read_u64::<LittleEndian>()?;
let metadata_count = file.read_u64::<LittleEndian>()?;
let mut metadata_map = HashMap::new();
let mut metadata_map = std::collections::HashMap::new();
for i in 0..metadata_count {
match read_metadata_entry(&mut file, i) {
Ok((key, value)) => {
@ -93,7 +44,10 @@ fn read_gguf_metadata_internal<P: AsRef<Path>>(path: P) -> io::Result<GgufMetada
})
}
fn read_metadata_entry<R: Read + Seek>(reader: &mut R, index: u64) -> io::Result<(String, String)> {
fn read_metadata_entry<R: Read + Seek>(reader: &mut R, index: u64) -> io::Result<(String, String)>
where
R: ReadBytesExt,
{
let key = read_gguf_string(reader).map_err(|e| {
io::Error::new(
io::ErrorKind::InvalidData,
@ -108,7 +62,10 @@ fn read_metadata_entry<R: Read + Seek>(reader: &mut R, index: u64) -> io::Result
Ok((key, value))
}
fn read_gguf_string<R: Read>(reader: &mut R) -> io::Result<String> {
fn read_gguf_string<R: Read>(reader: &mut R) -> io::Result<String>
where
R: ReadBytesExt,
{
let len = reader.read_u64::<LittleEndian>()?;
if len > (1024 * 1024) {
return Err(io::Error::new(
@ -121,10 +78,10 @@ fn read_gguf_string<R: Read>(reader: &mut R) -> io::Result<String> {
Ok(String::from_utf8(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?)
}
fn read_gguf_value<R: Read + Seek>(
reader: &mut R,
value_type: GgufValueType,
) -> io::Result<String> {
fn read_gguf_value<R: Read + Seek>(reader: &mut R, value_type: GgufValueType) -> io::Result<String>
where
R: ReadBytesExt,
{
match value_type {
GgufValueType::Uint8 => Ok(reader.read_u8()?.to_string()),
GgufValueType::Int8 => Ok(reader.read_i8()?.to_string()),
@ -171,7 +128,10 @@ fn skip_array_data<R: Read + Seek>(
reader: &mut R,
elem_type: GgufValueType,
len: u64,
) -> io::Result<()> {
) -> io::Result<()>
where
R: ReadBytesExt,
{
match elem_type {
GgufValueType::Uint8 | GgufValueType::Int8 | GgufValueType::Bool => {
reader.seek(io::SeekFrom::Current(len as i64))?;
@ -199,15 +159,3 @@ fn skip_array_data<R: Read + Seek>(
}
Ok(())
}
#[tauri::command]
pub async fn read_gguf_metadata(path: String) -> Result<GgufMetadata, String> {
// run the blocking code in a separate thread pool
tauri::async_runtime::spawn_blocking(move || {
read_gguf_metadata_internal(path)
.map_err(|e| e.to_string())
})
.await
.unwrap()
}

View File

@ -0,0 +1,3 @@
pub mod commands;
pub mod helpers;
pub mod types;

View File

@ -0,0 +1,54 @@
use serde::Serialize;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::io;
#[derive(Debug, Clone, Copy)]
#[repr(u32)]
pub enum GgufValueType {
Uint8 = 0,
Int8 = 1,
Uint16 = 2,
Int16 = 3,
Uint32 = 4,
Int32 = 5,
Float32 = 6,
Bool = 7,
String = 8,
Array = 9,
Uint64 = 10,
Int64 = 11,
Float64 = 12,
}
impl TryFrom<u32> for GgufValueType {
type Error = io::Error;
fn try_from(value: u32) -> Result<Self, Self::Error> {
match value {
0 => Ok(Self::Uint8),
1 => Ok(Self::Int8),
2 => Ok(Self::Uint16),
3 => Ok(Self::Int16),
4 => Ok(Self::Uint32),
5 => Ok(Self::Int32),
6 => Ok(Self::Float32),
7 => Ok(Self::Bool),
8 => Ok(Self::String),
9 => Ok(Self::Array),
10 => Ok(Self::Uint64),
11 => Ok(Self::Int64),
12 => Ok(Self::Float64),
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Unknown GGUF value type: {}", value),
)),
}
}
}
#[derive(Serialize)]
pub struct GgufMetadata {
pub version: u32,
pub tensor_count: u64,
pub metadata: HashMap<String, String>,
}
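This struct is what `read_gguf_metadata` returns to the webview, with every metadata value already stringified by the helpers. A small hedged usage sketch (the dotted key is the standard GGUF architecture key):

```typescript
// Sketch: reading GGUF metadata; field names mirror the GgufMetadata struct.
import { invoke } from '@tauri-apps/api/core'

interface GgufMetadata {
  version: number
  tensor_count: number
  metadata: Record<string, string>
}

export async function readModelArchitecture(path: string): Promise<string | undefined> {
  const meta = await invoke<GgufMetadata>('plugin:llamacpp|read_gguf_metadata', { path })
  // GGUF metadata keys are dotted, e.g. "general.architecture".
  return meta.metadata['general.architecture']
}
```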

View File

@ -0,0 +1,43 @@
use tauri::{
plugin::{Builder, TauriPlugin},
Manager, Runtime,
};
pub mod cleanup;
mod commands;
mod device;
mod error;
mod gguf;
mod path;
mod process;
pub mod state;
pub use cleanup::cleanup_llama_processes;
pub use state::LLamaBackendSession;
/// Initializes the plugin.
pub fn init<R: Runtime>() -> TauriPlugin<R> {
Builder::new("llamacpp")
.invoke_handler(tauri::generate_handler![
// Cleanup command
cleanup::cleanup_llama_processes,
// LlamaCpp server commands
commands::load_llama_model,
commands::unload_llama_model,
commands::get_devices,
commands::generate_api_key,
commands::is_process_running,
commands::get_random_port,
commands::find_session_by_model,
commands::get_loaded_models,
commands::get_all_sessions,
commands::get_session_by_model,
// GGUF commands
gguf::commands::read_gguf_metadata,
])
.setup(|app, _api| {
// Initialize and manage the plugin state
app.manage(state::LlamacppState::new());
Ok(())
})
.build()
}

View File

@ -0,0 +1,100 @@
use std::path::PathBuf;
use crate::error::{ErrorCode, LlamacppError, ServerResult};
#[cfg(windows)]
use std::os::windows::ffi::OsStrExt;
#[cfg(windows)]
use std::ffi::OsStr;
#[cfg(windows)]
use windows_sys::Win32::Storage::FileSystem::GetShortPathNameW;
/// Get Windows short path to avoid issues with spaces and special characters
#[cfg(windows)]
pub fn get_short_path<P: AsRef<std::path::Path>>(path: P) -> Option<String> {
let wide: Vec<u16> = OsStr::new(path.as_ref())
.encode_wide()
.chain(Some(0))
.collect();
let mut buffer = vec![0u16; 260];
let len = unsafe { GetShortPathNameW(wide.as_ptr(), buffer.as_mut_ptr(), buffer.len() as u32) };
if len > 0 {
Some(String::from_utf16_lossy(&buffer[..len as usize]))
} else {
None
}
}
/// Validate that a binary path exists and is accessible
pub fn validate_binary_path(backend_path: &str) -> ServerResult<PathBuf> {
let server_path_buf = PathBuf::from(backend_path);
if !server_path_buf.exists() {
let err_msg = format!("Binary not found at {:?}", backend_path);
log::error!(
"Server binary not found at expected path: {:?}",
backend_path
);
return Err(LlamacppError::new(
ErrorCode::BinaryNotFound,
"The llama.cpp server binary could not be found.".into(),
Some(err_msg),
)
.into());
}
Ok(server_path_buf)
}
/// Validate model path exists and update args with platform-appropriate path format
pub fn validate_model_path(args: &mut Vec<String>) -> ServerResult<PathBuf> {
let model_path_index = args.iter().position(|arg| arg == "-m").ok_or_else(|| {
LlamacppError::new(
ErrorCode::ModelLoadFailed,
"Model path argument '-m' is missing.".into(),
None,
)
})?;
let model_path = args.get(model_path_index + 1).cloned().ok_or_else(|| {
LlamacppError::new(
ErrorCode::ModelLoadFailed,
"Model path was not provided after '-m' flag.".into(),
None,
)
})?;
let model_path_pb = PathBuf::from(&model_path);
if !model_path_pb.exists() {
let err_msg = format!(
"Invalid or inaccessible model path: {}",
model_path_pb.display()
);
log::error!("{}", &err_msg);
return Err(LlamacppError::new(
ErrorCode::ModelFileNotFound,
"The specified model file does not exist or is not accessible.".into(),
Some(err_msg),
)
.into());
}
// Update the path in args with appropriate format for the platform
#[cfg(windows)]
{
// use short path on Windows
if let Some(short) = get_short_path(&model_path_pb) {
args[model_path_index + 1] = short;
} else {
args[model_path_index + 1] = model_path_pb.display().to_string();
}
}
#[cfg(not(windows))]
{
args[model_path_index + 1] = model_path_pb.display().to_string();
}
Ok(model_path_pb)
}

View File

@ -0,0 +1,154 @@
use std::collections::HashSet;
use std::time::Duration;
use sysinfo::{Pid, System};
use tauri::{Manager, Runtime, State};
use tokio::time::timeout;
use crate::state::{LlamacppState, SessionInfo};
use jan_utils::generate_random_port;
/// Check if a process is running by PID
pub async fn is_process_running_by_pid<R: Runtime>(
app_handle: tauri::AppHandle<R>,
pid: i32,
) -> Result<bool, String> {
let mut system = System::new();
system.refresh_processes(sysinfo::ProcessesToUpdate::All, true);
let process_pid = Pid::from(pid as usize);
let alive = system.process(process_pid).is_some();
if !alive {
let state: State<LlamacppState> = app_handle.state();
let mut map = state.llama_server_process.lock().await;
map.remove(&pid);
}
Ok(alive)
}
/// Get a random available port, avoiding ports used by existing sessions
pub async fn get_random_available_port<R: Runtime>(
app_handle: tauri::AppHandle<R>,
) -> Result<u16, String> {
// Get all active ports from sessions
let state: State<LlamacppState> = app_handle.state();
let map = state.llama_server_process.lock().await;
let used_ports: HashSet<u16> = map
.values()
.filter_map(|session| {
// Convert valid ports to u16 (filter out placeholder ports like -1)
if session.info.port > 0 && session.info.port <= u16::MAX as i32 {
Some(session.info.port as u16)
} else {
None
}
})
.collect();
drop(map); // unlock early
generate_random_port(&used_ports)
}
/// Gracefully terminate a process on Unix systems
#[cfg(unix)]
pub async fn graceful_terminate_process(child: &mut tokio::process::Child) {
use nix::sys::signal::{kill, Signal};
use nix::unistd::Pid;
if let Some(raw_pid) = child.id() {
let raw_pid = raw_pid as i32;
log::info!("Sending SIGTERM to PID {}", raw_pid);
let _ = kill(Pid::from_raw(raw_pid), Signal::SIGTERM);
match timeout(Duration::from_secs(5), child.wait()).await {
Ok(Ok(status)) => log::info!("Process exited gracefully: {}", status),
Ok(Err(e)) => log::error!("Error waiting after SIGTERM: {}", e),
Err(_) => {
log::warn!("SIGTERM timed out; sending SIGKILL to PID {}", raw_pid);
let _ = kill(Pid::from_raw(raw_pid), Signal::SIGKILL);
match child.wait().await {
Ok(s) => log::info!("Force-killed process exited: {}", s),
Err(e) => log::error!("Error waiting after SIGKILL: {}", e),
}
}
}
}
}
/// Force terminate a process on Windows
#[cfg(all(windows, target_arch = "x86_64"))]
pub async fn force_terminate_process(child: &mut tokio::process::Child) {
if let Some(raw_pid) = child.id() {
log::warn!(
"gracefully killing is unsupported on Windows, force-killing PID {}",
raw_pid
);
// Since we know a graceful shutdown doesn't work and there are no child processes
// to worry about, we can use `child.kill()` directly. On Windows, this is
// a forceful termination via the `TerminateProcess` API.
if let Err(e) = child.kill().await {
log::error!(
"Failed to send kill signal to PID {}: {}. It may have already terminated.",
raw_pid,
e
);
}
match child.wait().await {
Ok(status) => log::info!(
"process {} has been terminated. Final exit status: {}",
raw_pid,
status
),
Err(e) => log::error!(
"Error waiting on child process {} after kill: {}",
raw_pid,
e
),
}
}
}
/// Find a session by model ID
pub async fn find_session_by_model_id<R: Runtime>(
app_handle: tauri::AppHandle<R>,
model_id: &str,
) -> Result<Option<SessionInfo>, String> {
let state: State<LlamacppState> = app_handle.state();
let map = state.llama_server_process.lock().await;
let session_info = map
.values()
.find(|backend_session| backend_session.info.model_id == model_id)
.map(|backend_session| backend_session.info.clone());
Ok(session_info)
}
/// Get all loaded model IDs
pub async fn get_all_loaded_model_ids<R: Runtime>(
app_handle: tauri::AppHandle<R>,
) -> Result<Vec<String>, String> {
let state: State<LlamacppState> = app_handle.state();
let map = state.llama_server_process.lock().await;
let model_ids = map
.values()
.map(|backend_session| backend_session.info.model_id.clone())
.collect();
Ok(model_ids)
}
/// Get all active sessions
pub async fn get_all_active_sessions<R: Runtime>(
app_handle: tauri::AppHandle<R>,
) -> Result<Vec<SessionInfo>, String> {
let state: State<LlamacppState> = app_handle.state();
let map = state.llama_server_process.lock().await;
let sessions: Vec<SessionInfo> = map.values().map(|s| s.info.clone()).collect();
Ok(sessions)
}
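These helpers back the session-query commands; a short hedged sketch of how the extension might combine two of them to check whether a model's server process is still alive:

```typescript
// Sketch: session lookup plus liveness check, combined on the webview side.
import { invoke } from '@tauri-apps/api/core'

export async function isModelAlive(modelId: string): Promise<boolean> {
  const session = await invoke<{ pid: number } | null>(
    'plugin:llamacpp|find_session_by_model',
    { modelId }
  )
  if (!session) return false
  return await invoke<boolean>('plugin:llamacpp|is_process_running', {
    pid: session.pid,
  })
}
```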

View File

@ -0,0 +1,38 @@
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::process::Child;
use tokio::sync::Mutex;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionInfo {
pub pid: i32, // opaque handle for unload/chat
pub port: i32, // llama-server output port
pub model_id: String,
pub model_path: String, // path of the loaded model
pub api_key: String,
}
pub struct LLamaBackendSession {
pub child: Child,
pub info: SessionInfo,
}
/// LlamaCpp plugin state
pub struct LlamacppState {
pub llama_server_process: Arc<Mutex<HashMap<i32, LLamaBackendSession>>>,
}
impl Default for LlamacppState {
fn default() -> Self {
Self {
llama_server_process: Arc::new(Mutex::new(HashMap::new())),
}
}
}
impl LlamacppState {
pub fn new() -> Self {
Self::default()
}
}

View File

@ -0,0 +1,14 @@
{
"compilerOptions": {
"target": "es2021",
"module": "esnext",
"moduleResolution": "bundler",
"skipLibCheck": true,
"strict": true,
"noUnusedLocals": true,
"noImplicitAny": true,
"noEmit": true
},
"include": ["guest-js/*.ts"],
"exclude": ["dist-js", "node_modules"]
}

View File

View File

@ -0,0 +1,212 @@
use std::{fs, path::PathBuf};
use tauri::{AppHandle, Manager, Runtime, State};
use super::{
constants::CONFIGURATION_FILE_NAME, helpers::copy_dir_recursive, models::AppConfiguration,
};
use crate::core::state::AppState;
#[tauri::command]
pub fn get_app_configurations<R: Runtime>(app_handle: tauri::AppHandle<R>) -> AppConfiguration {
let mut app_default_configuration = AppConfiguration::default();
if std::env::var("CI").unwrap_or_default() == "e2e" {
return app_default_configuration;
}
let configuration_file = get_configuration_file_path(app_handle.clone());
let default_data_folder = default_data_folder_path(app_handle.clone());
if !configuration_file.exists() {
log::info!(
"App config not found, creating default config at {:?}",
configuration_file
);
app_default_configuration.data_folder = default_data_folder;
if let Err(err) = fs::write(
&configuration_file,
serde_json::to_string(&app_default_configuration).unwrap(),
) {
log::error!("Failed to create default config: {}", err);
}
return app_default_configuration;
}
match fs::read_to_string(&configuration_file) {
Ok(content) => match serde_json::from_str::<AppConfiguration>(&content) {
Ok(app_configurations) => app_configurations,
Err(err) => {
log::error!(
"Failed to parse app config, returning default config instead. Error: {}",
err
);
app_default_configuration
}
},
Err(err) => {
log::error!(
"Failed to read app config, returning default config instead. Error: {}",
err
);
app_default_configuration
}
}
}
#[tauri::command]
pub fn update_app_configuration(
app_handle: tauri::AppHandle,
configuration: AppConfiguration,
) -> Result<(), String> {
let configuration_file = get_configuration_file_path(app_handle);
log::info!(
"update_app_configuration, configuration_file: {:?}",
configuration_file
);
fs::write(
configuration_file,
serde_json::to_string(&configuration).map_err(|e| e.to_string())?,
)
.map_err(|e| e.to_string())
}
#[tauri::command]
pub fn get_jan_data_folder_path<R: Runtime>(app_handle: tauri::AppHandle<R>) -> PathBuf {
if cfg!(test) {
let path = std::env::current_dir()
.unwrap_or_else(|_| PathBuf::from("."))
.join("test-data");
if !path.exists() {
let _ = fs::create_dir_all(&path);
}
return path;
}
let app_configurations = get_app_configurations(app_handle);
PathBuf::from(app_configurations.data_folder)
}
#[tauri::command]
pub fn get_configuration_file_path<R: Runtime>(app_handle: tauri::AppHandle<R>) -> PathBuf {
let app_path = app_handle.path().app_data_dir().unwrap_or_else(|err| {
log::error!(
"Failed to get app data directory: {}. Using home directory instead.",
err
);
let home_dir = std::env::var(if cfg!(target_os = "windows") {
"USERPROFILE"
} else {
"HOME"
})
.expect("Failed to determine the home directory");
PathBuf::from(home_dir)
});
let package_name = env!("CARGO_PKG_NAME");
#[cfg(target_os = "linux")]
let old_data_dir = {
if let Some(config_path) = dirs::config_dir() {
config_path.join(package_name)
} else {
log::debug!("Could not determine config directory");
app_path
.parent()
.unwrap_or(&app_path.join("../"))
.join(package_name)
}
};
#[cfg(not(target_os = "linux"))]
let old_data_dir = app_path
.parent()
.unwrap_or(&app_path.join("../"))
.join(package_name);
if old_data_dir.exists() {
return old_data_dir.join(CONFIGURATION_FILE_NAME);
} else {
return app_path.join(CONFIGURATION_FILE_NAME);
}
}
#[tauri::command]
pub fn default_data_folder_path<R: Runtime>(app_handle: tauri::AppHandle<R>) -> String {
let mut path = app_handle.path().data_dir().unwrap();
let app_name = std::env::var("APP_NAME")
.unwrap_or_else(|_| app_handle.config().product_name.clone().unwrap());
path.push(app_name);
path.push("data");
let mut path_str = path.to_str().unwrap().to_string();
if let Some(stripped) = path.to_str().unwrap().to_string().strip_suffix(".ai.app") {
path_str = stripped.to_string();
}
path_str
}
#[tauri::command]
pub fn get_user_home_path(app: AppHandle) -> String {
return get_app_configurations(app.clone()).data_folder;
}
#[tauri::command]
pub fn change_app_data_folder(
app_handle: tauri::AppHandle,
new_data_folder: String,
) -> Result<(), String> {
// Get current data folder path
let current_data_folder = get_jan_data_folder_path(app_handle.clone());
let new_data_folder_path = PathBuf::from(&new_data_folder);
// Create the new data folder if it doesn't exist
if !new_data_folder_path.exists() {
fs::create_dir_all(&new_data_folder_path)
.map_err(|e| format!("Failed to create new data folder: {}", e))?;
}
// Copy all files from the old folder to the new one
if current_data_folder.exists() {
log::info!(
"Copying data from {:?} to {:?}",
current_data_folder,
new_data_folder_path
);
// Check if this is a parent directory to avoid infinite recursion
if new_data_folder_path.starts_with(&current_data_folder) {
return Err(
"New data folder cannot be a subdirectory of the current data folder".to_string(),
);
}
copy_dir_recursive(
&current_data_folder,
&new_data_folder_path,
&[".uvx", ".npx"],
)
.map_err(|e| format!("Failed to copy data to new folder: {}", e))?;
} else {
log::info!("Current data folder does not exist, nothing to copy");
}
// Update the configuration to point to the new folder
let mut configuration = get_app_configurations(app_handle.clone());
configuration.data_folder = new_data_folder;
// Save the updated configuration
update_app_configuration(app_handle, configuration)
}
#[tauri::command]
pub fn app_token(state: State<'_, AppState>) -> Option<String> {
state.app_token.clone()
}
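These core commands are invoked without a plugin prefix. A hedged sketch of reading the configuration and relocating the data folder from the webview, assuming the commands remain registered in the app's main invoke handler:

```typescript
// Sketch: reading the app configuration and relocating the data folder.
import { invoke } from '@tauri-apps/api/core'

interface AppConfiguration {
  data_folder: string
}

export async function moveDataFolder(newFolder: string): Promise<void> {
  const config = await invoke<AppConfiguration>('get_app_configurations')
  console.log('current data folder:', config.data_folder)
  await invoke('change_app_data_folder', { newDataFolder: newFolder })
}
```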

View File

@ -0,0 +1,2 @@
// App Configuration Constants
pub const CONFIGURATION_FILE_NAME: &str = "settings.json";

View File

@ -0,0 +1,33 @@
use std::{fs, io, path::PathBuf};
/// Recursively copy a directory from src to dst, excluding specified directories
pub fn copy_dir_recursive(
src: &PathBuf,
dst: &PathBuf,
exclude_dirs: &[&str],
) -> Result<(), io::Error> {
if !dst.exists() {
fs::create_dir_all(dst)?;
}
for entry in fs::read_dir(src)? {
let entry = entry?;
let file_type = entry.file_type()?;
let src_path = entry.path();
let dst_path = dst.join(entry.file_name());
if file_type.is_dir() {
// Skip excluded directories
if let Some(dir_name) = entry.file_name().to_str() {
if exclude_dirs.contains(&dir_name) {
continue;
}
}
copy_dir_recursive(&src_path, &dst_path, exclude_dirs)?;
} else {
fs::copy(&src_path, &dst_path)?;
}
}
Ok(())
}

View File

@ -0,0 +1,4 @@
pub mod commands;
mod constants;
pub mod helpers;
pub mod models;

View File

@ -0,0 +1,16 @@
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct AppConfiguration {
pub data_folder: String,
// Add other fields as needed
}
impl AppConfiguration {
pub fn default() -> Self {
Self {
data_folder: String::from("./data"), // Set a default value for the data_folder
// Add other fields with default values as needed
}
}
}

View File

@ -1,435 +0,0 @@
use serde::{Deserialize, Serialize};
use std::{fs, io, path::PathBuf};
use tauri::{AppHandle, Manager, Runtime, State};
use crate::core::{mcp::clean_up_mcp_servers, utils::extensions::inference_llamacpp_extension::cleanup::cleanup_processes};
use super::{server, setup, state::AppState};
const CONFIGURATION_FILE_NAME: &str = "settings.json";
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct AppConfiguration {
pub data_folder: String,
// Add other fields as needed
}
impl AppConfiguration {
pub fn default() -> Self {
Self {
data_folder: String::from("./data"), // Set a default value for the data_folder
// Add other fields with default values as needed
}
}
}
#[tauri::command]
pub fn get_app_configurations<R: Runtime>(app_handle: tauri::AppHandle<R>) -> AppConfiguration {
let mut app_default_configuration = AppConfiguration::default();
if std::env::var("CI").unwrap_or_default() == "e2e" {
return app_default_configuration;
}
let configuration_file = get_configuration_file_path(app_handle.clone());
let default_data_folder = default_data_folder_path(app_handle.clone());
if !configuration_file.exists() {
log::info!(
"App config not found, creating default config at {:?}",
configuration_file
);
app_default_configuration.data_folder = default_data_folder;
if let Err(err) = fs::write(
&configuration_file,
serde_json::to_string(&app_default_configuration).unwrap(),
) {
log::error!("Failed to create default config: {}", err);
}
return app_default_configuration;
}
match fs::read_to_string(&configuration_file) {
Ok(content) => match serde_json::from_str::<AppConfiguration>(&content) {
Ok(app_configurations) => app_configurations,
Err(err) => {
log::error!(
"Failed to parse app config, returning default config instead. Error: {}",
err
);
app_default_configuration
}
},
Err(err) => {
log::error!(
"Failed to read app config, returning default config instead. Error: {}",
err
);
app_default_configuration
}
}
}
#[tauri::command]
pub fn update_app_configuration(
app_handle: tauri::AppHandle,
configuration: AppConfiguration,
) -> Result<(), String> {
let configuration_file = get_configuration_file_path(app_handle);
log::info!(
"update_app_configuration, configuration_file: {:?}",
configuration_file
);
fs::write(
configuration_file,
serde_json::to_string(&configuration).map_err(|e| e.to_string())?,
)
.map_err(|e| e.to_string())
}
#[tauri::command]
pub fn get_jan_data_folder_path<R: Runtime>(app_handle: tauri::AppHandle<R>) -> PathBuf {
if cfg!(test) {
let path = std::env::current_dir()
.unwrap_or_else(|_| PathBuf::from("."))
.join("test-data");
if !path.exists() {
let _ = fs::create_dir_all(&path);
}
return path;
}
let app_configurations = get_app_configurations(app_handle);
PathBuf::from(app_configurations.data_folder)
}
#[tauri::command]
pub fn get_jan_extensions_path(app_handle: tauri::AppHandle) -> PathBuf {
get_jan_data_folder_path(app_handle).join("extensions")
}
#[tauri::command]
pub fn factory_reset(app_handle: tauri::AppHandle, state: State<'_, AppState>) {
// close window
let windows = app_handle.webview_windows();
for (label, window) in windows.iter() {
window.close().unwrap_or_else(|_| {
log::warn!("Failed to close window: {:?}", label);
});
}
let data_folder = get_jan_data_folder_path(app_handle.clone());
log::info!("Factory reset, removing data folder: {:?}", data_folder);
tauri::async_runtime::block_on(async {
clean_up_mcp_servers(state.clone()).await;
cleanup_processes(state).await;
if data_folder.exists() {
if let Err(e) = fs::remove_dir_all(&data_folder) {
log::error!("Failed to remove data folder: {}", e);
return;
}
}
// Recreate the data folder
let _ = fs::create_dir_all(&data_folder).map_err(|e| e.to_string());
// Reset the configuration
let mut default_config: AppConfiguration = AppConfiguration::default();
default_config.data_folder = default_data_folder_path(app_handle.clone());
let _ = update_app_configuration(app_handle.clone(), default_config);
app_handle.restart();
});
}
#[tauri::command]
pub fn get_configuration_file_path<R: Runtime>(app_handle: tauri::AppHandle<R>) -> PathBuf {
let app_path = app_handle.path().app_data_dir().unwrap_or_else(|err| {
log::error!(
"Failed to get app data directory: {}. Using home directory instead.",
err
);
let home_dir = std::env::var(if cfg!(target_os = "windows") {
"USERPROFILE"
} else {
"HOME"
})
.expect("Failed to determine the home directory");
PathBuf::from(home_dir)
});
let package_name = env!("CARGO_PKG_NAME");
#[cfg(target_os = "linux")]
let old_data_dir = {
if let Some(config_path) = dirs::config_dir() {
config_path.join(package_name)
} else {
log::debug!("Could not determine config directory");
app_path
.parent()
.unwrap_or(&app_path.join("../"))
.join(package_name)
}
};
#[cfg(not(target_os = "linux"))]
let old_data_dir = app_path
.parent()
.unwrap_or(&app_path.join("../"))
.join(package_name);
if old_data_dir.exists() {
return old_data_dir.join(CONFIGURATION_FILE_NAME);
} else {
return app_path.join(CONFIGURATION_FILE_NAME);
}
}
#[tauri::command]
pub fn default_data_folder_path<R: Runtime>(app_handle: tauri::AppHandle<R>) -> String {
let mut path = app_handle.path().data_dir().unwrap();
let app_name = std::env::var("APP_NAME")
.unwrap_or_else(|_| app_handle.config().product_name.clone().unwrap());
path.push(app_name);
path.push("data");
let mut path_str = path.to_str().unwrap().to_string();
if let Some(stripped) = path.to_str().unwrap().to_string().strip_suffix(".ai.app") {
path_str = stripped.to_string();
}
path_str
}
#[tauri::command]
pub fn relaunch(app: AppHandle) {
app.restart()
}
#[tauri::command]
pub fn open_app_directory(app: AppHandle) {
let app_path = app.path().app_data_dir().unwrap();
if cfg!(target_os = "windows") {
std::process::Command::new("explorer")
.arg(app_path)
.spawn()
.expect("Failed to open app directory");
} else if cfg!(target_os = "macos") {
std::process::Command::new("open")
.arg(app_path)
.spawn()
.expect("Failed to open app directory");
} else {
std::process::Command::new("xdg-open")
.arg(app_path)
.spawn()
.expect("Failed to open app directory");
}
}
#[tauri::command]
pub fn open_file_explorer(path: String) {
let path = PathBuf::from(path);
if cfg!(target_os = "windows") {
std::process::Command::new("explorer")
.arg(path)
.spawn()
.expect("Failed to open file explorer");
} else if cfg!(target_os = "macos") {
std::process::Command::new("open")
.arg(path)
.spawn()
.expect("Failed to open file explorer");
} else {
std::process::Command::new("xdg-open")
.arg(path)
.spawn()
.expect("Failed to open file explorer");
}
}
#[tauri::command]
pub fn install_extensions(app: AppHandle) {
if let Err(err) = setup::install_extensions(app, true) {
log::error!("Failed to install extensions: {}", err);
}
}
#[tauri::command]
pub fn get_active_extensions(app: AppHandle) -> Vec<serde_json::Value> {
let mut path = get_jan_extensions_path(app);
path.push("extensions.json");
log::info!("get jan extensions, path: {:?}", path);
let contents = fs::read_to_string(path);
let contents: Vec<serde_json::Value> = match contents {
Ok(data) => match serde_json::from_str::<Vec<serde_json::Value>>(&data) {
Ok(exts) => exts
.into_iter()
.map(|ext| {
serde_json::json!({
"url": ext["url"],
"name": ext["name"],
"productName": ext["productName"],
"active": ext["_active"],
"description": ext["description"],
"version": ext["version"]
})
})
.collect(),
Err(error) => {
log::error!("Failed to parse extensions.json: {}", error);
vec![]
}
},
Err(error) => {
log::error!("Failed to read extensions.json: {}", error);
vec![]
}
};
return contents;
}
#[tauri::command]
pub fn get_user_home_path(app: AppHandle) -> String {
return get_app_configurations(app.clone()).data_folder;
}
/// Recursively copy a directory from src to dst, excluding specified directories
fn copy_dir_recursive(src: &PathBuf, dst: &PathBuf, exclude_dirs: &[&str]) -> Result<(), io::Error> {
if !dst.exists() {
fs::create_dir_all(dst)?;
}
for entry in fs::read_dir(src)? {
let entry = entry?;
let file_type = entry.file_type()?;
let src_path = entry.path();
let dst_path = dst.join(entry.file_name());
if file_type.is_dir() {
// Skip excluded directories
if let Some(dir_name) = entry.file_name().to_str() {
if exclude_dirs.contains(&dir_name) {
continue;
}
}
copy_dir_recursive(&src_path, &dst_path, exclude_dirs)?;
} else {
fs::copy(&src_path, &dst_path)?;
}
}
Ok(())
}
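// A minimal sketch of the exclusion behaviour above, assuming copy_dir_recursive
// stays a private helper of this module; the directory layout is illustrative only.
#[cfg(test)]
mod copy_dir_recursive_sketch {
    use super::copy_dir_recursive;
    use std::fs;

    #[test]
    fn skips_excluded_directories() {
        let base = std::env::temp_dir().join("jan_copy_dir_recursive_sketch");
        let src = base.join("src");
        let dst = base.join("dst");
        let _ = fs::remove_dir_all(&base);
        // One regular file and one excluded directory in the source tree.
        fs::create_dir_all(src.join(".uvx")).unwrap();
        fs::write(src.join("config.json"), "{}").unwrap();
        fs::write(src.join(".uvx").join("cache.bin"), "cache").unwrap();

        copy_dir_recursive(&src, &dst, &[".uvx"]).unwrap();

        // The regular file is copied, the excluded directory is skipped.
        assert!(dst.join("config.json").exists());
        assert!(!dst.join(".uvx").exists());
        let _ = fs::remove_dir_all(&base);
    }
}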
#[tauri::command]
pub fn change_app_data_folder(
app_handle: tauri::AppHandle,
new_data_folder: String,
) -> Result<(), String> {
// Get current data folder path
let current_data_folder = get_jan_data_folder_path(app_handle.clone());
let new_data_folder_path = PathBuf::from(&new_data_folder);
// Create the new data folder if it doesn't exist
if !new_data_folder_path.exists() {
fs::create_dir_all(&new_data_folder_path)
.map_err(|e| format!("Failed to create new data folder: {}", e))?;
}
// Copy all files from the old folder to the new one
if current_data_folder.exists() {
log::info!(
"Copying data from {:?} to {:?}",
current_data_folder,
new_data_folder_path
);
// Reject a new folder nested inside the current one, otherwise the copy would recurse into itself
if new_data_folder_path.starts_with(&current_data_folder) {
return Err(
"New data folder cannot be a subdirectory of the current data folder".to_string(),
);
}
copy_dir_recursive(&current_data_folder, &new_data_folder_path, &[".uvx", ".npx"])
.map_err(|e| format!("Failed to copy data to new folder: {}", e))?;
} else {
log::info!("Current data folder does not exist, nothing to copy");
}
// Update the configuration to point to the new folder
let mut configuration = get_app_configurations(app_handle.clone());
configuration.data_folder = new_data_folder;
// Save the updated configuration
update_app_configuration(app_handle, configuration)
}
#[tauri::command]
pub fn app_token(state: State<'_, AppState>) -> Option<String> {
state.app_token.clone()
}
#[tauri::command]
pub async fn start_server(
state: State<'_, AppState>,
host: String,
port: u16,
prefix: String,
api_key: String,
trusted_hosts: Vec<String>,
) -> Result<bool, String> {
let server_handle = state.server_handle.clone();
let sessions = state.llama_server_process.clone();
server::start_server(
server_handle,
sessions,
host,
port,
prefix,
api_key,
vec![trusted_hosts],
)
.await
.map_err(|e| e.to_string())?;
Ok(true)
}
#[tauri::command]
pub async fn stop_server(state: State<'_, AppState>) -> Result<(), String> {
let server_handle = state.server_handle.clone();
server::stop_server(server_handle)
.await
.map_err(|e| e.to_string())?;
Ok(())
}
#[tauri::command]
pub async fn get_server_status(state: State<'_, AppState>) -> Result<bool, String> {
let server_handle = state.server_handle.clone();
Ok(server::is_server_running(server_handle).await)
}
#[tauri::command]
pub async fn read_logs(app: AppHandle) -> Result<String, String> {
let log_path = get_jan_data_folder_path(app).join("logs").join("app.log");
if log_path.exists() {
let content = fs::read_to_string(log_path).map_err(|e| e.to_string())?;
Ok(content)
} else {
Err(format!("Log file not found"))
}
}

View File

@ -0,0 +1,68 @@
use super::helpers::{_download_files_internal, err_to_string};
use super::models::DownloadItem;
use crate::core::app::commands::get_jan_data_folder_path;
use crate::core::state::AppState;
use std::collections::HashMap;
use tauri::State;
use tokio_util::sync::CancellationToken;
#[tauri::command]
pub async fn download_files(
app: tauri::AppHandle,
state: State<'_, AppState>,
items: Vec<DownloadItem>,
task_id: &str,
headers: HashMap<String, String>,
) -> Result<(), String> {
// insert cancel tokens
let cancel_token = CancellationToken::new();
{
let mut download_manager = state.download_manager.lock().await;
if download_manager.cancel_tokens.contains_key(task_id) {
return Err(format!("task_id {} exists", task_id));
}
download_manager
.cancel_tokens
.insert(task_id.to_string(), cancel_token.clone());
}
// TODO: Support resuming downloads when FE is ready
let result = _download_files_internal(
app.clone(),
&items,
&headers,
task_id,
false,
cancel_token.clone(),
)
.await;
// cleanup
{
let mut download_manager = state.download_manager.lock().await;
download_manager.cancel_tokens.remove(task_id);
}
// delete files if cancelled
if cancel_token.is_cancelled() {
let jan_data_folder = get_jan_data_folder_path(app.clone());
for item in items {
let save_path = jan_data_folder.join(&item.save_path);
let _ = std::fs::remove_file(&save_path); // don't check error
}
}
result.map_err(err_to_string)
}
#[tauri::command]
pub async fn cancel_download_task(state: State<'_, AppState>, task_id: &str) -> Result<(), String> {
// NOTE: might want to add User-Agent header
let mut download_manager = state.download_manager.lock().await;
if let Some(token) = download_manager.cancel_tokens.remove(task_id) {
token.cancel();
log::info!("Cancelled download task: {}", task_id);
Ok(())
} else {
Err(format!("No download task: {}", task_id))
}
}

View File

@ -0,0 +1,365 @@
use super::models::{DownloadEvent, DownloadItem, ProxyConfig};
use crate::core::app::commands::get_jan_data_folder_path;
use futures_util::StreamExt;
use jan_utils::normalize_path;
use reqwest::header::{HeaderMap, HeaderName, HeaderValue};
use std::collections::HashMap;
use std::time::Duration;
use tauri::Emitter;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tokio_util::sync::CancellationToken;
use url::Url;
pub fn err_to_string<E: std::fmt::Display>(e: E) -> String {
format!("Error: {}", e)
}
pub fn validate_proxy_config(config: &ProxyConfig) -> Result<(), String> {
// Validate the proxy URL format and parse it once
let url = Url::parse(&config.url)
.map_err(|e| format!("Invalid proxy URL '{}': {}", config.url, e))?;
// Check if proxy URL has valid scheme
match url.scheme() {
"http" | "https" | "socks4" | "socks5" => {}
scheme => return Err(format!("Unsupported proxy scheme: {}", scheme)),
}
// Validate authentication credentials
if config.username.is_some() && config.password.is_none() {
return Err("Username provided without password".to_string());
}
if config.password.is_some() && config.username.is_none() {
return Err("Password provided without username".to_string());
}
// Validate no_proxy entries
if let Some(no_proxy) = &config.no_proxy {
for entry in no_proxy {
if entry.is_empty() {
return Err("Empty no_proxy entry".to_string());
}
// Basic validation for wildcard patterns
if entry.starts_with("*.") && entry.len() < 3 {
return Err(format!("Invalid wildcard pattern: {}", entry));
}
}
}
// SSL verification settings are all optional booleans, no validation needed
Ok(())
}
pub fn create_proxy_from_config(config: &ProxyConfig) -> Result<reqwest::Proxy, String> {
// Validate the configuration first
validate_proxy_config(config)?;
let mut proxy = reqwest::Proxy::all(&config.url).map_err(err_to_string)?;
// Add authentication if provided
if let (Some(username), Some(password)) = (&config.username, &config.password) {
proxy = proxy.basic_auth(username, password);
}
Ok(proxy)
}
pub fn should_bypass_proxy(url: &str, no_proxy: &[String]) -> bool {
if no_proxy.is_empty() {
return false;
}
// Parse the URL to get the host
let parsed_url = match Url::parse(url) {
Ok(u) => u,
Err(_) => return false,
};
let host = match parsed_url.host_str() {
Some(h) => h,
None => return false,
};
// Check if host matches any no_proxy entry
for entry in no_proxy {
if entry == "*" {
return true;
}
// Simple wildcard matching
if entry.starts_with("*.") {
let domain = &entry[2..];
if host.ends_with(domain) {
return true;
}
} else if host == entry {
return true;
}
}
false
}
pub fn _get_client_for_item(
item: &DownloadItem,
header_map: &HeaderMap,
) -> Result<reqwest::Client, String> {
let mut client_builder = reqwest::Client::builder()
.http2_keep_alive_timeout(Duration::from_secs(15))
.default_headers(header_map.clone());
// Add proxy configuration if provided
if let Some(proxy_config) = &item.proxy {
// Handle SSL verification settings
if proxy_config.ignore_ssl.unwrap_or(false) {
client_builder = client_builder.danger_accept_invalid_certs(true);
log::info!("SSL certificate verification disabled for URL {}", item.url);
}
// Note: reqwest doesn't have fine-grained SSL verification controls
// for verify_proxy_ssl, verify_proxy_host_ssl, verify_peer_ssl, verify_host_ssl
// These settings are handled by the underlying TLS implementation
// Check if this URL should bypass proxy
let no_proxy = proxy_config.no_proxy.as_deref().unwrap_or(&[]);
if !should_bypass_proxy(&item.url, no_proxy) {
let proxy = create_proxy_from_config(proxy_config)?;
client_builder = client_builder.proxy(proxy);
log::info!("Using proxy {} for URL {}", proxy_config.url, item.url);
} else {
log::info!("Bypassing proxy for URL {}", item.url);
}
}
client_builder.build().map_err(err_to_string)
}
pub fn _convert_headers(
headers: &HashMap<String, String>,
) -> Result<HeaderMap, Box<dyn std::error::Error>> {
let mut header_map = HeaderMap::new();
for (k, v) in headers {
let key = HeaderName::from_bytes(k.as_bytes())?;
let value = HeaderValue::from_str(v)?;
header_map.insert(key, value);
}
Ok(header_map)
}
pub async fn _get_file_size(
client: &reqwest::Client,
url: &str,
) -> Result<u64, Box<dyn std::error::Error>> {
let resp = client.head(url).send().await?;
if !resp.status().is_success() {
return Err(format!("Failed to get file size: HTTP status {}", resp.status()).into());
}
// resp.content_length() always returns 0 for a HEAD request, so parse the
// content-length header manually instead of:
// Ok(resp.content_length().unwrap_or(0))
match resp.headers().get("content-length") {
Some(value) => {
let value_str = value.to_str()?;
let value_u64: u64 = value_str.parse()?;
Ok(value_u64)
}
None => Ok(0),
}
}
pub async fn _download_files_internal(
app: tauri::AppHandle,
items: &[DownloadItem],
headers: &HashMap<String, String>,
task_id: &str,
resume: bool,
cancel_token: CancellationToken,
) -> Result<(), String> {
log::info!("Start download task: {}", task_id);
let header_map = _convert_headers(headers).map_err(err_to_string)?;
let total_size = {
let mut total_size = 0u64;
for item in items.iter() {
let client = _get_client_for_item(item, &header_map).map_err(err_to_string)?;
total_size += _get_file_size(&client, &item.url)
.await
.map_err(err_to_string)?;
}
total_size
};
log::info!("Total download size: {}", total_size);
let mut evt = DownloadEvent {
transferred: 0,
total: total_size,
};
let evt_name = format!("download-{}", task_id);
// save file under Jan data folder
let jan_data_folder = get_jan_data_folder_path(app.clone());
for item in items.iter() {
let save_path = jan_data_folder.join(&item.save_path);
let save_path = normalize_path(&save_path);
if !save_path.starts_with(&jan_data_folder) {
return Err(format!(
"Path {} is outside of Jan data folder {}",
save_path.display(),
jan_data_folder.display()
));
}
// Create parent directories if they don't exist
if let Some(parent) = save_path.parent() {
if !parent.exists() {
tokio::fs::create_dir_all(parent)
.await
.map_err(err_to_string)?;
}
}
let current_extension = save_path.extension().unwrap_or_default().to_string_lossy();
let append_extension = |ext: &str| {
if current_extension.is_empty() {
ext.to_string()
} else {
format!("{}.{}", current_extension, ext)
}
};
let tmp_save_path = save_path.with_extension(append_extension("tmp"));
let url_save_path = save_path.with_extension(append_extension("url"));
let mut should_resume = resume
&& tmp_save_path.exists()
&& tokio::fs::read_to_string(&url_save_path)
.await
.map(|url| url == item.url) // check if we resume the same URL
.unwrap_or(false);
tokio::fs::write(&url_save_path, item.url.clone())
.await
.map_err(err_to_string)?;
log::info!("Started downloading: {}", item.url);
let client = _get_client_for_item(item, &header_map).map_err(err_to_string)?;
let mut download_delta = 0u64;
let resp = if should_resume {
let downloaded_size = tmp_save_path.metadata().map_err(err_to_string)?.len();
match _get_maybe_resume(&client, &item.url, downloaded_size).await {
Ok(resp) => {
log::info!(
"Resume download: {}, already downloaded {} bytes",
item.url,
downloaded_size
);
download_delta += downloaded_size;
resp
}
Err(e) => {
// fallback to normal download
log::warn!("Failed to resume download: {}", e);
should_resume = false;
_get_maybe_resume(&client, &item.url, 0).await?
}
}
} else {
_get_maybe_resume(&client, &item.url, 0).await?
};
let mut stream = resp.bytes_stream();
let file = if should_resume {
// resume download, append to existing file
tokio::fs::OpenOptions::new()
.write(true)
.append(true)
.open(&tmp_save_path)
.await
.map_err(err_to_string)?
} else {
// start new download, create a new file
File::create(&tmp_save_path).await.map_err(err_to_string)?
};
let mut writer = tokio::io::BufWriter::new(file);
// write chunk to file
while let Some(chunk) = stream.next().await {
if cancel_token.is_cancelled() {
if !should_resume {
tokio::fs::remove_dir_all(&save_path.parent().unwrap())
.await
.ok();
}
log::info!("Download cancelled for task: {}", task_id);
app.emit(&evt_name, evt.clone()).unwrap();
return Ok(());
}
let chunk = chunk.map_err(err_to_string)?;
writer.write_all(&chunk).await.map_err(err_to_string)?;
download_delta += chunk.len() as u64;
// only update every 10 MB
if download_delta >= 10 * 1024 * 1024 {
evt.transferred += download_delta;
app.emit(&evt_name, evt.clone()).unwrap();
download_delta = 0u64;
}
}
writer.flush().await.map_err(err_to_string)?;
evt.transferred += download_delta;
// rename tmp file to final file
tokio::fs::rename(&tmp_save_path, &save_path)
.await
.map_err(err_to_string)?;
tokio::fs::remove_file(&url_save_path)
.await
.map_err(err_to_string)?;
log::info!("Finished downloading: {}", item.url);
}
app.emit(&evt_name, evt.clone()).unwrap();
Ok(())
}
pub async fn _get_maybe_resume(
client: &reqwest::Client,
url: &str,
start_bytes: u64,
) -> Result<reqwest::Response, String> {
if start_bytes > 0 {
let resp = client
.get(url)
.header("Range", format!("bytes={}-", start_bytes))
.send()
.await
.map_err(err_to_string)?;
if resp.status() != reqwest::StatusCode::PARTIAL_CONTENT {
return Err(format!(
"Failed to resume download: HTTP status {}, {}",
resp.status(),
resp.text().await.unwrap_or_default()
));
}
Ok(resp)
} else {
let resp = client.get(url).send().await.map_err(err_to_string)?;
if !resp.status().is_success() {
return Err(format!(
"Failed to download: HTTP status {}, {}",
resp.status(),
resp.text().await.unwrap_or_default()
));
}
Ok(resp)
}
}
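// A minimal sketch of the sidecar naming used by _download_files_internal above:
// a partial download of "model.gguf" is written to "model.gguf.tmp" while the source
// URL is recorded in "model.gguf.url"; the extra extension is appended rather than
// replacing the existing one. The helper below only mirrors that convention for
// illustration and is not called by the download path itself.
#[cfg(test)]
mod sidecar_naming_sketch {
    use std::path::{Path, PathBuf};

    fn with_appended_extension(path: &Path, ext: &str) -> PathBuf {
        let current = path.extension().unwrap_or_default().to_string_lossy();
        let appended = if current.is_empty() {
            ext.to_string()
        } else {
            format!("{}.{}", current, ext)
        };
        path.with_extension(appended)
    }

    #[test]
    fn appends_instead_of_replacing() {
        let save_path = Path::new("models/llama/model.gguf");
        assert_eq!(
            with_appended_extension(save_path, "tmp"),
            PathBuf::from("models/llama/model.gguf.tmp")
        );
        assert_eq!(
            with_appended_extension(save_path, "url"),
            PathBuf::from("models/llama/model.gguf.url")
        );
    }
}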

View File

@ -0,0 +1,6 @@
pub mod commands;
pub mod helpers;
pub mod models;
#[cfg(test)]
mod tests;

View File

@ -0,0 +1,29 @@
use std::collections::HashMap;
use tokio_util::sync::CancellationToken;
#[derive(Default)]
pub struct DownloadManagerState {
pub cancel_tokens: HashMap<String, CancellationToken>,
}
#[derive(serde::Deserialize, Clone, Debug)]
pub struct ProxyConfig {
pub url: String,
pub username: Option<String>,
pub password: Option<String>,
pub no_proxy: Option<Vec<String>>, // List of domains to bypass proxy
pub ignore_ssl: Option<bool>, // Ignore SSL certificate verification
}
#[derive(serde::Deserialize, Clone, Debug)]
pub struct DownloadItem {
pub url: String,
pub save_path: String,
pub proxy: Option<ProxyConfig>,
}
#[derive(serde::Serialize, Clone, Debug)]
pub struct DownloadEvent {
pub transferred: u64,
pub total: u64,
}
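// A minimal sketch of the JSON shape these structs accept from the frontend; field
// names stay snake_case because no serde rename attribute is applied here. The URL
// and proxy values below are placeholders.
#[cfg(test)]
mod shape_sketch {
    use super::DownloadItem;

    #[test]
    fn deserializes_item_with_proxy() {
        let json = r#"{
            "url": "https://example.com/model.gguf",
            "save_path": "models/model.gguf",
            "proxy": {
                "url": "http://proxy.example.com:8080",
                "username": "user",
                "password": "pass",
                "no_proxy": ["localhost", "*.example.com"],
                "ignore_ssl": false
            }
        }"#;
        let item: DownloadItem = serde_json::from_str(json).unwrap();
        let proxy = item.proxy.expect("proxy should be present");
        assert_eq!(proxy.no_proxy.as_deref().map(|d| d.len()), Some(2));
        assert_eq!(proxy.ignore_ssl, Some(false));
    }
}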

View File

@ -0,0 +1,341 @@
use super::helpers::*;
use super::models::*;
use reqwest::header::HeaderMap;
use std::collections::HashMap;
// Helper function to create a minimal proxy config for testing
fn create_test_proxy_config(url: &str) -> ProxyConfig {
ProxyConfig {
url: url.to_string(),
username: None,
password: None,
no_proxy: None,
ignore_ssl: None,
}
}
#[test]
fn test_validate_proxy_config() {
// Valid HTTP proxy
let config = ProxyConfig {
url: "http://proxy.example.com:8080".to_string(),
username: Some("user".to_string()),
password: Some("pass".to_string()),
no_proxy: Some(vec!["localhost".to_string(), "*.example.com".to_string()]),
ignore_ssl: Some(true),
};
assert!(validate_proxy_config(&config).is_ok());
// Valid HTTPS proxy
let config = ProxyConfig {
url: "https://proxy.example.com:8080".to_string(),
username: None,
password: None,
no_proxy: None,
ignore_ssl: None,
};
assert!(validate_proxy_config(&config).is_ok());
// Valid SOCKS5 proxy
let config = ProxyConfig {
url: "socks5://proxy.example.com:1080".to_string(),
username: None,
password: None,
no_proxy: None,
ignore_ssl: None,
};
assert!(validate_proxy_config(&config).is_ok());
// Invalid URL
let config = create_test_proxy_config("invalid-url");
assert!(validate_proxy_config(&config).is_err());
// Unsupported scheme
let config = create_test_proxy_config("ftp://proxy.example.com:21");
assert!(validate_proxy_config(&config).is_err());
// Username without password
let mut config = create_test_proxy_config("http://proxy.example.com:8080");
config.username = Some("user".to_string());
assert!(validate_proxy_config(&config).is_err());
// Password without username
let mut config = create_test_proxy_config("http://proxy.example.com:8080");
config.password = Some("pass".to_string());
assert!(validate_proxy_config(&config).is_err());
// Empty no_proxy entry
let mut config = create_test_proxy_config("http://proxy.example.com:8080");
config.no_proxy = Some(vec!["".to_string()]);
assert!(validate_proxy_config(&config).is_err());
// Invalid wildcard pattern
let mut config = create_test_proxy_config("http://proxy.example.com:8080");
config.no_proxy = Some(vec!["*.".to_string()]);
assert!(validate_proxy_config(&config).is_err());
}
#[test]
fn test_should_bypass_proxy() {
let no_proxy = vec![
"localhost".to_string(),
"127.0.0.1".to_string(),
"*.example.com".to_string(),
"specific.domain.com".to_string(),
];
// Should bypass for localhost
assert!(should_bypass_proxy("http://localhost:8080/path", &no_proxy));
// Should bypass for 127.0.0.1
assert!(should_bypass_proxy("https://127.0.0.1:3000/api", &no_proxy));
// Should bypass for wildcard match
assert!(should_bypass_proxy(
"http://sub.example.com/path",
&no_proxy
));
assert!(should_bypass_proxy("https://api.example.com/v1", &no_proxy));
// Should bypass for specific domain
assert!(should_bypass_proxy(
"http://specific.domain.com/test",
&no_proxy
));
// Should NOT bypass for other domains
assert!(!should_bypass_proxy("http://other.com/path", &no_proxy));
assert!(!should_bypass_proxy("https://example.org/api", &no_proxy));
// Should bypass everything with "*"
let wildcard_no_proxy = vec!["*".to_string()];
assert!(should_bypass_proxy(
"http://any.domain.com/path",
&wildcard_no_proxy
));
// Empty no_proxy should not bypass anything
let empty_no_proxy = vec![];
assert!(!should_bypass_proxy(
"http://any.domain.com/path",
&empty_no_proxy
));
}
#[test]
fn test_create_proxy_from_config() {
// Valid configuration should work
let mut config = create_test_proxy_config("http://proxy.example.com:8080");
config.username = Some("user".to_string());
config.password = Some("pass".to_string());
assert!(create_proxy_from_config(&config).is_ok());
// Invalid configuration should fail
let config = create_test_proxy_config("invalid-url");
assert!(create_proxy_from_config(&config).is_err());
}
#[test]
fn test_convert_headers() {
let mut headers = HashMap::new();
headers.insert("User-Agent".to_string(), "test-agent".to_string());
headers.insert("Authorization".to_string(), "Bearer token".to_string());
let header_map = _convert_headers(&headers).unwrap();
assert_eq!(header_map.len(), 2);
assert_eq!(header_map.get("User-Agent").unwrap(), "test-agent");
assert_eq!(header_map.get("Authorization").unwrap(), "Bearer token");
}
#[test]
fn test_proxy_ssl_verification_settings() {
// Test proxy config with SSL verification settings
let mut config = create_test_proxy_config("https://proxy.example.com:8080");
config.ignore_ssl = Some(true);
// Should validate successfully
assert!(validate_proxy_config(&config).is_ok());
// Test with all SSL settings as false
config.ignore_ssl = Some(false);
// Should still validate successfully
assert!(validate_proxy_config(&config).is_ok());
}
#[test]
fn test_proxy_config_with_mixed_ssl_settings() {
// Test with mixed SSL settings - ignore_ssl true, others false
let mut config = create_test_proxy_config("https://proxy.example.com:8080");
config.ignore_ssl = Some(true);
assert!(validate_proxy_config(&config).is_ok());
assert!(create_proxy_from_config(&config).is_ok());
}
#[test]
fn test_proxy_config_ssl_defaults() {
// Test with no SSL settings (should use None defaults)
let config = create_test_proxy_config("https://proxy.example.com:8080");
assert_eq!(config.ignore_ssl, None);
assert!(validate_proxy_config(&config).is_ok());
assert!(create_proxy_from_config(&config).is_ok());
}
#[test]
fn test_download_item_with_ssl_proxy() {
// Test that DownloadItem can be created with SSL proxy configuration
let mut proxy_config = create_test_proxy_config("https://proxy.example.com:8080");
proxy_config.ignore_ssl = Some(true);
let download_item = DownloadItem {
url: "https://example.com/file.zip".to_string(),
save_path: "downloads/file.zip".to_string(),
proxy: Some(proxy_config),
};
assert!(download_item.proxy.is_some());
let proxy = download_item.proxy.unwrap();
assert_eq!(proxy.ignore_ssl, Some(true));
}
#[test]
fn test_client_creation_with_ssl_settings() {
// Test client creation with SSL settings
let mut proxy_config = create_test_proxy_config("https://proxy.example.com:8080");
proxy_config.ignore_ssl = Some(true);
let download_item = DownloadItem {
url: "https://example.com/file.zip".to_string(),
save_path: "downloads/file.zip".to_string(),
proxy: Some(proxy_config),
};
let header_map = HeaderMap::new();
let result = _get_client_for_item(&download_item, &header_map);
// Should create client successfully even with SSL settings
assert!(result.is_ok());
}
#[test]
fn test_proxy_config_with_http_and_ssl_settings() {
// Test that SSL settings work with HTTP proxy (though not typically used)
let mut config = create_test_proxy_config("http://proxy.example.com:8080");
config.ignore_ssl = Some(true);
assert!(validate_proxy_config(&config).is_ok());
assert!(create_proxy_from_config(&config).is_ok());
}
#[test]
fn test_proxy_config_with_socks_and_ssl_settings() {
// Test that SSL settings work with SOCKS proxy
let mut config = create_test_proxy_config("socks5://proxy.example.com:1080");
config.ignore_ssl = Some(false);
assert!(validate_proxy_config(&config).is_ok());
// SOCKS proxies are not supported by reqwest::Proxy::all()
// This test should expect an error for SOCKS proxies
let result = create_proxy_from_config(&config);
assert!(result.is_err());
// Test with HTTP proxy instead which is supported
let mut http_config = create_test_proxy_config("http://proxy.example.com:8080");
http_config.ignore_ssl = Some(false);
assert!(validate_proxy_config(&http_config).is_ok());
assert!(create_proxy_from_config(&http_config).is_ok());
}
#[test]
fn test_download_item_creation() {
let item = DownloadItem {
url: "https://example.com/file.tar.gz".to_string(),
save_path: "models/test.tar.gz".to_string(),
proxy: None,
};
assert_eq!(item.url, "https://example.com/file.tar.gz");
assert_eq!(item.save_path, "models/test.tar.gz");
}
#[test]
fn test_download_event_creation() {
let event = DownloadEvent {
transferred: 1024,
total: 2048,
};
assert_eq!(event.transferred, 1024);
assert_eq!(event.total, 2048);
}
#[test]
fn test_err_to_string() {
let error = "Test error";
let result = err_to_string(error);
assert_eq!(result, "Error: Test error");
}
#[test]
fn test_convert_headers_valid() {
let mut headers = HashMap::new();
headers.insert("Content-Type".to_string(), "application/json".to_string());
headers.insert("Authorization".to_string(), "Bearer token123".to_string());
let result = _convert_headers(&headers);
assert!(result.is_ok());
let header_map = result.unwrap();
assert_eq!(header_map.len(), 2);
assert_eq!(header_map.get("Content-Type").unwrap(), "application/json");
assert_eq!(header_map.get("Authorization").unwrap(), "Bearer token123");
}
#[test]
fn test_convert_headers_invalid_header_name() {
let mut headers = HashMap::new();
headers.insert("Invalid\nHeader".to_string(), "value".to_string());
let result = _convert_headers(&headers);
assert!(result.is_err());
}
#[test]
fn test_convert_headers_invalid_header_value() {
let mut headers = HashMap::new();
headers.insert("Content-Type".to_string(), "invalid\nvalue".to_string());
let result = _convert_headers(&headers);
assert!(result.is_err());
}
#[test]
fn test_download_manager_state_default() {
let state = DownloadManagerState::default();
assert!(state.cancel_tokens.is_empty());
}
#[test]
fn test_download_event_serialization() {
let event = DownloadEvent {
transferred: 512,
total: 1024,
};
let json = serde_json::to_string(&event).unwrap();
assert!(json.contains("\"transferred\":512"));
assert!(json.contains("\"total\":1024"));
}
#[test]
fn test_download_item_deserialization() {
let json = r#"{"url":"https://example.com/file.zip","save_path":"downloads/file.zip"}"#;
let item: DownloadItem = serde_json::from_str(json).unwrap();
assert_eq!(item.url, "https://example.com/file.zip");
assert_eq!(item.save_path, "downloads/file.zip");
}

View File

@ -0,0 +1,53 @@
use std::fs;
use std::path::PathBuf;
use tauri::AppHandle;
use crate::core::app::commands::get_jan_data_folder_path;
use crate::core::setup;
#[tauri::command]
pub fn get_jan_extensions_path(app_handle: tauri::AppHandle) -> PathBuf {
get_jan_data_folder_path(app_handle).join("extensions")
}
#[tauri::command]
pub fn install_extensions(app: AppHandle) {
if let Err(err) = setup::install_extensions(app, true) {
log::error!("Failed to install extensions: {}", err);
}
}
#[tauri::command]
pub fn get_active_extensions(app: AppHandle) -> Vec<serde_json::Value> {
let mut path = get_jan_extensions_path(app);
path.push("extensions.json");
log::info!("get jan extensions, path: {:?}", path);
let contents = fs::read_to_string(path);
let contents: Vec<serde_json::Value> = match contents {
Ok(data) => match serde_json::from_str::<Vec<serde_json::Value>>(&data) {
Ok(exts) => exts
.into_iter()
.map(|ext| {
serde_json::json!({
"url": ext["url"],
"name": ext["name"],
"productName": ext["productName"],
"active": ext["_active"],
"description": ext["description"],
"version": ext["version"]
})
})
.collect(),
Err(error) => {
log::error!("Failed to parse extensions.json: {}", error);
vec![]
}
},
Err(error) => {
log::error!("Failed to read extensions.json: {}", error);
vec![]
}
};
return contents;
}

View File

@ -0,0 +1 @@
pub mod commands;

View File

@ -0,0 +1,204 @@
// WARNING: These APIs will be deprecated soon because FS API access is being removed from the frontend.
// They are kept so the legacy frontend implementation keeps working until that removal lands.
use super::helpers::resolve_path;
use super::models::FileStat;
use std::fs;
use tauri::Runtime;
#[tauri::command]
pub fn rm<R: Runtime>(app_handle: tauri::AppHandle<R>, args: Vec<String>) -> Result<(), String> {
if args.is_empty() || args[0].is_empty() {
return Err("rm error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
if path.is_file() {
fs::remove_file(&path).map_err(|e| e.to_string())?;
} else if path.is_dir() {
fs::remove_dir_all(&path).map_err(|e| e.to_string())?;
} else {
return Err("rm error: Path does not exist".to_string());
}
Ok(())
}
#[tauri::command]
pub fn mkdir<R: Runtime>(app_handle: tauri::AppHandle<R>, args: Vec<String>) -> Result<(), String> {
if args.is_empty() || args[0].is_empty() {
return Err("mkdir error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
fs::create_dir_all(&path).map_err(|e| e.to_string())
}
#[tauri::command]
pub fn join_path<R: Runtime>(
app_handle: tauri::AppHandle<R>,
args: Vec<String>,
) -> Result<String, String> {
if args.is_empty() {
return Err("join_path error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
let joined_path = args[1..].iter().fold(path, |acc, part| acc.join(part));
Ok(joined_path.to_string_lossy().to_string())
}
#[tauri::command]
pub fn exists_sync<R: Runtime>(
app_handle: tauri::AppHandle<R>,
args: Vec<String>,
) -> Result<bool, String> {
if args.is_empty() || args[0].is_empty() {
return Err("exist_sync error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
Ok(path.exists())
}
#[tauri::command]
pub fn file_stat<R: Runtime>(
app_handle: tauri::AppHandle<R>,
args: String,
) -> Result<FileStat, String> {
if args.is_empty() {
return Err("file_stat error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args);
let metadata = fs::metadata(&path).map_err(|e| e.to_string())?;
let is_directory = metadata.is_dir();
let size = if is_directory { 0 } else { metadata.len() };
let file_stat = FileStat { is_directory, size };
Ok(file_stat)
}
#[tauri::command]
pub fn read_file_sync<R: Runtime>(
app_handle: tauri::AppHandle<R>,
args: Vec<String>,
) -> Result<String, String> {
if args.is_empty() || args[0].is_empty() {
return Err("read_file_sync error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
fs::read_to_string(&path).map_err(|e| e.to_string())
}
#[tauri::command]
pub fn write_file_sync<R: Runtime>(
app_handle: tauri::AppHandle<R>,
args: Vec<String>,
) -> Result<(), String> {
if args.len() < 2 || args[0].is_empty() || args[1].is_empty() {
return Err("write_file_sync error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
let content = &args[1];
fs::write(&path, content).map_err(|e| e.to_string())
}
#[tauri::command]
pub fn readdir_sync<R: Runtime>(
app_handle: tauri::AppHandle<R>,
args: Vec<String>,
) -> Result<Vec<String>, String> {
if args.is_empty() || args[0].is_empty() {
return Err("read_dir_sync error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
let entries = fs::read_dir(&path).map_err(|e| e.to_string())?;
let paths: Vec<String> = entries
.filter_map(|entry| entry.ok())
.map(|entry| entry.path().to_string_lossy().to_string())
.collect();
Ok(paths)
}
#[tauri::command]
pub fn write_yaml(
app: tauri::AppHandle,
data: serde_json::Value,
save_path: &str,
) -> Result<(), String> {
// TODO: have an internal function to check scope
let jan_data_folder = crate::core::app::commands::get_jan_data_folder_path(app.clone());
let save_path = jan_utils::normalize_path(&jan_data_folder.join(save_path));
if !save_path.starts_with(&jan_data_folder) {
return Err(format!(
"Error: save path {} is not under jan_data_folder {}",
save_path.to_string_lossy(),
jan_data_folder.to_string_lossy(),
));
}
let file = fs::File::create(&save_path).map_err(|e| e.to_string())?;
let mut writer = std::io::BufWriter::new(file);
serde_yaml::to_writer(&mut writer, &data).map_err(|e| e.to_string())?;
Ok(())
}
#[tauri::command]
pub fn read_yaml(app: tauri::AppHandle, path: &str) -> Result<serde_json::Value, String> {
let jan_data_folder = crate::core::app::commands::get_jan_data_folder_path(app.clone());
let path = jan_utils::normalize_path(&jan_data_folder.join(path));
if !path.starts_with(&jan_data_folder) {
return Err(format!(
"Error: path {} is not under jan_data_folder {}",
path.to_string_lossy(),
jan_data_folder.to_string_lossy(),
));
}
let file = fs::File::open(&path).map_err(|e| e.to_string())?;
let reader = std::io::BufReader::new(file);
let data: serde_json::Value = serde_yaml::from_reader(reader).map_err(|e| e.to_string())?;
Ok(data)
}
#[tauri::command]
pub fn decompress(app: tauri::AppHandle, path: &str, output_dir: &str) -> Result<(), String> {
let jan_data_folder = crate::core::app::commands::get_jan_data_folder_path(app.clone());
let path_buf = jan_utils::normalize_path(&jan_data_folder.join(path));
if !path_buf.starts_with(&jan_data_folder) {
return Err(format!(
"Error: path {} is not under jan_data_folder {}",
path_buf.to_string_lossy(),
jan_data_folder.to_string_lossy(),
));
}
let output_dir_buf = jan_utils::normalize_path(&jan_data_folder.join(output_dir));
if !output_dir_buf.starts_with(&jan_data_folder) {
return Err(format!(
"Error: output directory {} is not under jan_data_folder {}",
output_dir_buf.to_string_lossy(),
jan_data_folder.to_string_lossy(),
));
}
// Ensure output directory exists
fs::create_dir_all(&output_dir_buf).map_err(|e| {
format!(
"Failed to create output directory {}: {}",
output_dir_buf.to_string_lossy(),
e
)
})?;
let file = fs::File::open(&path_buf).map_err(|e| e.to_string())?;
if path.ends_with(".tar.gz") {
let tar = flate2::read::GzDecoder::new(file);
let mut archive = tar::Archive::new(tar);
archive.unpack(&output_dir_buf).map_err(|e| e.to_string())?;
} else {
return Err("Unsupported file format. Only .tar.gz is supported.".to_string());
}
Ok(())
}
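// A note on the scoping above, with illustrative paths: both arguments are joined onto
// the Jan data folder before use, so a call such as
//   decompress(app, "downloads/backend.tar.gz", "backends/current")
// unpacks under the data folder, while an argument that normalizes to a location
// outside of it (for example one containing "..") is rejected before anything is
// created or opened.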

View File

@ -0,0 +1,23 @@
use crate::core::app::commands::get_jan_data_folder_path;
use jan_utils::normalize_file_path;
use std::path::PathBuf;
use tauri::Runtime;
pub fn resolve_path<R: Runtime>(app_handle: tauri::AppHandle<R>, path: &str) -> PathBuf {
let path = if path.starts_with("file:/") || path.starts_with("file:\\") {
let normalized = normalize_file_path(path);
let relative_normalized = normalized
.trim_start_matches(std::path::MAIN_SEPARATOR)
.trim_start_matches('/')
.trim_start_matches('\\');
get_jan_data_folder_path(app_handle).join(relative_normalized)
} else {
PathBuf::from(path)
};
if path.starts_with("http://") || path.starts_with("https://") {
path
} else {
path.canonicalize().unwrap_or(path)
}
}
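// A minimal sketch of the mapping above, using the same mock app as the fs tests:
// a "file://" path is treated as relative to the Jan data folder, anything else is
// taken as-is (and canonicalized only when it already exists).
#[cfg(test)]
mod resolve_path_sketch {
    use super::resolve_path;
    use crate::core::app::commands::get_jan_data_folder_path;
    use tauri::test::mock_app;

    #[test]
    fn maps_file_scheme_into_data_folder() {
        let app = mock_app();
        let data_folder = get_jan_data_folder_path(app.handle().clone());
        let resolved = resolve_path(app.handle().clone(), "file://models/llama.gguf");
        // Whatever the exact scheme stripping does, the result must stay inside the
        // Jan data folder.
        assert!(resolved.starts_with(&data_folder));
    }
}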

View File

@ -0,0 +1,6 @@
pub mod commands;
pub mod helpers;
pub mod models;
#[cfg(test)]
mod tests;

View File

@ -0,0 +1,6 @@
#[derive(serde::Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct FileStat {
pub is_directory: bool,
pub size: u64,
}

View File

@ -0,0 +1,90 @@
use super::commands::*;
use crate::core::app::commands::get_jan_data_folder_path;
use std::fs::{self, File};
use std::io::Write;
use tauri::test::mock_app;
#[test]
fn test_rm() {
let app = mock_app();
let path = "test_rm_dir";
fs::create_dir_all(get_jan_data_folder_path(app.handle().clone()).join(path)).unwrap();
let args = vec![format!("file://{}", path).to_string()];
let result = rm(app.handle().clone(), args);
assert!(result.is_ok());
assert!(!get_jan_data_folder_path(app.handle().clone())
.join(path)
.exists());
}
#[test]
fn test_mkdir() {
let app = mock_app();
let path = "test_mkdir_dir";
let args = vec![format!("file://{}", path).to_string()];
let result = mkdir(app.handle().clone(), args);
assert!(result.is_ok());
assert!(get_jan_data_folder_path(app.handle().clone())
.join(path)
.exists());
let _ = fs::remove_dir_all(get_jan_data_folder_path(app.handle().clone()).join(path));
}
#[test]
fn test_join_path() {
let app = mock_app();
let path = "file://test_dir";
let args = vec![path.to_string(), "test_file".to_string()];
let result = join_path(app.handle().clone(), args).unwrap();
assert_eq!(
result,
get_jan_data_folder_path(app.handle().clone())
.join(&format!("test_dir{}test_file", std::path::MAIN_SEPARATOR))
.to_string_lossy()
.to_string()
);
}
#[test]
fn test_exists_sync() {
let app = mock_app();
let path = "file://test_exists_sync_file";
let dir_path = get_jan_data_folder_path(app.handle().clone());
fs::create_dir_all(&dir_path).unwrap();
let file_path = dir_path.join("test_exists_sync_file");
File::create(&file_path).unwrap();
let args: Vec<String> = vec![path.to_string()];
let result = exists_sync(app.handle().clone(), args).unwrap();
assert!(result);
fs::remove_file(file_path).unwrap();
}
#[test]
fn test_read_file_sync() {
let app = mock_app();
let path = "file://test_read_file_sync_file";
let dir_path = get_jan_data_folder_path(app.handle().clone());
fs::create_dir_all(&dir_path).unwrap();
let file_path = dir_path.join("test_read_file_sync_file");
let mut file = File::create(&file_path).unwrap();
file.write_all(b"test content").unwrap();
let args = vec![path.to_string()];
let result = read_file_sync(app.handle().clone(), args).unwrap();
assert_eq!(result, "test content".to_string());
fs::remove_file(file_path).unwrap();
}
#[test]
fn test_readdir_sync() {
let app = mock_app();
let dir_path = get_jan_data_folder_path(app.handle().clone()).join("test_readdir_sync_dir");
fs::create_dir_all(&dir_path).unwrap();
File::create(dir_path.join("file1.txt")).unwrap();
File::create(dir_path.join("file2.txt")).unwrap();
let args = vec![dir_path.to_string_lossy().to_string()];
let result = readdir_sync(app.handle().clone(), args).unwrap();
assert_eq!(result.len(), 2);
let _ = fs::remove_dir_all(dir_path);
}

View File

@ -1,246 +0,0 @@
// WARNING: These APIs will be deprecated soon due to removing FS API access from frontend.
// It's added to ensure the legacy implementation from frontend still functions before removal.
use crate::core::cmd::get_jan_data_folder_path;
use std::fs;
use std::path::PathBuf;
use tauri::Runtime;
#[tauri::command]
pub fn rm<R: Runtime>(app_handle: tauri::AppHandle<R>, args: Vec<String>) -> Result<(), String> {
if args.is_empty() || args[0].is_empty() {
return Err("rm error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
if path.is_file() {
fs::remove_file(&path).map_err(|e| e.to_string())?;
} else if path.is_dir() {
fs::remove_dir_all(&path).map_err(|e| e.to_string())?;
} else {
return Err("rm error: Path does not exist".to_string());
}
Ok(())
}
#[tauri::command]
pub fn mkdir<R: Runtime>(app_handle: tauri::AppHandle<R>, args: Vec<String>) -> Result<(), String> {
if args.is_empty() || args[0].is_empty() {
return Err("mkdir error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
fs::create_dir_all(&path).map_err(|e| e.to_string())
}
#[tauri::command]
pub fn join_path<R: Runtime>(
app_handle: tauri::AppHandle<R>,
args: Vec<String>,
) -> Result<String, String> {
if args.is_empty() {
return Err("join_path error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
let joined_path = args[1..].iter().fold(path, |acc, part| acc.join(part));
Ok(joined_path.to_string_lossy().to_string())
}
#[tauri::command]
pub fn exists_sync<R: Runtime>(
app_handle: tauri::AppHandle<R>,
args: Vec<String>,
) -> Result<bool, String> {
if args.is_empty() || args[0].is_empty() {
return Err("exist_sync error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
Ok(path.exists())
}
#[derive(serde::Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct FileStat {
pub is_directory: bool,
pub size: u64,
}
#[tauri::command]
pub fn file_stat<R: Runtime>(
app_handle: tauri::AppHandle<R>,
args: String,
) -> Result<FileStat, String> {
if args.is_empty() {
return Err("file_stat error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args);
let metadata = fs::metadata(&path).map_err(|e| e.to_string())?;
let is_directory = metadata.is_dir();
let size = if is_directory { 0 } else { metadata.len() };
let file_stat = FileStat { is_directory, size };
Ok(file_stat)
}
#[tauri::command]
pub fn read_file_sync<R: Runtime>(
app_handle: tauri::AppHandle<R>,
args: Vec<String>,
) -> Result<String, String> {
if args.is_empty() || args[0].is_empty() {
return Err("read_file_sync error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
fs::read_to_string(&path).map_err(|e| e.to_string())
}
#[tauri::command]
pub fn write_file_sync<R: Runtime>(
app_handle: tauri::AppHandle<R>,
args: Vec<String>,
) -> Result<(), String> {
if args.len() < 2 || args[0].is_empty() || args[1].is_empty() {
return Err("write_file_sync error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
let content = &args[1];
fs::write(&path, content).map_err(|e| e.to_string())
}
#[tauri::command]
pub fn readdir_sync<R: Runtime>(
app_handle: tauri::AppHandle<R>,
args: Vec<String>,
) -> Result<Vec<String>, String> {
if args.is_empty() || args[0].is_empty() {
return Err("read_dir_sync error: Invalid argument".to_string());
}
let path = resolve_path(app_handle, &args[0]);
let entries = fs::read_dir(&path).map_err(|e| e.to_string())?;
let paths: Vec<String> = entries
.filter_map(|entry| entry.ok())
.map(|entry| entry.path().to_string_lossy().to_string())
.collect();
Ok(paths)
}
fn normalize_file_path(path: &str) -> String {
path.replace("file:/", "").replace("file:\\", "")
}
fn resolve_path<R: Runtime>(app_handle: tauri::AppHandle<R>, path: &str) -> PathBuf {
let path = if path.starts_with("file:/") || path.starts_with("file:\\") {
let normalized = normalize_file_path(path);
let relative_normalized = normalized
.trim_start_matches(std::path::MAIN_SEPARATOR)
.trim_start_matches('/')
.trim_start_matches('\\');
get_jan_data_folder_path(app_handle).join(relative_normalized)
} else {
PathBuf::from(path)
};
if path.starts_with("http://") || path.starts_with("https://") {
path
} else {
path.canonicalize().unwrap_or(path)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::{self, File};
use std::io::Write;
use tauri::test::mock_app;
#[test]
fn test_rm() {
let app = mock_app();
let path = "test_rm_dir";
fs::create_dir_all(get_jan_data_folder_path(app.handle().clone()).join(path)).unwrap();
let args = vec![format!("file://{}", path).to_string()];
let result = rm(app.handle().clone(), args);
assert!(result.is_ok());
assert!(!get_jan_data_folder_path(app.handle().clone())
.join(path)
.exists());
}
#[test]
fn test_mkdir() {
let app = mock_app();
let path = "test_mkdir_dir";
let args = vec![format!("file://{}", path).to_string()];
let result = mkdir(app.handle().clone(), args);
assert!(result.is_ok());
assert!(get_jan_data_folder_path(app.handle().clone())
.join(path)
.exists());
let _ = fs::remove_dir_all(get_jan_data_folder_path(app.handle().clone()).join(path));
}
#[test]
fn test_join_path() {
let app = mock_app();
let path = "file://test_dir";
let args = vec![path.to_string(), "test_file".to_string()];
let result = join_path(app.handle().clone(), args).unwrap();
assert_eq!(
result,
get_jan_data_folder_path(app.handle().clone())
.join(&format!("test_dir{}test_file", std::path::MAIN_SEPARATOR))
.to_string_lossy()
.to_string()
);
}
#[test]
fn test_exists_sync() {
let app = mock_app();
let path = "file://test_exists_sync_file";
let dir_path = get_jan_data_folder_path(app.handle().clone());
fs::create_dir_all(&dir_path).unwrap();
let file_path = dir_path.join("test_exists_sync_file");
File::create(&file_path).unwrap();
let args: Vec<String> = vec![path.to_string()];
let result = exists_sync(app.handle().clone(), args).unwrap();
assert!(result);
fs::remove_file(file_path).unwrap();
}
#[test]
fn test_read_file_sync() {
let app = mock_app();
let path = "file://test_read_file_sync_file";
let dir_path = get_jan_data_folder_path(app.handle().clone());
fs::create_dir_all(&dir_path).unwrap();
let file_path = dir_path.join("test_read_file_sync_file");
let mut file = File::create(&file_path).unwrap();
file.write_all(b"test content").unwrap();
let args = vec![path.to_string()];
let result = read_file_sync(app.handle().clone(), args).unwrap();
assert_eq!(result, "test content".to_string());
fs::remove_file(file_path).unwrap();
}
#[test]
fn test_readdir_sync() {
let app = mock_app();
let dir_path = get_jan_data_folder_path(app.handle().clone()).join("test_readdir_sync_dir");
fs::create_dir_all(&dir_path).unwrap();
File::create(dir_path.join("file1.txt")).unwrap();
File::create(dir_path.join("file2.txt")).unwrap();
let args = vec![dir_path.to_string_lossy().to_string()];
let result = readdir_sync(app.handle().clone(), args).unwrap();
assert_eq!(result.len(), 2);
let _ = fs::remove_dir_all(dir_path);
}
}

View File

@ -1,359 +0,0 @@
pub mod amd;
pub mod nvidia;
pub mod vulkan;
use std::sync::OnceLock;
use sysinfo::System;
use tauri::{path::BaseDirectory, Manager};
static SYSTEM_INFO: OnceLock<SystemInfo> = OnceLock::new();
#[derive(Clone, serde::Serialize, Debug)]
struct CpuStaticInfo {
name: String,
core_count: usize,
arch: String,
extensions: Vec<String>,
}
impl CpuStaticInfo {
fn new() -> Self {
let mut system = System::new();
system.refresh_cpu_all();
let name = system
.cpus()
.first()
.map(|cpu| {
let brand = cpu.brand();
if brand.is_empty() {
cpu.name()
} else {
brand
}
})
.unwrap_or("unknown")
.to_string();
CpuStaticInfo {
name,
core_count: System::physical_core_count().unwrap_or(0),
arch: std::env::consts::ARCH.to_string(),
extensions: CpuStaticInfo::get_extensions(),
}
}
// TODO: see if we need to check for all CPU extensions
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn get_extensions() -> Vec<String> {
let mut exts = vec![];
// fpu is always present on modern x86 processors,
// but is_x86_feature_detected doesn't support it
exts.push("fpu".to_string());
if is_x86_feature_detected!("mmx") {
exts.push("mmx".to_string());
}
if is_x86_feature_detected!("sse") {
exts.push("sse".to_string());
}
if is_x86_feature_detected!("sse2") {
exts.push("sse2".to_string());
}
if is_x86_feature_detected!("sse3") {
exts.push("sse3".to_string());
}
if is_x86_feature_detected!("ssse3") {
exts.push("ssse3".to_string());
}
if is_x86_feature_detected!("sse4.1") {
exts.push("sse4_1".to_string());
}
if is_x86_feature_detected!("sse4.2") {
exts.push("sse4_2".to_string());
}
if is_x86_feature_detected!("pclmulqdq") {
exts.push("pclmulqdq".to_string());
}
if is_x86_feature_detected!("avx") {
exts.push("avx".to_string());
}
if is_x86_feature_detected!("avx2") {
exts.push("avx2".to_string());
}
if is_x86_feature_detected!("avx512f") {
exts.push("avx512_f".to_string());
}
if is_x86_feature_detected!("avx512dq") {
exts.push("avx512_dq".to_string());
}
if is_x86_feature_detected!("avx512ifma") {
exts.push("avx512_ifma".to_string());
}
if is_x86_feature_detected!("avx512pf") {
exts.push("avx512_pf".to_string());
}
if is_x86_feature_detected!("avx512er") {
exts.push("avx512_er".to_string());
}
if is_x86_feature_detected!("avx512cd") {
exts.push("avx512_cd".to_string());
}
if is_x86_feature_detected!("avx512bw") {
exts.push("avx512_bw".to_string());
}
if is_x86_feature_detected!("avx512vl") {
exts.push("avx512_vl".to_string());
}
if is_x86_feature_detected!("avx512vbmi") {
exts.push("avx512_vbmi".to_string());
}
if is_x86_feature_detected!("avx512vbmi2") {
exts.push("avx512_vbmi2".to_string());
}
if is_x86_feature_detected!("avx512vnni") {
exts.push("avx512_vnni".to_string());
}
if is_x86_feature_detected!("avx512bitalg") {
exts.push("avx512_bitalg".to_string());
}
if is_x86_feature_detected!("avx512vpopcntdq") {
exts.push("avx512_vpopcntdq".to_string());
}
// avx512_4vnniw and avx512_4fmaps are only available on Intel Knights Mill, which are
// very rare. https://en.wikipedia.org/wiki/AVX-512
// is_x86_feature_detected doesn't support them
if is_x86_feature_detected!("avx512vp2intersect") {
exts.push("avx512_vp2intersect".to_string());
}
if is_x86_feature_detected!("aes") {
exts.push("aes".to_string());
}
if is_x86_feature_detected!("f16c") {
exts.push("f16c".to_string());
}
exts
}
// Cortex always returns empty list for non-x86
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
fn get_extensions() -> Vec<String> {
vec![]
}
}
// https://devicehunt.com/all-pci-vendors
pub const VENDOR_ID_AMD: u32 = 0x1002;
pub const VENDOR_ID_NVIDIA: u32 = 0x10DE;
pub const VENDOR_ID_INTEL: u32 = 0x8086;
#[derive(Debug, Clone)]
pub enum Vendor {
AMD,
NVIDIA,
Intel,
Unknown(u32),
}
impl serde::Serialize for Vendor {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Vendor::AMD => "AMD".serialize(serializer),
Vendor::NVIDIA => "NVIDIA".serialize(serializer),
Vendor::Intel => "Intel".serialize(serializer),
Vendor::Unknown(vendor_id) => {
let formatted = format!("Unknown (vendor_id: {})", vendor_id);
serializer.serialize_str(&formatted)
}
}
}
}
impl Vendor {
pub fn from_vendor_id(vendor_id: u32) -> Self {
match vendor_id {
VENDOR_ID_AMD => Vendor::AMD,
VENDOR_ID_NVIDIA => Vendor::NVIDIA,
VENDOR_ID_INTEL => Vendor::Intel,
_ => Vendor::Unknown(vendor_id),
}
}
}
#[derive(Clone, Debug, serde::Serialize)]
pub struct GpuInfo {
pub name: String,
pub total_memory: u64,
pub vendor: Vendor,
pub uuid: String,
pub driver_version: String,
pub nvidia_info: Option<nvidia::NvidiaInfo>,
pub vulkan_info: Option<vulkan::VulkanInfo>,
}
impl GpuInfo {
pub fn get_usage(&self) -> GpuUsage {
match self.vendor {
Vendor::NVIDIA => self.get_usage_nvidia(),
Vendor::AMD => self.get_usage_amd(),
_ => self.get_usage_unsupported(),
}
}
pub fn get_usage_unsupported(&self) -> GpuUsage {
GpuUsage {
uuid: self.uuid.clone(),
used_memory: 0,
total_memory: 0,
}
}
}
#[derive(serde::Serialize, Clone, Debug)]
pub struct SystemInfo {
cpu: CpuStaticInfo,
os_type: String,
os_name: String,
total_memory: u64,
gpus: Vec<GpuInfo>,
}
#[derive(serde::Serialize, Clone, Debug)]
pub struct GpuUsage {
uuid: String,
used_memory: u64,
total_memory: u64,
}
#[derive(serde::Serialize, Clone, Debug)]
pub struct SystemUsage {
cpu: f32,
used_memory: u64,
total_memory: u64,
gpus: Vec<GpuUsage>,
}
fn get_jan_libvulkan_path<R: tauri::Runtime>(app: tauri::AppHandle<R>) -> String {
let lib_name = if cfg!(target_os = "windows") {
"vulkan-1.dll"
} else if cfg!(target_os = "linux") {
"libvulkan.so"
} else {
return "".to_string();
};
// NOTE: this does not work in test mode (mock app)
match app.path().resolve(
format!("resources/lib/{}", lib_name),
BaseDirectory::Resource,
) {
Ok(lib_path) => lib_path.to_string_lossy().to_string(),
Err(_) => "".to_string(),
}
}
#[tauri::command]
pub fn get_system_info<R: tauri::Runtime>(app: tauri::AppHandle<R>) -> SystemInfo {
SYSTEM_INFO
.get_or_init(|| {
let mut system = System::new();
system.refresh_memory();
let mut gpu_map = std::collections::HashMap::new();
for gpu in nvidia::get_nvidia_gpus() {
gpu_map.insert(gpu.uuid.clone(), gpu);
}
// try system vulkan first
let paths = vec!["".to_string(), get_jan_libvulkan_path(app.clone())];
let mut vulkan_gpus = vec![];
for path in paths {
vulkan_gpus = vulkan::get_vulkan_gpus(&path);
if !vulkan_gpus.is_empty() {
break;
}
}
for gpu in vulkan_gpus {
match gpu_map.get_mut(&gpu.uuid) {
// for existing NVIDIA GPUs, add Vulkan info
Some(nvidia_gpu) => {
nvidia_gpu.vulkan_info = gpu.vulkan_info;
}
None => {
gpu_map.insert(gpu.uuid.clone(), gpu);
}
}
}
let os_type = if cfg!(target_os = "windows") {
"windows"
} else if cfg!(target_os = "macos") {
"macos"
} else if cfg!(target_os = "linux") {
"linux"
} else {
"unknown"
};
let os_name = System::long_os_version().unwrap_or("Unknown".to_string());
SystemInfo {
cpu: CpuStaticInfo::new(),
os_type: os_type.to_string(),
os_name,
total_memory: system.total_memory() / 1024 / 1024, // bytes to MiB
gpus: gpu_map.into_values().collect(),
}
})
.clone()
}
#[tauri::command]
pub fn get_system_usage<R: tauri::Runtime>(app: tauri::AppHandle<R>) -> SystemUsage {
let mut system = System::new();
system.refresh_memory();
// need to refresh 2 times to get CPU usage
system.refresh_cpu_all();
std::thread::sleep(sysinfo::MINIMUM_CPU_UPDATE_INTERVAL);
system.refresh_cpu_all();
let cpus = system.cpus();
let cpu_usage =
cpus.iter().map(|cpu| cpu.cpu_usage()).sum::<f32>() / (cpus.len().max(1) as f32);
SystemUsage {
cpu: cpu_usage,
used_memory: system.used_memory() / 1024 / 1024, // bytes to MiB,
total_memory: system.total_memory() / 1024 / 1024, // bytes to MiB,
gpus: get_system_info(app.clone())
.gpus
.iter()
.map(|gpu| gpu.get_usage())
.collect(),
}
}
#[cfg(test)]
mod tests {
use super::*;
use tauri::test::mock_app;
#[test]
fn test_system_info() {
let app = mock_app();
let info = get_system_info(app.handle().clone());
println!("System Static Info: {:?}", info);
}
#[test]
fn test_system_usage() {
let app = mock_app();
let usage = get_system_usage(app.handle().clone());
println!("System Usage Info: {:?}", usage);
}
}

View File

@ -0,0 +1,238 @@
use rmcp::model::{CallToolRequestParam, CallToolResult, Tool};
use rmcp::{service::RunningService, RoleClient};
use serde_json::{Map, Value};
use std::{collections::HashMap, sync::Arc};
use tauri::{AppHandle, Emitter, Runtime, State};
use tokio::{sync::Mutex, time::timeout};
use super::{
constants::{DEFAULT_MCP_CONFIG, MCP_TOOL_CALL_TIMEOUT},
helpers::{restart_active_mcp_servers, start_mcp_server_with_restart, stop_mcp_servers},
};
use crate::core::{app::commands::get_jan_data_folder_path, state::AppState};
use std::fs;
#[tauri::command]
pub async fn activate_mcp_server<R: Runtime>(
app: tauri::AppHandle<R>,
state: State<'_, AppState>,
name: String,
config: Value,
) -> Result<(), String> {
let servers: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>> =
state.mcp_servers.clone();
// Use the modified start_mcp_server_with_restart that returns first attempt result
start_mcp_server_with_restart(app, servers, name, config, Some(3)).await
}
#[tauri::command]
pub async fn deactivate_mcp_server(state: State<'_, AppState>, name: String) -> Result<(), String> {
log::info!("Deactivating MCP server: {}", name);
// First, mark server as manually deactivated to prevent restart
// Remove from active servers list to prevent restart
{
let mut active_servers = state.mcp_active_servers.lock().await;
active_servers.remove(&name);
log::info!("Removed MCP server {} from active servers list", name);
}
// Mark as not successfully connected to prevent restart logic
{
let mut connected = state.mcp_successfully_connected.lock().await;
connected.insert(name.clone(), false);
log::info!("Marked MCP server {} as not successfully connected", name);
}
// Reset restart count
{
let mut counts = state.mcp_restart_counts.lock().await;
counts.remove(&name);
log::info!("Reset restart count for MCP server {}", name);
}
// Now remove and stop the server
let servers = state.mcp_servers.clone();
let mut servers_map = servers.lock().await;
let service = servers_map
.remove(&name)
.ok_or_else(|| format!("Server {} not found", name))?;
// Release the lock before calling cancel
drop(servers_map);
service.cancel().await.map_err(|e| e.to_string())?;
log::info!("Server {name} stopped successfully and marked as deactivated.");
Ok(())
}
#[tauri::command]
pub async fn restart_mcp_servers(app: AppHandle, state: State<'_, AppState>) -> Result<(), String> {
let servers = state.mcp_servers.clone();
// Stop the servers
stop_mcp_servers(state.mcp_servers.clone()).await?;
// Restart only previously active servers (like cortex)
restart_active_mcp_servers(&app, servers).await?;
app.emit("mcp-update", "MCP servers updated")
.map_err(|e| format!("Failed to emit event: {}", e))?;
Ok(())
}
/// Reset MCP restart count for a specific server (like cortex reset)
#[tauri::command]
pub async fn reset_mcp_restart_count(
state: State<'_, AppState>,
server_name: String,
) -> Result<(), String> {
let mut counts = state.mcp_restart_counts.lock().await;
let count = match counts.get_mut(&server_name) {
Some(count) => count,
None => return Ok(()), // Server not found, nothing to reset
};
let old_count = *count;
*count = 0;
log::info!(
"MCP server {} restart count reset from {} to 0.",
server_name,
old_count
);
Ok(())
}
#[tauri::command]
pub async fn get_connected_servers(
_app: AppHandle,
state: State<'_, AppState>,
) -> Result<Vec<String>, String> {
let servers = state.mcp_servers.clone();
let servers_map = servers.lock().await;
Ok(servers_map.keys().cloned().collect())
}
/// Retrieves all available tools from all MCP servers
///
/// # Arguments
/// * `state` - Application state containing MCP server connections
///
/// # Returns
/// * `Result<Vec<Tool>, String>` - A vector of all tools if successful, or an error message if failed
///
/// This function:
/// 1. Locks the MCP servers mutex to access server connections
/// 2. Iterates through all connected servers
/// 3. Gets the list of tools from each server
/// 4. Combines all tools into a single vector
/// 5. Returns the combined list of all available tools
#[tauri::command]
pub async fn get_tools(state: State<'_, AppState>) -> Result<Vec<Tool>, String> {
let servers = state.mcp_servers.lock().await;
let mut all_tools: Vec<Tool> = Vec::new();
for (_, service) in servers.iter() {
// List tools with timeout
let tools_future = service.list_all_tools();
let tools = match timeout(MCP_TOOL_CALL_TIMEOUT, tools_future).await {
Ok(result) => result.map_err(|e| e.to_string())?,
Err(_) => {
log::warn!(
"Listing tools timed out after {} seconds",
MCP_TOOL_CALL_TIMEOUT.as_secs()
);
continue; // Skip this server and continue with others
}
};
for tool in tools {
all_tools.push(tool);
}
}
Ok(all_tools)
}
/// Calls a tool on an MCP server by name with optional arguments
///
/// # Arguments
/// * `state` - Application state containing MCP server connections
/// * `tool_name` - Name of the tool to call
/// * `arguments` - Optional map of argument names to values
///
/// # Returns
/// * `Result<CallToolResult, String>` - Result of the tool call if successful, or error message if failed
///
/// This function:
/// 1. Locks the MCP servers mutex to access server connections
/// 2. Searches through all servers for one containing the named tool
/// 3. When found, calls the tool on that server with the provided arguments
/// 4. Returns error if no server has the requested tool
#[tauri::command]
pub async fn call_tool(
state: State<'_, AppState>,
tool_name: String,
arguments: Option<Map<String, Value>>,
) -> Result<CallToolResult, String> {
let servers = state.mcp_servers.lock().await;
// Iterate through servers and find the first one that contains the tool
for (_, service) in servers.iter() {
let tools = match service.list_all_tools().await {
Ok(tools) => tools,
Err(_) => continue, // Skip this server if we can't list tools
};
if !tools.iter().any(|t| t.name == tool_name) {
continue; // Tool not found in this server, try next
}
println!("Found tool {} in server", tool_name);
// Call the tool with timeout
let tool_call = service.call_tool(CallToolRequestParam {
name: tool_name.clone().into(),
arguments,
});
return match timeout(MCP_TOOL_CALL_TIMEOUT, tool_call).await {
Ok(result) => result.map_err(|e| e.to_string()),
Err(_) => Err(format!(
"Tool call '{}' timed out after {} seconds",
tool_name,
MCP_TOOL_CALL_TIMEOUT.as_secs()
)),
};
}
Err(format!("Tool {} not found", tool_name))
}
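// A minimal sketch (assuming the serde_json Map/Value types used above, and a hypothetical
// "fetch" tool that takes a "url" argument): how a caller assembles the optional
// `arguments` map that call_tool forwards inside CallToolRequestParam.
fn example_fetch_arguments(url: &str) -> Option<Map<String, Value>> {
    let mut args = Map::new();
    // Key names and value types come from the tool's input schema on the MCP server.
    args.insert("url".to_string(), Value::String(url.to_string()));
    Some(args)
}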
#[tauri::command]
pub async fn get_mcp_configs(app: AppHandle) -> Result<String, String> {
let mut path = get_jan_data_folder_path(app);
path.push("mcp_config.json");
log::info!("read mcp configs, path: {:?}", path);
// Create default empty config if file doesn't exist
if !path.exists() {
log::info!("mcp_config.json not found, creating default empty config");
fs::write(&path, DEFAULT_MCP_CONFIG)
.map_err(|e| format!("Failed to create default MCP config: {}", e))?;
}
fs::read_to_string(path).map_err(|e| e.to_string())
}
#[tauri::command]
pub async fn save_mcp_configs(app: AppHandle, configs: String) -> Result<(), String> {
let mut path = get_jan_data_folder_path(app);
path.push("mcp_config.json");
log::info!("save mcp configs, path: {:?}", path);
fs::write(path, configs).map_err(|e| e.to_string())
}
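// A minimal sketch (assuming the JSON string returned by get_mcp_configs follows the
// DEFAULT_MCP_CONFIG layout): flip one server's "active" flag, then pass the result back
// to save_mcp_configs.
fn example_toggle_server_active(configs: &str, server: &str, active: bool) -> Result<String, String> {
    let mut root: Value = serde_json::from_str(configs).map_err(|e| e.to_string())?;
    if let Some(entry) = root.get_mut("mcpServers").and_then(|s| s.get_mut(server)) {
        // Value's IndexMut makes this an insert-or-overwrite on the server's JSON object.
        entry["active"] = Value::Bool(active);
    }
    serde_json::to_string_pretty(&root).map_err(|e| e.to_string())
}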

View File

@ -0,0 +1,46 @@
use std::time::Duration;
// MCP Constants
pub const MCP_TOOL_CALL_TIMEOUT: Duration = Duration::from_secs(30);
pub const MCP_BASE_RESTART_DELAY_MS: u64 = 1000; // Start with 1 second
pub const MCP_MAX_RESTART_DELAY_MS: u64 = 30000; // Cap at 30 seconds
pub const MCP_BACKOFF_MULTIPLIER: f64 = 2.0; // Double the delay each time
pub const DEFAULT_MCP_CONFIG: &str = r#"{
"mcpServers": {
"browsermcp": {
"command": "npx",
"args": ["@browsermcp/mcp"],
"env": {},
"active": false
},
"fetch": {
"command": "uvx",
"args": ["mcp-server-fetch"],
"env": {},
"active": false
},
"serper": {
"command": "npx",
"args": ["-y", "serper-search-scrape-mcp-server"],
"env": { "SERPER_API_KEY": "YOUR_SERPER_API_KEY_HERE" },
"active": false
},
"filesystem": {
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-filesystem",
"/path/to/other/allowed/dir"
],
"env": {},
"active": false
},
"sequential-thinking": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-sequential-thinking"],
"env": {},
"active": false
}
}
}"#;

View File

@ -1,7 +1,5 @@
use rmcp::model::{CallToolRequestParam, CallToolResult, Tool};
use rmcp::{service::RunningService, transport::TokioChildProcess, RoleClient, ServiceExt};
use serde_json::{Map, Value};
use std::fs;
use serde_json::Value;
use std::{collections::HashMap, env, sync::Arc, time::Duration};
use tauri::{AppHandle, Emitter, Manager, Runtime, State};
use tokio::{
@ -10,59 +8,11 @@ use tokio::{
time::{sleep, timeout},
};
use super::{
cmd::get_jan_data_folder_path,
state::AppState,
utils::can_override_npx,
use super::constants::{
MCP_BACKOFF_MULTIPLIER, MCP_BASE_RESTART_DELAY_MS, MCP_MAX_RESTART_DELAY_MS,
};
const DEFAULT_MCP_CONFIG: &str = r#"{
"mcpServers": {
"browsermcp": {
"command": "npx",
"args": ["@browsermcp/mcp"],
"env": {},
"active": false
},
"fetch": {
"command": "uvx",
"args": ["mcp-server-fetch"],
"env": {},
"active": false
},
"serper": {
"command": "npx",
"args": ["-y", "serper-search-scrape-mcp-server"],
"env": { "SERPER_API_KEY": "YOUR_SERPER_API_KEY_HERE" },
"active": false
},
"filesystem": {
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-filesystem",
"/path/to/other/allowed/dir"
],
"env": {},
"active": false
},
"sequential-thinking": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-sequential-thinking"],
"env": {},
"active": false
}
}
}
"#;
// Timeout for MCP tool calls (30 seconds)
const MCP_TOOL_CALL_TIMEOUT: Duration = Duration::from_secs(30);
// MCP server restart configuration with exponential backoff
const MCP_BASE_RESTART_DELAY_MS: u64 = 1000; // Start with 1 second
const MCP_MAX_RESTART_DELAY_MS: u64 = 30000; // Cap at 30 seconds
const MCP_BACKOFF_MULTIPLIER: f64 = 2.0; // Double the delay each time
use crate::core::{app::commands::get_jan_data_folder_path, state::AppState};
use jan_utils::can_override_npx;
/// Calculate exponential backoff delay with jitter
///
@ -71,43 +21,43 @@ const MCP_BACKOFF_MULTIPLIER: f64 = 2.0; // Double the delay each time
///
/// # Returns
/// * `u64` - Delay in milliseconds, capped at MCP_MAX_RESTART_DELAY_MS
fn calculate_exponential_backoff_delay(attempt: u32) -> u64 {
pub fn calculate_exponential_backoff_delay(attempt: u32) -> u64 {
use std::cmp;
// Calculate base exponential delay: base_delay * multiplier^(attempt-1)
let exponential_delay = (MCP_BASE_RESTART_DELAY_MS as f64)
* MCP_BACKOFF_MULTIPLIER.powi((attempt - 1) as i32);
let exponential_delay =
(MCP_BASE_RESTART_DELAY_MS as f64) * MCP_BACKOFF_MULTIPLIER.powi((attempt - 1) as i32);
// Cap the delay at maximum
let capped_delay = cmp::min(exponential_delay as u64, MCP_MAX_RESTART_DELAY_MS);
// Add jitter (±25% randomness) to prevent thundering herd
let jitter_range = (capped_delay as f64 * 0.25) as u64;
let jitter = if jitter_range > 0 {
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
// Use attempt number as seed for deterministic but varied jitter
let mut hasher = DefaultHasher::new();
attempt.hash(&mut hasher);
let hash = hasher.finish();
// Convert hash to jitter value in range [-jitter_range, +jitter_range]
let jitter_offset = (hash % (jitter_range * 2)) as i64 - jitter_range as i64;
jitter_offset
} else {
0
};
// Apply jitter while ensuring delay stays positive and within bounds
let final_delay = cmp::max(
100, // Minimum 100ms delay
cmp::min(
MCP_MAX_RESTART_DELAY_MS,
(capped_delay as i64 + jitter) as u64
)
(capped_delay as i64 + jitter) as u64,
),
);
final_delay
}
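// Worked example of the schedule this produces, before jitter: 1000 ms, 2000 ms, 4000 ms,
// 8000 ms, 16000 ms, then 32000 ms which is capped to MCP_MAX_RESTART_DELAY_MS (30000 ms).
// The hash-based jitter then shifts each value by up to ±25%, and the result is clamped to
// the [100, 30000] ms range. A small sketch of a test that pins those bounds:
#[cfg(test)]
mod backoff_example {
    use super::*;

    #[test]
    fn delays_stay_within_the_documented_bounds() {
        for attempt in 1..=10u32 {
            let delay = calculate_exponential_backoff_delay(attempt);
            assert!(delay >= 100, "a minimum delay of 100 ms is enforced");
            assert!(delay <= MCP_MAX_RESTART_DELAY_MS, "delay must respect the cap");
        }
    }
}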
@ -156,7 +106,7 @@ pub async fn run_mcp_commands<R: Runtime>(
let servers_clone = servers_state.clone();
let name_clone = name.clone();
let config_clone = config.clone();
// Spawn task for initial startup attempt
let handle = tokio::spawn(async move {
// Only wait for the initial startup attempt, not the monitoring
@ -166,44 +116,47 @@ pub async fn run_mcp_commands<R: Runtime>(
name_clone.clone(),
config_clone.clone(),
Some(3), // Default max restarts for startup
).await;
)
.await;
// If initial startup failed, we still want to continue with other servers
if let Err(e) = &result {
log::error!("Initial startup failed for MCP server {}: {}", name_clone, e);
log::error!(
"Initial startup failed for MCP server {}: {}",
name_clone,
e
);
}
(name_clone, result)
});
startup_handles.push(handle);
}
// Wait for all initial startup attempts to complete
let mut successful_count = 0;
let mut failed_count = 0;
for handle in startup_handles {
match handle.await {
Ok((name, result)) => {
match result {
Ok(_) => {
log::info!("MCP server {} initialized successfully", name);
successful_count += 1;
}
Err(e) => {
log::error!("MCP server {} failed to initialize: {}", name, e);
failed_count += 1;
}
Ok((name, result)) => match result {
Ok(_) => {
log::info!("MCP server {} initialized successfully", name);
successful_count += 1;
}
}
Err(e) => {
log::error!("MCP server {} failed to initialize: {}", name, e);
failed_count += 1;
}
},
Err(e) => {
log::error!("Failed to join startup task: {}", e);
failed_count += 1;
}
}
}
log::info!(
"MCP server initialization complete: {} successful, {} failed",
successful_count,
@ -214,17 +167,17 @@ pub async fn run_mcp_commands<R: Runtime>(
}
/// Monitor MCP server health without removing it from the HashMap
async fn monitor_mcp_server_handle(
pub async fn monitor_mcp_server_handle(
servers_state: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>>,
name: String,
) -> Option<rmcp::service::QuitReason> {
log::info!("Monitoring MCP server {} health", name);
// Monitor server health with periodic checks
loop {
// Small delay between health checks
sleep(Duration::from_secs(5)).await;
// Check if server is still healthy by trying to list tools
let health_check_result = {
let servers = servers_state.lock().await;
@ -250,10 +203,13 @@ async fn monitor_mcp_server_handle(
return Some(rmcp::service::QuitReason::Closed);
}
};
if !health_check_result {
// Server failed health check - remove it and return
log::error!("MCP server {} failed health check, removing from active servers", name);
log::error!(
"MCP server {} failed health check, removing from active servers",
name
);
let mut servers = servers_state.lock().await;
if let Some(service) = servers.remove(&name) {
// Try to cancel the service gracefully
@ -266,7 +222,7 @@ async fn monitor_mcp_server_handle(
/// Starts an MCP server with restart monitoring
/// Returns the result of the first start attempt, then continues with restart monitoring
async fn start_mcp_server_with_restart<R: Runtime>(
pub async fn start_mcp_server_with_restart<R: Runtime>(
app: AppHandle<R>,
servers_state: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>>,
name: String,
@ -277,12 +233,12 @@ async fn start_mcp_server_with_restart<R: Runtime>(
let restart_counts = app_state.mcp_restart_counts.clone();
let active_servers_state = app_state.mcp_active_servers.clone();
let successfully_connected = app_state.mcp_successfully_connected.clone();
// Store active server config for restart purposes
store_active_server_config(&active_servers_state, &name, &config).await;
let max_restarts = max_restarts.unwrap_or(5);
// Try the first start attempt and return its result
log::info!("Starting MCP server {} (Initial attempt)", name);
let first_start_result = schedule_mcp_start_task(
@ -290,19 +246,20 @@ async fn start_mcp_server_with_restart<R: Runtime>(
servers_state.clone(),
name.clone(),
config.clone(),
).await;
)
.await;
match first_start_result {
Ok(_) => {
log::info!("MCP server {} started successfully on first attempt", name);
reset_restart_count(&restart_counts, &name).await;
// Check if server was marked as successfully connected (passed verification)
let was_verified = {
let connected = successfully_connected.lock().await;
connected.get(&name).copied().unwrap_or(false)
};
if was_verified {
// Only spawn monitoring task if server passed verification
spawn_server_monitoring_task(
@ -313,24 +270,32 @@ async fn start_mcp_server_with_restart<R: Runtime>(
max_restarts,
restart_counts,
successfully_connected,
).await;
)
.await;
Ok(())
} else {
// Server failed verification, don't monitor for restarts
log::error!("MCP server {} failed verification after startup", name);
Err(format!("MCP server {} failed verification after startup", name))
Err(format!(
"MCP server {} failed verification after startup",
name
))
}
}
Err(e) => {
log::error!("Failed to start MCP server {} on first attempt: {}", name, e);
log::error!(
"Failed to start MCP server {} on first attempt: {}",
name,
e
);
Err(e)
}
}
}
/// Helper function to handle the restart loop logic
async fn start_restart_loop<R: Runtime>(
pub async fn start_restart_loop<R: Runtime>(
app: AppHandle<R>,
servers_state: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>>,
name: String,
@ -353,11 +318,12 @@ async fn start_restart_loop<R: Runtime>(
name,
max_restarts
);
if let Err(e) = app.emit("mcp_max_restarts_reached",
if let Err(e) = app.emit(
"mcp_max_restarts_reached",
serde_json::json!({
"server": name,
"max_restarts": max_restarts
})
}),
) {
log::error!("Failed to emit mcp_max_restarts_reached event: {e}");
}
@ -387,18 +353,19 @@ async fn start_restart_loop<R: Runtime>(
servers_state.clone(),
name.clone(),
config.clone(),
).await;
)
.await;
match start_result {
Ok(_) => {
log::info!("MCP server {} restarted successfully.", name);
// Check if server passed verification (was marked as successfully connected)
let passed_verification = {
let connected = successfully_connected.lock().await;
connected.get(&name).copied().unwrap_or(false)
};
if !passed_verification {
log::error!(
"MCP server {} failed verification after restart - stopping permanently",
@ -406,7 +373,7 @@ async fn start_restart_loop<R: Runtime>(
);
break;
}
// Reset restart count on successful restart with verification
{
let mut counts = restart_counts.lock().await;
@ -423,10 +390,8 @@ async fn start_restart_loop<R: Runtime>(
}
// Monitor the server again
let quit_reason = monitor_mcp_server_handle(
servers_state.clone(),
name.clone(),
).await;
let quit_reason =
monitor_mcp_server_handle(servers_state.clone(), name.clone()).await;
log::info!("MCP server {} quit with reason: {:?}", name, quit_reason);
@ -464,7 +429,7 @@ async fn start_restart_loop<R: Runtime>(
}
Err(e) => {
log::error!("Failed to restart MCP server {}: {}", name, e);
// Check if server was marked as successfully connected before
let was_connected = {
let connected = successfully_connected.lock().await;
@ -485,21 +450,7 @@ async fn start_restart_loop<R: Runtime>(
}
}
#[tauri::command]
pub async fn activate_mcp_server<R: Runtime>(
app: tauri::AppHandle<R>,
state: State<'_, AppState>,
name: String,
config: Value,
) -> Result<(), String> {
let servers: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>> =
state.mcp_servers.clone();
// Use the modified start_mcp_server_with_restart that returns first attempt result
start_mcp_server_with_restart(app, servers, name, config, Some(3)).await
}
async fn schedule_mcp_start_task<R: Runtime>(
pub async fn schedule_mcp_start_task<R: Runtime>(
app: tauri::AppHandle<R>,
servers: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>>,
name: String,
@ -511,7 +462,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
.parent()
.expect("Executable must have a parent directory");
let bin_path = exe_parent_path.to_path_buf();
let (command, args, envs) = extract_command_args(&config)
.ok_or_else(|| format!("Failed to extract command args from config for {name}"))?;
@ -535,12 +486,12 @@ async fn schedule_mcp_start_task<R: Runtime>(
cmd.arg("run");
cmd.env("UV_CACHE_DIR", cache_dir.to_str().unwrap().to_string());
}
#[cfg(windows)]
{
cmd.creation_flags(0x08000000); // CREATE_NO_WINDOW: prevents shell window on Windows
}
let app_path_str = app_path.to_str().unwrap().to_string();
let log_file_path = format!("{}/logs/app.log", app_path_str);
match std::fs::OpenOptions::new()
@ -568,13 +519,14 @@ async fn schedule_mcp_start_task<R: Runtime>(
}
});
let process = TokioChildProcess::new(cmd)
.map_err(|e| {
log::error!("Failed to run command {name}: {e}");
format!("Failed to run command {name}: {e}")
})?;
let process = TokioChildProcess::new(cmd).map_err(|e| {
log::error!("Failed to run command {name}: {e}");
format!("Failed to run command {name}: {e}")
})?;
let service = ().serve(process).await
let service = ()
.serve(process)
.await
.map_err(|e| format!("Failed to start MCP server {name}: {e}"))?;
// Get peer info and clone the needed values before moving the service
@ -595,15 +547,18 @@ async fn schedule_mcp_start_task<R: Runtime>(
// This prevents race conditions where the server quits immediately
let verification_delay = Duration::from_millis(500);
sleep(verification_delay).await;
// Check if server is still running after the verification delay
let server_still_running = {
let servers_map = servers.lock().await;
servers_map.contains_key(&name)
};
if !server_still_running {
return Err(format!("MCP server {} quit immediately after starting", name));
return Err(format!(
"MCP server {} quit immediately after starting",
name
));
}
// Mark server as successfully connected (for restart policy)
@ -626,48 +581,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
Ok(())
}
#[tauri::command]
pub async fn deactivate_mcp_server(state: State<'_, AppState>, name: String) -> Result<(), String> {
log::info!("Deactivating MCP server: {}", name);
// First, mark server as manually deactivated to prevent restart
// Remove from active servers list to prevent restart
{
let mut active_servers = state.mcp_active_servers.lock().await;
active_servers.remove(&name);
log::info!("Removed MCP server {} from active servers list", name);
}
// Mark as not successfully connected to prevent restart logic
{
let mut connected = state.mcp_successfully_connected.lock().await;
connected.insert(name.clone(), false);
log::info!("Marked MCP server {} as not successfully connected", name);
}
// Reset restart count
{
let mut counts = state.mcp_restart_counts.lock().await;
counts.remove(&name);
log::info!("Reset restart count for MCP server {}", name);
}
// Now remove and stop the server
let servers = state.mcp_servers.clone();
let mut servers_map = servers.lock().await;
let service = servers_map.remove(&name)
.ok_or_else(|| format!("Server {} not found", name))?;
// Release the lock before calling cancel
drop(servers_map);
service.cancel().await.map_err(|e| e.to_string())?;
log::info!("Server {name} stopped successfully and marked as deactivated.");
Ok(())
}
fn extract_command_args(
pub fn extract_command_args(
config: &Value,
) -> Option<(String, Vec<Value>, serde_json::Map<String, Value>)> {
let obj = config.as_object()?;
@ -681,27 +595,12 @@ fn extract_command_args(
Some((command, args, envs))
}
fn extract_active_status(config: &Value) -> Option<bool> {
pub fn extract_active_status(config: &Value) -> Option<bool> {
let obj = config.as_object()?;
let active = obj.get("active")?.as_bool()?;
Some(active)
}
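// A minimal sketch (assuming these helpers read the command/args/env/active keys shown in
// DEFAULT_MCP_CONFIG) of the server-entry shape they accept:
#[cfg(test)]
mod extract_example {
    use super::*;
    use serde_json::json;

    #[test]
    fn parses_a_default_style_server_entry() {
        let config = json!({
            "command": "npx",
            "args": ["-y", "@modelcontextprotocol/server-sequential-thinking"],
            "env": {},
            "active": false
        });
        let (command, args, envs) = extract_command_args(&config).expect("entry should parse");
        assert_eq!(command, "npx");
        assert_eq!(args.len(), 2);
        assert!(envs.is_empty());
        assert_eq!(extract_active_status(&config), Some(false));
    }
}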
#[tauri::command]
pub async fn restart_mcp_servers(app: AppHandle, state: State<'_, AppState>) -> Result<(), String> {
let servers = state.mcp_servers.clone();
// Stop the servers
stop_mcp_servers(state.mcp_servers.clone()).await?;
// Restart only previously active servers (like cortex)
restart_active_mcp_servers(&app, servers).await?;
app.emit("mcp-update", "MCP servers updated")
.map_err(|e| format!("Failed to emit event: {}", e))?;
Ok(())
}
/// Restart only servers that were previously active (like cortex restart behavior)
pub async fn restart_active_mcp_servers<R: Runtime>(
app: &AppHandle<R>,
@ -709,18 +608,21 @@ pub async fn restart_active_mcp_servers<R: Runtime>(
) -> Result<(), String> {
let app_state = app.state::<AppState>();
let active_servers = app_state.mcp_active_servers.lock().await;
log::info!("Restarting {} previously active MCP servers", active_servers.len());
log::info!(
"Restarting {} previously active MCP servers",
active_servers.len()
);
for (name, config) in active_servers.iter() {
log::info!("Restarting MCP server: {}", name);
// Start server with restart monitoring - spawn async task
let app_clone = app.clone();
let servers_clone = servers_state.clone();
let name_clone = name.clone();
let config_clone = config.clone();
tauri::async_runtime::spawn(async move {
let _ = start_mcp_server_with_restart(
app_clone,
@ -728,37 +630,20 @@ pub async fn restart_active_mcp_servers<R: Runtime>(
name_clone,
config_clone,
Some(3), // Default max restarts for startup
).await;
)
.await;
});
}
Ok(())
}
/// Reset MCP restart count for a specific server (like cortex reset)
#[tauri::command]
pub async fn reset_mcp_restart_count(state: State<'_, AppState>, server_name: String) -> Result<(), String> {
let mut counts = state.mcp_restart_counts.lock().await;
let count = match counts.get_mut(&server_name) {
Some(count) => count,
None => return Ok(()), // Server not found, nothing to reset
};
let old_count = *count;
*count = 0;
log::info!("MCP server {} restart count reset from {} to 0.", server_name, old_count);
Ok(())
}
pub async fn clean_up_mcp_servers(
state: State<'_, AppState>,
) {
pub async fn clean_up_mcp_servers(state: State<'_, AppState>) {
log::info!("Cleaning up MCP servers");
// Stop all running MCP servers
let _ = stop_mcp_servers(state.mcp_servers.clone()).await;
// Clear active servers and restart counts
{
let mut active_servers = state.mcp_active_servers.lock().await;
@ -785,139 +670,8 @@ pub async fn stop_mcp_servers(
Ok(())
}
#[tauri::command]
pub async fn get_connected_servers(
_app: AppHandle,
state: State<'_, AppState>,
) -> Result<Vec<String>, String> {
let servers = state.mcp_servers.clone();
let servers_map = servers.lock().await;
Ok(servers_map.keys().cloned().collect())
}
/// Retrieves all available tools from all MCP servers
///
/// # Arguments
/// * `state` - Application state containing MCP server connections
///
/// # Returns
/// * `Result<Vec<Tool>, String>` - A vector of all tools if successful, or an error message if failed
///
/// This function:
/// 1. Locks the MCP servers mutex to access server connections
/// 2. Iterates through all connected servers
/// 3. Gets the list of tools from each server
/// 4. Combines all tools into a single vector
/// 5. Returns the combined list of all available tools
#[tauri::command]
pub async fn get_tools(state: State<'_, AppState>) -> Result<Vec<Tool>, String> {
let servers = state.mcp_servers.lock().await;
let mut all_tools: Vec<Tool> = Vec::new();
for (_, service) in servers.iter() {
// List tools with timeout
let tools_future = service.list_all_tools();
let tools = match timeout(MCP_TOOL_CALL_TIMEOUT, tools_future).await {
Ok(result) => result.map_err(|e| e.to_string())?,
Err(_) => {
log::warn!(
"Listing tools timed out after {} seconds",
MCP_TOOL_CALL_TIMEOUT.as_secs()
);
continue; // Skip this server and continue with others
}
};
for tool in tools {
all_tools.push(tool);
}
}
Ok(all_tools)
}
/// Calls a tool on an MCP server by name with optional arguments
///
/// # Arguments
/// * `state` - Application state containing MCP server connections
/// * `tool_name` - Name of the tool to call
/// * `arguments` - Optional map of argument names to values
///
/// # Returns
/// * `Result<CallToolResult, String>` - Result of the tool call if successful, or error message if failed
///
/// This function:
/// 1. Locks the MCP servers mutex to access server connections
/// 2. Searches through all servers for one containing the named tool
/// 3. When found, calls the tool on that server with the provided arguments
/// 4. Returns error if no server has the requested tool
#[tauri::command]
pub async fn call_tool(
state: State<'_, AppState>,
tool_name: String,
arguments: Option<Map<String, Value>>,
) -> Result<CallToolResult, String> {
let servers = state.mcp_servers.lock().await;
// Iterate through servers and find the first one that contains the tool
for (_, service) in servers.iter() {
let tools = match service.list_all_tools().await {
Ok(tools) => tools,
Err(_) => continue, // Skip this server if we can't list tools
};
if !tools.iter().any(|t| t.name == tool_name) {
continue; // Tool not found in this server, try next
}
println!("Found tool {} in server", tool_name);
// Call the tool with timeout
let tool_call = service.call_tool(CallToolRequestParam {
name: tool_name.clone().into(),
arguments,
});
return match timeout(MCP_TOOL_CALL_TIMEOUT, tool_call).await {
Ok(result) => result.map_err(|e| e.to_string()),
Err(_) => Err(format!(
"Tool call '{}' timed out after {} seconds",
tool_name,
MCP_TOOL_CALL_TIMEOUT.as_secs()
)),
};
}
Err(format!("Tool {} not found", tool_name))
}
#[tauri::command]
pub async fn get_mcp_configs(app: AppHandle) -> Result<String, String> {
let mut path = get_jan_data_folder_path(app);
path.push("mcp_config.json");
log::info!("read mcp configs, path: {:?}", path);
// Create default empty config if file doesn't exist
if !path.exists() {
log::info!("mcp_config.json not found, creating default empty config");
fs::write(&path, DEFAULT_MCP_CONFIG)
.map_err(|e| format!("Failed to create default MCP config: {}", e))?;
}
fs::read_to_string(path).map_err(|e| e.to_string())
}
#[tauri::command]
pub async fn save_mcp_configs(app: AppHandle, configs: String) -> Result<(), String> {
let mut path = get_jan_data_folder_path(app);
path.push("mcp_config.json");
log::info!("save mcp configs, path: {:?}", path);
fs::write(path, configs).map_err(|e| e.to_string())
}
/// Store active server configuration for restart purposes
async fn store_active_server_config(
pub async fn store_active_server_config(
active_servers_state: &Arc<Mutex<HashMap<String, Value>>>,
name: &str,
config: &Value,
@ -926,18 +680,14 @@ async fn store_active_server_config(
active_servers.insert(name.to_string(), config.clone());
}
/// Reset restart count for a server
async fn reset_restart_count(
restart_counts: &Arc<Mutex<HashMap<String, u32>>>,
name: &str,
) {
pub async fn reset_restart_count(restart_counts: &Arc<Mutex<HashMap<String, u32>>>, name: &str) {
let mut counts = restart_counts.lock().await;
counts.insert(name.to_string(), 0);
}
/// Spawn the server monitoring task for handling restarts
async fn spawn_server_monitoring_task<R: Runtime>(
pub async fn spawn_server_monitoring_task<R: Runtime>(
app: AppHandle<R>,
servers_state: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>>,
name: String,
@ -950,15 +700,17 @@ async fn spawn_server_monitoring_task<R: Runtime>(
let servers_clone = servers_state.clone();
let name_clone = name.clone();
let config_clone = config.clone();
tauri::async_runtime::spawn(async move {
// Monitor the server using RunningService's JoinHandle<QuitReason>
let quit_reason = monitor_mcp_server_handle(
servers_clone.clone(),
name_clone.clone(),
).await;
let quit_reason =
monitor_mcp_server_handle(servers_clone.clone(), name_clone.clone()).await;
log::info!("MCP server {} quit with reason: {:?}", name_clone, quit_reason);
log::info!(
"MCP server {} quit with reason: {:?}",
name_clone,
quit_reason
);
// Check if we should restart based on connection status and quit reason
if should_restart_server(&successfully_connected, &name_clone, &quit_reason).await {
@ -971,13 +723,14 @@ async fn spawn_server_monitoring_task<R: Runtime>(
max_restarts,
restart_counts,
successfully_connected,
).await;
)
.await;
}
});
}
/// Determine if a server should be restarted based on its connection status and quit reason
async fn should_restart_server(
pub async fn should_restart_server(
successfully_connected: &Arc<Mutex<HashMap<String, bool>>>,
name: &str,
quit_reason: &Option<rmcp::service::QuitReason>,
@ -1009,44 +762,3 @@ async fn should_restart_server(
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
use std::fs::File;
use std::io::Write;
use std::sync::Arc;
use tauri::test::mock_app;
use tokio::sync::Mutex;
#[tokio::test]
async fn test_run_mcp_commands() {
let app = mock_app();
// Get the app path where the config should be created
let app_path = get_jan_data_folder_path(app.handle().clone());
let config_path = app_path.join("mcp_config.json");
// Ensure the directory exists
if let Some(parent) = config_path.parent() {
std::fs::create_dir_all(parent).expect("Failed to create parent directory");
}
// Create a mock mcp_config.json file at the correct location
let mut file: File = File::create(&config_path).expect("Failed to create config file");
file.write_all(b"{\"mcpServers\":{}}")
.expect("Failed to write to config file");
// Call the run_mcp_commands function
let servers_state: Arc<Mutex<HashMap<String, RunningService<RoleClient, ()>>>> =
Arc::new(Mutex::new(HashMap::new()));
let result = run_mcp_commands(app.handle(), servers_state).await;
// Assert that the function returns Ok(())
assert!(result.is_ok());
// Clean up the mock config file
std::fs::remove_file(&config_path).expect("Failed to remove config file");
}
}

View File

@ -0,0 +1,6 @@
pub mod commands;
mod constants;
pub mod helpers;
#[cfg(test)]
mod tests;
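
For orientation, a minimal wiring sketch, with the module path and the command list as assumptions rather than part of this diff: the #[tauri::command] functions above only become callable from the webview once they are registered with generate_handler! on the Tauri builder.

pub fn run() {
    tauri::Builder::default()
        .invoke_handler(tauri::generate_handler![
            core::mcp::commands::activate_mcp_server,
            core::mcp::commands::deactivate_mcp_server,
            core::mcp::commands::restart_mcp_servers,
            core::mcp::commands::reset_mcp_restart_count,
            core::mcp::commands::get_connected_servers,
            core::mcp::commands::get_tools,
            core::mcp::commands::call_tool,
            core::mcp::commands::get_mcp_configs,
            core::mcp::commands::save_mcp_configs,
        ])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}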

Some files were not shown because too many files have changed in this diff.