Refactor session PID types from string to number across backend and extension

- Changed the `pid` field in `SessionInfo` from `string` to `number` in TypeScript and from `String` to `i32` in Rust.
- Updated the `activeSessions` map key from `string` to `number` to match the new PID type.
- Adjusted the process-monitoring logic to handle numeric PIDs correctly, awaiting the `is_process_running` call (see the sketch below).
- Switched the load-timeout cleanup path to call `unload` with the session's `model_id` instead of its `pid`.
- Removed the UUID-based fallback PID in favor of a numeric fallback (`-1`).
- Parsed the `--port` argument into an `i32` (falling back to 8080 when the value is missing or invalid) and changed the Rust `SessionInfo.port` field to match.
- Added cleanup logic to `is_process_running`: sessions whose process is no longer alive are evicted from the session map.
- Bumped the application version from 0.5.16 to 0.6.900 in `tauri.conf.json`.
Akarshan 2025-07-04 21:40:54 +05:30
parent dbdc031583
commit d4a3d6a0d6
GPG Key ID: D75C9634A870665F
6 changed files with 32 additions and 21 deletions
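The heart of the refactor is that the aliveness check now doubles as session cleanup. Below is a minimal, self-contained sketch of that pattern, assuming the `sysinfo` crate (as used in the diff below) and a plain `HashMap<i32, String>` standing in for the `Mutex`-guarded `AppState::llama_server_process`:

```rust
use std::collections::HashMap;

use sysinfo::{Pid, ProcessesToUpdate, System};

/// Returns whether `pid` is still alive; if not, evicts the stale session.
/// `sessions` stands in for `AppState::llama_server_process`, which the
/// real code guards with an async `Mutex`.
fn is_process_running(pid: i32, sessions: &mut HashMap<i32, String>) -> bool {
    let mut system = System::new();
    // Refresh the process table so the lookup reflects the current state.
    system.refresh_processes(ProcessesToUpdate::All, true);
    let alive = system.process(Pid::from(pid as usize)).is_some();
    if !alive {
        // The server died: drop its session so later calls fail fast
        // instead of talking to a dead port.
        sessions.remove(&pid);
    }
    alive
}

fn main() {
    let mut sessions = HashMap::new();
    // -1 is the new numeric fallback PID; it never matches a real process,
    // so such a session is evicted on the first check.
    sessions.insert(-1, String::from("example-model"));
    assert!(!is_process_running(-1, &mut sessions));
    assert!(sessions.is_empty());
}
```

Because the extension awaits this check before every chat request, crashed servers are garbage-collected lazily on the read path rather than by a background monitor.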

View File

@@ -161,7 +161,7 @@ export interface modelInfo {
export type listResult = modelInfo[]
export interface SessionInfo {
-pid: string // opaque handle for unload/chat
+pid: number // opaque handle for unload/chat
port: number // llama-server output port (corrected from portid)
model_id: string, //name of the model
model_path: string // path of the loaded model

View File

@@ -114,7 +114,7 @@ export default class llamacpp_extension extends AIEngine {
readonly providerId: string = 'llamacpp'
private config: LlamacppConfig
-private activeSessions: Map<string, SessionInfo> = new Map()
+private activeSessions: Map<number, SessionInfo> = new Map()
private providerPath!: string
private apiSecret: string = 'Jan'
@@ -724,7 +724,7 @@ export default class llamacpp_extension extends AIEngine {
} catch (e) {}
await this.sleep(500) // 500 ms interval during rechecks
}
-await this.unload(sInfo.pid)
+await this.unload(sInfo.model_id)
throw new Error(
`Timed out loading model after ${timeoutMs}... killing llamacpp`
)
@@ -967,10 +967,12 @@ export default class llamacpp_extension extends AIEngine {
if (!sessionInfo) {
throw new Error(`No active session found for model: ${opts.model}`)
}
-const result = invoke<boolean>('is_process_running', {
+// check if the process is alive
+const result = await invoke<boolean>('is_process_running', {
pid: sessionInfo.pid,
})
if (!result) {
+this.activeSessions.delete(sessionInfo.pid)
throw new Error('Model has crashed! Please reload!')
}
const baseUrl = `http://localhost:${sessionInfo.port}/v1`

View File

@@ -24,7 +24,7 @@ pub struct AppState {
pub mcp_active_servers: Arc<Mutex<HashMap<String, serde_json::Value>>>,
pub mcp_successfully_connected: Arc<Mutex<HashMap<String, bool>>>,
pub server_handle: Arc<Mutex<Option<ServerHandle>>>,
-pub llama_server_process: Arc<Mutex<HashMap<String, LLamaBackendSession>>>,
+pub llama_server_process: Arc<Mutex<HashMap<i32, LLamaBackendSession>>>,
}
pub fn generate_app_token() -> String {
rand::thread_rng()

View File

@@ -3,7 +3,7 @@ use crate::core::state::AppState;
pub async fn cleanup_processes(state: State<'_, AppState>) {
let mut map = state.llama_server_process.lock().await;
-let pids: Vec<String> = map.keys().cloned().collect();
+let pids: Vec<i32> = map.keys().cloned().collect();
for pid in pids {
if let Some(session) = map.remove(&pid) {
let mut child = session.child;

View File

@@ -9,7 +9,6 @@ use tauri::State; // Import Manager trait
use thiserror;
use tokio::process::Command;
use tokio::time::timeout;
-use uuid::Uuid;
use crate::core::state::AppState;
use crate::core::state::LLamaBackendSession;
@@ -44,8 +43,8 @@ type ServerResult<T> = Result<T, ServerError>;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionInfo {
-pub pid: String, // opaque handle for unload/chat
-pub port: String, // llama-server output port
+pub pid: i32, // opaque handle for unload/chat
+pub port: i32, // llama-server output port
pub model_id: String,
pub model_path: String, // path of the loaded model
pub api_key: String,
@@ -82,12 +81,19 @@ pub async fn load_llama_model(
)));
}
-let port = args
+let port_str = args
.iter()
.position(|arg| arg == "--port")
.and_then(|i| args.get(i + 1))
.cloned()
.unwrap_or_default();
+let port: i32 = match port_str.parse() {
+Ok(p) => p,
+Err(_) => {
+eprintln!("Invalid port value: '{}', using default 8080", port_str);
+8080
+}
+};
let model_path = args
.iter()
@@ -146,10 +152,7 @@ pub async fn load_llama_model(
let child = command.spawn().map_err(ServerError::Io)?;
// Get the PID to use as session ID
-let pid = child.id().map(|id| id.to_string()).unwrap_or_else(|| {
-// Fallback in case we can't get the PID for some reason
-format!("unknown_pid_{}", Uuid::new_v4())
-});
+let pid = child.id().map(|id| id as i32).unwrap_or(-1);
log::info!("Server process started with PID: {}", pid);
let session_info = SessionInfo {
@@ -175,7 +178,7 @@
// --- Unload Command ---
#[tauri::command]
pub async fn unload_llama_model(
-pid: String,
+pid: i32,
state: State<'_, AppState>,
) -> ServerResult<UnloadResult> {
let mut map = state.llama_server_process.lock().await;
@@ -212,9 +215,7 @@ pub async fn unload_llama_model(
if let Some(raw_pid) = child.id() {
log::info!("Sending Ctrl-C to PID {}", raw_pid);
-let ok: i32 = unsafe {
-GenerateConsoleCtrlEvent(CTRL_C_EVENT, raw_pid as u32)
-};
+let ok: i32 = unsafe { GenerateConsoleCtrlEvent(CTRL_C_EVENT, raw_pid as u32) };
if ok == 0 {
log::error!("Failed to send Ctrl-C to PID {}", raw_pid);
}
@@ -266,9 +267,17 @@ pub fn generate_api_key(model_id: String, api_secret: String) -> Result<String,
// process aliveness check
#[tauri::command]
-pub fn is_process_running(pid: u32) -> Result<bool, String> {
+pub async fn is_process_running(pid: i32, state: State<'_, AppState>) -> Result<bool, String> {
let mut system = System::new();
system.refresh_processes(ProcessesToUpdate::All, true);
let process_pid = Pid::from(pid as usize);
-Ok(system.process(process_pid).is_some())
+let alive = system.process(process_pid).is_some();
+if !alive {
+let mut map = state.llama_server_process.lock().await;
+map.remove(&pid);
+}
+Ok(alive)
}

View File

@@ -1,7 +1,7 @@
{
"$schema": "https://schema.tauri.app/config/2",
"productName": "Jan",
"version": "0.5.16",
"version": "0.6.900",
"identifier": "jan.ai.app",
"build": {
"frontendDist": "../web-app/dist",