* add llamacpp plugin * Refactor llamacpp plugin * add utils plugin * remove utils folder * add hardware implementation * add utils folder + move utils function * organize cargo files * refactor utils src * refactor util * apply fmt * fmt * Update gguf + reformat * add permission for gguf commands * fix cargo test windows * revert yarn lock * remove cargo.lock for hardware plugin * ignore cargo.lock file * Fix hardware invoke + refactor hardware + refactor tests, constants * use api wrapper in extension to invoke hardware call + api wrapper build integration * add newline at EOF (per Akarshan) * add vi mock for getSystemInfo
39 lines
886 B
Rust
39 lines
886 B
Rust
use serde::{Deserialize, Serialize};
|
|
use std::collections::HashMap;
|
|
use std::sync::Arc;
|
|
use tokio::process::Child;
|
|
use tokio::sync::Mutex;
|
|
|
|
/// Metadata for a single running llama-server session.
///
/// Derives serde `Serialize`/`Deserialize` so the session details can be
/// passed across the plugin boundary (e.g. returned to a caller as JSON).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionInfo {
    /// Process id of the spawned server — opaque handle for unload/chat.
    pub pid: i32,
    /// Port the llama-server is listening on.
    pub port: i32,
    /// Identifier of the model this session serves.
    pub model_id: String,
    /// Filesystem path of the loaded model.
    pub model_path: String,
    /// API key associated with this server instance
    /// (presumably used to authenticate requests — confirm against caller).
    pub api_key: String,
}
|
|
|
|
/// A live llama-server backend: the spawned child process handle paired
/// with the session metadata needed to address (and later stop) it.
pub struct LLamaBackendSession {
    /// Handle to the spawned server process (`tokio::process::Child`).
    pub child: Child,
    /// Session metadata (pid, port, model id/path, api key).
    pub info: SessionInfo,
}
|
|
|
|
/// LlamaCpp plugin state.
///
/// Tracks every running llama-server session behind an
/// `Arc<tokio::sync::Mutex<..>>` so the map can be shared with and mutated
/// from concurrent async handlers.
pub struct LlamacppState {
    /// Map of process id -> running backend session
    /// (key presumably matches `SessionInfo::pid` — confirm at insert sites).
    pub llama_server_process: Arc<Mutex<HashMap<i32, LLamaBackendSession>>>,
}
|
|
|
|
impl Default for LlamacppState {
|
|
fn default() -> Self {
|
|
Self {
|
|
llama_server_process: Arc::new(Mutex::new(HashMap::new())),
|
|
}
|
|
}
|
|
}
|
|
|
|
impl LlamacppState {
|
|
pub fn new() -> Self {
|
|
Self::default()
|
|
}
|
|
}
|