(WIP) randomly generate api-key hash each session

Akarshan Biswas 2025-05-28 09:52:25 +05:30 committed by Louis
parent 1dd762f0cf
commit 31971e7821
3 changed files with 17 additions and 3 deletions

@@ -110,6 +110,7 @@ export interface sessionInfo {
   port: number // llama-server output port (corrected from portid)
   modelName: string, // name of the model
   modelPath: string // path of the loaded model
+  api_key: string
 }

 // 4. /unload

@@ -23,6 +23,7 @@ import {
 } from '@janhq/core'
 import { invoke } from '@tauri-apps/api/core'
+import { createHmac } from 'crypto'

 type LlamacppConfig = {
   n_gpu_layers: number;
@@ -83,10 +84,11 @@ export default class llamacpp_extension extends AIEngine {
   readonly providerId: string = 'llamacpp'
   private config: LlamacppConfig
-  private downloadManager
+  private downloadManager: any
   private activeSessions: Map<string, sessionInfo> = new Map()
   private modelsBasePath!: string
   private enginesBasePath!: string
+  private apiSecret: string = "Jan"

   override async onLoad(): Promise<void> {
     super.onLoad() // Calls registerEngine() from AIEngine
@@ -128,6 +130,11 @@ export default class llamacpp_extension extends AIEngine {
     this.config[key] = value
   }
+
+  private generateApiKey(modelId: string): string {
+    const hash = createHmac('sha256', this.apiSecret).update(modelId).digest("base64")
+    return hash
+  }

   // Implement the required LocalProvider interface methods
   override async list(): Promise<modelInfo[]> {
     const modelsDir = await joinPath([this.modelsBasePath, this.provider])
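
Note that `generateApiKey` as written is deterministic: an HMAC over the model path under the hard-coded secret `"Jan"` yields the same key on every launch, so despite the commit title nothing is random per session yet (hence the WIP flag and the TODO below). A minimal sketch of one way to get a per-session key, assuming Node's `crypto` module and a hypothetical `sessionNonce` field that is not in this commit:

```ts
import { createHmac, randomBytes } from 'crypto'

// Sketch: mix a random per-session nonce into the HMAC so the key
// changes on every extension load, while staying distinct per model.
class ApiKeyGenerator {
  // Hypothetical field: regenerated each time the extension loads.
  private sessionNonce = randomBytes(32).toString('hex')

  generate(modelId: string, secret: string): string {
    return createHmac('sha256', secret)
      .update(this.sessionNonce) // per-session randomness
      .update(modelId)           // per-model differentiation
      .digest('base64')
  }
}
```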
@@ -289,6 +296,9 @@ export default class llamacpp_extension extends AIEngine {
     // disable llama-server webui
     args.push('--no-webui')
+    // update key for security; TODO: (qnixsynapse) Make it more secure
+    const api_key = this.generateApiKey(opts.modelPath)
+    args.push(`--api-key ${api_key}`)

     // model option is required
     // TODO: llama.cpp extension lookup model path based on modelId
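
One thing to watch here: `args.push(`--api-key ${api_key}`)` appends flag and value as a single array element. If each element becomes one argv entry in the spawned process (which is how `Command::args` behaves on the Rust side), llama-server's option parser would see the single token `--api-key <key>` rather than the flag followed by its value. A sketch of the two spellings:

```ts
const apiKey = 'abc123' // hypothetical value
const args: string[] = []

// One argv entry: the child process receives "--api-key abc123" as a single token.
args.push(`--api-key ${apiKey}`)

// Two argv entries: the child process receives "--api-key" then "abc123",
// which is what a typical CLI option parser expects.
// args.push('--api-key', apiKey)
```

Switching to the two-entry form would, however, shift the positional `args[1]`/`args[2]` parsing that the Rust loader below relies on.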
@@ -456,7 +466,7 @@ export default class llamacpp_extension extends AIEngine {
     const url = `${baseUrl}/chat/completions`
     const headers = {
       'Content-Type': 'application/json',
-      'Authorization': `Bearer test-k`,
+      'Authorization': `Bearer ${sessionInfo.api_key}`,
     }
     const body = JSON.stringify(opts)
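
With the header wired to `sessionInfo.api_key`, each chat request is authorized with the per-session key instead of the placeholder `test-k`. A minimal sketch of the resulting request, with hypothetical `baseUrl` and key values, assuming `baseUrl` points at the local llama-server HTTP endpoint:

```ts
async function chat(baseUrl: string, apiKey: string) {
  // Same header shape as in the diff above; a request carrying the
  // wrong key should be rejected by llama-server (401).
  const res = await fetch(`${baseUrl}/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
    },
    body: JSON.stringify({ messages: [{ role: 'user', content: 'Hello' }] }),
  })
  return res.json()
}

// Hypothetical values for illustration only.
chat('http://127.0.0.1:8080/v1', 'abc123').then(console.log)
```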

@@ -42,6 +42,7 @@ pub struct SessionInfo {
     pub session_id: String, // opaque handle for unload/chat
     pub port: u16,          // llama-server output port
     pub model_path: String, // path of the loaded model
+    pub api_key: String,
 }

 #[derive(serde::Serialize, serde::Deserialize)]
@@ -85,7 +86,8 @@ pub async fn load_llama_model(
     // Configure the command to run the server
     let mut command = Command::new(server_path);
-    let model_path = args[0].replace("-m", "");
+    let model_path = args[2].replace("-m", "");
+    let api_key = args[1].replace("--api-key", "");
     command.args(args);

     // Optional: Redirect stdio if needed (e.g., for logging within Jan)
@@ -110,6 +112,7 @@ pub async fn load_llama_model(
         session_id: pid, // Use PID as session ID
         port,
         model_path,
+        api_key,
     };

     Ok(session_info)
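
Worth noting on the parsing above: `replace("--api-key", "")` removes only the flag text, so the space separating flag and value survives and the stored `api_key` (and `model_path`, via the same pattern) keeps a leading space unless trimmed. The same string semantics, illustrated in TypeScript with a hypothetical argv element:

```ts
const entry = '--api-key abc123' // hypothetical argv element
const raw = entry.replace('--api-key', '') // " abc123" (leading space kept)
const apiKey = raw.trim()                  // "abc123"
console.log(JSON.stringify(raw), JSON.stringify(apiKey))
```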