(WIP) randomly generate api-key hash each session

parent 1dd762f0cf
commit 31971e7821
@@ -110,6 +110,7 @@ export interface sessionInfo {
   port: number // llama-server output port (corrected from portid)
   modelName: string, // name of the model
   modelPath: string // path of the loaded model
+  api_key: string
 }

 // 4. /unload
@@ -23,6 +23,7 @@ import {
 } from '@janhq/core'

 import { invoke } from '@tauri-apps/api/core'
+import { createHmac } from 'crypto'

 type LlamacppConfig = {
   n_gpu_layers: number;
@@ -83,10 +84,11 @@ export default class llamacpp_extension extends AIEngine {
   readonly providerId: string = 'llamacpp'

   private config: LlamacppConfig
-  private downloadManager
+  private downloadManager: any
   private activeSessions: Map<string, sessionInfo> = new Map()
   private modelsBasePath!: string
   private enginesBasePath!: string
+  private apiSecret: string = "Jan"

   override async onLoad(): Promise<void> {
     super.onLoad() // Calls registerEngine() from AIEngine
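One thing to flag in this hunk: the HMAC secret is the hardcoded literal "Jan", so every installation shares it and anyone reading the source can reproduce the keys. A minimal, illustrative alternative, assuming a Node-style runtime (JAN_API_SECRET is a hypothetical variable name, not part of this commit):

    // Hypothetical sketch: take the HMAC secret from the environment,
    // keeping the current literal only as a development fallback.
    const apiSecret: string = process.env.JAN_API_SECRET ?? 'Jan'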
@@ -128,6 +130,11 @@ export default class llamacpp_extension extends AIEngine {
     this.config[key] = value
   }

+  private generateApiKey(modelId: string): string {
+    const hash = createHmac('sha256', this.apiSecret).update(modelId).digest("base64")
+    return hash
+  }
+
   // Implement the required LocalProvider interface methods
   override async list(): Promise<modelInfo[]> {
     const modelsDir = await joinPath([this.modelsBasePath, this.provider])
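Note that despite the commit title, createHmac with a fixed secret over a fixed modelId is fully deterministic: the same model produces the same key in every session. A sketch of a genuinely per-session key (generateSessionApiKey is a hypothetical helper, not part of this commit):

    import { createHmac, randomBytes } from 'crypto'

    // Hypothetical: mix a fresh random nonce into the HMAC input so the
    // derived key actually changes on every session, per the commit title.
    function generateSessionApiKey(secret: string, modelId: string): string {
      const nonce = randomBytes(16).toString('hex') // new entropy per call
      return createHmac('sha256', secret).update(`${modelId}:${nonce}`).digest('base64')
    }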
@@ -289,6 +296,9 @@ export default class llamacpp_extension extends AIEngine {

     // disable llama-server webui
     args.push('--no-webui')
+    // update key for security; TODO: (qnixsynapse) Make it more secure
+    const api_key = this.generateApiKey(opts.modelPath)
+    args.push(`--api-key ${api_key}`)

     // model option is required
     // TODO: llama.cpp extension lookup model path based on modelId
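A caution on this hunk: `--api-key ${api_key}` is pushed as a single array element, so the flag and its value travel as one argv entry, and the Rust side below has to strip the "--api-key" prefix back out. If the spawn layer forwards entries verbatim, the conventional shape is two separate entries (which would also mean updating the Rust indexing):

    // Sketch: pass the flag and its value as separate argv entries, the
    // form llama-server's argument parser normally expects.
    args.push('--api-key', api_key)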
@@ -456,7 +466,7 @@ export default class llamacpp_extension extends AIEngine {
     const url = `${baseUrl}/chat/completions`
     const headers = {
       'Content-Type': 'application/json',
-      'Authorization': `Bearer test-k`,
+      'Authorization': `Bearer ${sessionInfo.api_key}`,
     }

     const body = JSON.stringify(opts)
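For illustration, a request authenticated with the stored per-session key could look like this (the baseUrl shape and the payload are assumptions; llama-server exposes an OpenAI-compatible /v1/chat/completions route):

    // Assumed usage sketch: authenticate against the local llama-server
    // using the key recorded in the session info.
    const baseUrl = `http://localhost:${sessionInfo.port}/v1` // assumed shape
    const res = await fetch(`${baseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${sessionInfo.api_key}`,
      },
      body: JSON.stringify({ messages: [{ role: 'user', content: 'ping' }] }),
    })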
@@ -42,6 +42,7 @@ pub struct SessionInfo {
     pub session_id: String, // opaque handle for unload/chat
     pub port: u16,          // llama-server output port
     pub model_path: String, // path of the loaded model
+    pub api_key: String,
 }

 #[derive(serde::Serialize, serde::Deserialize)]
@@ -85,7 +86,8 @@ pub async fn load_llama_model(
     // Configure the command to run the server
     let mut command = Command::new(server_path);

-    let model_path = args[0].replace("-m", "");
+    let model_path = args[2].replace("-m", "");
+    let api_key = args[1].replace("--api-key", "");
     command.args(args);

     // Optional: Redirect stdio if needed (e.g., for logging within Jan)
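The positional indices above only hold if the TypeScript side pushes arguments in a fixed order; note also that replace("-m", "") keeps the separating space in the extracted value. The layout these indices appear to assume, reconstructed from the earlier hunks:

    // Assumed argv layout that args[1] and args[2] rely on (fragile):
    // args[0] = '--no-webui'
    // args[1] = '--api-key <key>'   (flag and value in one string)
    // args[2] = '-m <model_path>'   (flag and value in one string)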
@@ -110,6 +112,7 @@ pub async fn load_llama_model(
         session_id: pid, // Use PID as session ID
         port,
         model_path,
+        api_key,
     };

     Ok(session_info)