feat: add port parameter to generateApiKey for secure model-specific API keys

The generateApiKey method now incorporates the model's port to create a unique,
port-specific API key, enhancing security by ensuring keys are tied to both
model ID and port. This change supports better isolation between models
running on different ports. Code formatting improvements were also made
for consistency and readability.
Authored by Akarshan Biswas on 2025-06-01 21:15:22 +05:30; committed by Louis
parent 4dfdcd68d5
commit e3d6cbd80f

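Before the diff, a rough sketch of the idea described above, for illustration only: in the real extension the hashing happens in the Rust backend behind invoke('generate_api_key', { modelId, apiSecret }), so the derivePortBoundApiKey helper, the HMAC-SHA256 choice, and the model ID and port values below are assumptions, not the actual implementation.

// Minimal sketch (not the extension's real code path): derive an API key that
// is bound to both the model ID and the port the llama-server instance runs on.
// HMAC-SHA256 and the helper name are illustrative assumptions; the commit's
// change simply passes `modelId + port` to the backend hashing command.
import { createHmac } from 'node:crypto'

function derivePortBoundApiKey(
  modelId: string,
  port: number,
  apiSecret: string
): string {
  // Mixing the port into the hashed material is what ties the key to a
  // specific server instance rather than to the model alone.
  return createHmac('sha256', apiSecret)
    .update(`${modelId}${port}`)
    .digest('hex')
}

// The same model loaded on two different ports yields two different keys,
// so a key issued for one instance is not valid against the other.
// All literal values here are placeholders.
const keyA = derivePortBoundApiKey('example-model', 3111, 'api-secret')
const keyB = derivePortBoundApiKey('example-model', 4222, 'api-secret')
console.log(keyA !== keyB) // true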

@@ -168,9 +168,9 @@ export default class llamacpp_extension extends AIEngine {
}
}
private async generateApiKey(modelId: string): Promise<string> {
private async generateApiKey(modelId: string, port: string): Promise<string> {
const hash = await invoke<string>('generate_api_key', {
modelId: modelId,
modelId: modelId + port,
apiSecret: this.apiSecret,
})
return hash
@@ -371,6 +371,7 @@ export default class llamacpp_extension extends AIEngine {
const taskId = this.createDownloadTaskId(modelId)
await this.downloadManager.cancelDownload(taskId)
}
/**
* Function to find a random port
*/
@@ -417,23 +418,30 @@ export default class llamacpp_extension extends AIEngine {
modelId,
'model.yml',
])
const modelConfig = await invoke<ModelConfig>('read_yaml', { path: modelConfigPath })
const modelConfig = await invoke<ModelConfig>('read_yaml', {
path: modelConfigPath,
})
const port = await this.getRandomPort()
// disable llama-server webui
args.push('--no-webui')
// update key for security; TODO: (qnixsynapse) Make it more secure
const api_key = await this.generateApiKey(modelId)
const api_key = await this.generateApiKey(modelId, String(port))
args.push('--api-key', api_key)
// model option is required
// NOTE: model_path and mmproj_path can be either relative to Jan's data folder or absolute path
const modelPath = await joinPath([janDataFolderPath, modelConfig.model_path])
const modelPath = await joinPath([
janDataFolderPath,
modelConfig.model_path,
])
args.push('-m', modelPath)
args.push('-a', modelId)
args.push('--port', String(port))
if (modelConfig.mmproj_path) {
const mmprojPath = await joinPath([janDataFolderPath, modelConfig.mmproj_path])
const mmprojPath = await joinPath([
janDataFolderPath,
modelConfig.mmproj_path,
])
args.push('--mmproj', mmprojPath)
}
@@ -475,7 +483,10 @@ export default class llamacpp_extension extends AIEngine {
console.log('Calling Tauri command llama_load with args:', args)
try {
const sInfo = await invoke<sessionInfo>('load_llama_model', { backendPath, args })
const sInfo = await invoke<sessionInfo>('load_llama_model', {
backendPath,
args,
})
// Store the session info for later use
this.activeSessions.set(sInfo.pid, sInfo)
@@ -496,7 +507,7 @@ export default class llamacpp_extension extends AIEngine {
try {
// Pass the PID as the session_id
const result = await invoke<unloadResult>('unload_llama_model', {
pid
pid,
})
// If successful, remove from active sessions
@@ -583,8 +594,9 @@ export default class llamacpp_extension extends AIEngine {
}
private findSessionByModel(modelId: string): sessionInfo | undefined {
return Array.from(this.activeSessions.values())
.find(session => session.modelId === modelId);
return Array.from(this.activeSessions.values()).find(
(session) => session.modelId === modelId
)
}
override async chat(
@@ -596,7 +608,6 @@ export default class llamacpp_extension extends AIEngine {
}
const baseUrl = `http://localhost:${sessionInfo.port}/v1`
const url = `${baseUrl}/chat/completions`
console.log(`Using api-key: ${sessionInfo.apiKey}`)
const headers = {
'Content-Type': 'application/json',
'Authorization': `Bearer ${sessionInfo.apiKey}`,