feat: add port parameter to generateApiKey for secure model-specific API keys
The generateApiKey method now incorporates the model's port to create a unique, port-specific API key, ensuring each key is tied to both the model ID and the port it serves. This improves isolation between models running on different ports. The change also drops a debug log that printed the API key, and includes formatting cleanups for consistency and readability.
parent 4dfdcd68d5
commit e3d6cbd80f
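For context before the diff, here is a minimal sketch of what port-bound key derivation could look like. It assumes an HMAC-SHA256 scheme and uses hypothetical model IDs, ports, and secret; the actual 'generate_api_key' Tauri command is implemented on the Rust side and its hashing scheme is not shown in this diff.

// Minimal sketch only; not the actual Rust implementation of 'generate_api_key'.
import { createHmac } from 'node:crypto'

function deriveApiKey(modelId: string, port: string, apiSecret: string): string {
  // Mirrors the 'modelId: modelId + port' change below: folding the port into
  // the hashed input makes the key specific to one server instance.
  return createHmac('sha256', apiSecret).update(modelId + port).digest('hex')
}

// The same model on two different ports yields two distinct keys
// (model ID, ports, and secret here are hypothetical).
console.log(deriveApiKey('example-model', '43517', 'api-secret'))
console.log(deriveApiKey('example-model', '50214', 'api-secret'))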
@@ -168,9 +168,9 @@ export default class llamacpp_extension extends AIEngine {
     }
   }
 
-  private async generateApiKey(modelId: string): Promise<string> {
+  private async generateApiKey(modelId: string, port: string): Promise<string> {
     const hash = await invoke<string>('generate_api_key', {
-      modelId: modelId,
+      modelId: modelId + port,
       apiSecret: this.apiSecret,
     })
     return hash
@@ -371,6 +371,7 @@ export default class llamacpp_extension extends AIEngine {
     const taskId = this.createDownloadTaskId(modelId)
     await this.downloadManager.cancelDownload(taskId)
   }
+
   /**
    * Function to find a random port
    */
@@ -417,23 +418,30 @@ export default class llamacpp_extension extends AIEngine {
       modelId,
       'model.yml',
     ])
-    const modelConfig = await invoke<ModelConfig>('read_yaml', { path: modelConfigPath })
+    const modelConfig = await invoke<ModelConfig>('read_yaml', {
+      path: modelConfigPath,
+    })
     const port = await this.getRandomPort()
 
     // disable llama-server webui
     args.push('--no-webui')
-    // update key for security; TODO: (qnixsynapse) Make it more secure
-    const api_key = await this.generateApiKey(modelId)
+    const api_key = await this.generateApiKey(modelId, String(port))
     args.push('--api-key', api_key)
 
     // model option is required
     // NOTE: model_path and mmproj_path can be either relative to Jan's data folder or absolute path
-    const modelPath = await joinPath([janDataFolderPath, modelConfig.model_path])
+    const modelPath = await joinPath([
+      janDataFolderPath,
+      modelConfig.model_path,
+    ])
     args.push('-m', modelPath)
     args.push('-a', modelId)
     args.push('--port', String(port))
     if (modelConfig.mmproj_path) {
-      const mmprojPath = await joinPath([janDataFolderPath, modelConfig.mmproj_path])
+      const mmprojPath = await joinPath([
+        janDataFolderPath,
+        modelConfig.mmproj_path,
+      ])
       args.push('--mmproj', mmprojPath)
     }
 
@@ -475,7 +483,10 @@ export default class llamacpp_extension extends AIEngine {
     console.log('Calling Tauri command llama_load with args:', args)
 
     try {
-      const sInfo = await invoke<sessionInfo>('load_llama_model', { backendPath, args })
+      const sInfo = await invoke<sessionInfo>('load_llama_model', {
+        backendPath,
+        args,
+      })
 
       // Store the session info for later use
       this.activeSessions.set(sInfo.pid, sInfo)
@@ -496,7 +507,7 @@ export default class llamacpp_extension extends AIEngine {
     try {
       // Pass the PID as the session_id
       const result = await invoke<unloadResult>('unload_llama_model', {
-        pid
+        pid,
       })
 
       // If successful, remove from active sessions
@@ -583,8 +594,9 @@ export default class llamacpp_extension extends AIEngine {
   }
 
   private findSessionByModel(modelId: string): sessionInfo | undefined {
-    return Array.from(this.activeSessions.values())
-      .find(session => session.modelId === modelId);
+    return Array.from(this.activeSessions.values()).find(
+      (session) => session.modelId === modelId
+    )
   }
 
   override async chat(
@@ -596,7 +608,6 @@ export default class llamacpp_extension extends AIEngine {
     }
     const baseUrl = `http://localhost:${sessionInfo.port}/v1`
     const url = `${baseUrl}/chat/completions`
-    console.log(`Using api-key: ${sessionInfo.apiKey}`)
     const headers = {
       'Content-Type': 'application/json',
       'Authorization': `Bearer ${sessionInfo.apiKey}`,