fix: Update placeholder text and error message (#6263)

This commit improves the clarity of the llama.cpp extension's settings placeholder and device-query error message.

- Corrected a placeholder example from `GGML_VK_VISIBLE_DEVICES='0,1'` to `GGML_VK_VISIBLE_DEVICES=0,1` for better accuracy (see the sketch after this list).
- Changed an ambiguous error message from `"Failed to load llama-server: ${error}"` to the more specific `"Failed to load llamacpp backend"`.
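
The likely reason the quotes were dropped: the value the user enters is forwarded to the llama-server process as a literal environment variable, not through a shell, so quotes would become part of the value. A minimal TypeScript sketch of that idea using Node's `child_process`; the helper name and the way the setting reaches it are assumptions, not the extension's actual code:

```ts
import { spawn } from 'node:child_process'

// Hypothetical helper: forwards a user-supplied device filter to llama-server.
// Because no shell is involved, whatever the user types becomes the literal
// value of GGML_VK_VISIBLE_DEVICES.
function spawnLlamaServer(visibleDevices: string) {
  return spawn('llama-server', ['--port', '8080'], {
    env: { ...process.env, GGML_VK_VISIBLE_DEVICES: visibleDevices },
    stdio: 'inherit',
  })
}

// Matches the corrected placeholder: plain comma-separated device indices.
spawnLlamaServer('0,1')

// With the old placeholder a user might have entered "'0,1'" (quotes and all),
// which would not be parsed as the device indices 0 and 1.
```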
Author: Akarshan Biswas, 2025-08-21 16:01:31 +05:30 (committed by GitHub)
parent 5c3a6fec32
commit 9c25480c7b
2 changed files with 2 additions and 2 deletions


@@ -17,7 +17,7 @@
 "controllerType": "input",
 "controllerProps": {
 "value": "none",
-"placeholder": "Eg, GGML_VK_VISIBLE_DEVICES='0,1'",
+"placeholder": "Eg. GGML_VK_VISIBLE_DEVICES=0,1",
 "type": "text",
 "textAlign": "right"
 }


@@ -1763,7 +1763,7 @@ export default class llamacpp_extension extends AIEngine {
 return dList
 } catch (error) {
 logger.error('Failed to query devices:\n', error)
-throw new Error(`Failed to load llama-server: ${error}`)
+throw new Error("Failed to load llamacpp backend")
 }
 }
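
One consequence of the new wording is that the underlying cause now survives only in the `logger.error` line above the throw. If a caller ever needs it programmatically, a possible pattern (an editor's sketch, not part of this commit) is to attach the original error via the standard `cause` option:

```ts
// Editor's sketch (not part of this commit): keep the user-facing message
// generic while preserving the original error for callers via `err.cause`.
async function queryDevices(): Promise<string[]> {
  try {
    return await fetchDeviceList() // hypothetical stand-in for the real device query
  } catch (error) {
    console.error('Failed to query devices:\n', error)
    throw new Error('Failed to load llamacpp backend', { cause: error })
  }
}

// Hypothetical stub so the sketch is self-contained.
async function fetchDeviceList(): Promise<string[]> {
  throw new Error('vulkan loader not found')
}
```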