fix(yarn-mistral): broken gguf model
parent 51eeccfcde
commit dc9c84a817
@@ -1,31 +0,0 @@
-{
-  "sources": [
-    {
-      "url": "https://huggingface.co/TheBloke/Yarn-Mistral-7B-128k-GGUF/resolve/main/yarn-mistral-7b-128k.Q4_K_M.gguf"
-    }
-  ],
-  "id": "yarn-mistral-7b",
-  "object": "model",
-  "name": "Yarn Mistral 7B Q4",
-  "version": "1.0",
-  "description": "Yarn Mistral 7B is a language model for long context and supports a 128k token context window.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "{prompt}"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "NousResearch, The Bloke",
-    "tags": ["7B", "Finetuned"],
-    "size": 4370000000
-  },
-  "engine": "nitro"
-}
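For context on this kind of breakage, below is a minimal sketch of checking that the GGUF source URL in a model definition like the one removed above still resolves and roughly matches its declared `metadata.size`. It uses only the Python standard library; the local `model.json` path and the 5% size tolerance are assumptions for illustration, not part of this commit or the Jan/Nitro codebase.

```python
# Sketch: verify that a model.json's GGUF source URL still resolves and that the
# remote file size roughly matches the declared "size" field.
# Assumptions: Python 3 stdlib only; MODEL_JSON is a hypothetical local path.
import json
import urllib.request

MODEL_JSON = "model.json"  # hypothetical path to a model definition like the one above

with open(MODEL_JSON) as f:
    model = json.load(f)

url = model["sources"][0]["url"]
declared_size = model["metadata"]["size"]

# HEAD request avoids downloading the ~4 GB file; redirects are followed so the
# Hugging Face "resolve" URL lands on the actual blob.
req = urllib.request.Request(url, method="HEAD")
with urllib.request.urlopen(req) as resp:
    status = resp.status
    remote_size = int(resp.headers.get("Content-Length", 0))

print(f"HTTP {status}, remote size {remote_size}, declared size {declared_size}")
if remote_size and abs(remote_size - declared_size) > declared_size * 0.05:
    print("Warning: remote file size differs from metadata.size by more than 5%")
```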