* fix(mistral-ins): clean redundant parameters
* add(yarn-mistral): add the newly requested model
* fix(trinity-v1): delete Trinity v1 from the hub
* add(tulu-2-70b): Llama 70B alternative
* fix(lzlv-70b): delete lzlv-70b and switch to Tulu 2
* fix(mistral-ins): upgrade model version to v0.2
* fix(model-extention): bump version to 1.0.18
* add(dolphin 8x7b): add the current best MoE fine-tuned model
* add(openchat): the best 7B model
* fix(tinyllama): bump the model version to v1
* fix(stealth): upgrade stealth to v1.3
* Revert "fix(stealth): upgrade stealth to v1.3"

  This reverts commit da24df3fb5d69f93d92cc4dd45f991d548aff6aa.
* fix(stealth): upgrade version to v1.3
```json
{
  "source_url": "https://huggingface.co/TheBloke/Yarn-Mistral-7B-128k-GGUF/resolve/main/yarn-mistral-7b-128k.Q4_K_M.gguf",
  "id": "yarn-mistral-7b",
  "object": "model",
  "name": "Yarn Mistral 7B Q4",
  "version": "1.0",
  "description": "Yarn Mistral 7B is a language model for long context and supports a 128k token context window.",
  "format": "gguf",
  "settings": {
    "ctx_len": 4096,
    "prompt_template": "{prompt}"
  },
  "parameters": {
    "max_tokens": 4096
  },
  "metadata": {
    "author": "NousResearch, The Bloke",
    "tags": ["7B", "Finetuned"],
    "size": 4370000000
  },
  "engine": "nitro"
}
```
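For readers skimming the diff, here is a minimal TypeScript sketch of the manifest shape this JSON appears to follow, inferred purely from the fields above. The `ModelManifest` type and `loadModelManifest` helper are illustrative names and not the extension's actual API; treat this as an assumption about the schema, not a definitive definition.

```typescript
import { readFileSync } from "fs";

// Sketch of the model manifest shape inferred from the JSON above.
// Type and helper names are hypothetical; the real extension may differ.
interface ModelManifest {
  source_url: string;        // direct link to the GGUF file
  id: string;                // unique model identifier, e.g. "yarn-mistral-7b"
  object: "model";
  name: string;              // display name shown in the hub
  version: string;
  description: string;
  format: "gguf";
  settings: {
    ctx_len: number;         // context length used at load time
    prompt_template: string; // e.g. "{prompt}"
  };
  parameters: {
    max_tokens: number;      // generation cap
  };
  metadata: {
    author: string;
    tags: string[];
    size: number;            // file size in bytes
  };
  engine: string;            // e.g. "nitro"
}

// Hypothetical helper: read a manifest file and do a light sanity check.
function loadModelManifest(path: string): ModelManifest {
  const manifest = JSON.parse(readFileSync(path, "utf-8")) as ModelManifest;
  if (!manifest.id || !manifest.source_url) {
    throw new Error(`Invalid model manifest at ${path}`);
  }
  return manifest;
}
```

Note that in this file `ctx_len` and `max_tokens` are set to 4096 even though the description advertises a 128k context window, so the shipped defaults are conservative rather than using the model's full context.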