{
  "sources": [
    {
      "filename": "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf",
      "url": "https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf"
    }
  ],
  "id": "mixtral-8x7b-instruct",
  "object": "model",
  "name": "Mixtral 8x7B Instruct Q4",
  "version": "1.1",
  "description": "The Mixtral-8x7B is a pretrained generative Sparse Mixture of Experts. The Mixtral-8x7B outperforms 70B models on most benchmarks.",
  "format": "gguf",
  "settings": {
    "ctx_len": 32768,
    "prompt_template": "[INST] {prompt} [/INST]",
    "llama_model_path": "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf",
    "ngl": 100
  },
  "parameters": {
    "temperature": 0.7,
    "top_p": 0.95,
    "stream": true,
    "max_tokens": 32768,
    "frequency_penalty": 0,
    "presence_penalty": 0
  },
  "metadata": {
    "author": "MistralAI, TheBloke",
    "tags": ["70B", "Foundational Model"],
    "size": 26440000000
  },
  "engine": "llama-cpp"
}
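A minimal sketch of how the fields above could be consumed, assuming the GGUF file from sources[0].url has already been downloaded next to the script and that llama-cpp-python is used as the binding for the llama-cpp engine; the mapping of ctx_len to n_ctx and ngl to n_gpu_layers is an illustration of intent, not the engine's actual loader.

# Illustrative only: load the config and run one completion with llama-cpp-python.
import json
from llama_cpp import Llama

with open("model.json") as f:
    cfg = json.load(f)

llm = Llama(
    model_path=cfg["settings"]["llama_model_path"],  # mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf
    n_ctx=cfg["settings"]["ctx_len"],                # 32768-token context window
    n_gpu_layers=cfg["settings"]["ngl"],             # "ngl": 100 offloads all layers to GPU
)

# The template "[INST] {prompt} [/INST]" is a plain format string here.
prompt = cfg["settings"]["prompt_template"].format(prompt="What is a Mixture of Experts?")

out = llm(
    prompt,
    max_tokens=512,  # the config's 32768 would allow the whole context; a smaller cap is used for the demo
    temperature=cfg["parameters"]["temperature"],
    top_p=cfg["parameters"]["top_p"],
)
print(out["choices"][0]["text"])

Streaming ("stream": true) and the frequency/presence penalties are left out of this sketch; they would be passed through in the same way as temperature and top_p.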