{
  "sources": [
    {
      "filename": "Meta-Llama-3-8B-Instruct-Q4_K_M.gguf",
      "url": "https://huggingface.co/bartowski/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf"
    }
  ],
  "id": "llama3-8b-instruct",
  "object": "model",
  "name": "Llama 3 8B Q4",
  "version": "1.2",
  "description": "Meta's Llama 3 excels at general usage situations, including chat, general world knowledge, and coding.",
  "format": "gguf",
  "settings": {
    "ctx_len": 8192,
    "prompt_template": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
    "llama_model_path": "Meta-Llama-3-8B-Instruct-Q4_K_M.gguf",
    "ngl": 33
  },
  "parameters": {
    "temperature": 0.7,
    "top_p": 0.95,
    "stream": true,
    "max_tokens": 8192,
    "stop": ["<|end_of_text|>", "<|eot_id|>"],
    "frequency_penalty": 0,
    "presence_penalty": 0
  },
  "metadata": {
    "author": "MetaAI",
    "tags": ["8B", "Featured"],
    "size": 4920000000
  },
  "engine": "nitro"
}