{
  "source_url": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v0.6/resolve/main/ggml-model-q4_0.gguf",
  "id": "tinyllama-1.1b",
  "object": "model",
  "name": "TinyLlama Chat 1.1B Q4",
  "version": "1.0",
  "description": "The TinyLlama project, featuring a 1.1B parameter Llama model, is pretrained on an expansive 3 trillion token dataset. Its design ensures easy integration with various Llama-based open-source projects. Despite its smaller size, it efficiently utilizes lower computational and memory resources, drawing on GPT-4's analytical prowess to enhance its conversational abilities and versatility.",
  "format": "gguf",
  "settings": {
    "ctx_len": 2048,
    "prompt_template": "<|system|>\n{system_message}<|user|>\n{prompt}<|assistant|>"
  },
  "parameters": {
    "max_tokens": 2048
  },
  "metadata": {
    "author": "TinyLlama",
    "tags": ["Tiny", "Foundation Model"],
    "size": 637000000
  },
  "engine": "nitro"
}