{
  "source_url": "https://huggingface.co/TheBloke/neural-chat-7B-v3-1-GGUF/resolve/main/neural-chat-7b-v3-1.Q4_K_M.gguf",
  "id": "neural-chat-7b",
  "object": "model",
  "name": "Neural Chat 7B Q4",
  "version": "1.0",
  "description": "The Neural Chat 7B model, built on mistralai/Mistral-7B-v0.1, was fine-tuned on the Open-Orca/SlimOrca dataset and aligned with the Direct Preference Optimization (DPO) algorithm. It has demonstrated substantial improvements across various AI tasks and performs well on the open_llm_leaderboard.",
  "format": "gguf",
  "settings": {
    "ctx_len": 2048,
    "system_prompt": "### System:\n",
    "user_prompt": "### User:\n",
    "ai_prompt": "### Assistant:\n"
  },
  "parameters": {
    "max_tokens": 2048
  },
  "metadata": {
    "author": "Intel, The Bloke",
    "tags": ["Recommended", "7B", "Finetuned"],
    "size": 4370000000
  }
}
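
A minimal Python sketch, assuming the config is saved as neural-chat-7b/model.json, of how a client might read the settings block and assemble a prompt from the system_prompt, user_prompt, and ai_prompt template fields. The file path and the build_prompt helper are assumptions for illustration, not part of any specific runtime's API.

import json

# Load the model config (path is an assumption for this sketch).
with open("neural-chat-7b/model.json", "r", encoding="utf-8") as f:
    model = json.load(f)

settings = model["settings"]

def build_prompt(system_msg: str, user_msg: str) -> str:
    # Hypothetical helper: concatenates the template prefixes with the
    # messages, ending with the assistant prefix so the model completes it.
    # The actual runtime may apply the template differently.
    return (
        settings["system_prompt"] + system_msg + "\n"
        + settings["user_prompt"] + user_msg + "\n"
        + settings["ai_prompt"]
    )

print(build_prompt("You are a helpful assistant.",
                   "Summarize DPO in one sentence."))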