{ "source_url": "https://huggingface.co/TheBloke/NeuralHermes-2.5-Mistral-7B-GGUF/resolve/main/neuralhermes-2.5-mistral-7b.Q4_K_M.gguf", "id": "neuralhermes-7b", "object": "model", "name": "NeuralHermes 7B", "version": "1.0", "description": "NeuralHermes 2.5 has been enhanced using Direct Preference Optimization. This fine-tuning, inspired by the RLHF process of Neural-chat-7b and OpenHermes-2.5-Mistral-7B, has led to improved performance across several benchmarks.", "format": "gguf", "settings": { "ctx_len": 4096, "system_prompt": "<|im_start|>system\n", "user_prompt": "<|im_end|>\n<|im_start|>user\n", "ai_prompt": "<|im_end|>\n<|im_start|>assistant\n" }, "parameters": { "max_tokens": 4096 }, "metadata": { "author": "Intel, The Bloke", "tags": ["General Use", "Code", "Big Context Length"], "size": 4370000000 } }