* draft model.json * islm3b update * capybara 34b update * deepseek coder update * dolphin yi update * fix the maxtokens of islm * lzlv 70b update * marx3b update * mythomax 13b update * update neural chat 7b * noromaid 20b update * update openchat 7b * openhermes7b update * openorca 7b * orca 13b update * phind 34b update * rocket 3b update * starling 7b update * storytelling 70b update * tiefighter 13B * update tiefighter tags * tinyllama update * wizard coder 13b * update wizard coder 13b description * wizard coder 34b update * wizard coder minor fix * xwin 70b update * yarn 70b * yi 34b * zephyr beta 7b * neuralhermes-7b update * change path + ctxlen * update id * fix starling
24 lines
929 B
JSON
24 lines
929 B
JSON
{
  "source_url": "https://huggingface.co/TheBloke/NeuralHermes-2.5-Mistral-7B-GGUF/resolve/main/neuralhermes-2.5-mistral-7b.Q4_K_M.gguf",
  "id": "neuralhermes-7b",
  "object": "model",
  "name": "NeuralHermes 7B",
  "version": "1.0",
  "description": "NeuralHermes 2.5 has been enhanced using Direct Preference Optimization. This fine-tuning, inspired by the RLHF process of Neural-chat-7b and OpenHermes-2.5-Mistral-7B, has led to improved performance across several benchmarks.",
  "format": "gguf",
  "settings": {
    "ctx_len": 4096,
    "system_prompt": "<|im_start|>system\n",
    "user_prompt": "<|im_end|>\n<|im_start|>user\n",
    "ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
  },
  "parameters": {
    "max_tokens": 4096
  },
  "metadata": {
    "author": "Intel, The Bloke",
    "tags": ["General Use", "Code", "Big Context Length"],
    "size": 4370000000
  }
}