* add(mixtral): add model.json for mixtral
* archived some models + update the model.json
* add(model): add pandora 10.7b
* fix(model): update description
* fix(model): bump versions and change the featured model to trinity
* fix(model): archive neuralchat
* fix(model): deprecate all old models
* fix(trinity): add cover image and change description
* fix(trinity): update cover png
* add(pandora): cover image
* fix(pandora): cover image
* chore: model desc nits
* fix(models): adjust the size for solars and pandoras
* add(mixtral): description

---------

Co-authored-by: 0xSage <n@pragmatic.vc>
openhermes-neural-7b model.json (23 lines, 752 B, JSON):
```json
{
  "source_url": "https://huggingface.co/janhq/openhermes-2.5-neural-chat-v3-3-slerp-GGUF/resolve/main/openhermes-2.5-neural-chat-v3-3-slerp.Q4_K_M.gguf",
  "id": "openhermes-neural-7b",
  "object": "model",
  "name": "OpenHermes Neural 7B Q4",
  "version": "1.0",
  "description": "OpenHermes Neural is a merged model using the TIES method.",
  "format": "gguf",
  "settings": {
    "ctx_len": 2048,
    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
  },
  "parameters": {
    "max_tokens": 2048
  },
  "metadata": {
    "author": "Intel, Jan",
    "tags": ["7B", "Merged"],
    "size": 4370000000
  },
  "engine": "nitro"
}
```
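For reference, the file above follows the model manifest shape touched throughout this PR (source_url, id, settings, parameters, metadata, engine). The sketch below is a hypothetical TypeScript reading of that shape, not the project's actual type definitions; the field names mirror the JSON above, while the `ModelConfig` interface, `loadModelConfig` helper, and example path are purely illustrative assumptions.

```typescript
import { readFileSync } from "fs";

// Hypothetical types mirroring the fields in the model.json above;
// not the project's real schema definitions.
interface ModelSettings {
  ctx_len: number;
  prompt_template: string;
}

interface ModelMetadata {
  author: string;
  tags: string[];
  size: number; // download size in bytes
}

interface ModelConfig {
  source_url: string;
  id: string;
  object: "model";
  name: string;
  version: string;
  description: string;
  format: "gguf";
  settings: ModelSettings;
  parameters: { max_tokens: number };
  metadata: ModelMetadata;
  engine: string;
}

// Illustrative loader: parse a model.json and do a minimal sanity check
// on the fields this PR's manifests rely on.
function loadModelConfig(path: string): ModelConfig {
  const raw = JSON.parse(readFileSync(path, "utf-8")) as Partial<ModelConfig>;
  for (const key of ["source_url", "id", "settings", "engine"] as const) {
    if (raw[key] === undefined) {
      throw new Error(`model.json is missing required field: ${key}`);
    }
  }
  return raw as ModelConfig;
}

// Example usage (hypothetical path):
// const model = loadModelConfig("models/openhermes-neural-7b/model.json");
// console.log(`${model.name}: ${(model.metadata.size / 1e9).toFixed(2)} GB`);
```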