* feat: Support multiple model binaries
* fix: Update downloadModel with multiple binaries handler
* feat: Add 3 models with multiple binaries
* chore: fix model download
* fix: model file lookup & model path
* chore: add .prettierrc
* chore: refactor docs
* chore: bump model version
* fix(capybara): add filename
* fix(codeninja): add file name + llama model path
* fix(default): add llama model path
* fix(deepseek coder): add filename
* fix(deepseek 33B): add filename
* fix(dolphin mixtral): add filename
* fix(llama2-chat): add filename
* fix(llama2-70B): add filename
* fix(mistral 7b): add filename + model path
* fix(bakllava): correct size model
* fix(llava-7b): correct size model
* fix(llava-13b): correct size model
* fix(mixtral-8x7b): add file name + modelpath
* fix(noramaid-7b): add file name + modelpath
* fix(openchat-7b): add file name + modelpath
* fix(openhermes-7b): add file name + modelpath
* fix(phi2-3b): add file name + modelpath
* fix(phind): add file name + modelpath
* fix(solarslerp): add file name + modelpath
* fix(starling): add file name + modelpath
* fix(stealth): add file name + modelpath
* fix(tinyllama): add file name + modelpath
* fix(trinity): add file name + modelpath
* fix(tulu): add file name + modelpath
* fix(wizardcoder): add file name + modelpath
* fix(yi): add file name + modelpath
* update from source -> sources

Signed-off-by: James <james@jan.ai>

---------

Signed-off-by: James <james@jan.ai>
Co-authored-by: hiro <vuonghoainam.work@gmail.com>
Co-authored-by: hahuyhoang411 <hahuyhoanghhh41@gmail.com>
Co-authored-by: James <james@jan.ai>
{
  "sources": [
    {
      "filename": "mistral-7b-instruct-v0.2.Q4_K_M.gguf",
      "url": "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf"
    }
  ],
  "id": "mistral-ins-7b-q4",
  "object": "model",
  "name": "Mistral Instruct 7B Q4",
  "version": "1.0",
  "description": "Mistral Instruct 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
  "format": "gguf",
  "settings": {
    "ctx_len": 4096,
    "prompt_template": "[INST] {prompt} [/INST]",
    "llama_model_path": "mistral-7b-instruct-v0.2.Q4_K_M.gguf"
  },
  "parameters": {
    "temperature": 0.7,
    "top_p": 0.95,
    "stream": true,
    "max_tokens": 4096,
    "stop": [],
    "frequency_penalty": 0,
    "presence_penalty": 0
  },
  "metadata": {
    "author": "MistralAI, The Bloke",
    "tags": ["Featured", "7B", "Foundational Model"],
    "size": 4370000000,
    "cover": "https://raw.githubusercontent.com/janhq/jan/main/models/mistral-ins-7b-q4/cover.png"
  },
  "engine": "nitro"
}
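For context, the "sources" array above is what the download step walks over now that a model can ship as one or more binaries, and "settings.llama_model_path" names which downloaded file the nitro engine loads. The sketch below is a minimal, hypothetical TypeScript illustration of that flow; the ModelConfig/ModelSource types, the downloadModelBinaries helper, and the fetch-based download (Node 18+) are assumptions for illustration, not Jan's actual downloadModel implementation.

// Hypothetical sketch: fetch every binary listed in "sources" into the model
// folder before the model is loaded. Names and shapes here are assumptions.
import * as fs from "fs";
import * as path from "path";

interface ModelSource {
  filename?: string; // some entries may omit this; fall back to the URL's last segment
  url: string;
}

interface ModelConfig {
  id: string;
  sources: ModelSource[];
  settings: { llama_model_path?: string };
}

async function downloadModelBinaries(config: ModelConfig, modelDir: string): Promise<void> {
  fs.mkdirSync(modelDir, { recursive: true });

  for (const source of config.sources) {
    // Prefer the explicit filename; otherwise derive it from the URL path.
    const filename = source.filename ?? path.basename(new URL(source.url).pathname);
    const destination = path.join(modelDir, filename);

    // Skip binaries that are already present on disk.
    if (fs.existsSync(destination)) continue;

    const response = await fetch(source.url);
    if (!response.ok) {
      throw new Error(`Failed to download ${source.url}: ${response.status}`);
    }
    fs.writeFileSync(destination, Buffer.from(await response.arrayBuffer()));
  }
}

A caller would read the model.json shown above, pass the parsed object to downloadModelBinaries together with the model's folder, and then hand settings.llama_model_path to the engine once all listed files exist locally.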