jan/models/config/default-model.json
Louis 0e48be67e8
feat: support multiple model binaries (#1659)
* feat: Support multiple model binaries

* fix: Update downloadModel with multiple binaries handler

* feat: Add 3 models with multiple binaries

* chore: fix model download

* fix: model file lookup & model path

* chore: add .prettierrc

* chore: refactor docs

* chore: bump model version

* fix(capybara): add filename

* fix(codeninja): add file name + llama model path

* fix(default): add llama model path

* fix(deepseek coder): add filename

* fix(deepseek 33B): add filename

* fix(dolphin mixtral): add filename

* fix(llama2-chat): add filename

* fix(llama2-70B): add filename

* fix(mistral 7b): add filename + model path

* fix(bakllava): correct model size

* fix(llava-7b): correct model size

* fix(llava-13b): correct model size

* fix(mixtral-8x7b): add file name + modelpath

* fix(noramaid-7b): add file name + modelpath

* fix(openchat-7b): add file name + modelpath

* fix(openhermes-7b): add file name + modelpath

* fix(phi2-3b): add file name + modelpath

* fix(phind): add file name + modelpath

* fix(solarslerp): add file name + modelpath

* fix(starling): add file name + modelpath

* fix(stealth): add file name + modelpath

* fix(tinyllama): add file name + modelpath

* fix(trinity): add file name + modelpath

* fix(tulu): add file name + modelpath

* fix(wizardcoder): add file name + modelpath

* fix(yi): add file name + modelpath

* update from source -> sources

Signed-off-by: James <james@jan.ai>

---------

Signed-off-by: James <james@jan.ai>
Co-authored-by: hiro <vuonghoainam.work@gmail.com>
Co-authored-by: hahuyhoang411 <hahuyhoanghhh41@gmail.com>
Co-authored-by: James <james@jan.ai>
2024-01-25 14:05:33 +07:00
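
The core schema change is the final bullet: the single `source` field becomes a `sources` array of `{url, filename}` objects, so one model entry can list several binaries to download. The three models added with multiple binaries are plausibly the llava/bakllava entries fixed above, since those need a main GGUF weights file plus a multimodal projector file. A minimal sketch of such an entry follows; the repository URL and filenames are illustrative placeholders, not the repository's actual values:

"sources": [
  {
    "url": "https://huggingface.co/example/llava-7b-gguf/resolve/main/llava-v1.5-7b.Q4_K_M.gguf",
    "filename": "llava-v1.5-7b.Q4_K_M.gguf"
  },
  {
    "url": "https://huggingface.co/example/llava-7b-gguf/resolve/main/mmproj-model-f16.gguf",
    "filename": "mmproj-model-f16.gguf"
  }
]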

37 lines · 697 B · JSON

{
  "object": "model",
  "version": 1,
  "format": "gguf",
  "sources": [
    {
      "url": "N/A",
      "filename": "N/A"
    }
  ],
  "id": "N/A",
  "name": "N/A",
  "created": 0,
  "description": "User self import model",
  "settings": {
    "ctx_len": 4096,
    "embedding": false,
    "prompt_template": "{system_message}\n### Instruction: {prompt}\n### Response:",
    "llama_model_path": "N/A"
  },
  "parameters": {
    "temperature": 0.7,
    "top_p": 0.95,
    "stream": true,
    "max_tokens": 2048,
    "stop": ["<endofstring>"],
    "frequency_penalty": 0,
    "presence_penalty": 0
  },
  "metadata": {
    "author": "User",
    "tags": [],
    "size": 0
  },
  "engine": "nitro"
}
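
This file is evidently the placeholder template for user-imported models (its description reads "User self import model"), so the "N/A" fields are filled in at import time. For a single-binary import, the resolved fields might look like the sketch below, with `llama_model_path` set to the same name as the downloaded `filename`, matching the `add filename + model path` fixes in the commit above. All values are illustrative, not taken from the repository:

"sources": [
  {
    "url": "https://huggingface.co/example/mistral-7b-gguf/resolve/main/mistral-7b-instruct.Q4_K_M.gguf",
    "filename": "mistral-7b-instruct.Q4_K_M.gguf"
  }
],
"id": "mistral-7b-instruct",
"settings": {
  "ctx_len": 4096,
  "embedding": false,
  "prompt_template": "{system_message}\n### Instruction: {prompt}\n### Response:",
  "llama_model_path": "mistral-7b-instruct.Q4_K_M.gguf"
}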