Merge pull request #2410 from janhq/chore/update-modelhub
Chore: Update model hub v0.4.10
commit a336c1394d
@@ -1,34 +0,0 @@
-{
-  "sources": [
-    {
-      "filename": "nous-capybara-34b.Q5_K_M.gguf",
-      "url": "https://huggingface.co/TheBloke/Nous-Capybara-34B-GGUF/resolve/main/nous-capybara-34b.Q5_K_M.gguf"
-    }
-  ],
-  "id": "capybara-34b",
-  "object": "model",
-  "name": "Capybara 200k 34B Q5",
-  "version": "1.0",
-  "description": "Nous Capybara 34B is a long context length model that supports 200K tokens.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "USER:\n{prompt}\nASSISTANT:",
-    "llama_model_path": "nous-capybara-34b.Q5_K_M.gguf"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "NousResearch, The Bloke",
-    "tags": ["34B", "Finetuned"],
-    "size": 24320000000
-  },
-  "engine": "nitro"
-}
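A note on units: the "size" values in each file's metadata are raw byte counts, so the deleted capybara-34b entry above weighs in at roughly 24.32 GB. A minimal Python sketch for reading those values; the helper name is ours, not part of Jan:

# Hypothetical helper (not from the Jan codebase): render the raw
# byte counts stored in "metadata.size" as decimal units.
def human_size(num_bytes: float) -> str:
    for unit in ("B", "KB", "MB", "GB"):
        if num_bytes < 1000:
            return f"{num_bytes:.2f} {unit}"
        num_bytes /= 1000
    return f"{num_bytes:.2f} TB"

print(human_size(24320000000))  # -> "24.32 GB" (the capybara-34b entry)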
models/command-r-34b/model.json (new file, 35 lines)
@@ -0,0 +1,35 @@
+{
+  "sources": [
+    {
+      "filename": "c4ai-command-r-v01-Q4_K_M.gguf",
+      "url": "https://huggingface.co/andrewcanis/c4ai-command-r-v01-GGUF/resolve/main/c4ai-command-r-v01-Q4_K_M.gguf"
+    }
+  ],
+  "id": "command-r-34b",
+  "object": "model",
+  "name": "Command-R v01 34B Q4",
+  "version": "1.0",
+  "description": "C4AI Command-R developed by CohereAI is optimized for a variety of use cases including reasoning, summarization, and question answering.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>",
+    "llama_model_path": "c4ai-command-r-v01-Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "CohereAI",
+    "tags": ["34B", "Finetuned"],
+    "size": 21500000000
+  },
+  "engine": "nitro"
+}
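Each model.json's "settings.prompt_template" carries a {prompt} placeholder (and, for chat-style templates, {system_message}) that the inference engine fills in before generation. A rough Python sketch of that substitution, assuming plain string replacement (illustrative only, not Jan's actual implementation):

# Fill the placeholders used by the model.json files in this commit.
def render_prompt(template: str, prompt: str, system_message: str = "") -> str:
    return (template
            .replace("{system_message}", system_message)
            .replace("{prompt}", prompt))

# The Command-R template from the new file above:
template = ("<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|>"
            "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>")
print(render_prompt(template, "What is GGUF?"))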
@@ -1,34 +0,0 @@
-{
-  "sources": [
-    {
-      "filename": "dolphin-2.7-mixtral-8x7b.Q4_K_M.gguf",
-      "url": "https://huggingface.co/TheBloke/dolphin-2.7-mixtral-8x7b-GGUF/resolve/main/dolphin-2.7-mixtral-8x7b.Q4_K_M.gguf"
-    }
-  ],
-  "id": "dolphin-2.7-mixtral-8x7b",
-  "object": "model",
-  "name": "Dolphin 8x7B Q4",
-  "version": "1.0",
-  "description": "Dolphin is an uncensored model built on Mixtral-8x7b. It is good at programming tasks.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
-    "llama_model_path": "dolphin-2.7-mixtral-8x7b.Q4_K_M.gguf"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Cognitive Computations, TheBloke",
-    "tags": ["70B", "Finetuned"],
-    "size": 26440000000
-  },
-  "engine": "nitro"
-}
@@ -9,7 +9,7 @@
   "object": "model",
   "name": "Dolphin Phi-2 2.7B Q8",
   "version": "1.0",
-  "description": "Dolphin Phi-2 is a 2.7B model, fine-tuned for chat, excelling in common sense and logical reasoning benchmarks.",
+  "description": "Dolphin Phi-2 is a good alternative for Phi-2 in chatting",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
@@ -27,7 +27,7 @@
   },
   "metadata": {
     "author": "Google",
-    "tags": ["2B", "Finetuned"],
+    "tags": ["2B", "Finetuned", "Tiny"],
     "size": 1500000000
   },
   "engine": "nitro"
@@ -9,7 +9,7 @@
   "object": "model",
   "name": "Gemma 7B Q4",
   "version": "1.0",
-  "description": "Gemma is built from the same technology with Google's Gemini.",
+  "description": "Google's Gemma is built for multilingual purpose",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
@@ -27,7 +27,7 @@
   },
   "metadata": {
     "author": "Google",
-    "tags": ["7B", "Finetuned"],
+    "tags": ["7B", "Finetuned", "Featured"],
     "size": 5330000000
   },
   "engine": "nitro"
models/hermes-pro-7b/model.json (new file, 35 lines)
@@ -0,0 +1,35 @@
+{
+  "sources": [
+    {
+      "filename": "Hermes-2-Pro-Mistral-7B.Q4_K_M.gguf",
+      "url": "https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF/resolve/main/Hermes-2-Pro-Mistral-7B.Q4_K_M.gguf"
+    }
+  ],
+  "id": "hermes-pro-7b",
+  "object": "model",
+  "name": "Hermes Pro 7B Q4",
+  "version": "1.0",
+  "description": "Hermes Pro is superior in Roleplaying, Reasoning and Explaining problem.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
+    "llama_model_path": "Hermes-2-Pro-Mistral-7B.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "NousResearch",
+    "tags": ["7B", "Finetuned", "Featured"],
+    "size": 4370000000
+  },
+  "engine": "nitro"
+}
@@ -5,11 +5,11 @@
       "url": "https://huggingface.co/TheBloke/Llama-2-70B-Chat-GGUF/resolve/main/llama-2-70b-chat.Q4_K_M.gguf"
     }
   ],
-  "id": "llama2-chat-70b-q4",
+  "id": "llama2-chat-70b",
   "object": "model",
   "name": "Llama 2 Chat 70B Q4",
   "version": "1.0",
-  "description": "Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
+  "description": "Llama 2 specifically designed for a comprehensive understanding the world.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
@@ -26,7 +26,7 @@
     "presence_penalty": 0
   },
   "metadata": {
-    "author": "MetaAI, The Bloke",
+    "author": "MetaAI",
     "tags": ["70B", "Foundational Model"],
     "size": 43920000000
   },
@@ -5,11 +5,11 @@
       "url": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf"
     }
   ],
-  "id": "llama2-chat-7b-q4",
+  "id": "llama2-chat-7b",
   "object": "model",
   "name": "Llama 2 Chat 7B Q4",
   "version": "1.0",
-  "description": "Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
+  "description": "Llama 2 specifically designed for a comprehensive understanding the world.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
@@ -26,7 +26,7 @@
     "presence_penalty": 0
   },
   "metadata": {
-    "author": "MetaAI, The Bloke",
+    "author": "MetaAI",
     "tags": ["7B", "Foundational Model"],
     "size": 4080000000
   },
@@ -1,35 +0,0 @@
-{
-  "sources": [
-    {
-      "filename": "ggml-model-q5_k.gguf",
-      "url": "https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/ggml-model-q5_k.gguf"
-    },
-    {
-      "filename": "mmproj-model-f16.gguf",
-      "url": "https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/mmproj-model-f16.gguf"
-    }
-  ],
-  "id": "llava-1.5-13b-q5",
-  "object": "model",
-  "name": "LlaVa 1.5 13B Q5 K",
-  "version": "1.1",
-  "description": "LlaVa 1.5 can bring vision understanding to Jan",
-  "format": "gguf",
-  "settings": {
-    "vision_model": true,
-    "text_model": false,
-    "ctx_len": 4096,
-    "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n",
-    "llama_model_path": "ggml-model-q5_k.gguf",
-    "mmproj": "mmproj-model-f16.gguf"
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "Mys",
-    "tags": ["Vision"],
-    "size": 9850000000
-  },
-  "engine": "nitro"
-}
@@ -1,35 +0,0 @@
-{
-  "sources": [
-    {
-      "filename": "ggml-model-q5_k.gguf",
-      "url": "https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/ggml-model-q5_k.gguf"
-    },
-    {
-      "filename": "mmproj-model-f16.gguf",
-      "url": "https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/mmproj-model-f16.gguf"
-    }
-  ],
-  "id": "llava-1.5-7b-q5",
-  "object": "model",
-  "name": "LlaVa 1.5 7B Q5 K",
-  "version": "1.1",
-  "description": "LlaVa 1.5 can bring vision understanding to Jan",
-  "format": "gguf",
-  "settings": {
-    "vision_model": true,
-    "text_model": false,
-    "ctx_len": 4096,
-    "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n",
-    "llama_model_path": "ggml-model-q5_k.gguf",
-    "mmproj": "mmproj-model-f16.gguf"
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "Mys",
-    "tags": ["Vision"],
-    "size": 5400000000
-  },
-  "engine": "nitro"
-}
models/llava-13b/model.json (new file, 35 lines)
@@ -0,0 +1,35 @@
+{
+  "sources": [
+    {
+      "filename": "llava-v1.6-vicuna-13b.Q4_K_M.gguf",
+      "url": "https://huggingface.co/cjpais/llava-v1.6-vicuna-13b-gguf/resolve/main/llava-v1.6-vicuna-13b.Q4_K_M.gguf"
+    },
+    {
+      "filename": "mmproj-model-f16.gguf",
+      "url": "https://huggingface.co/cjpais/llava-v1.6-vicuna-13b-gguf/resolve/main/mmproj-model-f16.gguf"
+    }
+  ],
+  "id": "llava-13b",
+  "object": "model",
+  "name": "LlaVa 13B Q4",
+  "version": "1.1",
+  "description": "LlaVa can bring vision understanding to Jan",
+  "format": "gguf",
+  "settings": {
+    "vision_model": true,
+    "text_model": false,
+    "ctx_len": 4096,
+    "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n",
+    "llama_model_path": "llava-v1.6-vicuna-13b.Q4_K_M.gguf",
+    "mmproj": "mmproj-model-f16.gguf"
+  },
+  "parameters": {
+    "max_tokens": 4096
+  },
+  "metadata": {
+    "author": "liuhaotian",
+    "tags": ["Vision"],
+    "size": 7870000000
+  },
+  "engine": "nitro"
+}
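Unlike the text-only entries, the vision models list two sources: the language-model GGUF plus the mmproj projector file, and "settings" points at both via "llama_model_path" and "mmproj". Both files have to be present for the model to load. A sketch of what a downloader might do with the "sources" array, using only the Python standard library (illustrative, not Jan's actual downloader):

import json, pathlib, urllib.request

def fetch_sources(model_json: str, dest_dir: str) -> None:
    # For llava-13b this downloads both llava-v1.6-vicuna-13b.Q4_K_M.gguf
    # and mmproj-model-f16.gguf into dest_dir.
    model = json.loads(pathlib.Path(model_json).read_text())
    dest = pathlib.Path(dest_dir)
    dest.mkdir(parents=True, exist_ok=True)
    for source in model["sources"]:
        target = dest / source["filename"]
        if not target.exists():
            urllib.request.urlretrieve(source["url"], target)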
models/llava-7b/model.json (new file, 35 lines)
@@ -0,0 +1,35 @@
+{
+  "sources": [
+    {
+      "filename": "llava-v1.6-mistral-7b.Q4_K_M.gguf",
+      "url": "https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/llava-v1.6-mistral-7b.Q4_K_M.gguf"
+    },
+    {
+      "filename": "mmproj-model-f16.gguf",
+      "url": "https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/mmproj-model-f16.gguf"
+    }
+  ],
+  "id": "llava-7b",
+  "object": "model",
+  "name": "LlaVa 7B",
+  "version": "1.1",
+  "description": "LlaVa can bring vision understanding to Jan",
+  "format": "gguf",
+  "settings": {
+    "vision_model": true,
+    "text_model": false,
+    "ctx_len": 4096,
+    "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n",
+    "llama_model_path": "llava-v1.6-mistral-7b.Q4_K_M.gguf",
+    "mmproj": "mmproj-model-f16.gguf"
+  },
+  "parameters": {
+    "max_tokens": 4096
+  },
+  "metadata": {
+    "author": "liuhaotian",
+    "tags": ["Vision"],
+    "size": 4370000000
+  },
+  "engine": "nitro"
+}
models/miqu-70b/model.json (new file, 34 lines)
@@ -0,0 +1,34 @@
+{
+  "sources": [
+    {
+      "filename": "miqu-1-70b.q4_k_m.gguf",
+      "url": "https://huggingface.co/miqudev/miqu-1-70b/resolve/main/miqu-1-70b.q4_k_m.gguf"
+    }
+  ],
+  "id": "miqu-70b",
+  "object": "model",
+  "name": "Mistral 70B Q4",
+  "version": "1.0",
+  "description": "A leak weight of Mistral 70B model.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "[INST] {prompt} [/INST]",
+    "llama_model_path": "miqu-1-70b.q4_k_m.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "miqudev",
+    "tags": ["70B", "Foundational Model"],
+    "size": 26440000000
+  },
+  "engine": "nitro"
+}
[binary image: 229 KiB before, 229 KiB after]
@@ -5,11 +5,11 @@
       "url": "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf"
     }
   ],
-  "id": "mistral-ins-7b-q4",
+  "id": "mistral-7b",
   "object": "model",
   "name": "Mistral Instruct 7B Q4",
   "version": "1.0",
-  "description": "Mistral Instruct 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
+  "description": "Mistral Instruct 7b model, specifically designed for a comprehensive understanding of the world.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
@@ -26,10 +26,10 @@
     "presence_penalty": 0
   },
   "metadata": {
-    "author": "MistralAI, The Bloke",
+    "author": "MistralAI",
    "tags": ["Featured", "7B", "Foundational Model"],
     "size": 4370000000,
-    "cover": "https://raw.githubusercontent.com/janhq/jan/dev/models/mistral-ins-7b-q4/cover.png"
+    "cover": "https://raw.githubusercontent.com/janhq/jan/dev/models/mistral-7b/cover.png"
   },
   "engine": "nitro"
 }
@@ -1,20 +1,20 @@
 {
   "sources": [
     {
-      "filename": "Noromaid-7b-v0.1.1.q5_k_m.gguf",
-      "url": "https://huggingface.co/NeverSleep/Noromaid-7b-v0.1.1-GGUF/resolve/main/Noromaid-7b-v0.1.1.q5_k_m.gguf"
+      "filename": "Noromaid-7B-0.4-DPO.q4_k_m.gguf",
+      "url": "https://huggingface.co/NeverSleep/Noromaid-7B-0.4-DPO-GGUF/resolve/main/Noromaid-7B-0.4-DPO.q4_k_m.gguf"
     }
   ],
   "id": "noromaid-7b",
   "object": "model",
-  "name": "Noromaid 7B Q5",
+  "name": "Noromaid 7B Q4",
   "version": "1.0",
   "description": "The Noromaid 7b model is designed for role-playing with human-like behavior.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
-    "prompt_template": "### Instruction:{prompt}\n### Response:",
-    "llama_model_path": "Noromaid-7b-v0.1.1.q5_k_m.gguf"
+    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
+    "llama_model_path": "Noromaid-7B-0.4-DPO.q4_k_m.gguf"
   },
   "parameters": {
     "temperature": 0.7,
@@ -27,8 +27,8 @@
   },
   "metadata": {
     "author": "NeverSleep",
-    "tags": ["7B", "Merged"],
-    "size": 5130000000
+    "tags": ["7B", "Finetuned"],
+    "size": 4370000000
   },
   "engine": "nitro"
 }
@@ -9,7 +9,7 @@
   "object": "model",
   "name": "Openchat-3.5 7B Q4",
   "version": "1.0",
-  "description": "The performance of this open-source model surpasses that of ChatGPT-3.5 and Grok-1 across various benchmarks.",
+  "description": "The performance of Openchat surpasses ChatGPT-3.5 and Grok-1 across various benchmarks.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
@@ -1,34 +0,0 @@
-{
-  "sources": [
-    {
-      "filename": "phi-2.Q8_0.gguf",
-      "url": "https://huggingface.co/TheBloke/phi-2-GGUF/resolve/main/phi-2.Q8_0.gguf"
-    }
-  ],
-  "id": "phi-2-3b",
-  "object": "model",
-  "name": "Phi-2 3B Q8",
-  "version": "1.0",
-  "description": "Phi-2 is a 2.7B model, excelling in common sense and logical reasoning benchmarks, trained with synthetic texts and filtered websites.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "Intruct:\n{prompt}\nOutput:",
-    "llama_model_path": "phi-2.Q8_0.gguf"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Microsoft",
-    "tags": ["3B", "Foundational Model"],
-    "size": 2960000000
-  },
-  "engine": "nitro"
-}
@@ -1,20 +1,20 @@
 {
   "sources": [
     {
-      "filename": "phind-codellama-34b-v2.Q5_K_M.gguf",
+      "filename": "phind-codellama-34b-v2.Q4_K_M.gguf",
       "url": "https://huggingface.co/TheBloke/Phind-CodeLlama-34B-v2-GGUF/resolve/main/phind-codellama-34b-v2.Q5_K_M.gguf"
     }
   ],
   "id": "phind-34b",
   "object": "model",
-  "name": "Phind 34B Q5",
+  "name": "Phind 34B Q4",
   "version": "1.0",
-  "description": "Phind 34B is fine-tuned on 1.5B tokens of high-quality programming data. This multi-lingual model excels in various programming languages and is designed to be steerable and user-friendly.",
+  "description": "Phind 34B is the best Open-source coding model.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
     "prompt_template": "### System Prompt\n{system_message}\n### User Message\n{prompt}\n### Assistant",
-    "llama_model_path": "phind-codellama-34b-v2.Q5_K_M.gguf"
+    "llama_model_path": "phind-codellama-34b-v2.Q4_K_M.gguf"
   },
   "parameters": {
     "temperature": 0.7,
@@ -26,7 +26,7 @@
     "presence_penalty": 0
   },
   "metadata": {
-    "author": "Phind, The Bloke",
+    "author": "Phind",
     "tags": ["34B", "Finetuned"],
     "size": 20220000000
   },
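Worth flagging in the phind-34b hunk above: "filename" and "llama_model_path" now name the Q4_K_M file, but the unchanged "url" context line still ends in phind-codellama-34b-v2.Q5_K_M.gguf, so the Q4 filename is paired with a Q5 download. A small consistency check along these lines would catch that kind of drift (a sketch, not part of the repo's CI):

import json, pathlib

def check_model_json(path: str) -> list[str]:
    model = json.loads(pathlib.Path(path).read_text())
    problems = []
    filenames = [s["filename"] for s in model["sources"]]
    for s in model["sources"]:
        # Each download URL should end with the filename it claims to provide.
        if not s["url"].endswith(s["filename"]):
            problems.append(f'{model["id"]}: url/filename mismatch for {s["filename"]}')
    if model["settings"].get("llama_model_path") not in filenames:
        problems.append(f'{model["id"]}: llama_model_path not among sources')
    return problems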
@@ -1,33 +0,0 @@
-{
-  "sources": [
-    {
-      "filename": "solar-10.7b-slerp.Q4_K_M.gguf",
-      "url": "https://huggingface.co/janhq/Solar-10.7B-SLERP-GGUF/resolve/main/solar-10.7b-slerp.Q4_K_M.gguf"
-    }
-  ],
-  "id": "solar-10.7b-slerp",
-  "object": "model",
-  "name": "Solar Slerp 10.7B Q4",
-  "version": "1.0",
-  "description": "This model uses the Slerp merge method from SOLAR Instruct and Pandora-v1",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "### User: {prompt}\n### Assistant:",
-    "llama_model_path": "solar-10.7b-slerp.Q4_K_M.gguf"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Jan",
-    "tags": ["13B", "Finetuned"],
-    "size": 6360000000
-  },
-  "engine": "nitro"
-}
@@ -9,7 +9,7 @@
   "object": "model",
   "name": "Stable Zephyr 3B Q8",
   "version": "1.0",
-  "description": "StableLM Zephyr 3B is trained for safe and reliable chatting.",
+  "description": "StableLM Zephyr 3B is a best model for low-end machine.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
@@ -27,7 +27,7 @@
   },
   "metadata": {
     "author": "StabilityAI",
-    "tags": ["3B", "Finetuned"],
+    "tags": ["3B", "Finetuned", "Tiny"],
     "size": 2970000000
   },
   "engine": "nitro"
@@ -1,34 +0,0 @@
-{
-  "sources": [
-    {
-      "filename": "starling-lm-7b-alpha.Q4_K_M.gguf",
-      "url": "https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GGUF/resolve/main/starling-lm-7b-alpha.Q4_K_M.gguf"
-    }
-  ],
-  "id": "starling-7b",
-  "object": "model",
-  "name": "Starling alpha 7B Q4",
-  "version": "1.0",
-  "description": "Starling 7B, an upgrade of Openchat 3.5 using RLAIF, is really good at various benchmarks, especially with GPT-4 judging its performance.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "GPT4 User: {prompt}<|end_of_turn|>GPT4 Assistant:",
-    "llama_model_path": "starling-lm-7b-alpha.Q4_K_M.gguf"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": ["<|end_of_turn|>"],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Berkeley-nest, The Bloke",
-    "tags": ["7B", "Finetuned"],
-    "size": 4370000000
-  },
-  "engine": "nitro"
-}
@@ -5,7 +5,7 @@
       "url": "https://huggingface.co/janhq/stealth-v1.3-GGUF/resolve/main/stealth-v1.3.Q4_K_M.gguf"
     }
   ],
-  "id": "stealth-v1.2-7b",
+  "id": "stealth-7b",
   "object": "model",
   "name": "Stealth 7B Q4",
   "version": "1.0",
@@ -1,34 +0,0 @@
-{
-  "sources": [
-    {
-      "filename": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
-      "url": "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
-    }
-  ],
-  "id": "tinyllama-1.1b",
-  "object": "model",
-  "name": "TinyLlama Chat 1.1B Q4",
-  "version": "1.0",
-  "description": "TinyLlama is a tiny model with only 1.1B. It's a good model for less powerful computers.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "<|system|>\n{system_message}<|user|>\n{prompt}<|assistant|>",
-    "llama_model_path": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 2048,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "TinyLlama",
-    "tags": ["Tiny", "Foundation Model"],
-    "size": 669000000
-  },
-  "engine": "nitro"
-}
[binary image: 352 KiB before, 352 KiB after]
@@ -5,7 +5,7 @@
       "url": "https://huggingface.co/janhq/trinity-v1.2-GGUF/resolve/main/trinity-v1.2.Q4_K_M.gguf"
     }
   ],
-  "id": "trinity-v1.2-7b",
+  "id": "trinity-7b",
   "object": "model",
   "name": "Trinity-v1.2 7B Q4",
   "version": "1.0",
@@ -28,7 +28,7 @@
     "author": "Jan",
     "tags": ["7B", "Merged", "Featured"],
     "size": 4370000000,
-    "cover": "https://raw.githubusercontent.com/janhq/jan/dev/models/trinity-v1.2-7b/cover.png"
+    "cover": "https://raw.githubusercontent.com/janhq/jan/dev/models/trinity-7b/cover.png"
   },
   "engine": "nitro"
 }
@@ -1,33 +0,0 @@
-{
-  "sources": [
-    {
-      "filename": "tulu-2-dpo-70b.Q4_K_M.gguf",
-      "url": "https://huggingface.co/TheBloke/tulu-2-dpo-70B-GGUF/resolve/main/tulu-2-dpo-70b.Q4_K_M.gguf"
-    }
-  ],
-  "id": "tulu-2-70b",
-  "object": "model",
-  "name": "Tulu 2 70B Q4",
-  "version": "1.0",
-  "description": "Tulu 70B is a strong alternative to Llama 2 70b Chat to act as helpful assistants.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "<|user|>\n{prompt}\n<|assistant|>",
-    "llama_model_path": "tulu-2-dpo-70b.Q4_K_M.gguf"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Lizpreciatior, The Bloke",
-    "tags": ["70B", "Finetuned"],
-    "size": 41400000000
-  },
-  "engine": "nitro"
-}
models/vistral-7b/model.json (new file, 35 lines)
@@ -0,0 +1,35 @@
+{
+  "sources": [
+    {
+      "filename": "vistral-7b-chat-dpo.Q4_K_M.gguf",
+      "url": "https://huggingface.co/janhq/vistral-7b-chat-dpo-GGUF/resolve/main/vistral-7b-chat-dpo.Q4_K_M.gguf"
+    }
+  ],
+  "id": "vistral-7b",
+  "object": "model",
+  "name": "Vistral 7B Q4",
+  "version": "1.0",
+  "description": "Vistral 7B has a deep understanding of Vietnamese.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "[INST] <<SYS>>\n{system_message}\n<</SYS>>\n{prompt} [/INST]",
+    "llama_model_path": "vistral-7b-chat-dpo.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Viet Mistral, Jan",
+    "tags": ["7B", "Finetuned"],
+    "size": 4410000000
+  },
+  "engine": "nitro"
+}
@@ -1,20 +1,20 @@
 {
   "sources": [
     {
-      "filename": "yi-34b-chat.Q5_K_M.gguf",
-      "url": "https://huggingface.co/TheBloke/Yi-34B-Chat-GGUF/resolve/main/yi-34b-chat.Q5_K_M.gguf"
+      "filename": "yi-34b-chat.Q4_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/Yi-34B-Chat-GGUF/resolve/main/yi-34b-chat.Q4_K_M.gguf"
     }
   ],
   "id": "yi-34b",
   "object": "model",
-  "name": "Yi 34B Q5",
+  "name": "Yi 34B Q4",
   "version": "1.0",
   "description": "Yi-34B, a specialized chat model, is known for its diverse and creative responses and excels across various NLP tasks and benchmarks.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
     "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
-    "llama_model_path": "yi-34b-chat.Q5_K_M.gguf"
+    "llama_model_path": "yi-34b-chat.Q4_K_M.gguf"
   },
   "parameters": {
     "temperature": 0.7,