diff --git a/models/openchat-7b/model.json b/models/openchat-7b/model.json
deleted file mode 100644
index 1fd6bb259..000000000
--- a/models/openchat-7b/model.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/openchat_3.5-GGUF/resolve/main/openchat_3.5.Q4_K_M.gguf",
-  "id": "openchat-7b",
-  "object": "model",
-  "name": "Open Chat 3.5 7B",
-  "version": "1.0",
-  "description": "OpenChat represents a breakthrough in the realm of open-source language models. By implementing the C-RLFT fine-tuning strategy, inspired by offline reinforcement learning, this 7B model achieves results on par with ChatGPT (March).",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "",
-    "user_prompt": "GPT4 User: ",
-    "ai_prompt": "<|end_of_turn|>\nGPT4 Assistant: "
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "OpenChat, The Bloke",
-    "tags": ["General", "Code"],
-    "size": 4370000000
-  }
-}
- 
\ No newline at end of file
diff --git a/models/openhermes-mistral-7b/model.json b/models/openhermes-mistral-7b/model.json
deleted file mode 100644
index 6b64363d5..000000000
--- a/models/openhermes-mistral-7b/model.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-GGUF/resolve/main/openhermes-2.5-mistral-7b.Q4_K_M.gguf",
-  "id": "openhermes-mistral-7b",
-  "object": "model",
-  "name": "Openhermes 2.5 Mistral 7B",
-  "version": "1.0",
-  "description": "The OpenHermes 2.5 Mistral 7B incorporates additional code datasets, more than a million GPT-4 generated data examples, and other high-quality open datasets. This enhancement led to significant improvement in benchmarks, highlighting its improved skill in handling code-centric tasks.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "<|im_start|>system\n",
-    "user_prompt": "<|im_end|>\n<|im_start|>user\n",
-    "ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "Teknium, The Bloke",
-    "tags": ["General", "Roleplay"],
-    "size": 4370000000
-  }
-}
- 
\ No newline at end of file
diff --git a/models/openorca-7b/model.json b/models/openorca-7b/model.json
deleted file mode 100644
index 42c88212c..000000000
--- a/models/openorca-7b/model.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-GGUF/resolve/main/mistral-7b-openorca.Q4_K_M.gguf",
-  "id": "openorca-7b",
-  "object": "model",
-  "name": "OpenOrca 7B",
-  "version": "1.0",
-  "description": "OpenOrca 8k 7B is a model based on Mistral 7B, fine-tuned using the OpenOrca dataset. Notably ranked first on the HF Leaderboard for models under 30B, it excels in efficiency and accessibility.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "<|im_start|>system\n",
-    "user_prompt": "<|im_end|>\n<|im_start|>user\n",
-    "ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "OpenOrca, The Bloke",
-    "tags": ["General", "Code"],
-    "size": 4370000000
-  }
-}
- 
\ No newline at end of file
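All three removed files follow the same layout. As a quick reference, here is a minimal TypeScript sketch of that shape, inferred purely from the fields visible in the diff above; the `ModelConfig` name and the `buildPrompt` helper are illustrative and are not part of the repository.

```typescript
// Inferred shape of the removed model.json files (field names taken from the
// deleted configs above; this is not an official schema from the project).
interface ModelConfig {
  source_url: string;   // direct link to the GGUF file on Hugging Face
  id: string;           // local model identifier, e.g. "openchat-7b"
  object: "model";
  name: string;
  version: string;
  description: string;
  format: "gguf";
  settings: {
    ctx_len: number;        // context length used when loading the model
    system_prompt: string;  // prefix for the system message
    user_prompt: string;    // prefix for each user turn
    ai_prompt: string;      // prefix for each assistant turn
  };
  parameters: {
    max_tokens: number;
  };
  metadata: {
    author: string;
    tags: string[];
    size: number;           // download size in bytes
  };
}

// Illustrative only: how the prompt-template fields could be combined into a
// single prompt string (the actual runtime may assemble prompts differently).
function buildPrompt(cfg: ModelConfig, system: string, user: string): string {
  const { system_prompt, user_prompt, ai_prompt } = cfg.settings;
  return `${system_prompt}${system}${user_prompt}${user}${ai_prompt}`;
}
```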