Merge branch 'main' into 813-feat-json-schema-for-frontend-parameters
This commit is contained in: dd8558edd6
@@ -183,6 +183,11 @@ export interface Model {
    */
   version: number;

+  /**
+   * The format of the model.
+   */
+  format: string;
+
   /**
    * The model download source. It can be an external url or a local filepath.
    */
@@ -33,7 +33,6 @@ export function handleAppUpdates() {

  /* App Update Error */
  autoUpdater.on("error", (info: any) => {
-    dialog.showMessageBox({ message: info.message });
    WindowManager.instance.currentWindow?.webContents.send(
      "APP_UPDATE_ERROR",
      {}
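With the native dialog call gone, update errors now surface only through the `APP_UPDATE_ERROR` IPC message, so the renderer has to listen for it. A minimal sketch of such a listener, assuming the renderer has direct access to Electron's `ipcRenderer` (Jan's preload bridge may expose this differently):

```typescript
// Renderer-side sketch: react to update errors forwarded from the main process.
// Assumes direct ipcRenderer access; the actual preload bridge may wrap this.
import { ipcRenderer } from 'electron'

ipcRenderer.on('APP_UPDATE_ERROR', () => {
  // The payload is currently an empty object, so any user-facing detail
  // has to come from the renderer itself (e.g. a toast or banner).
  console.error('App update failed')
})
```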
@@ -96,7 +96,7 @@ export default class JanAssistantExtension implements AssistantExtension {
      name: "Jan",
      description: "A default assistant that can use all downloaded models",
      model: "*",
-      instructions: "Your name is Jan.",
+      instructions: "",
      tools: undefined,
      file_ids: [],
      metadata: undefined,

@@ -1 +1 @@
-0.1.20
+0.1.21
@@ -2,22 +2,22 @@
   "source_url": "https://huggingface.co/TheBloke/Nous-Capybara-34B-GGUF/resolve/main/nous-capybara-34b.Q5_K_M.gguf",
   "id": "capybara-34b",
   "object": "model",
-  "name": "Capybara 200k 34B",
-  "version": 1.0,
+  "name": "Capybara 200k 34B Q5",
+  "version": "1.0",
   "description": "Nous Capybara 34B, a variant of the Yi-34B model, is the first Nous model with a 200K context length, trained for three epochs on the innovative Capybara dataset.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 4096,
+    "ctx_len": 2048,
     "system_prompt": "",
-    "user_prompt": "USER: ",
-    "ai_prompt": "ASSISTANT: "
+    "user_prompt": "USER:\n",
+    "ai_prompt": "ASSISTANT:\n"
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
     "author": "NousResearch, The Bloke",
-    "tags": ["General", "Big Context Length"],
+    "tags": ["Medium", "Finetuned"],
     "size": 24320000000
   }
 }
@@ -1,23 +1,23 @@
 {
-  "source_url": "https://huggingface.co/TheBloke/deepseek-coder-1.3b-base-GGUF/resolve/main/deepseek-coder-1.3b-base.Q4_K_M.gguf",
+  "source_url": "https://huggingface.co/TheBloke/deepseek-coder-1.3b-instruct-GGUF/resolve/main/deepseek-coder-1.3b-instruct.Q8_0.gguf",
   "id": "deepseek-coder-1.3b",
   "object": "model",
-  "name": "Deepseek Coder 1.3B",
+  "name": "Deepseek Coder 1.3B Q8",
   "version": "1.0",
-  "description": "",
+  "description": "Deepseek Coder trained on 2T tokens (87% code, 13% English/Chinese), excelling in project-level code completion with advanced capabilities across multiple programming languages.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 4096,
+    "ctx_len": 2048,
     "system_prompt": "",
-    "user_prompt": "",
-    "ai_prompt": ""
+    "user_prompt": "### Instruction:\n",
+    "ai_prompt": "### Response:\n"
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
-    "author": "deepseek, The Bloke",
-    "tags": ["Code"],
-    "size": 870000000
+    "author": "Deepseek, The Bloke",
+    "tags": ["Tiny", "Foundational Model"],
+    "size": 1430000000
   }
 }
models/deepseek-coder-34b/model.json (new file, 24 lines)
@@ -0,0 +1,24 @@
+{
+  "source_url": "https://huggingface.co/TheBloke/deepseek-coder-33B-instruct-GGUF/resolve/main/deepseek-coder-33b-instruct.Q5_K_M.gguf",
+  "id": "deepseek-coder-34b",
+  "object": "model",
+  "name": "Deepseek Coder 33B Q5",
+  "version": "1.0",
+  "description": "Deepseek Coder trained on 2T tokens (87% code, 13% English/Chinese), excelling in project-level code completion with advanced capabilities across multiple programming languages.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 2048,
+    "system_prompt": "",
+    "user_prompt": "### Instruction:\n",
+    "ai_prompt": "### Response:\n"
+  },
+  "parameters": {
+    "max_tokens": 2048
+  },
+  "metadata": {
+    "author": "Deepseek, The Bloke",
+    "tags": ["Medium", "Foundational Model"],
+    "size": 26040000000
+  }
+}
+
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/dolphin-2_2-yi-34b-GGUF/resolve/main/dolphin-2_2-yi-34b.Q5_K_M.gguf",
-  "id": "dolphin-yi-34b",
-  "object": "model",
-  "name": "Dolphin Yi 34B",
-  "version": "1.0",
-  "description": "Dolphin, based on the Yi-34B model and enhanced with features like conversation and empathy, is trained on a unique dataset for advanced multi-turn conversations. Notably uncensored, it requires careful implementation of an alignment layer for ethical use.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "<|im_start|>system\n",
-    "user_prompt": "<|im_end|>\n<|im_start|>user\n",
-    "ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "ehartford, The Bloke",
-    "tags": ["General Use", "Role-playing"],
-    "size": 24320000000
-  }
-}
-
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/UmbrellaCorp/IS-LM-3B_GGUF/resolve/main/IS-LM-Q4_K_M.gguf",
-  "id": "islm-3b",
-  "object": "model",
-  "name": "IS LM 3B",
-  "version": "1.0",
-  "description": "IS LM 3B, based on the StableLM 3B model is specifically finetuned for economic analysis using DataForge Economics and QLoRA over three epochs, enhancing its proficiency in economic forecasting and analysis.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "",
-    "user_prompt": "USER: ",
-    "ai_prompt": "ASSISTANT: "
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "UmbrellaCorp, The Bloke",
-    "tags": ["General Use", "Economics"],
-    "size": 1710000000
-  }
-}
-
models/llama2-chat-70b-q4/model.json (new file, 24 lines)
@@ -0,0 +1,24 @@
+{
+  "source_url": "https://huggingface.co/TheBloke/Llama-2-70B-Chat-GGUF/resolve/main/llama-2-70b-chat.Q4_K_M.gguf",
+  "id": "llama2-chat-70b-q4",
+  "object": "model",
+  "name": "Llama 2 Chat 70B Q4",
+  "version": "1.0",
+  "description": "This is a 4-bit quantized version of Meta AI's Llama 2 Chat 70b model.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 2048,
+    "system_prompt": "[INST] <<SYS>>\n",
+    "user_prompt": "<</SYS>>\n",
+    "ai_prompt": "[/INST]"
+  },
+  "parameters": {
+    "max_tokens": 2048
+  },
+  "metadata": {
+    "author": "MetaAI, The Bloke",
+    "tags": ["Large", "Foundational Model"],
+    "size": 43920000000
+  }
+}
+
models/llama2-chat-7b-q4/model.json (new file, 24 lines)
@@ -0,0 +1,24 @@
+{
+  "source_url": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf",
+  "id": "llama2-chat-7b-q4",
+  "object": "model",
+  "name": "Llama 2 Chat 7B Q4",
+  "version": "1.0",
+  "description": "This is a 4-bit quantized iteration of Meta AI's Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 2048,
+    "system_prompt": "[INST] <<SYS>>\n",
+    "user_prompt": "<</SYS>>\n",
+    "ai_prompt": "[/INST]"
+  },
+  "parameters": {
+    "max_tokens": 2048
+  },
+  "metadata": {
+    "author": "MetaAI, The Bloke",
+    "tags": ["Small", "Foundational Model"],
+    "size": 4080000000
+  }
+}
+
models/llama2-chat-7b-q5/model.json (new file, 24 lines)
@@ -0,0 +1,24 @@
+{
+  "source_url": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_M.gguf",
+  "id": "llama2-chat-7b-q5",
+  "object": "model",
+  "name": "Llama 2 Chat 7B Q5",
+  "version": "1.0",
+  "description": "This is a 5-bit quantized iteration of Meta AI's Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 2048,
+    "system_prompt": "[INST] <<SYS>>\n",
+    "user_prompt": "<</SYS>>\n",
+    "ai_prompt": "[/INST]"
+  },
+  "parameters": {
+    "max_tokens": 2048
+  },
+  "metadata": {
+    "author": "MetaAI, The Bloke",
+    "tags": ["Small", "Foundational Model"],
+    "size": 4780000000
+  }
+}
+
@@ -2,22 +2,22 @@
   "source_url": "https://huggingface.co/TheBloke/lzlv_70B-GGUF/resolve/main/lzlv_70b_fp16_hf.Q5_K_M.gguf",
   "id": "lzlv-70b",
   "object": "model",
-  "name": "Lzlv 70B",
+  "name": "Lzlv 70B Q4",
   "version": "1.0",
   "description": "lzlv_70B is a sophisticated AI model designed for roleplaying and creative tasks. This merge aims to combine intelligence with creativity, seemingly outperforming its individual components in complex scenarios and creative outputs.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 4096,
+    "ctx_len": 2048,
     "system_prompt": "",
-    "user_prompt": "USER: ",
-    "ai_prompt": "ASSISTANT: "
+    "user_prompt": "USER:\n",
+    "ai_prompt": "ASSISTANT:\n"
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
-    "author": "lizpreciatior, The Bloke",
-    "tags": ["General Use", "Role-playing"],
+    "author": "Lizpreciatior, The Bloke",
+    "tags": ["Large", "Finetuned"],
     "size": 48750000000
   }
 }
@@ -1,23 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/Marx-3B-v3-GGUF/resolve/main/marx-3b-v3.Q4_K_M.gguf",
-  "id": "marx-3b",
-  "object": "model",
-  "name": "Marx 3B",
-  "version": "1.0",
-  "description": "Marx 3B, based on the StableLM 3B model is specifically finetuned for chating using EverythingLM data and QLoRA over two epochs, enhancing its proficiency in understand general knowledege.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "",
-    "user_prompt": "### HUMAN: ",
-    "ai_prompt": "### RESPONSE: "
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "Bohan Du, The Bloke",
-    "tags": ["General Use"],
-    "size": 1620000000
-  }
-}
models/mistral-ins-7b-q4/model.json (new file, 24 lines)
@@ -0,0 +1,24 @@
+{
+  "source_url": "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q4_K_M.gguf",
+  "id": "mistral-ins-7b-q4",
+  "object": "model",
+  "name": "Mistral Instruct 7B Q4",
+  "version": "1.0",
+  "description": "This is a 4-bit quantized iteration of MistralAI's Mistral Instruct 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 2048,
+    "system_prompt": "",
+    "user_prompt": "<s>[INST]",
+    "ai_prompt": "[/INST]"
+  },
+  "parameters": {
+    "max_tokens": 2048
+  },
+  "metadata": {
+    "author": "MistralAI, The Bloke",
+    "tags": ["Small", "Foundational Model"],
+    "size": 4370000000
+  }
+}
+
models/mistral-ins-7b-q5/model.json (new file, 24 lines)
@@ -0,0 +1,24 @@
+{
+  "source_url": "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q5_K_M.gguf",
+  "id": "mistral-ins-7b-q5",
+  "object": "model",
+  "name": "Mistral Instruct 7B Q5",
+  "version": "1.0",
+  "description": "This is a 5-bit quantized iteration of MistralAI's Mistral Instruct 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 2048,
+    "system_prompt": "",
+    "user_prompt": "<s>[INST]",
+    "ai_prompt": "[/INST]"
+  },
+  "parameters": {
+    "max_tokens": 2048
+  },
+  "metadata": {
+    "author": "MistralAI, The Bloke",
+    "tags": ["Small", "Foundational Model"],
+    "size": 5130000000
+  }
+}
+
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/MythoMax-L2-13B-GGUF/resolve/main/mythomax-l2-13b.Q5_K_M.gguf",
-  "id": "mythomax-13b",
-  "object": "model",
-  "name": "Mythomax L2 13B",
-  "version": "1.0",
-  "description": "Mythomax L2 13b, an advanced AI model derived from MythoMix, merges MythoLogic-L2's deep comprehension with Huginn's writing skills through a unique tensor merge technique, excelling in roleplaying and storytelling.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "",
-    "user_prompt": "### Instruction: ",
-    "ai_prompt": "### Response: "
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "Gryphe, The Bloke",
-    "tags": ["Role-playing"],
-    "size": 9230000000
-  }
-}
-
@@ -2,22 +2,22 @@
   "source_url": "https://huggingface.co/TheBloke/neural-chat-7B-v3-1-GGUF/resolve/main/neural-chat-7b-v3-1.Q4_K_M.gguf",
   "id": "neural-chat-7b",
   "object": "model",
-  "name": "Neural Chat 7B",
+  "name": "Neural Chat 7B Q4",
   "version": "1.0",
   "description": "The Neural Chat 7B model, developed on the foundation of mistralai/Mistral-7B-v0.1, has been fine-tuned using the Open-Orca/SlimOrca dataset and aligned with the Direct Preference Optimization (DPO) algorithm. It has demonstrated substantial improvements in various AI tasks and performance well on the open_llm_leaderboard.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "### System: ",
-    "user_prompt": "### User: ",
-    "ai_prompt": "### Assistant: "
+    "ctx_len": 2048,
+    "system_prompt": "### System:\n",
+    "user_prompt": "### User:\n",
+    "ai_prompt": "### Assistant:\n"
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
     "author": "Intel, The Bloke",
-    "tags": ["General Use", "Role-playing", "Big Context Length"],
+    "tags": ["Recommended", "Small", "Finetuned"],
     "size": 4370000000
   }
 }
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/NeuralHermes-2.5-Mistral-7B-GGUF/resolve/main/neuralhermes-2.5-mistral-7b.Q4_K_M.gguf",
-  "id": "neuralhermes-7b",
-  "object": "model",
-  "name": "NeuralHermes 7B",
-  "version": "1.0",
-  "description": "NeuralHermes 2.5 has been enhanced using Direct Preference Optimization. This fine-tuning, inspired by the RLHF process of Neural-chat-7b and OpenHermes-2.5-Mistral-7B, has led to improved performance across several benchmarks.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "<|im_start|>system\n",
-    "user_prompt": "<|im_end|>\n<|im_start|>user\n",
-    "ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "Intel, The Bloke",
-    "tags": ["General Use", "Code", "Big Context Length"],
-    "size": 4370000000
-  }
-}
-
@@ -2,22 +2,22 @@
   "source_url": "https://huggingface.co/TheBloke/Noromaid-20B-v0.1.1-GGUF/resolve/main/noromaid-20b-v0.1.1.Q4_K_M.gguf",
   "id": "noromaid-20b",
   "object": "model",
-  "name": "Noromaid 20B",
+  "name": "Noromaid 20B Q4",
   "version": "1.0",
   "description": "The Noromaid 20b model is designed for role-playing and general use, featuring a unique touch with the no_robots dataset that enhances human-like behavior.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 4096,
+    "ctx_len": 2048,
     "system_prompt": "",
-    "user_prompt": "### Instruction: ",
-    "ai_prompt": "### Response: "
+    "user_prompt": "### Instruction:\n",
+    "ai_prompt": "### Response:\n"
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
     "author": "NeverSleep, The Bloke",
-    "tags": ["Role-playing"],
+    "tags": ["Medium", "Finetuned"],
     "size": 12040000000
   }
 }
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/openchat_3.5-GGUF/resolve/main/openchat_3.5.Q4_K_M.gguf",
-  "id": "openchat-7b",
-  "object": "model",
-  "name": "Open Chat 3.5 7B",
-  "version": "1.0",
-  "description": "OpenChat represents a breakthrough in the realm of open-source language models. By implementing the C-RLFT fine-tuning strategy, inspired by offline reinforcement learning, this 7B model achieves results on par with ChatGPT (March).",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "",
-    "user_prompt": "GPT4 User: ",
-    "ai_prompt": "<|end_of_turn|>\nGPT4 Assistant: "
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "OpenChat, The Bloke",
-    "tags": ["General", "Code"],
-    "size": 4370000000
-  }
-}
-
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-GGUF/resolve/main/openhermes-2.5-mistral-7b.Q4_K_M.gguf",
-  "id": "openhermes-mistral-7b",
-  "object": "model",
-  "name": "Openhermes 2.5 Mistral 7B",
-  "version": "1.0",
-  "description": "The OpenHermes 2.5 Mistral 7B incorporates additional code datasets, more than a million GPT-4 generated data examples, and other high-quality open datasets. This enhancement led to significant improvement in benchmarks, highlighting its improved skill in handling code-centric tasks.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "<|im_start|>system\n",
-    "user_prompt": "<|im_end|>\n<|im_start|>user\n",
-    "ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "Teknium, The Bloke",
-    "tags": ["General", "Roleplay"],
-    "size": 4370000000
-  }
-}
-
models/openhermes-neural-7b/model.json (new file, 24 lines)
@@ -0,0 +1,24 @@
+{
+  "source_url": "https://huggingface.co/TheBloke/OpenHermes-2.5-neural-chat-7B-v3-2-7B-GGUF/resolve/main/openhermes-2.5-neural-chat-7b-v3-2-7b.Q4_K_M.gguf",
+  "id": "openhermes-neural-7b",
+  "object": "model",
+  "name": "OpenHermes Neural 7B Q4",
+  "version": "1.0",
+  "description": "OpenHermes Neural is a merged model from OpenHermes-2.5-Mistral-7B and neural-chat-7b-v3-2 with the TIES method.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 2048,
+    "system_prompt": "<|im_start|>system\n",
+    "user_prompt": "<|im_end|>\n<|im_start|>user\n",
+    "ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
+  },
+  "parameters": {
+    "max_tokens": 2048
+  },
+  "metadata": {
+    "author": "Intel, The Bloke",
+    "tags": ["Recommended", "Small", "Merged"],
+    "size": 4370000000
+  }
+}
+
@@ -2,22 +2,22 @@
   "source_url": "https://huggingface.co/TheBloke/Orca-2-13B-GGUF/resolve/main/orca-2-13b.Q5_K_M.gguf",
   "id": "openorca-13b",
   "object": "model",
-  "name": "Orca 2 13B",
+  "name": "Orca 2 13B Q5",
   "version": "1.0",
   "description": "Orca 2 is a finetuned version of LLAMA-2, designed primarily for single-turn responses in reasoning, reading comprehension, math problem solving, and text summarization.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 4096,
+    "ctx_len": 2048,
     "system_prompt": "<|im_start|>system\n",
     "user_prompt": "<|im_end|>\n<|im_start|>user\n",
     "ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
     "author": "Microsoft, The Bloke",
-    "tags": ["General Use"],
+    "tags": ["Medium", "Finetuned"],
     "size": 9230000000
   }
 }
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-GGUF/resolve/main/mistral-7b-openorca.Q4_K_M.gguf",
-  "id": "openorca-7b",
-  "object": "model",
-  "name": "OpenOrca 7B",
-  "version": "1.0",
-  "description": "OpenOrca 8k 7B is a model based on Mistral 7B, fine-tuned using the OpenOrca dataset. Notably ranked first on the HF Leaderboard for models under 30B, it excels in efficiency and accessibility.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "<|im_start|>system\n",
-    "user_prompt": "<|im_end|>\n<|im_start|>user\n",
-    "ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "OpenOrca, The Bloke",
-    "tags": ["General", "Code"],
-    "size": 4370000000
-  }
-}
-
@@ -2,22 +2,22 @@
   "source_url": "https://huggingface.co/TheBloke/Phind-CodeLlama-34B-v2-GGUF/resolve/main/phind-codellama-34b-v2.Q5_K_M.gguf",
   "id": "phind-34b",
   "object": "model",
-  "name": "Phind 34B",
+  "name": "Phind 34B Q5",
   "version": "1.0",
   "description": "Phind-CodeLlama-34B-v2 is an AI model fine-tuned on 1.5B tokens of high-quality programming data. It's a SOTA open-source model in coding. This multi-lingual model excels in various programming languages, including Python, C/C++, TypeScript, Java, and is designed to be steerable and user-friendly.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 4096,
+    "ctx_len": 2048,
     "system_prompt": "### System Prompt\n",
     "user_prompt": "### User Message\n",
     "ai_prompt": "### Assistant\n"
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
     "author": "Phind, The Bloke",
-    "tags": ["Code", "Big Context Length"],
+    "tags": ["Medium", "Finetuned"],
     "size": 24320000000
   }
 }
@@ -2,7 +2,7 @@
   "source_url": "https://huggingface.co/TheBloke/rocket-3B-GGUF/resolve/main/rocket-3b.Q4_K_M.gguf",
   "id": "rocket-3b",
   "object": "model",
-  "name": "Rocket 3B",
+  "name": "Rocket 3B Q4",
   "version": "1.0",
   "description": "Rocket-3B is a GPT-like model, primarily English, fine-tuned on diverse public datasets. It outperforms larger models in benchmarks, showcasing superior understanding and text generation, making it an effective chat model for its size.",
   "format": "gguf",
@@ -13,11 +13,11 @@
     "ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
-    "author": "pansophic, The Bloke",
-    "tags": ["General Use"],
+    "author": "Pansophic, The Bloke",
+    "tags": ["Tiny", "Finetuned"],
     "size": 1710000000
   }
 }
@@ -2,22 +2,22 @@
   "source_url": "https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GGUF/resolve/main/starling-lm-7b-alpha.Q4_K_M.gguf",
   "id": "starling-7b",
   "object": "model",
-  "name": "Strarling alpha 7B",
+  "name": "Strarling alpha 7B Q4",
   "version": "1.0",
   "description": "Starling-RM-7B-alpha is a language model finetuned with Reinforcement Learning from AI Feedback from Openchat 3.5. It stands out for its impressive performance using GPT-4 as a judge, making it one of the top-performing models in its category.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 4096,
+    "ctx_len": 2048,
     "system_prompt": "",
     "user_prompt": "GPT4 User: ",
     "ai_prompt": "<|end_of_turn|>\nGPT4 Assistant: "
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
     "author": "Berkeley-nest, The Bloke",
-    "tags": ["General", "Code"],
+    "tags": ["Recommended", "Small","Finetuned"],
     "size": 4370000000
   }
 }
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/GOAT-70B-Storytelling-GGUF/resolve/main/goat-70b-storytelling.Q5_K_M.gguf",
-  "id": "storytelling-70b",
-  "object": "model",
-  "name": "Storytelling 70B",
-  "version": "1.0",
-  "description": "The GOAT-70B-Storytelling model is designed for autonomous story-writing, including crafting books and movie scripts. Based on the LLaMA 2 70B architecture, this model excels in generating cohesive and engaging narratives using inputs like plot outlines and character profiles.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "",
-    "user_prompt": "### USER: ",
-    "ai_prompt": "\n### ASSISTANT: "
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "GOAT-AI, The Bloke",
-    "tags": ["General Use", "Writing"],
-    "size": 48750000000
-  }
-}
-
@@ -2,22 +2,22 @@
   "source_url": "https://huggingface.co/TheBloke/LLaMA2-13B-Tiefighter-GGUF/resolve/main/llama2-13b-tiefighter.Q5_K_M.gguf",
   "id": "tiefighter-13b",
   "object": "model",
-  "name": "Tiefighter 13B",
+  "name": "Tiefighter 13B Q5",
   "version": "1.0",
   "description": "Tiefighter-13B is a highly creative, merged AI model achieved by combining various 'LORAs' on top of an existing merge, particularly focusing on storytelling and improvisation. This model excels in story writing, chatbots, and adventuring, and is designed to perform better with less detailed inputs, leveraging its inherent creativity.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 4096,
+    "ctx_len": 2048,
     "system_prompt": "",
     "user_prompt": "### Instruction: ",
     "ai_prompt": "\n### Response: "
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
     "author": "KoboldAI, The Bloke",
-    "tags": ["General Use", "Role-playing", "Writing"],
+    "tags": ["Medium", "Finetuned"],
     "size": 9230000000
   }
 }
@@ -2,7 +2,7 @@
   "source_url": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v0.6/resolve/main/ggml-model-q4_0.gguf",
   "id": "tinyllama-1.1b",
   "object": "model",
-  "name": "TinyLlama Chat 1.1B",
+  "name": "TinyLlama Chat 1.1B Q4",
   "version": "1.0",
   "description": "The TinyLlama project, featuring a 1.1B parameter Llama model, is pretrained on an expansive 3 trillion token dataset. Its design ensures easy integration with various Llama-based open-source projects. Despite its smaller size, it efficiently utilizes lower computational and memory resources, drawing on GPT-4's analytical prowess to enhance its conversational abilities and versatility.",
   "format": "gguf",
@@ -17,7 +17,7 @@
   },
   "metadata": {
     "author": "TinyLlama",
-    "tags": ["General Use"],
+    "tags": ["Tiny", "Foundation Model"],
     "size": 637000000
   }
 }
@@ -2,22 +2,22 @@
   "source_url": "https://huggingface.co/TheBloke/WizardCoder-Python-13B-V1.0-GGUF/resolve/main/wizardcoder-python-13b-v1.0.Q5_K_M.gguf",
   "id": "wizardcoder-13b",
   "object": "model",
-  "name": "Wizard Coder Python 13B",
+  "name": "Wizard Coder Python 13B Q5",
   "version": "1.0",
   "description": "WizardCoder-Python-13B is a Python coding model major models like ChatGPT-3.5. This model based on the Llama2 architecture, demonstrate high proficiency in specific domains like coding and mathematics.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 4096,
+    "ctx_len": 2048,
     "system_prompt": "",
     "user_prompt": "### Instruction:\n",
     "ai_prompt": "### Response:\n"
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
     "author": "WizardLM, The Bloke",
-    "tags": ["Code", "Big Context Length"],
+    "tags": ["Recommended", "Medium", "Finetuned"],
     "size": 9230000000
   }
 }
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/WizardCoder-Python-34B-V1.0-GGUF/resolve/main/wizardcoder-python-34b-v1.0.Q5_K_M.gguf",
-  "id": "wizardcoder-34b",
-  "object": "model",
-  "name": "Wizard Coder Python 34B",
-  "version": "1.0",
-  "description": "WizardCoder-Python-34B is a Python coding model major models like ChatGPT-3.5. This model based on the Llama2 architecture, demonstrate high proficiency in specific domains like coding and mathematics.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "",
-    "user_prompt": "### Instruction:\n",
-    "ai_prompt": "### Response:\n"
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "WizardLM, The Bloke",
-    "tags": ["Code", "Big Context Length"],
-    "size": 24320000000
-  }
-}
-
@@ -1,24 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/Xwin-LM-70B-V0.1-GGUF/resolve/main/xwin-lm-70b-v0.1.Q5_K_M.gguf",
-  "id": "xwin-70b",
-  "object": "model",
-  "name": "Xwin LM 70B",
-  "version": "1.0",
-  "description": "Xwin-LM, based on Llama2 models, emphasizes alignment and exhibits advanced language understanding, text generation, and role-playing abilities.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "system_prompt": "",
-    "user_prompt": "USER: ",
-    "ai_prompt": "ASSISTANT: "
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "Xwin-LM, The Bloke",
-    "tags": ["General Use", "Role-playing"],
-    "size": 48750000000
-  }
-}
-
@@ -1,21 +0,0 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/Yarn-Llama-2-70B-32k-GGUF/resolve/main/yarn-llama-2-70b-32k.Q5_K_M.gguf",
-  "id": "yarn-70b",
-  "object": "model",
-  "name": "Yarn 32k 70B",
-  "version": "1,0",
-  "description": "Yarn-Llama-2-70b-32k is designed specifically for handling long contexts. It represents an extension of the Llama-2-70b-hf model, now supporting a 32k token context window.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096
-  },
-  "parameters": {
-    "max_tokens": 4096
-  },
-  "metadata": {
-    "author": "NousResearch, The Bloke",
-    "tags": ["General Use", "Big Context Length"],
-    "size": 48750000000
-  }
-}
-
@@ -2,22 +2,22 @@
   "source_url": "https://huggingface.co/TheBloke/Yi-34B-Chat-GGUF/resolve/main/yi-34b-chat.Q5_K_M.gguf",
   "id": "yi-34b",
   "object": "model",
-  "name": "Yi 34B",
+  "name": "Yi 34B Q5",
   "version": "1.0",
   "description": "Yi-34B, a specialized chat model, is known for its diverse and creative responses and excels across various NLP tasks and benchmarks.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 4096,
+    "ctx_len": 2048,
     "system_prompt": "<|im_start|>system\n",
     "user_prompt": "<|im_end|>\n<|im_start|>user\n",
     "ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
     "author": "01-ai, The Bloke",
-    "tags": ["General", "Role-playing", "Writing"],
+    "tags": ["Medium", "Foundational Model"],
     "size": 24320000000
   }
 }
@@ -2,22 +2,22 @@
   "source_url": "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/resolve/main/zephyr-7b-beta.Q4_K_M.gguf",
   "id": "zephyr-beta-7b",
   "object": "model",
-  "name": "Zephyr Beta 7B",
+  "name": "Zephyr Beta 7B Q4",
   "version": "1.0",
-  "description": "The Zephyr-7B-β model marks the second iteration in the Zephyr series, designed to function as an effective assistant. It has been fine-tuned from the mistralai/Mistral-7B-v0.1 base model, utilizing a combination of public and synthetic datasets with the application of Direct Preference Optimization.",
+  "description": "The Zephyr-7B-β model is trained by HuggingFace, designed to function as a practical assistant. It has been fine-tuned from the mistralai/Mistral-7B-v0.1 base model, utilizing a combination of public and synthetic datasets with the application of Direct Preference Optimization.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 4096,
+    "ctx_len": 2048,
     "system_prompt": "<|system|>\n",
     "user_prompt": "</s>\n<|user|>\n",
    "ai_prompt": "</s>\n<|assistant|>\n"
   },
   "parameters": {
-    "max_tokens": 4096
+    "max_tokens": 2048
   },
   "metadata": {
     "author": "HuggingFaceH4, The Bloke",
-    "tags": ["General Use", "Big Context Length"],
+    "tags": ["Small", "Finetuned"],
     "size": 4370000000
   }
 }
server/.gitignore (new file, vendored, 1 line)
@@ -0,0 +1 @@
+data

server/data/.gitkeep (new empty file)
server/data/models/.gitkeep (new empty file)
server/data/threads/.gitkeep (new empty file)

server/icons/icon.png (new binary file, 38 KiB; not shown)

server/lib/.gitkeep (new empty file)
server/main.ts (new file, 19 lines)
@@ -0,0 +1,19 @@
+import fastify from 'fastify'
+import dotenv from 'dotenv'
+import v1API from './v1'
+const server = fastify()
+
+dotenv.config()
+server.register(v1API, {prefix: "/api/v1"})
+
+
+const JAN_API_PORT = Number.parseInt(process.env.JAN_API_PORT || '1337')
+const JAN_API_HOST = process.env.JAN_API_HOST || "0.0.0.0"
+
+server.listen({
+  port: JAN_API_PORT,
+  host: JAN_API_HOST
+}).then(() => {
+  console.log(`JAN API listening at: http://${JAN_API_HOST}:${JAN_API_PORT}`);
+})
+
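The new standalone server exposes the versioned routes under the `/api/v1` prefix. A quick smoke test against a locally running instance might look like this (a sketch; it assumes Node 18+ global `fetch` and that the server is up on the default port 1337):

```typescript
// Sketch: list models from a local Jan server (default port 1337 assumed).
async function listModels(): Promise<void> {
  const res = await fetch('http://localhost:1337/api/v1/models')
  if (!res.ok) throw new Error(`request failed: ${res.status}`)
  console.log(await res.json())
}

listModels().catch(console.error)
```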
server/nodemon.json (new file, 5 lines)
@@ -0,0 +1,5 @@
+{
+  "watch": ["main.ts", "v1"],
+  "ext": "ts, json",
+  "exec": "tsc && node ./build/main.js"
+}
server/package.json (new file, 32 lines)
@@ -0,0 +1,32 @@
+{
+  "name": "jan-server",
+  "version": "0.1.3",
+  "main": "./build/main.js",
+  "author": "Jan <service@jan.ai>",
+  "license": "AGPL-3.0",
+  "homepage": "https://jan.ai",
+  "description": "Use offline LLMs with your own data. Run open source models like Llama2 or Falcon on your internal computers/servers.",
+  "build": "",
+  "scripts": {
+    "lint": "eslint . --ext \".js,.jsx,.ts,.tsx\"",
+    "test:e2e": "playwright test --workers=1",
+    "dev": "nodemon .",
+    "build": "tsc"
+  },
+  "dependencies": {
+  },
+  "devDependencies": {
+    "@types/body-parser": "^1.19.5",
+    "@types/npmcli__arborist": "^5.6.4",
+    "@typescript-eslint/eslint-plugin": "^6.7.3",
+    "@typescript-eslint/parser": "^6.7.3",
+    "dotenv": "^16.3.1",
+    "eslint-plugin-react": "^7.33.2",
+    "fastify": "^4.24.3",
+    "nodemon": "^3.0.1",
+    "run-script-os": "^1.1.6"
+  },
+  "installConfig": {
+    "hoistingLimits": "workspaces"
+  }
+}
server/tsconfig.json (new file, 22 lines)
@@ -0,0 +1,22 @@
+{
+  "compilerOptions": {
+    "target": "es5",
+    "module": "commonjs",
+    "noImplicitAny": true,
+    "sourceMap": true,
+    "strict": true,
+    "outDir": "./build",
+    "rootDir": "./",
+    "noEmitOnError": true,
+    "esModuleInterop": true,
+    "baseUrl": ".",
+    "allowJs": true,
+    "skipLibCheck": true,
+    "paths": { "*": ["node_modules/*"] },
+    "typeRoots": ["node_modules/@types"]
+  },
+  // "sourceMap": true,
+
+  "include": ["./**/*.ts"],
+  "exclude": ["core", "build", "dist", "tests", "node_modules"]
+}
server/v1/assistants/index.ts (new file, 8 lines)
@@ -0,0 +1,8 @@
+import { FastifyInstance, FastifyPluginAsync, FastifyPluginOptions } from 'fastify'
+
+const router: FastifyPluginAsync = async (app: FastifyInstance, opts: FastifyPluginOptions) => {
+  //TODO: Add controllers for assistants here
+  // app.get("/", controller)
+  // app.post("/", controller)
+}
+export default router;
server/v1/chat/index.ts (new file, 11 lines)
@@ -0,0 +1,11 @@
+import { FastifyInstance, FastifyPluginAsync, FastifyPluginOptions } from 'fastify'
+
+const router: FastifyPluginAsync = async (app: FastifyInstance, opts: FastifyPluginOptions) => {
+  //TODO: Add controllers for here
+  // app.get("/", controller)
+
+  app.post("/", (req, res) => {
+    req.body
+  })
+}
+export default router;
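The chat completion route is a stub: the handler reads `req.body` and discards it, and never replies. A typed skeleton for where the completion logic would go might look like this (a sketch; the body shape is an assumption, loosely mirroring the OpenAI-style request the UI already builds):

```typescript
import { FastifyInstance, FastifyPluginAsync } from 'fastify'

// Assumed request shape; the commit does not define one yet.
type ChatBody = { messages: { role: string; content: string }[]; model?: string }

const router: FastifyPluginAsync = async (app: FastifyInstance) => {
  app.post<{ Body: ChatBody }>('/', async (req, res) => {
    const { messages } = req.body
    // TODO: forward to the inference engine; echo for now so the route is testable.
    res.status(200).send({ received: messages.length })
  })
}

export default router
```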
server/v1/index.ts (new file, 37 lines)
@@ -0,0 +1,37 @@
+import assistantsAPI from './assistants'
+import chatCompletionAPI from './chat'
+import modelsAPI from './models'
+import threadsAPI from './threads'
+
+import { FastifyInstance, FastifyPluginAsync } from 'fastify'
+
+const router: FastifyPluginAsync = async (app: FastifyInstance, opts) => {
+  app.register(
+    assistantsAPI,
+    {
+      prefix: "/assistants"
+    }
+  )
+
+  app.register(
+    chatCompletionAPI,
+    {
+      prefix: "/chat/completion"
+    }
+  )
+
+  app.register(
+    modelsAPI,
+    {
+      prefix: "/models"
+    }
+  )
+
+  app.register(
+    threadsAPI,
+    {
+      prefix: "/threads"
+    }
+  )
+}
+export default router;
server/v1/models/downloadModel.ts (new file, 23 lines)
@@ -0,0 +1,23 @@
+import { RouteHandlerMethod, FastifyRequest, FastifyReply } from 'fastify'
+import { MODEL_FOLDER_PATH } from "./index"
+import fs from 'fs/promises'
+
+const controller: RouteHandlerMethod = async (req: FastifyRequest, res: FastifyReply) => {
+  //TODO: download models impl
+  //Mirror logic from JanModelExtension.downloadModel?
+  let model = req.body.model;
+
+  // Fetching logic
+  // const directoryPath = join(MODEL_FOLDER_PATH, model.id)
+  // await fs.mkdir(directoryPath)
+
+  // const path = join(directoryPath, model.id)
+  // downloadFile(model.source_url, path)
+  // TODO: Different model downloader from different model vendor
+
+  res.status(200).send({
+    status: "Ok"
+  })
+}

+export default controller;
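As committed, this handler is a stub that reports "Ok" without downloading anything, and since Fastify types `req.body` as `unknown`, `req.body.model` will not compile under the strict tsconfig above without a cast or a route generic. One way the commented-out fetching logic could be filled in (a sketch only, assuming Node 18+ `fetch` and the same folder layout the comments describe):

```typescript
import { FastifyRequest, FastifyReply } from 'fastify'
import { createWriteStream } from 'fs'
import fs from 'fs/promises'
import { join } from 'path'
import { Readable } from 'stream'
import { pipeline } from 'stream/promises'
import { MODEL_FOLDER_PATH } from './index'

// Assumed request body shape; the commit leaves req.body untyped.
type DownloadBody = { model: { id: string; source_url: string } }

const controller = async (req: FastifyRequest, res: FastifyReply): Promise<void> => {
  const { model } = req.body as DownloadBody
  const directoryPath = join(MODEL_FOLDER_PATH, model.id)
  await fs.mkdir(directoryPath, { recursive: true })

  const response = await fetch(model.source_url)
  if (!response.ok || !response.body) {
    res.status(502).send({ error: `download failed: ${response.status}` })
    return
  }
  // Stream straight to disk instead of buffering multi-GB GGUF files in memory.
  await pipeline(
    Readable.fromWeb(response.body as import('stream/web').ReadableStream),
    createWriteStream(join(directoryPath, model.id))
  )
  res.status(200).send({ status: 'Ok' })
}

export default controller
```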
server/v1/models/index.ts (new file, 61 lines)
@@ -0,0 +1,61 @@
+
+export const MODEL_FOLDER_PATH = "./data/models"
+export const _modelMetadataFileName = 'model.json'
+
+import fs from 'fs/promises'
+import { Model } from '@janhq/core'
+import { join } from 'path'
+
+// map string => model object
+let modelIndex = new Map<String, Model>();
+async function buildModelIndex(){
+  let modelIds = await fs.readdir(MODEL_FOLDER_PATH);
+  // TODO: read modelFolders to get model info, mirror JanModelExtension?
+  try{
+    for(let modelId in modelIds){
+      let path = join(MODEL_FOLDER_PATH, modelId)
+      let fileData = await fs.readFile(join(path, _modelMetadataFileName))
+      modelIndex.set(modelId, JSON.parse(fileData.toString("utf-8")) as Model)
+    }
+  }
+  catch(err){
+    console.error("build model index failed. ", err);
+  }
+}
+buildModelIndex()
+
+import { FastifyInstance, FastifyPluginAsync, FastifyPluginOptions } from 'fastify'
+import downloadModelController from './downloadModel'
+import { startModel, stopModel } from './modelOp'
+
+const router: FastifyPluginAsync = async (app: FastifyInstance, opts: FastifyPluginOptions) => {
+  //TODO: Add controllers declaration here
+
+  ///////////// CRUD ////////////////
+  // Model listing
+  app.get("/", async (req, res) => {
+    res.status(200).send(
+      modelIndex.values()
+    )
+  })
+
+  // Retrieve model info
+  app.get("/:id", (req, res) => {
+    res.status(200).send(
+      modelIndex.get(req.params.id)
+    )
+  })
+
+  // Delete model
+  app.delete("/:id", (req, res) => {
+    modelIndex.delete(req.params)
+
+    // TODO: delete on disk
+  })
+
+  ///////////// Other ops ////////////////
+  app.post("/", downloadModelController)
+  app.put("/start", startModel)
+  app.put("/stop", stopModel)
+}
+export default router;
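A few details here look likely to bite at runtime: `for...in` over an array iterates string indices ('0', '1', ...) rather than folder names, so `join(MODEL_FOLDER_PATH, modelId)` builds the wrong path; `modelIndex.values()` returns an iterator, which will not serialize as a JSON array; and `modelIndex.delete(req.params)` passes the whole params object instead of the id. A corrected sketch of those spots (my reading, not part of the commit; it reuses `MODEL_FOLDER_PATH`, `_modelMetadataFileName`, `modelIndex`, and `app` from the file above):

```typescript
import fs from 'fs/promises'
import { join } from 'path'
import { Model } from '@janhq/core'

async function buildModelIndex(): Promise<void> {
  const modelIds = await fs.readdir(MODEL_FOLDER_PATH)
  for (const modelId of modelIds) {          // for...of yields folder names
    try {
      const fileData = await fs.readFile(
        join(MODEL_FOLDER_PATH, modelId, _modelMetadataFileName)
      )
      modelIndex.set(modelId, JSON.parse(fileData.toString('utf-8')) as Model)
    } catch (err) {
      console.error(`skipping ${modelId}:`, err) // one bad folder shouldn't abort the rest
    }
  }
}

app.get('/', async (req, res) => {
  res.status(200).send([...modelIndex.values()]) // materialize the iterator
})

app.delete<{ Params: { id: string } }>('/:id', (req, res) => {
  modelIndex.delete(req.params.id)
  res.status(204).send()
})
```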
server/v1/models/modelOp.ts (new file, 11 lines)
@@ -0,0 +1,11 @@
+import {FastifyRequest, FastifyReply} from 'fastify'
+
+export async function startModel(req: FastifyRequest, res: FastifyReply): Promise<void> {
+
+
+}
+
+export async function stopModel(req: FastifyRequest, res: FastifyReply): Promise<void> {
+
+
+}
server/v1/threads/index.ts (new file, 8 lines)
@@ -0,0 +1,8 @@
+import { FastifyInstance, FastifyPluginAsync, FastifyPluginOptions } from 'fastify'
+
+const router: FastifyPluginAsync = async (app: FastifyInstance, opts: FastifyPluginOptions) => {
+  //TODO: Add controllers declaration here
+
+  // app.get()
+}
+export default router;
@@ -10,6 +10,7 @@ const badgeVariants = cva('badge', {
      secondary: 'badge-secondary',
      danger: 'badge-danger',
      outline: 'badge-outline',
+      pink: 'badge-pink',
    },
  },
  defaultVariants: {
@@ -2,7 +2,11 @@
  @apply focus:ring-ring border-border inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-offset-2;

  &-primary {
-    @apply bg-primary text-primary-foreground hover:bg-primary/80 border-transparent;
+    @apply border-transparent bg-blue-100 text-blue-600;
  }

+  &-pink {
+    @apply border-transparent bg-pink-100 text-pink-700;
+  }
+
  &-success {
@@ -14,7 +18,7 @@
  }

  &-danger {
-    @apply bg-danger text-danger-foreground hover:bg-danger/80 border-transparent;
+    @apply border-transparent bg-red-100 text-red-700;
  }

  &-outline {
@@ -13,7 +13,9 @@ const buttonVariants = cva('btn', {
      danger: 'btn-danger',
      outline: 'btn-outline',
      secondary: 'btn-secondary',
+      secondaryBlue: 'btn-secondary-blue',
      ghost: 'btn-ghost',
+      success: 'btn-success',
    },
    size: {
      sm: 'btn-sm',
@@ -1,5 +1,5 @@
 .btn {
-  @apply inline-flex items-center justify-center whitespace-nowrap rounded-md font-semibold transition-colors;
+  @apply inline-flex items-center justify-center whitespace-nowrap rounded-lg font-semibold transition-colors;
  @apply focus-visible:ring-ring cursor-pointer focus-visible:outline-none focus-visible:ring-1;
  @apply disabled:pointer-events-none disabled:opacity-50;
@@ -7,6 +7,10 @@
    @apply bg-primary hover:bg-primary/90 text-white;
  }

+  &-secondary-blue {
+    @apply bg-blue-200 text-blue-900 hover:bg-blue-500/80;
+  }
+
  &-danger {
    @apply bg-danger text-danger-foreground hover:bg-danger/90;
  }
@@ -19,6 +23,10 @@
    @apply bg-secondary text-secondary-foreground hover:bg-secondary/80;
  }

+  &-success {
+    @apply bg-green-500 text-white hover:bg-green-500/80;
+  }
+
  &-ghost {
    @apply hover:bg-primary hover:text-primary-foreground;
  }
@@ -58,6 +66,9 @@
  &.btn-secondary {
    @apply bg-secondary hover:bg-secondary/80;
  }
+  &.btn-secondary-blue {
+    @apply bg-blue-200 text-blue-900 hover:bg-blue-200/80;
+  }
  &.btn-danger {
    @apply bg-danger hover:bg-danger/90;
  }
@@ -1,5 +1,5 @@
 .select {
-  @apply ring-offset-background placeholder:text-muted-foreground border-border flex h-9 w-full items-center justify-between whitespace-nowrap rounded-md border bg-transparent px-3 py-2 text-sm shadow-sm disabled:cursor-not-allowed disabled:opacity-50 [&>span]:line-clamp-1;
+  @apply placeholder:text-muted-foreground border-border flex h-9 w-full items-center justify-between whitespace-nowrap rounded-md border bg-transparent px-3 py-2 text-sm shadow-sm disabled:cursor-not-allowed disabled:opacity-50 [&>span]:line-clamp-1;

  &-caret {
    @apply h-4 w-4 opacity-50;
@@ -18,7 +18,7 @@
  }

  &-item {
-    @apply hover:bg-secondary relative my-1 block w-full cursor-pointer select-none items-center rounded-sm px-4 py-2 text-sm outline-none data-[disabled]:pointer-events-none data-[disabled]:opacity-50;
+    @apply hover:bg-secondary relative my-1 block w-full cursor-pointer select-none items-center rounded-sm px-4 py-2 text-sm data-[disabled]:pointer-events-none data-[disabled]:opacity-50;
  }

  &-trigger-viewport {
@@ -48,8 +48,8 @@ export default function CardSidebar({
      >
        <ChevronDownIcon
          className={twMerge(
-            'h-5 w-5 flex-none rotate-180 text-gray-400',
-            show && 'rotate-0'
+            'h-5 w-5 flex-none text-gray-400',
+            show && 'rotate-180'
          )}
        />
        <span className="font-bold">{title}</span>
@@ -19,6 +19,7 @@ import { twMerge } from 'tailwind-merge'

 import { MainViewState } from '@/constants/screens'

+import { useActiveModel } from '@/hooks/useActiveModel'
 import { getDownloadedModels } from '@/hooks/useGetDownloadedModels'

 import { useMainViewState } from '@/hooks/useMainViewState'
@@ -35,6 +36,7 @@ export default function DropdownListSidebar() {
  const activeThread = useAtomValue(activeThreadAtom)
  const [selected, setSelected] = useState<Model | undefined>()
  const { setMainViewState } = useMainViewState()
+  const { activeModel, stateModel } = useActiveModel()

  useEffect(() => {
    getDownloadedModels().then((downloadedModels) => {
@@ -42,18 +44,22 @@ export default function DropdownListSidebar() {
      if (downloadedModels.length > 0) {
        setSelected(
          downloadedModels.filter(
-            (x) => x.id === activeThread?.assistants[0].model.id
+            (x) =>
+              x.id === activeThread?.assistants[0].model.id ||
+              x.id === activeModel?.id
          )[0] || downloadedModels[0]
        )
        setSelectedModel(
          downloadedModels.filter(
-            (x) => x.id === activeThread?.assistants[0].model.id
+            (x) =>
+              x.id === activeThread?.assistants[0].model.id ||
+              x.id === activeModel?.id
          )[0] || downloadedModels[0]
        )
      }
    })
    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [activeThread])
+  }, [activeThread, activeModel, stateModel.loading])

  return (
    <Select
@@ -12,6 +12,7 @@ import {
  ModalHeader,
  Button,
  ModalTitle,
+  Progress,
 } from '@janhq/uikit'

 import { atom, useAtomValue } from 'jotai'
@@ -21,7 +22,6 @@ import { useDownloadState } from '@/hooks/useDownloadState'
 import { formatDownloadPercentage } from '@/utils/converter'

 import { extensionManager } from '@/extension'
-
 import { downloadingModelsAtom } from '@/helpers/atoms/Model.atom'

 type Props = {
  model: Model
@@ -46,7 +46,20 @@ export default function ModalCancelDownload({ model, isFromList }: Props) {
          {cancelText}
        </Button>
      ) : (
-        <Button>{cancelText}</Button>
+        <Button themes="secondaryBlue">
+          <div className="flex items-center space-x-2">
+            <span className="inline-block">Cancel</span>
+            <Progress
+              className="inline-block h-2 w-[80px] bg-blue-100"
+              value={
+                formatDownloadPercentage(downloadState?.percent, {
+                  hidePercentage: true,
+                }) as number
+              }
+            />
+            <span>{formatDownloadPercentage(downloadState.percent)}</span>
+          </div>
+        </Button>
      )}
    </ModalTrigger>
    <ModalContent>
@@ -48,6 +48,48 @@ export default function useSendChatMessage() {
  const { startModel } = useActiveModel()
  const [queuedMessage, setQueuedMessage] = useState(false)

+  const resendChatMessage = async () => {
+    if (!activeThread) {
+      console.error('No active thread')
+      return
+    }
+
+    updateThreadWaiting(activeThread.id, true)
+
+    const messages: ChatCompletionMessage[] = [
+      activeThread.assistants[0]?.instructions,
+    ]
+      .filter((e) => e && e.trim() !== '')
+      .map<ChatCompletionMessage>((instructions) => {
+        const systemMessage: ChatCompletionMessage = {
+          role: ChatCompletionRole.System,
+          content: instructions,
+        }
+        return systemMessage
+      })
+      .concat(
+        currentMessages.map<ChatCompletionMessage>((msg) => ({
+          role: msg.role,
+          content: msg.content[0]?.text.value ?? '',
+        }))
+      )
+
+    const messageRequest: MessageRequest = {
+      id: ulid(),
+      messages: messages,
+      threadId: activeThread.id,
+    }
+
+    const modelId = selectedModel?.id ?? activeThread.assistants[0].model.id
+
+    if (activeModel?.id !== modelId) {
+      setQueuedMessage(true)
+      await startModel(modelId)
+      setQueuedMessage(false)
+    }
+    events.emit(EventName.OnMessageSent, messageRequest)
+  }
+
  const sendChatMessage = async () => {
    if (!currentPrompt || currentPrompt.trim().length === 0) {
      return
@@ -97,6 +139,7 @@ export default function useSendChatMessage() {
    const messages: ChatCompletionMessage[] = [
      activeThread.assistants[0]?.instructions,
    ]
+      .filter((e) => e && e.trim() !== '')
      .map<ChatCompletionMessage>((instructions) => {
        const systemMessage: ChatCompletionMessage = {
          role: ChatCompletionRole.System,
@@ -162,6 +205,7 @@ export default function useSendChatMessage() {

  return {
    sendChatMessage,
+    resendChatMessage,
    queuedMessage,
  }
}
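`resendChatMessage` rebuilds the system-plus-history payload from thread state rather than from the clicked message, and it awaits `startModel` before emitting, so a regenerate click no longer fires into a model that is not loaded yet. Consumed from a component it might look like this (a sketch; the component and prop names are illustrative):

```typescript
// Sketch: wiring the hook into a toolbar button (names are illustrative).
import useSendChatMessage from '@/hooks/useSendChatMessage'

export default function RegenerateButton() {
  const { resendChatMessage, queuedMessage } = useSendChatMessage()
  return (
    <button disabled={queuedMessage} onClick={resendChatMessage}>
      {queuedMessage ? 'Starting model...' : 'Regenerate'}
    </button>
  )
}
```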
web/public/images/hub-banner.png (new binary file, 278 KiB; not shown)
@@ -1,7 +1,5 @@
 import {
-  ChatCompletionMessage,
  EventName,
-  MessageRequest,
  MessageStatus,
  ExtensionType,
  ThreadMessage,
@@ -15,6 +13,8 @@ import { twMerge } from 'tailwind-merge'

 import { toaster } from '@/containers/Toast'

+import useSendChatMessage from '@/hooks/useSendChatMessage'
+
 import { extensionManager } from '@/extension'
 import {
  deleteMessageAtom,
@@ -26,12 +26,9 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
  const deleteMessage = useSetAtom(deleteMessageAtom)
  const thread = useAtomValue(activeThreadAtom)
  const messages = useAtomValue(getCurrentChatMessagesAtom)
-  // const threadStateAtom = useMemo(
-  //   () => atom((get) => get(threadStatesAtom)[thread?.id ?? '']),
-  //   [thread?.id]
-  // )
-  // const threadState = useAtomValue(threadStateAtom)
-  const stopInference = async () => {
+  const { resendChatMessage } = useSendChatMessage()
+
+  const onStopInferenceClick = async () => {
    await extensionManager
      .get<InferenceExtension>(ExtensionType.Inference)
      ?.stopInference()
@@ -43,13 +40,25 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
    }, 300)
  }

+  const onDeleteClick = async () => {
+    deleteMessage(message.id ?? '')
+    if (thread) {
+      await extensionManager
+        .get<ConversationalExtension>(ExtensionType.Conversational)
+        ?.writeMessages(
+          thread.id,
+          messages.filter((msg) => msg.id !== message.id)
+        )
+    }
+  }
+
  return (
    <div className={twMerge('flex flex-row items-center')}>
      <div className="flex overflow-hidden rounded-md border border-border bg-background/20">
        {message.status === MessageStatus.Pending && (
          <div
            className="cursor-pointer border-r border-border px-2 py-2 hover:bg-background/80"
-            onClick={() => stopInference()}
+            onClick={onStopInferenceClick}
          >
            <StopCircle size={14} />
          </div>
@@ -58,20 +67,7 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
          message.id === messages[messages.length - 1]?.id && (
            <div
              className="cursor-pointer border-r border-border px-2 py-2 hover:bg-background/80"
-              onClick={() => {
-                const messageRequest: MessageRequest = {
-                  id: message.id ?? '',
-                  messages: messages.slice(0, -1).map((e) => {
-                    const msg: ChatCompletionMessage = {
-                      role: e.role,
-                      content: e.content[0].text.value,
-                    }
-                    return msg
-                  }),
-                  threadId: message.thread_id ?? '',
-                }
-                events.emit(EventName.OnMessageSent, messageRequest)
-              }}
+              onClick={resendChatMessage}
            >
              <RefreshCcw size={14} />
            </div>
@@ -87,21 +83,14 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
        >
          <Copy size={14} />
        </div>
-        <div
-          className="cursor-pointer px-2 py-2 hover:bg-background/80"
-          onClick={async () => {
-            deleteMessage(message.id ?? '')
-            if (thread)
-              await extensionManager
-                .get<ConversationalExtension>(ExtensionType.Conversational)
-                ?.writeMessages(
-                  thread.id,
-                  messages.filter((msg) => msg.id !== message.id)
-                )
-          }}
-        >
-          <Trash2Icon size={14} />
-        </div>
+        {message.status === MessageStatus.Ready && (
+          <div
+            className="cursor-pointer px-2 py-2 hover:bg-background/80"
+            onClick={onDeleteClick}
+          >
+            <Trash2Icon size={14} />
+          </div>
+        )}
      </div>
    </div>
  )
@ -29,7 +29,11 @@ const marked = new Marked(
|
||||
if (lang === undefined || lang === '') {
|
||||
return hljs.highlightAuto(code).value
|
||||
}
|
||||
return hljs.highlight(code, { language: lang }).value
|
||||
try {
|
||||
return hljs.highlight(code, { language: lang }).value
|
||||
} catch (err) {
|
||||
return hljs.highlight(code, { language: 'javascript' }).value
|
||||
}
|
||||
},
|
||||
}),
|
||||
{
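
`hljs.highlight` throws when asked for a language that is not registered, which is the failure the new try/catch absorbs by falling back to JavaScript highlighting. An equivalent guard can probe the registry up front; this sketch assumes only the documented highlight.js API (`getLanguage` returns `undefined` for unknown names) and is not code from this changeset.

```ts
import hljs from 'highlight.js'

// Highlight with the requested language when it is registered; otherwise let
// highlight.js auto-detect instead of throwing.
function safeHighlight(code: string, lang?: string): string {
  if (lang && hljs.getLanguage(lang)) {
    return hljs.highlight(code, { language: lang }).value
  }
  return hljs.highlightAuto(code).value
}
```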

@ -137,7 +137,7 @@ const ChatScreen = () => {

{stateModel.loading && (
<div className="mb-1 mt-2 py-2 text-center">
<span className="rounded-lg border border-border px-4 py-2 shadow-lg">
<span className="rounded-lg border border-border bg-blue-200 px-4 py-2 font-semibold text-blue-600 shadow-lg">
Starting model {stateModel.model}
</span>
</div>

@ -1,6 +1,6 @@
/* eslint-disable react/display-name */

import { forwardRef } from 'react'
import { forwardRef, useState } from 'react'

import { Model } from '@janhq/core'
import { Badge } from '@janhq/uikit'
@ -12,58 +12,80 @@ type Props = {
}

const ExploreModelItem = forwardRef<HTMLDivElement, Props>(({ model }, ref) => {
const [open, setOpen] = useState('')

const handleToggle = () => {
if (open === model.id) {
setOpen('')
} else {
setOpen(model.id)
}
}

return (
<div
ref={ref}
className="mb-4 flex flex-col rounded-md border border-border bg-background/60"
className="mb-6 flex flex-col overflow-hidden rounded-xl border border-border bg-background/60"
>
<ExploreModelItemHeader model={model} />
<div className="flex flex-col p-4">
<div className="mb-4 flex flex-col gap-1">
<span className="font-semibold">About</span>
<p>{model.description}</p>
</div>

<div className="mb-4 flex space-x-6 border-b border-border pb-4">
<div>
<span className="font-semibold">Author</span>
<p className="mt-1 font-medium">{model.metadata.author}</p>
</div>
<div>
<span className="mb-1 font-semibold">Compatibility</span>
<div className="mt-1 flex gap-2">
{/* <Badge
themes="secondary"
className="line-clamp-1 lg:line-clamp-none"
title={`${toGigabytes(
model.metadata.maxRamRequired // TODO: check this
)} RAM required`}
>
{toGigabytes(model.metadata.maxRamRequired)} RAM required
</Badge> */}
<ExploreModelItemHeader
model={model}
onClick={handleToggle}
open={open}
/>
{open === model.id && (
<div className="flex">
<div className="flex w-full flex-col border-t border-border p-4 ">
<div className="mb-6 flex flex-col gap-1">
<span className="font-semibold">About</span>
<p className="text-muted-foreground">
{model.description || '-'}
</p>
</div>
<div className="flex space-x-10">
<div>
<span className="font-semibold text-muted-foreground">
Author
</span>
<p className="mt-2 line-clamp-1 font-medium">
{model.metadata.author}
</p>
</div>
<div>
<span className="mb-1 font-semibold text-muted-foreground">
Model ID
</span>
<p className="mt-2 line-clamp-1 font-medium">{model.id}</p>
</div>
<div>
<span className="mb-1 font-semibold text-muted-foreground">
Tags
</span>
<div className="mt-2 flex space-x-2">
{model.metadata.tags.map((tag, i) => (
<Badge key={i} themes="primary" className="line-clamp-1">
{tag}
</Badge>
))}
</div>
</div>
</div>
</div>
</div>

<div className="grid grid-cols-3 items-center gap-4">
<div>
<span className="font-semibold">Version</span>
<div className="mt-2 flex space-x-2">
<Badge themes="outline">v{model.version}</Badge>
</div>
</div>
<div>
<span className="font-semibold">Tags</span>
<div className="mt-2 flex space-x-2">
{model.metadata.tags.map((tag, i) => (
<Badge key={i} themes="outline">
{tag}
</Badge>
))}
<div className="w-48 flex-shrink-0 border-l border-t border-border p-4">
<div>
<span className="font-semibold text-muted-foreground">
Format
</span>
<p className="mt-2 font-medium uppercase">{model.format}</p>
</div>
{/* <div className="mt-4">
<span className="font-semibold text-muted-foreground">
Compatibility
</span>
<p className="mt-2 font-medium">-</p>
</div> */}
</div>
</div>
</div>
)}
</div>
)
})
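
The `open` state and `handleToggle` above implement a single-open accordion keyed by model id. If more screens need the pattern, it could live in a small hook; the following is a hypothetical helper sketched from the logic above, not code in this changeset.

```tsx
import { useState } from 'react'

// At most one panel is open at a time, identified by its id; toggling the
// currently open id closes it, any other id opens that panel instead.
export function useAccordion() {
  const [openId, setOpenId] = useState('')
  const toggle = (id: string) =>
    setOpenId((current) => (current === id ? '' : id))
  const isOpen = (id: string) => openId === id
  return { openId, toggle, isOpen }
}
```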

@ -1,17 +1,22 @@
/* eslint-disable react-hooks/exhaustive-deps */
import { useCallback, useMemo, useState } from 'react'
import { useCallback, useMemo } from 'react'

import { Model } from '@janhq/core'
import { Badge, Button } from '@janhq/uikit'
import { Button } from '@janhq/uikit'

import { atom, useAtomValue } from 'jotai'

import { ChevronDownIcon } from 'lucide-react'

import { twMerge } from 'tailwind-merge'

import ModalCancelDownload from '@/containers/ModalCancelDownload'

import { MainViewState } from '@/constants/screens'

import { ModelPerformance, TagType } from '@/constants/tagType'
// import { ModelPerformance, TagType } from '@/constants/tagType'

import { useActiveModel } from '@/hooks/useActiveModel'
import useDownloadModel from '@/hooks/useDownloadModel'
import { useDownloadState } from '@/hooks/useDownloadState'
import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
@ -21,17 +26,21 @@ import { toGigabytes } from '@/utils/converter'

type Props = {
model: Model
onClick: () => void
open: string
}

const ExploreModelItemHeader: React.FC<Props> = ({ model }) => {
const ExploreModelItemHeader: React.FC<Props> = ({ model, onClick, open }) => {
const { downloadModel } = useDownloadModel()
const { downloadedModels } = useGetDownloadedModels()
const { modelDownloadStateAtom, downloadStates } = useDownloadState()
const [title, setTitle] = useState<string>('Recommended')
const { startModel } = useActiveModel()
// const [title, setTitle] = useState<string>('Recommended')

// const [performanceTag, setPerformanceTag] = useState<TagType>(
// ModelPerformance.PerformancePositive
// )

const [performanceTag, setPerformanceTag] = useState<TagType>(
ModelPerformance.PerformancePositive
)
const downloadAtom = useMemo(
() => atom((get) => get(modelDownloadStateAtom)[model.id]),
[model.id]
@ -47,19 +56,20 @@ const ExploreModelItemHeader: React.FC<Props> = ({ model }) => {
const isDownloaded = downloadedModels.find((md) => md.id === model.id) != null

let downloadButton = (
<Button onClick={() => onDownloadClick()}>
{model.metadata.size
? `Download (${toGigabytes(model.metadata.size)})`
: 'Download'}
</Button>
<Button onClick={() => onDownloadClick()}>Download</Button>
)

const onUseModelClick = () => {
startModel(model.id)
setMainViewState(MainViewState.Thread)
}

if (isDownloaded) {
downloadButton = (
<Button
onClick={() => {
setMainViewState(MainViewState.MyModels)
}}
themes="success"
className="min-w-[98px]"
onClick={onUseModelClick}
>
Use
</Button>
@ -70,29 +80,42 @@ const ExploreModelItemHeader: React.FC<Props> = ({ model }) => {
downloadButton = <ModalCancelDownload model={model} />
}

const renderBadge = (performance: TagType) => {
switch (performance) {
case ModelPerformance.PerformancePositive:
return <Badge themes="success">{title}</Badge>
// const renderBadge = (performance: TagType) => {
// switch (performance) {
// case ModelPerformance.PerformancePositive:
// return <Badge themes="success">{title}</Badge>

case ModelPerformance.PerformanceNeutral:
return <Badge themes="secondary">{title}</Badge>
// case ModelPerformance.PerformanceNeutral:
// return <Badge themes="secondary">{title}</Badge>

case ModelPerformance.PerformanceNegative:
return <Badge themes="danger">{title}</Badge>
// case ModelPerformance.PerformanceNegative:
// return <Badge themes="danger">{title}</Badge>

default:
break
}
}
// default:
// break
// }
// }

return (
<div className="flex items-center justify-between rounded-t-md border-b border-border bg-background/50 px-4 py-2">
<div
className="flex cursor-pointer items-center justify-between rounded-t-md bg-background/50 px-4 py-4"
onClick={onClick}
>
<div className="flex items-center gap-2">
<span className="font-medium">{model.name}</span>
{performanceTag && renderBadge(performanceTag)}
<span className="font-bold">{model.name}</span>
</div>
<div className="inline-flex items-center space-x-2">
<span className="mr-4 font-semibold text-muted-foreground">
{toGigabytes(model.metadata.size)}
</span>
{downloadButton}
<ChevronDownIcon
className={twMerge(
'h-5 w-5 flex-none text-gray-400',
open === model.id && 'rotate-180'
)}
/>
</div>
{downloadButton}
</div>
)
}
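
One design note on the new `Props`: the header receives the parent's whole `open` string and re-derives "am I expanded" with `open === model.id`. Passing a precomputed boolean would keep that comparison in one place. A hypothetical variant, not part of this changeset:

```tsx
import { Model } from '@janhq/core'

// The parent computes open === model.id once and hands the header a flag, so
// the header no longer needs to know how openness is tracked.
type HeaderProps = {
  model: Model
  onClick: () => void
  isOpen: boolean
}
```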

@ -6,10 +6,17 @@ type Props = {
models: Model[]
}

const ExploreModelList: React.FC<Props> = ({ models }) => (
<div className="relative h-full w-full flex-shrink-0">
{models?.map((model) => <ExploreModelItem key={model.id} model={model} />)}
</div>
)
const ExploreModelList: React.FC<Props> = ({ models }) => {
return (
<div className="relative h-full w-full flex-shrink-0">
{models
?.sort((a) => {
if (a.metadata.tags.includes('Recommended')) return -1
return 0
})
?.map((model) => <ExploreModelItem key={model.id} model={model} />)}
</div>
)
}

export default ExploreModelList
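
Two caveats on the `?.sort(...)` above: `Array.prototype.sort` mutates the array in place (here the `models` prop), and a comparator that inspects only its first argument does not define a consistent ordering, so engines may order ties differently. A non-mutating, symmetric version, assuming only the `Model` shape already used in this diff:

```ts
import { Model } from '@janhq/core'

// Rank 'Recommended' models first without mutating the incoming array; the
// comparator looks at both arguments, as sort expects.
const rank = (m: Model) => (m.metadata.tags.includes('Recommended') ? 0 : 1)

export const recommendedFirst = (models: Model[]): Model[] =>
  [...models].sort((a, b) => rank(a) - rank(b))
```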

@ -1,13 +1,55 @@
import { ScrollArea } from '@janhq/uikit'
import { useState } from 'react'

import {
Input,
ScrollArea,
Tooltip,
TooltipContent,
TooltipTrigger,
TooltipArrow,
Select,
SelectTrigger,
SelectValue,
SelectContent,
SelectGroup,
SelectItem,
} from '@janhq/uikit'

import { SearchIcon } from 'lucide-react'

import { Code2Icon, UserIcon } from 'lucide-react'

import { twMerge } from 'tailwind-merge'

import Loader from '@/containers/Loader'

import { useGetConfiguredModels } from '@/hooks/useGetConfiguredModels'

import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'

import ExploreModelList from './ExploreModelList'

const ExploreModelsScreen = () => {
const { loading, models } = useGetConfiguredModels()
const [searchValue, setsearchValue] = useState('')
const [tabActive, setTabActive] = useState('Model')
const { downloadedModels } = useGetDownloadedModels()
const [sortSelected, setSortSelected] = useState('All Model')
const sortMenu = ['All Model', 'Recommended', 'Downloaded']

const filteredModels = models.filter((x) => {
if (sortSelected === 'Downloaded') {
return (
x.name.toLowerCase().includes(searchValue.toLowerCase()) &&
downloadedModels.some((y) => y.id === x.id)
)
} else if (sortSelected === 'Recommended') {
return x.metadata.tags.includes('Recommended')
} else {
return x.name.toLowerCase().includes(searchValue.toLowerCase())
}
})
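
Despite the "Sort By" placeholder on the Select rendered later in this hunk, the branches above narrow the list rather than reorder it, and the 'Recommended' branch is the only one that ignores `searchValue`. A partial sketch of that case with the search term applied uniformly; `matchesSearch` is a hypothetical helper, not code from this changeset:

```ts
import { Model } from '@janhq/core'

// Case-insensitive name match, applied to every branch of the filter.
const matchesSearch = (m: Model, q: string) =>
  m.name.toLowerCase().includes(q.toLowerCase())

// 'Recommended' keeps filtering by tag but no longer bypasses the search box.
const recommendedAndSearched = (models: Model[], q: string) =>
  models.filter(
    (m) => matchesSearch(m, q) && m.metadata.tags.includes('Recommended')
  )
```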

if (loading) return <Loader description="loading ..." />

return (
@ -15,7 +57,87 @@ const ExploreModelsScreen = () => {
<div className="h-full w-full p-4">
<div className="h-full" data-test-id="testid-explore-models">
<ScrollArea>
<ExploreModelList models={models} />
<div className="relative">
<img
src="./images/hub-banner.png"
alt="Hub Banner"
className="w-full object-cover"
/>
<div className="absolute left-1/2 top-1/2 w-1/3 -translate-x-1/2 -translate-y-1/2">
<SearchIcon
size={20}
className="absolute left-2 top-1/2 -translate-y-1/2 text-muted-foreground"
/>
<Input
placeholder="Search models"
className="bg-white pl-9 dark:bg-background"
onChange={(e) => {
setsearchValue(e.target.value)
}}
/>
</div>
</div>
<div className="mx-auto w-4/5 py-6">
<div className="flex items-center justify-end">
{/* Temporary hide tabs */}
{/* <div className="inline-flex overflow-hidden rounded-lg border border-border">
<div
className={twMerge(
'flex cursor-pointer items-center space-x-2 border-r border-border px-3 py-2',
tabActive === 'Model' && 'bg-secondary'
)}
onClick={() => setTabActive('Model')}
>
<Code2Icon size={20} className="text-muted-foreground" />
<span className="font-semibold">Model</span>
</div>
<Tooltip>
<TooltipTrigger>
<div
className={twMerge(
'pointer-events-none flex cursor-pointer items-center space-x-2 px-3 py-2 text-muted-foreground',
tabActive === 'Assistant' && 'bg-secondary'
)}
onClick={() => setTabActive('Assistant')}
>
<UserIcon size={20} className="text-muted-foreground" />
<span className="font-semibold">Assistant</span>
</div>
</TooltipTrigger>
<TooltipContent side="top" sideOffset={10}>
<span className="font-bold">Coming Soon</span>
<TooltipArrow />
</TooltipContent>
</Tooltip>
</div> */}

<Select
value={sortSelected}
onValueChange={(value) => {
setSortSelected(value)
}}
>
<SelectTrigger className="w-[200px]">
<SelectValue placeholder="Sort By"></SelectValue>
</SelectTrigger>
<SelectContent className="right-0 block w-full min-w-[200px] pr-0">
<SelectGroup>
{sortMenu.map((x, i) => {
return (
<SelectItem key={i} value={x}>
<span className="line-clamp-1 block">{x}</span>
</SelectItem>
)
})}
</SelectGroup>
</SelectContent>
</Select>
</div>

<div className="mt-6">
<ExploreModelList models={filteredModels} />
</div>
</div>
</ScrollArea>
</div>
</div>