From 28ac386f263c558027f58efaf1eb628b23e9b208 Mon Sep 17 00:00:00 2001
From: hieu-jan <150573299+hieu-jan@users.noreply.github.com>
Date: Fri, 1 Mar 2024 18:28:02 +0900
Subject: [PATCH] docs: migrate import using absolute filepath content

---
 docs/docs/quickstart/models/import-models.mdx | 66 +++++++++++++++++++
 1 file changed, 66 insertions(+)

diff --git a/docs/docs/quickstart/models/import-models.mdx b/docs/docs/quickstart/models/import-models.mdx
index 73cbc4656..01251f5e7 100644
--- a/docs/docs/quickstart/models/import-models.mdx
+++ b/docs/docs/quickstart/models/import-models.mdx
@@ -24,6 +24,72 @@ import janModel from './assets/jan-model-hub.png';
 
 This guide will show you how to perform manual import. In this guide, we are using a GGUF model from [HuggingFace](https://huggingface.co/) and our latest model, [Trinity](https://huggingface.co/janhq/trinity-v1-GGUF), as an example.
 
+## Newer versions - nightly versions and v0.4.7+
+
+Starting from version 0.4.7, Jan has introduced the capability to import models using an absolute file path. It allows you to import models from any directory on your computer.
+
+### 1. Get the Absolute Filepath of the Model
+
+After downloading the model from HuggingFace, get the absolute filepath of the model file.
+
+### 2. Configure the Model JSON
+
+1. Navigate to the `~/jan/models` folder.
+2. Create a folder named `<modelname>`, for example, `tinyllama`.
+3. Create a `model.json` file inside the folder, including the following configurations:
+
+- Ensure the `id` property matches the folder name you created.
+- Ensure the `url` property is either the direct binary download link ending in `.gguf` or, starting from v0.4.7, the absolute filepath of the model file.
+- Ensure the `engine` property is set to `nitro`.
+
+```json
+{
+  "sources": [
+    {
+      "filename": "tinyllama.gguf",
+      // highlight-next-line
+      "url": "<absolute-filepath-of-model-file>"
+    }
+  ],
+  "id": "tinyllama-1.1b",
+  "object": "model",
+  "name": "(Absolute Path) TinyLlama Chat 1.1B Q4",
+  "version": "1.0",
+  "description": "TinyLlama is a tiny model with only 1.1B. It's a good model for less powerful computers.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "<|system|>\n{system_message}<|user|>\n{prompt}<|assistant|>",
+    "llama_model_path": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 2048,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "TinyLlama",
+    "tags": ["Tiny", "Foundation Model"],
+    "size": 669000000
+  },
+  "engine": "nitro"
+}
+```
+
+:::warning
+
+- If you are using Windows, you need to use double backslashes in the `url` property, for example: `C:\\Users\\username\\filename.gguf`.
+
+:::
+
+### 3. Done!
+
+If your model doesn't show up in the **Model Selector** in conversations, **restart the app** or contact us via our [Discord community](https://discord.gg/Dt7MxDyNNZ).
+
 ## Newer versions - nightly versions and v0.4.4+
 
 ### 1. Create a Model Folder