Merge branch 'docs' into update-api-reference

commit dd5d4aecc1 (Hieu, 2024-01-26 00:52:27 +09:00, committed by GitHub)
72 changed files with 1527 additions and 918 deletions

.github/workflows/jan-docs.yml

@@ -5,6 +5,7 @@ on:
     branches:
       - main
       - dev
+      - docs
     paths:
       - 'docs/**'
       - '.github/workflows/jan-docs.yml'
@@ -12,6 +13,7 @@ on:
     branches:
       - main
       - dev
+      - docs
     paths:
       - 'docs/**'
       - '.github/workflows/jan-docs.yml'
@@ -91,13 +93,13 @@ jobs:
           Preview URL: ${{ steps.deployCloudflarePages.outputs.url }}
       - name: Add Custome Domain file
-        if: github.event_name == 'push' && github.ref == 'refs/heads/main' && github.event.pull_request.head.repo.full_name != github.repository
+        if: github.event_name == 'push' && github.ref == 'refs/heads/docs' && github.event.pull_request.head.repo.full_name != github.repository
         run: echo "${{ vars.DOCUSAURUS_DOMAIN }}" > ./docs/build/CNAME
       # Popular action to deploy to GitHub Pages:
       # Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus
       - name: Deploy to GitHub Pages
-        if: github.event_name == 'push' && github.ref == 'refs/heads/main' && github.event.pull_request.head.repo.full_name != github.repository
+        if: github.event_name == 'push' && github.ref == 'refs/heads/docs' && github.event.pull_request.head.repo.full_name != github.repository
         uses: peaceiris/actions-gh-pages@v3
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}

@@ -16,7 +16,6 @@ on:
 jobs:
   set-public-provider:
     runs-on: ubuntu-latest
-    if: github.event_name == 'workflow_dispatch'
     outputs:
       public_provider: ${{ steps.set-public-provider.outputs.public_provider }}
       ref: ${{ steps.set-public-provider.outputs.ref }}

README.md

@@ -76,31 +76,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your computer
 <tr style="text-align:center">
   <td style="text-align:center"><b>Experimental (Nightly Build)</b></td>
   <td style="text-align:center">
-    <a href='https://delta.jan.ai/latest/jan-win-x64-0.4.4-166.exe'>
+    <a href='https://delta.jan.ai/latest/jan-win-x64-0.4.4-180.exe'>
       <img src='./docs/static/img/windows.png' style="height:14px; width: 14px" />
       <b>jan.exe</b>
     </a>
   </td>
   <td style="text-align:center">
-    <a href='https://delta.jan.ai/latest/jan-mac-x64-0.4.4-166.dmg'>
+    <a href='https://delta.jan.ai/latest/jan-mac-x64-0.4.4-180.dmg'>
      <img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
       <b>Intel</b>
     </a>
   </td>
   <td style="text-align:center">
-    <a href='https://delta.jan.ai/latest/jan-mac-arm64-0.4.4-166.dmg'>
+    <a href='https://delta.jan.ai/latest/jan-mac-arm64-0.4.4-180.dmg'>
       <img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
       <b>M1/M2</b>
     </a>
   </td>
   <td style="text-align:center">
-    <a href='https://delta.jan.ai/latest/jan-linux-amd64-0.4.4-166.deb'>
+    <a href='https://delta.jan.ai/latest/jan-linux-amd64-0.4.4-180.deb'>
       <img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
       <b>jan.deb</b>
     </a>
   </td>
   <td style="text-align:center">
-    <a href='https://delta.jan.ai/latest/jan-linux-x86_64-0.4.4-166.AppImage'>
+    <a href='https://delta.jan.ai/latest/jan-linux-x86_64-0.4.4-180.AppImage'>
       <img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
       <b>jan.AppImage</b>
     </a>

core/.prettierignore (new file)

@@ -0,0 +1,5 @@
+.next/
+node_modules/
+dist/
+*.hbs
+*.mdx

@@ -265,19 +265,22 @@ export const downloadModel = async (
   const modelBinaryPath = join(directoryPath, modelId)
   const request = require('request')
-  const rq = request({ url: model.source_url, strictSSL, proxy })
   const progress = require('request-progress')
-  progress(rq, {})
-    .on('progress', function (state: any) {
-      console.log('progress', JSON.stringify(state, null, 2))
-    })
-    .on('error', function (err: Error) {
-      console.error('error', err)
-    })
-    .on('end', function () {
-      console.log('end')
-    })
-    .pipe(fs.createWriteStream(modelBinaryPath))
+
+  for (const source of model.sources) {
+    const rq = request({ url: source, strictSSL, proxy })
+    progress(rq, {})
+      .on('progress', function (state: any) {
+        console.debug('progress', JSON.stringify(state, null, 2))
+      })
+      .on('error', function (err: Error) {
+        console.error('error', err)
+      })
+      .on('end', function () {
+        console.debug('end')
+      })
+      .pipe(fs.createWriteStream(modelBinaryPath))
+  }
+
   return {
     message: `Starting download ${modelId}`,
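
Note that this refactored loop still pipes every artifact into the same `modelBinaryPath`; per-artifact file naming lives in the model extension further down. As a rough sketch of the per-source pattern the new `sources` array enables (here `fetchToFile` is a hypothetical stand-in for the request/request-progress pipeline, not a Jan API):

```ts
import { join } from 'path'

// Shape introduced by this commit (see the ModelArtifact type below).
type ModelArtifact = { filename?: string; url: string }

// Sketch: one destination file per artifact, falling back to the URL basename.
async function downloadArtifacts(
  downloadDir: string,
  sources: ModelArtifact[],
  fetchToFile: (url: string, dest: string) => Promise<void>
): Promise<void> {
  for (const source of sources) {
    const name = source.filename ?? source.url.split('/').pop()!
    await fetchToFile(source.url, join(downloadDir, name))
  }
}
```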

@@ -21,6 +21,11 @@ export enum InferenceEngine {
   hf_endpoint = 'hf_endpoint',
 }
 
+export type ModelArtifact = {
+  filename: string
+  url: string
+}
+
 /**
  * Model type defines the shape of a model object.
  * @stored
@@ -45,7 +50,7 @@ export type Model = {
   /**
    * The model download source. It can be an external url or a local filepath.
    */
-  source_url: string
+  sources: ModelArtifact[]
 
   /**
    * The model identifier, which can be referenced in the API endpoints.
@@ -107,6 +112,8 @@ export type ModelSettingParams = {
   system_prompt?: string
   ai_prompt?: string
   user_prompt?: string
+  llama_model_path?: string
+  mmproj?: string
 }
 
 /**
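
A minimal sketch of a model object under the new shape (values borrowed from the zephyr example in the docs below; this assumes the types are exported from `@janhq/core`, and uses `Partial<Model>` so the other required fields can be omitted here):

```ts
import { Model, ModelArtifact } from '@janhq/core'

// `source_url: string` becomes `sources: ModelArtifact[]`, so one model
// can carry several files, e.g. GGUF weights plus an mmproj projector.
const sources: ModelArtifact[] = [
  {
    filename: 'zephyr-7b-beta.Q4_K_M.gguf',
    url: 'https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf',
  },
]

const zephyr: Partial<Model> = {
  id: 'zephyr-7b',
  object: 'model',
  sources,
  settings: {
    ctx_len: 4096,
    // New optional setting from this commit:
    llama_model_path: 'zephyr-7b-beta.Q4_K_M.gguf',
  },
}
```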

@@ -56,7 +56,6 @@ jan/  # Jan root folder
 
 - Each `model` folder contains a `model.json` file, which is a representation of a model.
 - `model.json` contains metadata and default parameters used to run a model.
-- The only required field is `source_url`.
 
 ### Example
 
@@ -64,36 +63,43 @@ Here's a standard example `model.json` for a GGUF model.
 ```js
 {
   "id": "zephyr-7b", // Defaults to foldername
   "object": "model", // Defaults to "model"
-  "source_url": "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf",
-  "name": "Zephyr 7B", // Defaults to foldername
-  "owned_by": "you", // Defaults to "you"
-  "version": "1", // Defaults to 1
-  "created": 1231231, // Defaults to file creation time
-  "description": null, // Defaults to null
-  "state": enum[null, "ready"]
-  "format": "ggufv3", // Defaults to "ggufv3"
-  "engine": "nitro", // engine_id specified in jan/engine folder
-  "engine_parameters": { // Engine parameters inside model.json can override
-    "ctx_len": 2048, // the value inside the base engine.json
-    "ngl": 100,
-    "embedding": true,
-    "n_parallel": 4,
-  },
-  "model_parameters": { // Models are called parameters
-    "stream": true,
-    "max_tokens": 2048,
-    "stop": ["<endofstring>"], // This usually can be left blank, only used with specific need from model author
-    "frequency_penalty": 0,
-    "presence_penalty": 0,
-    "temperature": 0.7,
-    "top_p": 0.95
-  },
-  "metadata": {}, // Defaults to {}
-  "assets": [ // Defaults to current dir
-    "file://.../zephyr-7b-q4_k_m.bin",
-  ]
+  "sources": [
+    {
+      "filename": "zephyr-7b-beta.Q4_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf"
+    }
+  ],
+  "name": "Zephyr 7B", // Defaults to foldername
+  "owned_by": "you", // Defaults to "you"
+  "version": "1", // Defaults to 1
+  "created": 1231231, // Defaults to file creation time
+  "description": null, // Defaults to null
+  "format": "ggufv3", // Defaults to "ggufv3"
+  "engine": "nitro", // engine_id specified in jan/engine folder
+  "engine_parameters": {
+    // Engine parameters inside model.json can override
+    "ctx_len": 4096, // the value inside the base engine.json
+    "ngl": 100,
+    "embedding": true,
+    "n_parallel": 4
+  },
+  "model_parameters": {
+    // Models are called parameters
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": ["<endofstring>"], // This usually can be left blank, only used with specific need from model author
+    "frequency_penalty": 0,
+    "presence_penalty": 0,
+    "temperature": 0.7,
+    "top_p": 0.95
+  },
+  "metadata": {}, // Defaults to {}
+  "assets": [
+    // Defaults to current dir
+    "file://.../zephyr-7b-q4_k_m.bin"
+  ]
 }
 ```

@@ -31,7 +31,6 @@ In this section, we will show you how to import a GGUF model from [HuggingFace](
 
 ## Manually Importing a Downloaded Model (nightly versions and v0.4.4+)
 
-
 ### 1. Create a Model Folder
 
 Navigate to the `~/jan/models` folder. You can find this folder by going to `App Settings` > `Advanced` > `Open App Directory`.
@@ -92,7 +91,7 @@ Drag and drop your model binary into this folder, ensuring the `modelname.gguf`
 
 #### 3. Voila
 
 If your model doesn't show up in the Model Selector in conversations, please restart the app.
 If that doesn't work, please feel free to join our [Discord community](https://discord.gg/Dt7MxDyNNZ) for support, updates, and discussions.
@@ -190,14 +189,18 @@ Edit `model.json` and include the following configurations:
 
 - Ensure the filename must be `model.json`.
 - Ensure the `id` property matches the folder name you created.
 - Ensure the GGUF filename should match the `id` property exactly.
-- Ensure the `source_url` property is the direct binary download link ending in `.gguf`. In HuggingFace, you can find the direct links in the `Files and versions` tab.
+- Ensure the `source.url` property is the direct binary download link ending in `.gguf`. In HuggingFace, you can find the direct links in the `Files and versions` tab.
 - Ensure you are using the correct `prompt_template`. This is usually provided in the HuggingFace model's description page.
-- Ensure the `state` property is set to `ready`.
 
 ```json title="model.json"
 {
   // highlight-start
-  "source_url": "https://huggingface.co/janhq/trinity-v1-GGUF/resolve/main/trinity-v1.Q4_K_M.gguf",
+  "sources": [
+    {
+      "filename": "trinity-v1.Q4_K_M.gguf",
+      "url": "https://huggingface.co/janhq/trinity-v1-GGUF/resolve/main/trinity-v1.Q4_K_M.gguf"
+    }
+  ],
   "id": "trinity-v1-7b",
   // highlight-end
   "object": "model",
@@ -208,7 +211,8 @@ Edit `model.json` and include the following configurations:
   "settings": {
     "ctx_len": 4096,
     // highlight-next-line
-    "prompt_template": "{system_message}\n### Instruction:\n{prompt}\n### Response:"
+    "prompt_template": "{system_message}\n### Instruction:\n{prompt}\n### Response:",
+    "llama_model_path": "trinity-v1.Q4_K_M.gguf"
   },
   "parameters": {
     "max_tokens": 4096
@@ -218,9 +222,7 @@ Edit `model.json` and include the following configurations:
     "tags": ["7B", "Merged"],
     "size": 4370000000
   },
-  "engine": "nitro",
-  // highlight-next-line
-  "state": "ready"
+  "engine": "nitro"
 }
 ```
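
A common stumbling block with the checklist above: HuggingFace `blob/` links point at an HTML page, while `resolve/` links point at the raw binary, and only the latter belongs in the source `url`. Using the trinity URL from the example:

```ts
// HTML page for the file (downloads a "<!DOCTYPE ..." page, not a model):
//   https://huggingface.co/janhq/trinity-v1-GGUF/blob/main/trinity-v1.Q4_K_M.gguf
// Direct binary link, suitable for the `url` field:
const url =
  'https://huggingface.co/janhq/trinity-v1-GGUF/resolve/main/trinity-v1.Q4_K_M.gguf'
```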

@@ -40,7 +40,12 @@ Navigate to the `~/jan/models` folder. Create a folder named `gpt-3.5-turbo-16k`
 
 ```json title="~/jan/models/gpt-3.5-turbo-16k/model.json"
 {
-  "source_url": "https://openai.com",
+  "sources": [
+    {
+      "filename": "openai",
+      "url": "https://openai.com"
+    }
+  ],
   // highlight-next-line
   "id": "gpt-3.5-turbo-16k",
   "object": "model",
@@ -55,8 +60,7 @@ Navigate to the `~/jan/models` folder. Create a folder named `gpt-3.5-turbo-16k`
     "author": "OpenAI",
     "tags": ["General", "Big Context Length"]
   },
-  "engine": "openai",
-  "state": "ready"
+  "engine": "openai"
   // highlight-end
 }
 ```
@@ -118,7 +122,12 @@ Navigate to the `~/jan/models` folder. Create a folder named `mistral-ins-7b-q4`
 
 ```json title="~/jan/models/mistral-ins-7b-q4/model.json"
 {
-  "source_url": "https://jan.ai",
+  "sources": [
+    {
+      "filename": "janai",
+      "url": "https://jan.ai"
+    }
+  ],
   // highlight-next-line
   "id": "mistral-ins-7b-q4",
   "object": "model",
@@ -134,8 +143,7 @@ Navigate to the `~/jan/models` folder. Create a folder named `mistral-ins-7b-q4`
     "tags": ["remote", "awesome"]
   },
   // highlight-start
-  "engine": "openai",
-  "state": "ready"
+  "engine": "openai"
   // highlight-end
 }
 ```

@@ -49,7 +49,12 @@ Navigate to the `~/jan/models` folder. Create a folder named `<openrouter-modeln
 
 ```json title="~/jan/models/openrouter-dolphin-mixtral-8x7b/model.json"
 {
-  "source_url": "https://openrouter.ai/",
+  "sources": [
+    {
+      "filename": "openrouter",
+      "url": "https://openrouter.ai/"
+    }
+  ],
   "id": "cognitivecomputations/dolphin-mixtral-8x7b",
   "object": "model",
   "name": "Dolphin 2.6 Mixtral 8x7B",
@@ -63,8 +68,7 @@ Navigate to the `~/jan/models` folder. Create a folder named `<openrouter-modeln
     "tags": ["General", "Big Context Length"]
   },
   // highlight-start
-  "engine": "openai",
-  "state": "ready"
+  "engine": "openai"
   // highlight-end
 }
 ```

@@ -59,7 +59,12 @@ Navigate to the `~/jan/models` folder. Create a folder named `<your-deployment-n
 
 ```json title="~/jan/models/gpt-35-hieu-jan/model.json"
 {
-  "source_url": "https://hieujan.openai.azure.com",
+  "sources": [
+    {
+      "filename": "azure_openai",
+      "url": "https://hieujan.openai.azure.com"
+    }
+  ],
   // highlight-next-line
   "id": "gpt-35-hieu-jan",
   "object": "model",
@@ -75,8 +80,7 @@ Navigate to the `~/jan/models` folder. Create a folder named `<your-deployment-n
     "tags": ["General", "Big Context Length"]
   },
   // highlight-start
-  "engine": "openai",
-  "state": "ready"
+  "engine": "openai"
   // highlight-end
 }
 ```

@@ -17,4 +17,8 @@ keywords:
   ]
 ---
 
-1. You may receive an error response `Error occurred: Unexpected token '<', "<!DOCTYPE"...is not valid JSON`, when you start a chat with OpenAI models. Using a VPN may help fix the issue.
+You may receive an error response `Error occurred: Unexpected token '<', "<!DOCTYPE"...is not valid JSON`, when you start a chat with OpenAI models.
+
+1. Check that you added an OpenAI API key. You can get an API key from OpenAI's [developer platform](https://platform.openai.com/). Alternatively, we recommend you download a local model from Jan Hub, which remains free to use and runs on your own computer!
+2. Using a VPN may help fix the issue.

@@ -1 +1 @@
-0.2.11
+0.2.12

@@ -119,11 +119,19 @@ async function runModel(
     wrapper.model.settings.ai_prompt = prompt.ai_prompt;
   }
 
+  const modelFolderPath = path.join(janRoot, "models", wrapper.model.id);
+  const modelPath = wrapper.model.settings.llama_model_path
+    ? path.join(modelFolderPath, wrapper.model.settings.llama_model_path)
+    : currentModelFile;
+
   currentSettings = {
-    llama_model_path: currentModelFile,
     ...wrapper.model.settings,
+    llama_model_path: modelPath,
     // This is critical and requires real CPU physical core count (or performance core)
     cpu_threads: Math.max(1, nitroResourceProbe.numCpuPhysicalCore),
+    ...(wrapper.model.settings.mmproj && {
+      mmproj: path.join(modelFolderPath, wrapper.model.settings.mmproj),
+    }),
   };
   console.log(currentSettings);
   return runNitroAndLoadModel();
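
The move of `llama_model_path` from before the spread to after it is the behavioral heart of this hunk: in a JavaScript object literal, later keys win, so the resolved absolute path now overrides whatever relative filename `model.json` carried. A standalone illustration of that semantic (names here are illustrative only):

```ts
const fromModelJson = { llama_model_path: 'model.Q4_K_M.gguf', ctx_len: 4096 }

// Old order: the value spread in from model.json silently wins.
const before = { llama_model_path: '/models/default.gguf', ...fromModelJson }
// before.llama_model_path === 'model.Q4_K_M.gguf'

// New order: the path resolved against the model folder wins.
const after = { ...fromModelJson, llama_model_path: '/models/foo/model.Q4_K_M.gguf' }
// after.llama_model_path === '/models/foo/model.Q4_K_M.gguf'
```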

@@ -1,6 +1,6 @@
 {
   "name": "@janhq/model-extension",
-  "version": "1.0.22",
+  "version": "1.0.23",
   "description": "Model Management Extension provides model exploration and seamless downloads",
   "main": "dist/index.js",
   "module": "dist/module.js",

@@ -80,16 +80,34 @@ export default class JanModelExtension extends ModelExtension {
     const modelDirPath = await joinPath([JanModelExtension._homeDir, model.id])
     if (!(await fs.existsSync(modelDirPath))) await fs.mkdirSync(modelDirPath)
 
-    // try to retrieve the download file name from the source url
-    // if it fails, use the model ID as the file name
-    const extractedFileName = await model.source_url.split('/').pop()
+    if (model.sources.length > 1) {
+      // path to model binaries
+      for (const source of model.sources) {
+        let path = this.extractFileName(source.url)
+        if (source.filename) {
+          path = await joinPath([modelDirPath, source.filename])
+        }
+        downloadFile(source.url, path, network)
+      }
+    } else {
+      const fileName = this.extractFileName(model.sources[0]?.url)
+      const path = await joinPath([modelDirPath, fileName])
+      downloadFile(model.sources[0]?.url, path, network)
+    }
+  }
+
+  /**
+   * try to retrieve the download file name from the source url
+   */
+  private extractFileName(url: string): string {
+    const extractedFileName = url.split('/').pop()
     const fileName = extractedFileName
       .toLowerCase()
       .endsWith(JanModelExtension._supportedModelFormat)
       ? extractedFileName
-      : model.id
-    const path = await joinPath([modelDirPath, fileName])
-    downloadFile(model.source_url, path, network)
+      : extractedFileName + JanModelExtension._supportedModelFormat
+    return fileName
   }
 
   /**
@@ -98,6 +116,7 @@ export default class JanModelExtension extends ModelExtension {
    * @returns {Promise<void>} A promise that resolves when the download has been cancelled.
    */
   async cancelModelDownload(modelId: string): Promise<void> {
+    const model = await this.getConfiguredModels()
     return abortDownload(
       await joinPath([JanModelExtension._homeDir, modelId, modelId])
     ).then(async () => {
@@ -163,15 +182,16 @@ export default class JanModelExtension extends ModelExtension {
       .then((files: string[]) => {
         // or model binary exists in the directory
         // model binary name can match model ID or be a .gguf file and not be an incompleted model file
+        // TODO: Check diff between urls, filenames
         return (
           files.includes(modelDir) ||
-          files.some(
+          files.filter(
             (file) =>
               file
                 .toLowerCase()
                 .includes(JanModelExtension._supportedModelFormat) &&
               !file.endsWith(JanModelExtension._incompletedModelFileName)
-          )
+          )?.length >= model.sources.length
         )
       })
   }
@@ -198,7 +218,6 @@ export default class JanModelExtension extends ModelExtension {
     const readJsonPromises = allDirectories.map(async (dirName) => {
       // filter out directories that don't match the selector
       // read model.json
-
       const jsonPath = await joinPath([
         JanModelExtension._homeDir,
@@ -226,7 +245,21 @@ export default class JanModelExtension extends ModelExtension {
     const modelData = results.map((result) => {
       if (result.status === 'fulfilled') {
         try {
-          return result.value as Model
+          // This to ensure backward compatibility with `model.json` with `source_url`
+          const tmpModel =
+            typeof result.value === 'object'
+              ? result.value
+              : JSON.parse(result.value)
+          if (tmpModel['source_url'] != null) {
+            tmpModel['source'] = [
+              {
+                filename: tmpModel.id,
+                url: tmpModel['source_url'],
+              },
+            ]
+          }
+          return tmpModel as Model
         } catch {
           console.debug(`Unable to parse model metadata: ${result.value}`)
           return undefined
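
For older `model.json` files, the mapping above rebuilds the artifact list from `source_url` at read time (this commit writes it to a `source` key; the sketch below targets the `sources` field from the new `Model` type). Standalone, the normalization amounts to:

```ts
type ModelArtifact = { filename: string; url: string }

type LegacyModelJson = {
  id: string
  source_url?: string
  sources?: ModelArtifact[]
}

// Sketch: accept either schema and always hand back a `sources` array,
// reusing the model id as the filename, like the commit's fallback does.
function normalizeSources(model: LegacyModelJson): ModelArtifact[] {
  if (model.sources?.length) return model.sources
  if (model.source_url) return [{ filename: model.id, url: model.source_url }]
  return []
}
```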

models/bakllava-1/model.json (new file)

@@ -0,0 +1,33 @@
+{
+  "sources": [
+    {
+      "filename": "ggml-model-q5_k.gguf",
+      "url": "https://huggingface.co/mys/ggml_bakllava-1/resolve/main/ggml-model-q5_k.gguf"
+    },
+    {
+      "filename": "mmproj-model-f16.gguf",
+      "url": "https://huggingface.co/mys/ggml_bakllava-1/resolve/main/mmproj-model-f16.gguf"
+    }
+  ],
+  "id": "bakllava-1",
+  "object": "model",
+  "name": "BakLlava 1",
+  "version": "1.0",
+  "description": "BakLlava 1 can bring vision understanding to Jan",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n",
+    "llama_model_path": "ggml-model-q5_k.gguf",
+    "mmproj": "mmproj-model-f16.gguf"
+  },
+  "parameters": {
+    "max_tokens": 4096
+  },
+  "metadata": {
+    "author": "Mys",
+    "tags": ["Vision"],
+    "size": 5750000000
+  },
+  "engine": "nitro"
+}

models/capybara-34b/model.json

@@ -1,29 +1,34 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/Nous-Capybara-34B-GGUF/resolve/main/nous-capybara-34b.Q5_K_M.gguf",
-  "id": "capybara-34b",
-  "object": "model",
-  "name": "Capybara 200k 34B Q5",
-  "version": "1.0",
-  "description": "Nous Capybara 34B is a long context length model that supports 200K tokens.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "USER:\n{prompt}\nASSISTANT:"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "NousResearch, The Bloke",
-    "tags": ["34B", "Finetuned"],
-    "size": 24320000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "nous-capybara-34b.Q5_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/Nous-Capybara-34B-GGUF/resolve/main/nous-capybara-34b.Q5_K_M.gguf"
+    }
+  ],
+  "id": "capybara-34b",
+  "object": "model",
+  "name": "Capybara 200k 34B Q5",
+  "version": "1.0",
+  "description": "Nous Capybara 34B is a long context length model that supports 200K tokens.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "USER:\n{prompt}\nASSISTANT:",
+    "llama_model_path": "nous-capybara-34b.Q5_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "NousResearch, The Bloke",
+    "tags": ["34B", "Finetuned"],
+    "size": 24320000000
+  },
+  "engine": "nitro"
+}

models/codeninja-1.0-7b/model.json

@@ -1,29 +1,33 @@
-{
-  "source_url": "https://huggingface.co/beowolx/CodeNinja-1.0-OpenChat-7B-GGUF/resolve/main/codeninja-1.0-openchat-7b.Q4_K_M.gguf",
-  "id": "codeninja-1.0-7b",
-  "object": "model",
-  "name": "CodeNinja 7B Q4",
-  "version": "1.0",
-  "description": "CodeNinja is good for coding tasks and can handle various languages including Python, C, C++, Rust, Java, JavaScript, and more.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": ["<|end_of_turn|>"],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Beowolx",
-    "tags": ["7B", "Finetuned"],
-    "size": 4370000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "codeninja-1.0-openchat-7b.Q4_K_M.gguf",
+      "url": "https://huggingface.co/beowolx/CodeNinja-1.0-OpenChat-7B-GGUF/resolve/main/codeninja-1.0-openchat-7b.Q4_K_M.gguf"
+    }
+  ],
+  "id": "codeninja-1.0-7b",
+  "object": "model",
+  "name": "CodeNinja 7B Q4",
+  "version": "1.0",
+  "description": "CodeNinja is good for coding tasks and can handle various languages including Python, C, C++, Rust, Java, JavaScript, and more.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:",
+    "llama_model_path": "codeninja-1.0-openchat-7b.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Beowolx",
+    "tags": ["7B", "Finetuned"],
+    "size": 4370000000
+  },
+  "engine": "nitro"
+}

@@ -2,7 +2,12 @@
   "object": "model",
   "version": 1,
   "format": "gguf",
-  "source_url": "N/A",
+  "sources": [
+    {
+      "url": "N/A",
+      "filename": "N/A"
+    }
+  ],
   "id": "N/A",
   "name": "N/A",
   "created": 0,
@@ -10,7 +15,8 @@
   "settings": {
     "ctx_len": 4096,
     "embedding": false,
-    "prompt_template": "{system_message}\n### Instruction: {prompt}\n### Response:"
+    "prompt_template": "{system_message}\n### Instruction: {prompt}\n### Response:",
+    "llama_model_path": "N/A"
   },
   "parameters": {
     "temperature": 0.7,

models/deepseek-coder-1.3b/model.json

@@ -1,29 +1,34 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/deepseek-coder-1.3b-instruct-GGUF/resolve/main/deepseek-coder-1.3b-instruct.Q8_0.gguf",
-  "id": "deepseek-coder-1.3b",
-  "object": "model",
-  "name": "Deepseek Coder 1.3B Q8",
-  "version": "1.0",
-  "description": "Deepseek Coder excelled in project-level code completion with advanced capabilities across multiple programming languages.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "### Instruction:\n{prompt}\n### Response:"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Deepseek, The Bloke",
-    "tags": ["Tiny", "Foundational Model"],
-    "size": 1430000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "deepseek-coder-1.3b-instruct.Q8_0.gguf",
+      "url": "https://huggingface.co/TheBloke/deepseek-coder-1.3b-instruct-GGUF/resolve/main/deepseek-coder-1.3b-instruct.Q8_0.gguf"
+    }
+  ],
+  "id": "deepseek-coder-1.3b",
+  "object": "model",
+  "name": "Deepseek Coder 1.3B Q8",
+  "version": "1.0",
+  "description": "Deepseek Coder excelled in project-level code completion with advanced capabilities across multiple programming languages.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "### Instruction:\n{prompt}\n### Response:",
+    "llama_model_path": "deepseek-coder-1.3b-instruct.Q8_0.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Deepseek, The Bloke",
+    "tags": ["Tiny", "Foundational Model"],
+    "size": 1430000000
+  },
+  "engine": "nitro"
+}

models/deepseek-coder-34b/model.json

@@ -1,29 +1,34 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/deepseek-coder-33B-instruct-GGUF/resolve/main/deepseek-coder-33b-instruct.Q5_K_M.gguf",
-  "id": "deepseek-coder-34b",
-  "object": "model",
-  "name": "Deepseek Coder 33B Q5",
-  "version": "1.0",
-  "description": "Deepseek Coder excelled in project-level code completion with advanced capabilities across multiple programming languages.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "### Instruction:\n{prompt}\n### Response:"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Deepseek, The Bloke",
-    "tags": ["34B", "Foundational Model"],
-    "size": 19940000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "deepseek-coder-33b-instruct.Q5_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/deepseek-coder-33B-instruct-GGUF/resolve/main/deepseek-coder-33b-instruct.Q5_K_M.gguf"
+    }
+  ],
+  "id": "deepseek-coder-34b",
+  "object": "model",
+  "name": "Deepseek Coder 33B Q5",
+  "version": "1.0",
+  "description": "Deepseek Coder excelled in project-level code completion with advanced capabilities across multiple programming languages.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "### Instruction:\n{prompt}\n### Response:",
+    "llama_model_path": "deepseek-coder-33b-instruct.Q5_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Deepseek, The Bloke",
+    "tags": ["34B", "Foundational Model"],
+    "size": 19940000000
+  },
+  "engine": "nitro"
+}

models/dolphin-2.7-mixtral-8x7b/model.json

@@ -1,28 +1,34 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/dolphin-2.7-mixtral-8x7b-GGUF/resolve/main/dolphin-2.7-mixtral-8x7b.Q4_K_M.gguf",
-  "id": "dolphin-2.7-mixtral-8x7b",
-  "object": "model",
-  "name": "Dolphin 8x7B Q4",
-  "version": "1.0",
-  "description": "Dolphin is an uncensored model built on Mixtral-8x7b. It is good at programming tasks.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Cognitive Computations, TheBloke",
-    "tags": ["70B", "Finetuned"],
-    "size": 26440000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "dolphin-2.7-mixtral-8x7b.Q4_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/dolphin-2.7-mixtral-8x7b-GGUF/resolve/main/dolphin-2.7-mixtral-8x7b.Q4_K_M.gguf"
+    }
+  ],
+  "id": "dolphin-2.7-mixtral-8x7b",
+  "object": "model",
+  "name": "Dolphin 8x7B Q4",
+  "version": "1.0",
+  "description": "Dolphin is an uncensored model built on Mixtral-8x7b. It is good at programming tasks.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
+    "llama_model_path": "dolphin-2.7-mixtral-8x7b.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Cognitive Computations, TheBloke",
+    "tags": ["70B", "Finetuned"],
+    "size": 26440000000
+  },
+  "engine": "nitro"
+}

models/gpt-3.5-turbo-16k-0613/model.json

@@ -1,18 +1,20 @@
 {
-  "source_url": "https://openai.com",
-  "id": "gpt-3.5-turbo-16k-0613",
-  "object": "model",
-  "name": "OpenAI GPT 3.5 Turbo 16k 0613",
-  "version": "1.0",
-  "description": "OpenAI GPT 3.5 Turbo 16k 0613 model is extremely good",
-  "format": "api",
-  "settings": {},
-  "parameters": {},
-  "metadata": {
-    "author": "OpenAI",
-    "tags": ["General", "Big Context Length"]
-  },
-  "engine": "openai",
-  "state": "ready"
+  "sources": [
+    {
+      "url": "https://openai.com"
+    }
+  ],
+  "id": "gpt-3.5-turbo-16k-0613",
+  "object": "model",
+  "name": "OpenAI GPT 3.5 Turbo 16k 0613",
+  "version": "1.0",
+  "description": "OpenAI GPT 3.5 Turbo 16k 0613 model is extremely good",
+  "format": "api",
+  "settings": {},
+  "parameters": {},
+  "metadata": {
+    "author": "OpenAI",
+    "tags": ["General", "Big Context Length"]
+  },
+  "engine": "openai"
 }

models/gpt-3.5-turbo/model.json

@@ -1,18 +1,20 @@
 {
-  "source_url": "https://openai.com",
-  "id": "gpt-3.5-turbo",
-  "object": "model",
-  "name": "OpenAI GPT 3.5 Turbo",
-  "version": "1.0",
-  "description": "OpenAI GPT 3.5 Turbo model is extremely good",
-  "format": "api",
-  "settings": {},
-  "parameters": {},
-  "metadata": {
-    "author": "OpenAI",
-    "tags": ["General", "Big Context Length"]
-  },
-  "engine": "openai",
-  "state": "ready"
+  "sources": [
+    {
+      "url": "https://openai.com"
+    }
+  ],
+  "id": "gpt-3.5-turbo",
+  "object": "model",
+  "name": "OpenAI GPT 3.5 Turbo",
+  "version": "1.0",
+  "description": "OpenAI GPT 3.5 Turbo model is extremely good",
+  "format": "api",
+  "settings": {},
+  "parameters": {},
+  "metadata": {
+    "author": "OpenAI",
+    "tags": ["General", "Big Context Length"]
+  },
+  "engine": "openai"
 }

models/gpt-4/model.json

@@ -1,18 +1,20 @@
 {
-  "source_url": "https://openai.com",
-  "id": "gpt-4",
-  "object": "model",
-  "name": "OpenAI GPT 4",
-  "version": "1.0",
-  "description": "OpenAI GPT 4 model is extremely good",
-  "format": "api",
-  "settings": {},
-  "parameters": {},
-  "metadata": {
-    "author": "OpenAI",
-    "tags": ["General", "Big Context Length"]
-  },
-  "engine": "openai",
-  "state": "ready"
+  "sources": [
+    {
+      "url": "https://openai.com"
+    }
+  ],
+  "id": "gpt-4",
+  "object": "model",
+  "name": "OpenAI GPT 4",
+  "version": "1.0",
+  "description": "OpenAI GPT 4 model is extremely good",
+  "format": "api",
+  "settings": {},
+  "parameters": {},
+  "metadata": {
+    "author": "OpenAI",
+    "tags": ["General", "Big Context Length"]
+  },
+  "engine": "openai"
 }

models/llama2-chat-70b-q4/model.json

@@ -1,29 +1,34 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/Llama-2-70B-Chat-GGUF/resolve/main/llama-2-70b-chat.Q4_K_M.gguf",
-  "id": "llama2-chat-70b-q4",
-  "object": "model",
-  "name": "Llama 2 Chat 70B Q4",
-  "version": "1.0",
-  "description": "Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "[INST] <<SYS>>\n{system_message}<</SYS>>\n{prompt}[/INST]"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "MetaAI, The Bloke",
-    "tags": ["70B", "Foundational Model"],
-    "size": 43920000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "llama-2-70b-chat.Q4_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/Llama-2-70B-Chat-GGUF/resolve/main/llama-2-70b-chat.Q4_K_M.gguf"
+    }
+  ],
+  "id": "llama2-chat-70b-q4",
+  "object": "model",
+  "name": "Llama 2 Chat 70B Q4",
+  "version": "1.0",
+  "description": "Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "[INST] <<SYS>>\n{system_message}<</SYS>>\n{prompt}[/INST]",
+    "llama_model_path": "llama-2-70b-chat.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "MetaAI, The Bloke",
+    "tags": ["70B", "Foundational Model"],
+    "size": 43920000000
+  },
+  "engine": "nitro"
+}

models/llama2-chat-7b-q4/model.json

@@ -1,29 +1,34 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf",
-  "id": "llama2-chat-7b-q4",
-  "object": "model",
-  "name": "Llama 2 Chat 7B Q4",
-  "version": "1.0",
-  "description": "Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "[INST] <<SYS>>\n{system_message}<</SYS>>\n{prompt}[/INST]"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "MetaAI, The Bloke",
-    "tags": ["7B", "Foundational Model"],
-    "size": 4080000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "llama-2-7b-chat.Q4_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf"
+    }
+  ],
+  "id": "llama2-chat-7b-q4",
+  "object": "model",
+  "name": "Llama 2 Chat 7B Q4",
+  "version": "1.0",
+  "description": "Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "[INST] <<SYS>>\n{system_message}<</SYS>>\n{prompt}[/INST]",
+    "llama_model_path": "llama-2-7b-chat.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "MetaAI, The Bloke",
+    "tags": ["7B", "Foundational Model"],
+    "size": 4080000000
+  },
+  "engine": "nitro"
+}

models/llava-1.5-13b-q5/model.json (new file)

@@ -0,0 +1,33 @@
+{
+  "sources": [
+    {
+      "filename": "ggml-model-q5_k.gguf",
+      "url": "https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/ggml-model-q5_k.gguf"
+    },
+    {
+      "filename": "mmproj-model-f16.gguf",
+      "url": "https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/mmproj-model-f16.gguf"
+    }
+  ],
+  "id": "llava-1.5-13b-q5",
+  "object": "model",
+  "name": "LlaVa 1.5 13B Q5 K",
+  "version": "1.0",
+  "description": "LlaVa 1.5 can bring vision understanding to Jan",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n",
+    "llama_model_path": "ggml-model-q5_k.gguf",
+    "mmproj": "mmproj-model-f16.gguf"
+  },
+  "parameters": {
+    "max_tokens": 4096
+  },
+  "metadata": {
+    "author": "Mys",
+    "tags": ["Vision"],
+    "size": 9850000000
+  },
+  "engine": "nitro"
+}

models/llava-1.5-7b-q5/model.json (new file)

@@ -0,0 +1,33 @@
+{
+  "sources": [
+    {
+      "filename": "ggml-model-q5_k.gguf",
+      "url": "https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/ggml-model-q5_k.gguf"
+    },
+    {
+      "filename": "mmproj-model-f16.gguf",
+      "url": "https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/mmproj-model-f16.gguf"
+    }
+  ],
+  "id": "llava-1.5-7b-q5",
+  "object": "model",
+  "name": "LlaVa 1.5 7B Q5 K",
+  "version": "1.0",
+  "description": "LlaVa 1.5 can bring vision understanding to Jan",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n",
+    "llama_model_path": "ggml-model-q5_k.gguf",
+    "mmproj": "mmproj-model-f16.gguf"
+  },
+  "parameters": {
+    "max_tokens": 4096
+  },
+  "metadata": {
+    "author": "Mys",
+    "tags": ["Vision"],
+    "size": 5400000000
+  },
+  "engine": "nitro"
+}

models/mistral-ins-7b-q4/model.json

@@ -1,30 +1,35 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf",
-  "id": "mistral-ins-7b-q4",
-  "object": "model",
-  "name": "Mistral Instruct 7B Q4",
-  "version": "1.0",
-  "description": "Mistral Instruct 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "[INST] {prompt} [/INST]"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "MistralAI, The Bloke",
-    "tags": ["Featured", "7B", "Foundational Model"],
-    "size": 4370000000,
-    "cover": "https://raw.githubusercontent.com/janhq/jan/main/models/mistral-ins-7b-q4/cover.png"
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "mistral-7b-instruct-v0.2.Q4_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf"
+    }
+  ],
+  "id": "mistral-ins-7b-q4",
+  "object": "model",
+  "name": "Mistral Instruct 7B Q4",
+  "version": "1.0",
+  "description": "Mistral Instruct 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "[INST] {prompt} [/INST]",
+    "llama_model_path": "mistral-7b-instruct-v0.2.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "MistralAI, The Bloke",
+    "tags": ["Featured", "7B", "Foundational Model"],
+    "size": 4370000000,
+    "cover": "https://raw.githubusercontent.com/janhq/jan/main/models/mistral-ins-7b-q4/cover.png"
+  },
+  "engine": "nitro"
+}

models/mixtral-8x7b-instruct/model.json

@@ -1,28 +1,33 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf",
-  "id": "mixtral-8x7b-instruct",
-  "object": "model",
-  "name": "Mixtral 8x7B Instruct Q4",
-  "version": "1.0",
-  "description": "The Mixtral-8x7B is a pretrained generative Sparse Mixture of Experts. The Mixtral-8x7B outperforms 70B models on most benchmarks.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "[INST] {prompt} [/INST]"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "MistralAI, TheBloke",
-    "tags": ["70B", "Foundational Model"],
-    "size": 26440000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf"
+    }
+  ],
+  "id": "mixtral-8x7b-instruct",
+  "object": "model",
+  "name": "Mixtral 8x7B Instruct Q4",
+  "version": "1.0",
+  "description": "The Mixtral-8x7B is a pretrained generative Sparse Mixture of Experts. The Mixtral-8x7B outperforms 70B models on most benchmarks.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "[INST] {prompt} [/INST]",
+    "llama_model_path": "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "MistralAI, TheBloke",
+    "tags": ["70B", "Foundational Model"],
+    "size": 26440000000
+  },
+  "engine": "nitro"
+}

models/noromaid-7b/model.json

@@ -1,29 +1,34 @@
-{
-  "source_url": "https://huggingface.co/NeverSleep/Noromaid-7b-v0.1.1-GGUF/resolve/main/Noromaid-7b-v0.1.1.q5_k_m.gguf",
-  "id": "noromaid-7b",
-  "object": "model",
-  "name": "Noromaid 7B Q5",
-  "version": "1.0",
-  "description": "The Noromaid 7b model is designed for role-playing with human-like behavior.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "### Instruction:{prompt}\n### Response:"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "NeverSleep",
-    "tags": ["7B", "Merged"],
-    "size": 4370000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "Noromaid-7b-v0.1.1.q5_k_m.gguf",
+      "url": "https://huggingface.co/NeverSleep/Noromaid-7b-v0.1.1-GGUF/resolve/main/Noromaid-7b-v0.1.1.q5_k_m.gguf"
+    }
+  ],
+  "id": "noromaid-7b",
+  "object": "model",
+  "name": "Noromaid 7B Q5",
+  "version": "1.0",
+  "description": "The Noromaid 7b model is designed for role-playing with human-like behavior.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "### Instruction:{prompt}\n### Response:",
+    "llama_model_path": "Noromaid-7b-v0.1.1.q5_k_m.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "NeverSleep",
+    "tags": ["7B", "Merged"],
+    "size": 4370000000
+  },
+  "engine": "nitro"
+}

models/openchat-3.5-7b/model.json

@@ -1,28 +1,34 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/openchat-3.5-1210-GGUF/resolve/main/openchat-3.5-1210.Q4_K_M.gguf",
-  "id": "openchat-3.5-7b",
-  "object": "model",
-  "name": "Openchat-3.5 7B Q4",
-  "version": "1.0",
-  "description": "The performance of this open-source model surpasses that of ChatGPT-3.5 and Grok-1 across various benchmarks.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": ["<|end_of_turn|>"],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Openchat",
-    "tags": ["Recommended", "7B", "Finetuned"],
-    "size": 4370000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "openchat-3.5-1210.Q4_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/openchat-3.5-1210-GGUF/resolve/main/openchat-3.5-1210.Q4_K_M.gguf"
+    }
+  ],
+  "id": "openchat-3.5-7b",
+  "object": "model",
+  "name": "Openchat-3.5 7B Q4",
+  "version": "1.0",
+  "description": "The performance of this open-source model surpasses that of ChatGPT-3.5 and Grok-1 across various benchmarks.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:",
+    "llama_model_path": "openchat-3.5-1210.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": ["<|end_of_turn|>"],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Openchat",
+    "tags": ["Recommended", "7B", "Finetuned"],
+    "size": 4370000000
+  },
+  "engine": "nitro"
+}

models/openhermes-neural-7b/model.json

@@ -1,29 +1,34 @@
-{
-  "source_url": "https://huggingface.co/janhq/openhermes-2.5-neural-chat-v3-3-slerp-GGUF/resolve/main/openhermes-2.5-neural-chat-v3-3-slerp.Q4_K_M.gguf",
-  "id": "openhermes-neural-7b",
-  "object": "model",
-  "name": "OpenHermes Neural 7B Q4",
-  "version": "1.0",
-  "description": "OpenHermes Neural is a merged model using the TIES method. It performs well in various benchmarks.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Intel, Jan",
-    "tags": ["7B", "Merged", "Featured"],
-    "size": 4370000000,
-    "cover": "https://raw.githubusercontent.com/janhq/jan/main/models/openhermes-neural-7b/cover.png"
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "openhermes-2.5-neural-chat-v3-3-slerp.Q4_K_M.gguf",
+      "url": "https://huggingface.co/janhq/openhermes-2.5-neural-chat-v3-3-slerp-GGUF/resolve/main/openhermes-2.5-neural-chat-v3-3-slerp.Q4_K_M.gguf"
+    }
+  ],
+  "id": "openhermes-neural-7b",
+  "object": "model",
+  "name": "OpenHermes Neural 7B Q4",
+  "version": "1.0",
+  "description": "OpenHermes Neural is a merged model using the TIES method. It performs well in various benchmarks.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
+    "llama_model_path": "openhermes-2.5-neural-chat-v3-3-slerp.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Intel, Jan",
+    "tags": ["7B", "Merged", "Featured"],
+    "size": 4370000000,
+    "cover": "https://raw.githubusercontent.com/janhq/jan/main/models/openhermes-neural-7b/cover.png"
+  },
+  "engine": "nitro"
+}

models/phi-2-3b/model.json

@@ -1,29 +1,34 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/phi-2-GGUF/resolve/main/phi-2.Q8_0.gguf",
-  "id": "phi-2-3b",
-  "object": "model",
-  "name": "Phi-2 3B Q8",
-  "version": "1.0",
-  "description": "Phi-2 is a 2.7B model, excelling in common sense and logical reasoning benchmarks, trained with synthetic texts and filtered websites.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "Intruct:\n{prompt}\nOutput:"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Microsoft",
-    "tags": ["3B","Foundational Model"],
-    "size": 2960000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "phi-2.Q8_0.gguf",
+      "url": "https://huggingface.co/TheBloke/phi-2-GGUF/resolve/main/phi-2.Q8_0.gguf"
+    }
+  ],
+  "id": "phi-2-3b",
+  "object": "model",
+  "name": "Phi-2 3B Q8",
+  "version": "1.0",
+  "description": "Phi-2 is a 2.7B model, excelling in common sense and logical reasoning benchmarks, trained with synthetic texts and filtered websites.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "Intruct:\n{prompt}\nOutput:",
+    "llama_model_path": "phi-2.Q8_0.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Microsoft",
+    "tags": ["3B", "Foundational Model"],
+    "size": 2960000000
+  },
+  "engine": "nitro"
+}

models/phind-34b/model.json

@@ -1,29 +1,34 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/Phind-CodeLlama-34B-v2-GGUF/resolve/main/phind-codellama-34b-v2.Q5_K_M.gguf",
-  "id": "phind-34b",
-  "object": "model",
-  "name": "Phind 34B Q5",
-  "version": "1.0",
-  "description": "Phind 34B is fine-tuned on 1.5B tokens of high-quality programming data. This multi-lingual model excels in various programming languages and is designed to be steerable and user-friendly.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "### System Prompt\n{system_message}\n### User Message\n{prompt}\n### Assistant"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Phind, The Bloke",
-    "tags": ["34B", "Finetuned"],
-    "size": 20220000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "phind-codellama-34b-v2.Q5_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/Phind-CodeLlama-34B-v2-GGUF/resolve/main/phind-codellama-34b-v2.Q5_K_M.gguf"
+    }
+  ],
+  "id": "phind-34b",
+  "object": "model",
+  "name": "Phind 34B Q5",
+  "version": "1.0",
+  "description": "Phind 34B is fine-tuned on 1.5B tokens of high-quality programming data. This multi-lingual model excels in various programming languages and is designed to be steerable and user-friendly.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "### System Prompt\n{system_message}\n### User Message\n{prompt}\n### Assistant",
+    "llama_model_path": "phind-codellama-34b-v2.Q5_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": [],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Phind, The Bloke",
+    "tags": ["34B", "Finetuned"],
+    "size": 20220000000
+  },
+  "engine": "nitro"
+}

models/solar-10.7b-slerp/model.json

@@ -1,29 +1,33 @@
-{
-  "source_url": "https://huggingface.co/janhq/Solar-10.7B-SLERP-GGUF/resolve/main/solar-10.7b-slerp.Q4_K_M.gguf",
-  "id": "solar-10.7b-slerp",
-  "object": "model",
-  "name": "Solar Slerp 10.7B Q4",
-  "version": "1.0",
-  "description": "This model uses the Slerp merge method from SOLAR Instruct and Pandora-v1",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "### User: {prompt}\n### Assistant:"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Jan",
-    "tags": ["13B","Finetuned"],
-    "size": 6360000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "solar-10.7b-slerp.Q4_K_M.gguf",
+      "url": "https://huggingface.co/janhq/Solar-10.7B-SLERP-GGUF/resolve/main/solar-10.7b-slerp.Q4_K_M.gguf"
+    }
+  ],
+  "id": "solar-10.7b-slerp",
+  "object": "model",
+  "name": "Solar Slerp 10.7B Q4",
+  "version": "1.0",
+  "description": "This model uses the Slerp merge method from SOLAR Instruct and Pandora-v1",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "### User: {prompt}\n### Assistant:",
+    "llama_model_path": "solar-10.7b-slerp.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Jan",
+    "tags": ["13B", "Finetuned"],
+    "size": 6360000000
+  },
+  "engine": "nitro"
+}

models/starling-7b/model.json

@@ -1,29 +1,34 @@
-{
-  "source_url": "https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GGUF/resolve/main/starling-lm-7b-alpha.Q4_K_M.gguf",
-  "id": "starling-7b",
-  "object": "model",
-  "name": "Starling alpha 7B Q4",
-  "version": "1.0",
-  "description": "Starling 7B, an upgrade of Openchat 3.5 using RLAIF, is really good at various benchmarks, especially with GPT-4 judging its performance.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "GPT4 User: {prompt}<|end_of_turn|>GPT4 Assistant:"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": ["<|end_of_turn|>"],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Berkeley-nest, The Bloke",
-    "tags": ["7B","Finetuned"],
-    "size": 4370000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "starling-lm-7b-alpha.Q4_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GGUF/resolve/main/starling-lm-7b-alpha.Q4_K_M.gguf"
+    }
+  ],
+  "id": "starling-7b",
+  "object": "model",
+  "name": "Starling alpha 7B Q4",
+  "version": "1.0",
+  "description": "Starling 7B, an upgrade of Openchat 3.5 using RLAIF, is really good at various benchmarks, especially with GPT-4 judging its performance.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "GPT4 User: {prompt}<|end_of_turn|>GPT4 Assistant:",
+    "llama_model_path": "starling-lm-7b-alpha.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "stop": ["<|end_of_turn|>"],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Berkeley-nest, The Bloke",
+    "tags": ["7B", "Finetuned"],
+    "size": 4370000000
+  },
+  "engine": "nitro"
+}

models/stealth-v1.2-7b/model.json

@@ -1,32 +1,33 @@
-{
-  "source_url": "https://huggingface.co/janhq/stealth-v1.3-GGUF/resolve/main/stealth-v1.3.Q4_K_M.gguf",
-  "id": "stealth-v1.2-7b",
-  "object": "model",
-  "name": "Stealth 7B Q4",
-  "version": "1.0",
-  "description": "This is a new experimental family designed to enhance Mathematical and Logical abilities.",
-  "format": "gguf",
-  "settings": {
-    "ctx_len": 4096,
-    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
-  },
-  "parameters": {
-    "temperature": 0.7,
-    "top_p": 0.95,
-    "stream": true,
-    "max_tokens": 4096,
-    "stop": [],
-    "frequency_penalty": 0,
-    "presence_penalty": 0
-  },
-  "metadata": {
-    "author": "Jan",
-    "tags": [
-      "7B",
-      "Finetuned",
-      "Featured"
-    ],
-    "size": 4370000000
-  },
-  "engine": "nitro"
-}
+{
+  "sources": [
+    {
+      "filename": "stealth-v1.3.Q4_K_M.gguf",
+      "url": "https://huggingface.co/janhq/stealth-v1.3-GGUF/resolve/main/stealth-v1.3.Q4_K_M.gguf"
+    }
+  ],
+  "id": "stealth-v1.2-7b",
+  "object": "model",
+  "name": "Stealth 7B Q4",
+  "version": "1.0",
+  "description": "This is a new experimental family designed to enhance Mathematical and Logical abilities.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 4096,
+    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
+    "llama_model_path": "stealth-v1.3.Q4_K_M.gguf"
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 4096,
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Jan",
+    "tags": ["7B", "Finetuned", "Featured"],
+    "size": 4370000000
+  },
+  "engine": "nitro"
+}

models/tinyllama-1.1b/model.json

@@ -1,5 +1,10 @@
 {
-  "source_url": "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
+  "sources": [
+    {
+      "filename": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
+    }
+  ],
   "id": "tinyllama-1.1b",
   "object": "model",
   "name": "TinyLlama Chat 1.1B Q4",
@@ -7,8 +12,9 @@
   "description": "TinyLlama is a tiny model with only 1.1B. It's a good model for less powerful computers.",
   "format": "gguf",
   "settings": {
-    "ctx_len": 2048,
-    "prompt_template": "<|system|>\n{system_message}<|user|>\n{prompt}<|assistant|>"
+    "ctx_len": 4096,
+    "prompt_template": "<|system|>\n{system_message}<|user|>\n{prompt}<|assistant|>",
+    "llama_model_path": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
   },
   "parameters": {
     "temperature": 0.7,
@@ -20,9 +26,9 @@
     "presence_penalty": 0
   },
   "metadata": {
     "author": "TinyLlama",
     "tags": ["Tiny", "Foundation Model"],
     "size": 669000000
   },
   "engine": "nitro"
 }

View File

@@ -1,29 +1,34 @@
 {
-  "source_url": "https://huggingface.co/janhq/trinity-v1.2-GGUF/resolve/main/trinity-v1.2.Q4_K_M.gguf",
+  "sources": [
+    {
+      "filename": "trinity-v1.2.Q4_K_M.gguf",
+      "url": "https://huggingface.co/janhq/trinity-v1.2-GGUF/resolve/main/trinity-v1.2.Q4_K_M.gguf"
+    }
+  ],
   "id": "trinity-v1.2-7b",
   "object": "model",
   "name": "Trinity-v1.2 7B Q4",
   "version": "1.0",
   "description": "Trinity is an experimental model merge using the Slerp method. Recommended for daily assistance purposes.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
-    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
+    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
+    "llama_model_path": "trinity-v1.2.Q4_K_M.gguf"
   },
   "parameters": {
     "temperature": 0.7,
     "top_p": 0.95,
     "stream": true,
     "max_tokens": 4096,
-    "stop": [],
     "frequency_penalty": 0,
     "presence_penalty": 0
   },
   "metadata": {
     "author": "Jan",
     "tags": ["7B", "Merged", "Featured"],
     "size": 4370000000,
     "cover": "https://raw.githubusercontent.com/janhq/jan/main/models/trinity-v1.2-7b/cover.png"
   },
   "engine": "nitro"
 }

View File

@@ -1,28 +1,33 @@
 {
-  "source_url": "https://huggingface.co/TheBloke/tulu-2-dpo-70B-GGUF/resolve/main/tulu-2-dpo-70b.Q4_K_M.gguf",
+  "sources": [
+    {
+      "filename": "tulu-2-dpo-70b.Q4_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/tulu-2-dpo-70B-GGUF/resolve/main/tulu-2-dpo-70b.Q4_K_M.gguf"
+    }
+  ],
   "id": "tulu-2-70b",
   "object": "model",
   "name": "Tulu 2 70B Q4",
   "version": "1.0",
   "description": "Tulu 70B is a strong alternative to Llama 2 70b Chat to act as helpful assistants.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
-    "prompt_template": "<|user|>\n{prompt}\n<|assistant|>"
+    "prompt_template": "<|user|>\n{prompt}\n<|assistant|>",
+    "llama_model_path": "tulu-2-dpo-70b.Q4_K_M.gguf"
   },
   "parameters": {
     "temperature": 0.7,
     "top_p": 0.95,
     "stream": true,
     "max_tokens": 4096,
-    "stop": [],
     "frequency_penalty": 0,
     "presence_penalty": 0
   },
   "metadata": {
     "author": "Lizpreciatior, The Bloke",
     "tags": ["70B", "Finetuned"],
     "size": 41400000000
   },
   "engine": "nitro"
 }

View File

@@ -1,29 +1,34 @@
 {
-  "source_url": "https://huggingface.co/TheBloke/WizardCoder-Python-13B-V1.0-GGUF/resolve/main/wizardcoder-python-13b-v1.0.Q5_K_M.gguf",
+  "sources": [
+    {
+      "filename": "wizardcoder-python-13b-v1.0.Q5_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/WizardCoder-Python-13B-V1.0-GGUF/resolve/main/wizardcoder-python-13b-v1.0.Q5_K_M.gguf"
+    }
+  ],
   "id": "wizardcoder-13b",
   "object": "model",
   "name": "Wizard Coder Python 13B Q5",
   "version": "1.0",
   "description": "WizardCoder 13B is a Python coding model. This model demonstrate high proficiency in specific domains like coding and mathematics.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
-    "prompt_template": "### Instruction:\n{prompt}\n### Response:"
+    "prompt_template": "### Instruction:\n{prompt}\n### Response:",
+    "llama_model_path": "wizardcoder-python-13b-v1.0.Q5_K_M.gguf"
   },
   "parameters": {
     "temperature": 0.7,
     "top_p": 0.95,
     "stream": true,
     "max_tokens": 4096,
     "stop": [],
     "frequency_penalty": 0,
     "presence_penalty": 0
   },
   "metadata": {
     "author": "WizardLM, The Bloke",
     "tags": ["Recommended", "13B", "Finetuned"],
     "size": 7870000000
   },
   "engine": "nitro"
 }

View File

@@ -1,29 +1,31 @@
 {
-  "source_url": "https://huggingface.co/TheBloke/Yarn-Mistral-7B-128k-GGUF/resolve/main/yarn-mistral-7b-128k.Q4_K_M.gguf",
+  "sources": [
+    {
+      "url": "https://huggingface.co/TheBloke/Yarn-Mistral-7B-128k-GGUF/resolve/main/yarn-mistral-7b-128k.Q4_K_M.gguf"
+    }
+  ],
   "id": "yarn-mistral-7b",
   "object": "model",
   "name": "Yarn Mistral 7B Q4",
   "version": "1.0",
   "description": "Yarn Mistral 7B is a language model for long context and supports a 128k token context window.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
     "prompt_template": "{prompt}"
   },
   "parameters": {
     "temperature": 0.7,
     "top_p": 0.95,
     "stream": true,
     "max_tokens": 4096,
-    "stop": [],
     "frequency_penalty": 0,
     "presence_penalty": 0
   },
   "metadata": {
     "author": "NousResearch, The Bloke",
-    "tags": ["7B","Finetuned"],
+    "tags": ["7B", "Finetuned"],
     "size": 4370000000
   },
   "engine": "nitro"
 }

View File

@@ -1,29 +1,34 @@
 {
-  "source_url": "https://huggingface.co/TheBloke/Yi-34B-Chat-GGUF/resolve/main/yi-34b-chat.Q5_K_M.gguf",
+  "sources": [
+    {
+      "filename": "yi-34b-chat.Q5_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/Yi-34B-Chat-GGUF/resolve/main/yi-34b-chat.Q5_K_M.gguf"
+    }
+  ],
   "id": "yi-34b",
   "object": "model",
   "name": "Yi 34B Q5",
   "version": "1.0",
   "description": "Yi-34B, a specialized chat model, is known for its diverse and creative responses and excels across various NLP tasks and benchmarks.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
-    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
+    "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
+    "llama_model_path": "yi-34b-chat.Q5_K_M.gguf"
   },
   "parameters": {
     "temperature": 0.7,
     "top_p": 0.95,
     "stream": true,
     "max_tokens": 4096,
     "stop": [],
     "frequency_penalty": 0,
     "presence_penalty": 0
   },
   "metadata": {
     "author": "01-ai, The Bloke",
     "tags": ["34B", "Foundational Model"],
     "size": 20660000000
   },
   "engine": "nitro"
 }

View File

@@ -14,6 +14,7 @@ const buttonVariants = cva('btn', {
       outline: 'btn-outline',
       secondary: 'btn-secondary',
      secondaryBlue: 'btn-secondary-blue',
+      secondaryDanger: 'btn-secondary-danger',
       ghost: 'btn-ghost',
       success: 'btn-success',
     },
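
The new secondaryDanger variant is consumed later in this commit by the
"Clear logs" button in Advanced settings. A usage sketch, assuming the
Button export from @janhq/uikit:

import { Button } from '@janhq/uikit'

// Renders with the btn-secondary-danger class added in the stylesheet below.
const ClearLogsButton = (props: { onClick: () => void }) => (
  <Button size="sm" themes="secondaryDanger" onClick={props.onClick}>
    Clear
  </Button>
)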

View File

@@ -9,13 +9,17 @@
   }

   &-secondary-blue {
-    @apply bg-blue-200 text-blue-600 hover:bg-blue-500/80;
+    @apply bg-blue-200 text-blue-600 hover:bg-blue-500/50;
   }

   &-danger {
     @apply bg-danger text-danger-foreground hover:bg-danger/90;
   }

+  &-secondary-danger {
+    @apply bg-red-200 text-red-600 hover:bg-red-500/50;
+  }
+
   &-outline {
     @apply border-input border bg-transparent;
   }

View File

@@ -1,6 +1,6 @@
 .input {
   @apply border-border placeholder:text-muted-foreground flex h-9 w-full rounded-lg border bg-transparent px-3 py-1 transition-colors;
-  @apply disabled:cursor-not-allowed disabled:opacity-50;
+  @apply disabled:cursor-not-allowed disabled:bg-zinc-100;
   @apply focus-within:outline-none focus-visible:outline-0 focus-visible:ring-2 focus-visible:ring-blue-500 focus-visible:ring-offset-1;
   @apply file:border-0 file:bg-transparent file:font-medium;
 }

View File

@@ -41,7 +41,10 @@ export default function RibbonNav() {
       icon: (
         <MessageCircleIcon
           size={20}
-          className="flex-shrink-0 text-muted-foreground"
+          className={twMerge(
+            'flex-shrink-0 text-muted-foreground',
+            serverEnabled && 'text-gray-300 dark:text-gray-700'
+          )}
         />
       ),
       state: MainViewState.Thread,
@@ -60,7 +63,7 @@ export default function RibbonNav() {
   const secondaryMenus = [
     {
-      name: 'Local Server',
+      name: 'Local API Server',
       icon: (
         <SquareCodeIcon
           size={20}

View File

@@ -9,11 +9,14 @@ import RibbonNav from '@/containers/Layout/Ribbon'
 import TopBar from '@/containers/Layout/TopBar'

+import { MainViewState } from '@/constants/screens'
 import { useMainViewState } from '@/hooks/useMainViewState'
+import { SUCCESS_SET_NEW_DESTINATION } from '@/hooks/useVaultDirectory'

 const BaseLayout = (props: PropsWithChildren) => {
   const { children } = props
-  const { mainViewState } = useMainViewState()
+  const { mainViewState, setMainViewState } = useMainViewState()
   const { theme, setTheme } = useTheme()
@@ -21,6 +24,12 @@ const BaseLayout = (props: PropsWithChildren) => {
     setTheme(theme as string)
   }, [setTheme, theme])

+  useEffect(() => {
+    if (localStorage.getItem(SUCCESS_SET_NEW_DESTINATION) === 'true') {
+      setMainViewState(MainViewState.Settings)
+    }
+  }, [])
+
   return (
     <div className="flex h-screen w-screen flex-1 overflow-hidden">
       <RibbonNav />

View File

@@ -105,6 +105,7 @@ export default function EventListenerWrapper({ children }: PropsWithChildren) {
       })
     }
     return () => {}
+    // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [])

   return (

View File

@@ -50,10 +50,12 @@ const availableShortcuts = [
 const ShortcutModal: React.FC = () => (
   <Modal>
-    <ModalTrigger asChild>
-      <Button size="sm" themes="secondary">
-        Show
-      </Button>
+    <ModalTrigger>
+      <div>
+        <Button size="sm" themes="secondaryBlue">
+          Show
+        </Button>
+      </div>
     </ModalTrigger>
     <ModalContent className="max-w-2xl">
       <ModalHeader>

View File

@@ -6,6 +6,7 @@ import {
   ModelExtension,
   abortDownload,
   joinPath,
+  ModelArtifact,
 } from '@janhq/core'

 import { useSetAtom } from 'jotai'
@@ -25,6 +26,23 @@ export default function useDownloadModel() {
   const addNewDownloadingModel = useSetAtom(addNewDownloadingModelAtom)

   const downloadModel = async (model: Model) => {
+    const childrenDownloadProgress: DownloadState[] = []
+    model.sources.forEach((source: ModelArtifact) => {
+      childrenDownloadProgress.push({
+        modelId: source.filename,
+        time: {
+          elapsed: 0,
+          remaining: 0,
+        },
+        speed: 0,
+        percent: 0,
+        size: {
+          total: 0,
+          transferred: 0,
+        },
+      })
+    })
+
     // set an initial download state
     setDownloadState({
       modelId: model.id,
@@ -38,6 +56,7 @@ export default function useDownloadModel() {
         total: 0,
         transferred: 0,
       },
+      children: childrenDownloadProgress,
     })

     addNewDownloadingModel(model)
@@ -46,6 +65,7 @@ export default function useDownloadModel() {
       .get<ModelExtension>(ExtensionTypeEnum.Model)
       ?.downloadModel(model, { ignoreSSL, proxy })
   }
+
   const abortModelDownload = async (model: Model) => {
     await abortDownload(
       await joinPath(['models', model.id, modelBinFileName(model)])
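
Each artifact in model.sources now seeds its own entry in children, so
multi-file models can report per-file progress. A hypothetical helper, not
part of the commit, showing how an aggregate percentage could be derived
from those child states:

// Assumed minimal shape of the relevant DownloadState fields.
interface ChildProgress {
  size: { total: number; transferred: number }
}

function overallPercent(children: ChildProgress[]): number {
  const total = children.reduce((sum, c) => sum + c.size.total, 0)
  const transferred = children.reduce((sum, c) => sum + c.size.transferred, 0)
  return total === 0 ? 0 : transferred / total
}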

View File

@@ -1,7 +1,6 @@
 import { useEffect, useState } from 'react'

-import { ExtensionTypeEnum, ModelExtension } from '@janhq/core'
-import { Model } from '@janhq/core'
+import { ExtensionTypeEnum, ModelExtension, Model } from '@janhq/core'

 import { extensionManager } from '@/extension/ExtensionManager'
@@ -25,6 +24,7 @@ export function useGetConfiguredModels() {
   useEffect(() => {
     fetchModels()
+    // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [])

   return { loading, models }

View File

@@ -6,7 +6,7 @@ import { atom, useAtom } from 'jotai'
 import { extensionManager } from '@/extension/ExtensionManager'

-const downloadedModelsAtom = atom<Model[]>([])
+export const downloadedModelsAtom = atom<Model[]>([])

 export function useGetDownloadedModels() {
   const [downloadedModels, setDownloadedModels] = useAtom(downloadedModelsAtom)
@@ -15,7 +15,8 @@ export function useGetDownloadedModels() {
     getDownloadedModels().then((downloadedModels) => {
       setDownloadedModels(downloadedModels)
     })
-  }, [setDownloadedModels])
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [])

   return { downloadedModels, setDownloadedModels }
 }

View File

@@ -110,6 +110,7 @@ export default function useRecommendedModel() {
     console.debug(`Using last used model ${lastUsedModel.id}`)
     setRecommendedModel(lastUsedModel)
+    // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [getAndSortDownloadedModels, activeThread])

   useEffect(() => {

View File

@@ -13,6 +13,7 @@ export const useSettings = () => {
   useEffect(() => {
     setTimeout(() => validateSettings, 3000)
+    // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [])

   const validateSettings = async () => {
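
The same lint opt-out appears across EventListenerWrapper,
useGetConfiguredModels, useGetDownloadedModels, useRecommendedModel, and
this hook: the effect is meant to run exactly once on mount, so the
dependency array is left empty on purpose and react-hooks/exhaustive-deps
is silenced at the closing line. A generic sketch of the pattern (the
commit inlines it rather than extracting a helper like this):

import { useEffect } from 'react'

// Hypothetical mount-only helper illustrating the pattern.
function useOnMount(effect: () => void) {
  useEffect(() => {
    effect()
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [])
}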

View File

@@ -0,0 +1,105 @@
import { useEffect } from 'react'
import { fs, AppConfiguration } from '@janhq/core'
import { atom, useAtom } from 'jotai'
import { useMainViewState } from './useMainViewState'
const isSameDirectoryAtom = atom(false)
const isDirectoryConfirmAtom = atom(false)
const isErrorSetNewDestAtom = atom(false)
const currentPathAtom = atom('')
const newDestinationPathAtom = atom('')
export const SUCCESS_SET_NEW_DESTINATION = 'successSetNewDestination'
export function useVaultDirectory() {
const [isSameDirectory, setIsSameDirectory] = useAtom(isSameDirectoryAtom)
const { setMainViewState } = useMainViewState()
const [isDirectoryConfirm, setIsDirectoryConfirm] = useAtom(
isDirectoryConfirmAtom
)
const [isErrorSetNewDest, setIsErrorSetNewDest] = useAtom(
isErrorSetNewDestAtom
)
const [currentPath, setCurrentPath] = useAtom(currentPathAtom)
const [newDestinationPath, setNewDestinationPath] = useAtom(
newDestinationPathAtom
)
useEffect(() => {
window.core?.api
?.getAppConfigurations()
?.then((appConfig: AppConfiguration) => {
setCurrentPath(appConfig.data_folder)
})
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
const setNewDestination = async () => {
const destFolder = await window.core?.api?.selectDirectory()
setNewDestinationPath(destFolder)
if (destFolder) {
console.debug(`Destination folder selected: ${destFolder}`)
try {
const appConfiguration: AppConfiguration =
await window.core?.api?.getAppConfigurations()
const currentJanDataFolder = appConfiguration.data_folder
if (currentJanDataFolder === destFolder) {
console.debug(
`Destination folder is the same as current folder. Ignore..`
)
setIsSameDirectory(true)
setIsDirectoryConfirm(false)
return
} else {
setIsSameDirectory(false)
setIsDirectoryConfirm(true)
}
setIsErrorSetNewDest(false)
} catch (e) {
console.error(`Error: ${e}`)
setIsErrorSetNewDest(true)
}
}
}
const applyNewDestination = async () => {
try {
const appConfiguration: AppConfiguration =
await window.core?.api?.getAppConfigurations()
const currentJanDataFolder = appConfiguration.data_folder
appConfiguration.data_folder = newDestinationPath
await fs.syncFile(currentJanDataFolder, newDestinationPath)
await window.core?.api?.updateAppConfiguration(appConfiguration)
console.debug(
`File sync finished from ${currentPath} to ${newDestinationPath}`
)
setIsErrorSetNewDest(false)
localStorage.setItem(SUCCESS_SET_NEW_DESTINATION, 'true')
await window.core?.api?.relaunch()
} catch (e) {
console.error(`Error: ${e}`)
setIsErrorSetNewDest(true)
}
}
return {
setNewDestination,
newDestinationPath,
applyNewDestination,
isSameDirectory,
setIsDirectoryConfirm,
isDirectoryConfirm,
setIsSameDirectory,
currentPath,
isErrorSetNewDest,
setIsErrorSetNewDest,
}
}
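
The relaunch handshake runs through the exported localStorage key:
applyNewDestination writes the flag and relaunches the app, and the layout
and settings screens changed later in this commit read it on the next boot
to land the user back on Advanced Settings. A condensed sketch of the round
trip, with navigateToSettings standing in for the real
setMainViewState/setActiveStaticMenu calls:

// Sketch, not commit code: the two halves of the handshake.
const FLAG = 'successSetNewDestination'

function beforeRelaunch() {
  localStorage.setItem(FLAG, 'true')
  // window.core?.api?.relaunch() follows in the real hook
}

function onNextBoot(navigateToSettings: () => void) {
  if (localStorage.getItem(FLAG) === 'true') {
    navigateToSettings()
  }
}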

View File

@@ -21,7 +21,7 @@
     "class-variance-authority": "^0.7.0",
     "framer-motion": "^10.16.4",
     "highlight.js": "^11.9.0",
-    "jotai": "^2.4.0",
+    "jotai": "^2.6.0",
     "lodash": "^4.17.21",
     "lucide-react": "^0.291.0",
     "marked": "^9.1.2",

View File

@@ -26,7 +26,7 @@ import { useCreateNewThread } from '@/hooks/useCreateNewThread'
 import useDownloadModel from '@/hooks/useDownloadModel'
 import { useDownloadState } from '@/hooks/useDownloadState'
 import { getAssistants } from '@/hooks/useGetAssistants'
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
+import { downloadedModelsAtom } from '@/hooks/useGetDownloadedModels'
 import { useMainViewState } from '@/hooks/useMainViewState'

 import { toGibibytes } from '@/utils/converter'
@@ -43,8 +43,8 @@ type Props = {
 const ExploreModelItemHeader: React.FC<Props> = ({ model, onClick, open }) => {
   const { downloadModel } = useDownloadModel()
-  const { downloadedModels } = useGetDownloadedModels()
-  const { modelDownloadStateAtom, downloadStates } = useDownloadState()
+  const downloadedModels = useAtomValue(downloadedModelsAtom)
+  const { modelDownloadStateAtom } = useDownloadState()
   const { requestCreateNewThread } = useCreateNewThread()
   const totalRam = useAtomValue(totalRamAtom)
   const serverEnabled = useAtomValue(serverEnabledAtom)
@@ -100,9 +100,7 @@ const ExploreModelItemHeader: React.FC<Props> = ({ model, onClick, open }) => {
         )}
       </Tooltip>
     )
-  }
-
-  if (downloadState != null && downloadStates.length > 0) {
+  } else if (downloadState != null) {
     downloadButton = <ModalCancelDownload model={model} />
   }

View File

@@ -1,3 +1,4 @@
+/* eslint-disable @typescript-eslint/no-explicit-any */
 'use client'

 import React, { useEffect, useState } from 'react'
@@ -55,16 +56,16 @@ const hostAtom = atom('127.0.0.1')
 const portAtom = atom('1337')

 const LocalServerScreen = () => {
+  const [errorRangePort, setErrorRangePort] = useState(false)
   const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
   const showing = useAtomValue(showRightSideBarAtom)

   const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
   const modelEngineParams = toSettingParams(activeModelParams)
   const componentDataEngineSetting = getConfigurationsData(modelEngineParams)

   const { openServerLog, clearServerLog } = useServerLog()
-  const { activeModel, startModel, stateModel } = useActiveModel()
+  const { startModel, stateModel } = useActiveModel()
   const [selectedModel] = useAtom(selectedModelAtom)
   const [isCorsEnabled, setIsCorsEnabled] = useAtom(corsEnabledAtom)
@@ -77,6 +78,15 @@ const LocalServerScreen = () => {
   const [firstTimeVisitAPIServer, setFirstTimeVisitAPIServer] =
     useState<boolean>(false)

+  const handleChangePort = (value: any) => {
+    if (Number(value) <= 0 || Number(value) >= 65536) {
+      setErrorRangePort(true)
+    } else {
+      setErrorRangePort(false)
+    }
+    setPort(value)
+  }
+
   useEffect(() => {
     if (
       localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) === null ||
@@ -87,6 +97,10 @@ const LocalServerScreen = () => {
     }
   }, [firstTimeVisitAPIServer])

+  useEffect(() => {
+    handleChangePort(port)
+  }, [])
+
   return (
     <div className="flex h-full w-full">
       {/* Left SideBar */}
@@ -102,7 +116,7 @@ const LocalServerScreen = () => {
           <Button
             block
             themes={serverEnabled ? 'danger' : 'primary'}
-            disabled={stateModel.loading}
+            disabled={stateModel.loading || errorRangePort}
             onClick={() => {
               if (serverEnabled) {
                 window.core?.api?.stopServer()
@@ -158,13 +172,21 @@ const LocalServerScreen = () => {
            </Select>
            <Input
-              className="w-[60px] flex-shrink-0"
+              className={twMerge(
+                'w-[70px] flex-shrink-0',
+                errorRangePort && 'border-danger'
+              )}
              value={port}
-              onChange={(e) => setPort(e.target.value)}
-              maxLength={4}
+              onChange={(e) => {
+                handleChangePort(e.target.value)
+              }}
+              maxLength={5}
              disabled={serverEnabled}
            />
          </div>
+          {errorRangePort && (
+            <p className="mt-2 text-xs text-danger">{`The port range should be from 0 to 65536`}</p>
+          )}
        </div>

        <div>
          <label
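
The port check accepts values strictly between 0 and 65536 and gates the
Start Server button on the result. Extracted as a pure function (a sketch
of the rule, not how the commit factors it):

// Mirrors handleChangePort's bounds: Number(value) > 0 and < 65536.
function isPortInRange(value: string): boolean {
  const port = Number(value)
  return port > 0 && port < 65536
}

// isPortInRange('1337') === true; isPortInRange('65536') === false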

View File

@@ -0,0 +1,57 @@
import React from 'react'
import {
Modal,
ModalPortal,
ModalContent,
ModalHeader,
ModalTitle,
ModalFooter,
ModalClose,
Button,
} from '@janhq/uikit'
import { useVaultDirectory } from '@/hooks/useVaultDirectory'
const ModalChangeDirectory = () => {
const {
isDirectoryConfirm,
setIsDirectoryConfirm,
applyNewDestination,
newDestinationPath,
} = useVaultDirectory()
return (
<Modal
open={isDirectoryConfirm}
onOpenChange={() => setIsDirectoryConfirm(false)}
>
<ModalPortal />
<ModalContent>
<ModalHeader>
<ModalTitle>Relocate Jan Data Folder</ModalTitle>
</ModalHeader>
<p className="text-muted-foreground">
Are you sure you want to relocate Jan data folder to{' '}
<span className="font-medium text-foreground">
{newDestinationPath}
</span>
? A restart will be required afterward.
</p>
<ModalFooter>
<div className="flex gap-x-2">
<ModalClose asChild onClick={() => setIsDirectoryConfirm(false)}>
<Button themes="ghost">Cancel</Button>
</ModalClose>
<ModalClose asChild>
<Button onClick={applyNewDestination} autoFocus>
Yes, Proceed
</Button>
</ModalClose>
</div>
</ModalFooter>
</ModalContent>
</Modal>
)
}
export default ModalChangeDirectory

View File

@@ -0,0 +1,44 @@
import React from 'react'
import {
Modal,
ModalPortal,
ModalContent,
ModalHeader,
ModalTitle,
ModalFooter,
ModalClose,
Button,
} from '@janhq/uikit'
import { useVaultDirectory } from '@/hooks/useVaultDirectory'
const ModalErrorSetDestGlobal = () => {
const { isErrorSetNewDest, setIsErrorSetNewDest } = useVaultDirectory()
return (
<Modal
open={isErrorSetNewDest}
onOpenChange={() => setIsErrorSetNewDest(false)}
>
<ModalPortal />
<ModalContent>
<ModalHeader>
<ModalTitle>Error Occurred</ModalTitle>
</ModalHeader>
<p className="text-muted-foreground">
Oops! Something went wrong. Jan data folder remains the same. Please
try again.
</p>
<ModalFooter>
<div className="flex gap-x-2">
<ModalClose asChild onClick={() => setIsErrorSetNewDest(false)}>
<Button themes="danger">Got it</Button>
</ModalClose>
</div>
</ModalFooter>
</ModalContent>
</Modal>
)
}
export default ModalErrorSetDestGlobal

View File

@@ -0,0 +1,49 @@
import React from 'react'
import {
Modal,
ModalPortal,
ModalContent,
ModalHeader,
ModalTitle,
ModalFooter,
ModalClose,
Button,
} from '@janhq/uikit'
import { useVaultDirectory } from '@/hooks/useVaultDirectory'
const ModalSameDirectory = () => {
const { isSameDirectory, setIsSameDirectory, setNewDestination } =
useVaultDirectory()
return (
<Modal
open={isSameDirectory}
onOpenChange={() => setIsSameDirectory(false)}
>
<ModalPortal />
<ModalContent>
<ModalHeader>
<ModalTitle>Unable to move files</ModalTitle>
</ModalHeader>
<p className="text-muted-foreground">
{`It seems like the folder you've chosen same with current directory`}
</p>
<ModalFooter>
<div className="flex gap-x-2">
<ModalClose asChild onClick={() => setIsSameDirectory(false)}>
<Button themes="ghost">Cancel</Button>
</ModalClose>
<ModalClose asChild>
<Button themes="danger" onClick={setNewDestination} autoFocus>
Choose a different folder
</Button>
</ModalClose>
</div>
</ModalFooter>
</ModalContent>
</Modal>
)
}
export default ModalSameDirectory

View File

@@ -0,0 +1,52 @@
import { Button, Input } from '@janhq/uikit'
import { PencilIcon, FolderOpenIcon } from 'lucide-react'
import { useVaultDirectory } from '@/hooks/useVaultDirectory'
import ModalChangeDirectory from './ModalChangeDirectory'
import ModalErrorSetDestGlobal from './ModalErrorSetDestGlobal'
import ModalSameDirectory from './ModalSameDirectory'
const DataFolder = () => {
const { currentPath, setNewDestination } = useVaultDirectory()
return (
<>
<div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
<div className="flex-shrink-0 space-y-1.5">
<div className="flex gap-x-2">
<h6 className="text-sm font-semibold capitalize">
Jan Data Folder
</h6>
</div>
<p className="leading-relaxed">
Where messages, model configurations, and other user data are
placed.
</p>
</div>
<div className="flex items-center gap-x-3">
<div className="relative">
<Input value={currentPath} className="w-[240px] pr-8" disabled />
<FolderOpenIcon
size={16}
className="absolute right-2 top-1/2 -translate-y-1/2"
/>
</div>
<Button
size="sm"
themes="outline"
className="h-9 w-9 p-0"
onClick={setNewDestination}
>
<PencilIcon size={16} />
</Button>
</div>
</div>
<ModalSameDirectory />
<ModalChangeDirectory />
<ModalErrorSetDestGlobal />
</>
)
}
export default DataFolder

View File

@@ -9,7 +9,7 @@ import {
   ChangeEvent,
 } from 'react'

-import { fs, AppConfiguration } from '@janhq/core'
+import { fs } from '@janhq/core'
 import { Switch, Button, Input } from '@janhq/uikit'

 import ShortcutModal from '@/containers/ShortcutModal'
@@ -20,6 +20,8 @@ import { FeatureToggleContext } from '@/context/FeatureToggle'
 import { useSettings } from '@/hooks/useSettings'

+import DataFolder from './DataFolder'
+
 const Advanced = () => {
   const {
     experimentalFeature,
@@ -31,6 +33,7 @@ const Advanced = () => {
   } = useContext(FeatureToggleContext)
   const [partialProxy, setPartialProxy] = useState<string>(proxy)
   const [gpuEnabled, setGpuEnabled] = useState<boolean>(false)
+
   const { readSettings, saveSettings, validateSettings, setShowNotification } =
     useSettings()
   const onProxyChange = useCallback(
@@ -46,17 +49,6 @@ const Advanced = () => {
     [setPartialProxy, setProxy]
   )

-  // TODO: remove me later.
-  const [currentPath, setCurrentPath] = useState('')
-
-  useEffect(() => {
-    window.core?.api
-      ?.getAppConfigurations()
-      ?.then((appConfig: AppConfiguration) => {
-        setCurrentPath(appConfig.data_folder)
-      })
-  }, [])
-
   useEffect(() => {
     readSettings().then((settings) => {
       setGpuEnabled(settings.run_mode === 'gpu')
@@ -73,45 +65,55 @@ const Advanced = () => {
     })
   }

-  const onJanVaultDirectoryClick = async () => {
-    const destFolder = await window.core?.api?.selectDirectory()
-    if (destFolder) {
-      console.debug(`Destination folder selected: ${destFolder}`)
-      try {
-        const appConfiguration: AppConfiguration =
-          await window.core?.api?.getAppConfigurations()
-        const currentJanDataFolder = appConfiguration.data_folder
-        if (currentJanDataFolder === destFolder) {
-          console.debug(
-            `Destination folder is the same as current folder. Ignore..`
-          )
-          return
-        }
-        appConfiguration.data_folder = destFolder
-        await fs.syncFile(currentJanDataFolder, destFolder)
-        await window.core?.api?.updateAppConfiguration(appConfiguration)
-        console.debug(
-          `File sync finished from ${currentJanDataFolder} to ${destFolder}`
-        )
-        await window.core?.api?.relaunch()
-      } catch (e) {
-        console.error(`Error: ${e}`)
-      }
-    }
-  }
-
   return (
     <div className="block w-full">
+      {/* Keyboard shortcut */}
+      <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
+        <div className="flex-shrink-0 space-y-1.5">
+          <div className="flex gap-x-2">
+            <h6 className="text-sm font-semibold capitalize">
+              Keyboard Shortcuts
+            </h6>
+          </div>
+          <p className="leading-relaxed">
+            Shortcuts that you might find useful in Jan app.
+          </p>
+        </div>
+        <ShortcutModal />
+      </div>
+
+      {/* Experimental */}
+      <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
+        <div className="flex-shrink-0 space-y-1.5">
+          <div className="flex gap-x-2">
+            <h6 className="text-sm font-semibold capitalize">
+              Experimental Mode
+            </h6>
+          </div>
+          <p className="leading-relaxed">
+            Enable experimental features that may be unstable tested.
+          </p>
+        </div>
+        <Switch
+          checked={experimentalFeature}
+          onCheckedChange={(e) => {
+            if (e === true) {
+              setExperimentalFeature(true)
+            } else {
+              setExperimentalFeature(false)
+            }
+          }}
+        />
+      </div>
+
       {/* CPU / GPU switching */}
       {!isMac && (
         <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
-          <div className="w-4/5 flex-shrink-0 space-y-1.5">
+          <div className="flex-shrink-0 space-y-1.5">
             <div className="flex gap-x-2">
               <h6 className="text-sm font-semibold capitalize">NVidia GPU</h6>
             </div>
-            <p className="whitespace-pre-wrap leading-relaxed">
+            <p className="leading-relaxed">
               Enable GPU acceleration for NVidia GPUs.
             </p>
           </div>
@@ -133,36 +135,17 @@ const Advanced = () => {
           />
         </div>
       )}
-      {/* Experimental */}
-      <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
-        <div className="w-4/5 flex-shrink-0 space-y-1.5">
-          <div className="flex gap-x-2">
-            <h6 className="text-sm font-semibold capitalize">
-              Experimental Mode
-            </h6>
-          </div>
-          <p className="whitespace-pre-wrap leading-relaxed">
-            Enable experimental features that may be unstable tested.
-          </p>
-        </div>
-        <Switch
-          checked={experimentalFeature}
-          onCheckedChange={(e) => {
-            if (e === true) {
-              setExperimentalFeature(true)
-            } else {
-              setExperimentalFeature(false)
-            }
-          }}
-        />
-      </div>
+
+      {/* Directory */}
+      {experimentalFeature && <DataFolder />}

       {/* Proxy */}
       <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
-        <div className="w-4/5 flex-shrink-0 space-y-1.5">
+        <div className="flex-shrink-0 space-y-1.5">
           <div className="flex gap-x-2">
             <h6 className="text-sm font-semibold capitalize">HTTPS Proxy</h6>
           </div>
-          <p className="whitespace-pre-wrap leading-relaxed">
+          <p className="leading-relaxed">
             Specify the HTTPS proxy or leave blank (proxy auto-configuration and
             SOCKS not supported).
           </p>
@@ -173,15 +156,16 @@ const Advanced = () => {
           />
         </div>
       </div>
+
       {/* Ignore SSL certificates */}
       <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
-        <div className="w-4/5 flex-shrink-0 space-y-1.5">
+        <div className="flex-shrink-0 space-y-1.5">
           <div className="flex gap-x-2">
             <h6 className="text-sm font-semibold capitalize">
               Ignore SSL certificates
             </h6>
           </div>
-          <p className="whitespace-pre-wrap leading-relaxed">
+          <p className="leading-relaxed">
             Allow self-signed or unverified certificates - may be required for
             certain proxies.
           </p>
@@ -197,79 +181,19 @@ const Advanced = () => {
           }}
         />
       </div>
-      {window.electronAPI && (
-        <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
-          <div className="w-4/5 flex-shrink-0 space-y-1.5">
-            <div className="flex gap-x-2">
-              <h6 className="text-sm font-semibold capitalize">
-                Open App Directory
-              </h6>
-            </div>
-            <p className="whitespace-pre-wrap leading-relaxed">
-              Open the directory where your app data, like conversation history
-              and model configurations, is located.
-            </p>
-          </div>
-          <Button
-            size="sm"
-            themes="secondary"
-            onClick={() => window.electronAPI.openAppDirectory()}
-          >
-            Open
-          </Button>
-        </div>
-      )}
+
+      {/* Claer log */}
       <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
-        <div className="w-4/5 flex-shrink-0 space-y-1.5">
+        <div className="flex-shrink-0 space-y-1.5">
           <div className="flex gap-x-2">
             <h6 className="text-sm font-semibold capitalize">Clear logs</h6>
           </div>
-          <p className="whitespace-pre-wrap leading-relaxed">
-            Clear all logs from Jan app.
-          </p>
+          <p className="leading-relaxed">Clear all logs from Jan app.</p>
         </div>
-        <Button size="sm" themes="secondary" onClick={clearLogs}>
+        <Button size="sm" themes="secondaryDanger" onClick={clearLogs}>
           Clear
         </Button>
       </div>
-
-      {experimentalFeature && (
-        <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
-          <div className="w-4/5 flex-shrink-0 space-y-1.5">
-            <div className="flex gap-x-2">
-              <h6 className="text-sm font-semibold capitalize">
-                Jan Data Folder
-              </h6>
-            </div>
-            <p className="whitespace-pre-wrap leading-relaxed">
-              Where messages, model configurations, and other user data is
-              placed.
-            </p>
-            <p className="whitespace-pre-wrap leading-relaxed text-gray-500">
-              {`${currentPath}`}
-            </p>
-          </div>
-          <Button
-            size="sm"
-            themes="secondary"
-            onClick={onJanVaultDirectoryClick}
-          >
-            Select
-          </Button>
-        </div>
-      )}
-
-      <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
-        <div className="w-4/5 flex-shrink-0 space-y-1.5">
-          <div className="flex gap-x-2">
-            <h6 className="text-sm font-semibold capitalize">
-              Keyboard Shortcuts
-            </h6>
-          </div>
-          <p className="whitespace-pre-wrap leading-relaxed">
-            Shortcuts that you might find useful in Jan app.
-          </p>
-        </div>
-        <ShortcutModal />
-      </div>
     </div>
   )
 }

View File

@@ -111,7 +111,7 @@ const ExtensionCatalog = () => {
             onChange={handleFileChange}
           />
           <Button
-            themes="secondary"
+            themes="secondaryBlue"
             size="sm"
             onClick={() => fileInputRef.current?.click()}
           >

View File

@@ -7,14 +7,14 @@ import { motion as m } from 'framer-motion'
 import { twMerge } from 'tailwind-merge'

+import { SUCCESS_SET_NEW_DESTINATION } from '@/hooks/useVaultDirectory'
 import Advanced from '@/screens/Settings/Advanced'
 import AppearanceOptions from '@/screens/Settings/Appearance'
 import ExtensionCatalog from '@/screens/Settings/CoreExtensions'
 import Models from '@/screens/Settings/Models'

-import { formatExtensionsName } from '@/utils/converter'
-
 const SettingsScreen = () => {
   const [activeStaticMenu, setActiveStaticMenu] = useState('My Models')
   const [menus, setMenus] = useState<any[]>([])
@@ -46,6 +46,12 @@ const SettingsScreen = () => {
     }
   }

+  useEffect(() => {
+    if (localStorage.getItem(SUCCESS_SET_NEW_DESTINATION) === 'true') {
+      setActiveStaticMenu('Advanced Settings')
+    }
+  }, [])
+
   return (
     <div className="flex h-full bg-background">
       <div className="flex h-full w-64 flex-shrink-0 flex-col overflow-y-auto border-r border-border">

View File

@@ -4,6 +4,8 @@ type DownloadState = {
   speed: number
   percent: number
   size: DownloadSize
+  isFinished?: boolean
+  children?: DownloadState[]
   error?: string
 }

View File

@@ -2,7 +2,7 @@ import { Model } from '@janhq/core'
 export const modelBinFileName = (model: Model) => {
   const modelFormatExt = '.gguf'
-  const extractedFileName = model.source_url?.split('/').pop() ?? model.id
+  const extractedFileName = model.sources[0]?.url.split('/').pop() ?? model.id
   const fileName = extractedFileName.toLowerCase().endsWith(modelFormatExt)
     ? extractedFileName
     : model.id
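
With the schema change, the binary filename is derived from the first
artifact's URL rather than from source_url; the fallback to model.id still
applies when the extracted name does not end in .gguf. For example, with
illustrative values:

// Resolution rule after this change, on assumed inputs.
const url =
  'https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GGUF/resolve/main/starling-lm-7b-alpha.Q4_K_M.gguf'
const extracted = url.split('/').pop() ?? 'starling-7b'
const fileName = extracted.toLowerCase().endsWith('.gguf')
  ? extracted
  : 'starling-7b'
// fileName === 'starling-lm-7b-alpha.Q4_K_M.gguf'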

View File

@@ -40,6 +40,8 @@ export const toSettingParams = (
     n_parallel: undefined,
     cpu_threads: undefined,
     prompt_template: undefined,
+    llama_model_path: undefined,
+    mmproj: undefined,
   }

   const settingParams: ModelSettingParams = {}
const settingParams: ModelSettingParams = {} const settingParams: ModelSettingParams = {}