Merge branch 'dev' into docs-pena-team

Henry 2024-03-04 16:37:34 +09:00 committed by GitHub
commit 6844bf606d
No known key found for this signature in database (GPG Key ID: B5690EEEBB952194)
26 changed files with 415 additions and 320 deletions

.gitignore (vendored): 4 lines changed
View File

@@ -31,3 +31,7 @@ extensions/inference-nitro-extension/bin/saved-*
 extensions/inference-nitro-extension/bin/*.tar.gz
 extensions/inference-nitro-extension/bin/vulkaninfoSDK.exe
 extensions/inference-nitro-extension/bin/vulkaninfo
+# Turborepo
+.turbo

View File

@@ -1,11 +1,11 @@
----
 openapi: 3.0.0
 info:
 title: API Reference
 description: >
 # Introduction
-Jan API is compatible with the [OpenAI
-API](https://platform.openai.com/docs/api-reference).
+Jan API is compatible with the [OpenAI API](https://platform.openai.com/docs/api-reference).
 version: 0.1.8
 contact:
 name: Jan Discord
@@ -20,12 +20,12 @@ tags:
 description: List and describe the various models available in the API.
 - name: Chat
 description: >
-Given a list of messages comprising a conversation, the model will return
-a response.
+Given a list of messages comprising a conversation, the model will
+return a response.
 - name: Messages
 description: >
-Messages capture a conversation's content. This can include the content
-from LLM responses and other metadata from [chat
+Messages capture a conversation's content. This can include the
+content from LLM responses and other metadata from [chat
 completions](/specs/chats).
 - name: Threads
 - name: Assistants
@@ -49,16 +49,16 @@ paths:
 summary: |
 Create chat completion
 description: >
-Creates a model response for the given chat conversation. <a href =
-"https://platform.openai.com/docs/api-reference/chat/create"> Equivalent
-to OpenAI's create chat completion. </a>
+Creates a model response for the given chat conversation. <a href
+= "https://platform.openai.com/docs/api-reference/chat/create">
+Equivalent to OpenAI's create chat completion. </a>
 requestBody:
 content:
 application/json:
 schema:
 $ref: specs/chat.yaml#/components/schemas/ChatCompletionRequest
 responses:
-'200':
+"200":
 description: OK
 content:
 application/json:
@@ -192,9 +192,7 @@ paths:
 }
-response =
-requests.post('http://localhost:1337/v1/chat/completions',
-json=data)
+response = requests.post('http://localhost:1337/v1/chat/completions', json=data)
 print(response.json())
 /models:
@@ -204,12 +202,12 @@ paths:
 - Models
 summary: List models
 description: >
-Lists the currently available models, and provides basic information
-about each one such as the owner and availability. <a href =
-"https://platform.openai.com/docs/api-reference/models/list"> Equivalent
-to OpenAI's list model. </a>
+Lists the currently available models, and provides basic
+information about each one such as the owner and availability. <a href
+= "https://platform.openai.com/docs/api-reference/models/list">
+Equivalent to OpenAI's list model. </a>
 responses:
-'200':
+"200":
 description: OK
 content:
 application/json:
@@ -228,14 +226,6 @@ paths:
 headers: {Accept: 'application/json'}
 });
 const data = await response.json();
-- lang: Python
-source: |-
-import requests
-url = 'http://localhost:1337/v1/models'
-headers = {'Accept': 'application/json'}
-response = requests.get(url, headers=headers)
-data = response.json()
 - lang: Node.js
 source: |-
 const fetch = require('node-fetch');
@@ -249,7 +239,15 @@ paths:
 fetch(url, options)
 .then(res => res.json())
 .then(json => console.log(json));
-/models/download/{model_id}:
+- lang: Python
+source: |-
+import requests
+url = 'http://localhost:1337/v1/models'
+headers = {'Accept': 'application/json'}
+response = requests.get(url, headers=headers)
+data = response.json()
+"/models/download/{model_id}":
 get:
 operationId: downloadModel
 tags:
@@ -267,7 +265,7 @@ paths:
 description: |
 The ID of the model to use for this request.
 responses:
-'200':
+"200":
 description: OK
 content:
 application/json:
@@ -304,20 +302,18 @@ paths:
 import requests
-response =
-requests.get('http://localhost:1337/v1/models/download/{model_id}',
-headers={'accept': 'application/json'})
+response = requests.get('http://localhost:1337/v1/models/download/{model_id}', headers={'accept': 'application/json'})
 data = response.json()
-/models/{model_id}:
+"/models/{model_id}":
 get:
 operationId: retrieveModel
 tags:
 - Models
 summary: Retrieve model
 description: >
-Get a model instance, providing basic information about the model such
-as the owner and permissioning. <a href =
+Get a model instance, providing basic information about the model
+such as the owner and permissioning. <a href =
 "https://platform.openai.com/docs/api-reference/models/retrieve">
 Equivalent to OpenAI's retrieve model. </a>
 parameters:
@@ -330,7 +326,7 @@ paths:
 description: |
 The ID of the model to use for this request.
 responses:
-'200':
+"200":
 description: OK
 content:
 application/json:
@@ -374,9 +370,7 @@ paths:
 model_id = 'mistral-ins-7b-q4'
-response =
-requests.get(f'http://localhost:1337/v1/models/{model_id}',
-headers={'accept': 'application/json'})
+response = requests.get(f'http://localhost:1337/v1/models/{model_id}', headers={'accept': 'application/json'})
 print(response.json())
 delete:
@@ -398,7 +392,7 @@ paths:
 description: |
 The model id to delete
 responses:
-'200':
+"200":
 description: OK
 content:
 application/json:
@@ -442,9 +436,7 @@ paths:
 model_id = 'mistral-ins-7b-q4'
-response =
-requests.delete(f'http://localhost:1337/v1/models/{model_id}',
-headers={'accept': 'application/json'})
+response = requests.delete(f'http://localhost:1337/v1/models/{model_id}', headers={'accept': 'application/json'})
 /threads:
 post:
 operationId: createThread
@@ -462,7 +454,7 @@ paths:
 schema:
 $ref: specs/threads.yaml#/components/schemas/CreateThreadObject
 responses:
-'200':
+"200":
 description: Thread created successfully
 content:
 application/json:
@@ -471,8 +463,8 @@ paths:
 x-codeSamples:
 - lang: cURL
 source: |
 curl -X POST http://localhost:1337/v1/threads \
 -H "Content-Type: application/json" \
 -d '{
 "messages": [{
 "role": "user",
@@ -483,6 +475,73 @@
 "content": "How does AI work? Explain it in simple terms."
 }]
 }'
- lang: JavaScript
source: |-
const fetch = require('node-fetch');
fetch('http://localhost:1337/v1/threads', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
messages: [
{
role: 'user',
content: 'Hello, what is AI?',
file_ids: ['file-abc123']
},
{
role: 'user',
content: 'How does AI work? Explain it in simple terms.'
}
]
})
});
- lang: Node.js
source: |-
const fetch = require('node-fetch');
fetch('http://localhost:1337/v1/threads', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
messages: [
{
role: 'user',
content: 'Hello, what is AI?',
file_ids: ['file-abc123']
},
{
role: 'user',
content: 'How does AI work? Explain it in simple terms.'
}
]
})
});
- lang: Python
source: |-
import requests
url = 'http://localhost:1337/v1/threads'
payload = {
'messages': [
{
'role': 'user',
'content': 'Hello, what is AI?',
'file_ids': ['file-abc123']
},
{
'role': 'user',
'content': 'How does AI work? Explain it in simple terms.'
}
]
}
response = requests.post(url, json=payload)
print(response.text)
 get:
 operationId: listThreads
 tags:
@@ -491,7 +550,7 @@
 description: |
 Retrieves a list of all threads available in the system.
 responses:
-'200':
+"200":
 description: List of threads retrieved successfully
 content:
 application/json:
@@ -516,10 +575,37 @@
 metadata: {}
 x-codeSamples:
 - lang: cURL
-source: |
+source: |-
 curl http://localhost:1337/v1/threads \
--H "Content-Type: application/json" \
-/threads/{thread_id}:
+-H "Content-Type: application/json"
+- lang: JavaScript
source: |-
const fetch = require('node-fetch');
fetch('http://localhost:1337/v1/threads', {
method: 'GET',
headers: {'Content-Type': 'application/json'}
}).then(res => res.json())
.then(json => console.log(json));
- lang: Node.js
source: |-
const fetch = require('node-fetch');
fetch('http://localhost:1337/v1/threads', {
method: 'GET',
headers: {'Content-Type': 'application/json'}
}).then(res => res.json())
.then(json => console.log(json));
- lang: Python
source: |-
import requests
url = 'http://localhost:1337/v1/threads'
headers = {'Content-Type': 'application/json'}
response = requests.get(url, headers=headers)
print(response.json())
"/threads/{thread_id}":
get: get:
operationId: getThread operationId: getThread
tags: tags:
@@ -539,7 +625,7 @@
 description: |
 The ID of the thread to retrieve.
 responses:
-'200':
+"200":
 description: Thread details retrieved successfully
 content:
 application/json:
@@ -579,7 +665,7 @@
 items:
 $ref: specs/threads.yaml#/components/schemas/ThreadMessageObject
 responses:
-'200':
+"200":
 description: Thread modified successfully
 content:
 application/json:
@@ -618,7 +704,7 @@
 description: |
 The ID of the thread to be deleted.
 responses:
-'200':
+"200":
 description: Thread deleted successfully
 content:
 application/json:
@@ -639,7 +725,7 @@
 "https://platform.openai.com/docs/api-reference/assistants/listAssistants">
 Equivalent to OpenAI's list assistants. </a>
 responses:
-'200':
+"200":
 description: List of assistants retrieved successfully
 content:
 application/json:
@@ -676,10 +762,36 @@
 metadata: {}
 x-codeSamples:
 - lang: cURL
-source: |
+source: |-
 curl http://localhost:1337/v1/assistants \
--H "Content-Type: application/json" \
-/assistants/{assistant_id}:
+-H "Content-Type: application/json"
+- lang: JavaScript
source: |-
fetch('http://localhost:1337/v1/assistants', {
method: 'GET',
headers: {
'Content-Type': 'application/json'
}
})
- lang: Node.js
source: |-
const fetch = require('node-fetch');
fetch('http://localhost:1337/v1/assistants', {
method: 'GET',
headers: {
'Content-Type': 'application/json'
}
})
- lang: Python
source: |-
import requests
url = 'http://localhost:1337/v1/assistants'
headers = {'Content-Type': 'application/json'}
response = requests.get(url, headers=headers)
"/assistants/{assistant_id}":
get: get:
operationId: getAssistant operationId: getAssistant
tags: tags:
@@ -699,19 +811,51 @@
 description: |
 The ID of the assistant to retrieve.
 responses:
-'200':
+"200":
 description: null
 content:
 application/json:
 schema:
-$ref: >-
-specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse
+$ref: specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse
 x-codeSamples:
 - lang: cURL
-source: |
+source: |-
 curl http://localhost:1337/v1/assistants/{assistant_id} \
--H "Content-Type: application/json" \
-/threads/{thread_id}/messages:
+-H "Content-Type: application/json"
+- lang: JavaScript
source: |-
const fetch = require('node-fetch');
let assistantId = 'abc123';
fetch(`http://localhost:1337/v1/assistants/${assistantId}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json'
}
})
- lang: Node.js
source: |-
const fetch = require('node-fetch');
let assistantId = 'abc123';
fetch(`http://localhost:1337/v1/assistants/${assistantId}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json'
}
})
- lang: Python
source: >-
import requests
assistant_id = 'abc123'
response = requests.get(f'http://localhost:1337/v1/assistants/{assistant_id}', headers={'Content-Type': 'application/json'})
"/threads/{thread_id}/messages":
get: get:
operationId: listMessages operationId: listMessages
tags: tags:
@@ -730,7 +874,7 @@
 description: |
 The ID of the thread from which to retrieve messages.
 responses:
-'200':
+"200":
 description: List of messages retrieved successfully
 content:
 application/json:
@@ -782,7 +926,7 @@
 - role
 - content
 responses:
-'200':
+"200":
 description: Message created successfully
 content:
 application/json:
@@ -797,7 +941,7 @@
 "role": "user",
 "content": "How does AI work? Explain it in simple terms."
 }'
-/threads/{thread_id}/messages/{message_id}:
+"/threads/{thread_id}/messages/{message_id}":
 get:
 operationId: retrieveMessage
 tags:
@@ -824,7 +968,7 @@
 description: |
 The ID of the message to retrieve.
 responses:
-'200':
+"200":
 description: OK
 content:
 application/json:
@@ -833,8 +977,8 @@
 x-codeSamples:
 - lang: cURL
 source: >
-curl
-http://localhost:1337/v1/threads/{thread_id}/messages/{message_id} \
+curl http://localhost:1337/v1/threads/{thread_id}/messages/{message_id}
+\
 -H "Content-Type: application/json"
 x-webhooks:
 ModelObject:
@@ -856,9 +1000,10 @@ x-webhooks:
 post:
 summary: The assistant object
 description: >
-Build assistants that can call models and use tools to perform tasks.
-<a href = "https://platform.openai.com/docs/api-reference/assistants">
-Equivalent to OpenAI's assistants object. </a>
+Build assistants that can call models and use tools to perform
+tasks. <a href =
+"https://platform.openai.com/docs/api-reference/assistants"> Equivalent
+to OpenAI's assistants object. </a>
 operationId: AssistantObjects
 tags:
 - Assistants
@@ -885,8 +1030,7 @@ x-webhooks:
 ThreadObject:
 post:
 summary: The thread object
-description: >-
-Represents a thread that contains messages. <a href =
+description: Represents a thread that contains messages. <a href =
 "https://platform.openai.com/docs/api-reference/threads/object">
 Equivalent to OpenAI's thread object. </a>
 operationId: ThreadObject
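Editor's note: the spec's cURL, JavaScript, and Python samples above all reduce to the same OpenAI-compatible call against the local server. As a quick reference, a minimal TypeScript sketch of the chat-completion request, under the assumption that the request body follows the OpenAI-compatible schema; the model id comes from other samples in this spec and the message text is a placeholder:

// Illustrative client snippet: POST a chat completion to the local Jan server.
// Requires Node 18+ (global fetch). 'mistral-ins-7b-q4' appears in other samples in this spec.
const response = await fetch('http://localhost:1337/v1/chat/completions', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: 'mistral-ins-7b-q4',
    messages: [{ role: 'user', content: 'How does AI work? Explain it in simple terms.' }],
  }),
})
const completion = await response.json()
console.log(completion)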

View File

@@ -93,12 +93,12 @@ export function handleAppIPCs() {
 const { canceled, filePaths } = await dialog.showOpenDialog(mainWindow, {
 title: 'Select model files',
 buttonLabel: 'Select',
-properties: ['openFile', 'multiSelections'],
+properties: ['openFile', 'openDirectory', 'multiSelections'],
 })
 if (canceled) {
 return
-} else {
-return filePaths
 }
+return filePaths
 })
 }

View File

@@ -61,6 +61,8 @@
 "test:e2e": "playwright test --workers=1",
 "copy:assets": "rimraf --glob \"./pre-install/*.tgz\" && cpx \"../pre-install/*.tgz\" \"./pre-install\"",
 "dev": "yarn copy:assets && tsc -p . && electron .",
+"compile": "tsc -p .",
+"start": "electron .",
 "build": "yarn copy:assets && run-script-os",
 "build:test": "yarn copy:assets && run-script-os",
 "build:test:darwin": "tsc -p . && electron-builder -p never -m --dir",

View File

@@ -41,7 +41,8 @@
 "build:extensions": "run-script-os",
 "build:test": "yarn copy:assets && yarn build:web && yarn workspace jan build:test",
 "build": "yarn build:web && yarn build:electron",
-"build:publish": "yarn copy:assets && yarn build:web && yarn workspace jan build:publish"
+"build:publish": "yarn copy:assets && yarn build:web && yarn workspace jan build:publish",
+"turbo:electron": "turbo run dev --parallel --filter=!@janhq/server"
 },
 "devDependencies": {
 "concurrently": "^8.2.1",

turbo.json (new file): 29 lines added
View File

@@ -0,0 +1,29 @@
{
"$schema": "https://turbo.build/schema.json",
"pipeline": {
"build": {
"outputs": [".next/**", "!.next/cache/**"]
},
"dev": {
"cache": false
},
"web#build": {
"dependsOn": ["@janhq/core#build"]
},
"web:dev": {
"cache": false,
"persistent": true,
"dependsOn": ["@janhq/core#build", "@janhq/uikit#build"]
},
"electron:dev": {
"cache": false,
"persistent": true,
"dependsOn": ["@janhq/core#build", "@janhq/server#build", "jan#compile"]
},
"electron#build": {
"dependsOn": ["web#build", "server#build", "core#build"],
"cache": false
},
"type-check": {}
}
}

View File

@@ -1,29 +0,0 @@
type Props = {
title: string
description?: string
disabled?: boolean
onChange?: (text?: string) => void
}
export default function ItemCardSidebar({
description,
title,
disabled,
onChange,
}: Props) {
return (
<div className="flex flex-col gap-2">
<div className="flex items-center gap-2">
<span>{title}</span>
</div>
<input
value={description}
disabled={disabled}
type="text"
className="block w-full rounded-md border-0 px-1 py-1.5 text-white shadow-sm ring-1 ring-inset ring-gray-300 placeholder:text-gray-400 focus:ring-2 focus:ring-inset focus:ring-indigo-600 sm:text-sm sm:leading-6"
placeholder=""
onChange={(e) => onChange?.(e.target.value)}
/>
</div>
)
}

View File

@@ -11,8 +11,6 @@ import EventListenerWrapper from '@/containers/Providers/EventListener'
 import JotaiWrapper from '@/containers/Providers/Jotai'
 import ThemeWrapper from '@/containers/Providers/Theme'
-import FeatureToggleWrapper from '@/context/FeatureToggle'
 import { setupCoreServices } from '@/services/coreService'
 import {
 isCoreExtensionInstalled,
@@ -81,15 +79,13 @@ const Providers = (props: PropsWithChildren) => {
 {settingUp && <Loader description="Preparing Update..." />}
 {setupCore && activated && (
 <KeyListener>
-<FeatureToggleWrapper>
 <EventListenerWrapper>
 <TooltipProvider delayDuration={0}>
 <DataLoader>{children}</DataLoader>
 </TooltipProvider>
 {!isMac && <GPUDriverPrompt />}
 </EventListenerWrapper>
 <Toaster />
-</FeatureToggleWrapper>
 </KeyListener>
 )}
 </ThemeWrapper>

View File

@@ -1,104 +0,0 @@
import { createContext, ReactNode, useEffect, useState } from 'react'
interface FeatureToggleContextType {
experimentalFeature: boolean
ignoreSSL: boolean
proxy: string
proxyEnabled: boolean
vulkanEnabled: boolean
setExperimentalFeature: (on: boolean) => void
setVulkanEnabled: (on: boolean) => void
setIgnoreSSL: (on: boolean) => void
setProxy: (value: string) => void
setProxyEnabled: (on: boolean) => void
}
const initialContext: FeatureToggleContextType = {
experimentalFeature: false,
ignoreSSL: false,
proxy: '',
proxyEnabled: false,
vulkanEnabled: false,
setExperimentalFeature: () => {},
setVulkanEnabled: () => {},
setIgnoreSSL: () => {},
setProxy: () => {},
setProxyEnabled: () => {},
}
export const FeatureToggleContext =
createContext<FeatureToggleContextType>(initialContext)
export default function FeatureToggleWrapper({
children,
}: {
children: ReactNode
}) {
const EXPERIMENTAL_FEATURE = 'experimentalFeature'
const VULKAN_ENABLED = 'vulkanEnabled'
const IGNORE_SSL = 'ignoreSSLFeature'
const HTTPS_PROXY_FEATURE = 'httpsProxyFeature'
const PROXY_FEATURE_ENABLED = 'proxyFeatureEnabled'
const [experimentalFeature, directSetExperimentalFeature] =
useState<boolean>(false)
const [proxyEnabled, directSetProxyEnabled] = useState<boolean>(false)
const [vulkanEnabled, directEnableVulkan] = useState<boolean>(false)
const [ignoreSSL, directSetIgnoreSSL] = useState<boolean>(false)
const [proxy, directSetProxy] = useState<string>('')
useEffect(() => {
directSetExperimentalFeature(
localStorage.getItem(EXPERIMENTAL_FEATURE) === 'true'
)
directSetIgnoreSSL(localStorage.getItem(IGNORE_SSL) === 'true')
directSetProxy(localStorage.getItem(HTTPS_PROXY_FEATURE) ?? '')
directSetProxyEnabled(
localStorage.getItem(PROXY_FEATURE_ENABLED) === 'true'
)
}, [])
const setExperimentalFeature = (on: boolean) => {
localStorage.setItem(EXPERIMENTAL_FEATURE, on ? 'true' : 'false')
directSetExperimentalFeature(on)
}
const setVulkanEnabled = (on: boolean) => {
localStorage.setItem(VULKAN_ENABLED, on ? 'true' : 'false')
directEnableVulkan(on)
}
const setIgnoreSSL = (on: boolean) => {
localStorage.setItem(IGNORE_SSL, on ? 'true' : 'false')
directSetIgnoreSSL(on)
}
const setProxy = (proxy: string) => {
localStorage.setItem(HTTPS_PROXY_FEATURE, proxy)
directSetProxy(proxy)
}
const setProxyEnabled = (on: boolean) => {
localStorage.setItem(PROXY_FEATURE_ENABLED, on ? 'true' : 'false')
directSetProxyEnabled(on)
}
return (
<FeatureToggleContext.Provider
value={{
experimentalFeature,
ignoreSSL,
proxy,
proxyEnabled,
vulkanEnabled,
setExperimentalFeature,
setVulkanEnabled,
setIgnoreSSL,
setProxy,
setProxyEnabled,
}}
>
{children}
</FeatureToggleContext.Provider>
)
}

View File

@@ -0,0 +1,16 @@
import { atomWithStorage } from 'jotai/utils'
export const hostOptions = ['127.0.0.1', '0.0.0.0']
export const apiServerPortAtom = atomWithStorage('apiServerPort', '1337')
export const apiServerHostAtom = atomWithStorage('apiServerHost', '127.0.0.1')
export const apiServerCorsEnabledAtom = atomWithStorage(
'apiServerCorsEnabled',
true
)
export const apiServerVerboseLogEnabledAtom = atomWithStorage(
'apiServerVerboseLogEnabled',
true
)
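Editor's note: these atoms keep the local API server settings in jotai's atomWithStorage (backed by localStorage by default), so the chosen host, port, CORS, and verbose-log values survive an app reload. A minimal sketch of consuming them outside the settings screen; the hook name is illustrative, and LocalServerScreen further down in this diff is the real consumer:

import { useAtomValue } from 'jotai'

import {
  apiServerHostAtom,
  apiServerPortAtom,
} from '@/helpers/atoms/ApiServer.atom'

// Illustrative helper: derive the local API server's base URL from the persisted settings.
export const useApiServerBaseUrl = (): string => {
  const host = useAtomValue(apiServerHostAtom)
  const port = useAtomValue(apiServerPortAtom)
  return `http://${host}:${port}`
}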

View File

@@ -1,3 +1,21 @@
 import { atom } from 'jotai'
+import { atomWithStorage } from 'jotai/utils'
+const EXPERIMENTAL_FEATURE = 'experimentalFeature'
+const PROXY_FEATURE_ENABLED = 'proxyFeatureEnabled'
+const VULKAN_ENABLED = 'vulkanEnabled'
+const IGNORE_SSL = 'ignoreSSLFeature'
+const HTTPS_PROXY_FEATURE = 'httpsProxyFeature'
 export const janDataFolderPathAtom = atom('')
+export const experimentalFeatureEnabledAtom = atomWithStorage(
+EXPERIMENTAL_FEATURE,
+false
+)
+export const proxyEnabledAtom = atomWithStorage(PROXY_FEATURE_ENABLED, false)
+export const proxyAtom = atomWithStorage(HTTPS_PROXY_FEATURE, '')
+export const ignoreSslAtom = atomWithStorage(IGNORE_SSL, false)
+export const vulkanEnabledAtom = atomWithStorage(VULKAN_ENABLED, false)
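Editor's note: these storage-backed atoms replace the FeatureToggle context deleted earlier in this commit, reusing the same localStorage keys; call sites swap useContext(FeatureToggleContext) for a plain atom read, as the hook and screen changes below show. A minimal sketch of the consumer side, with an illustrative hook name:

import { useAtomValue } from 'jotai'

import {
  proxyAtom,
  proxyEnabledAtom,
} from '@/helpers/atoms/AppConfig.atom'

// Illustrative consumer: resolve the proxy to use for downloads, without any context provider.
export const useDownloadProxy = (): string | undefined => {
  const proxyEnabled = useAtomValue(proxyEnabledAtom)
  const proxy = useAtomValue(proxyAtom)
  return proxyEnabled ? proxy : undefined
}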

View File

@@ -1,5 +1,3 @@
-import { useContext } from 'react'
 import {
 ExtensionTypeEnum,
 HuggingFaceExtension,
@@ -7,18 +5,18 @@ import {
 Quantization,
 } from '@janhq/core'
-import { useSetAtom } from 'jotai'
+import { useAtomValue, useSetAtom } from 'jotai'
-import { FeatureToggleContext } from '@/context/FeatureToggle'
 import { extensionManager } from '@/extension/ExtensionManager'
+import { ignoreSslAtom, proxyAtom } from '@/helpers/atoms/AppConfig.atom'
 import {
 conversionStatusAtom,
 conversionErrorAtom,
 } from '@/helpers/atoms/HFConverter.atom'
 export const useConvertHuggingFaceModel = () => {
-const { ignoreSSL, proxy } = useContext(FeatureToggleContext)
+const proxy = useAtomValue(proxyAtom)
+const ignoreSSL = useAtomValue(ignoreSslAtom)
 const setConversionStatus = useSetAtom(conversionStatusAtom)
 const setConversionError = useSetAtom(conversionErrorAtom)

View File

@@ -1,5 +1,3 @@
-import { useContext } from 'react'
 import {
 Assistant,
 ConversationalExtension,
@@ -17,8 +15,6 @@ import { atom, useAtomValue, useSetAtom } from 'jotai'
 import { selectedModelAtom } from '@/containers/DropdownListSidebar'
 import { fileUploadAtom } from '@/containers/Providers/Jotai'
-import { FeatureToggleContext } from '@/context/FeatureToggle'
 import { generateThreadId } from '@/utils/thread'
 import useRecommendedModel from './useRecommendedModel'
@@ -27,6 +23,7 @@ import useSetActiveThread from './useSetActiveThread'
 import { extensionManager } from '@/extension'
+import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
 import {
 threadsAtom,
 threadStatesAtom,
@@ -59,7 +56,8 @@ export const useCreateNewThread = () => {
 const setFileUpload = useSetAtom(fileUploadAtom)
 const setSelectedModel = useSetAtom(selectedModelAtom)
 const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
-const { experimentalFeature } = useContext(FeatureToggleContext)
+const experimentalEnabled = useAtomValue(experimentalFeatureEnabledAtom)
 const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
 const { recommendedModel, downloadedModels } = useRecommendedModel()
@@ -94,7 +92,7 @@ export const useCreateNewThread = () => {
 const assistantInfo: ThreadAssistantInfo = {
 assistant_id: assistant.id,
 assistant_name: assistant.name,
-tools: experimentalFeature ? [assistantTools] : assistant.tools,
+tools: experimentalEnabled ? [assistantTools] : assistant.tools,
 model: {
 id: defaultModel?.id ?? '*',
 settings: defaultModel?.settings ?? {},

View File

@@ -1,4 +1,4 @@
-import { useCallback, useContext } from 'react'
+import { useCallback } from 'react'
 import {
 Model,
@@ -10,17 +10,22 @@ import {
 DownloadState,
 } from '@janhq/core'
-import { useSetAtom } from 'jotai'
+import { useAtomValue, useSetAtom } from 'jotai'
-import { FeatureToggleContext } from '@/context/FeatureToggle'
 import { setDownloadStateAtom } from './useDownloadState'
 import { extensionManager } from '@/extension/ExtensionManager'
+import {
+ignoreSslAtom,
+proxyAtom,
+proxyEnabledAtom,
+} from '@/helpers/atoms/AppConfig.atom'
 import { addDownloadingModelAtom } from '@/helpers/atoms/Model.atom'
 export default function useDownloadModel() {
-const { ignoreSSL, proxy, proxyEnabled } = useContext(FeatureToggleContext)
+const ignoreSSL = useAtomValue(ignoreSslAtom)
+const proxy = useAtomValue(proxyAtom)
+const proxyEnabled = useAtomValue(proxyEnabledAtom)
 const setDownloadState = useSetAtom(setDownloadStateAtom)
 const addDownloadingModel = useSetAtom(addDownloadingModelAtom)

View File

@@ -39,7 +39,7 @@ export default function useDropModelBinaries() {
 }))
 if (unsupportedFiles.length > 0) {
 snackbar({
-description: `File has to be a .gguf file`,
+description: `Only files with .gguf extension can be imported.`,
 type: 'error',
 })
 }

View File

@@ -20,7 +20,9 @@ export const useGetHFRepoData = () => {
 const data = await res.json()
 setRepoData(data)
 } catch (err) {
-setFetchError(err as Error)
+setFetchError(
+Error("The repo does not exist or you don't have access to it.")
+)
 }
 setLoading(false)
 }

View File

@@ -1,5 +1,5 @@
 /* eslint-disable @typescript-eslint/no-explicit-any */
-import { useContext, useEffect, useRef, useState } from 'react'
+import { useEffect, useRef, useState } from 'react'
 import { InferenceEvent, MessageStatus, events } from '@janhq/core'
@@ -24,8 +24,6 @@ import { twMerge } from 'tailwind-merge'
 import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
-import { FeatureToggleContext } from '@/context/FeatureToggle'
 import { useActiveModel } from '@/hooks/useActiveModel'
 import { useClickOutside } from '@/hooks/useClickOutside'
@@ -34,6 +32,7 @@ import useSendChatMessage from '@/hooks/useSendChatMessage'
 import FileUploadPreview from '../FileUploadPreview'
 import ImageUploadPreview from '../ImageUploadPreview'
+import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
 import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
 import {
 activeThreadAtom,
@@ -58,7 +57,7 @@ const ChatInput: React.FC = () => {
 const fileInputRef = useRef<HTMLInputElement>(null)
 const imageInputRef = useRef<HTMLInputElement>(null)
 const [showAttacmentMenus, setShowAttacmentMenus] = useState(false)
-const { experimentalFeature } = useContext(FeatureToggleContext)
+const experimentalFeature = useAtomValue(experimentalFeatureEnabledAtom)
 const isGeneratingResponse = useAtomValue(isGeneratingResponseAtom)
 const threadStates = useAtomValue(threadStatesAtom)

View File

@@ -1,5 +1,4 @@
-/* eslint-disable @typescript-eslint/no-explicit-any */
-import React, { useContext } from 'react'
+import React from 'react'
 import {
 Input,
@@ -24,8 +23,6 @@ import DropdownListSidebar, {
 selectedModelAtom,
 } from '@/containers/DropdownListSidebar'
-import { FeatureToggleContext } from '@/context/FeatureToggle'
 import { useCreateNewThread } from '@/hooks/useCreateNewThread'
 import { getConfigurationsData } from '@/utils/componentSettings'
@@ -37,6 +34,7 @@ import ModelSetting from '../ModelSetting'
 import SettingComponentBuilder from '../ModelSetting/SettingComponent'
+import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
 import {
 activeThreadAtom,
 getActiveThreadModelParamsAtom,
@@ -50,7 +48,7 @@ const Sidebar: React.FC = () => {
 const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
 const selectedModel = useAtomValue(selectedModelAtom)
 const { updateThreadMetadata } = useCreateNewThread()
-const { experimentalFeature } = useContext(FeatureToggleContext)
+const experimentalFeature = useAtomValue(experimentalFeatureEnabledAtom)
 const modelEngineParams = toSettingParams(activeModelParams)
 const modelRuntimeParams = toRuntimeParams(activeModelParams)
@@ -174,7 +172,7 @@ const Sidebar: React.FC = () => {
 <div className="px-2 py-4">
 <SettingComponentBuilder
 componentData={componentDataEngineSetting}
-selector={(x: any) => x.name === 'prompt_template'}
+selector={(x) => x.name === 'prompt_template'}
 />
 </div>
 </CardSidebar>

View File

@@ -1,5 +1,5 @@
 /* eslint-disable @typescript-eslint/naming-convention */
-import React, { useContext, useEffect, useState } from 'react'
+import React, { useEffect, useState } from 'react'
 import { useDropzone } from 'react-dropzone'
@@ -18,8 +18,6 @@ import { showLeftSideBarAtom } from '@/containers/Providers/KeyListener'
 import { snackbar } from '@/containers/Toast'
-import { FeatureToggleContext } from '@/context/FeatureToggle'
 import { activeModelAtom } from '@/hooks/useActiveModel'
 import { queuedMessageAtom, reloadModelAtom } from '@/hooks/useSendChatMessage'
@@ -31,6 +29,7 @@ import ChatInput from './ChatInput'
 import RequestDownloadModel from './RequestDownloadModel'
 import Sidebar from './Sidebar'
+import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
 import {
 activeThreadAtom,
 engineParamsUpdateAtom,
@@ -63,7 +62,7 @@ const ChatScreen: React.FC = () => {
 const reloadModel = useAtomValue(reloadModelAtom)
 const [dragRejected, setDragRejected] = useState({ code: '' })
 const setFileUpload = useSetAtom(fileUploadAtom)
-const { experimentalFeature } = useContext(FeatureToggleContext)
+const experimentalFeature = useAtomValue(experimentalFeatureEnabledAtom)
 const activeModel = useAtomValue(activeModelAtom)

View File

@@ -53,7 +53,7 @@ export const HuggingFaceRepoDataLoadedModal = () => {
 ? '❌ This model is not supported!'
 : '✅ This model is supported!'}
 </p>
-{repoData.tags.includes('gguf') ? (
+{repoData.tags?.includes('gguf') ? (
 <p>...But you can import it manually!</p>
 ) : null}
 </div>

View File

@@ -18,7 +18,7 @@ export const HuggingFaceSearchErrorModal = () => {
 <p className="text-2xl font-bold">Error!</p>
 <p className="text-gray-500">Fetch error</p>
 </div>
-<p>{fetchError.message}</p>
+<p className="text-center">{fetchError.message}</p>
 <Button
 onClick={getRepoData}
 className="w-full"

View File

@@ -1,4 +1,4 @@
-import { useCallback, useContext, useState } from 'react'
+import { useCallback, useState } from 'react'
 import {
 Input,
@@ -15,13 +15,12 @@
 import { useAtomValue, useSetAtom } from 'jotai'
 import { UploadIcon, SearchIcon } from 'lucide-react'
-import { FeatureToggleContext } from '@/context/FeatureToggle'
 import { setImportModelStageAtom } from '@/hooks/useImportModel'
 import ExploreModelList from './ExploreModelList'
 import { HuggingFaceModal } from './HuggingFaceModal'
+import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
 import {
 configuredModelsAtom,
 downloadedModelsAtom,
@@ -38,7 +37,7 @@ const ExploreModelsScreen = () => {
 const [showHuggingFaceModal, setShowHuggingFaceModal] = useState(false)
 const setImportModelStage = useSetAtom(setImportModelStageAtom)
-const { experimentalFeature } = useContext(FeatureToggleContext)
+const experimentalFeature = useAtomValue(experimentalFeatureEnabledAtom)
 const filteredModels = configuredModels.filter((x) => {
 if (sortSelected === 'Downloaded') {

View File

@@ -20,7 +20,7 @@ import {
 SelectValue,
 } from '@janhq/uikit'
-import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
+import { useAtom, useAtomValue, useSetAtom } from 'jotai'
 import { Paintbrush, CodeIcon } from 'lucide-react'
 import { ExternalLinkIcon, InfoIcon } from 'lucide-react'
@@ -53,13 +53,15 @@ import SettingComponentBuilder from '../Chat/ModelSetting/SettingComponent'
 import { showRightSideBarAtom } from '../Chat/Sidebar'
+import {
+apiServerCorsEnabledAtom,
+apiServerHostAtom,
+apiServerPortAtom,
+apiServerVerboseLogEnabledAtom,
+hostOptions,
+} from '@/helpers/atoms/ApiServer.atom'
 import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
-const corsEnabledAtom = atom(true)
-const verboseEnabledAtom = atom(true)
-const hostAtom = atom('127.0.0.1')
-const portAtom = atom('1337')
 const LocalServerScreen = () => {
 const [errorRangePort, setErrorRangePort] = useState(false)
 const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
@@ -73,14 +75,14 @@ const LocalServerScreen = () => {
 const modelEngineParams = toSettingParams(selectedModel?.settings)
 const componentDataEngineSetting = getConfigurationsData(modelEngineParams)
-const [isCorsEnabled, setIsCorsEnabled] = useAtom(corsEnabledAtom)
-const [isVerboseEnabled, setIsVerboseEnabled] = useAtom(verboseEnabledAtom)
-const [host, setHost] = useAtom(hostAtom)
-const [port, setPort] = useAtom(portAtom)
+const [isCorsEnabled, setIsCorsEnabled] = useAtom(apiServerCorsEnabledAtom)
+const [isVerboseEnabled, setIsVerboseEnabled] = useAtom(
+apiServerVerboseLogEnabledAtom
+)
+const [host, setHost] = useAtom(apiServerHostAtom)
+const [port, setPort] = useAtom(apiServerPortAtom)
 const [loadModelError, setLoadModelError] = useAtom(loadModelErrorAtom)
-const hostOptions = ['127.0.0.1', '0.0.0.0']
 const FIRST_TIME_VISIT_API_SERVER = 'firstTimeVisitAPIServer'
 const [firstTimeVisitAPIServer, setFirstTimeVisitAPIServer] =
@@ -88,11 +90,7 @@ const LocalServerScreen = () => {
 const handleChangePort = useCallback(
 (value: string) => {
-if (Number(value) <= 0 || Number(value) >= 65536) {
-setErrorRangePort(true)
-} else {
-setErrorRangePort(false)
-}
+setErrorRangePort(Number(value) <= 0 || Number(value) >= 65536)
 setPort(value)
 },
 [setPort]

View File

@@ -1,12 +1,6 @@
 'use client'
-import {
-useContext,
-useEffect,
-useState,
-useCallback,
-ChangeEvent,
-} from 'react'
+import { useEffect, useState, useCallback, ChangeEvent } from 'react'
 import { openExternalUrl, fs } from '@janhq/core'
@@ -29,20 +23,27 @@ import {
 ScrollArea,
 } from '@janhq/uikit'
+import { useAtom } from 'jotai'
 import { AlertTriangleIcon, AlertCircleIcon } from 'lucide-react'
 import ShortcutModal from '@/containers/ShortcutModal'
 import { snackbar, toaster } from '@/containers/Toast'
-import { FeatureToggleContext } from '@/context/FeatureToggle'
 import { useActiveModel } from '@/hooks/useActiveModel'
 import { useSettings } from '@/hooks/useSettings'
 import DataFolder from './DataFolder'
 import FactoryReset from './FactoryReset'
+import {
+experimentalFeatureEnabledAtom,
+ignoreSslAtom,
+proxyAtom,
+proxyEnabledAtom,
+vulkanEnabledAtom,
+} from '@/helpers/atoms/AppConfig.atom'
 type GPU = {
 id: string
 vram: number | null
@@ -50,22 +51,19 @@ type GPU = {
 }
 const Advanced = () => {
-const {
-experimentalFeature,
-setExperimentalFeature,
-ignoreSSL,
-setIgnoreSSL,
-proxy,
-setProxy,
-proxyEnabled,
-setProxyEnabled,
-vulkanEnabled,
-setVulkanEnabled,
-} = useContext(FeatureToggleContext)
+const [experimentalEnabled, setExperimentalEnabled] = useAtom(
+experimentalFeatureEnabledAtom
+)
+const [vulkanEnabled, setVulkanEnabled] = useAtom(vulkanEnabledAtom)
+const [proxyEnabled, setProxyEnabled] = useAtom(proxyEnabledAtom)
+const [proxy, setProxy] = useAtom(proxyAtom)
+const [ignoreSSL, setIgnoreSSL] = useAtom(ignoreSslAtom)
 const [partialProxy, setPartialProxy] = useState<string>(proxy)
 const [gpuEnabled, setGpuEnabled] = useState<boolean>(false)
 const [gpuList, setGpuList] = useState<GPU[]>([])
 const [gpusInUse, setGpusInUse] = useState<string[]>([])
 const { readSettings, saveSettings, validateSettings, setShowNotification } =
 useSettings()
 const { stopModel } = useActiveModel()
@@ -169,8 +167,8 @@ const Advanced = () => {
 </p>
 </div>
 <Switch
-checked={experimentalFeature}
-onCheckedChange={setExperimentalFeature}
+checked={experimentalEnabled}
+onCheckedChange={setExperimentalEnabled}
 />
 </div>
@@ -355,7 +353,7 @@ const Advanced = () => {
 )}
 {/* Vulkan for AMD GPU/ APU and Intel Arc GPU */}
-{!isMac && experimentalFeature && (
+{!isMac && experimentalEnabled && (
 <div className="flex w-full items-start justify-between border-b border-border py-4 first:pt-0 last:border-none">
 <div className="flex-shrink-0 space-y-1.5">
 <div className="flex gap-x-2">

View File

@@ -116,6 +116,11 @@ const EditModelInfoModal: React.FC = () => {
 return null
 }
+const onTagsChange = (e: React.ChangeEvent<HTMLInputElement>) => {
+const tags = e.target.value.split(',')
+setTags(tags)
+}
 return (
 <Modal
 open={importModelStage === 'EDIT_MODEL_INFO'}
@@ -128,21 +133,23 @@ const EditModelInfoModal: React.FC = () => {
 <div className="flex flex-row space-x-4 rounded-xl border p-4">
 <div className="flex h-10 w-10 items-center justify-center rounded-full bg-blue-400">
-<Paperclip />
+<Paperclip color="#fff" />
 </div>
-<div className="flex flex-col">
+<div className="flex flex-1 flex-col">
 <p>{editingModel.name}</p>
 <div className="flex flex-row">
 <span className="mr-2 text-sm text-[#71717A]">
 {toGibibytes(editingModel.size)}
 </span>
-<span className="text-sm font-semibold text-[#71717A]">
-Format:{' '}
-</span>
-<span className="text-sm font-normal text-[#71717A]">
-{editingModel.format.toUpperCase()}
-</span>
+<div className="flex flex-row space-x-1">
+<span className="text-sm font-semibold text-[#71717A]">
+Format:
+</span>
+<span className="text-sm font-normal text-[#71717A]">
+{editingModel.format.toUpperCase()}
+</span>
+</div>
 </div>
 <div className="mt-1 flex flex-row items-center space-x-2">
 <span className="line-clamp-1 text-xs font-normal text-[#71717A]">
@@ -189,7 +196,7 @@ const EditModelInfoModal: React.FC = () => {
 </div>
 <div className="flex flex-col">
 <label className="mb-1">Tags</label>
-<Input />
+<Input value={tags.join(',')} onChange={onTagsChange} />
 </div>
 </form>

View File

@@ -1,7 +1,7 @@
 import { useCallback } from 'react'
 import { useDropzone } from 'react-dropzone'
-import { ImportingModel, baseName, fs } from '@janhq/core'
+import { ImportingModel, baseName, fs, joinPath } from '@janhq/core'
 import { Modal, ModalContent, ModalHeader, ModalTitle } from '@janhq/uikit'
 import { useAtomValue, useSetAtom } from 'jotai'
@@ -34,14 +34,31 @@ const SelectingModelModal: React.FC = () => {
 const sanitizedFilePaths: FilePathWithSize[] = []
 for (const filePath of filePaths) {
 const fileStats = await fs.fileStat(filePath, true)
-if (!fileStats || fileStats.isDirectory) continue
-const fileName = await baseName(filePath)
-sanitizedFilePaths.push({
-path: filePath,
-name: fileName,
-size: fileStats.size,
-})
+if (!fileStats) continue
+if (!fileStats.isDirectory) {
+const fileName = await baseName(filePath)
+sanitizedFilePaths.push({
+path: filePath,
+name: fileName,
+size: fileStats.size,
+})
+} else {
+// allowing only one level of directory
+const files = await fs.readdirSync(filePath)
+for (const file of files) {
+const fullPath = await joinPath([filePath, file])
+const fileStats = await fs.fileStat(fullPath, true)
+if (!fileStats || fileStats.isDirectory) continue
+sanitizedFilePaths.push({
+path: fullPath,
+name: file,
+size: fileStats.size,
+})
+}
+}
 }
 const unsupportedFiles = sanitizedFilePaths.filter(
@@ -68,7 +85,7 @@ const SelectingModelModal: React.FC = () => {
 )
 if (unsupportedFiles.length > 0) {
 snackbar({
-description: `File has to be a .gguf file`,
+description: `Only files with .gguf extension can be imported.`,
 type: 'error',
 })
 }