diff --git a/.gitignore b/.gitignore
index 62878011e..75518bf5a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,3 +31,7 @@ extensions/inference-nitro-extension/bin/saved-*
extensions/inference-nitro-extension/bin/*.tar.gz
extensions/inference-nitro-extension/bin/vulkaninfoSDK.exe
extensions/inference-nitro-extension/bin/vulkaninfo
+
+
+# Turborepo
+.turbo
\ No newline at end of file
diff --git a/docs/openapi/jan.yaml b/docs/openapi/jan.yaml
index 1a37dfd84..f45db7d2d 100644
--- a/docs/openapi/jan.yaml
+++ b/docs/openapi/jan.yaml
@@ -1,11 +1,11 @@
+---
openapi: 3.0.0
info:
title: API Reference
description: >
# Introduction
- Jan API is compatible with the [OpenAI
- API](https://platform.openai.com/docs/api-reference).
+ Jan API is compatible with the [OpenAI API](https://platform.openai.com/docs/api-reference).
version: 0.1.8
contact:
name: Jan Discord
@@ -20,12 +20,12 @@ tags:
description: List and describe the various models available in the API.
- name: Chat
description: >
- Given a list of messages comprising a conversation, the model will return
- a response.
+ Given a list of messages comprising a conversation, the model will
+ return a response.
- name: Messages
description: >
- Messages capture a conversation's content. This can include the content
- from LLM responses and other metadata from [chat
+ Messages capture a conversation's content. This can include the
+ content from LLM responses and other metadata from [chat
completions](/specs/chats).
- name: Threads
- name: Assistants
@@ -49,16 +49,16 @@ paths:
summary: |
Create chat completion
description: >
- Creates a model response for the given chat conversation. Equivalent
- to OpenAI's create chat completion.
+ Creates a model response for the given chat conversation.
+ Equivalent to OpenAI's create chat completion.
requestBody:
content:
application/json:
schema:
$ref: specs/chat.yaml#/components/schemas/ChatCompletionRequest
responses:
- '200':
+ "200":
description: OK
content:
application/json:
@@ -192,9 +192,7 @@ paths:
}
- response =
- requests.post('http://localhost:1337/v1/chat/completions',
- json=data)
+ response = requests.post('http://localhost:1337/v1/chat/completions', json=data)
print(response.json())
/models:
@@ -204,12 +202,12 @@ paths:
- Models
summary: List models
description: >
- Lists the currently available models, and provides basic information
- about each one such as the owner and availability. Equivalent
- to OpenAI's list model.
+ Lists the currently available models, and provides basic
+ information about each one such as the owner and availability.
+ Equivalent to OpenAI's list model.
responses:
- '200':
+ "200":
description: OK
content:
application/json:
@@ -228,14 +226,6 @@ paths:
headers: {Accept: 'application/json'}
});
const data = await response.json();
- - lang: Python
- source: |-
- import requests
-
- url = 'http://localhost:1337/v1/models'
- headers = {'Accept': 'application/json'}
- response = requests.get(url, headers=headers)
- data = response.json()
- lang: Node.js
source: |-
const fetch = require('node-fetch');
@@ -249,7 +239,15 @@ paths:
fetch(url, options)
.then(res => res.json())
.then(json => console.log(json));
- /models/download/{model_id}:
+ - lang: Python
+ source: |-
+ import requests
+
+ url = 'http://localhost:1337/v1/models'
+ headers = {'Accept': 'application/json'}
+ response = requests.get(url, headers=headers)
+ data = response.json()
+ "/models/download/{model_id}":
get:
operationId: downloadModel
tags:
@@ -267,7 +265,7 @@ paths:
description: |
The ID of the model to use for this request.
responses:
- '200':
+ "200":
description: OK
content:
application/json:
@@ -304,20 +302,18 @@ paths:
import requests
- response =
- requests.get('http://localhost:1337/v1/models/download/{model_id}',
- headers={'accept': 'application/json'})
+ response = requests.get('http://localhost:1337/v1/models/download/{model_id}', headers={'accept': 'application/json'})
data = response.json()
- /models/{model_id}:
+ "/models/{model_id}":
get:
operationId: retrieveModel
tags:
- Models
summary: Retrieve model
description: >
- Get a model instance, providing basic information about the model such
- as the owner and permissioning.
Equivalent to OpenAI's retrieve model.
parameters:
@@ -330,7 +326,7 @@ paths:
description: |
The ID of the model to use for this request.
responses:
- '200':
+ "200":
description: OK
content:
application/json:
@@ -374,9 +370,7 @@ paths:
model_id = 'mistral-ins-7b-q4'
- response =
- requests.get(f'http://localhost:1337/v1/models/{model_id}',
- headers={'accept': 'application/json'})
+ response = requests.get(f'http://localhost:1337/v1/models/{model_id}', headers={'accept': 'application/json'})
print(response.json())
delete:
@@ -398,7 +392,7 @@ paths:
description: |
The model id to delete
responses:
- '200':
+ "200":
description: OK
content:
application/json:
@@ -442,9 +436,7 @@ paths:
model_id = 'mistral-ins-7b-q4'
- response =
- requests.delete(f'http://localhost:1337/v1/models/{model_id}',
- headers={'accept': 'application/json'})
+ response = requests.delete(f'http://localhost:1337/v1/models/{model_id}', headers={'accept': 'application/json'})
/threads:
post:
operationId: createThread
@@ -462,7 +454,7 @@ paths:
schema:
$ref: specs/threads.yaml#/components/schemas/CreateThreadObject
responses:
- '200':
+ "200":
description: Thread created successfully
content:
application/json:
@@ -471,8 +463,8 @@ paths:
x-codeSamples:
- lang: cURL
source: |
- curl -X POST http://localhost:1337/v1/threads \
- -H "Content-Type: application/json" \
+ curl -X POST http://localhost:1337/v1/threads \
+ -H "Content-Type: application/json" \
-d '{
"messages": [{
"role": "user",
@@ -483,6 +475,73 @@ paths:
"content": "How does AI work? Explain it in simple terms."
}]
}'
+ - lang: JavaScript
+ source: |-
+ const fetch = require('node-fetch');
+
+ fetch('http://localhost:1337/v1/threads', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({
+ messages: [
+ {
+ role: 'user',
+ content: 'Hello, what is AI?',
+ file_ids: ['file-abc123']
+ },
+ {
+ role: 'user',
+ content: 'How does AI work? Explain it in simple terms.'
+ }
+ ]
+ })
+ });
+ - lang: Node.js
+ source: |-
+ const fetch = require('node-fetch');
+
+ fetch('http://localhost:1337/v1/threads', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({
+ messages: [
+ {
+ role: 'user',
+ content: 'Hello, what is AI?',
+ file_ids: ['file-abc123']
+ },
+ {
+ role: 'user',
+ content: 'How does AI work? Explain it in simple terms.'
+ }
+ ]
+ })
+ });
+ - lang: Python
+ source: |-
+ import requests
+
+ url = 'http://localhost:1337/v1/threads'
+ payload = {
+ 'messages': [
+ {
+ 'role': 'user',
+ 'content': 'Hello, what is AI?',
+ 'file_ids': ['file-abc123']
+ },
+ {
+ 'role': 'user',
+ 'content': 'How does AI work? Explain it in simple terms.'
+ }
+ ]
+ }
+
+ response = requests.post(url, json=payload)
+ print(response.text)
get:
operationId: listThreads
tags:
@@ -491,7 +550,7 @@ paths:
description: |
Retrieves a list of all threads available in the system.
responses:
- '200':
+ "200":
description: List of threads retrieved successfully
content:
application/json:
@@ -516,10 +575,37 @@ paths:
metadata: {}
x-codeSamples:
- lang: cURL
- source: |
- curl http://localhost:1337/v1/threads \
- -H "Content-Type: application/json" \
- /threads/{thread_id}:
+ source: |-
+ curl http://localhost:1337/v1/threads \
+ -H "Content-Type: application/json"
+ - lang: JavaScript
+ source: |-
+ const fetch = require('node-fetch');
+
+ fetch('http://localhost:1337/v1/threads', {
+ method: 'GET',
+ headers: {'Content-Type': 'application/json'}
+ }).then(res => res.json())
+ .then(json => console.log(json));
+ - lang: Node.js
+ source: |-
+ const fetch = require('node-fetch');
+
+ fetch('http://localhost:1337/v1/threads', {
+ method: 'GET',
+ headers: {'Content-Type': 'application/json'}
+ }).then(res => res.json())
+ .then(json => console.log(json));
+ - lang: Python
+ source: |-
+ import requests
+
+ url = 'http://localhost:1337/v1/threads'
+ headers = {'Content-Type': 'application/json'}
+
+ response = requests.get(url, headers=headers)
+ print(response.json())
+ "/threads/{thread_id}":
get:
operationId: getThread
tags:
@@ -539,7 +625,7 @@ paths:
description: |
The ID of the thread to retrieve.
responses:
- '200':
+ "200":
description: Thread details retrieved successfully
content:
application/json:
@@ -579,7 +665,7 @@ paths:
items:
$ref: specs/threads.yaml#/components/schemas/ThreadMessageObject
responses:
- '200':
+ "200":
description: Thread modified successfully
content:
application/json:
@@ -618,7 +704,7 @@ paths:
description: |
The ID of the thread to be deleted.
responses:
- '200':
+ "200":
description: Thread deleted successfully
content:
application/json:
@@ -639,7 +725,7 @@ paths:
"https://platform.openai.com/docs/api-reference/assistants/listAssistants">
Equivalent to OpenAI's list assistants.
responses:
- '200':
+ "200":
description: List of assistants retrieved successfully
content:
application/json:
@@ -676,10 +762,36 @@ paths:
metadata: {}
x-codeSamples:
- lang: cURL
- source: |
+ source: |-
curl http://localhost:1337/v1/assistants \
- -H "Content-Type: application/json" \
- /assistants/{assistant_id}:
+ -H "Content-Type: application/json"
+ - lang: JavaScript
+ source: |-
+ fetch('http://localhost:1337/v1/assistants', {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json'
+ }
+ })
+ - lang: Node.js
+ source: |-
+ const fetch = require('node-fetch');
+
+ fetch('http://localhost:1337/v1/assistants', {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json'
+ }
+ })
+ - lang: Python
+ source: |-
+ import requests
+
+ url = 'http://localhost:1337/v1/assistants'
+ headers = {'Content-Type': 'application/json'}
+
+ response = requests.get(url, headers=headers)
+ "/assistants/{assistant_id}":
get:
operationId: getAssistant
tags:
@@ -699,19 +811,51 @@ paths:
description: |
The ID of the assistant to retrieve.
responses:
- '200':
+ "200":
description: null
content:
application/json:
schema:
- $ref: >-
- specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse
+ $ref: specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse
x-codeSamples:
- lang: cURL
- source: |
+ source: |-
curl http://localhost:1337/v1/assistants/{assistant_id} \
- -H "Content-Type: application/json" \
- /threads/{thread_id}/messages:
+ -H "Content-Type: application/json"
+ - lang: JavaScript
+ source: |-
+ const fetch = require('node-fetch');
+
+ let assistantId = 'abc123';
+
+ fetch(`http://localhost:1337/v1/assistants/${assistantId}`, {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json'
+ }
+ })
+ - lang: Node.js
+ source: |-
+ const fetch = require('node-fetch');
+
+ let assistantId = 'abc123';
+
+ fetch(`http://localhost:1337/v1/assistants/${assistantId}`, {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json'
+ }
+ })
+ - lang: Python
+ source: >-
+ import requests
+
+
+ assistant_id = 'abc123'
+
+
+ response = requests.get(f'http://localhost:1337/v1/assistants/{assistant_id}', headers={'Content-Type': 'application/json'})
+ "/threads/{thread_id}/messages":
get:
operationId: listMessages
tags:
@@ -730,7 +874,7 @@ paths:
description: |
The ID of the thread from which to retrieve messages.
responses:
- '200':
+ "200":
description: List of messages retrieved successfully
content:
application/json:
@@ -782,7 +926,7 @@ paths:
- role
- content
responses:
- '200':
+ "200":
description: Message created successfully
content:
application/json:
@@ -797,7 +941,7 @@ paths:
"role": "user",
"content": "How does AI work? Explain it in simple terms."
}'
- /threads/{thread_id}/messages/{message_id}:
+ "/threads/{thread_id}/messages/{message_id}":
get:
operationId: retrieveMessage
tags:
@@ -824,7 +968,7 @@ paths:
description: |
The ID of the message to retrieve.
responses:
- '200':
+ "200":
description: OK
content:
application/json:
@@ -833,8 +977,8 @@ paths:
x-codeSamples:
- lang: cURL
source: >
- curl
- http://localhost:1337/v1/threads/{thread_id}/messages/{message_id} \
+ curl http://localhost:1337/v1/threads/{thread_id}/messages/{message_id}
+ \
-H "Content-Type: application/json"
x-webhooks:
ModelObject:
@@ -856,9 +1000,10 @@ x-webhooks:
post:
summary: The assistant object
description: >
- Build assistants that can call models and use tools to perform tasks.
-
- Equivalent to OpenAI's assistants object.
+ Build assistants that can call models and use tools to perform
+ tasks. Equivalent
+ to OpenAI's assistants object.
operationId: AssistantObjects
tags:
- Assistants
@@ -885,8 +1030,7 @@ x-webhooks:
ThreadObject:
post:
summary: The thread object
- description: >-
- Represents a thread that contains messages.
Equivalent to OpenAI's thread object.
operationId: ThreadObject
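For readers of the updated spec, a minimal TypeScript sketch of calling the chat-completions endpoint documented above. It is illustrative only: it assumes Node 18+ (global fetch), that the request body follows the OpenAI chat-completions shape referenced by specs/chat.yaml, and that the model id below (taken from the spec's other samples) is installed locally.

    // Sketch only: POST /v1/chat/completions against a local Jan server.
    // Body shape mirrors OpenAI's chat completions, per the spec above.
    async function createChatCompletion(): Promise<void> {
      const response = await fetch('http://localhost:1337/v1/chat/completions', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          model: 'mistral-ins-7b-q4',
          messages: [{ role: 'user', content: 'Hello, what is AI?' }],
        }),
      })
      console.log(await response.json())
    }

    createChatCompletion()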
diff --git a/electron/handlers/native.ts b/electron/handlers/native.ts
index 79fa994bf..19a473e73 100644
--- a/electron/handlers/native.ts
+++ b/electron/handlers/native.ts
@@ -93,12 +93,12 @@ export function handleAppIPCs() {
const { canceled, filePaths } = await dialog.showOpenDialog(mainWindow, {
title: 'Select model files',
buttonLabel: 'Select',
- properties: ['openFile', 'multiSelections'],
+ properties: ['openFile', 'openDirectory', 'multiSelections'],
})
if (canceled) {
return
- } else {
- return filePaths
}
+
+ return filePaths
})
}
diff --git a/electron/package.json b/electron/package.json
index 7cdb98360..f51df233b 100644
--- a/electron/package.json
+++ b/electron/package.json
@@ -61,6 +61,8 @@
"test:e2e": "playwright test --workers=1",
"copy:assets": "rimraf --glob \"./pre-install/*.tgz\" && cpx \"../pre-install/*.tgz\" \"./pre-install\"",
"dev": "yarn copy:assets && tsc -p . && electron .",
+ "compile": "tsc -p .",
+ "start": "electron .",
"build": "yarn copy:assets && run-script-os",
"build:test": "yarn copy:assets && run-script-os",
"build:test:darwin": "tsc -p . && electron-builder -p never -m --dir",
diff --git a/package.json b/package.json
index 957934fda..847e89d91 100644
--- a/package.json
+++ b/package.json
@@ -41,7 +41,8 @@
"build:extensions": "run-script-os",
"build:test": "yarn copy:assets && yarn build:web && yarn workspace jan build:test",
"build": "yarn build:web && yarn build:electron",
- "build:publish": "yarn copy:assets && yarn build:web && yarn workspace jan build:publish"
+ "build:publish": "yarn copy:assets && yarn build:web && yarn workspace jan build:publish",
+ "turbo:electron": "turbo run dev --parallel --filter=!@janhq/server"
},
"devDependencies": {
"concurrently": "^8.2.1",
diff --git a/turbo.json b/turbo.json
new file mode 100644
index 000000000..0e53ece2d
--- /dev/null
+++ b/turbo.json
@@ -0,0 +1,29 @@
+{
+ "$schema": "https://turbo.build/schema.json",
+ "pipeline": {
+ "build": {
+ "outputs": [".next/**", "!.next/cache/**"]
+ },
+ "dev": {
+ "cache": false
+ },
+ "web#build": {
+ "dependsOn": ["@janhq/core#build"]
+ },
+ "web:dev": {
+ "cache": false,
+ "persistent": true,
+ "dependsOn": ["@janhq/core#build", "@janhq/uikit#build"]
+ },
+ "electron:dev": {
+ "cache": false,
+ "persistent": true,
+ "dependsOn": ["@janhq/core#build", "@janhq/server#build", "jan#compile"]
+ },
+ "electron#build": {
+ "dependsOn": ["web#build", "server#build", "core#build"],
+ "cache": false
+ },
+ "type-check": {}
+ }
+}
diff --git a/web/containers/ItemCardSidebar/index.tsx b/web/containers/ItemCardSidebar/index.tsx
deleted file mode 100644
index 627d7f45d..000000000
--- a/web/containers/ItemCardSidebar/index.tsx
+++ /dev/null
@@ -1,29 +0,0 @@
-type Props = {
- title: string
- description?: string
- disabled?: boolean
- onChange?: (text?: string) => void
-}
-
-export default function ItemCardSidebar({
- description,
- title,
- disabled,
- onChange,
-}: Props) {
- return (
-
-
- {title}
-
-
-        onChange={(e) => onChange?.(e.target.value)}
- />
-
- )
-}
diff --git a/web/containers/Providers/index.tsx b/web/containers/Providers/index.tsx
index e7a179ec4..73445f10a 100644
--- a/web/containers/Providers/index.tsx
+++ b/web/containers/Providers/index.tsx
@@ -11,8 +11,6 @@ import EventListenerWrapper from '@/containers/Providers/EventListener'
import JotaiWrapper from '@/containers/Providers/Jotai'
import ThemeWrapper from '@/containers/Providers/Theme'
-import FeatureToggleWrapper from '@/context/FeatureToggle'
-
import { setupCoreServices } from '@/services/coreService'
import {
isCoreExtensionInstalled,
@@ -81,15 +79,13 @@ const Providers = (props: PropsWithChildren) => {
{settingUp && }
{setupCore && activated && (
-
-
-
- {children}
-
- {!isMac && }
-
-
-
+
+
+ {children}
+
+ {!isMac && }
+
+
)}
diff --git a/web/context/FeatureToggle.tsx b/web/context/FeatureToggle.tsx
deleted file mode 100644
index 5a63eb66e..000000000
--- a/web/context/FeatureToggle.tsx
+++ /dev/null
@@ -1,104 +0,0 @@
-import { createContext, ReactNode, useEffect, useState } from 'react'
-
-interface FeatureToggleContextType {
- experimentalFeature: boolean
- ignoreSSL: boolean
- proxy: string
- proxyEnabled: boolean
- vulkanEnabled: boolean
- setExperimentalFeature: (on: boolean) => void
- setVulkanEnabled: (on: boolean) => void
- setIgnoreSSL: (on: boolean) => void
- setProxy: (value: string) => void
- setProxyEnabled: (on: boolean) => void
-}
-
-const initialContext: FeatureToggleContextType = {
- experimentalFeature: false,
- ignoreSSL: false,
- proxy: '',
- proxyEnabled: false,
- vulkanEnabled: false,
- setExperimentalFeature: () => {},
- setVulkanEnabled: () => {},
- setIgnoreSSL: () => {},
- setProxy: () => {},
- setProxyEnabled: () => {},
-}
-
-export const FeatureToggleContext =
-  createContext<FeatureToggleContextType>(initialContext)
-
-export default function FeatureToggleWrapper({
- children,
-}: {
- children: ReactNode
-}) {
- const EXPERIMENTAL_FEATURE = 'experimentalFeature'
- const VULKAN_ENABLED = 'vulkanEnabled'
- const IGNORE_SSL = 'ignoreSSLFeature'
- const HTTPS_PROXY_FEATURE = 'httpsProxyFeature'
- const PROXY_FEATURE_ENABLED = 'proxyFeatureEnabled'
-
- const [experimentalFeature, directSetExperimentalFeature] =
-    useState<boolean>(false)
- const [proxyEnabled, directSetProxyEnabled] = useState(false)
- const [vulkanEnabled, directEnableVulkan] = useState(false)
- const [ignoreSSL, directSetIgnoreSSL] = useState(false)
- const [proxy, directSetProxy] = useState('')
-
- useEffect(() => {
- directSetExperimentalFeature(
- localStorage.getItem(EXPERIMENTAL_FEATURE) === 'true'
- )
- directSetIgnoreSSL(localStorage.getItem(IGNORE_SSL) === 'true')
- directSetProxy(localStorage.getItem(HTTPS_PROXY_FEATURE) ?? '')
- directSetProxyEnabled(
- localStorage.getItem(PROXY_FEATURE_ENABLED) === 'true'
- )
- }, [])
-
- const setExperimentalFeature = (on: boolean) => {
- localStorage.setItem(EXPERIMENTAL_FEATURE, on ? 'true' : 'false')
- directSetExperimentalFeature(on)
- }
-
- const setVulkanEnabled = (on: boolean) => {
- localStorage.setItem(VULKAN_ENABLED, on ? 'true' : 'false')
- directEnableVulkan(on)
- }
-
- const setIgnoreSSL = (on: boolean) => {
- localStorage.setItem(IGNORE_SSL, on ? 'true' : 'false')
- directSetIgnoreSSL(on)
- }
-
- const setProxy = (proxy: string) => {
- localStorage.setItem(HTTPS_PROXY_FEATURE, proxy)
- directSetProxy(proxy)
- }
-
- const setProxyEnabled = (on: boolean) => {
- localStorage.setItem(PROXY_FEATURE_ENABLED, on ? 'true' : 'false')
- directSetProxyEnabled(on)
- }
-
- return (
-
- {children}
-
- )
-}
diff --git a/web/helpers/atoms/ApiServer.atom.ts b/web/helpers/atoms/ApiServer.atom.ts
new file mode 100644
index 000000000..2ccd2de23
--- /dev/null
+++ b/web/helpers/atoms/ApiServer.atom.ts
@@ -0,0 +1,16 @@
+import { atomWithStorage } from 'jotai/utils'
+
+export const hostOptions = ['127.0.0.1', '0.0.0.0']
+
+export const apiServerPortAtom = atomWithStorage('apiServerPort', '1337')
+export const apiServerHostAtom = atomWithStorage('apiServerHost', '127.0.0.1')
+
+export const apiServerCorsEnabledAtom = atomWithStorage(
+ 'apiServerCorsEnabled',
+ true
+)
+
+export const apiServerVerboseLogEnabledAtom = atomWithStorage(
+ 'apiServerVerboseLogEnabled',
+ true
+)
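A minimal sketch of how a settings panel might consume these new atoms with jotai; the component name and markup are illustrative and not part of this change. atomWithStorage persists each value to localStorage under the key passed as its first argument.

    import { useAtom } from 'jotai'

    import {
      apiServerHostAtom,
      apiServerPortAtom,
      hostOptions,
    } from '@/helpers/atoms/ApiServer.atom'

    // Illustrative component: reads and writes the persisted API server settings.
    const ApiServerSettings = () => {
      const [host, setHost] = useAtom(apiServerHostAtom)
      const [port, setPort] = useAtom(apiServerPortAtom)

      return (
        <div>
          <select value={host} onChange={(e) => setHost(e.target.value)}>
            {hostOptions.map((h) => (
              <option key={h} value={h}>
                {h}
              </option>
            ))}
          </select>
          <input value={port} onChange={(e) => setPort(e.target.value)} />
        </div>
      )
    }

    export default ApiServerSettings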
diff --git a/web/helpers/atoms/AppConfig.atom.ts b/web/helpers/atoms/AppConfig.atom.ts
index 9dfdfca90..75343d722 100644
--- a/web/helpers/atoms/AppConfig.atom.ts
+++ b/web/helpers/atoms/AppConfig.atom.ts
@@ -1,3 +1,21 @@
import { atom } from 'jotai'
+import { atomWithStorage } from 'jotai/utils'
+
+const EXPERIMENTAL_FEATURE = 'experimentalFeature'
+const PROXY_FEATURE_ENABLED = 'proxyFeatureEnabled'
+const VULKAN_ENABLED = 'vulkanEnabled'
+const IGNORE_SSL = 'ignoreSSLFeature'
+const HTTPS_PROXY_FEATURE = 'httpsProxyFeature'
export const janDataFolderPathAtom = atom('')
+
+export const experimentalFeatureEnabledAtom = atomWithStorage(
+ EXPERIMENTAL_FEATURE,
+ false
+)
+
+export const proxyEnabledAtom = atomWithStorage(PROXY_FEATURE_ENABLED, false)
+export const proxyAtom = atomWithStorage(HTTPS_PROXY_FEATURE, '')
+
+export const ignoreSslAtom = atomWithStorage(IGNORE_SSL, false)
+export const vulkanEnabledAtom = atomWithStorage(VULKAN_ENABLED, false)
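These persisted atoms take over the role of the deleted FeatureToggle context: instead of useContext(FeatureToggleContext), components read or write the flags directly, and atomWithStorage keeps them in localStorage under the same keys the old wrapper used. A minimal sketch of a toggle follows; the component name is illustrative.

    import { useAtom } from 'jotai'

    import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'

    // Illustrative toggle: flipping the checkbox persists 'experimentalFeature'
    // to localStorage and updates every subscriber of the atom.
    const ExperimentalFeatureToggle = () => {
      const [enabled, setEnabled] = useAtom(experimentalFeatureEnabledAtom)

      return (
        <label>
          <input
            type="checkbox"
            checked={enabled}
            onChange={(e) => setEnabled(e.target.checked)}
          />
          Experimental features
        </label>
      )
    }

    export default ExperimentalFeatureToggle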
diff --git a/web/hooks/useConvertHuggingFaceModel.ts b/web/hooks/useConvertHuggingFaceModel.ts
index bbf33207b..0616c4ee7 100644
--- a/web/hooks/useConvertHuggingFaceModel.ts
+++ b/web/hooks/useConvertHuggingFaceModel.ts
@@ -1,5 +1,3 @@
-import { useContext } from 'react'
-
import {
ExtensionTypeEnum,
HuggingFaceExtension,
@@ -7,18 +5,18 @@ import {
Quantization,
} from '@janhq/core'
-import { useSetAtom } from 'jotai'
-
-import { FeatureToggleContext } from '@/context/FeatureToggle'
+import { useAtomValue, useSetAtom } from 'jotai'
import { extensionManager } from '@/extension/ExtensionManager'
+import { ignoreSslAtom, proxyAtom } from '@/helpers/atoms/AppConfig.atom'
import {
conversionStatusAtom,
conversionErrorAtom,
} from '@/helpers/atoms/HFConverter.atom'
export const useConvertHuggingFaceModel = () => {
- const { ignoreSSL, proxy } = useContext(FeatureToggleContext)
+ const proxy = useAtomValue(proxyAtom)
+ const ignoreSSL = useAtomValue(ignoreSslAtom)
const setConversionStatus = useSetAtom(conversionStatusAtom)
const setConversionError = useSetAtom(conversionErrorAtom)
diff --git a/web/hooks/useCreateNewThread.ts b/web/hooks/useCreateNewThread.ts
index 722e5b7e4..247c65c55 100644
--- a/web/hooks/useCreateNewThread.ts
+++ b/web/hooks/useCreateNewThread.ts
@@ -1,5 +1,3 @@
-import { useContext } from 'react'
-
import {
Assistant,
ConversationalExtension,
@@ -17,8 +15,6 @@ import { atom, useAtomValue, useSetAtom } from 'jotai'
import { selectedModelAtom } from '@/containers/DropdownListSidebar'
import { fileUploadAtom } from '@/containers/Providers/Jotai'
-import { FeatureToggleContext } from '@/context/FeatureToggle'
-
import { generateThreadId } from '@/utils/thread'
import useRecommendedModel from './useRecommendedModel'
@@ -27,6 +23,7 @@ import useSetActiveThread from './useSetActiveThread'
import { extensionManager } from '@/extension'
+import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
import {
threadsAtom,
threadStatesAtom,
@@ -59,7 +56,8 @@ export const useCreateNewThread = () => {
const setFileUpload = useSetAtom(fileUploadAtom)
const setSelectedModel = useSetAtom(selectedModelAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
- const { experimentalFeature } = useContext(FeatureToggleContext)
+
+ const experimentalEnabled = useAtomValue(experimentalFeatureEnabledAtom)
const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
const { recommendedModel, downloadedModels } = useRecommendedModel()
@@ -94,7 +92,7 @@ export const useCreateNewThread = () => {
const assistantInfo: ThreadAssistantInfo = {
assistant_id: assistant.id,
assistant_name: assistant.name,
- tools: experimentalFeature ? [assistantTools] : assistant.tools,
+ tools: experimentalEnabled ? [assistantTools] : assistant.tools,
model: {
id: defaultModel?.id ?? '*',
settings: defaultModel?.settings ?? {},
diff --git a/web/hooks/useDownloadModel.ts b/web/hooks/useDownloadModel.ts
index 59333fbde..9f6334c71 100644
--- a/web/hooks/useDownloadModel.ts
+++ b/web/hooks/useDownloadModel.ts
@@ -1,4 +1,4 @@
-import { useCallback, useContext } from 'react'
+import { useCallback } from 'react'
import {
Model,
@@ -10,17 +10,22 @@ import {
DownloadState,
} from '@janhq/core'
-import { useSetAtom } from 'jotai'
-
-import { FeatureToggleContext } from '@/context/FeatureToggle'
+import { useAtomValue, useSetAtom } from 'jotai'
import { setDownloadStateAtom } from './useDownloadState'
import { extensionManager } from '@/extension/ExtensionManager'
+import {
+ ignoreSslAtom,
+ proxyAtom,
+ proxyEnabledAtom,
+} from '@/helpers/atoms/AppConfig.atom'
import { addDownloadingModelAtom } from '@/helpers/atoms/Model.atom'
export default function useDownloadModel() {
- const { ignoreSSL, proxy, proxyEnabled } = useContext(FeatureToggleContext)
+ const ignoreSSL = useAtomValue(ignoreSslAtom)
+ const proxy = useAtomValue(proxyAtom)
+ const proxyEnabled = useAtomValue(proxyEnabledAtom)
const setDownloadState = useSetAtom(setDownloadStateAtom)
const addDownloadingModel = useSetAtom(addDownloadingModelAtom)
diff --git a/web/hooks/useDropModelBinaries.ts b/web/hooks/useDropModelBinaries.ts
index c08e1dc73..d87e96627 100644
--- a/web/hooks/useDropModelBinaries.ts
+++ b/web/hooks/useDropModelBinaries.ts
@@ -39,7 +39,7 @@ export default function useDropModelBinaries() {
}))
if (unsupportedFiles.length > 0) {
snackbar({
- description: `File has to be a .gguf file`,
+        description: `Only files with the .gguf extension can be imported.`,
type: 'error',
})
}
diff --git a/web/hooks/useGetHFRepoData.ts b/web/hooks/useGetHFRepoData.ts
index 45f979fbd..d14458854 100644
--- a/web/hooks/useGetHFRepoData.ts
+++ b/web/hooks/useGetHFRepoData.ts
@@ -20,7 +20,9 @@ export const useGetHFRepoData = () => {
const data = await res.json()
setRepoData(data)
} catch (err) {
- setFetchError(err as Error)
+ setFetchError(
+ Error("The repo does not exist or you don't have access to it.")
+ )
}
setLoading(false)
}
diff --git a/web/screens/Chat/ChatInput/index.tsx b/web/screens/Chat/ChatInput/index.tsx
index 5b8128439..d5334cab8 100644
--- a/web/screens/Chat/ChatInput/index.tsx
+++ b/web/screens/Chat/ChatInput/index.tsx
@@ -1,5 +1,5 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
-import { useContext, useEffect, useRef, useState } from 'react'
+import { useEffect, useRef, useState } from 'react'
import { InferenceEvent, MessageStatus, events } from '@janhq/core'
@@ -24,8 +24,6 @@ import { twMerge } from 'tailwind-merge'
import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
-import { FeatureToggleContext } from '@/context/FeatureToggle'
-
import { useActiveModel } from '@/hooks/useActiveModel'
import { useClickOutside } from '@/hooks/useClickOutside'
@@ -34,6 +32,7 @@ import useSendChatMessage from '@/hooks/useSendChatMessage'
import FileUploadPreview from '../FileUploadPreview'
import ImageUploadPreview from '../ImageUploadPreview'
+import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
import {
activeThreadAtom,
@@ -58,7 +57,7 @@ const ChatInput: React.FC = () => {
const fileInputRef = useRef(null)
const imageInputRef = useRef(null)
const [showAttacmentMenus, setShowAttacmentMenus] = useState(false)
- const { experimentalFeature } = useContext(FeatureToggleContext)
+ const experimentalFeature = useAtomValue(experimentalFeatureEnabledAtom)
const isGeneratingResponse = useAtomValue(isGeneratingResponseAtom)
const threadStates = useAtomValue(threadStatesAtom)
diff --git a/web/screens/Chat/Sidebar/index.tsx b/web/screens/Chat/Sidebar/index.tsx
index 4f7e1bd50..2ff5a1253 100644
--- a/web/screens/Chat/Sidebar/index.tsx
+++ b/web/screens/Chat/Sidebar/index.tsx
@@ -1,5 +1,4 @@
-/* eslint-disable @typescript-eslint/no-explicit-any */
-import React, { useContext } from 'react'
+import React from 'react'
import {
Input,
@@ -24,8 +23,6 @@ import DropdownListSidebar, {
selectedModelAtom,
} from '@/containers/DropdownListSidebar'
-import { FeatureToggleContext } from '@/context/FeatureToggle'
-
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
import { getConfigurationsData } from '@/utils/componentSettings'
@@ -37,6 +34,7 @@ import ModelSetting from '../ModelSetting'
import SettingComponentBuilder from '../ModelSetting/SettingComponent'
+import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
import {
activeThreadAtom,
getActiveThreadModelParamsAtom,
@@ -50,7 +48,7 @@ const Sidebar: React.FC = () => {
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
const selectedModel = useAtomValue(selectedModelAtom)
const { updateThreadMetadata } = useCreateNewThread()
- const { experimentalFeature } = useContext(FeatureToggleContext)
+ const experimentalFeature = useAtomValue(experimentalFeatureEnabledAtom)
const modelEngineParams = toSettingParams(activeModelParams)
const modelRuntimeParams = toRuntimeParams(activeModelParams)
@@ -174,7 +172,7 @@ const Sidebar: React.FC = () => {
-          selector={(x: any) => x.name === 'prompt_template'}
+ selector={(x) => x.name === 'prompt_template'}
/>
diff --git a/web/screens/Chat/index.tsx b/web/screens/Chat/index.tsx
index 125e58b3b..00bca550f 100644
--- a/web/screens/Chat/index.tsx
+++ b/web/screens/Chat/index.tsx
@@ -1,5 +1,5 @@
/* eslint-disable @typescript-eslint/naming-convention */
-import React, { useContext, useEffect, useState } from 'react'
+import React, { useEffect, useState } from 'react'
import { useDropzone } from 'react-dropzone'
@@ -18,8 +18,6 @@ import { showLeftSideBarAtom } from '@/containers/Providers/KeyListener'
import { snackbar } from '@/containers/Toast'
-import { FeatureToggleContext } from '@/context/FeatureToggle'
-
import { activeModelAtom } from '@/hooks/useActiveModel'
import { queuedMessageAtom, reloadModelAtom } from '@/hooks/useSendChatMessage'
@@ -31,6 +29,7 @@ import ChatInput from './ChatInput'
import RequestDownloadModel from './RequestDownloadModel'
import Sidebar from './Sidebar'
+import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
import {
activeThreadAtom,
engineParamsUpdateAtom,
@@ -63,7 +62,7 @@ const ChatScreen: React.FC = () => {
const reloadModel = useAtomValue(reloadModelAtom)
const [dragRejected, setDragRejected] = useState({ code: '' })
const setFileUpload = useSetAtom(fileUploadAtom)
- const { experimentalFeature } = useContext(FeatureToggleContext)
+ const experimentalFeature = useAtomValue(experimentalFeatureEnabledAtom)
const activeModel = useAtomValue(activeModelAtom)
diff --git a/web/screens/ExploreModels/HuggingFaceRepoDataLoadedModal/index.tsx b/web/screens/ExploreModels/HuggingFaceRepoDataLoadedModal/index.tsx
index c4e9131bc..32284ede5 100644
--- a/web/screens/ExploreModels/HuggingFaceRepoDataLoadedModal/index.tsx
+++ b/web/screens/ExploreModels/HuggingFaceRepoDataLoadedModal/index.tsx
@@ -53,7 +53,7 @@ export const HuggingFaceRepoDataLoadedModal = () => {
? '❌ This model is not supported!'
: '✅ This model is supported!'}
- {repoData.tags.includes('gguf') ? (
+ {repoData.tags?.includes('gguf') ? (
...But you can import it manually!
) : null}
diff --git a/web/screens/ExploreModels/HuggingFaceSearchErrorModal/index.tsx b/web/screens/ExploreModels/HuggingFaceSearchErrorModal/index.tsx
index 31c7d48d4..4cb58332b 100644
--- a/web/screens/ExploreModels/HuggingFaceSearchErrorModal/index.tsx
+++ b/web/screens/ExploreModels/HuggingFaceSearchErrorModal/index.tsx
@@ -18,7 +18,7 @@ export const HuggingFaceSearchErrorModal = () => {
Error!
Fetch error
- {fetchError.message}
+ {fetchError.message}