diff --git a/.github/workflows/jan-electron-build.yml b/.github/workflows/jan-electron-build.yml
index 8898c8211..ab90f696f 100644
--- a/.github/workflows/jan-electron-build.yml
+++ b/.github/workflows/jan-electron-build.yml
@@ -25,12 +25,11 @@ jobs:
GITHUB_REF: ${{ github.ref }}
- name: Create Draft Release
id: create_release
- uses: actions/create-release@v1
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ uses: softprops/action-gh-release@v2
with:
tag_name: ${{ github.ref_name }}
- release_name: "${{ env.VERSION }}"
+ token: ${{ secrets.GITHUB_TOKEN }}
+ name: "${{ env.VERSION }}"
draft: true
prerelease: false
diff --git a/.github/workflows/jan-openai-api-test.yml b/.github/workflows/jan-openai-api-test.yml
index 9964a41d5..b7e2717b3 100644
--- a/.github/workflows/jan-openai-api-test.yml
+++ b/.github/workflows/jan-openai-api-test.yml
@@ -1,6 +1,13 @@
name: Test - OpenAI API Pytest collection
on:
workflow_dispatch:
+ inputs:
+ endpoints:
+ description: 'Comma-separated list of endpoints to test (see endpoints_mapping.json for the available keys, e.g. /models,/chat/completions)'
+ required: false
+ default: all
+ type: string
+
push:
branches:
- main
@@ -38,11 +45,11 @@ jobs:
rm -rf ~/jan
make clean
- - name: install dependencies
+ - name: Install dependencies
run: |
npm install -g @stoplight/prism-cli
- - name: create python virtual environment and run test
+ - name: Create python virtual environment and run test
run: |
python3 -m venv /tmp/jan
source /tmp/jan/bin/activate
@@ -65,10 +72,14 @@ jobs:
# Append to conftest.py
cat ../docs/tests/conftest.py >> tests/conftest.py
-
+ cp ../docs/tests/endpoints_mapping.json tests/endpoints_mapping.json
+
# start mock server and run test then stop mock server
- prism mock ../docs/openapi/jan.yaml > prism.log & prism_pid=$! && pytest --reportportal --html=report.html && kill $prism_pid
+ prism mock ../docs/openapi/jan.yaml > prism.log & prism_pid=$! &&
+ pytest --endpoint "$ENDPOINTS" --reportportal --html=report.html && kill $prism_pid
deactivate
+ env:
+ ENDPOINTS: ${{ github.event.inputs.endpoints }}
- name: Upload Artifact
uses: actions/upload-artifact@v2
@@ -79,7 +90,7 @@ jobs:
openai-python/assets
openai-python/prism.log
- - name: clean up
+ - name: Clean up
if: always()
run: |
rm -rf /tmp/jan
diff --git a/README.md b/README.md
index 5c4da2985..e1622b081 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# Jan - Bring AI to your Desktop
+# Jan - Turn your computer into an AI computer

@@ -19,13 +19,14 @@
- Discord
-> ⚠️ **Jan is currently in Development**: Expect breaking changes and bugs!
+> [!WARNING]
+> **Jan is currently in Development**: Expect breaking changes and bugs!
Jan is an open-source ChatGPT alternative that runs 100% offline on your computer.
**Jan runs on any hardware.** From PCs to multi-GPU clusters, Jan supports universal architectures:
-- [x] Nvidia GPUs (fast)
+- [x] NVIDIA GPUs (fast)
- [x] Apple M-series (fast)
- [x] Apple Intel
- [x] Linux Debian
@@ -57,7 +58,7 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
- M1/M2
+ M1/M2/M3/M4
|
@@ -90,7 +91,7 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
|
- M1/M2
+ M1/M2/M3/M4
|
diff --git a/docs/tests/conftest.py b/docs/tests/conftest.py
index 86b6c422f..bec60cf0b 100644
--- a/docs/tests/conftest.py
+++ b/docs/tests/conftest.py
@@ -1,6 +1,40 @@
+import json
+
+import pytest
+
+
+def pytest_addoption(parser):
+ parser.addoption(
+ "--endpoint", action="store", default="all", help="comma-separated list of endpoints to run tests for (default: all)"
+ )
+
+
+def pytest_configure(config):
+ config.addinivalue_line(
+ "markers", "endpoint(endpoint): select tests by the endpoint they cover"
+ )
+
+
+def pytest_runtest_setup(item):
+ selected = item.config.getoption("--endpoint").split(",")
+ if selected not in (["all"], [""]):
+     endpoint_names = [mark.args[0] for mark in item.iter_markers(name="endpoint")]
+     if not endpoint_names or not set(selected).intersection(endpoint_names):
+         pytest.skip("test endpoints {!r} not in --endpoint selection".format(endpoint_names))
+
+
def pytest_collection_modifyitems(items):
+ # load the JSON file
+ with open("tests/endpoints_mapping.json", "r") as json_file:
+ endpoints_file_mapping = json.load(json_file)
+
+ # create a dictionary to map filenames to endpoints
+ filename_to_endpoint = {}
+ for endpoint, files in endpoints_file_mapping.items():
+ for filename in files:
+ filename_to_endpoint[filename] = endpoint
+
+ # add the markers based on the JSON file
for item in items:
- # add the name of the file (without extension) as a marker
- filename = item.nodeid.split("::")[0].split("/")[-1].replace(".py", "")
- marker = pytest.mark.file(filename)
- item.add_marker(marker)
+ # map the name of the file to endpoint, else use default value
+ filename = item.fspath.basename
+ marker = filename_to_endpoint.get(filename, filename)
+ item.add_marker(pytest.mark.endpoint(marker, filename=filename))
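
A minimal sketch (plain Python, hypothetical data rather than the real endpoints_mapping.json) of how the mapping above drives marker assignment:

```python
# Invert {endpoint: [files]} into {file: endpoint}, exactly as
# pytest_collection_modifyitems does in the conftest.py above.
endpoints_file_mapping = {
    "/models": ["test_models.py"],
    "utils": ["test_qs.py", "test_proxy.py"],
}

filename_to_endpoint = {}
for endpoint, files in endpoints_file_mapping.items():
    for filename in files:
        filename_to_endpoint[filename] = endpoint

assert filename_to_endpoint["test_models.py"] == "/models"
# A file missing from the mapping falls back to its own name as the
# marker, mirroring filename_to_endpoint.get(filename, filename).
assert filename_to_endpoint.get("test_new.py", "test_new.py") == "test_new.py"
```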
diff --git a/docs/tests/endpoints_mapping.json b/docs/tests/endpoints_mapping.json
new file mode 100644
index 000000000..5800abc32
--- /dev/null
+++ b/docs/tests/endpoints_mapping.json
@@ -0,0 +1,75 @@
+{
+ "/embeddings": [
+ "test_embedding.py"
+ ],
+ "/audio/translations": [
+ "test_translations.py"
+ ],
+ "/audio/transcriptions": [
+ "test_transcriptions.py"
+ ],
+ "/moderations": [
+ "test_moderations.py"
+ ],
+ "/images/generations": [
+ "test_images.py"
+ ],
+ "/batches": [
+ "test_batches.py"
+ ],
+ "/vector_stores": [
+ "test_vector_stores.py"
+ ],
+ "/fine_tuning/jobs": [
+ "test_jobs.py",
+ "test_checkpoints.py"
+ ],
+ "/assistants": [
+ "test_assistants.py"
+ ],
+ "/threads/{thread_id}/runs": [
+ "test_runs.py"
+ ],
+ "/threads/{thread_id}/runs/{run_id}/steps": [
+ "test_steps.py"
+ ],
+ "/vector_stores/{vector_store_id}/file_batches": [
+ "test_file_batches.py"
+ ],
+ "/messages": [
+ "test_messages.py"
+ ],
+ "/vector_stores/{vector_store_id}/files": [
+ "test_files.py"
+ ],
+ "/chat/completions": [
+ "test_completions.py"
+ ],
+ "/threads": [
+ "test_threads.py"
+ ],
+ "/audio/speech": [
+ "test_speech.py"
+ ],
+ "/models": [
+ "test_models.py"
+ ],
+ "native_client_sdk_only": [
+ "test_streaming.py"
+ ],
+ "utils": [
+ "test_response.py",
+ "test_client.py",
+ "test_extract_files.py",
+ "test_typing.py",
+ "test_legacy_response.py",
+ "test_module_client.py",
+ "test_old_api.py",
+ "test_proxy.py",
+ "test_qs.py",
+ "test_required_args.py",
+ "test_transform.py",
+ "test_azure.py",
+ "test_deepcopy.py"
+ ]
+}
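
Given these keys, --endpoint selects tests by intersecting the comma-separated selection with each test's endpoint markers. A runnable sketch of that decision, assuming the same splitting that pytest_runtest_setup performs:

```python
def should_skip(endpoint_option: str, endpoint_names: list[str]) -> bool:
    # "all" or an empty option runs everything; otherwise skip tests
    # whose markers do not intersect the selection.
    selected = endpoint_option.split(",")
    if selected in (["all"], [""]):
        return False
    return not endpoint_names or not set(selected).intersection(endpoint_names)

assert should_skip("all", ["/models"]) is False
assert should_skip("/models,/chat/completions", ["/models"]) is False
assert should_skip("/models", ["/embeddings"]) is True
```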
diff --git a/electron/sign.js b/electron/sign.js
index 73afedc4e..9955e53e8 100644
--- a/electron/sign.js
+++ b/electron/sign.js
@@ -1,5 +1,28 @@
const { exec } = require('child_process')
+function execCommandWithRetry(command, retries = 3) {
+ return new Promise((resolve, reject) => {
+ const execute = (attempt) => {
+ exec(command, (error, stdout, stderr) => {
+ if (error) {
+ console.error(`Error: ${error}`)
+ if (attempt < retries) {
+ console.log(`Retrying... attempt ${attempt + 2} of ${retries + 1}`)
+ execute(attempt + 1)
+ } else {
+ return reject(error)
+ }
+ } else {
+ console.log(`stdout: ${stdout}`)
+ console.error(`stderr: ${stderr}`)
+ resolve()
+ }
+ })
+ }
+ execute(0)
+ })
+}
+
function sign({
path,
name,
@@ -13,16 +36,9 @@ function sign({
}) {
return new Promise((resolve, reject) => {
const command = `azuresigntool.exe sign -kvu "${certUrl}" -kvi "${clientId}" -kvt "${tenantId}" -kvs "${clientSecret}" -kvc "${certName}" -tr "${timestampServer}" -v "${path}"`
-
- exec(command, (error, stdout, stderr) => {
- if (error) {
- console.error(`Error: ${error}`)
- return reject(error)
- }
- console.log(`stdout: ${stdout}`)
- console.error(`stderr: ${stderr}`)
- resolve()
- })
+ execCommandWithRetry(command)
+ .then(resolve)
+ .catch(reject)
})
}
@@ -34,15 +50,20 @@ exports.default = async function (options) {
const certName = process.env.AZURE_CERT_NAME
const timestampServer = 'http://timestamp.globalsign.com/tsa/r6advanced1'
- await sign({
- path: options.path,
- name: 'jan-win-x64',
- certUrl,
- clientId,
- tenantId,
- clientSecret,
- certName,
- timestampServer,
- version: options.version,
- })
+ try {
+ await sign({
+ path: options.path,
+ name: 'jan-win-x64',
+ certUrl,
+ clientId,
+ tenantId,
+ clientSecret,
+ certName,
+ timestampServer,
+ version: options.version,
+ })
+ } catch (error) {
+ console.error('Failed to sign after multiple attempts:', error)
+ process.exit(1)
+ }
}
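
The retry arithmetic above is easy to misread: execute(0) combined with the `attempt < retries` check means the default retries = 3 allows up to four executions (one initial attempt plus three retries). A minimal Python sketch of the equivalent control flow, under that reading:

```python
def exec_with_retry(run, retries=3):
    # Up to retries + 1 runs: the initial attempt plus `retries` retries,
    # matching execCommandWithRetry's `attempt < retries` recursion.
    for attempt in range(retries + 1):
        try:
            return run()
        except Exception:
            if attempt == retries:
                raise
            print(f"Retrying... attempt {attempt + 2} of {retries + 1}")

exec_with_retry(lambda: print("signing..."))  # succeeds on the first run
```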
diff --git a/extensions/inference-nitro-extension/bin/version.txt b/extensions/inference-nitro-extension/bin/version.txt
index f90568270..76914ddc0 100644
--- a/extensions/inference-nitro-extension/bin/version.txt
+++ b/extensions/inference-nitro-extension/bin/version.txt
@@ -1 +1 @@
-0.4.7
+0.4.9
diff --git a/extensions/inference-nitro-extension/package.json b/extensions/inference-nitro-extension/package.json
index d396778d9..1903eafef 100644
--- a/extensions/inference-nitro-extension/package.json
+++ b/extensions/inference-nitro-extension/package.json
@@ -1,7 +1,7 @@
{
"name": "@janhq/inference-cortex-extension",
"productName": "Cortex Inference Engine",
- "version": "1.0.7",
+ "version": "1.0.10",
"description": "This extension embeds cortex.cpp, a lightweight inference engine written in C++. See https://nitro.jan.ai.\nAdditional dependencies could be installed to run without Cuda Toolkit installation.",
"main": "dist/index.js",
"node": "dist/node/index.cjs.js",
diff --git a/extensions/inference-nitro-extension/resources/models/aya-23-35b/model.json b/extensions/inference-nitro-extension/resources/models/aya-23-35b/model.json
new file mode 100644
index 000000000..c70c02080
--- /dev/null
+++ b/extensions/inference-nitro-extension/resources/models/aya-23-35b/model.json
@@ -0,0 +1,35 @@
+{
+ "sources": [
+ {
+ "filename": "aya-23-35B-Q4_K_M.gguf",
+ "url": "https://huggingface.co/bartowski/aya-23-35B-GGUF/resolve/main/aya-23-35B-Q4_K_M.gguf"
+ }
+ ],
+ "id": "aya-23-35b",
+ "object": "model",
+ "name": "Aya 23 35B Q4",
+ "version": "1.0",
+ "description": "Aya 23 can speak up to 23 languages fluently.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 8192,
+ "prompt_template": "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{system_prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>",
+ "llama_model_path": "aya-23-35B-Q4_K_M.gguf",
+ "ngl": 40
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 8192,
+ "frequency_penalty": 0,
+ "presence_penalty": 0,
+ "stop": ["<|END_OF_TURN_TOKEN|>"]
+ },
+ "metadata": {
+ "author": "CohereForAI",
+ "tags": ["34B", "Finetuned"],
+ "size": 21556982144
+ },
+ "engine": "nitro"
+}
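
For readers skimming these model.json files: prompt_template appears to be a plain placeholder template that the inference engine fills with the system prompt and user input. A hypothetical illustration of that substitution (the real work happens inside the Nitro/cortex engine, not in this JSON):

```python
prompt_template = (
    "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{system_prompt}<|END_OF_TURN_TOKEN|>"
    "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|>"
    "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
)

# Assumed behavior: straight placeholder substitution.
filled = prompt_template.format(
    system_prompt="You are a helpful assistant.",
    prompt="List the languages Aya 23 supports.",
)
print(filled)
```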
diff --git a/extensions/inference-nitro-extension/resources/models/aya-23-8b/model.json b/extensions/inference-nitro-extension/resources/models/aya-23-8b/model.json
new file mode 100644
index 000000000..ccb9a6f7f
--- /dev/null
+++ b/extensions/inference-nitro-extension/resources/models/aya-23-8b/model.json
@@ -0,0 +1,35 @@
+{
+ "sources": [
+ {
+ "filename": "aya-23-8B-Q4_K_M.gguf",
+ "url": "https://huggingface.co/bartowski/aya-23-8B-GGUF/resolve/main/aya-23-8B-Q4_K_M.gguf"
+ }
+ ],
+ "id": "aya-23-8b",
+ "object": "model",
+ "name": "Aya 23 8B Q4",
+ "version": "1.0",
+ "description": "Aya 23 can speak up to 23 languages fluently.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 8192,
+ "prompt_template": "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{system_prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>",
+ "llama_model_path": "aya-23-8B-Q4_K_M.gguf",
+ "ngl": 32
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 8192,
+ "frequency_penalty": 0,
+ "presence_penalty": 0,
+ "stop": ["<|END_OF_TURN_TOKEN|>"]
+ },
+ "metadata": {
+ "author": "CohereForAI",
+ "tags": ["7B", "Finetuned", "Featured"],
+ "size": 5056982144
+ },
+ "engine": "nitro"
+}
diff --git a/extensions/inference-nitro-extension/resources/models/phi3-3.8b/model.json b/extensions/inference-nitro-extension/resources/models/phi3-3.8b/model.json
index 6777cb6b6..f384fd953 100644
--- a/extensions/inference-nitro-extension/resources/models/phi3-3.8b/model.json
+++ b/extensions/inference-nitro-extension/resources/models/phi3-3.8b/model.json
@@ -8,17 +8,23 @@
"id": "phi3-3.8b",
"object": "model",
"name": "Phi-3 Mini",
- "version": "1.0",
+ "version": "1.1",
"description": "Phi-3 Mini is Microsoft's newest, compact model designed for mobile use.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"prompt_template": "<|user|>\n{prompt}<|end|>\n<|assistant|>\n",
- "llama_model_path": "Phi-3-mini-4k-instruct-q4.gguf"
+ "llama_model_path": "Phi-3-mini-4k-instruct-q4.gguf",
+ "ngl": 32
},
"parameters": {
"max_tokens": 4096,
- "stop": ["<|end|>"]
+ "stop": ["<|end|>"],
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "frequency_penalty": 0,
+ "presence_penalty": 0
},
"metadata": {
"author": "Microsoft",
diff --git a/extensions/inference-nitro-extension/resources/models/phi3-medium/model.json b/extensions/inference-nitro-extension/resources/models/phi3-medium/model.json
new file mode 100644
index 000000000..63dda8f0a
--- /dev/null
+++ b/extensions/inference-nitro-extension/resources/models/phi3-medium/model.json
@@ -0,0 +1,38 @@
+{
+ "sources": [
+ {
+ "url": "https://huggingface.co/bartowski/Phi-3-medium-128k-instruct-GGUF/resolve/main/Phi-3-medium-128k-instruct-Q4_K_M.gguf",
+ "filename": "Phi-3-medium-128k-instruct-Q4_K_M.gguf"
+ }
+ ],
+ "id": "phi3-medium",
+ "object": "model",
+ "name": "Phi-3 Medium",
+ "version": "1.0",
+ "description": "Phi-3 Medium is Microsoft's 14B-parameter model with a 128K-token context window.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 128000,
+ "prompt_template": "<|user|>\n{prompt}<|end|>\n<|assistant|>\n",
+ "llama_model_path": "Phi-3-medium-128k-instruct-Q4_K_M.gguf",
+ "ngl": 32
+ },
+ "parameters": {
+ "max_tokens": 128000,
+ "stop": ["<|end|>"],
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Microsoft",
+ "tags": [
+ "7B",
+ "Finetuned"
+ ],
+ "size": 8366000000
+ },
+ "engine": "nitro"
+}
diff --git a/extensions/inference-nitro-extension/rollup.config.ts b/extensions/inference-nitro-extension/rollup.config.ts
index b0707f404..c28d5b64e 100644
--- a/extensions/inference-nitro-extension/rollup.config.ts
+++ b/extensions/inference-nitro-extension/rollup.config.ts
@@ -23,6 +23,7 @@ const mistralIns7bq4Json = require('./resources/models/mistral-ins-7b-q4/model.j
const mixtral8x7bInstructJson = require('./resources/models/mixtral-8x7b-instruct/model.json')
const noromaid7bJson = require('./resources/models/noromaid-7b/model.json')
const openchat357bJson = require('./resources/models/openchat-3.5-7b/model.json')
+const phi3bJson = require('./resources/models/phi3-3.8b/model.json')
const phind34bJson = require('./resources/models/phind-34b/model.json')
const qwen7bJson = require('./resources/models/qwen-7b/model.json')
const stableZephyr3bJson = require('./resources/models/stable-zephyr-3b/model.json')
@@ -34,6 +35,9 @@ const wizardcoder13bJson = require('./resources/models/wizardcoder-13b/model.jso
const yi34bJson = require('./resources/models/yi-34b/model.json')
const llama3Json = require('./resources/models/llama3-8b-instruct/model.json')
const llama3Hermes8bJson = require('./resources/models/llama3-hermes-8b/model.json')
+const aya8bJson = require('./resources/models/aya-23-8b/model.json')
+const aya35bJson = require('./resources/models/aya-23-35b/model.json')
+const phimediumJson = require('./resources/models/phi3-medium/model.json')
export default [
{
@@ -64,6 +68,7 @@ export default [
mixtral8x7bInstructJson,
noromaid7bJson,
openchat357bJson,
+ phi3bJson,
phind34bJson,
qwen7bJson,
stableZephyr3bJson,
@@ -74,7 +79,10 @@ export default [
wizardcoder13bJson,
yi34bJson,
llama3Json,
- llama3Hermes8bJson
+ llama3Hermes8bJson,
+ phimediumJson,
+ aya8bJson,
+ aya35bJson
]),
NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`),
DEFAULT_SETTINGS: JSON.stringify(defaultSettingJson),
diff --git a/web/helpers/atoms/ChatMessage.atom.ts b/web/helpers/atoms/ChatMessage.atom.ts
index d092dd89c..4da22d13a 100644
--- a/web/helpers/atoms/ChatMessage.atom.ts
+++ b/web/helpers/atoms/ChatMessage.atom.ts
@@ -114,6 +114,7 @@ export const deleteMessageAtom = atom(null, (get, set, id: string) => {
newData[threadId] = newData[threadId].filter(
(e) => e.id !== id && e.status !== MessageStatus.Error
)
+
set(chatMessages, newData)
}
})
diff --git a/web/hooks/useDeleteThread.ts b/web/hooks/useDeleteThread.ts
index 62f3a65b5..69e51228f 100644
--- a/web/hooks/useDeleteThread.ts
+++ b/web/hooks/useDeleteThread.ts
@@ -6,6 +6,7 @@ import {
ConversationalExtension,
fs,
joinPath,
+ Thread,
} from '@janhq/core'
import { useAtom, useAtomValue, useSetAtom } from 'jotai'
@@ -27,6 +28,7 @@ import {
setActiveThreadIdAtom,
deleteThreadStateAtom,
updateThreadStateLastMessageAtom,
+ updateThreadAtom,
} from '@/helpers/atoms/Thread.atom'
export default function useDeleteThread() {
@@ -41,6 +43,7 @@ export default function useDeleteThread() {
const deleteThreadState = useSetAtom(deleteThreadStateAtom)
const updateThreadLastMessage = useSetAtom(updateThreadStateLastMessageAtom)
+ const updateThread = useSetAtom(updateThreadAtom)
const cleanThread = useCallback(
async (threadId: string) => {
@@ -73,19 +76,27 @@ export default function useDeleteThread() {
thread.metadata = {
...thread.metadata,
- lastMessage: undefined,
}
+
+ const updatedThread: Thread = {
+ ...thread,
+ title: 'New Thread',
+ metadata: { ...thread.metadata, lastMessage: undefined },
+ }
+
await extensionManager
.get(ExtensionTypeEnum.Conversational)
- ?.saveThread(thread)
+ ?.saveThread(updatedThread)
updateThreadLastMessage(threadId, undefined)
+ updateThread(updatedThread)
},
[
- janDataFolderPath,
+ cleanMessages,
threads,
messages,
- cleanMessages,
updateThreadLastMessage,
+ updateThread,
+ janDataFolderPath,
]
)
diff --git a/web/package.json b/web/package.json
index a654e3a5c..7c2e58e8c 100644
--- a/web/package.json
+++ b/web/package.json
@@ -29,7 +29,7 @@
"marked": "^9.1.2",
"marked-highlight": "^2.0.6",
"marked-katex-extension": "^5.0.1",
- "next": "14.0.1",
+ "next": "14.2.3",
"next-themes": "^0.2.1",
"postcss": "8.4.31",
"posthog-js": "^1.95.1",
diff --git a/web/screens/Chat/MessageToolbar/index.tsx b/web/screens/Chat/MessageToolbar/index.tsx
index 9c62c5d2f..744a8def0 100644
--- a/web/screens/Chat/MessageToolbar/index.tsx
+++ b/web/screens/Chat/MessageToolbar/index.tsx
@@ -1,3 +1,5 @@
+import { useCallback } from 'react'
+
import {
MessageStatus,
ExtensionTypeEnum,
@@ -5,6 +7,7 @@ import {
ChatCompletionRole,
ConversationalExtension,
ContentType,
+ Thread,
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
import {
@@ -26,7 +29,11 @@ import {
editMessageAtom,
getCurrentChatMessagesAtom,
} from '@/helpers/atoms/ChatMessage.atom'
-import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
+import {
+ activeThreadAtom,
+ updateThreadAtom,
+ updateThreadStateLastMessageAtom,
+} from '@/helpers/atoms/Thread.atom'
const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
const deleteMessage = useSetAtom(deleteMessageAtom)
@@ -35,9 +42,19 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
const messages = useAtomValue(getCurrentChatMessagesAtom)
const { resendChatMessage } = useSendChatMessage()
const clipboard = useClipboard({ timeout: 1000 })
+ const updateThreadLastMessage = useSetAtom(updateThreadStateLastMessageAtom)
+ const updateThread = useSetAtom(updateThreadAtom)
- const onDeleteClick = async () => {
+ const onDeleteClick = useCallback(async () => {
deleteMessage(message.id ?? '')
+
+ const lastResponse = messages
+ .filter(
+ (msg) =>
+ msg.id !== message.id && msg.role === ChatCompletionRole.Assistant
+ )
+ .slice(-1)[0]
+
if (thread) {
// Should also delete error messages to clear out the error state
await extensionManager
@@ -48,8 +65,26 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
(msg) => msg.id !== message.id && msg.status !== MessageStatus.Error
)
)
+
+ const updatedThread: Thread = {
+   ...thread,
+   metadata: {
+     ...thread.metadata,
+     // Reuse lastResponse (computed above, excluding the deleted message)
+     // so a just-deleted reply never lingers as the thread preview.
+     lastMessage: lastResponse?.content[0]?.text?.value,
+   },
+ }
+
+ updateThreadLastMessage(thread.id, lastResponse?.content)
+
+ updateThread(updatedThread)
}
- }
+ }, [deleteMessage, message.id, messages, thread, updateThread, updateThreadLastMessage])
const onEditClick = async () => {
setEditMessage(message.id ?? '')
diff --git a/web/screens/Settings/HuggingFaceRepoDetailModal/ModelSegmentInfo/index.tsx b/web/screens/Settings/HuggingFaceRepoDetailModal/ModelSegmentInfo/index.tsx
index 11086d237..6a6601b33 100644
--- a/web/screens/Settings/HuggingFaceRepoDetailModal/ModelSegmentInfo/index.tsx
+++ b/web/screens/Settings/HuggingFaceRepoDetailModal/ModelSegmentInfo/index.tsx
@@ -13,11 +13,12 @@ const ModelSegmentInfo: React.FC = () => {
)
const { author, modelName, downloads, modelUrl } = useMemo(() => {
- const author =
- (importingHuggingFaceRepoData?.cardData['model_creator'] as string) ??
- 'N/A'
- const modelName =
- (importingHuggingFaceRepoData?.cardData['model_name'] as string) ?? 'N/A'
+ const cardData = importingHuggingFaceRepoData?.cardData
+ const author = (cardData?.['model_creator'] ?? 'N/A') as string
+ const modelName = (cardData?.['model_name'] ??
+ importingHuggingFaceRepoData?.id ??
+ 'N/A') as string
+
const modelUrl = importingHuggingFaceRepoData?.modelUrl ?? 'N/A'
const downloads = importingHuggingFaceRepoData?.downloads ?? 0
|