Merge pull request #2950 from janhq/dev

Release/0.4.14 to main
Van Pham 2024-05-27 16:17:02 +07:00 committed by GitHub
commit 9b65944115
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
18 changed files with 369 additions and 58 deletions

View File

@@ -25,12 +25,11 @@ jobs:
           GITHUB_REF: ${{ github.ref }}
       - name: Create Draft Release
         id: create_release
-        uses: actions/create-release@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        uses: softprops/action-gh-release@v2
         with:
           tag_name: ${{ github.ref_name }}
-          release_name: "${{ env.VERSION }}"
+          token: ${{ secrets.GITHUB_TOKEN }}
+          name: "${{ env.VERSION }}"
           draft: true
           prerelease: false

View File

@@ -1,6 +1,13 @@
 name: Test - OpenAI API Pytest collection
 on:
   workflow_dispatch:
+    inputs:
+      endpoints:
+        description: 'comma-separated list (see available at endpoints_mapping.json e.g. GET /users,POST /transform)'
+        required: false
+        default: all
+        type: string
   push:
     branches:
       - main
@@ -38,11 +45,11 @@ jobs:
           rm -rf ~/jan
           make clean
-      - name: install dependencies
+      - name: Install dependencies
         run: |
           npm install -g @stoplight/prism-cli
-      - name: create python virtual environment and run test
+      - name: Create python virtual environment and run test
         run: |
           python3 -m venv /tmp/jan
           source /tmp/jan/bin/activate
@@ -65,10 +72,14 @@ jobs:
           # Append to conftest.py
           cat ../docs/tests/conftest.py >> tests/conftest.py
+          cat ../docs/tests/endpoints_mapping.json >> tests/endpoints_mapping.json
           # start mock server and run test then stop mock server
-          prism mock ../docs/openapi/jan.yaml > prism.log & prism_pid=$! && pytest --reportportal --html=report.html && kill $prism_pid
+          prism mock ../docs/openapi/jan.yaml > prism.log & prism_pid=$! &&
+          pytest --endpoint "$ENDPOINTS" --reportportal --html=report.html && kill $prism_pid
           deactivate
+        env:
+          ENDPOINTS: ${{ github.event.inputs.endpoints }}
       - name: Upload Artifact
         uses: actions/upload-artifact@v2
@@ -79,7 +90,7 @@ jobs:
           openai-python/assets
           openai-python/prism.log
-      - name: clean up
+      - name: Clean up
         if: always()
         run: |
           rm -rf /tmp/jan
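
The dispatch input reaches pytest through the `ENDPOINTS` environment variable shown above. A minimal local sketch of the same invocation (paths assumed to match the workflow's checkout layout; `--reportportal` omitted since it needs the ReportPortal plugin configured):

    import subprocess

    # run only two endpoint suites against the Prism mock, mirroring the
    # workflow step; "/chat/completions" and "/models" are keys from
    # endpoints_mapping.json
    subprocess.run(
        ["pytest", "--endpoint", "/chat/completions,/models", "--html=report.html"],
        cwd="openai-python",  # assumed working directory, as in the workflow
        check=True,
    )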

View File

@@ -1,4 +1,4 @@
-# Jan - Bring AI to your Desktop
+# Jan - Turn your computer into an AI computer

 ![Jan banner](https://github.com/janhq/jan/assets/89722390/35daac7d-b895-487c-a6ac-6663daaad78e)
@@ -19,13 +19,14 @@
   - <a href="https://discord.gg/AsJ8krTT3N">Discord</a>
 </p>

-> ⚠️ **Jan is currently in Development**: Expect breaking changes and bugs!
+>[!Warning]
+>**Jan is currently in Development**: Expect breaking changes and bugs!

 Jan is an open-source ChatGPT alternative that runs 100% offline on your computer.

 **Jan runs on any hardware.** From PCs to multi-GPU clusters, Jan supports universal architectures:
-- [x] Nvidia GPUs (fast)
+- [x] NVIDIA GPUs (fast)
 - [x] Apple M-series (fast)
 - [x] Apple Intel
 - [x] Linux Debian
@@ -57,7 +58,7 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
         <td style="text-align:center">
           <a href='https://app.jan.ai/download/latest/mac-arm64'>
             <img src='https://github.com/janhq/docs/blob/main/static/img/mac.png' style="height:15px; width: 15px" />
-            <b>M1/M2</b>
+            <b>M1/M2/M3/M4</b>
           </a>
         </td>
         <td style="text-align:center">
@@ -90,7 +91,7 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
         <td style="text-align:center">
           <a href='https://app.jan.ai/download/nightly/mac-arm64'>
             <img src='https://github.com/janhq/docs/blob/main/static/img/mac.png' style="height:15px; width: 15px" />
-            <b>M1/M2</b>
+            <b>M1/M2/M3/M4</b>
           </a>
         </td>
         <td style="text-align:center">

View File

@@ -1,6 +1,40 @@
+import json
+
+
+def pytest_addoption(parser):
+    parser.addoption(
+        "--endpoint", action="store", default="all", help="my option: endpoints"
+    )
+
+
+def pytest_configure(config):
+    config.addinivalue_line(
+        "markers", "endpoint(endpoint): this mark select the test based on endpoint"
+    )
+
+
+def pytest_runtest_setup(item):
+    getoption = item.config.getoption("--endpoint").split(",")
+    if getoption not in (["all"], [''], [""]):
+        endpoint_names = [mark.args[0] for mark in item.iter_markers(name="endpoint")]
+        if not endpoint_names or not set(getoption).intersection(set(endpoint_names)):
+            pytest.skip("Test skipped because endpoint is {!r}".format(endpoint_names))
+
+
 def pytest_collection_modifyitems(items):
+    # load the JSON file
+    with open("tests/endpoints_mapping.json", "r") as json_file:
+        endpoints_file_mapping = json.load(json_file)
+
+    # create a dictionary to map filenames to endpoints
+    filename_to_endpoint = {}
+    for endpoint, files in endpoints_file_mapping.items():
+        for filename in files:
+            filename_to_endpoint[filename] = endpoint
+
+    # add the markers based on the JSON file
     for item in items:
-        # add the name of the file (without extension) as a marker
-        filename = item.nodeid.split("::")[0].split("/")[-1].replace(".py", "")
-        marker = pytest.mark.file(filename)
-        item.add_marker(marker)
+        # map the name of the file to endpoint, else use default value
+        filename = item.fspath.basename
+        marker = filename_to_endpoint.get(filename, filename)
+        item.add_marker(pytest.mark.endpoint(marker, filename=filename))
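
Since this file is appended to the SDK's own tests/conftest.py (which already imports pytest), no `import pytest` line appears here. The three hooks work together: collection marks each test file with the endpoint it maps to, and setup skips any test whose marker is absent from the `--endpoint` list, treating "all" or an empty value as run-everything. A standalone sketch of that skip decision, pulled out as a plain function for illustration:

    # same intersection logic as pytest_runtest_setup above
    def should_skip(selected, endpoint_names):
        if selected in (["all"], [""]):  # default or empty -> run everything
            return False
        return not endpoint_names or not set(selected) & set(endpoint_names)

    assert should_skip(["/models"], ["/threads"]) is True   # not selected -> skip
    assert should_skip(["/models"], ["/models"]) is False   # selected -> run
    assert should_skip(["all"], ["/models"]) is False       # default -> run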

View File

@@ -0,0 +1,75 @@
{
"/embeddings": [
"test_embedding.py"
],
"/audio/translations": [
"test_translations.py"
],
"/audio/transcriptions": [
"test_transcriptions.py"
],
"/moderations": [
"test_moderations.py"
],
"/images/generations": [
"test_images.py"
],
"/batches": [
"test_batches.py"
],
"/vector_stores": [
"test_vector_stores.py"
],
"/fine_tuning/jobs": [
"test_jobs.py",
"test_checkpoints.py"
],
"/assistants": [
"test_assistants.py"
],
"/threads/{thread_id}/runs": [
"test_runs.py"
],
"/threads/{thread_id}/runs/{run_id}/steps": [
"test_steps.py"
],
"/vector_stores/{vector_store_id}/file_batches": [
"test_file_batches.py"
],
"/messages": [
"test_messages.py"
],
"/vector_stores/{vector_store_id}/files": [
"test_files.py"
],
"/chat/completions": [
"test_completions.py"
],
"/threads": [
"test_threads.py"
],
"/audio/speech": [
"test_speech.py"
],
"/models": [
"test_models.py"
],
"native_client_sdk_only": [
"test_streaming.py"
],
"utils": [
"test_response.py",
"test_client.py",
"test_extract_files.py",
"test_typing.py",
"test_legacy_response.py",
"test_module_client.py",
"test_old_api.py",
"test_proxy.py",
"test_qs.py",
"test_required_args.py",
"test_transform.py",
"test_azure.py",
"test_deepcopy.py"
]
}
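
Each key is an API endpoint and each value lists the pytest files exercising it; the conftest above inverts this into a filename-to-endpoint lookup, so each file carries exactly one endpoint marker. A quick sketch of that inversion:

    import json

    with open("tests/endpoints_mapping.json") as f:  # path as used by the conftest
        endpoints_file_mapping = json.load(f)

    # invert endpoint -> [files] into file -> endpoint, as the collection hook does
    filename_to_endpoint = {
        filename: endpoint
        for endpoint, files in endpoints_file_mapping.items()
        for filename in files
    }
    assert filename_to_endpoint["test_models.py"] == "/models"
    assert filename_to_endpoint["test_checkpoints.py"] == "/fine_tuning/jobs"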

View File

@ -1,5 +1,28 @@
const { exec } = require('child_process') const { exec } = require('child_process')
function execCommandWithRetry(command, retries = 3) {
return new Promise((resolve, reject) => {
const execute = (attempt) => {
exec(command, (error, stdout, stderr) => {
if (error) {
console.error(`Error: ${error}`)
if (attempt < retries) {
console.log(`Retrying... Attempt ${attempt + 1}`)
execute(attempt + 1)
} else {
return reject(error)
}
} else {
console.log(`stdout: ${stdout}`)
console.error(`stderr: ${stderr}`)
resolve()
}
})
}
execute(0)
})
}
function sign({ function sign({
path, path,
name, name,
@@ -13,16 +36,9 @@ function sign({
 }) {
   return new Promise((resolve, reject) => {
     const command = `azuresigntool.exe sign -kvu "${certUrl}" -kvi "${clientId}" -kvt "${tenantId}" -kvs "${clientSecret}" -kvc "${certName}" -tr "${timestampServer}" -v "${path}"`
-
-    exec(command, (error, stdout, stderr) => {
-      if (error) {
-        console.error(`Error: ${error}`)
-        return reject(error)
-      }
-      console.log(`stdout: ${stdout}`)
-      console.error(`stderr: ${stderr}`)
-      resolve()
-    })
+    execCommandWithRetry(command)
+      .then(resolve)
+      .catch(reject)
   })
 }
@@ -34,15 +50,20 @@ exports.default = async function (options) {
   const certName = process.env.AZURE_CERT_NAME
   const timestampServer = 'http://timestamp.globalsign.com/tsa/r6advanced1'

-  await sign({
-    path: options.path,
-    name: 'jan-win-x64',
-    certUrl,
-    clientId,
-    tenantId,
-    clientSecret,
-    certName,
-    timestampServer,
-    version: options.version,
-  })
+  try {
+    await sign({
+      path: options.path,
+      name: 'jan-win-x64',
+      certUrl,
+      clientId,
+      tenantId,
+      clientSecret,
+      certName,
+      timestampServer,
+      version: options.version,
+    })
+  } catch (error) {
+    console.error('Failed to sign after 3 attempts:', error)
+    process.exit(1)
+  }
 }

View File

@@ -1 +1 @@
-0.4.7
+0.4.9

View File

@@ -1,7 +1,7 @@
 {
   "name": "@janhq/inference-cortex-extension",
   "productName": "Cortex Inference Engine",
-  "version": "1.0.7",
+  "version": "1.0.10",
   "description": "This extension embeds cortex.cpp, a lightweight inference engine written in C++. See https://nitro.jan.ai.\nAdditional dependencies could be installed to run without Cuda Toolkit installation.",
   "main": "dist/index.js",
   "node": "dist/node/index.cjs.js",

View File

@@ -0,0 +1,35 @@
{
"sources": [
{
"filename": "aya-23-35B-Q4_K_M.gguf",
"url": "https://huggingface.co/bartowski/aya-23-35B-GGUF/resolve/main/aya-23-35B-Q4_K_M.gguf"
}
],
"id": "aya-23-35b",
"object": "model",
"name": "Aya 23 35B Q4",
"version": "1.0",
"description": "Aya 23 can talk upto 23 languages fluently.",
"format": "gguf",
"settings": {
"ctx_len": 8192,
"prompt_template": "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{system_prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>",
"llama_model_path": "aya-23-35B-Q4_K_M.gguf",
"ngl": 40
},
"parameters": {
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"max_tokens": 8192,
"frequency_penalty": 0,
"presence_penalty": 0,
"stop": ["<|END_OF_TURN_TOKEN|>"]
},
"metadata": {
"author": "CohereForAI",
"tags": ["34B", "Finetuned"],
"size": 21556982144
},
"engine": "nitro"
}
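
The `prompt_template` above uses `{system_prompt}` and `{prompt}` placeholders that the engine substitutes at inference time, with Cohere-style turn tokens delimiting each role. A minimal sketch of the expansion using plain `str.format`, which happens to match the placeholder syntax (the message values are hypothetical):

    template = (
        "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{system_prompt}<|END_OF_TURN_TOKEN|>"
        "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|>"
        "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
    )
    print(template.format(
        system_prompt="You are a concise multilingual assistant.",
        prompt="Greet me in Vietnamese.",
    ))
    # generation ends at <|END_OF_TURN_TOKEN|>, the configured "stop" token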

View File

@@ -0,0 +1,35 @@
{
"sources": [
{
"filename": "aya-23-8B-Q4_K_M.gguf",
"url": "https://huggingface.co/bartowski/aya-23-8B-GGUF/resolve/main/aya-23-8B-Q4_K_M.gguf"
}
],
"id": "aya-23-8b",
"object": "model",
"name": "Aya 23 8B Q4",
"version": "1.0",
"description": "Aya 23 can talk upto 23 languages fluently.",
"format": "gguf",
"settings": {
"ctx_len": 8192,
"prompt_template": "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{system_prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>",
"llama_model_path": "aya-23-8B-Q4_K_M.gguf",
"ngl": 32
},
"parameters": {
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"max_tokens": 8192,
"frequency_penalty": 0,
"presence_penalty": 0,
"stop": ["<|END_OF_TURN_TOKEN|>"]
},
"metadata": {
"author": "CohereForAI",
"tags": ["7B", "Finetuned","Featured"],
"size": 5056982144
},
"engine": "nitro"
}

View File

@@ -8,17 +8,23 @@
   "id": "phi3-3.8b",
   "object": "model",
   "name": "Phi-3 Mini",
-  "version": "1.0",
+  "version": "1.1",
   "description": "Phi-3 Mini is Microsoft's newest, compact model designed for mobile use.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
     "prompt_template": "<|user|>\n{prompt}<|end|>\n<|assistant|>\n",
-    "llama_model_path": "Phi-3-mini-4k-instruct-q4.gguf"
+    "llama_model_path": "Phi-3-mini-4k-instruct-q4.gguf",
+    "ngl": 32
   },
   "parameters": {
     "max_tokens": 4096,
-    "stop": ["<|end|>"]
+    "stop": ["<|end|>"],
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "frequency_penalty": 0,
+    "presence_penalty": 0
   },
   "metadata": {
     "author": "Microsoft",
View File

@@ -0,0 +1,38 @@
{
"sources": [
{
"url": "https://huggingface.co/bartowski/Phi-3-medium-128k-instruct-GGUF/resolve/main/Phi-3-medium-128k-instruct-Q4_K_M.gguf",
"filename": "Phi-3-medium-128k-instruct-Q4_K_M.gguf"
}
],
"id": "phi3-medium",
"object": "model",
"name": "Phi-3 Medium",
"version": "1.0",
"description": "Phi-3 Medium is Microsoft's latest SOTA model.",
"format": "gguf",
"settings": {
"ctx_len": 128000,
"prompt_template": "<|user|>\n{prompt}<|end|>\n<|assistant|>\n",
"llama_model_path": "Phi-3-medium-128k-instruct-Q4_K_M.gguf",
"ngl": 32
},
"parameters": {
"max_tokens": 128000,
"stop": ["<|end|>"],
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "Microsoft",
"tags": [
"7B",
"Finetuned"
],
"size": 8366000000
},
"engine": "nitro"
}

View File

@@ -23,6 +23,7 @@ const mistralIns7bq4Json = require('./resources/models/mistral-ins-7b-q4/model.j
 const mixtral8x7bInstructJson = require('./resources/models/mixtral-8x7b-instruct/model.json')
 const noromaid7bJson = require('./resources/models/noromaid-7b/model.json')
 const openchat357bJson = require('./resources/models/openchat-3.5-7b/model.json')
+const phi3bJson = require('./resources/models/phi3-3.8b/model.json')
 const phind34bJson = require('./resources/models/phind-34b/model.json')
 const qwen7bJson = require('./resources/models/qwen-7b/model.json')
 const stableZephyr3bJson = require('./resources/models/stable-zephyr-3b/model.json')
@@ -34,6 +35,9 @@ const wizardcoder13bJson = require('./resources/models/wizardcoder-13b/model.jso
 const yi34bJson = require('./resources/models/yi-34b/model.json')
 const llama3Json = require('./resources/models/llama3-8b-instruct/model.json')
 const llama3Hermes8bJson = require('./resources/models/llama3-hermes-8b/model.json')
+const aya8bJson = require('./resources/models/aya-23-8b/model.json')
+const aya35bJson = require('./resources/models/aya-23-35b/model.json')
+const phimediumJson = require('./resources/models/phi3-medium/model.json')

 export default [
   {
@@ -64,6 +68,7 @@ export default [
       mixtral8x7bInstructJson,
       noromaid7bJson,
       openchat357bJson,
+      phi3bJson,
       phind34bJson,
       qwen7bJson,
       stableZephyr3bJson,
@@ -74,7 +79,10 @@ export default [
       wizardcoder13bJson,
       yi34bJson,
       llama3Json,
-      llama3Hermes8bJson
+      llama3Hermes8bJson,
+      phimediumJson,
+      aya8bJson,
+      aya35bJson
     ]),
     NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`),
     DEFAULT_SETTINGS: JSON.stringify(defaultSettingJson),

View File

@@ -114,6 +114,7 @@ export const deleteMessageAtom = atom(null, (get, set, id: string) => {
     newData[threadId] = newData[threadId].filter(
       (e) => e.id !== id && e.status !== MessageStatus.Error
     )
+
     set(chatMessages, newData)
   }
 })

View File

@@ -6,6 +6,7 @@ import {
   ConversationalExtension,
   fs,
   joinPath,
+  Thread,
 } from '@janhq/core'

 import { useAtom, useAtomValue, useSetAtom } from 'jotai'
@@ -27,6 +28,7 @@ import {
   setActiveThreadIdAtom,
   deleteThreadStateAtom,
   updateThreadStateLastMessageAtom,
+  updateThreadAtom,
 } from '@/helpers/atoms/Thread.atom'

 export default function useDeleteThread() {
@@ -41,6 +43,7 @@ export default function useDeleteThread() {
   const deleteThreadState = useSetAtom(deleteThreadStateAtom)
   const updateThreadLastMessage = useSetAtom(updateThreadStateLastMessageAtom)
+  const updateThread = useSetAtom(updateThreadAtom)

   const cleanThread = useCallback(
     async (threadId: string) => {
@@ -73,19 +76,27 @@ export default function useDeleteThread() {
       thread.metadata = {
         ...thread.metadata,
-        lastMessage: undefined,
       }
+      const updatedThread: Thread = {
+        ...thread,
+        title: 'New Thread',
+        metadata: { ...thread.metadata, lastMessage: undefined },
+      }

       await extensionManager
         .get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
-        ?.saveThread(thread)
+        ?.saveThread(updatedThread)
       updateThreadLastMessage(threadId, undefined)
+      updateThread(updatedThread)
     },
     [
-      janDataFolderPath,
+      cleanMessages,
       threads,
       messages,
-      cleanMessages,
       updateThreadLastMessage,
+      updateThread,
+      janDataFolderPath,
     ]
   )

View File

@@ -29,7 +29,7 @@
     "marked": "^9.1.2",
     "marked-highlight": "^2.0.6",
     "marked-katex-extension": "^5.0.1",
-    "next": "14.0.1",
+    "next": "14.2.3",
     "next-themes": "^0.2.1",
     "postcss": "8.4.31",
     "posthog-js": "^1.95.1",

View File

@@ -1,3 +1,5 @@
+import { useCallback } from 'react'
+
 import {
   MessageStatus,
   ExtensionTypeEnum,
@@ -5,6 +7,7 @@ import {
   ChatCompletionRole,
   ConversationalExtension,
   ContentType,
+  Thread,
 } from '@janhq/core'

 import { useAtomValue, useSetAtom } from 'jotai'
 import {
@@ -26,7 +29,11 @@ import {
   editMessageAtom,
   getCurrentChatMessagesAtom,
 } from '@/helpers/atoms/ChatMessage.atom'
-import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
+import {
+  activeThreadAtom,
+  updateThreadAtom,
+  updateThreadStateLastMessageAtom,
+} from '@/helpers/atoms/Thread.atom'

 const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
   const deleteMessage = useSetAtom(deleteMessageAtom)
@@ -35,9 +42,19 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
   const messages = useAtomValue(getCurrentChatMessagesAtom)
   const { resendChatMessage } = useSendChatMessage()
   const clipboard = useClipboard({ timeout: 1000 })
+  const updateThreadLastMessage = useSetAtom(updateThreadStateLastMessageAtom)
+  const updateThread = useSetAtom(updateThreadAtom)

-  const onDeleteClick = async () => {
+  const onDeleteClick = useCallback(async () => {
     deleteMessage(message.id ?? '')
+    const lastResponse = messages
+      .filter(
+        (msg) =>
+          msg.id !== message.id && msg.role === ChatCompletionRole.Assistant
+      )
+      .slice(-1)[0]
+
     if (thread) {
       // Should also delete error messages to clear out the error state
       await extensionManager
@@ -48,8 +65,26 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
           (msg) => msg.id !== message.id && msg.status !== MessageStatus.Error
         )
       )
+      const updatedThread: Thread = {
+        ...thread,
+        metadata: {
+          ...thread.metadata,
+          lastMessage: messages.filter(
+            (msg) => msg.role === ChatCompletionRole.Assistant
+          )[
+            messages.filter((msg) => msg.role === ChatCompletionRole.Assistant)
+              .length - 1
+          ]?.content[0].text.value,
+        },
+      }
+
+      updateThreadLastMessage(thread.id, lastResponse?.content)
+      updateThread(updatedThread)
     }
-  }
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [messages])

   const onEditClick = async () => {
     setEditMessage(message.id ?? '')

View File

@@ -13,11 +13,12 @@ const ModelSegmentInfo: React.FC = () => {
   )

   const { author, modelName, downloads, modelUrl } = useMemo(() => {
-    const author =
-      (importingHuggingFaceRepoData?.cardData['model_creator'] as string) ??
-      'N/A'
-    const modelName =
-      (importingHuggingFaceRepoData?.cardData['model_name'] as string) ?? 'N/A'
+    const cardData = importingHuggingFaceRepoData?.cardData
+    const author = (cardData?.['model_creator'] ?? 'N/A') as string
+    const modelName = (cardData?.['model_name'] ??
+      importingHuggingFaceRepoData?.id ??
+      'N/A') as string
     const modelUrl = importingHuggingFaceRepoData?.modelUrl ?? 'N/A'
     const downloads = importingHuggingFaceRepoData?.downloads ?? 0