chore: fix lint

Louis 2025-05-15 17:27:46 +07:00
parent 2418095656
commit 9063f7e6c4
GPG Key ID: 44FA9F4D33C37DE2
3 changed files with 14 additions and 15 deletions

View File

@@ -24,11 +24,7 @@ import {
   ChatCompletionTool,
   ChatCompletionMessageToolCall,
 } from 'openai/resources/chat'
-import {
-  CompletionResponse,
-  StreamCompletionResponse,
-  TokenJS,
-} from 'token.js'
+import { CompletionResponse, StreamCompletionResponse, TokenJS } from 'token.js'
 import { ulid } from 'ulidx'

 import { modelDropdownStateAtom } from '@/containers/ModelDropdown'
@@ -225,7 +221,7 @@ export default function useSendChatMessage(
       },
       activeThread,
       messages ?? currentMessages,
-      (tools && tools.length) ? tools : undefined,
+      tools && tools.length ? tools : undefined
     ).addSystemMessage(activeAssistant.instructions)

     requestBuilder.pushMessage(prompt, base64Blob, fileUpload)
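
Note: the tools guard above is only reflowed by the lint pass, but its truthiness behavior is easy to misread. A minimal, self-contained sketch (with a hypothetical Tool type, not the real ChatCompletionTool shape) of how the check normalizes the argument:

// Hypothetical sketch: an absent or empty tools array collapses to undefined,
// presumably so the completion request omits the field rather than sending
// an empty `tools: []`.
type Tool = { name: string }

function normalizeTools(tools?: Tool[]): Tool[] | undefined {
  return tools && tools.length ? tools : undefined
}

normalizeTools(undefined)         // undefined
normalizeTools([])                // undefined ([] && 0 evaluates to 0, falsy)
normalizeTools([{ name: 'web' }]) // [{ name: 'web' }]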
@@ -265,8 +261,9 @@ export default function useSendChatMessage(
     }

     // Start Model if not started
-    const isCortex = modelRequest.engine == InferenceEngine.cortex ||
-      modelRequest.engine == InferenceEngine.cortex_llamacpp
+    const isCortex =
+      modelRequest.engine == InferenceEngine.cortex ||
+      modelRequest.engine == InferenceEngine.cortex_llamacpp
     const modelId = selectedModel?.id ?? activeAssistantRef.current?.model.id

     if (base64Blob) {
@@ -297,9 +294,10 @@ export default function useSendChatMessage(
     extendBuiltInEngineModels(tokenJS, provider, modelId)

     // llama.cpp currently does not support streaming when tools are used.
-    const useStream = (requestBuilder.tools && isCortex) ?
-      false :
-      modelRequest.parameters?.stream
+    const useStream =
+      requestBuilder.tools && isCortex
+        ? false
+        : modelRequest.parameters?.stream
     let parentMessageId: string | undefined

     while (!isDone) {
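
The comment in the hunk above carries the one piece of real logic touched by this reflow: streaming must be disabled when tools are sent to the cortex/llama.cpp engines. A minimal sketch of the decision (the resolveStream helper name is hypothetical; the real code inlines this as the useStream ternary):

// Hypothetical helper mirroring the ternary above: force streaming off only
// when tools are present AND the engine is cortex / cortex_llamacpp;
// otherwise defer to the request's own `stream` parameter.
function resolveStream(
  hasTools: boolean,
  isCortex: boolean,
  requestedStream?: boolean
): boolean | undefined {
  return hasTools && isCortex ? false : requestedStream
}

resolveStream(true, true, true)   // false: llama.cpp cannot stream tool calls
resolveStream(true, false, true)  // true: remote engines may stream with tools
resolveStream(false, true, true)  // true: no tools, streaming is fine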
@@ -577,4 +575,4 @@ export default function useSendChatMessage(
     sendChatMessage,
     resendChatMessage,
   }
 }

View File

@@ -144,10 +144,10 @@ const ModelPage = ({ model, onGoBack }: Props) => {
             {model.type !== 'cloud' && (
               <>
                 <th></th>
-                <th className="max-w-32 hidden px-6 py-3 text-left text-sm font-semibold sm:table-cell">
+                <th className="hidden max-w-32 px-6 py-3 text-left text-sm font-semibold sm:table-cell">
                   Format
                 </th>
-                <th className="max-w-32 hidden px-6 py-3 text-left text-sm font-semibold sm:table-cell">
+                <th className="hidden max-w-32 px-6 py-3 text-left text-sm font-semibold sm:table-cell">
                   Size
                 </th>
               </>

View File

@@ -168,7 +168,8 @@ const RemoteEngineSettings = ({
                 <p className="mt-1 text-[hsla(var(--text-secondary))]">
                   {!customEngineLogo ? (
                     <span>
-                      Enter your authentication key to activate this engine.{' '}
+                      Enter your authentication key to activate this
+                      engine.{' '}
                     </span>
                   ) : (
                     <span>