feat: tool use

Authored by Louis on 2025-05-16 00:27:20 +07:00; committed by Faisal Amir
parent 66a4ac420b
commit 95f90f601d
3 changed files with 198 additions and 37 deletions

View File

@@ -22,8 +22,10 @@ import { useGeneralSetting } from '@/hooks/useGeneralSetting'
import { useModelProvider } from '@/hooks/useModelProvider'
import {
  emptyThreadContent,
+  extractToolCall,
  newAssistantThreadContent,
  newUserThreadContent,
+  postMessageProcessing,
  sendCompletion,
  startModel,
} from '@/lib/completion'
@@ -37,6 +39,8 @@ import { MovingBorder } from './MovingBorder'
import { MCPTool } from '@/types/completion'
import { listen } from '@tauri-apps/api/event'
import { SystemEvent } from '@/types/events'
+import { CompletionMessagesBuilder } from '@/lib/messages'
+import { ChatCompletionMessageToolCall } from 'openai/resources'

type ChatInputProps = {
  className?: string
@@ -57,12 +61,10 @@ const ChatInput = ({ className, showSpeedToken = true }: ChatInputProps) => {
    useModelProvider()
  const { getCurrentThread: retrieveThread, createThread } = useThreads()
-  const { streamingContent, updateStreamingContent } = useAppState()
+  const { streamingContent, updateStreamingContent, updateLoadingModel } =
+    useAppState()
  const { addMessage } = useMessages()
  const router = useRouter()
-  const { updateLoadingModel } = useAppState()

  const provider = useMemo(() => {
    return getProviderByName(selectedProvider)
@@ -104,9 +106,7 @@ const ChatInput = ({ className, showSpeedToken = true }: ChatInputProps) => {
      // Unsubscribe from the event when the component unmounts
      unsubscribe = unsub
    })
-    return () => {
-      unsubscribe()
-    }
+    return unsubscribe
  }, [])

  useEffect(() => {
@@ -146,7 +146,6 @@ const ChatInput = ({ className, showSpeedToken = true }: ChatInputProps) => {
    if (!activeThread || !provider) return
    updateStreamingContent(emptyThreadContent)
    addMessage(newUserThreadContent(activeThread.id, prompt))
-
    setPrompt('')

    try {
@@ -158,18 +157,30 @@ const ChatInput = ({ className, showSpeedToken = true }: ChatInputProps) => {
        updateLoadingModel(false)
      }

-      const completion = await sendCompletion(
-        activeThread,
-        provider,
-        prompt,
-        tools
-      )
-      if (!completion) throw new Error('No completion received')
-      let accumulatedText = ''
-      try {
+      const builder = new CompletionMessagesBuilder()
+      // REMARK: Would it be possible to not attach the entire message history to the request?
+      // TODO: If not, amend the message history here
+      builder.addUserMessage(prompt)
+
+      let isCompleted = false
+      while (!isCompleted) {
+        const completion = await sendCompletion(
+          activeThread,
+          provider,
+          builder.getMessages(),
+          tools
+        )
+        if (!completion) throw new Error('No completion received')
+        let accumulatedText = ''
+        const currentCall: ChatCompletionMessageToolCall | null = null
+        const toolCalls: ChatCompletionMessageToolCall[] = []
        for await (const part of completion) {
          const delta = part.choices[0]?.delta?.content || ''
+          if (part.choices[0]?.delta?.tool_calls) {
+            extractToolCall(part, currentCall, toolCalls)
+          }
          if (delta) {
            accumulatedText += delta
            // Create a new object each time to avoid reference issues
@@ -182,17 +193,17 @@ const ChatInput = ({ className, showSpeedToken = true }: ChatInputProps) => {
            await new Promise((resolve) => setTimeout(resolve, 0))
          }
        }
-      } catch (error) {
-        console.error('Error during streaming:', error)
-      } finally {
        // Create a final content object for adding to the thread
-        if (accumulatedText) {
-          const finalContent = newAssistantThreadContent(
-            activeThread.id,
-            accumulatedText
-          )
-          addMessage(finalContent)
-        }
+        const finalContent = newAssistantThreadContent(
+          activeThread.id,
+          accumulatedText
+        )
+        builder.addAssistantMessage(accumulatedText, undefined, toolCalls)
+        const updatedMessage = await postMessageProcessing(toolCalls, builder, finalContent)
+        console.log(updatedMessage)
+        addMessage(updatedMessage ?? finalContent)
+        isCompleted = !toolCalls.length
      }
    } catch (error) {
      console.error('Error sending message:', error)
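
The new send flow above is the heart of the feature: request a completion, stream it while collecting tool-call fragments, execute the requested tools, append the results to the history, and loop until the model answers without asking for tools. A minimal sketch of that control flow, with hypothetical send and runTool helpers standing in for sendCompletion and the MCP tool bridge (an illustration, not the component code):

// Sketch only: `send` and `runTool` are invented stand-ins.
type ToolCall = { id: string; name: string; args: string }
type Msg =
  | { role: 'user' | 'assistant'; content: string }
  | { role: 'tool'; content: string; tool_call_id: string }

async function chatWithTools(
  send: (history: Msg[]) => Promise<{ text: string; toolCalls: ToolCall[] }>,
  runTool: (name: string, args: unknown) => Promise<string>,
  prompt: string
): Promise<string> {
  const history: Msg[] = [{ role: 'user', content: prompt }]
  for (;;) {
    const { text, toolCalls } = await send(history)
    history.push({ role: 'assistant', content: text })
    // No tool calls means the model has produced its final answer.
    if (toolCalls.length === 0) return text
    for (const call of toolCalls) {
      const result = await runTool(call.name, JSON.parse(call.args || '{}'))
      // Feed each tool result back so the next completion can use it.
      history.push({ role: 'tool', content: result, tool_call_id: call.id })
    }
  }
}

The real component additionally streams partial text into the UI between iterations; the sketch keeps only the loop structure.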

View File

@@ -8,7 +8,9 @@ import {
} from '@janhq/core'
import { invoke } from '@tauri-apps/api/core'
import {
+  ChatCompletionMessageParam,
  ChatCompletionTool,
+  CompletionResponseChunk,
  models,
  StreamCompletionResponse,
  TokenJS,
@@ -16,6 +18,9 @@ import {
import { ulid } from 'ulidx'
import { normalizeProvider } from './models'
import { MCPTool } from '@/types/completion'
+import { CompletionMessagesBuilder } from './messages'
+import { ChatCompletionMessageToolCall } from 'openai/resources'

/**
 * @fileoverview Helper functions for creating thread content.
 * These functions are used to create thread content objects
@@ -97,13 +102,13 @@ export const emptyThreadContent: ThreadMessage = {
 * @fileoverview Helper function to send a completion request to the model provider.
 * @param thread
 * @param provider
- * @param prompt
+ * @param messages
 * @returns
 */
export const sendCompletion = async (
  thread: Thread,
  provider: ModelProvider,
-  prompt: string,
+  messages: ChatCompletionMessageParam[],
  tools: MCPTool[] = []
): Promise<StreamCompletionResponse | undefined> => {
  if (!thread?.model?.id || !provider) return undefined
@@ -124,13 +129,9 @@ export const sendCompletion = async (
    stream: true,
    provider: providerName,
    model: thread.model?.id,
-    messages: [
-      {
-        role: 'user',
-        content: prompt,
-      },
-    ],
+    messages,
    tools: normalizeTools(tools),
+    tool_choice: tools.length ? 'auto' : undefined,
  })
  return completion
}
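
With this hunk, sendCompletion forwards the caller-built history instead of wrapping a single prompt, and tool_choice is only set when tools are actually present (some providers reject tool_choice without tools). The request it issues is roughly the following sketch, with invented field values; the client is assumed to follow token.js's OpenAI-compatible chat.completions.create shape:

// Illustrative sketch: provider, model, and tool are made up.
import { TokenJS } from 'token.js'

const tokenJs = new TokenJS()
const completion = await tokenJs.chat.completions.create({
  stream: true,
  provider: 'openai', // illustrative provider name
  model: 'gpt-4o',    // illustrative model id
  // Full conversation history, not a single user prompt:
  messages: [{ role: 'user', content: 'What is the weather in Hanoi?' }],
  tools: [
    {
      type: 'function',
      function: {
        name: 'get_weather',
        description: 'Get current weather for a city',
        parameters: { type: 'object', properties: { city: { type: 'string' } } },
      },
    },
  ],
  tool_choice: 'auto',
})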
@@ -138,6 +139,8 @@ export const sendCompletion = async (
/**
 * @fileoverview Helper function to start a model.
 * This function loads the model from the provider.
+ * @deprecated This function is deprecated and will be removed in the future.
+ * Provider's chat function will handle loading the model.
 * @param provider
 * @param model
 * @returns
@@ -170,8 +173,8 @@ export const stopModel = async (
/**
 * @fileoverview Helper function to normalize tools for the chat completion request.
 * This function converts the MCPTool objects to ChatCompletionTool objects.
 * @param tools
 * @returns
 */
export const normalizeTools = (tools: MCPTool[]): ChatCompletionTool[] => {
  return tools.map((tool) => ({
@@ -184,3 +187,114 @@ export const normalizeTools = (tools: MCPTool[]): ChatCompletionTool[] => {
    },
  }))
}
+
+/**
+ * @fileoverview Helper function to extract tool calls from the completion response.
+ * @param part
+ * @param currentCall
+ * @param calls
+ */
+export const extractToolCall = (
+  part: CompletionResponseChunk,
+  currentCall: ChatCompletionMessageToolCall | null,
+  calls: ChatCompletionMessageToolCall[]
+) => {
+  const deltaToolCalls = part.choices[0].delta.tool_calls
+  // Handle the beginning of a new tool call
+  if (deltaToolCalls?.[0]?.index !== undefined && deltaToolCalls[0]?.function) {
+    const index = deltaToolCalls[0].index
+    // Create new tool call if this is the first chunk for it
+    if (!calls[index]) {
+      calls[index] = {
+        id: deltaToolCalls[0]?.id || '',
+        function: {
+          name: deltaToolCalls[0]?.function?.name || '',
+          arguments: deltaToolCalls[0]?.function?.arguments || '',
+        },
+        type: 'function',
+      }
+      currentCall = calls[index]
+    } else {
+      // Continuation of existing tool call
+      currentCall = calls[index]
+      // Append to function name or arguments if they exist in this chunk
+      if (deltaToolCalls[0]?.function?.name) {
+        currentCall!.function.name += deltaToolCalls[0].function.name
+      }
+      if (deltaToolCalls[0]?.function?.arguments) {
+        currentCall!.function.arguments += deltaToolCalls[0].function.arguments
+      }
+    }
+  }
+  return calls
+}
+
+/**
+ * @fileoverview Helper function to process the completion response.
+ * @param calls
+ * @param builder
+ * @param message
+ */
+export const postMessageProcessing = async (
+  calls: ChatCompletionMessageToolCall[],
+  builder: CompletionMessagesBuilder,
+  message: ThreadMessage
+) => {
+  // Handle completed tool calls
+  if (calls.length) {
+    for (const toolCall of calls) {
+      const toolId = ulid()
+      const toolCallsMetadata =
+        message.metadata?.tool_calls &&
+        Array.isArray(message.metadata?.tool_calls)
+          ? message.metadata?.tool_calls
+          : []
+      message.metadata = {
+        ...(message.metadata ?? {}),
+        tool_calls: [
+          ...toolCallsMetadata,
+          {
+            tool: {
+              ...(toolCall as object),
+              id: toolId,
+            },
+            response: undefined,
+            state: 'pending',
+          },
+        ],
+      }
+      const result = await window.core.api.callTool({
+        toolName: toolCall.function.name,
+        arguments: toolCall.function.arguments.length
+          ? JSON.parse(toolCall.function.arguments)
+          : {},
+      })
+      if (result.error) break
+      message.metadata = {
+        ...(message.metadata ?? {}),
+        tool_calls: [
+          ...toolCallsMetadata,
+          {
+            tool: {
+              ...toolCall,
+              id: toolId,
+            },
+            response: result,
+            state: 'ready',
+          },
+        ],
+      }
+      builder.addToolMessage(result.content[0]?.text ?? '', toolCall.id)
+    }
+    // Return the message with updated tool-call metadata
+    return message
+  }
+}
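
extractToolCall exists because streaming providers emit a tool call in fragments: the first chunk carries the id and function name, and later chunks append pieces of the JSON arguments string. A self-contained walkthrough of that accumulation with invented chunk data (the chunk shape mirrors the OpenAI streaming delta format):

// Illustration only: one call arrives split across three chunks.
type StreamedToolCall = {
  index: number
  id?: string
  function?: { name?: string; arguments?: string }
}
type Chunk = { choices: { delta: { tool_calls?: StreamedToolCall[] } }[] }

const chunks: Chunk[] = [
  { choices: [{ delta: { tool_calls: [{ index: 0, id: 'call_1', function: { name: 'get_weather', arguments: '' } }] } }] },
  { choices: [{ delta: { tool_calls: [{ index: 0, function: { arguments: '{"city":' } }] } }] },
  { choices: [{ delta: { tool_calls: [{ index: 0, function: { arguments: '"Hanoi"}' } }] } }] },
]

const calls: { id: string; type: 'function'; function: { name: string; arguments: string } }[] = []
for (const part of chunks) {
  const delta = part.choices[0]?.delta.tool_calls?.[0]
  if (!delta) continue
  if (!calls[delta.index]) {
    // First fragment: create the entry with whatever fields are present.
    calls[delta.index] = {
      id: delta.id ?? '',
      type: 'function',
      function: { name: delta.function?.name ?? '', arguments: delta.function?.arguments ?? '' },
    }
  } else {
    // Later fragments only carry pieces; append them to the same entry.
    if (delta.function?.name) calls[delta.index].function.name += delta.function.name
    if (delta.function?.arguments) calls[delta.index].function.arguments += delta.function.arguments
  }
}
// calls[0].function.arguments is now '{"city":"Hanoi"}'

Only after the stream ends is the accumulated arguments string complete enough to JSON.parse, which is why postMessageProcessing runs after the for-await loop rather than per chunk.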

View File

@@ -0,0 +1,36 @@
+import { ChatCompletionMessageParam } from 'token.js'
+import { ChatCompletionMessageToolCall } from 'openai/resources'
+
+export class CompletionMessagesBuilder {
+  private messages: ChatCompletionMessageParam[] = []
+
+  constructor() {}
+
+  addUserMessage(content: string) {
+    this.messages.push({
+      role: 'user',
+      content: content,
+    })
+  }
+
+  addAssistantMessage(
+    content: string,
+    refusal?: string,
+    calls?: ChatCompletionMessageToolCall[]
+  ) {
+    this.messages.push({
+      role: 'assistant',
+      content: content,
+      refusal: refusal,
+      tool_calls: calls,
+    })
+  }
+
+  addToolMessage(content: string, toolCallId: string) {
+    this.messages.push({
+      role: 'tool',
+      content: content,
+      tool_call_id: toolCallId,
+    })
+  }
+
+  getMessages(): ChatCompletionMessageParam[] {
+    return this.messages
+  }
+}
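
For reference, this is the history shape the builder accumulates over one tool round trip; the tool call and result values here are invented for illustration:

import { CompletionMessagesBuilder } from '@/lib/messages'

const builder = new CompletionMessagesBuilder()
builder.addUserMessage('What is the weather in Hanoi?')
// First completion streams back a tool call instead of an answer.
builder.addAssistantMessage('', undefined, [
  {
    id: 'call_1',
    type: 'function',
    function: { name: 'get_weather', arguments: '{"city":"Hanoi"}' },
  },
])
// The tool result is appended under the matching tool_call_id.
builder.addToolMessage('{"temp_c": 31}', 'call_1')
// getMessages() now returns user -> assistant(tool_calls) -> tool,
// ready to send back for the follow-up completion.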