fix: correct thinking time and chat translation keys

The update fixes how total thinking time is calculated during a chat message flow.
Previously, the elapsed time from the initial completion was incorrectly added to
the overall thinking time, which inflated the metric. The new logic splits the
computation into separate phases (initial completion, tool execution, and
follow-up completions) and accumulates each phase into `totalThinkingTime`,
ensuring an accurate measurement.
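
For reference, the accumulation pattern is roughly the sketch below (illustrative only; the helper name `addPhaseTime` and the metadata shape are assumptions, not code from this commit):

```ts
// Illustrative sketch: each phase measures only its own elapsed time and adds
// it to the running total stored on the message metadata, instead of
// re-deriving the total from the original start timestamp.
type MessageMetadata = Record<string, unknown> & { totalThinkingTime?: number }

function addPhaseTime(metadata: MessageMetadata, phaseStartMs: number): number {
  const previous = metadata.totalThinkingTime ?? 0
  const total = previous + (Date.now() - phaseStartMs)
  metadata.totalThinkingTime = total
  return total
}

// Hypothetical flow across the three phases:
//   const t0 = Date.now(); /* initial completion */   addPhaseTime(meta, t0)
//   const t1 = Date.now(); /* tool execution */       addPhaseTime(meta, t1)
//   const t2 = Date.now(); /* follow-up completion */ addPhaseTime(meta, t2)
```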

Additionally, translation keys for chat components are now namespaced with
`chat:` to avoid collisions and clearly indicate the context in which they
are used. The diff also removes a stray comment line and keeps metadata
updates consistent across recursive calls.
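
The namespacing change amounts to prefixing every chat-component key, e.g. `t('thinking')` becomes `t('chat:thinking')`. Below is a minimal sketch of how such namespaced keys resolve, assuming an i18next-style setup (the app's actual i18n wiring may differ):

```ts
import i18next from 'i18next'

// The ':' separator scopes a key to a namespace, so 'chat:thinking' can only
// resolve inside the 'chat' bundle and cannot collide with a 'thinking' key
// defined in another namespace.
await i18next.init({
  lng: 'en',
  ns: ['chat'],
  resources: {
    en: {
      chat: { thinking: 'Thinking', calling_tool: 'Calling a tool', for: 'for', seconds: 'seconds' },
    },
  },
})

const seconds = 3
console.log(i18next.t('chat:thinking'))       // "Thinking"
console.log(i18next.t('chat:calling_tool'))   // "Calling a tool"
console.log(`(${i18next.t('chat:for')} ${seconds} ${i18next.t('chat:seconds')})`) // "(for 3 seconds)"
```
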
Author: Akarshan
Date:   2025-10-29 20:09:56 +05:30
Parent: 0c80950226
Commit: 97c94079a9
4 changed files with 40 additions and 9 deletions

File 1 of 4: ThinkingBlock component

@@ -10,7 +10,7 @@ import ImageModal from '@/containers/dialogs/ImageModal'
// Define ReActStep type (Reasoning-Action Step)
type ReActStep = {
type: 'reasoning' | 'tool_call' | 'tool_output' | 'done' // Changed 'thought' to 'reasoning'
type: 'reasoning' | 'tool_call' | 'tool_output' | 'done'
content: string
metadata?: any
time?: number
@@ -114,7 +114,7 @@ const ThinkingBlock = ({
<div className="mb-4 rounded-lg bg-main-view-fg/4 border border-dashed border-main-view-fg/10 p-2 flex items-center gap-3">
<Loader className="size-4 animate-spin text-main-view-fg/60" />
<span className="font-medium text-main-view-fg/80">
{t('thinking')}
{t('chat:thinking')}
</span>
</div>
</div>
@@ -144,7 +144,7 @@ const ThinkingBlock = ({
const timeInSeconds = formatDuration(step.time ?? 0)
const timeDisplay =
timeInSeconds > 0
? `(${t('for')} ${timeInSeconds} ${t('seconds')})`
? `(${t('chat:for')} ${timeInSeconds} ${t('chat:seconds')})`
: ''
return (
@@ -259,7 +259,7 @@ const ThinkingBlock = ({
activeStep.type === 'tool_call' ||
activeStep.type === 'tool_output'
) {
return `${t('calling_tool')}` // Use a specific translation key for tool
return `${t('chat:calling_tool')}` // Use a specific translation key for tool
} else if (activeStep.type === 'reasoning') {
return `${t('chat:thinking')}` // Use the generic thinking key
}

File 2 of 4: useChat hook

@@ -755,7 +755,9 @@ export const useChat = () => {
throw new Error('No response received from the model')
}
const totalThinkingTime = Date.now() - startTime // Calculate total elapsed time
const completionFinishTime = Date.now()
// Calculate the time taken for the initial completion (streaming or non-streaming)
const initialCompletionTime = completionFinishTime - startTime
const messageMetadata: Record<string, any> = {
tokenSpeed: useAppState.getState().tokenSpeed,
@@ -764,7 +766,7 @@
}
if (accumulatedText.includes('<think>') || toolCalls.length > 0) {
messageMetadata.totalThinkingTime = totalThinkingTime
messageMetadata.totalThinkingTime = initialCompletionTime
}
// This is the message object that will be built upon by postMessageProcessing

File 3 of 4: tool-call post-processing utilities (extractToolCall / postMessageProcessing)

@@ -406,7 +406,6 @@ export const extractToolCall = (
}
return calls
}
/**
* Helper function to check if a tool call is a browser MCP tool
* @param toolName - The name of the tool
@@ -558,6 +557,10 @@ export const postMessageProcessing = async (
currentStepCount: number = 0,
isProactiveMode: boolean = false
): Promise<ThreadMessage> => {
// Initialize/get the current total thinking time from metadata
// This value is passed from sendMessage (initial completion time) or previous recursive call
let currentTotalTime = (message.metadata?.totalThinkingTime as number) ?? 0
// Handle completed tool calls
if (calls.length > 0) {
// Check limit BEFORE processing
@@ -604,6 +607,7 @@
message.metadata = {
...(message.metadata ?? {}),
tool_calls: currentToolCalls,
totalThinkingTime: currentTotalTime,
}
if (updateStreamingUI) updateStreamingUI({ ...message }) // Show pending call
@@ -634,6 +638,8 @@
)
: true)
const toolExecutionStartTime = Date.now()
const { promise, cancel } = isRagTool
? ragFeatureAvailable
? {
@@ -683,6 +689,8 @@
error: 'disallowed',
}
const toolExecutionTime = Date.now() - toolExecutionStartTime
if (typeof result === 'string') {
result = {
content: [{ type: 'text', text: result }],
@@ -690,9 +698,15 @@
}
}
currentTotalTime += toolExecutionTime
// Update the entry in the metadata array
toolCallEntry.response = result
toolCallEntry.state = 'ready'
message.metadata = {
...(message.metadata ?? {}),
totalThinkingTime: currentTotalTime,
}
if (updateStreamingUI) updateStreamingUI({ ...message }) // Show result
const streamEvents = (message.metadata?.streamEvents || []) as any[]
@@ -738,6 +752,8 @@
try {
const messagesWithToolResults = builder.getMessages()
const followUpStartTime = Date.now()
const followUpCompletion = await sendCompletion(
thread,
provider,
@@ -748,6 +764,8 @@
{}
)
let streamFinishTime = Date.now()
if (followUpCompletion) {
let followUpText = ''
const newToolCalls: ChatCompletionMessageToolCall[] = []
@@ -766,6 +784,7 @@
}
if (textContent?.text) textContent.text.value += followUpText
if (updateStreamingUI) updateStreamingUI({ ...message })
streamFinishTime = Date.now()
} else {
// Handle streaming response
const reasoningProcessor = new ReasoningProcessor()
@@ -777,7 +796,6 @@
const deltaContent = chunk.choices[0]?.delta?.content || ''
if (textContent?.text) {
// if (deltaReasoning) textContent.text.value += deltaReasoning
if (deltaContent) {
textContent.text.value += deltaContent
followUpText += deltaContent
@@ -810,6 +828,8 @@
message.metadata = {
...(message.metadata ?? {}),
streamEvents: streamEvents,
totalThinkingTime:
currentTotalTime + (Date.now() - followUpStartTime), // Optimistic update
}
if (updateStreamingUI) {
@@ -822,7 +842,7 @@
updateStreamingUI(uiMessage)
}
}
streamFinishTime = Date.now()
if (textContent?.text && updateStreamingUI) {
// Final UI update after streaming completes
const uiMessage: ThreadMessage = {
@@ -833,9 +853,17 @@
}
}
const followUpTotalTime = streamFinishTime - followUpStartTime
currentTotalTime += followUpTotalTime //
message.metadata = {
...(message.metadata ?? {}),
totalThinkingTime: currentTotalTime,
}
// Recursively process new tool calls if any
if (newToolCalls.length > 0) {
builder.addAssistantMessage(followUpText, undefined, newToolCalls)
// Recursive call continues accumulation on the same message object
await postMessageProcessing(
newToolCalls,
builder,

File 4 of 4: English chat translation strings

@@ -13,6 +13,7 @@
"tool_called": "Called tools",
"calling_tool": "Calling a tool",
"thinking": "Thinking",
"thought": "Thought",
"for": "for",
"seconds": "seconds"
}