diff --git a/web-app/src/containers/ThinkingBlock.tsx b/web-app/src/containers/ThinkingBlock.tsx index 211fda9ff..bceaa4236 100644 --- a/web-app/src/containers/ThinkingBlock.tsx +++ b/web-app/src/containers/ThinkingBlock.tsx @@ -1,13 +1,26 @@ -import { ChevronDown, ChevronUp, Loader } from 'lucide-react' +import { ChevronDown, ChevronUp, Loader, Check } from 'lucide-react' import { create } from 'zustand' import { RenderMarkdown } from './RenderMarkdown' import { useAppState } from '@/hooks/useAppState' import { useTranslation } from '@/i18n/react-i18next-compat' import { extractThinkingContent } from '@/lib/utils' +import { useMemo, useState, useEffect } from 'react' +import { cn } from '@/lib/utils' + +// Define ThoughtStep type +type ThoughtStep = { + type: 'thought' | 'tool_call' | 'tool_output' | 'done' + content: string + metadata?: any + time?: number +} interface Props { text: string id: string + steps?: ThoughtStep[] + loading?: boolean + duration?: number } // Zustand store for thinking block state @@ -27,25 +40,193 @@ const useThinkingStore = create((set) => ({ })), })) -const ThinkingBlock = ({ id, text }: Props) => { +// Helper to format duration in seconds +const formatDuration = (ms: number) => { + // Only show seconds if duration is present and non-zero + if (ms > 0) { + return Math.round(ms / 1000) + } + return 0 +} + +// Function to safely extract thought paragraphs from streaming text +const extractStreamingParagraphs = (rawText: string): string[] => { + const cleanedContent = rawText.replace(/<\/?think>/g, '').trim() + + // Split by double newline (paragraph boundary) + let paragraphs = cleanedContent + .split(/\n\s*\n/) + .filter((s) => s.trim().length > 0) + + // If no explicit double newline paragraphs, treat single newlines as breaks for streaming visualization + if (paragraphs.length <= 1 && cleanedContent.includes('\n')) { + paragraphs = cleanedContent.split('\n').filter((s) => s.trim().length > 0) + } + + // Ensure we always return at 
least one item if content exists + if (paragraphs.length === 0 && cleanedContent.length > 0) { + return [cleanedContent] + } + + return paragraphs +} + +const ThinkingBlock = ({ + id, + text, + steps, + loading: propLoading, + duration, +}: Props) => { const thinkingState = useThinkingStore((state) => state.thinkingState) const setThinkingState = useThinkingStore((state) => state.setThinkingState) - const isStreaming = useAppState((state) => !!state.streamingContent) + const isStreamingApp = useAppState((state) => !!state.streamingContent) const { t } = useTranslation() - // Check for thinking formats + + // Determine actual loading state const hasThinkTag = text.includes('<think>') && !text.includes('</think>') const hasAnalysisChannel = text.includes('<|channel|>analysis<|message|>') && !text.includes('<|start|>assistant<|channel|>final<|message|>') - const loading = (hasThinkTag || hasAnalysisChannel) && isStreaming + + const loading = + propLoading ?? ((hasThinkTag || hasAnalysisChannel) && isStreamingApp) + + // Set default expansion state: expanded if loading, collapsed if done. const isExpanded = thinkingState[id] ?? (loading ? true : false) - const handleClick = () => { - const newExpandedState = !isExpanded - setThinkingState(id, newExpandedState) - } const thinkingContent = extractThinkingContent(text) - if (!thinkingContent) return null + + // If we are not loading AND there is no content/steps, hide the block entirely. 
+ const hasContent = !!thinkingContent || (steps && steps.length >= 1) + if (!loading && !hasContent) return null + + // --- Streaming Logic --- + const streamingParagraphs = extractStreamingParagraphs(thinkingContent) + const currentParagraph = + streamingParagraphs[streamingParagraphs.length - 1] || '' + + // State for replacement animation + const [visibleParagraph, setVisibleParagraph] = useState(currentParagraph) + const [transitioning, setTransitioning] = useState(false) + + useEffect(() => { + if (loading && currentParagraph !== visibleParagraph) { + // Start transition out (opacity: 0) + setTransitioning(true) + + // Simulate subtle easeIn replacement after a short delay + const timeout = setTimeout(() => { + setVisibleParagraph(currentParagraph) + // After content replacement, transition in (opacity: 1) + setTransitioning(false) + }, 150) + + return () => clearTimeout(timeout) + } else if (!loading) { + // Ensure the last state is captured when streaming stops + setVisibleParagraph(currentParagraph) + } + // Update immediately on initial render or if content is stable + if (!loading || streamingParagraphs.length <= 1) { + setVisibleParagraph(currentParagraph) + } + }, [currentParagraph, loading, visibleParagraph, streamingParagraphs.length]) + + // Check if we are currently streaming but haven't received enough content for a meaningful paragraph + const isInitialStreaming = + loading && currentParagraph.length === 0 && steps?.length === 0 + + // If loading but we have no content yet, hide the component until the first content piece arrives. + if (isInitialStreaming) { + return null + } + + const handleClick = () => { + // Only allow toggling expansion if not currently loading + if (!loading) { + setThinkingState(id, !isExpanded) + } + } + + // --- Rendering Functions for Expanded View --- + + const renderStepContent = (step: ThoughtStep, index: number) => { + if (step.type === 'done') { + const timeInSeconds = formatDuration(step.time ?? 
0) + + // TODO: Add translations + const timeDisplay = + timeInSeconds > 0 + ? `(${t('for')} ${timeInSeconds} ${t('seconds')})` + : '' + + return ( +
+ {/* Use Check icon for done state */} + + {t('common:done')} + {timeDisplay && ( + {timeDisplay} + )} +
+ ) + } + + let contentDisplay + if (step.type === 'tool_call') { + const args = step.metadata ? step.metadata : '' + contentDisplay = ( + <> +

+ Tool Call: {step.content} +

+ {args && ( +
+ +
+ )} + + ) + } else if (step.type === 'tool_output') { + contentDisplay = ( + <> +

Tool Output:

+
+ +
+ + ) + } else { + // thought + contentDisplay = ( + + ) + } + + return ( +
+ {contentDisplay} +
+ ) + } + + const headerTitle = useMemo(() => { + if (loading) return t('thinking') + const timeInSeconds = formatDuration(duration ?? 0) + + if (timeInSeconds > 0) { + // Ensure translated strings are used correctly + return `${t('thought')} ${t('for')} ${timeInSeconds} ${t('seconds')}` + } + return t('thought') + }, [loading, duration, t]) return (
{ {loading && ( )} -
- {isExpanded && ( + {/* Streaming/Condensed View (Visible ONLY when loading) */} + {loading && ( +
+ +
+ )} + + {/* Expanded View (Req 5) */} + {isExpanded && !loading && (
- +
+ {steps?.map((step, index) => ( +
+ {/* Bullet point/Icon position relative to line */} +
+ + {/* Step Content */} + {renderStepContent(step, index)} +
+ ))} +
)}
diff --git a/web-app/src/containers/ThreadContent.tsx b/web-app/src/containers/ThreadContent.tsx index e120544c1..472e2f557 100644 --- a/web-app/src/containers/ThreadContent.tsx +++ b/web-app/src/containers/ThreadContent.tsx @@ -28,6 +28,28 @@ import { useTranslation } from '@/i18n/react-i18next-compat' import { useModelProvider } from '@/hooks/useModelProvider' import { extractFilesFromPrompt } from '@/lib/fileMetadata' import { createImageAttachment } from '@/types/attachment' +import { extractThinkingContent } from '@/lib/utils' + +// Define ToolCall interface for type safety when accessing metadata +interface ToolCall { + tool?: { + id: number + function?: { + name: string + arguments?: object | string + } + } + response?: any + state?: 'pending' | 'completed' +} + +// Define ThoughtStep type +type ThoughtStep = { + type: 'thought' | 'tool_call' | 'tool_output' | 'done' + content: string + metadata?: any + time?: number +} const CopyButton = ({ text }: { text: string }) => { const [copied, setCopied] = useState(false) @@ -147,6 +169,9 @@ export const ThreadContent = memo( return { reasoningSegment: undefined, textSegment: text } }, [text]) + // Check if reasoning segment is actually present (i.e., non-empty string) + const hasReasoning = !!reasoningSegment + const getMessages = useMessages((state) => state.getMessages) const deleteMessage = useMessages((state) => state.deleteMessage) const sendMessage = useChat() @@ -164,7 +189,8 @@ export const ThreadContent = memo( deleteMessage(toSendMessage.thread_id, toSendMessage.id ?? 
'') // Extract text content and any attachments const rawText = - toSendMessage.content?.find((c) => c.type === 'text')?.text?.value || '' + toSendMessage.content?.find((c) => c.type === 'text')?.text?.value || + '' const { cleanPrompt: textContent } = extractFilesFromPrompt(rawText) const attachments = toSendMessage.content ?.filter((c) => (c.type === 'image_url' && c.image_url?.url) || false) @@ -226,6 +252,71 @@ export const ThreadContent = memo( | { avatar?: React.ReactNode; name?: React.ReactNode } | undefined + // START: Constructing allSteps for ThinkingBlock (Req 5) + const allSteps: ThoughtStep[] = useMemo(() => { + const steps: ThoughtStep[] = [] + + // 1. Extract thought paragraphs + const thoughtText = extractThinkingContent(reasoningSegment || '') + const thoughtParagraphs = thoughtText + ? thoughtText + .split(/\n\s*\n/) + .filter((s) => s.trim().length > 0) + .map((content) => ({ + type: 'thought' as const, + content: content.trim(), + })) + : [] + steps.push(...thoughtParagraphs) + + // 2. Extract tool steps + if (isToolCalls && item.metadata?.tool_calls) { + const toolCalls = item.metadata.tool_calls as ToolCall[] + for (const call of toolCalls) { + // Tool Call Step + steps.push({ + type: 'tool_call', + content: call.tool?.function?.name || 'Tool Call', + metadata: call.tool?.function?.arguments as string, // Arguments are typically a JSON string + }) + + // Tool Output Step + if (call.response) { + // Response object usually needs stringifying for display + const outputContent = + typeof call.response === 'string' + ? call.response + : JSON.stringify(call.response, null, 2) + + steps.push({ + type: 'tool_output', + content: outputContent, + }) + } + } + } + + // 3. 
Add Done step if not streaming + const totalTime = item.metadata?.totalThinkingTime as number | undefined + if (!isStreamingThisThread && (hasReasoning || isToolCalls)) { + steps.push({ + type: 'done', + content: 'Done', + time: totalTime, + }) + } + + return steps + }, [ + reasoningSegment, + isToolCalls, + item.metadata, + isStreamingThisThread, + t, + hasReasoning, + ]) + // END: Constructing allSteps + return ( {item.role === 'user' && ( @@ -360,14 +451,19 @@ export const ThreadContent = memo( )} - {reasoningSegment && ( + {hasReasoning && ( )} @@ -376,7 +472,9 @@ export const ThreadContent = memo( components={linkComponents} /> - {isToolCalls && item.metadata?.tool_calls ? ( + {/* Only render external ToolCallBlocks if there is NO dedicated reasoning block + (i.e., when tools are streamed as standalone output and are NOT captured by ThinkingBlock). */} + {!hasReasoning && isToolCalls && item.metadata?.tool_calls ? ( <> {(item.metadata.tool_calls as ToolCall[]).map((toolCall) => ( { const setModelLoadError = useModelLoad((state) => state.setModelLoadError) const router = useRouter() - const getCurrentThread = useCallback(async (projectId?: string) => { - let currentThread = retrieveThread() + const getCurrentThread = useCallback( + async (projectId?: string) => { + let currentThread = retrieveThread() - // Check if we're in temporary chat mode - const isTemporaryMode = window.location.search.includes(`${TEMPORARY_CHAT_QUERY_ID}=true`) - - // Clear messages for existing temporary thread on reload to ensure fresh start - if (isTemporaryMode && currentThread?.id === TEMPORARY_CHAT_ID) { - setMessages(TEMPORARY_CHAT_ID, []) - } - - if (!currentThread) { - // Get prompt directly from store when needed - const currentPrompt = usePrompt.getState().prompt - const currentAssistant = useAssistant.getState().currentAssistant - const assistants = useAssistant.getState().assistants - const selectedModel = useModelProvider.getState().selectedModel - const selectedProvider = 
useModelProvider.getState().selectedProvider - - // Get project metadata if projectId is provided - let projectMetadata: { id: string; name: string; updated_at: number } | undefined - if (projectId) { - const project = await serviceHub.projects().getProjectById(projectId) - if (project) { - projectMetadata = { - id: project.id, - name: project.name, - updated_at: project.updated_at, - } - } - } - - currentThread = await createThread( - { - id: selectedModel?.id ?? defaultModel(selectedProvider), - provider: selectedProvider, - }, - isTemporaryMode ? 'Temporary Chat' : currentPrompt, - assistants.find((a) => a.id === currentAssistant?.id) || assistants[0], - projectMetadata, - isTemporaryMode // pass temporary flag + // Check if we're in temporary chat mode + const isTemporaryMode = window.location.search.includes( + `${TEMPORARY_CHAT_QUERY_ID}=true` ) - // Clear messages for temporary chat to ensure fresh start on reload + // Clear messages for existing temporary thread on reload to ensure fresh start if (isTemporaryMode && currentThread?.id === TEMPORARY_CHAT_ID) { setMessages(TEMPORARY_CHAT_ID, []) } - // Set flag for temporary chat navigation - if (currentThread.id === TEMPORARY_CHAT_ID) { - sessionStorage.setItem('temp-chat-nav', 'true') - } + if (!currentThread) { + // Get prompt directly from store when needed + const currentPrompt = usePrompt.getState().prompt + const currentAssistant = useAssistant.getState().currentAssistant + const assistants = useAssistant.getState().assistants + const selectedModel = useModelProvider.getState().selectedModel + const selectedProvider = useModelProvider.getState().selectedProvider - router.navigate({ - to: route.threadsDetail, - params: { threadId: currentThread.id }, - }) - } - return currentThread - }, [createThread, retrieveThread, router, setMessages, serviceHub]) + // Get project metadata if projectId is provided + let projectMetadata: + | { id: string; name: string; updated_at: number } + | undefined + if 
(projectId) { + const project = await serviceHub.projects().getProjectById(projectId) + if (project) { + projectMetadata = { + id: project.id, + name: project.name, + updated_at: project.updated_at, + } + } + } + + currentThread = await createThread( + { + id: selectedModel?.id ?? defaultModel(selectedProvider), + provider: selectedProvider, + }, + isTemporaryMode ? 'Temporary Chat' : currentPrompt, + assistants.find((a) => a.id === currentAssistant?.id) || + assistants[0], + projectMetadata, + isTemporaryMode // pass temporary flag + ) + + // Clear messages for temporary chat to ensure fresh start on reload + if (isTemporaryMode && currentThread?.id === TEMPORARY_CHAT_ID) { + setMessages(TEMPORARY_CHAT_ID, []) + } + + // Set flag for temporary chat navigation + if (currentThread.id === TEMPORARY_CHAT_ID) { + sessionStorage.setItem('temp-chat-nav', 'true') + } + + router.navigate({ + to: route.threadsDetail, + params: { threadId: currentThread.id }, + }) + } + return currentThread + }, + [createThread, retrieveThread, router, setMessages, serviceHub] + ) const restartModel = useCallback( async (provider: ProviderObject, modelId: string) => { @@ -297,7 +305,9 @@ export const useChat = () => { updateAttachmentProcessing(img.name, 'processing') } // Upload image, get id/URL - const res = await serviceHub.uploads().ingestImage(activeThread.id, img) + const res = await serviceHub + .uploads() + .ingestImage(activeThread.id, img) processedAttachments.push({ ...img, id: res.id, @@ -313,7 +323,9 @@ export const useChat = () => { updateAttachmentProcessing(img.name, 'error') } const desc = err instanceof Error ? 
err.message : String(err) - toast.error('Failed to ingest image attachment', { description: desc }) + toast.error('Failed to ingest image attachment', { + description: desc, + }) return } } @@ -394,6 +406,9 @@ export const useChat = () => { updateThreadTimestamp(activeThread.id) usePrompt.getState().setPrompt('') const selectedModel = useModelProvider.getState().selectedModel + + const startTime = Date.now() // Start timer here + try { if (selectedModel?.id) { updateLoadingModel(true) @@ -705,14 +720,22 @@ export const useChat = () => { throw new Error('No response received from the model') } + const totalThinkingTime = Date.now() - startTime // Calculate total elapsed time + // Create a final content object for adding to the thread + const messageMetadata: Record<string, unknown> = { + tokenSpeed: useAppState.getState().tokenSpeed, + assistant: currentAssistant, + } + + if (accumulatedText.includes('<think>') || toolCalls.length > 0) { + messageMetadata.totalThinkingTime = totalThinkingTime + } + const finalContent = newAssistantThreadContent( activeThread.id, accumulatedText, - { - tokenSpeed: useAppState.getState().tokenSpeed, - assistant: currentAssistant, - } + messageMetadata ) builder.addAssistantMessage(accumulatedText, undefined, toolCalls) @@ -730,6 +753,14 @@ export const useChat = () => { allowAllMCPPermissions, isProactiveMode ) + + if (updatedMessage && updatedMessage.metadata) { + if (finalContent.metadata?.totalThinkingTime !== undefined) { + updatedMessage.metadata.totalThinkingTime = + finalContent.metadata.totalThinkingTime + } + } + addMessage(updatedMessage ?? finalContent) updateStreamingContent(emptyThreadContent) updatePromptProgress(undefined)