Compare commits

..

1 Commits

Author SHA1 Message Date
Vitor Alcantara Batista
154301b3ad
Brazilian Portuguese translation (#6809)
Co-authored-by: Vitor Alcantara Batista <vitor.alcantara@petrobras.com.br>
2025-10-29 23:36:35 +05:30
25 changed files with 1618 additions and 1823 deletions

View File

@@ -16,6 +16,7 @@ const LANGUAGES = [
{ value: 'zh-CN', label: '简体中文' },
{ value: 'zh-TW', label: '繁體中文' },
{ value: 'de-DE', label: 'Deutsch' },
{ value: 'pt-BR', label: 'Português (Brasil)' },
{ value: 'ja', label: '日本語' },
]

View File

@@ -8,7 +8,6 @@ import { cn } from '@/lib/utils'
import { ArrowDown } from 'lucide-react'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { useAppState } from '@/hooks/useAppState'
import { MessageStatus } from '@janhq/core'
const ScrollToBottom = ({
threadId,
@@ -19,10 +18,8 @@ const ScrollToBottom = ({
}) => {
const { t } = useTranslation()
const appMainViewBgColor = useAppearance((state) => state.appMainViewBgColor)
const { showScrollToBottomBtn, scrollToBottom } = useThreadScrolling(
threadId,
scrollContainerRef
)
const { showScrollToBottomBtn, scrollToBottom } =
useThreadScrolling(threadId, scrollContainerRef)
const { messages } = useMessages(
useShallow((state) => ({
messages: state.messages[threadId],
@@ -31,9 +28,12 @@ const ScrollToBottom = ({
const streamingContent = useAppState((state) => state.streamingContent)
const lastMsg = messages[messages.length - 1]
const showGenerateAIResponseBtn =
!!lastMsg && lastMsg.status !== MessageStatus.Ready && !streamingContent
(messages[messages.length - 1]?.role === 'user' ||
(messages[messages.length - 1]?.metadata &&
'tool_calls' in (messages[messages.length - 1].metadata ?? {}))) &&
!streamingContent
return (
<div
className={cn(

View File

@@ -1,43 +1,13 @@
/* eslint-disable react-hooks/rules-of-hooks */
/* eslint-disable @typescript-eslint/no-explicit-any */
import { ChevronDown, ChevronUp, Loader, Check } from 'lucide-react'
import { ChevronDown, ChevronUp, Loader } from 'lucide-react'
import { create } from 'zustand'
import { RenderMarkdown } from './RenderMarkdown'
import { useAppState } from '@/hooks/useAppState'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { useMemo, useState } from 'react'
import { cn } from '@/lib/utils'
import ImageModal from '@/containers/dialogs/ImageModal'
// Define ReActStep type (Reasoning-Action Step)
type ReActStep = {
type: 'reasoning' | 'tool_call' | 'tool_output' | 'done'
content: string
metadata?: any
time?: number
}
import { extractThinkingContent } from '@/lib/utils'
interface Props {
text: string
id: string
steps?: ReActStep[] // Updated type
loading?: boolean
duration?: number
linkComponents?: object
}
// Utility function to safely parse JSON
const safeParseJSON = (text: string) => {
try {
return JSON.parse(text)
} catch {
return null
}
}
// Utility to create data URL for images
const createDataUrl = (base64Data: string, mimeType: string): string => {
if (base64Data.startsWith('data:')) return base64Data
return `data:${mimeType};base64,${base64Data}`
}
// Zustand store for thinking block state
@@ -57,320 +27,54 @@ const useThinkingStore = create<ThinkingBlockState>((set) => ({
})),
}))
// Helper to format duration in seconds
const formatDuration = (ms: number) => {
if (ms > 0) {
return Math.round(ms / 1000)
}
return 0
}
const ThinkingBlock = ({
id,
steps = [],
loading: propLoading,
duration,
linkComponents,
}: Props) => {
const ThinkingBlock = ({ id, text }: Props) => {
const thinkingState = useThinkingStore((state) => state.thinkingState)
const setThinkingState = useThinkingStore((state) => state.setThinkingState)
const isStreaming = useAppState((state) => !!state.streamingContent)
const { t } = useTranslation()
// Move useState for modal management to the top level of the component
const [modalImage, setModalImage] = useState<{
url: string
alt: string
} | null>(null)
const closeModal = () => setModalImage(null)
const handleImageClick = (url: string, alt: string) =>
setModalImage({ url, alt })
// Actual loading state comes from prop, determined by whether final text started streaming
const loading = propLoading
// Set default expansion state: collapsed if done (not loading).
// If loading transitions to false (textSegment starts), this defaults to collapsed.
// Check for thinking formats
const hasThinkTag = text.includes('<think>') && !text.includes('</think>')
const hasAnalysisChannel =
text.includes('<|channel|>analysis<|message|>') &&
!text.includes('<|start|>assistant<|channel|>final<|message|>')
const loading = (hasThinkTag || hasAnalysisChannel) && isStreaming
const isExpanded = thinkingState[id] ?? (loading ? true : false)
// Filter out the 'done' step for streaming display
const stepsWithoutDone = useMemo(
() => steps.filter((step) => step.type !== 'done'),
[steps]
)
const N = stepsWithoutDone.length
// Determine the step to display in the condensed streaming view
const activeStep = useMemo(() => {
if (!loading || N === 0) return null
return stepsWithoutDone[N - 1]
}, [loading, N, stepsWithoutDone])
// If not loading, and there are no steps, hide the block entirely.
const hasContent = steps.length > 0
if (!loading && !hasContent) return null
const handleClick = () => {
// Only allow toggling expansion if not currently loading
// Also only allow if there is content (to prevent collapsing the simple 'Thinking')
if (!loading && hasContent) {
setThinkingState(id, !isExpanded)
}
const newExpandedState = !isExpanded
setThinkingState(id, newExpandedState)
}
// --- Rendering Functions for Expanded View ---
const renderStepContent = (
step: ReActStep,
index: number,
handleImageClick: (url: string, alt: string) => void,
t: (key: string) => string
) => {
// Updated type
if (step.type === 'done') {
const timeInSeconds = formatDuration(step.time ?? 0)
const timeDisplay =
timeInSeconds > 0
? `(${t('chat:for')} ${timeInSeconds} ${t('chat:seconds')})`
: ''
return (
<div
key={index}
className="flex items-center gap-1 text-accent transition-all"
>
<Check className="size-4" />
<span className="font-medium">{t('done')}</span>
{timeDisplay && (
<span className="text-main-view-fg/60 text-xs">{timeDisplay}</span>
)}
</div>
)
}
const parsed = safeParseJSON(step.content)
const mcpContent = parsed?.content ?? []
const hasImages =
Array.isArray(mcpContent) &&
mcpContent.some((c) => c.type === 'image' && c.data && c.mimeType)
let contentDisplay: React.ReactNode
if (step.type === 'tool_call') {
const args = step.metadata ? step.metadata : ''
contentDisplay = (
<>
<p className="font-medium text-main-view-fg/90">
Tool Input: <span className="text-accent">{step.content}</span>
</p>
{args && (
<div className="mt-1">
<RenderMarkdown
isWrapping={true}
content={'```json\n' + args + '\n```'}
/>
</div>
)}
</>
)
} else if (step.type === 'tool_output') {
if (hasImages) {
// Display each image
contentDisplay = (
<>
<p className="font-medium text-main-view-fg/90">
Tool Output (Images):
</p>
<div className="mt-2 space-y-2">
{mcpContent.map((item: any, index: number) =>
item.type === 'image' && item.data && item.mimeType ? (
<div key={index} className="my-2">
<img
src={createDataUrl(item.data, item.mimeType)}
alt={`MCP Image ${index + 1}`}
className="max-w-full max-h-64 object-contain rounded-md border border-main-view-fg/10 cursor-pointer hover:opacity-80 transition-opacity"
onError={(e) => (e.currentTarget.style.display = 'none')}
onClick={() =>
handleImageClick(
createDataUrl(item.data, item.mimeType),
`MCP Image ${index + 1}`
)
}
/>
</div>
) : null
)}
</div>
</>
)
} else {
// Default behavior: wrap text in code block if no backticks
let content = step.content.substring(0, 1000)
if (!content.includes('```')) {
content = '```json\n' + content + '\n```'
}
contentDisplay = (
<>
<p className="font-medium text-main-view-fg/90">Tool Output:</p>
<div className="mt-1">
<RenderMarkdown
isWrapping={true}
content={content}
components={linkComponents}
/>
</div>
</>
)
}
} else {
contentDisplay = (
<RenderMarkdown
isWrapping={true}
content={step.content}
components={linkComponents}
/>
)
}
return (
<div key={index} className="text-main-view-fg/80">
{contentDisplay}
</div>
)
}
const headerTitle: string = useMemo(() => {
// Check if any step was a tool call
const hasToolCalls = steps.some((step) => step.type === 'tool_call')
const hasReasoning = steps.some((step) => step.type === 'reasoning')
const timeInSeconds = formatDuration(duration ?? 0)
if (loading) {
// Logic for streaming (loading) state:
if (activeStep) {
if (
activeStep.type === 'tool_call' ||
activeStep.type === 'tool_output'
) {
return `${t('chat:calling_tool')}` // Use a specific translation key for tool
} else if (activeStep.type === 'reasoning') {
return `${t('chat:thinking')}` // Use the generic thinking key
}
}
// Fallback for isStreamingEmpty state (N=0)
return `${t('chat:thinking')}`
}
// Logic for finalized (not loading) state:
// Build label based on what steps occurred
let label = ''
if (hasReasoning && hasToolCalls) {
// Use a more descriptive label when both were involved
label = t('chat:thought_and_tool_call')
} else if (hasToolCalls) {
label = t('chat:tool_called')
} else {
label = t('chat:thought')
}
if (timeInSeconds > 0) {
return `${label} ${t('chat:for')} ${timeInSeconds} ${t('chat:seconds')}`
}
return label
}, [loading, duration, t, activeStep, steps])
const thinkingContent = extractThinkingContent(text)
if (!thinkingContent) return null
return (
<div
className="mx-auto w-full break-words"
onClick={loading || !hasContent ? undefined : handleClick}
className="mx-auto w-full cursor-pointer break-words"
onClick={handleClick}
>
<div className="mb-4 rounded-lg bg-main-view-fg/4 p-2 transition-all duration-200">
<div className="mb-4 rounded-lg bg-main-view-fg/4 border border-dashed border-main-view-fg/10 p-2">
<div className="flex items-center gap-3">
{loading && (
<Loader className="size-4 animate-spin text-main-view-fg/60" />
)}
<button
className="flex items-center gap-2 focus:outline-none"
disabled={loading || !hasContent}
>
{/* Display chevron only if not loading AND steps exist to expand */}
{!loading &&
hasContent &&
(isExpanded ? (
<ChevronUp className="size-4 text-main-view-fg/60 transition-transform duration-200" />
) : (
<ChevronDown className="size-4 text-main-view-fg/60 transition-transform duration-200" />
))}
<span className="font-medium transition-all duration-200">
{headerTitle}
<button className="flex items-center gap-2 focus:outline-none">
{isExpanded ? (
<ChevronUp className="size-4 text-main-view-fg/60" />
) : (
<ChevronDown className="size-4 text-main-view-fg/60" />
)}
<span className="font-medium">
{loading ? t('common:thinking') : t('common:thought')}
</span>
</button>
</div>
{/* Streaming/Condensed View - shows active step (N-1) */}
{/* This block handles both the N>0 case and the N=0 fallback, ensuring stability. */}
{loading && (activeStep || N === 0) && (
<div
key={`streaming-${N - 1}`}
className={cn(
'mt-4 pl-2 pr-4 text-main-view-fg/60',
// Only animate fade-in if it's not the very first step (N > 1)
N > 1 && 'animate-in fade-in slide-in-from-top-2 duration-300'
)}
>
<div className="relative border-main-view-fg/20">
{/* If N=0, just show the fallback text in the header and this area remains minimal. */}
{activeStep && (
<div className="relative pl-5">
{/* Bullet point/Icon position relative to line */}
<div
className={cn(
'absolute left-[-2px] top-1.5 size-2 rounded-full bg-main-view-fg/60',
activeStep.type !== 'done' && 'animate-pulse' // Pulse if active/streaming
)}
/>
{/* Active step content */}
{renderStepContent(activeStep, N - 1, handleImageClick, t)}
</div>
)}
</div>
</div>
)}
{/* Expanded View - shows all steps */}
{isExpanded && !loading && hasContent && (
<div className="mt-4 pl-2 pr-4 text-main-view-fg/60 animate-in fade-in slide-in-from-top-2 duration-300">
<div className="relative border-main-view-fg/20">
{steps.map((step, index) => (
<div
key={index}
className={cn(
'relative pl-5 pb-2',
'fade-in slide-in-from-left-1 duration-200',
step.type !== 'done' &&
'after:content-[] after:border-l after:border-dashed after:border-main-view-fg/20 after:absolute after:left-0.5 after:bottom-0 after:w-1 after:h-[calc(100%-8px)]'
)}
style={{ animationDelay: `${index * 50}ms` }}
>
{/* Bullet point/Icon position relative to line */}
<div
className={cn(
'absolute left-[-2px] top-1.5 size-2 rounded-full transition-colors duration-200',
step.type === 'done' ? 'bg-accent' : 'bg-main-view-fg/60'
)}
/>
{/* Step Content */}
{renderStepContent(step, index, handleImageClick, t)}
</div>
))}
</div>
{isExpanded && (
<div className="mt-2 pl-6 pr-4 text-main-view-fg/60">
<RenderMarkdown content={thinkingContent} />
</div>
)}
</div>
{/* Render ImageModal once at the top level */}
<ImageModal image={modalImage} onClose={closeModal} />
</div>
)
}

View File

@@ -7,7 +7,7 @@ import { useAppState } from '@/hooks/useAppState'
import { cn } from '@/lib/utils'
import { useMessages } from '@/hooks/useMessages'
import ThinkingBlock from '@/containers/ThinkingBlock'
// import ToolCallBlock from '@/containers/ToolCallBlock'
import ToolCallBlock from '@/containers/ToolCallBlock'
import { useChat } from '@/hooks/useChat'
import {
EditMessageDialog,
@@ -29,62 +29,6 @@ import { useModelProvider } from '@/hooks/useModelProvider'
import { extractFilesFromPrompt } from '@/lib/fileMetadata'
import { createImageAttachment } from '@/types/attachment'
// Define ToolCall interface for type safety when accessing metadata
interface ToolCall {
tool?: {
id: number
function?: {
name: string
arguments?: object | string
}
}
response?: any
state?: 'pending' | 'completed'
}
// Define ReActStep type (Reasoning-Action Step)
type ReActStep = {
type: 'reasoning' | 'tool_call' | 'tool_output' | 'done'
content: string
metadata?: any
time?: number
}
const cleanReasoning = (content: string) => {
return content
.replace(/^<think>/, '') // Remove opening tag at start
.replace(/<\/think>$/, '') // Remove closing tag at end
.trim()
}
// Helper function to extract content within <think> tags and strip all auxiliary tags from the final output
const extractContentAndClean = (
rawText: string
): { reasoningText: string; finalOutput: string } => {
// Regex to match content within <think>...</think> tags
const thinkTagRegex = /<think>([\s\S]*?)<\/think>/g
let reasoningText = ''
let finalOutput = rawText
// Extract content within <think> tags for streamedReasoningText
const thinkMatches = [...rawText.matchAll(thinkTagRegex)]
if (thinkMatches.length > 0) {
// Join all reasoning parts separated by newlines
reasoningText = thinkMatches
.map((match) => match[1])
.join('\n\n')
.trim()
}
// 2. Strip ALL auxiliary tags from finalOutput
finalOutput = finalOutput
.replace(thinkTagRegex, '') // Remove <think> tags and content
.trim()
return { reasoningText, finalOutput }
}
const CopyButton = ({ text }: { text: string }) => {
const [copied, setCopied] = useState(false)
const { t } = useTranslation()
@@ -168,71 +112,40 @@ export const ThreadContent = memo(
return { files: [], cleanPrompt: text }
}, [text, item.role])
type StreamEvent = {
timestamp: number
type: 'reasoning_chunk' | 'tool_call' | 'tool_output'
data: any
}
const { reasoningSegment, textSegment } = useMemo(() => {
// Check for thinking formats
const hasThinkTag = text.includes('<think>') && !text.includes('</think>')
const hasAnalysisChannel =
text.includes('<|channel|>analysis<|message|>') &&
!text.includes('<|start|>assistant<|channel|>final<|message|>')
const {
finalOutputText,
streamedReasoningText,
isReasoningActiveLoading,
hasReasoningSteps,
} = useMemo(() => {
let currentFinalText = text.trim()
let currentReasoning = '' // Reasoning is now only derived from streamEvents/allSteps
if (hasThinkTag || hasAnalysisChannel)
return { reasoningSegment: text, textSegment: '' }
// Extract raw streamEvents and check for finalized state
const streamEvents = (item.metadata?.streamEvents as StreamEvent[]) || []
const isMessageFinalized = !isStreamingThisThread
// If the message is finalized AND there are no streamEvents,
// we assume the 'text' contains the full dump (reasoning + output + tool tags)
if (isMessageFinalized && streamEvents.length === 0) {
// Use the new helper to separate reasoning (from <think>) and clean the final output
const { reasoningText, finalOutput } = extractContentAndClean(text)
currentFinalText = finalOutput
currentReasoning = reasoningText
} else {
// Otherwise, trust the streamEvents path (if present) or the current text is the final output
// We clean the current text just in case, but it should be clean in streaming mode
const { finalOutput } = extractContentAndClean(text)
currentFinalText = finalOutput
// Check for completed think tag format
const thinkMatch = text.match(/<think>([\s\S]*?)<\/think>/)
if (thinkMatch?.index !== undefined) {
const splitIndex = thinkMatch.index + thinkMatch[0].length
return {
reasoningSegment: text.slice(0, splitIndex),
textSegment: text.slice(splitIndex),
}
}
// Check for tool calls or reasoning events in metadata to determine steps/loading
const isToolCallsPresent = !!(
item.metadata &&
'tool_calls' in item.metadata &&
Array.isArray(item.metadata.tool_calls) &&
item.metadata.tool_calls.length > 0
// Check for completed analysis channel format
const analysisMatch = text.match(
/<\|channel\|>analysis<\|message\|>([\s\S]*?)<\|start\|>assistant<\|channel\|>final<\|message\|>/
)
// Check for any reasoning chunks in the streamEvents OR if we extracted reasoning from text
const hasReasoningEvents =
streamEvents.some((e: StreamEvent) => e.type === 'reasoning_chunk') ||
currentReasoning.length > 0 // Added check for extracted reasoning
const hasSteps = isToolCallsPresent || hasReasoningEvents
// Loading if streaming, no final output yet, but we expect steps (reasoning or tool calls)
const loading =
isStreamingThisThread && currentFinalText.length === 0 && hasSteps
return {
finalOutputText: currentFinalText,
streamedReasoningText: currentReasoning,
isReasoningActiveLoading: loading,
hasReasoningSteps: hasSteps,
if (analysisMatch?.index !== undefined) {
const splitIndex = analysisMatch.index + analysisMatch[0].length
return {
reasoningSegment: text.slice(0, splitIndex),
textSegment: text.slice(splitIndex),
}
}
}, [item.metadata, text, isStreamingThisThread])
const isToolCalls =
item.metadata &&
'tool_calls' in item.metadata &&
Array.isArray(item.metadata.tool_calls) &&
item.metadata.tool_calls.length
return { reasoningSegment: undefined, textSegment: text }
}, [text])
const getMessages = useMessages((state) => state.getMessages)
const deleteMessage = useMessages((state) => state.deleteMessage)
@@ -251,8 +164,7 @@ export const ThreadContent = memo(
deleteMessage(toSendMessage.thread_id, toSendMessage.id ?? '')
// Extract text content and any attachments
const rawText =
toSendMessage.content?.find((c) => c.type === 'text')?.text?.value ||
''
toSendMessage.content?.find((c) => c.type === 'text')?.text?.value || ''
const { cleanPrompt: textContent } = extractFilesFromPrompt(rawText)
const attachments = toSendMessage.content
?.filter((c) => (c.type === 'image_url' && c.image_url?.url) || false)
@@ -291,7 +203,7 @@ export const ThreadContent = memo(
while (toSendMessage && toSendMessage?.role !== 'user') {
deleteMessage(toSendMessage.thread_id, toSendMessage.id ?? '')
toSendMessage = threadMessages.pop()
// Stop deletion when encountering an assistant message that isn't a tool call
// Stop deletion when encountering an assistant message that isnt a tool call
if (
toSendMessage &&
toSendMessage.role === 'assistant' &&
@@ -304,235 +216,16 @@ export const ThreadContent = memo(
}
}, [deleteMessage, getMessages, item])
const isToolCalls =
item.metadata &&
'tool_calls' in item.metadata &&
Array.isArray(item.metadata.tool_calls) &&
item.metadata.tool_calls.length
const assistant = item.metadata?.assistant as
| { avatar?: React.ReactNode; name?: React.ReactNode }
| undefined
// Constructing allSteps for ThinkingBlock - CHRONOLOGICAL approach
const allSteps: ReActStep[] = useMemo(() => {
const steps: ReActStep[] = []
// Get streamEvents from metadata (if available)
const streamEvents = (item.metadata?.streamEvents as StreamEvent[]) || []
const toolCalls = (item.metadata?.tool_calls || []) as ToolCall[]
const isMessageFinalized = !isStreamingThisThread
if (streamEvents.length > 0) {
// CHRONOLOGICAL PATH: Use streamEvents for true temporal order
let reasoningBuffer = ''
streamEvents.forEach((event) => {
switch (event.type) {
case 'reasoning_chunk':
// Accumulate reasoning chunks
reasoningBuffer += event.data.content
break
case 'tool_call':
case 'tool_output':
// Flush accumulated reasoning before tool event
if (reasoningBuffer.trim()) {
const cleanedBuffer = cleanReasoning(reasoningBuffer) // <--- Strip tags here
// Split accumulated reasoning by paragraphs for display
const paragraphs = cleanedBuffer
.split(/\n\s*\n/)
.filter((p) => p.trim().length > 0)
paragraphs.forEach((para) => {
steps.push({
type: 'reasoning',
content: para.trim(),
})
})
reasoningBuffer = ''
}
if (event.type === 'tool_call') {
// Add tool call
const toolCall = event.data.toolCall
steps.push({
type: 'tool_call',
content: toolCall?.function?.name || 'Tool Call',
metadata:
typeof toolCall?.function?.arguments === 'string'
? toolCall.function.arguments
: JSON.stringify(
toolCall?.function?.arguments || {},
null,
2
),
})
} else if (event.type === 'tool_output') {
// Add tool output
const result = event.data.result
let outputContent = JSON.stringify(result, null, 2) // Default fallback
const firstContentPart = result?.content?.[0]
if (firstContentPart?.type === 'text') {
const textContent = firstContentPart.text
// Robustly check for { value: string } structure or direct string
if (
typeof textContent === 'object' &&
textContent !== null &&
'value' in textContent
) {
outputContent = textContent.value as string
} else if (typeof textContent === 'string') {
outputContent = textContent
}
} else if (typeof result === 'string') {
outputContent = result
}
steps.push({
type: 'tool_output',
content: outputContent,
})
}
break
}
})
// Flush any remaining reasoning at the end
if (reasoningBuffer.trim()) {
const cleanedBuffer = cleanReasoning(reasoningBuffer) // <--- Strip tags here
const paragraphs = cleanedBuffer
.split(/\n\s*\n/)
.filter((p) => p.trim().length > 0)
paragraphs.forEach((para) => {
steps.push({
type: 'reasoning',
content: para.trim(),
})
})
}
} else if (isMessageFinalized) {
// FALLBACK PATH: No streamEvents - use split text for content construction
const rawReasoningContent = streamedReasoningText || ''
const reasoningParagraphs = rawReasoningContent
? rawReasoningContent // streamedReasoningText is now populated from <think> tags if present
.split(/\n\s*\n/)
.filter((s) => s.trim().length > 0)
.map((content) => content.trim())
: []
let reasoningIndex = 0
toolCalls.forEach((call) => {
// Add reasoning before this tool call
if (reasoningIndex < reasoningParagraphs.length) {
steps.push({
type: 'reasoning',
content: reasoningParagraphs[reasoningIndex],
})
reasoningIndex++
}
// Add tool call
steps.push({
type: 'tool_call',
content: call.tool?.function?.name || 'Tool Call',
metadata:
typeof call.tool?.function?.arguments === 'string'
? call.tool.function.arguments
: JSON.stringify(call.tool?.function?.arguments || {}, null, 2),
})
// Add tool output
if (call.response) {
const result = call.response
let outputContent = JSON.stringify(result, null, 2)
const firstContentPart = result?.content?.[0]
if (firstContentPart?.type === 'text') {
const textContent = firstContentPart.text
if (
typeof textContent === 'object' &&
textContent !== null &&
'value' in textContent
) {
outputContent = textContent.value as string
} else if (typeof textContent === 'string') {
outputContent = textContent
}
} else if (typeof result === 'string') {
outputContent = result
}
steps.push({
type: 'tool_output',
content: outputContent,
})
}
})
// Add remaining reasoning
while (reasoningIndex < reasoningParagraphs.length) {
steps.push({
type: 'reasoning',
content: reasoningParagraphs[reasoningIndex],
})
reasoningIndex++
}
}
// Add Done step
const totalTime = item.metadata?.totalThinkingTime as number | undefined
const lastStepType = steps[steps.length - 1]?.type
if (!isStreamingThisThread && hasReasoningSteps) {
const endsInToolOutputWithoutFinalText =
lastStepType === 'tool_output' && finalOutputText.length === 0
if (!endsInToolOutputWithoutFinalText) {
steps.push({
type: 'done',
content: 'Done',
time: totalTime,
})
}
}
return steps
}, [
item,
isStreamingThisThread,
hasReasoningSteps,
finalOutputText,
streamedReasoningText,
])
// END: Constructing allSteps
// ====================================================================
// If we have streamEvents, rely on 'steps' and pass an empty text buffer.
const streamingTextBuffer = useMemo(() => {
const streamEvents = item.metadata?.streamEvents
// Check if streamEvents exists AND is an array AND has a length greater than 0
if (Array.isArray(streamEvents) && streamEvents.length > 0) {
// We are using the chronological path (allSteps) for rendering
// Return empty string to disable the ThinkingBlock's raw text buffer
return ''
}
// Since we no longer concatenate reasoning to the main text,
// the only time we'd rely on text buffer is if streamEvents fails to load.
// For robustness, we can simply return an empty string to force use of 'steps'.
return ''
}, [item.metadata?.streamEvents]) // Use the object reference for dependency array
// ====================================================================
// Determine if we should show the thinking block
const shouldShowThinkingBlock =
hasReasoningSteps || isToolCalls || isReasoningActiveLoading
return (
<Fragment>
{item.role === 'user' && (
@@ -667,33 +360,46 @@ export const ThreadContent = memo(
</div>
)}
{/* Single unified ThinkingBlock for both reasoning and tool calls */}
{shouldShowThinkingBlock && (
{reasoningSegment && (
<ThinkingBlock
id={
item.isLastMessage
? `${item.thread_id}-last-${(streamingTextBuffer || text).slice(0, 50).replace(/\s/g, '').slice(-10)}`
? `${item.thread_id}-last-${reasoningSegment.slice(0, 50).replace(/\s/g, '').slice(-10)}`
: `${item.thread_id}-${item.index ?? item.id}`
}
// Pass the safe buffer
text={streamingTextBuffer}
steps={allSteps}
loading={isReasoningActiveLoading}
duration={
item.metadata?.totalThinkingTime as number | undefined
}
linkComponents={linkComponents}
text={reasoningSegment}
/>
)}
{!isReasoningActiveLoading && finalOutputText.length > 0 && (
<RenderMarkdown
content={finalOutputText}
components={linkComponents}
/>
)}
<RenderMarkdown
content={textSegment.replace('</think>', '')}
components={linkComponents}
/>
{
{isToolCalls && item.metadata?.tool_calls ? (
<>
{(item.metadata.tool_calls as ToolCall[]).map((toolCall) => (
<ToolCallBlock
id={toolCall.tool?.id ?? 0}
key={toolCall.tool?.id}
name={
(item.streamTools?.tool_calls?.function?.name ||
toolCall.tool?.function?.name) ??
''
}
args={
item.streamTools?.tool_calls?.function?.arguments ||
toolCall.tool?.function?.arguments ||
undefined
}
result={JSON.stringify(toolCall.response)}
loading={toolCall.state === 'pending'}
/>
))}
</>
) : null}
{!isToolCalls && (
<div className="flex items-center gap-2 text-main-view-fg/60 text-xs">
<div className={cn('flex items-center gap-2')}>
<div
@@ -708,10 +414,10 @@ export const ThreadContent = memo(
item.updateMessage && item.updateMessage(item, message)
}
/>
<CopyButton text={finalOutputText || ''} />{' '}
{/* Use finalOutputText for copy */}
<CopyButton text={item.content?.[0]?.text.value || ''} />
<DeleteMessageDialog onDelete={removeMessage} />
<MessageMetadataDialog metadata={item.metadata} />
{item.isLastMessage && selectedModel && (
<Tooltip>
<TooltipTrigger asChild>
@@ -737,7 +443,7 @@ export const ThreadContent = memo(
/>
</div>
</div>
}
)}
</>
)}

View File

@@ -1,4 +1,4 @@
import { useState, useMemo } from 'react'
import { useState } from 'react'
import { useTranslation } from '@/i18n/react-i18next-compat'
import {
Dialog,
@@ -7,96 +7,21 @@ import {
DialogTitle,
DialogHeader,
} from '@/components/ui/dialog'
import {
IconInfoCircle,
IconRobot,
IconGauge,
IconId,
IconCalendar,
IconTemperature,
IconHierarchy,
IconTool,
IconBoxMultiple,
IconRuler,
IconMessageCircle,
} from '@tabler/icons-react'
import { IconInfoCircle } from '@tabler/icons-react'
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from '@/components/ui/tooltip'
// Removed CodeEditor and its styles
// Type definitions for the provided metadata structure
interface Parameters {
temperature: number
top_k: number
top_p: number
}
interface AssistantMetadata {
avatar: string
created_at: number
description: string
id: string
instructions: string
name: string
parameters: Parameters
tool_steps: number
}
interface TokenSpeedMetadata {
lastTimestamp: number
message: string
tokenCount: number
tokenSpeed: number
}
interface MessageMetadata {
assistant?: AssistantMetadata
tokenSpeed?: TokenSpeedMetadata
}
import CodeEditor from '@uiw/react-textarea-code-editor'
import '@uiw/react-textarea-code-editor/dist.css'
interface MessageMetadataDialogProps {
metadata: MessageMetadata | null | undefined // Use the specific interface
// eslint-disable-next-line @typescript-eslint/no-explicit-any
metadata: any
triggerElement?: React.ReactNode
}
// --- Helper Components & Utilities ---
// A utility component to display a single detail row
const DetailItem: React.FC<{
icon: React.ReactNode
label: string
value: React.ReactNode
}> = ({ icon, label, value }) => (
<div className="flex items-start text-sm p-2 bg-main-view-bg/5 rounded-md">
<div className="text-accent mr-3 flex-shrink-0">{icon}</div>
<div className="flex flex-col">
<span className="font-semibold text-main-view-fg/80">{label}:</span>
<span className="text-main-view-fg/90 whitespace-pre-wrap break-words">
{value}
</span>
</div>
</div>
)
// Helper for formatting timestamps
const formatDate = (timestamp: number) => {
if (!timestamp) return 'N/A'
return new Intl.DateTimeFormat('en-US', {
year: 'numeric',
month: 'short',
day: 'numeric',
hour: 'numeric',
minute: 'numeric',
second: 'numeric',
timeZoneName: 'short',
}).format(new Date(timestamp))
}
// --- Main Component ---
export function MessageMetadataDialog({
metadata,
triggerElement,
@@ -104,12 +29,10 @@ export function MessageMetadataDialog({
const { t } = useTranslation()
const [isOpen, setIsOpen] = useState(false)
const { assistant, tokenSpeed } = (metadata || {}) as MessageMetadata
const defaultTrigger = (
<Tooltip>
<TooltipTrigger asChild>
<div
<div
className="outline-0 focus:outline-0 flex items-center gap-1 hover:text-accent transition-colors cursor-pointer group relative"
role="button"
tabIndex={0}
@@ -129,127 +52,27 @@ export function MessageMetadataDialog({
</Tooltip>
)
const formattedTokenSpeed = useMemo(() => {
if (tokenSpeed?.tokenSpeed === undefined) return 'N/A'
return (
new Intl.NumberFormat('en-US', {
style: 'decimal',
minimumFractionDigits: 2,
maximumFractionDigits: 2,
}).format(tokenSpeed.tokenSpeed) + ' tokens/s'
)
}, [tokenSpeed])
return (
<Dialog open={isOpen} onOpenChange={setIsOpen}>
<DialogTrigger>{triggerElement || defaultTrigger}</DialogTrigger>
<DialogContent className="max-w-xl">
<DialogContent>
<DialogHeader>
<DialogTitle>{t('common:dialogs.messageMetadata.title')}</DialogTitle>
<div className="space-y-6 mt-4 max-h-[70vh] overflow-y-auto pr-2">
{/* --- Assistant/Model Section --- */}
{assistant && (
<section>
<h3 className="flex items-center text-lg font-bold border-b border-main-view-fg/10 pb-2 mb-3">
<IconRobot className="mr-2" size={20} />
{t('common:dialogs.messageMetadata.model')}
</h3>
<div className="grid grid-cols-1 md:grid-cols-2 gap-3">
<DetailItem
icon={<IconRobot size={18} />}
label={t('common:dialogs.messageMetadata.name')}
value={`${assistant.avatar} ${assistant.name}`}
/>
<DetailItem
icon={<IconId size={18} />}
label={t('common:dialogs.messageMetadata.id')}
value={assistant.id}
/>
<DetailItem
icon={<IconCalendar size={18} />}
label={t('common:dialogs.messageMetadata.createdAt')}
value={formatDate(assistant.created_at)}
/>
<DetailItem
icon={<IconTool size={18} />}
label={t('common:dialogs.messageMetadata.toolSteps')}
value={assistant.tool_steps}
/>
{/* Parameters */}
<div className="col-span-1 md:col-span-2 grid grid-cols-3 gap-3">
<DetailItem
icon={<IconTemperature size={18} />}
label={t('common:dialogs.messageMetadata.temperature')}
value={assistant.parameters.temperature}
/>
<DetailItem
icon={<IconHierarchy size={18} />}
label={t('common:dialogs.messageMetadata.topK')}
value={assistant.parameters.top_k}
/>
<DetailItem
icon={<IconBoxMultiple size={18} />}
label={t('common:dialogs.messageMetadata.topP')}
value={assistant.parameters.top_p}
/>
</div>
{/* Description/Instructions */}
{(assistant.description || assistant.instructions) && (
<div className="col-span-1 md:col-span-2 space-y-3">
{assistant.description && (
<DetailItem
icon={<IconMessageCircle size={18} />}
label={t('common:dialogs.messageMetadata.description')}
value={assistant.description}
/>
)}
{assistant.instructions && (
<DetailItem
icon={<IconMessageCircle size={18} />}
label={t('common:dialogs.messageMetadata.instructions')}
value={assistant.instructions}
/>
)}
</div>
)}
</div>
</section>
)}
{/* --- Token Speed Section --- */}
{tokenSpeed && (
<section>
<h3 className="flex items-center text-lg font-bold border-b border-main-view-fg/10 pb-2 mb-3">
<IconGauge className="mr-2" size={20} />
{t('Performance')}
</h3>
<div className="grid grid-cols-1 md:grid-cols-3 gap-3">
<DetailItem
icon={<IconGauge size={18} />}
label={t('common:dialogs.messageMetadata.tokenSpeed')}
value={formattedTokenSpeed}
/>
<DetailItem
icon={<IconRuler size={18} />}
label={t('common:dialogs.messageMetadata.tokenCount')}
value={tokenSpeed.tokenCount}
/>
<DetailItem
icon={<IconCalendar size={18} />}
label={t('common:dialogs.messageMetadata.lastUpdate')}
value={formatDate(tokenSpeed.lastTimestamp)}
/>
</div>
</section>
)}
{!assistant && !tokenSpeed && (
<p className="text-center text-main-view-fg/70 py-4">
{t('common:dialogs.messageMetadata.noMetadataAvailable.')}
</p>
)}
<div className="space-y-2 mt-4">
<div className="border border-main-view-fg/10 rounded-md">
<CodeEditor
value={JSON.stringify(metadata || {}, null, 2)}
language="json"
readOnly
data-color-mode="dark"
style={{
fontSize: 12,
backgroundColor: 'transparent',
fontFamily: 'monospace',
}}
className="w-full h-full !text-sm "
/>
</div>
</div>
</DialogHeader>
</DialogContent>

View File

@ -1,4 +1,3 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { useCallback, useMemo } from 'react'
import { flushSync } from 'react-dom'
import { usePrompt } from './usePrompt'
@ -42,12 +41,6 @@ import { TEMPORARY_CHAT_QUERY_ID, TEMPORARY_CHAT_ID } from '@/constants/chat'
import { toast } from 'sonner'
import { Attachment } from '@/types/attachment'
type StreamEvent = {
timestamp: number
type: 'reasoning_chunk' | 'tool_call' | 'tool_output'
data: any
}
export const useChat = () => {
const [
updateTokenSpeed,
@ -100,74 +93,66 @@ export const useChat = () => {
const setModelLoadError = useModelLoad((state) => state.setModelLoadError)
const router = useRouter()
const getCurrentThread = useCallback(
async (projectId?: string) => {
let currentThread = retrieveThread()
const getCurrentThread = useCallback(async (projectId?: string) => {
let currentThread = retrieveThread()
// Check if we're in temporary chat mode
const isTemporaryMode = window.location.search.includes(
`${TEMPORARY_CHAT_QUERY_ID}=true`
// Check if we're in temporary chat mode
const isTemporaryMode = window.location.search.includes(`${TEMPORARY_CHAT_QUERY_ID}=true`)
// Clear messages for existing temporary thread on reload to ensure fresh start
if (isTemporaryMode && currentThread?.id === TEMPORARY_CHAT_ID) {
setMessages(TEMPORARY_CHAT_ID, [])
}
if (!currentThread) {
// Get prompt directly from store when needed
const currentPrompt = usePrompt.getState().prompt
const currentAssistant = useAssistant.getState().currentAssistant
const assistants = useAssistant.getState().assistants
const selectedModel = useModelProvider.getState().selectedModel
const selectedProvider = useModelProvider.getState().selectedProvider
// Get project metadata if projectId is provided
let projectMetadata: { id: string; name: string; updated_at: number } | undefined
if (projectId) {
const project = await serviceHub.projects().getProjectById(projectId)
if (project) {
projectMetadata = {
id: project.id,
name: project.name,
updated_at: project.updated_at,
}
}
}
currentThread = await createThread(
{
id: selectedModel?.id ?? defaultModel(selectedProvider),
provider: selectedProvider,
},
isTemporaryMode ? 'Temporary Chat' : currentPrompt,
assistants.find((a) => a.id === currentAssistant?.id) || assistants[0],
projectMetadata,
isTemporaryMode // pass temporary flag
)
// Clear messages for existing temporary thread on reload to ensure fresh start
// Clear messages for temporary chat to ensure fresh start on reload
if (isTemporaryMode && currentThread?.id === TEMPORARY_CHAT_ID) {
setMessages(TEMPORARY_CHAT_ID, [])
}
if (!currentThread) {
// Get prompt directly from store when needed
const currentPrompt = usePrompt.getState().prompt
const currentAssistant = useAssistant.getState().currentAssistant
const assistants = useAssistant.getState().assistants
const selectedModel = useModelProvider.getState().selectedModel
const selectedProvider = useModelProvider.getState().selectedProvider
// Get project metadata if projectId is provided
let projectMetadata:
| { id: string; name: string; updated_at: number }
| undefined
if (projectId) {
const project = await serviceHub.projects().getProjectById(projectId)
if (project) {
projectMetadata = {
id: project.id,
name: project.name,
updated_at: project.updated_at,
}
}
}
currentThread = await createThread(
{
id: selectedModel?.id ?? defaultModel(selectedProvider),
provider: selectedProvider,
},
isTemporaryMode ? 'Temporary Chat' : currentPrompt,
assistants.find((a) => a.id === currentAssistant?.id) ||
assistants[0],
projectMetadata,
isTemporaryMode // pass temporary flag
)
// Clear messages for temporary chat to ensure fresh start on reload
if (isTemporaryMode && currentThread?.id === TEMPORARY_CHAT_ID) {
setMessages(TEMPORARY_CHAT_ID, [])
}
// Set flag for temporary chat navigation
if (currentThread.id === TEMPORARY_CHAT_ID) {
sessionStorage.setItem('temp-chat-nav', 'true')
}
router.navigate({
to: route.threadsDetail,
params: { threadId: currentThread.id },
})
// Set flag for temporary chat navigation
if (currentThread.id === TEMPORARY_CHAT_ID) {
sessionStorage.setItem('temp-chat-nav', 'true')
}
return currentThread
},
[createThread, retrieveThread, router, setMessages, serviceHub]
)
router.navigate({
to: route.threadsDetail,
params: { threadId: currentThread.id },
})
}
return currentThread
}, [createThread, retrieveThread, router, setMessages, serviceHub])
const restartModel = useCallback(
async (provider: ProviderObject, modelId: string) => {
@ -286,8 +271,6 @@ export const useChat = () => {
const selectedProvider = useModelProvider.getState().selectedProvider
let activeProvider = getProviderByName(selectedProvider)
const streamEvents: StreamEvent[] = []
resetTokenSpeed()
if (!activeThread || !activeProvider) return
@ -314,9 +297,7 @@ export const useChat = () => {
updateAttachmentProcessing(img.name, 'processing')
}
// Upload image, get id/URL
const res = await serviceHub
.uploads()
.ingestImage(activeThread.id, img)
const res = await serviceHub.uploads().ingestImage(activeThread.id, img)
processedAttachments.push({
...img,
id: res.id,
@ -332,9 +313,7 @@ export const useChat = () => {
updateAttachmentProcessing(img.name, 'error')
}
const desc = err instanceof Error ? err.message : String(err)
toast.error('Failed to ingest image attachment', {
description: desc,
})
toast.error('Failed to ingest image attachment', { description: desc })
return
}
}
@ -415,9 +394,6 @@ export const useChat = () => {
updateThreadTimestamp(activeThread.id)
usePrompt.getState().setPrompt('')
const selectedModel = useModelProvider.getState().selectedModel
const startTime = Date.now() // Start timer here
try {
if (selectedModel?.id) {
updateLoadingModel(true)
@ -434,8 +410,10 @@ export const useChat = () => {
// Using addUserMessage to respect legacy code. Should be using the userContent above.
if (troubleshooting) builder.addUserMessage(userContent)
let isCompleted = false
// Filter tools based on model capabilities and available tools for this thread
const availableTools = selectedModel?.capabilities?.includes('tools')
let availableTools = selectedModel?.capabilities?.includes('tools')
? useAppState.getState().tools.filter((tool) => {
const disabledTools = getDisabledToolsForThread(activeThread.id)
return !disabledTools.includes(tool.name)
@ -443,21 +421,13 @@ export const useChat = () => {
: []
// Check if proactive mode is enabled
const isProactiveMode =
selectedModel?.capabilities?.includes('proactive') ?? false
const isProactiveMode = selectedModel?.capabilities?.includes('proactive') ?? false
// Proactive mode: Capture initial screenshot/snapshot before first LLM call
if (
isProactiveMode &&
availableTools.length > 0 &&
!abortController.signal.aborted
) {
console.log(
'Proactive mode: Capturing initial screenshots before LLM call'
)
if (isProactiveMode && availableTools.length > 0 && !abortController.signal.aborted) {
console.log('Proactive mode: Capturing initial screenshots before LLM call')
try {
const initialScreenshots =
await captureProactiveScreenshots(abortController)
const initialScreenshots = await captureProactiveScreenshots(abortController)
// Add initial screenshots to builder
for (const screenshot of initialScreenshots) {
@ -471,91 +441,131 @@ export const useChat = () => {
}
}
// The agent logic is now self-contained within postMessageProcessing.
// We no longer need a `while` loop here.
let assistantLoopSteps = 0
if (abortController.signal.aborted || !activeProvider) return
while (
!isCompleted &&
!abortController.signal.aborted &&
activeProvider
) {
const modelConfig = activeProvider.models.find(
(m) => m.id === selectedModel?.id
)
assistantLoopSteps += 1
const modelConfig = activeProvider.models.find(
(m) => m.id === selectedModel?.id
)
const modelSettings = modelConfig?.settings
? Object.fromEntries(
Object.entries(modelConfig.settings)
.filter(
([key, value]) =>
key !== 'ctx_len' &&
key !== 'ngl' &&
value.controller_props?.value !== undefined &&
value.controller_props?.value !== null &&
value.controller_props?.value !== ''
)
.map(([key, value]) => [key, value.controller_props?.value])
)
: undefined
const modelSettings = modelConfig?.settings
? Object.fromEntries(
Object.entries(modelConfig.settings)
.filter(
([key, value]) =>
key !== 'ctx_len' &&
key !== 'ngl' &&
value.controller_props?.value !== undefined &&
value.controller_props?.value !== null &&
value.controller_props?.value !== ''
)
.map(([key, value]) => [key, value.controller_props?.value])
)
: undefined
const completion = await sendCompletion(
activeThread,
activeProvider,
builder.getMessages(),
abortController,
availableTools,
currentAssistant?.parameters?.stream === false ? false : true,
{
...modelSettings,
...(currentAssistant?.parameters || {}),
} as unknown as Record<string, object>
)
const completion = await sendCompletion(
activeThread,
activeProvider,
builder.getMessages(),
abortController,
availableTools,
currentAssistant?.parameters?.stream === false ? false : true,
{
...modelSettings,
...(currentAssistant?.parameters || {}),
} as unknown as Record<string, object>
)
if (!completion) throw new Error('No completion received')
let accumulatedText = ''
const currentCall: ChatCompletionMessageToolCall | null = null
const toolCalls: ChatCompletionMessageToolCall[] = []
const timeToFirstToken = Date.now()
let tokenUsage: CompletionUsage | undefined = undefined
try {
if (isCompletionResponse(completion)) {
const message = completion.choices[0]?.message
accumulatedText = (message?.content as string) || ''
if (!completion) throw new Error('No completion received')
let accumulatedText = ''
const currentCall: ChatCompletionMessageToolCall | null = null
const toolCalls: ChatCompletionMessageToolCall[] = []
const timeToFirstToken = Date.now()
let tokenUsage: CompletionUsage | undefined = undefined
try {
if (isCompletionResponse(completion)) {
const message = completion.choices[0]?.message
accumulatedText = (message?.content as string) || ''
// Handle reasoning field if there is one
const reasoning = extractReasoningFromMessage(message)
if (reasoning) {
accumulatedText = `<think>${reasoning}</think>` + accumulatedText
}
if (message?.tool_calls) {
toolCalls.push(...message.tool_calls)
}
if ('usage' in completion) {
tokenUsage = completion.usage
}
} else {
// High-throughput scheduler: batch UI updates on rAF (requestAnimationFrame)
let rafScheduled = false
let rafHandle: number | undefined
let pendingDeltaCount = 0
const reasoningProcessor = new ReasoningProcessor()
const scheduleFlush = () => {
if (rafScheduled || abortController.signal.aborted) return
rafScheduled = true
const doSchedule = (cb: () => void) => {
if (typeof requestAnimationFrame !== 'undefined') {
rafHandle = requestAnimationFrame(() => cb())
} else {
// Fallback for non-browser test environments
const t = setTimeout(() => cb(), 0) as unknown as number
rafHandle = t
}
// Handle reasoning field if there is one
const reasoning = extractReasoningFromMessage(message)
if (reasoning) {
accumulatedText =
`<think>${reasoning}</think>` + accumulatedText
}
doSchedule(() => {
// Check abort status before executing the scheduled callback
if (abortController.signal.aborted) {
rafScheduled = false
return
}
if (message?.tool_calls) {
toolCalls.push(...message.tool_calls)
}
if ('usage' in completion) {
tokenUsage = completion.usage
}
} else {
// High-throughput scheduler: batch UI updates on rAF (requestAnimationFrame)
let rafScheduled = false
let rafHandle: number | undefined
let pendingDeltaCount = 0
const reasoningProcessor = new ReasoningProcessor()
const scheduleFlush = () => {
if (rafScheduled || abortController.signal.aborted) return
rafScheduled = true
const doSchedule = (cb: () => void) => {
if (typeof requestAnimationFrame !== 'undefined') {
rafHandle = requestAnimationFrame(() => cb())
} else {
// Fallback for non-browser test environments
const t = setTimeout(() => cb(), 0) as unknown as number
rafHandle = t
}
}
doSchedule(() => {
// Check abort status before executing the scheduled callback
if (abortController.signal.aborted) {
rafScheduled = false
return
}
const currentContent = newAssistantThreadContent(
activeThread.id,
accumulatedText,
{
tool_calls: toolCalls.map((e) => ({
...e,
state: 'pending',
})),
}
)
updateStreamingContent(currentContent)
if (tokenUsage) {
setTokenSpeed(
currentContent,
tokenUsage.completion_tokens /
Math.max((Date.now() - timeToFirstToken) / 1000, 1),
tokenUsage.completion_tokens
)
} else if (pendingDeltaCount > 0) {
updateTokenSpeed(currentContent, pendingDeltaCount)
}
pendingDeltaCount = 0
rafScheduled = false
})
}
const flushIfPending = () => {
if (!rafScheduled) return
if (
typeof cancelAnimationFrame !== 'undefined' &&
rafHandle !== undefined
) {
cancelAnimationFrame(rafHandle)
} else if (rafHandle !== undefined) {
clearTimeout(rafHandle)
}
// Do an immediate flush
const currentContent = newAssistantThreadContent(
activeThread.id,
accumulatedText,
@ -564,7 +574,6 @@ export const useChat = () => {
...e,
state: 'pending',
})),
streamEvents: streamEvents,
}
)
updateStreamingContent(currentContent)
@ -580,232 +589,160 @@ export const useChat = () => {
}
pendingDeltaCount = 0
rafScheduled = false
})
}
const flushIfPending = () => {
if (!rafScheduled) return
if (
typeof cancelAnimationFrame !== 'undefined' &&
rafHandle !== undefined
) {
cancelAnimationFrame(rafHandle)
} else if (rafHandle !== undefined) {
clearTimeout(rafHandle)
}
// Do an immediate flush
const currentContent = newAssistantThreadContent(
activeThread.id,
accumulatedText,
{
tool_calls: toolCalls.map((e) => ({
...e,
state: 'pending',
})),
streamEvents: streamEvents,
}
)
updateStreamingContent(currentContent)
if (tokenUsage) {
setTokenSpeed(
currentContent,
tokenUsage.completion_tokens /
Math.max((Date.now() - timeToFirstToken) / 1000, 1),
tokenUsage.completion_tokens
)
} else if (pendingDeltaCount > 0) {
updateTokenSpeed(currentContent, pendingDeltaCount)
}
pendingDeltaCount = 0
rafScheduled = false
}
try {
for await (const part of completion) {
// Check if aborted before processing each part
if (abortController.signal.aborted) {
break
}
try {
for await (const part of completion) {
// Check if aborted before processing each part
if (abortController.signal.aborted) {
break
}
// Handle prompt progress if available
if ('prompt_progress' in part && part.prompt_progress) {
// Force immediate state update to ensure we see intermediate values
flushSync(() => {
updatePromptProgress(part.prompt_progress)
})
// Add a small delay to make progress visible
await new Promise((resolve) => setTimeout(resolve, 100))
}
// Error message
if (!part.choices) {
throw new Error(
'message' in part
? (part.message as string)
: (JSON.stringify(part) ?? '')
)
}
if ('usage' in part && part.usage) {
tokenUsage = part.usage
}
const deltaToolCalls = part.choices[0]?.delta?.tool_calls
if (deltaToolCalls) {
const index = deltaToolCalls[0]?.index
// Check if this chunk starts a brand new tool call
const isNewToolCallStart =
index !== undefined && toolCalls[index] === undefined
extractToolCall(part, currentCall, toolCalls)
if (isNewToolCallStart) {
// Track tool call event only when it begins
// toolCalls[index] is the newly created object due to extractToolCall
streamEvents.push({
timestamp: Date.now(),
type: 'tool_call',
data: { toolCall: toolCalls[index] },
// Handle prompt progress if available
if ('prompt_progress' in part && part.prompt_progress) {
// Force immediate state update to ensure we see intermediate values
flushSync(() => {
updatePromptProgress(part.prompt_progress)
})
// Add a small delay to make progress visible
await new Promise((resolve) => setTimeout(resolve, 100))
}
// Error message
if (!part.choices) {
throw new Error(
'message' in part
? (part.message as string)
: (JSON.stringify(part) ?? '')
)
}
if ('usage' in part && part.usage) {
tokenUsage = part.usage
}
if (part.choices[0]?.delta?.tool_calls) {
extractToolCall(part, currentCall, toolCalls)
// Schedule a flush to reflect tool update
scheduleFlush()
}
const deltaReasoning =
reasoningProcessor.processReasoningChunk(part)
if (deltaReasoning) {
accumulatedText += deltaReasoning
pendingDeltaCount += 1
// Schedule flush for reasoning updates
scheduleFlush()
}
const deltaContent = part.choices[0]?.delta?.content || ''
if (deltaContent) {
accumulatedText += deltaContent
pendingDeltaCount += 1
// Batch UI update on next animation frame
scheduleFlush()
}
}
const deltaReasoning =
reasoningProcessor.processReasoningChunk(part)
if (deltaReasoning) {
// accumulatedText += deltaReasoning
// Track reasoning event
streamEvents.push({
timestamp: Date.now(),
type: 'reasoning_chunk',
data: { content: deltaReasoning },
})
pendingDeltaCount += 1
// Schedule flush for reasoning updates
scheduleFlush()
} finally {
// Always clean up scheduled RAF when stream ends (either normally or via abort)
if (rafHandle !== undefined) {
if (typeof cancelAnimationFrame !== 'undefined') {
cancelAnimationFrame(rafHandle)
} else {
clearTimeout(rafHandle)
}
rafHandle = undefined
rafScheduled = false
}
const deltaContent = part.choices[0]?.delta?.content || ''
if (deltaContent) {
accumulatedText += deltaContent
pendingDeltaCount += 1
// Batch UI update on next animation frame
scheduleFlush()
}
}
} finally {
// Always clean up scheduled RAF when stream ends (either normally or via abort)
if (rafHandle !== undefined) {
if (typeof cancelAnimationFrame !== 'undefined') {
cancelAnimationFrame(rafHandle)
} else {
clearTimeout(rafHandle)
}
rafHandle = undefined
rafScheduled = false
}
// Only finalize and flush if not aborted
if (!abortController.signal.aborted) {
// Finalize reasoning (close any open think tags)
// accumulatedText += reasoningProcessor.finalize()
// Ensure any pending buffered content is rendered at the end
flushIfPending()
// Only finalize and flush if not aborted
if (!abortController.signal.aborted) {
// Finalize reasoning (close any open think tags)
accumulatedText += reasoningProcessor.finalize()
// Ensure any pending buffered content is rendered at the end
flushIfPending()
}
}
}
} catch (error) {
const errorMessage =
error && typeof error === 'object' && 'message' in error
? error.message
: error
if (
typeof errorMessage === 'string' &&
errorMessage.includes(OUT_OF_CONTEXT_SIZE) &&
selectedModel
) {
const method = await showIncreaseContextSizeModal()
if (method === 'ctx_len') {
/// Increase context size
activeProvider = await increaseModelContextSize(
selectedModel.id,
activeProvider
)
continue
} else if (method === 'context_shift' && selectedModel?.id) {
/// Enable context_shift
activeProvider = await toggleOnContextShifting(
selectedModel?.id,
activeProvider
)
continue
} else throw error
} else {
throw error
}
}
} catch (error) {
const errorMessage =
error && typeof error === 'object' && 'message' in error
? error.message
: error
// TODO: Remove this check when integrating new llama.cpp extension
if (
typeof errorMessage === 'string' &&
errorMessage.includes(OUT_OF_CONTEXT_SIZE) &&
selectedModel
accumulatedText.length === 0 &&
toolCalls.length === 0 &&
activeThread.model?.id &&
activeProvider?.provider === 'llamacpp'
) {
const method = await showIncreaseContextSizeModal()
if (method === 'ctx_len') {
/// Increase context size
activeProvider = await increaseModelContextSize(
selectedModel.id,
activeProvider
)
// NOTE: This will exit and not retry. A more robust solution might re-call sendMessage.
// For this change, we keep the existing behavior.
return
} else if (method === 'context_shift' && selectedModel?.id) {
/// Enable context_shift
activeProvider = await toggleOnContextShifting(
selectedModel?.id,
activeProvider
)
// NOTE: See above comment about retry.
return
} else throw error
} else {
throw error
await serviceHub
.models()
.stopModel(activeThread.model.id, 'llamacpp')
throw new Error('No response received from the model')
}
// Create a final content object for adding to the thread
const finalContent = newAssistantThreadContent(
activeThread.id,
accumulatedText,
{
tokenSpeed: useAppState.getState().tokenSpeed,
assistant: currentAssistant,
}
)
builder.addAssistantMessage(accumulatedText, undefined, toolCalls)
// Check if proactive mode is enabled for this model
const isProactiveMode = selectedModel?.capabilities?.includes('proactive') ?? false
const updatedMessage = await postMessageProcessing(
toolCalls,
builder,
finalContent,
abortController,
useToolApproval.getState().approvedTools,
allowAllMCPPermissions ? undefined : showApprovalModal,
allowAllMCPPermissions,
isProactiveMode
)
addMessage(updatedMessage ?? finalContent)
updateStreamingContent(emptyThreadContent)
updatePromptProgress(undefined)
updateThreadTimestamp(activeThread.id)
isCompleted = !toolCalls.length
// Do not create agent loop if there is no need for it
// Check if assistant loop steps are within limits
if (assistantLoopSteps >= (currentAssistant?.tool_steps ?? 20)) {
// Stop the assistant tool call if it exceeds the maximum steps
availableTools = []
}
}
// TODO: Remove this check when integrating new llama.cpp extension
if (
accumulatedText.length === 0 &&
toolCalls.length === 0 &&
activeThread.model?.id &&
activeProvider?.provider === 'llamacpp'
) {
await serviceHub.models().stopModel(activeThread.model.id, 'llamacpp')
throw new Error('No response received from the model')
}
const completionFinishTime = Date.now()
// Calculate the time taken for the initial completion (streaming or non-streaming)
const initialCompletionTime = completionFinishTime - startTime
const messageMetadata: Record<string, any> = {
tokenSpeed: useAppState.getState().tokenSpeed,
assistant: currentAssistant,
streamEvents, // Add chronological events
}
if (accumulatedText.includes('<think>') || toolCalls.length > 0) {
messageMetadata.totalThinkingTime = initialCompletionTime
}
// This is the message object that will be built upon by postMessageProcessing
const finalContent = newAssistantThreadContent(
activeThread.id,
accumulatedText,
messageMetadata
)
builder.addAssistantMessage(accumulatedText, undefined, toolCalls)
// All subsequent tool calls and follow-up completions will modify `finalContent`.
const updatedMessage = await postMessageProcessing(
toolCalls,
builder,
finalContent,
abortController,
useToolApproval.getState().approvedTools,
allowAllMCPPermissions ? undefined : showApprovalModal,
allowAllMCPPermissions,
activeThread,
activeProvider,
availableTools,
updateStreamingContent, // Pass the callback to update UI
currentAssistant?.tool_steps,
isProactiveMode
)
if (updatedMessage && updatedMessage.metadata) {
if (finalContent.metadata?.totalThinkingTime !== undefined) {
updatedMessage.metadata.totalThinkingTime =
finalContent.metadata.totalThinkingTime
}
}
// Add the single, final, composite message to the store.
addMessage(updatedMessage ?? finalContent)
updateStreamingContent(emptyThreadContent)
updatePromptProgress(undefined)
updateThreadTimestamp(activeThread.id)
} catch (error) {
if (!abortController.signal.aborted) {
if (error && typeof error === 'object' && 'message' in error) {

View File

@ -9,7 +9,7 @@ import {
normalizeTools,
extractToolCall,
postMessageProcessing,
captureProactiveScreenshots,
captureProactiveScreenshots
} from '../completion'
// Mock dependencies
@ -87,12 +87,10 @@ vi.mock('@/hooks/useServiceHub', () => ({
})),
rag: vi.fn(() => ({
getToolNames: vi.fn(() => Promise.resolve([])),
callTool: vi.fn(() =>
Promise.resolve({
content: [{ type: 'text', text: 'mock rag result' }],
error: '',
})
),
callTool: vi.fn(() => Promise.resolve({
content: [{ type: 'text', text: 'mock rag result' }],
error: '',
})),
})),
})),
}))
@ -135,15 +133,13 @@ describe('completion.ts', () => {
expect(result.type).toBe('text')
expect(result.role).toBe('user')
expect(result.thread_id).toBe('thread-123')
expect(result.content).toEqual([
{
type: 'text',
text: {
value: 'Hello world',
annotations: [],
},
expect(result.content).toEqual([{
type: 'text',
text: {
value: 'Hello world',
annotations: [],
},
])
}])
})
it('should handle empty text', () => {
@ -151,15 +147,13 @@ describe('completion.ts', () => {
expect(result.type).toBe('text')
expect(result.role).toBe('user')
expect(result.content).toEqual([
{
type: 'text',
text: {
value: '',
annotations: [],
},
expect(result.content).toEqual([{
type: 'text',
text: {
value: '',
annotations: [],
},
])
}])
})
})
@ -170,15 +164,13 @@ describe('completion.ts', () => {
expect(result.type).toBe('text')
expect(result.role).toBe('assistant')
expect(result.thread_id).toBe('thread-123')
expect(result.content).toEqual([
{
type: 'text',
text: {
value: 'AI response',
annotations: [],
},
expect(result.content).toEqual([{
type: 'text',
text: {
value: 'AI response',
annotations: [],
},
])
}])
})
})
@ -215,20 +207,16 @@ describe('completion.ts', () => {
describe('extractToolCall', () => {
it('should extract tool calls from message', () => {
const message = {
choices: [
{
delta: {
tool_calls: [
{
id: 'call_1',
type: 'function',
index: 0,
function: { name: 'test', arguments: '{}' },
},
],
},
},
],
choices: [{
delta: {
tool_calls: [{
id: 'call_1',
type: 'function',
index: 0,
function: { name: 'test', arguments: '{}' }
}]
}
}]
}
const calls = []
const result = extractToolCall(message, null, calls)
@ -238,11 +226,9 @@ describe('completion.ts', () => {
it('should handle message without tool calls', () => {
const message = {
choices: [
{
delta: {},
},
],
choices: [{
delta: {}
}]
}
const calls = []
const result = extractToolCall(message, null, calls)
@ -259,31 +245,23 @@ describe('completion.ts', () => {
const mockMcp = {
getTools: mockGetTools,
callToolWithCancellation: vi.fn(() => ({
promise: Promise.resolve({
content: [{ type: 'text', text: 'result' }],
error: '',
}),
promise: Promise.resolve({ content: [{ type: 'text', text: 'result' }], error: '' }),
cancel: vi.fn(),
})),
}))
}
vi.mocked(getServiceHub).mockReturnValue({
mcp: () => mockMcp,
rag: () => ({ getToolNames: () => Promise.resolve([]) }),
rag: () => ({ getToolNames: () => Promise.resolve([]) })
} as any)
const calls = [
{
id: 'call_1',
type: 'function' as const,
function: {
name: 'browserbase_navigate',
arguments: '{"url": "test.com"}',
},
},
]
const calls = [{
id: 'call_1',
type: 'function' as const,
function: { name: 'browserbase_navigate', arguments: '{"url": "test.com"}' }
}]
const builder = {
addToolMessage: vi.fn(),
getMessages: vi.fn(() => []),
getMessages: vi.fn(() => [])
} as any
const message = { thread_id: 'test-thread', metadata: {} } as any
const abortController = new AbortController()
@ -306,44 +284,30 @@ describe('completion.ts', () => {
it('should detect browserbase tools', async () => {
const { getServiceHub } = await import('@/hooks/useServiceHub')
const mockCallTool = vi.fn(() => ({
promise: Promise.resolve({
content: [{ type: 'text', text: 'result' }],
error: '',
}),
promise: Promise.resolve({ content: [{ type: 'text', text: 'result' }], error: '' }),
cancel: vi.fn(),
}))
vi.mocked(getServiceHub).mockReturnValue({
mcp: () => ({
getTools: () => Promise.resolve([]),
callToolWithCancellation: mockCallTool,
callToolWithCancellation: mockCallTool
}),
rag: () => ({ getToolNames: () => Promise.resolve([]) }),
rag: () => ({ getToolNames: () => Promise.resolve([]) })
} as any)
const calls = [
{
id: 'call_1',
type: 'function' as const,
function: { name: 'browserbase_screenshot', arguments: '{}' },
},
]
const calls = [{
id: 'call_1',
type: 'function' as const,
function: { name: 'browserbase_screenshot', arguments: '{}' }
}]
const builder = {
addToolMessage: vi.fn(),
getMessages: vi.fn(() => []),
getMessages: vi.fn(() => [])
} as any
const message = { thread_id: 'test-thread', metadata: {} } as any
const abortController = new AbortController()
await postMessageProcessing(
calls,
builder,
message,
abortController,
{},
undefined,
false,
true
)
await postMessageProcessing(calls, builder, message, abortController, {}, undefined, false, true)
expect(mockCallTool).toHaveBeenCalled()
})
@ -351,47 +315,30 @@ describe('completion.ts', () => {
it('should detect multi_browserbase tools', async () => {
const { getServiceHub } = await import('@/hooks/useServiceHub')
const mockCallTool = vi.fn(() => ({
promise: Promise.resolve({
content: [{ type: 'text', text: 'result' }],
error: '',
}),
promise: Promise.resolve({ content: [{ type: 'text', text: 'result' }], error: '' }),
cancel: vi.fn(),
}))
vi.mocked(getServiceHub).mockReturnValue({
mcp: () => ({
getTools: () => Promise.resolve([]),
callToolWithCancellation: mockCallTool,
callToolWithCancellation: mockCallTool
}),
rag: () => ({ getToolNames: () => Promise.resolve([]) }),
rag: () => ({ getToolNames: () => Promise.resolve([]) })
} as any)
const calls = [
{
id: 'call_1',
type: 'function' as const,
function: {
name: 'multi_browserbase_stagehand_navigate',
arguments: '{}',
},
},
]
const calls = [{
id: 'call_1',
type: 'function' as const,
function: { name: 'multi_browserbase_stagehand_navigate', arguments: '{}' }
}]
const builder = {
addToolMessage: vi.fn(),
getMessages: vi.fn(() => []),
getMessages: vi.fn(() => [])
} as any
const message = { thread_id: 'test-thread', metadata: {} } as any
const abortController = new AbortController()
await postMessageProcessing(
calls,
builder,
message,
abortController,
{},
undefined,
false,
true
)
await postMessageProcessing(calls, builder, message, abortController, {}, undefined, false, true)
expect(mockCallTool).toHaveBeenCalled()
})
@ -403,40 +350,26 @@ describe('completion.ts', () => {
mcp: () => ({
getTools: mockGetTools,
callToolWithCancellation: vi.fn(() => ({
promise: Promise.resolve({
content: [{ type: 'text', text: 'result' }],
error: '',
}),
promise: Promise.resolve({ content: [{ type: 'text', text: 'result' }], error: '' }),
cancel: vi.fn(),
})),
}))
}),
rag: () => ({ getToolNames: () => Promise.resolve([]) }),
rag: () => ({ getToolNames: () => Promise.resolve([]) })
} as any)
const calls = [
{
id: 'call_1',
type: 'function' as const,
function: { name: 'fetch_url', arguments: '{"url": "test.com"}' },
},
]
const calls = [{
id: 'call_1',
type: 'function' as const,
function: { name: 'fetch_url', arguments: '{"url": "test.com"}' }
}]
const builder = {
addToolMessage: vi.fn(),
getMessages: vi.fn(() => []),
getMessages: vi.fn(() => [])
} as any
const message = { thread_id: 'test-thread', metadata: {} } as any
const abortController = new AbortController()
await postMessageProcessing(
calls,
builder,
message,
abortController,
{},
undefined,
false,
true
)
await postMessageProcessing(calls, builder, message, abortController, {}, undefined, false, true)
// Proactive screenshots should not be called for non-browser tools
expect(mockGetTools).not.toHaveBeenCalled()
@ -447,9 +380,7 @@ describe('completion.ts', () => {
it('should capture screenshot and snapshot when available', async () => {
const { getServiceHub } = await import('@/hooks/useServiceHub')
const mockScreenshotResult = {
content: [
{ type: 'image', data: 'base64screenshot', mimeType: 'image/png' },
],
content: [{ type: 'image', data: 'base64screenshot', mimeType: 'image/png' }],
error: '',
}
const mockSnapshotResult = {
@ -457,14 +388,11 @@ describe('completion.ts', () => {
error: '',
}
const mockGetTools = vi.fn(() =>
Promise.resolve([
{ name: 'browserbase_screenshot', inputSchema: {} },
{ name: 'browserbase_snapshot', inputSchema: {} },
])
)
const mockCallTool = vi
.fn()
const mockGetTools = vi.fn(() => Promise.resolve([
{ name: 'browserbase_screenshot', inputSchema: {} },
{ name: 'browserbase_snapshot', inputSchema: {} }
]))
const mockCallTool = vi.fn()
.mockReturnValueOnce({
promise: Promise.resolve(mockScreenshotResult),
cancel: vi.fn(),
@ -477,8 +405,8 @@ describe('completion.ts', () => {
vi.mocked(getServiceHub).mockReturnValue({
mcp: () => ({
getTools: mockGetTools,
callToolWithCancellation: mockCallTool,
}),
callToolWithCancellation: mockCallTool
})
} as any)
const abortController = new AbortController()
@ -492,15 +420,15 @@ describe('completion.ts', () => {
it('should handle missing screenshot tool gracefully', async () => {
const { getServiceHub } = await import('@/hooks/useServiceHub')
const mockGetTools = vi.fn(() =>
Promise.resolve([{ name: 'some_other_tool', inputSchema: {} }])
)
const mockGetTools = vi.fn(() => Promise.resolve([
{ name: 'some_other_tool', inputSchema: {} }
]))
vi.mocked(getServiceHub).mockReturnValue({
mcp: () => ({
getTools: mockGetTools,
callToolWithCancellation: vi.fn(),
}),
callToolWithCancellation: vi.fn()
})
} as any)
const abortController = new AbortController()
@ -511,9 +439,9 @@ describe('completion.ts', () => {
it('should handle screenshot capture errors gracefully', async () => {
const { getServiceHub } = await import('@/hooks/useServiceHub')
const mockGetTools = vi.fn(() =>
Promise.resolve([{ name: 'browserbase_screenshot', inputSchema: {} }])
)
const mockGetTools = vi.fn(() => Promise.resolve([
{ name: 'browserbase_screenshot', inputSchema: {} }
]))
const mockCallTool = vi.fn(() => ({
promise: Promise.reject(new Error('Screenshot failed')),
cancel: vi.fn(),
@ -522,8 +450,8 @@ describe('completion.ts', () => {
vi.mocked(getServiceHub).mockReturnValue({
mcp: () => ({
getTools: mockGetTools,
callToolWithCancellation: mockCallTool,
}),
callToolWithCancellation: mockCallTool
})
} as any)
const abortController = new AbortController()
@ -535,30 +463,22 @@ describe('completion.ts', () => {
it('should respect abort controller', async () => {
const { getServiceHub } = await import('@/hooks/useServiceHub')
const mockGetTools = vi.fn(() =>
Promise.resolve([{ name: 'browserbase_screenshot', inputSchema: {} }])
)
const mockGetTools = vi.fn(() => Promise.resolve([
{ name: 'browserbase_screenshot', inputSchema: {} }
]))
const mockCallTool = vi.fn(() => ({
promise: new Promise((resolve) =>
setTimeout(
() =>
resolve({
content: [
{ type: 'image', data: 'base64', mimeType: 'image/png' },
],
error: '',
}),
100
)
),
promise: new Promise((resolve) => setTimeout(() => resolve({
content: [{ type: 'image', data: 'base64', mimeType: 'image/png' }],
error: '',
}), 100)),
cancel: vi.fn(),
}))
vi.mocked(getServiceHub).mockReturnValue({
mcp: () => ({
getTools: mockGetTools,
callToolWithCancellation: mockCallTool,
}),
callToolWithCancellation: mockCallTool
})
} as any)
const abortController = new AbortController()
@ -580,15 +500,12 @@ describe('completion.ts', () => {
role: 'tool',
content: [
{ type: 'text', text: 'Tool result' },
{
type: 'image_url',
image_url: { url: 'data:image/png;base64,old' },
},
{ type: 'image_url', image_url: { url: 'data:image/png;base64,old' } }
],
tool_call_id: 'old_call',
tool_call_id: 'old_call'
},
{ role: 'assistant', content: 'Response' },
],
]
}
expect(builder.messages).toHaveLength(3)
@ -600,19 +517,13 @@ describe('completion.ts', () => {
const { getServiceHub } = await import('@/hooks/useServiceHub')
const mockScreenshotResult = {
content: [
{
type: 'image',
data: 'proactive_screenshot',
mimeType: 'image/png',
},
],
content: [{ type: 'image', data: 'proactive_screenshot', mimeType: 'image/png' }],
error: '',
}
const mockGetTools = vi.fn(() =>
Promise.resolve([{ name: 'browserbase_screenshot', inputSchema: {} }])
)
const mockGetTools = vi.fn(() => Promise.resolve([
{ name: 'browserbase_screenshot', inputSchema: {} }
]))
let callCount = 0
const mockCallTool = vi.fn(() => {
@ -638,24 +549,19 @@ describe('completion.ts', () => {
vi.mocked(getServiceHub).mockReturnValue({
mcp: () => ({
getTools: mockGetTools,
callToolWithCancellation: mockCallTool,
callToolWithCancellation: mockCallTool
}),
rag: () => ({ getToolNames: () => Promise.resolve([]) }),
rag: () => ({ getToolNames: () => Promise.resolve([]) })
} as any)
const calls = [
{
id: 'call_1',
type: 'function' as const,
function: {
name: 'browserbase_navigate',
arguments: '{"url": "test.com"}',
},
},
]
const calls = [{
id: 'call_1',
type: 'function' as const,
function: { name: 'browserbase_navigate', arguments: '{"url": "test.com"}' }
}]
const builder = {
addToolMessage: vi.fn(),
getMessages: vi.fn(() => []),
getMessages: vi.fn(() => [])
} as any
const message = { thread_id: 'test-thread', metadata: {} } as any
const abortController = new AbortController()
@ -668,12 +574,7 @@ describe('completion.ts', () => {
{},
undefined,
false,
undefined, // thread
undefined, // provider
[], // tools
undefined, // updateStreamingUI
undefined, // maxToolSteps
true // isProactiveMode - Correctly set to true
true
)
// Should have called: 1) browser tool, 2) getTools, 3) proactive screenshot
@ -685,9 +586,9 @@ describe('completion.ts', () => {
it('should not trigger proactive screenshots when mode is disabled', async () => {
const { getServiceHub } = await import('@/hooks/useServiceHub')
const mockGetTools = vi.fn(() =>
Promise.resolve([{ name: 'browserbase_screenshot', inputSchema: {} }])
)
const mockGetTools = vi.fn(() => Promise.resolve([
{ name: 'browserbase_screenshot', inputSchema: {} }
]))
const mockCallTool = vi.fn(() => ({
promise: Promise.resolve({
@ -700,21 +601,19 @@ describe('completion.ts', () => {
vi.mocked(getServiceHub).mockReturnValue({
mcp: () => ({
getTools: mockGetTools,
callToolWithCancellation: mockCallTool,
callToolWithCancellation: mockCallTool
}),
rag: () => ({ getToolNames: () => Promise.resolve([]) }),
rag: () => ({ getToolNames: () => Promise.resolve([]) })
} as any)
const calls = [
{
id: 'call_1',
type: 'function' as const,
function: { name: 'browserbase_navigate', arguments: '{}' },
},
]
const calls = [{
id: 'call_1',
type: 'function' as const,
function: { name: 'browserbase_navigate', arguments: '{}' }
}]
const builder = {
addToolMessage: vi.fn(),
getMessages: vi.fn(() => []),
getMessages: vi.fn(() => [])
} as any
const message = { thread_id: 'test-thread', metadata: {} } as any
const abortController = new AbortController()
@ -727,12 +626,7 @@ describe('completion.ts', () => {
{},
undefined,
false,
undefined, // thread
undefined, // provider
[], // tools
undefined, // updateStreamingUI
undefined, // maxToolSteps
false // isProactiveMode - Correctly set to false
false
)
expect(mockCallTool).toHaveBeenCalledTimes(1)
@ -754,21 +648,19 @@ describe('completion.ts', () => {
vi.mocked(getServiceHub).mockReturnValue({
mcp: () => ({
getTools: mockGetTools,
callToolWithCancellation: mockCallTool,
callToolWithCancellation: mockCallTool
}),
rag: () => ({ getToolNames: () => Promise.resolve([]) }),
rag: () => ({ getToolNames: () => Promise.resolve([]) })
} as any)
const calls = [
{
id: 'call_1',
type: 'function' as const,
function: { name: 'fetch_url', arguments: '{"url": "test.com"}' },
},
]
const calls = [{
id: 'call_1',
type: 'function' as const,
function: { name: 'fetch_url', arguments: '{"url": "test.com"}' }
}]
const builder = {
addToolMessage: vi.fn(),
getMessages: vi.fn(() => []),
getMessages: vi.fn(() => [])
} as any
const message = { thread_id: 'test-thread', metadata: {} } as any
const abortController = new AbortController()
@ -781,12 +673,7 @@ describe('completion.ts', () => {
{},
undefined,
false,
undefined, // thread
undefined, // provider
[], // tools
undefined, // updateStreamingUI
undefined, // maxToolSteps
true // isProactiveMode - Still set to true, but the non-browser tool should skip the proactive step
true
)
expect(mockCallTool).toHaveBeenCalledTimes(1)

View File

@ -41,7 +41,6 @@ import { useAppState } from '@/hooks/useAppState'
import { injectFilesIntoPrompt } from './fileMetadata'
import { Attachment } from '@/types/attachment'
import { ModelCapabilities } from '@/types/models'
import { ReasoningProcessor } from '@/utils/reasoning'
export type ChatCompletionResponse =
| chatCompletion
@ -49,12 +48,6 @@ export type ChatCompletionResponse =
| StreamCompletionResponse
| CompletionResponse
type ToolCallEntry = {
tool: object
response: any
state: 'pending' | 'ready'
}
/**
* @fileoverview Helper functions for creating thread content.
* These functions are used to create thread content objects
@ -80,14 +73,11 @@ export const newUserThreadContent = (
name: doc.name,
type: doc.fileType,
size: typeof doc.size === 'number' ? doc.size : undefined,
chunkCount:
typeof doc.chunkCount === 'number' ? doc.chunkCount : undefined,
chunkCount: typeof doc.chunkCount === 'number' ? doc.chunkCount : undefined,
}))
const textWithFiles =
docMetadata.length > 0
? injectFilesIntoPrompt(content, docMetadata)
: content
docMetadata.length > 0 ? injectFilesIntoPrompt(content, docMetadata) : content
const contentParts = [
{
@ -248,8 +238,10 @@ export const sendCompletion = async (
const providerModelConfig = provider.models?.find(
(model) => model.id === thread.model?.id || model.model === thread.model?.id
)
const effectiveCapabilities = Array.isArray(providerModelConfig?.capabilities)
? (providerModelConfig?.capabilities ?? [])
const effectiveCapabilities = Array.isArray(
providerModelConfig?.capabilities
)
? providerModelConfig?.capabilities ?? []
: getModelCapabilities(provider.provider, thread.model.id)
const modelSupportsTools = effectiveCapabilities.includes(
ModelCapabilities.TOOLS
@ -262,10 +254,7 @@ export const sendCompletion = async (
PlatformFeatures[PlatformFeature.ATTACHMENTS] &&
modelSupportsTools
) {
const ragTools = await getServiceHub()
.rag()
.getTools()
.catch(() => [])
const ragTools = await getServiceHub().rag().getTools().catch(() => [])
if (Array.isArray(ragTools) && ragTools.length) {
usableTools = [...tools, ...ragTools]
}
@ -406,6 +395,7 @@ export const extractToolCall = (
}
return calls
}
/**
* Helper function to check if a tool call is a browser MCP tool
* @param toolName - The name of the tool
@ -529,13 +519,7 @@ const filterOldProactiveScreenshots = (builder: CompletionMessagesBuilder) => {
* @param approvedTools
* @param showModal
* @param allowAllMCPPermissions
* @param thread
* @param provider
* @param tools
* @param updateStreamingUI
* @param maxToolSteps
* @param isProactiveMode
* @param currentStepCount - Internal counter for recursive calls (do not set manually)
*/
export const postMessageProcessing = async (
calls: ChatCompletionMessageToolCall[],
@ -549,30 +533,10 @@ export const postMessageProcessing = async (
toolParameters?: object
) => Promise<boolean>,
allowAllMCPPermissions: boolean = false,
thread?: Thread,
provider?: ModelProvider,
tools: MCPTool[] = [],
updateStreamingUI?: (content: ThreadMessage) => void,
maxToolSteps: number = 20,
isProactiveMode: boolean = false,
currentStepCount: number = 0
): Promise<ThreadMessage> => {
// Initialize/get the current total thinking time from metadata
// This value is passed from sendMessage (initial completion time) or previous recursive call
let currentTotalTime = (message.metadata?.totalThinkingTime as number) ?? 0
isProactiveMode: boolean = false
) => {
// Handle completed tool calls
if (calls.length > 0) {
// Check limit BEFORE processing
if (currentStepCount >= maxToolSteps) {
console.warn(
`Reached maximum tool steps (${maxToolSteps}), stopping chain to prevent infinite loop`
)
return message
}
const nextStepCount = currentStepCount + 1
if (calls.length) {
// Fetch RAG tool names from RAG service
let ragToolNames = new Set<string>()
try {
@ -582,42 +546,43 @@ export const postMessageProcessing = async (
console.error('Failed to load RAG tool names:', e)
}
const ragFeatureAvailable =
useAttachments.getState().enabled &&
PlatformFeatures[PlatformFeature.ATTACHMENTS]
const currentToolCalls =
message.metadata?.tool_calls && Array.isArray(message.metadata.tool_calls)
? [...message.metadata.tool_calls]
: []
useAttachments.getState().enabled && PlatformFeatures[PlatformFeature.ATTACHMENTS]
for (const toolCall of calls) {
if (abortController.signal.aborted) break
const toolId = ulid()
const toolCallEntry: ToolCallEntry = {
tool: {
...(toolCall as object),
id: toolId,
},
response: undefined,
state: 'pending' as 'pending' | 'ready',
}
currentToolCalls.push(toolCallEntry)
const toolCallsMetadata =
message.metadata?.tool_calls &&
Array.isArray(message.metadata?.tool_calls)
? message.metadata?.tool_calls
: []
message.metadata = {
...(message.metadata ?? {}),
tool_calls: currentToolCalls,
totalThinkingTime: currentTotalTime,
tool_calls: [
...toolCallsMetadata,
{
tool: {
...(toolCall as object),
id: toolId,
},
response: undefined,
state: 'pending',
},
],
}
if (updateStreamingUI) updateStreamingUI({ ...message }) // Show pending call
// Check if tool is approved or show modal for approval
let toolParameters = {}
if (toolCall.function.arguments.length) {
try {
console.log('Raw tool arguments:', toolCall.function.arguments)
toolParameters = JSON.parse(toolCall.function.arguments)
console.log('Parsed tool parameters:', toolParameters)
} catch (error) {
console.error('Failed to parse tool arguments:', error)
console.error(
'Raw arguments that failed:',
toolCall.function.arguments
)
}
}
@ -626,6 +591,7 @@ export const postMessageProcessing = async (
const isRagTool = ragToolNames.has(toolName)
const isBrowserTool = isBrowserMCPTool(toolName)
// Auto-approve RAG tools (local/safe operations), require permission for MCP tools
const approved = isRagTool
? true
: allowAllMCPPermissions ||
@ -638,16 +604,10 @@ export const postMessageProcessing = async (
)
: true)
const toolExecutionStartTime = Date.now()
const { promise, cancel } = isRagTool
? ragFeatureAvailable
? {
promise: getServiceHub().rag().callTool({
toolName,
arguments: toolArgs,
threadId: message.thread_id,
}),
promise: getServiceHub().rag().callTool({ toolName, arguments: toolArgs, threadId: message.thread_id }),
cancel: async () => {},
}
: {
@ -670,15 +630,18 @@ export const postMessageProcessing = async (
useAppState.getState().setCancelToolCall(cancel)
let result = approved
? await promise.catch((e) => ({
content: [
{
type: 'text',
text: `Error calling tool ${toolCall.function.name}: ${e.message ?? e}`,
},
],
error: String(e?.message ?? e ?? 'Tool call failed'),
}))
? await promise.catch((e) => {
console.error('Tool call failed:', e)
return {
content: [
{
type: 'text',
text: `Error calling tool ${toolCall.function.name}: ${e.message ?? e}`,
},
],
error: String(e?.message ?? e ?? 'Tool call failed'),
}
})
: {
content: [
{
@ -689,50 +652,43 @@ export const postMessageProcessing = async (
error: 'disallowed',
}
const toolExecutionTime = Date.now() - toolExecutionStartTime
if (typeof result === 'string') {
result = {
content: [{ type: 'text', text: result }],
content: [
{
type: 'text',
text: result,
},
],
error: '',
}
}
currentTotalTime += toolExecutionTime
// Update the entry in the metadata array
toolCallEntry.response = result
toolCallEntry.state = 'ready'
message.metadata = {
...(message.metadata ?? {}),
totalThinkingTime: currentTotalTime,
}
if (updateStreamingUI) updateStreamingUI({ ...message }) // Show result
const streamEvents = (message.metadata?.streamEvents || []) as any[]
streamEvents.push({
timestamp: Date.now(),
type: 'tool_output',
data: { result: result },
})
message.metadata = {
...(message.metadata ?? {}),
streamEvents: streamEvents,
tool_calls: [
...toolCallsMetadata,
{
tool: {
...toolCall,
id: toolId,
},
response: result,
state: 'ready',
},
],
}
builder.addToolMessage(result as ToolResult, toolCall.id)
// Proactive mode: Capture screenshot/snapshot after browser tool execution
if (isProactiveMode && isBrowserTool && !abortController.signal.aborted) {
console.log(
'Proactive mode: Capturing screenshots after browser tool call'
)
console.log('Proactive mode: Capturing screenshots after browser tool call')
// Filter out old screenshots before adding new ones
filterOldProactiveScreenshots(builder)
// Capture new screenshots
const proactiveScreenshots =
await captureProactiveScreenshots(abortController)
const proactiveScreenshots = await captureProactiveScreenshots(abortController)
// Add proactive screenshots to builder
for (const screenshot of proactiveScreenshots) {
@ -746,164 +702,6 @@ export const postMessageProcessing = async (
// update message metadata
}
// Process follow-up completion if conditions are met
if (thread && provider && !abortController.signal.aborted) {
try {
const messagesWithToolResults = builder.getMessages()
const followUpStartTime = Date.now()
const followUpCompletion = await sendCompletion(
thread,
provider,
messagesWithToolResults,
abortController,
tools,
true,
{}
)
let streamFinishTime = Date.now()
if (followUpCompletion) {
let followUpText = ''
const newToolCalls: ChatCompletionMessageToolCall[] = []
const streamEvents = (message.metadata?.streamEvents || []) as any[]
const textContent = message.content.find(
(c) => c.type === ContentType.Text
)
if (isCompletionResponse(followUpCompletion)) {
// Handle non-streaming response
const choice = followUpCompletion.choices[0]
const content = choice?.message?.content
if (content) followUpText = content as string
if (choice?.message?.tool_calls) {
newToolCalls.push(...choice.message.tool_calls)
}
if (textContent?.text) textContent.text.value += followUpText
if (updateStreamingUI) updateStreamingUI({ ...message })
streamFinishTime = Date.now()
} else {
// Handle streaming response
const reasoningProcessor = new ReasoningProcessor()
for await (const chunk of followUpCompletion) {
if (abortController.signal.aborted) break
const deltaReasoning =
reasoningProcessor.processReasoningChunk(chunk)
const deltaContent = chunk.choices[0]?.delta?.content || ''
if (textContent?.text) {
if (deltaContent) {
textContent.text.value += deltaContent
followUpText += deltaContent
}
}
if (deltaReasoning) {
streamEvents.push({
timestamp: Date.now(),
type: 'reasoning_chunk',
data: { content: deltaReasoning },
})
}
const initialToolCallCount = newToolCalls.length
if (chunk.choices[0]?.delta?.tool_calls) {
extractToolCall(chunk, null, newToolCalls)
if (newToolCalls.length > initialToolCallCount) {
// The new tool call is the last element added
streamEvents.push({
timestamp: Date.now(),
type: 'tool_call',
data: { toolCall: newToolCalls[newToolCalls.length - 1] },
})
}
}
// Ensure the metadata is updated before calling updateStreamingUI
message.metadata = {
...(message.metadata ?? {}),
streamEvents: streamEvents,
totalThinkingTime:
currentTotalTime + (Date.now() - followUpStartTime), // Optimistic update
}
if (updateStreamingUI) {
// Create a new object reference for the content array
// This forces the memoized component to detect the change in the mutated text
const uiMessage: ThreadMessage = {
...message,
content: message.content.map((c) => ({ ...c })),
}
updateStreamingUI(uiMessage)
}
}
streamFinishTime = Date.now()
if (textContent?.text && updateStreamingUI) {
// Final UI update after streaming completes
const uiMessage: ThreadMessage = {
...message,
content: message.content.map((c) => ({ ...c })),
}
updateStreamingUI(uiMessage)
}
}
const followUpTotalTime = streamFinishTime - followUpStartTime
currentTotalTime += followUpTotalTime //
message.metadata = {
...(message.metadata ?? {}),
totalThinkingTime: currentTotalTime,
}
// Recursively process new tool calls if any
if (newToolCalls.length > 0) {
builder.addAssistantMessage(followUpText, undefined, newToolCalls)
// Recursive call continues accumulation on the same message object
await postMessageProcessing(
newToolCalls,
builder,
message,
abortController,
approvedTools,
showModal,
allowAllMCPPermissions,
thread,
provider,
tools,
updateStreamingUI,
maxToolSteps,
isProactiveMode,
nextStepCount, // Pass the incremented step count
)
}
}
} catch (error) {
console.error(
'Failed to get follow-up completion after tool execution:',
String(error)
)
// Optionally add error to message metadata for UI display
const streamEvents = (message.metadata?.streamEvents || []) as any[]
streamEvents.push({
timestamp: Date.now(),
type: 'error',
data: {
message: 'Follow-up completion failed',
error: String(error),
},
})
message.metadata = {
...(message.metadata ?? {}),
streamEvents: streamEvents,
}
}
}
return message
}
return message
}

View File

@ -8,13 +8,5 @@
},
"sendMessage": "Send Message",
"newConversation": "New Conversation",
"clearHistory": "Clear History",
"thought_and_tool_call": "Thought and called tools",
"tool_called": "Called tools",
"calling_tool": "Calling a tool",
"thinking": "Thinking",
"thought": "Thought",
"for": "for",
"seconds": "seconds"
}
"clearHistory": "Clear History"
}

View File

@ -235,21 +235,7 @@
"title": "Edit Message"
},
"messageMetadata": {
"title": "Message Metadata",
"model": "Model",
"name": "Name",
"id": "ID",
"createdAt": "Created At",
"toolSteps": "Tool Steps",
"temperature": "Temperature",
"topK": "Top K",
"topP": "Top P",
"description": "Description",
"instructions": "Instructions",
"tokenSpeed": "Token Speed",
"tokenCount": "Token Count",
"lastUpdate": "Last Update",
"noMessageMetadataAvailable": "No Message Metadata Available"
"title": "Message Metadata"
}
},
"projects": {

View File

@ -0,0 +1,35 @@
{
"title": "Assistentes",
"editAssistant": "Editar Assistente",
"deleteAssistant": "Remover Assistente",
"deleteConfirmation": "Remover Assistente",
"deleteConfirmationDesc": "Tem certeza de que deseja remover este assistente? Esta ação não pode ser desfeita.",
"cancel": "Cancelar",
"delete": "Remover",
"addAssistant": "Adicionar Assistente",
"emoji": "Emoji",
"name": "Nome",
"enterName": "Digite o nome",
"nameRequired": "O nome é obrigatório",
"description": "Descrição (opcional)",
"enterDescription": "Digite a descrição",
"instructions": "Instruções",
"enterInstructions": "Digite as instruções",
"predefinedParameters": "Parâmetros Predefinidos",
"parameters": "Parâmetros",
"key": "Chave",
"value": "Valor",
"stringValue": "Texto",
"numberValue": "Número",
"booleanValue": "Booleano",
"jsonValue": "JSON",
"trueValue": "Verdadeiro",
"falseValue": "Falso",
"jsonValuePlaceholder": "JSON",
"save": "Salvar",
"createNew": "Criar Novo Assistente",
"personality": "Personalidade",
"capabilities": "Capacidades",
"instructionsDateHint": "Dica: Use {{current_date}} para inserir a data de hoje.",
"maxToolSteps": "Máximo de passos das ferramentas (tools)"
}

View File

@ -0,0 +1,12 @@
{
"welcome": "Olá, como você está?",
"description": "Como posso ajudá-lo hoje?",
"temporaryChat": "Chat Temporário",
"temporaryChatDescription": "Inicie uma conversa temporária que não será salva no seu histórico de chat.",
"status": {
"empty": "Nenhum Chat Encontrado"
},
"sendMessage": "Enviar Mensagem",
"newConversation": "Nova Conversa",
"clearHistory": "Limpar Histórico"
}

View File

@ -0,0 +1,375 @@
{
"assistants": "Assistentes",
"hardware": "Hardware",
"mcp-servers": "Servidores MCP",
"local_api_server": "Servidor de API Local",
"https_proxy": "Proxy HTTPS",
"extensions": "Extensões",
"attachments": "Anexos",
"general": "Geral",
"settings": "Configurações",
"modelProviders": "Provedores de Modelo",
"appearance": "Aparência",
"privacy": "Privacidade",
"keyboardShortcuts": "Atalhos",
"newChat": "Novo Chat",
"favorites": "Favoritos",
"recents": "Recentes",
"hub": "Hub",
"helpSupport": "Ajuda e Suporte",
"helpUsImproveJan": "Ajude-nos a Melhorar o Jan",
"unstarAll": "Remover todos os favoritos",
"unstar": "Remover Favorito",
"deleteAll": "Apagar Tudo",
"star": "Favoritar",
"rename": "Renomear",
"delete": "Apagar",
"copied": "Copiado!",
"dataFolder": "Pasta de Dados",
"others": "Outros",
"language": "Idioma",
"login": "Entrar",
"loginWith": "Entrar com {{provider}}",
"loginFailed": "Falha ao Entrar",
"logout": "Sair",
"loggingOut": "Saindo...",
"loggedOut": "Saída Realizada com Sucesso",
"logoutFailed": "Falha ao Sair",
"profile": "Perfil",
"reset": "Redefinir",
"search": "Buscar",
"name": "Nome",
"cancel": "Cancelar",
"create": "Criar",
"save": "Salvar",
"edit": "Editar",
"copy": "Copiar",
"back": "Voltar",
"close": "Fechar",
"next": "Próximo",
"finish": "Finalizar",
"skip": "Pular",
"allow": "Permitir",
"deny": "Negar",
"start": "Iniciar",
"stop": "Parar",
"preview": "Visualizar",
"compactWidth": "Largura Compacta",
"fullWidth": "Largura Completa",
"dark": "Escuro",
"light": "Claro",
"system": "Sistema",
"auto": "Automático",
"english": "Inglês",
"medium": "Médio",
"newThread": "Nova Conversa",
"noResultsFound": "Nenhum resultado encontrado",
"noThreadsYet": "Nenhuma conversa ainda",
"noThreadsYetDesc": "Inicie uma nova conversa para ver seu histórico de conversas aqui.",
"downloads": "Downloads",
"downloading": "Baixando",
"cancelDownload": "Cancelar download",
"downloadCancelled": "Download Cancelado",
"downloadComplete": "Download Concluído",
"thinking": "Pensando...",
"thought": "Pensamento",
"callingTool": "Chamando ferramenta",
"completed": "Concluído",
"image": "Imagem",
"vision": "Visão",
"embeddings": "Embeddings",
"tools": "Ferramentas",
"webSearch": "Busca na Web",
"reasoning": "Raciocínio",
"selectAModel": "Selecione um modelo",
"noToolsAvailable": "Nenhuma ferramenta disponível",
"noModelsFoundFor": "Nenhum modelo encontrado para \"{{searchValue}}\"",
"failedToLoadModels": "Falha ao carregar modelos",
"noModels": "Nenhum modelo encontrado",
"customAvatar": "Avatar personalizado",
"editAssistant": "Editar Assistente",
"jan": "Jan",
"metadata": "Metadados",
"regenerate": "Regenerar",
"threadImage": "Imagem da conversa",
"editMessage": "Editar Mensagem",
"deleteMessage": "Apagar Mensagem",
"deleteThread": "Apagar Conversa",
"renameThread": "Renomear Conversa",
"threadTitle": "Título da Conversa",
"deleteAllThreads": "Apagar Todas as Conversas",
"allThreadsUnfavorited": "Todas as Conversas Removidas dos Favoritos",
"deleteAllThreadsConfirm": "Tem certeza de que deseja deletar todas as conversas? Esta ação não pode ser desfeita.",
"addProvider": "Adicionar Provedor",
"addOpenAIProvider": "Adicionar Provedor OpenAI",
"enterNameForProvider": "Digite um nome para seu provedor",
"providerAlreadyExists": "Provedor com nome \"{{name}}\" já existe. Por favor, escolha um nome diferente.",
"adjustFontSize": "Ajustar Tamanho da Fonte",
"changeLanguage": "Alterar Idioma",
"editTheme": "Editar Tema",
"editCodeBlockStyle": "Editar Estilo do Bloco de Código",
"editServerHost": "Editar Host do Servidor",
"pickColorWindowBackground": "Escolher Cor do Fundo da Janela",
"pickColorAppMainView": "Escolher Cor da Visualização Principal do App",
"pickColorAppPrimary": "Escolher Cor Primária do App",
"pickColorAppAccent": "Escolher Cor de Destaque do App",
"pickColorAppDestructive": "Escolher Cor Destrutiva do App",
"apiKeyRequired": "Chave API é obrigatória",
"enterTrustedHosts": "Digite os hosts confiáveis",
"placeholder": {
"chatInput": "Pergunte-me qualquer coisa..."
},
"confirm": "Confirmar",
"continue": "Continuar",
"loading": "Carregando...",
"error": "Erro",
"success": "Sucesso",
"warning": "Aviso",
"conversationNotAvailable": "Conversa não disponível",
"conversationNotAvailableDescription": "A conversa que você está tentando acessar não está disponível ou foi removida.",
"temporaryChat": "Chat Temporário",
"temporaryChatTooltip": "Chat temporário não aparecerá no seu histórico",
"noResultsFoundDesc": "Não conseguimos encontrar chats correspondentes à sua busca. Tente uma palavra-chave diferente.",
"searchModels": "Buscar modelos...",
"searchStyles": "Buscar estilos...",
"createAssistant": "Criar Assistente",
"enterApiKey": "Digite a Chave API",
"scrollToBottom": "Rolar até embaixo",
"generateAiResponse": "Gerar Resposta da IA",
"addModel": {
"title": "Adicionar Modelo",
"modelId": "ID do Modelo",
"enterModelId": "Digite o ID do Modelo",
"addModel": "Adicionar Modelo",
"description": "Adicionar um novo modelo ao provedor",
"exploreModels": "Ver lista de modelos do provedor"
},
"mcpServers": {
"editServer": "Editar Servidor",
"addServer": "Adicionar Servidor",
"serverName": "Nome do Servidor",
"enterServerName": "Digite o nome do servidor",
"command": "Comando",
"enterCommand": "Digite o comando",
"arguments": "Argumentos",
"argument": "Argumento {{index}}",
"envVars": "Variáveis de Ambiente",
"key": "Chave",
"value": "Valor",
"save": "Salvar"
},
"deleteServer": {
"title": "Remover Servidor",
"delete": "Remover"
},
"editJson": {
"errorParse": "Falha ao analisar JSON",
"errorPaste": "Falha ao colar JSON",
"errorFormat": "Formato JSON inválido",
"titleAll": "Editar Configuração de Todos os Servidores",
"placeholder": "Entre com a configuração do JSON...",
"save": "Salvar"
},
"editModel": {
"title": "Editar Modelo: {{modelId}}",
"description": "Configure as capacidades do modelo alterando as opções abaixo.",
"capabilities": "Capacidades",
"tools": "Ferramentas",
"vision": "Visão",
"embeddings": "Embeddings",
"notAvailable": "Ainda não disponível"
},
"outOfContextError": {
"truncateInput": "Truncar Entrada",
"title": "Erro de contexto esgotado",
"description": "Este chat está atingindo o limite de memória da IA, como um quadro branco ficando cheio. Podemos expandir a janela de memória (chamada tamanho do contexto) para que ela lembre mais, mas pode usar mais da memória do seu computador. Também podemos truncar a entrada, o que significa que ela esquecerá parte do histórico do chat para dar espaço a novas mensagens.",
"increaseContextSizeDescription": "Você quer aumentar o tamanho do contexto?",
"increaseContextSize": "Aumentar Tamanho do Contexto"
},
"toolApproval": {
"title": "Solicitação de Permissão para Ferramenta",
"description": "O assistente quer usar <strong>{{toolName}}</strong>",
"securityNotice": "Permita apenas ferramentas em que você confia. Ferramentas podem acessar seu sistema e dados.",
"deny": "Negar",
"allowOnce": "Permitir Uma Vez",
"alwaysAllow": "Sempre Permitir"
},
"deleteModel": {
"title": "Remover Modelo: {{modelId}}",
"description": "Tem certeza de que deseja remover este modelo? Esta ação não pode ser desfeita.",
"success": "Modelo {{modelId}} foi removido permanentemente.",
"cancel": "Cancelar",
"delete": "Remover"
},
"deleteProvider": {
"title": "Remover Provedor",
"description": "Remover este provedor e todos os seus modelos. Esta ação não pode ser desfeita.",
"success": "Provedor {{provider}} foi removido permanentemente.",
"confirmTitle": "Remover Provedor: {{provider}}",
"confirmDescription": "Tem certeza de que deseja remover este provedor? Esta ação não pode ser desfeita.",
"cancel": "Cancelar",
"delete": "Remover"
},
"modelSettings": {
"title": "Configurações do Modelo - {{modelId}}",
"description": "Configure as configurações do modelo para otimizar desempenho e comportamento."
},
"dialogs": {
"changeDataFolder": {
"title": "Alterar Localização da Pasta de Dados",
"description": "Tem certeza de que deseja alterar a localização da pasta de dados? Isso moverá todos os seus dados para a nova localização e reiniciará a aplicação.",
"currentLocation": "Localização Atual:",
"newLocation": "Nova Localização:",
"cancel": "Cancelar",
"changeLocation": "Alterar Localização"
},
"deleteAllThreads": {
"title": "Remover Todas as Conversas",
"description": "Todas as conversas serão removidas. Esta ação não pode ser desfeita."
},
"deleteThread": {
"description": "Tem certeza de que deseja remover esta conversa? Esta ação não pode ser desfeita."
},
"editMessage": {
"title": "Editar Mensagem"
},
"messageMetadata": {
"title": "Metadados da Mensagem"
}
},
"projects": {
"title": "Projetos",
"addProject": "Adicionar Projeto",
"addToProject": "Adicionar ao projeto",
"removeFromProject": "Remover do projeto",
"createNewProject": "Criar Novo Projeto",
"editProject": "Editar Projeto",
"deleteProject": "Remover Projeto",
"projectName": "Nome do Projeto",
"enterProjectName": "Digite o nome do projeto...",
"noProjectsAvailable": "Nenhum projeto disponível",
"noProjectsYet": "Nenhum Projeto Ainda",
"noProjectsYetDesc": "Inicie um novo projeto clicando no botão Adicionar Projeto.",
"projectNotFound": "Projeto Não Encontrado",
"projectNotFoundDesc": "O projeto que você está procurando não existe ou foi removido.",
"deleteProjectDialog": {
"title": "Remover Projeto",
"permanentDelete": "Isso removerá permanentemente todas as conversas.",
"permanentDeleteWarning": "Esta ação removerá permanentemente TODAS as conversas dentro do projeto!",
"deleteEmptyProject": "Esta ação removerá o projeto \"{{projectName}}\".",
"saveThreadsAdvice": "Para salvar conversas, mova-as para sua lista de conversas ou outro projeto antes de remover.",
"starredWarning": "Você ainda tem conversas favoritadas dentro do projeto.",
"deleteButton": "Remover",
"successWithName": "Projeto \"{{projectName}}\" removido com sucesso",
"successWithoutName": "Projeto removido com sucesso",
"error": "Falha ao remover projeto. Tente novamente.",
"ariaLabel": "Remover {{projectName}}"
},
"addProjectDialog": {
"createTitle": "Criar Novo Projeto",
"editTitle": "Editar Projeto",
"nameLabel": "Nome do Projeto",
"namePlaceholder": "Digite o nome do projeto...",
"createButton": "Criar",
"updateButton": "Atualizar",
"alreadyExists": "Projeto \"{{projectName}}\" já existe",
"createSuccess": "Projeto \"{{projectName}}\" criado com sucesso",
"renameSuccess": "Projeto renomeado de \"{{oldName}}\" para \"{{newName}}\""
},
"noConversationsIn": "Nenhuma Conversa em {{projectName}}",
"startNewConversation": "Inicie uma nova conversa com {{projectName}} abaixo",
"conversationsIn": "Conversas em {{projectName}}",
"conversationsDescription": "Clique em qualquer conversa para continuar o chat, ou inicie uma nova abaixo.",
"thread": "conversa",
"threads": "conversas",
"updated": "Atualizado:",
"collapseProject": "Recolher projeto",
"expandProject": "Expandir projeto",
"update": "Atualizar",
"searchProjects": "Buscar projetos...",
"noProjectsFound": "Nenhum projeto encontrado",
"tryDifferentSearch": "Tente um termo de busca diferente"
},
"toast": {
"allThreadsUnfavorited": {
"title": "Todas as Conversas Removidas dos Favoritos",
"description": "Todas as conversas foram removidas dos seus favoritos."
},
"deleteAllThreads": {
"title": "Remover Todas as Conversas",
"description": "Todas as conversas foram removidas permanentemente."
},
"renameThread": {
"title": "Renomear Conversa",
"description": "Título da conversa foi renomeado para '{{title}}'"
},
"deleteThread": {
"title": "Remover Conversa",
"description": "Esta conversa foi removida permanentemente."
},
"editMessage": {
"title": "Editar Mensagem",
"description": "Mensagem editada com sucesso. Aguarde a resposta do modelo."
},
"appUpdateDownloaded": {
"title": "Atualização do App Baixada",
"description": "A atualização do app foi baixada com sucesso."
},
"appUpdateDownloadFailed": {
"title": "Falha no Download da Atualização do App",
"description": "Falha ao baixar a atualização do app. Tente novamente."
},
"downloadComplete": {
"title": "Download Concluído",
"description": " {{item}} foi baixado"
},
"downloadCancelled": {
"title": "Download Cancelado",
"description": "O processo de download foi cancelado"
},
"downloadFailed": {
"title": "Falha no Download",
"description": "Falha no download de {{item}}"
},
"modelValidationStarted": {
"title": "Validando Modelo",
"description": "Modelo \"{{modelId}}\" baixado com sucesso. Verificando integridade..."
},
"modelValidationFailed": {
"title": "Falha na Validação do Modelo",
"description": "O modelo baixado \"{{modelId}}\" falhou na verificação de integridade e foi removido. O arquivo pode estar corrompido ou adulterado."
},
"downloadAndVerificationComplete": {
"title": "Download Concluído",
"description": "Modelo \"{{item}}\" baixado e verificado com sucesso"
},
"projectCreated": {
"title": "Projeto Criado",
"description": "Projeto \"{{projectName}}\" criado com sucesso"
},
"projectRenamed": {
"title": "Projeto Renomeado",
"description": "Projeto renomeado de \"{{oldName}}\" para \"{{newName}}\""
},
"projectDeleted": {
"title": "Projeto Removido",
"description": "Projeto \"{{projectName}}\" removido com sucesso"
},
"projectAlreadyExists": {
"title": "Projeto Já Existe",
"description": "Projeto \"{{projectName}}\" já existe"
},
"projectDeleteFailed": {
"title": "Falha ao Remover",
"description": "Falha ao remover projeto. Tente novamente."
},
"threadAssignedToProject": {
"title": "Conversa Atribuída",
"description": "Conversa atribuída a \"{{projectName}}\" com sucesso"
},
"threadRemovedFromProject": {
"title": "Conversa Removida",
"description": "Conversa removida de \"{{projectName}}\" com sucesso"
}
}
}

View File

@ -0,0 +1,31 @@
{
"sortNewest": "Mais Recentes",
"sortMostDownloaded": "Mais Baixados",
"use": "Usar",
"download": "Baixar",
"downloaded": "Baixado",
"loadingModels": "Carregando modelos...",
"noModels": "Nenhum modelo encontrado",
"by": "Por",
"downloads": "Downloads",
"variants": "Variantes",
"showVariants": "Mostrar variantes",
"useModel": "Usar este modelo",
"downloadModel": "Baixar modelo",
"tools": "Ferramentas",
"searchPlaceholder": "Buscar modelos no Hugging Face...",
"joyride": {
"recommendedModelTitle": "Modelo Recomendado",
"recommendedModelContent": "Navegue e baixe modelos de IA poderosos de vários provedores, tudo em um só lugar. Sugerimos começar com o Jan-Nano - um modelo otimizado para chamadas de função, integração de ferramentas e capacidades de pesquisa. É ideal para construir agentes de IA interativos.",
"downloadInProgressTitle": "Download em Progresso",
"downloadInProgressContent": "Seu modelo está sendo baixado. Acompanhe o progresso aqui - uma vez finalizado, estará pronto para usar.",
"downloadModelTitle": "Baixar Modelo",
"downloadModelContent": "Clique no botão Baixar para começar a baixar o modelo.",
"back": "Voltar",
"close": "Fechar",
"lastWithDownload": "Baixar",
"last": "Finalizar",
"next": "Próximo",
"skip": "Pular"
}
}

View File

@ -0,0 +1,3 @@
{
"noLogs": "Nenhum log disponível"
}

View File

@ -0,0 +1,47 @@
{
"editServer": "Editar Servidor MCP",
"addServer": "Adicionar Servidor MCP",
"serverName": "Nome do Servidor",
"enterServerName": "Digite o nome do servidor",
"command": "Comando",
"enterCommand": "Digite o comando (uvx ou npx)",
"arguments": "Argumentos",
"argument": "Argumento {{index}}",
"envVars": "Variáveis de Ambiente",
"key": "Chave",
"value": "Valor",
"save": "Salvar",
"status": "Status",
"connected": "Conectado",
"disconnected": "Desconectado",
"deleteServer": {
"title": "Remover Servidor MCP",
"description": "Tem certeza de que deseja remover o servidor MCP {{serverName}}? Esta ação não pode ser desfeita.",
"delete": "Remover",
"success": "Servidor MCP {{serverName}} removido com sucesso"
},
"editJson": {
"title": "Editar JSON para Servidor MCP: {{serverName}}",
"titleAll": "Editar JSON de Todos os Servidores MCP",
"placeholder": "Digite a configuração JSON",
"errorParse": "Falha ao analisar dados iniciais",
"errorPaste": "Formato JSON inválido no conteúdo colado",
"errorFormat": "Formato JSON inválido",
"errorServerName": "Nome do servidor é obrigatório e não pode estar vazio",
"errorMissingServerNameKey": "JSON deve estar estruturado como {\"serverName\": {config}} - chave com nome do servidor ausente",
"errorInvalidType": "Tipo inválido '{{type}}' para servidor '{{serverName}}'. Tipo deve ser 'stdio', 'http', ou 'sse'",
"save": "Salvar"
},
"checkParams": "Por favor, verifique os parâmetros de acordo com o tutorial.",
"title": "Servidores MCP",
"experimental": "Experimental",
"editAllJson": "Editar JSON de Todos os Servidores",
"findMore": "Encontre mais servidores MCP em",
"allowPermissions": "Permitir Todas as Permissões de Ferramentas MCP",
"allowPermissionsDesc": "Quando habilitado, todas as chamadas de ferramentas MCP serão automaticamente aprovadas sem mostrar diálogos de permissão. Esta configuração se aplica globalmente a todas as conversas, incluindo novos chats.",
"noServers": "Nenhum servidor MCP encontrado",
"args": "Args",
"env": "Env",
"serverStatusActive": "Servidor {{serverKey}} ativado com sucesso",
"serverStatusInactive": "Servidor {{serverKey}} desativado com sucesso"
}

View File

@ -0,0 +1,7 @@
{
"title": "Erro de contexto esgotado",
"description": "Este chat está atingindo o limite de memória da IA, como um quadro branco ficando cheio. Podemos expandir a janela de memória (chamada tamanho do contexto) para que ela lembre mais, mas pode usar mais da memória do seu computador. Também podemos truncar a entrada, o que significa que ela esquecerá parte do histórico do chat para dar espaço a novas mensagens.",
"increaseContextSizeDescription": "Você quer aumentar o tamanho do contexto?",
"truncateInput": "Truncar Entrada",
"increaseContextSize": "Aumentar Tamanho do Contexto"
}

View File

@ -0,0 +1,5 @@
{
"addProvider": "Adicionar Provedor",
"addOpenAIProvider": "Adicionar Provedor OpenAI",
"enterNameForProvider": "Digite o nome para o provedor"
}

View File

@ -0,0 +1,74 @@
{
"joyride": {
"chooseProviderTitle": "Escolha um Provedor",
"chooseProviderContent": "Escolha o provedor que você quer usar, certifique-se de ter acesso a uma chave API para ele.",
"getApiKeyTitle": "Obtenha sua Chave de API",
"getApiKeyContent": "Entre no painel do provedor para encontrar ou gerar sua chave de API.",
"insertApiKeyTitle": "Insira sua Chave de API",
"insertApiKeyContent": "Cole sua chave de API aqui para conectar e ativar o provedor.",
"back": "Voltar",
"close": "Fechar",
"last": "Finalizar",
"next": "Próximo",
"skip": "Pular"
},
"refreshModelsError": "Provedor deve ter URL base e chave API configuradas para buscar modelos.",
"refreshModelsSuccess": "Adicionado {{count}} novo(s) modelo(s) de {{provider}}.",
"noNewModels": "Nenhum modelo novo encontrado. Todos os modelos disponíveis já foram adicionados.",
"refreshModelsFailed": "Falha ao buscar modelos de {{provider}}. Verifique sua chave de API e URL base.",
"models": "Modelos",
"refreshing": "Atualizando...",
"refresh": "Atualizar",
"import": "Importar",
"importModelSuccess": "Modelo {{provider}} foi importado com sucesso.",
"importModelError": "Falha ao importar modelo:",
"stop": "Parar",
"start": "Iniciar",
"noModelFound": "Nenhum modelo encontrado",
"noModelFoundDesc": "Modelos disponíveis serão listados aqui. Se você ainda não tem modelos, visite o Hub para baixar.",
"configuration": "Configuração",
"apiEndpoint": "Endpoint da API",
"testConnection": "Testar Conexão",
"addModel": {
"title": "Adicionar Novo Modelo",
"description": "Adicionar um novo modelo ao provedor {{provider}}.",
"modelId": "ID do Modelo",
"enterModelId": "Digite o ID do modelo",
"exploreModels": "Ver lista de modelos de {{provider}}",
"addModel": "Adicionar Modelo",
"modelExists": "Modelo já existe",
"modelExistsDesc": "Por favor, escolha um ID de modelo diferente."
},
"deleteModel": {
"title": "Remover Modelo: {{modelId}}",
"description": "Tem certeza de que deseja remover este modelo? Esta ação não pode ser desfeita.",
"success": "Modelo {{modelId}} foi removido permanentemente.",
"cancel": "Cancelar",
"delete": "Remover"
},
"deleteProvider": {
"title": "Remover Provedor",
"description": "Remover este provedor e todos os seus modelos. Esta ação não pode ser desfeita.",
"success": "Provedor {{provider}} foi removido permanentemente.",
"confirmTitle": "Remover Provedor: {{provider}}",
"confirmDescription": "Tem certeza de que deseja remover este provedor? Esta ação não pode ser desfeita.",
"cancel": "Cancelar",
"delete": "Remover"
},
"editModel": {
"title": "Editar Modelo: {{modelId}}",
"description": "Configure as capacidades do modelo alterando as opções abaixo.",
"capabilities": "Capacidades",
"tools": "Ferramentas",
"vision": "Visão",
"embeddings": "Embeddings",
"notAvailable": "Ainda não disponível",
"warning": {
"title": "Prossiga com Cautela",
"description": "Modificar capacidades do modelo pode afetar desempenho e funcionalidade. Configurações incorretas podem causar comportamento inesperado ou erros."
}
},
"addProvider": "Adicionar Provedor",
"addOpenAIProvider": "Adicionar Provedor OpenAI",
"enterNameForProvider": "Digite o nome para o provedor"
}

View File

@ -0,0 +1,304 @@
{
"autoDownload": "Download automático de novas atualizações",
"checkForUpdates": "Verificar Atualizações",
"checkForUpdatesDesc": "Verificar se uma versão mais nova do Jan está disponível.",
"checkingForUpdates": "Verificando atualizações...",
"noUpdateAvailable": "Você está executando a versão mais recente",
"devVersion": "Versão de desenvolvimento detectada",
"updateError": "Falha ao verificar atualizações",
"checkForBackendUpdates": "Verificar Atualizações do Llamacpp",
"checkForBackendUpdatesDesc": "Verificar se uma versão mais nova do backend Llamacpp está disponível.",
"checkingForBackendUpdates": "Verificando atualizações do Llamacpp...",
"noBackendUpdateAvailable": "Você está executando a versão mais recente do Llamacpp",
"backendUpdateError": "Falha ao verificar atualizações do Llamacpp",
"changeLocation": "Alterar Localização",
"copied": "Copiado",
"copyPath": "Copiar Caminho",
"openLogs": "Abrir Logs",
"revealLogs": "Mostrar Logs",
"showInFinder": "Mostrar no Finder",
"showInFileExplorer": "Mostrar no Explorador de Arquivos",
"openContainingFolder": "Abrir Pasta",
"failedToRelocateDataFolder": "Falha ao realocar pasta de dados",
"failedToRelocateDataFolderDesc": "Falha ao realocar pasta de dados. Tente novamente.",
"factoryResetTitle": "Redefinir para Configurações Originais",
"factoryResetDesc": "Isso redefinirá todas as configurações do app para os padrões originais. Isso não pode ser desfeito. Recomendamos isso apenas se o app estiver corrompido.",
"cancel": "Cancelar",
"reset": "Redefinir",
"resources": "Recursos",
"documentation": "Documentação",
"documentationDesc": "Aprenda como usar o Jan e explore seus recursos.",
"viewDocs": "Ver Documentação",
"releaseNotes": "Notas de Versão",
"releaseNotesDesc": "Veja o que há de novo na versão mais recente do Jan.",
"viewReleases": "Ver Versões",
"community": "Comunidade",
"github": "GitHub",
"githubDesc": "Contribua para o desenvolvimento do Jan.",
"discord": "Discord",
"discordDesc": "Junte-se à nossa comunidade para suporte e discussões.",
"support": "Suporte",
"reportAnIssue": "Reportar um Problema",
"reportAnIssueDesc": "Encontrou um bug? Ajude-nos relatando um problema no GitHub.",
"reportIssue": "Reportar Problema",
"credits": "Créditos",
"creditsDesc1": "👋 Jan é construído com ❤️ pela equipe Menlo Research.",
"creditsDesc2": "Agradecimentos especiais às nossas dependências de código aberto—especialmente llama.cpp e Tauri—e à nossa incrível comunidade de IA.",
"appVersion": "Versão do App",
"dataFolder": {
"appData": "Dados do App",
"appDataDesc": "Localização padrão para mensagens e outros dados do usuário.",
"appLogs": "Logs do App",
"appLogsDesc": "Ver logs detalhados do App."
},
"others": {
"spellCheck": "Verificação Ortográfica",
"spellCheckDesc": "Habilitar verificação ortográfica para suas conversas.",
"resetFactory": "Redefinir para Configurações Originais",
"resetFactoryDesc": "Restaurar aplicação ao seu estado inicial, apagando todos os modelos e histórico de chat. Esta ação é irreversível e recomendada apenas se a aplicação estiver corrompida."
},
"shortcuts": {
"application": "Aplicação",
"newChat": "Novo Chat",
"newChatDesc": "Criar um novo chat.",
"toggleSidebar": "Mostrar/Ocultar Barra Lateral",
"toggleSidebarDesc": "Mostrar/ocultar a barra lateral.",
"zoomIn": "Aumentar Zoom",
"zoomInDesc": "Aumentar o nível de zoom.",
"zoomOut": "Diminuir Zoom",
"zoomOutDesc": "Diminuir o nível de zoom.",
"chat": "Chat",
"sendMessage": "Enviar Mensagem",
"sendMessageDesc": "Enviar a mensagem atual.",
"enter": "Enter",
"newLine": "Nova Linha",
"newLineDesc": "Inserir uma nova linha.",
"shiftEnter": "Shift + Enter",
"navigation": "Navegação",
"goToSettings": "Ir para Configurações",
"goToSettingsDesc": "Abrir configurações."
},
"appearance": {
"title": "Aparência",
"theme": "Tema",
"themeDesc": "Corresponder ao tema do Sistema Operacional.",
"fontSize": "Tamanho da Fonte",
"fontSizeDesc": "Ajustar o tamanho da fonte do app.",
"windowBackground": "Fundo da Janela",
"windowBackgroundDesc": "Definir a cor de fundo da janela do app.",
"appMainView": "Visualização Principal do App",
"appMainViewDesc": "Definir a cor de fundo da área de conteúdo principal.",
"primary": "Primário",
"primaryDesc": "Definir a cor primária para componentes da UI.",
"accent": "Destaque",
"accentDesc": "Definir a cor de destaque para realces da UI.",
"destructive": "Destrutivo",
"destructiveDesc": "Definir a cor para ações destrutivas.",
"resetToDefault": "Redefinir para Padrão",
"resetToDefaultDesc": "Redefinir todas as configurações de aparência para padrão.",
"resetAppearanceSuccess": "Aparência redefinida com sucesso",
"resetAppearanceSuccessDesc": "Todas as configurações de aparência foram restauradas para padrão.",
"chatWidth": "Largura do Chat",
"chatWidthDesc": "Personalizar a largura da visualização do chat.",
"tokenCounterCompact": "Contador de Tokens Compacto",
"tokenCounterCompactDesc": "Mostrar contador de tokens dentro da entrada do chat. Quando desabilitado, contador de tokens aparece abaixo da entrada.",
"codeBlockTitle": "Bloco de Código",
"codeBlockDesc": "Escolher um estilo de realce de sintaxe.",
"showLineNumbers": "Mostrar Números de Linha",
"showLineNumbersDesc": "Exibir números de linha em blocos de código.",
"resetCodeBlockStyle": "Redefinir Estilo do Bloco de Código",
"resetCodeBlockStyleDesc": "Redefinir estilo do bloco de código para padrão.",
"resetCodeBlockSuccess": "Estilo do bloco de código redefinido com sucesso",
"resetCodeBlockSuccessDesc": "Estilo do bloco de código foi restaurado para padrão."
},
"hardware": {
"os": "Sistema Operacional",
"name": "Nome",
"version": "Versão",
"cpu": "CPU",
"model": "Modelo",
"architecture": "Arquitetura",
"cores": "Cores",
"instructions": "Instruções",
"usage": "Uso",
"memory": "Memória",
"totalRam": "RAM Total",
"availableRam": "RAM Disponível",
"vulkan": "Vulkan",
"enableVulkan": "Habilitar Vulkan",
"enableVulkanDesc": "Usar API Vulkan para aceleração GPU. Não habilite Vulkan se você tem uma GPU NVIDIA pois pode causar problemas de compatibilidade.",
"gpus": "GPUs",
"noGpus": "Nenhuma GPU detectada",
"vram": "VRAM",
"freeOf": "livre de",
"driverVersion": "Versão do Driver",
"computeCapability": "Capacidade de Computação",
"systemMonitor": "Monitor do Sistema"
},
"httpsProxy": {
"proxy": "Proxy",
"proxyUrl": "URL do Proxy",
"proxyUrlDesc": "A URL e porta do seu servidor proxy.",
"proxyUrlPlaceholder": "http://proxy.example.com:8080",
"authentication": "Autenticação",
"authenticationDesc": "Credenciais para o servidor proxy, se necessário.",
"username": "Nome de Usuário",
"password": "Senha",
"noProxy": "Sem Proxy",
"noProxyDesc": "Uma lista separada por vírgulas de hosts para contornar o proxy.",
"noProxyPlaceholder": "localhost,127.0.0.1,.local",
"sslVerification": "Verificação SSL",
"ignoreSsl": "Ignorar Certificados SSL",
"ignoreSslDesc": "Permitir certificados auto-assinados ou não verificados. Isso pode ser necessário para alguns proxies, mas reduz a segurança. Habilite apenas se confiar no seu proxy.",
"proxySsl": "SSL do Proxy",
"proxySslDesc": "Validar o certificado SSL ao conectar ao proxy.",
"proxyHostSsl": "SSL do Host do Proxy",
"proxyHostSslDesc": "Validar o certificado SSL do host do proxy.",
"peerSsl": "SSL do Peer",
"peerSslDesc": "Validar os certificados SSL das conexões peer.",
"hostSsl": "SSL do Host",
"hostSslDesc": "Validar os certificados SSL dos hosts de destino."
},
"localApiServer": {
"title": "Servidor de API Local",
"description": "Executar um servidor compatível com OpenAI localmente.",
"startServer": "Iniciar Servidor",
"loadingModel": "Carregando Modelo",
"startingServer": "Iniciando Servidor",
"stopServer": "Parar Servidor",
"serverLogs": "Logs do Servidor",
"serverLogsDesc": "Ver logs detalhados do servidor API local.",
"openLogs": "Abrir Logs",
"swaggerDocs": "Documentação da API",
"swaggerDocsDesc": "Ver documentação interativa da API (Swagger UI).",
"openDocs": "Abrir Documentos",
"startupConfiguration": "Configuração de Inicialização",
"runOnStartup": "Inicialização automática",
"runOnStartupDesc": "Iniciar automaticamente o Servidor API Local quando a aplicação for lançada. Usa o último modelo usado, ou escolhe o primeiro modelo disponível se indisponível.",
"serverConfiguration": "Configuração do Servidor",
"serverHost": "Host do Servidor",
"serverHostDesc": "Endereço de rede para o servidor.",
"serverPort": "Porta do Servidor",
"serverPortDesc": "Número da porta para o servidor API.",
"apiPrefix": "Prefixo da API",
"apiPrefixDesc": "Prefixo do caminho para endpoints da API.",
"apiKey": "Chave de API",
"apiKeyDesc": "Autenticar requisições com uma chave de API.",
"trustedHosts": "Hosts Confiáveis",
"trustedHostsDesc": "Hosts permitidos para acessar o servidor, separados por vírgulas.",
"advancedSettings": "Configurações Avançadas",
"cors": "Compartilhamento de Recursos de Origem Cruzada (CORS)",
"corsDesc": "Permitir requisições de origem cruzada para o servidor de API.",
"verboseLogs": "Logs Detalhados do Servidor",
"verboseLogsDesc": "Habilitar logs detalhados do servidor para depuração.",
"proxyTimeout": "Timeout de Requisição",
"proxyTimeoutDesc": "Tempo para aguardar uma resposta do modelo local, segundos."
},
"privacy": {
"analytics": "Analytics",
"helpUsImprove": "Ajude-nos a melhorar",
"helpUsImproveDesc": "Para nos ajudar a melhorar o Jan, você pode compartilhar dados anônimos como uso de recursos e contagem de usuários. Nunca coletamos seus chats ou informações pessoais.",
"privacyPolicy": "Você tem controle total sobre seus dados. Saiba mais em nossa Política de Privacidade.",
"analyticsDesc": "Para melhorar o Jan, precisamos entender como é usado—mas apenas com sua ajuda. Você pode alterar esta configuração a qualquer momento.",
"privacyPromises": "Sua escolha aqui não mudará nossas promessas básicas de privacidade:",
"promise1": "Suas conversas permanecem privadas e no seu dispositivo",
"promise2": "Nunca coletamos suas informações pessoais ou conteúdo de chat",
"promise3": "Todo compartilhamento de dados é anônimo e agregado",
"promise4": "Você pode optar por sair a qualquer momento sem perder funcionalidade",
"promise5": "Somos transparentes sobre o que coletamos e por quê"
},
"general": {
"showInFinder": "Mostrar no Finder",
"showInFileExplorer": "Mostrar no Explorador de Arquivos",
"openContainingFolder": "Abrir Pasta Contendo",
"failedToRelocateDataFolder": "Falha ao realocar pasta de dados",
"couldNotRelocateToRoot": "Não é possível realocar pasta de dados para diretório raiz. Por favor, escolha outra localização.",
"couldNotResetRootDirectory": "Não é possível redefinir pasta de dados quando está definida para um diretório raiz. Por favor, delete a pasta de dados manualmente.",
"failedToRelocateDataFolderDesc": "Falha ao realocar pasta de dados. Tente novamente.",
"devVersion": "Versão de desenvolvimento detectada",
"noUpdateAvailable": "Você está executando a versão mais recente",
"updateError": "Falha ao verificar atualizações",
"appVersion": "Versão do App",
"checkForUpdates": "Verificar Atualizações",
"checkForUpdatesDesc": "Verificar se uma versão mais nova do Jan está disponível.",
"checkingForUpdates": "Verificando atualizações...",
"copied": "Copiado",
"copyPath": "Copiar Caminho",
"changeLocation": "Alterar Localização",
"openLogs": "Abrir Logs",
"revealLogs": "Mostrar Logs",
"factoryResetTitle": "Redefinir para Configurações de Fábrica",
"factoryResetDesc": "Isso redefinirá todas as configurações do app para os padrões. Isso não pode ser desfeito. Recomendamos isso apenas se o app estiver corrompido.",
"cancel": "Cancelar",
"reset": "Redefinir",
"huggingfaceToken": "Token HuggingFace",
"huggingfaceTokenDesc": "Seu token da API HuggingFace para acessar modelos.",
"resources": "Recursos",
"documentation": "Documentação",
"documentationDesc": "Aprenda como usar o Jan e explore seus recursos.",
"viewDocs": "Ver Documentos",
"releaseNotes": "Notas de Versão",
"releaseNotesDesc": "Veja o que há de novo na versão mais recente do Jan.",
"viewReleases": "Ver Versões",
"community": "Comunidade",
"github": "GitHub",
"githubDesc": "Contribua para o desenvolvimento do Jan.",
"discord": "Discord",
"discordDesc": "Junte-se à nossa comunidade para suporte e discussões.",
"support": "Suporte",
"reportAnIssue": "Reportar um Problema",
"reportAnIssueDesc": "Encontrou um bug? Ajude-nos relatando um problema no GitHub.",
"reportIssue": "Reportar Problema",
"credits": "Créditos",
"creditsDesc1": "👋 Jan é construído com ❤️ pela equipe Menlo Research.",
"creditsDesc2": "Agradecimentos especiais às nossas dependências de código aberto—especialmente llama.cpp e Tauri—e à nossa incrível comunidade de IA."
},
"extensions": {
"title": "Extensões"
},
"attachments": {
"subtitle": "Configure anexos de documentos, limites de tamanho e comportamento de recuperação.",
"featureTitle": "Recurso",
"enable": "Habilitar Anexos",
"enableDesc": "Permitir upload e indexação de documentos para recuperação.",
"limitsTitle": "Limites",
"maxFile": "Tamanho Máximo do Arquivo (MB)",
"maxFileDesc": "Tamanho máximo por arquivo. Aplicado no upload e processamento.",
"retrievalTitle": "Recuperação",
"topK": "Top-K",
"topKDesc": "Máximo de citações para retornar.",
"threshold": "Limite de Afinidade",
"thresholdDesc": "Pontuação mínima de similaridade (0-1). Usado apenas para busca linear cosseno, não ANN.",
"searchMode": "Modo de Busca Vetorial",
"searchModeDesc": "Escolha entre sqlite-vec ANN, cosseno linear, ou auto.",
"searchModeAuto": "Auto (recomendado)",
"searchModeAnn": "ANN (sqlite-vec)",
"searchModeLinear": "Linear",
"chunkingTitle": "Fragmentação",
"chunkSize": "Tamanho do Fragmento (tokens)",
"chunkSizeDesc": "Máximo aproximado de tokens por fragmento para embeddings.",
"chunkOverlap": "Sobreposição (tokens)",
"chunkOverlapDesc": "Sobreposição de tokens entre fragmentos consecutivos."
},
"dialogs": {
"changeDataFolder": {
"title": "Alterar Localização da Pasta de Dados",
"description": "Tem certeza de que deseja alterar a localização da pasta de dados? Isso moverá todos os seus dados para a nova localização e reiniciará a aplicação.",
"currentLocation": "Localização Atual:",
"newLocation": "Nova Localização:",
"cancel": "Cancelar",
"changeLocation": "Alterar Localização"
}
},
"backendUpdater": {
"newBackendVersion": "Nova Versão Llamacpp {{version}}",
"backendUpdateAvailable": "Atualização Llamacpp Disponível",
"remindMeLater": "Lembre-me Mais Tarde",
"updating": "Atualizando...",
"updateNow": "Atualizar Agora",
"updateSuccess": "Llamacpp atualizado com sucesso",
"updateError": "Falha ao atualizar Llamacpp"
},
"backendInstallSuccess": "Backend instalado com sucesso",
"backendInstallError": "Falha ao instalar backend"
}

View File

@ -0,0 +1,6 @@
{
"welcome": "Bem-vindo ao Jan",
"description": "Para começar, você precisará baixar um modelo de IA local ou conectar-se a um modelo em nuvem usando uma chave de API",
"localModel": "Configurar modelo local",
"remoteProvider": "Configurar provedor remoto"
}

View File

@ -0,0 +1,28 @@
{
"title": "Monitor do Sistema",
"cpuUsage": "Uso da CPU",
"model": "Modelo",
"cores": "Núcleos",
"architecture": "Arquitetura",
"currentUsage": "Uso Atual",
"memoryUsage": "Uso da Memória",
"totalRam": "RAM Total",
"availableRam": "RAM Disponível",
"usedRam": "RAM Usada",
"runningModels": "Modelos em Execução",
"noRunningModels": "Nenhum modelo está executando atualmente",
"provider": "Provedor",
"uptime": "Tempo de Atividade",
"actions": "Ações",
"stop": "Parar",
"activeGpus": "GPUs Ativas",
"noGpus": "Nenhuma GPU detectada",
"noActiveGpus": "Nenhuma GPU ativa. Todas as GPUs estão atualmente desabilitadas.",
"vramUsage": "Uso da VRAM",
"driverVersion": "Versão do Driver:",
"computeCapability": "Capacidade de Computação:",
"active": "Ativo",
"performance": "Desempenho",
"resources": "Recursos",
"refresh": "Atualizar"
}

View File

@ -0,0 +1,12 @@
{
"title": "Solicitação de Chamada de Ferramenta",
"description": "O assistente quer usar a ferramenta: <strong>{{toolName}}</strong>",
"securityNotice": "<strong>Aviso de Segurança:</strong> Ferramentas maliciosas ou conteúdo de conversa podem potencialmente enganar o assistente para tentar ações prejudiciais. Revise cada chamada de ferramenta cuidadosamente antes de aprovar.",
"deny": "Negar",
"allowOnce": "Permitir Uma Vez",
"alwaysAllow": "Permitir na conversa",
"permissions": "Permissões",
"approve": "Aprovar",
"reject": "Rejeitar",
"parameters": "Parâmetros da Ferramenta"
}

View File

@ -0,0 +1,12 @@
{
"toolApproval": {
"title": "Aprovação de Ferramenta Necessária",
"description": "O assistente quer usar a ferramenta: <strong>{{toolName}}</strong>",
"securityNotice": "<strong>Aviso de Segurança:</strong> Ferramentas maliciosas ou conteúdo de conversa podem potencialmente enganar o assistente para tentar ações prejudiciais. Revise cada chamada de ferramenta cuidadosamente antes de aprovar.",
"deny": "Negar",
"allowOnce": "Permitir Uma Vez",
"alwaysAllow": "Permitir na conversa",
"parameters": "Parâmetros da Ferramenta",
"permissionScope": "Permissões concedidas aplicam-se apenas a esta conversa."
}
}

View File

@ -0,0 +1,10 @@
{
"newVersion": "Nova Versão {{version}}",
"updateAvailable": "Atualização Disponível",
"nightlyBuild": "Build Noturno",
"showReleaseNotes": "Mostrar Notas de Versão",
"hideReleaseNotes": "Ocultar Notas de Versão",
"remindMeLater": "Lembre-me Mais Tarde",
"downloading": "Baixando...",
"updateNow": "Atualizar Agora"
}