import { streamText } from 'ai'
import { NextRequest, NextResponse } from 'next/server'
import type { ChatRequest } from '@/lib/types'
import { getConfiguredModel } from '@/lib/openrouter'
import { getAgentDefinition } from '@/lib/agents/factory'
import { getFlags } from '@/lib/flags'

/**
 * POST /api/chat
 * Stream chat responses from the selected agent
 *
 * Request body:
 * - message: User message
 * - agentId: Selected agent (agent-1, agent-2, or custom-{uuid})
 * - sessionId: Session ID for conversation tracking
 * - timestamp: Request timestamp
 * - images?: Base64-encoded images (optional)
 * - systemPrompt?: For custom agents, the agent's system prompt
 *
 * Response:
 * - Streamed plain-text response containing the model's text output
 */
export async function POST(request: NextRequest) {
  try {
    // Parse request body
    const body = (await request.json()) as Partial<ChatRequest> & { systemPrompt?: string }
    const { message, agentId, sessionId, timestamp, images, systemPrompt } = body

    // Validate required fields
    if (!message) {
      return NextResponse.json({ error: 'Message is required' }, { status: 400 })
    }
    if (!agentId) {
      return NextResponse.json({ error: 'Agent ID is required' }, { status: 400 })
    }

    // Check feature flags
    const flags = getFlags()
    if (images && images.length > 0 && !flags.IMAGE_UPLOADS_ENABLED) {
      return NextResponse.json(
        {
          error: 'Image uploads are not enabled',
          hint: 'Contact administrator to enable this feature',
        },
        { status: 403 }
      )
    }

    // Log request
    console.log(
      `[chat] Agent: ${agentId}, Session: ${sessionId}, Message length: ${message.length}`
    )

    // Load agent definition
    const agent = await getAgentDefinition(agentId, {
      systemPrompt: systemPrompt || '',
      tools: undefined, // Tools come from the agent definition
    })

    // Build message array with context
    const messageContent = buildMessageContent(message, images)
    const messages: Parameters<typeof streamText>[0]['messages'] = [
      {
        role: 'user',
        content: messageContent as any,
      },
    ]

    // Get configured model
    const model = getConfiguredModel()

    // Debug: log the tools being passed to streamText
    const toolsToPass = agent.tools || {}
    console.log(`[chat] Tools available for agent ${agentId}:`, Object.keys(toolsToPass))

    // Stream response from agent
    const result = streamText({
      model,
      system: agent.systemPrompt,
      tools: toolsToPass,
      messages,
      temperature: agent.temperature,
      // No per-request token limit is set here; output length falls back to the
      // model/provider defaults.
      onFinish: () => {
        console.log(`[chat] Response completed for agent ${agentId}`)
      },
    })

    // Return the result as a streamed plain-text response
    return result.toTextStreamResponse()
  } catch (error) {
    console.error('[chat] Error:', error)
    const message = error instanceof Error ? error.message : 'Unknown error'
    return NextResponse.json(
      {
        error: 'Failed to process message',
        hint: message,
      },
      { status: 500 }
    )
  }
}
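/*
 * Example client usage (illustrative sketch only — the payload values and the
 * way the stream is consumed are assumptions, not part of this module). It
 * shows a fetch call with the documented ChatRequest fields and incremental
 * reading of the plain-text stream returned above:
 *
 *   const res = await fetch('/api/chat', {
 *     method: 'POST',
 *     headers: { 'Content-Type': 'application/json' },
 *     body: JSON.stringify({
 *       message: 'Hello',
 *       agentId: 'agent-1',
 *       sessionId: 'example-session-id', // placeholder
 *       timestamp: Date.now(),
 *     }),
 *   })
 *
 *   // The route streams plain text, so read chunks from the response body.
 *   const reader = res.body!.getReader()
 *   const decoder = new TextDecoder()
 *   let done = false
 *   while (!done) {
 *     const chunk = await reader.read()
 *     done = chunk.done
 *     if (chunk.value) console.log(decoder.decode(chunk.value, { stream: true }))
 *   }
 */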
/**
 * Build message content with text and images
 * Supports both text-only and multimodal messages
 */
function buildMessageContent(
  text: string,
  images?: string[]
): string | Array<{ type: 'text' | 'image'; text?: string; image?: string; mimeType?: string }> {
  // Text only
  if (!images || images.length === 0) {
    return text
  }

  // Multimodal message with images
  const content: Array<{
    type: 'text' | 'image'
    text?: string
    image?: string
    mimeType?: string
  }> = [
    {
      type: 'text',
      text,
    },
  ]

  for (const base64Image of images) {
    // Determine MIME type from the data URL prefix (default to JPEG)
    let mimeType = 'image/jpeg'
    if (base64Image.includes('data:image/png')) {
      mimeType = 'image/png'
    } else if (base64Image.includes('data:image/gif')) {
      mimeType = 'image/gif'
    } else if (base64Image.includes('data:image/webp')) {
      mimeType = 'image/webp'
    }

    // Extract base64 data (remove the data URL prefix if present)
    const imageData = base64Image.includes(',')
      ? base64Image.split(',')[1]
      : base64Image

    content.push({
      type: 'image',
      image: imageData,
      mimeType,
    })
  }

  return content
}
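/*
 * Illustrative sketch of what buildMessageContent returns for a text message
 * plus one PNG data URL (the base64 payload is a shortened placeholder, not
 * real data):
 *
 *   buildMessageContent('Describe this image', ['data:image/png;base64,iVBOR...'])
 *   // => [
 *   //   { type: 'text', text: 'Describe this image' },
 *   //   { type: 'image', image: 'iVBOR...', mimeType: 'image/png' },
 *   // ]
 */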