import { groq } from '@ai-sdk/groq';
import { convertToModelMessages, streamText, type UIMessage } from 'ai';

// Allow streaming responses up to 30 seconds
export const maxDuration = 30;

/**
 * Chat route handler: converts the client's UI messages into model
 * messages and streams a completion from Groq's
 * `llama-3.3-70b-versatile` model back as a UI message stream.
 *
 * @param req - incoming request whose JSON body carries `{ messages }`
 * @returns a streaming Response produced by the AI SDK
 */
export async function POST(req: Request) {
  // NOTE(review): the body is trusted to match { messages: UIMessage[] };
  // there is no runtime validation here — confirm upstream guarantees.
  const { messages }: { messages: UIMessage[] } = await req.json();

  const modelMessages = convertToModelMessages(messages);

  const stream = streamText({
    model: groq('llama-3.3-70b-versatile'),
    prompt: modelMessages,
  });

  return stream.toUIMessageStreamResponse();
}