commit baf3fed4728380f64718dbb3b6d2e1b22e02b1c3 Author: songtianlun <31945314+songtianlun@users.noreply.github.com> Date: Fri Sep 26 15:46:29 2025 +0000 Initial commit Created from https://vercel.com/new diff --git a/.env.local.example b/.env.local.example new file mode 100644 index 0000000..75ec40d --- /dev/null +++ b/.env.local.example @@ -0,0 +1,17 @@ +# You must first activate a Billing Account here: https://platform.openai.com/account/billing/overview +# Then get your OpenAI API Key here: https://platform.openai.com/account/api-keys +OPENAI_API_KEY=xxxxxxx + +# You must first create an OpenAI Assistant here: https://platform.openai.com/assistants +# Then get your Assistant ID here: https://platform.openai.com/assistants +ASSISTANT_ID=xxxxxxx + +# If you choose to use external files for attachments, you will need to configure a Vercel Blob Store. +# Instructions to create a Vercel Blob Store here: https://vercel.com/docs/storage/vercel-blob +BLOB_READ_WRITE_TOKEN=xxxxxxx + +# Required for reasoning example +FIREWORKS_API_KEY=xxxxxxx + +# Required for resumable streams. You can create a Redis store here: https://vercel.com/marketplace/redis +REDIS_URL=xxxxxx diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d186e88 --- /dev/null +++ b/.gitignore @@ -0,0 +1,39 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. + +# dependencies +/node_modules +/.pnp +.pnp.js + +# testing +/coverage + +# next.js +/.next/ +/out/ + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# local env files +.env*.local + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts + +# persistence +.chats +.streams diff --git a/README.md b/README.md new file mode 100644 index 0000000..96daa3e --- /dev/null +++ b/README.md @@ -0,0 +1,43 @@ +# AI SDK, Next.js, and OpenAI Chat Example + +This example shows how to use the [AI SDK](https://ai-sdk.dev/docs) with [Next.js](https://nextjs.org/) and [OpenAI](https://openai.com) to create a ChatGPT-like AI-powered streaming chat bot. + +## Deploy your own + +Deploy the example using [Vercel](https://vercel.com?utm_source=github&utm_medium=readme&utm_campaign=ai-sdk-example): + +[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fvercel%2Fai%2Ftree%2Fmain%2Fexamples%2Fnext-openai&env=OPENAI_API_KEY&project-name=ai-sdk-next-openai&repository-name=ai-sdk-next-openai) + +## How to use + +Execute [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app) with [npm](https://docs.npmjs.com/cli/init), [Yarn](https://yarnpkg.com/lang/en/docs/cli/create/), or [pnpm](https://pnpm.io) to bootstrap the example: + +```bash +npx create-next-app --example https://github.com/vercel/ai/tree/main/examples/next-openai next-openai-app +``` + +```bash +yarn create next-app --example https://github.com/vercel/ai/tree/main/examples/next-openai next-openai-app +``` + +```bash +pnpm create next-app --example https://github.com/vercel/ai/tree/main/examples/next-openai next-openai-app +``` + +To run the example locally you need to: + +1. Sign up at [OpenAI's Developer Platform](https://platform.openai.com/signup). +2. Go to [OpenAI's dashboard](https://platform.openai.com/account/api-keys) and create an API KEY. +3. If you choose to use external files for attachments, then create a [Vercel Blob Store](https://vercel.com/docs/storage/vercel-blob). +4. 
Set the required environment variables in a new file called `.env.local`, using [the example env file](./.env.local.example) as a template.
5. `pnpm install` to install the required dependencies.
6. `pnpm dev` to launch the development server.

## Learn More

To learn more about OpenAI, Next.js, and the AI SDK, take a look at the following resources:

- [AI SDK docs](https://ai-sdk.dev/docs)
- [Vercel AI Playground](https://ai-sdk.dev/playground)
- [OpenAI Documentation](https://platform.openai.com/docs) - learn about OpenAI features and API.
- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
diff --git a/app/anthropic-web-fetch/page.tsx b/app/anthropic-web-fetch/page.tsx
new file mode 100644
index 0000000..ce684b1
--- /dev/null
+++ b/app/anthropic-web-fetch/page.tsx
@@ -0,0 +1,57 @@
+'use client';
+
+import AnthropicWebFetchView from '@/component/anthropic-web-fetch-view';
+import ChatInput from '@/component/chat-input';
+import { useChat } from '@ai-sdk/react';
+import { DefaultChatTransport } from 'ai';
+import { AnthropicWebFetchMessage } from '../api/anthropic-web-fetch/route';
+
+export default function TestAnthropicWebFetch() {
+  const { status, sendMessage, messages } = useChat<AnthropicWebFetchMessage>({
+    transport: new DefaultChatTransport({
+      api: '/api/anthropic-web-fetch',
+    }),
+  });
+
+  return (
+    <div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
+      <h1 className="mb-4 text-xl font-bold">Anthropic Web Fetch Test</h1>
+
+      {messages.map(message => (
+        <div key={message.id} className="whitespace-pre-wrap">
+          {message.role === 'user' ? 'User: ' : 'AI: '}
+          {message.parts.map((part, index) => {
+            if (part.type === 'text') {
+              return <div key={index}>{part.text}</div>;
+            }
+
+            if (part.type === 'tool-web_fetch') {
+              return <AnthropicWebFetchView key={index} invocation={part} />;
+            }
+
+            if (part.type === 'source-url') {
+              return (
+                <span key={index}>
+                  [
+                  <a href={part.url} target="_blank" rel="noopener noreferrer">
+                    {part.title ?? new URL(part.url).hostname}
+                  </a>
+                  ]
+                </span>
+              );
+            }
+
+            return null;
+          })}
+        </div>
+      ))}
+
+      <ChatInput status={status} onSubmit={text => sendMessage({ text })} />
+    </div>
+  );
+}
diff --git a/app/anthropic-web-search/page.tsx b/app/anthropic-web-search/page.tsx
new file mode 100644
index 0000000..7e8ab1c
--- /dev/null
+++ b/app/anthropic-web-search/page.tsx
@@ -0,0 +1,57 @@
+'use client';
+
+import { useChat } from '@ai-sdk/react';
+import { DefaultChatTransport } from 'ai';
+import ChatInput from '@/component/chat-input';
+import { AnthropicWebSearchMessage } from '@/app/api/anthropic-web-search/route';
+import AnthropicWebSearchView from '@/component/anthropic-web-search-view';
+
+export default function TestAnthropicWebSearch() {
+  const { status, sendMessage, messages } = useChat<AnthropicWebSearchMessage>({
+    transport: new DefaultChatTransport({
+      api: '/api/anthropic-web-search',
+    }),
+  });
+
+  return (
+    <div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
+      <h1 className="mb-4 text-xl font-bold">Anthropic Web Search Test</h1>
+
+      {messages.map(message => (
+        <div key={message.id} className="whitespace-pre-wrap">
+          {message.role === 'user' ? 'User: ' : 'AI: '}
+          {message.parts.map((part, index) => {
+            if (part.type === 'text') {
+              return <div key={index}>{part.text}</div>;
+            }
+
+            if (part.type === 'tool-web_search') {
+              return <AnthropicWebSearchView key={index} invocation={part} />;
+            }
+
+            if (part.type === 'source-url') {
+              return (
+                <span key={index}>
+                  [
+                  <a href={part.url} target="_blank" rel="noopener noreferrer">
+                    {part.title ?? new URL(part.url).hostname}
+                  </a>
+                  ]
+                </span>
+              );
+            }
+
+            return null;
+          })}
+        </div>
+      ))}
+
+      <ChatInput status={status} onSubmit={text => sendMessage({ text })} />
+    </div>
+ ); +} diff --git a/app/api/anthropic-web-fetch/route.ts b/app/api/anthropic-web-fetch/route.ts new file mode 100644 index 0000000..260fe20 --- /dev/null +++ b/app/api/anthropic-web-fetch/route.ts @@ -0,0 +1,38 @@ +import { anthropic } from '@ai-sdk/anthropic'; +import { + convertToModelMessages, + InferUITools, + streamText, + ToolSet, + UIDataTypes, + UIMessage, + validateUIMessages, +} from 'ai'; + +const tools = { + web_fetch: anthropic.tools.webFetch_20250910(), +} satisfies ToolSet; + +export type AnthropicWebFetchMessage = UIMessage< + never, + UIDataTypes, + InferUITools +>; + +export async function POST(req: Request) { + const { messages } = await req.json(); + const uiMessages = await validateUIMessages({ messages }); + + const result = streamText({ + model: anthropic('claude-sonnet-4-0'), + tools, + messages: convertToModelMessages(uiMessages), + onStepFinish: ({ request }) => { + console.log(JSON.stringify(request.body, null, 2)); + }, + }); + + return result.toUIMessageStreamResponse({ + sendSources: true, + }); +} diff --git a/app/api/anthropic-web-search/route.ts b/app/api/anthropic-web-search/route.ts new file mode 100644 index 0000000..efcdff3 --- /dev/null +++ b/app/api/anthropic-web-search/route.ts @@ -0,0 +1,46 @@ +import { anthropic } from '@ai-sdk/anthropic'; +import { + convertToModelMessages, + InferUITools, + streamText, + ToolSet, + UIDataTypes, + UIMessage, + validateUIMessages, +} from 'ai'; + +const tools = { + web_search: anthropic.tools.webSearch_20250305({ + maxUses: 3, + userLocation: { + type: 'approximate', + city: 'New York', + country: 'US', + timezone: 'America/New_York', + }, + }), +} satisfies ToolSet; + +export type AnthropicWebSearchMessage = UIMessage< + never, + UIDataTypes, + InferUITools +>; + +export async function POST(req: Request) { + const { messages } = await req.json(); + const uiMessages = await validateUIMessages({ messages }); + + const result = streamText({ + model: anthropic('claude-sonnet-4-20250514'), + tools, + messages: convertToModelMessages(uiMessages), + onStepFinish: ({ request }) => { + console.log(JSON.stringify(request.body, null, 2)); + }, + }); + + return result.toUIMessageStreamResponse({ + sendSources: true, + }); +} diff --git a/app/api/bedrock/route.ts b/app/api/bedrock/route.ts new file mode 100644 index 0000000..066a068 --- /dev/null +++ b/app/api/bedrock/route.ts @@ -0,0 +1,29 @@ +import { bedrock } from '@ai-sdk/amazon-bedrock'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +export async function POST(req: Request) { + try { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const result = streamText({ + model: bedrock('anthropic.claude-3-haiku-20240307-v1:0'), + prompt: convertToModelMessages(messages), + maxOutputTokens: 500, + temperature: 0.7, + }); + + return result.toUIMessageStreamResponse(); + } catch (error) { + console.error('Bedrock API Error:', error); + return new Response( + JSON.stringify({ + error: 'Bedrock API failed', + details: error instanceof Error ? 
error.message : 'Unknown error', + }), + { + status: 500, + headers: { 'Content-Type': 'application/json' }, + }, + ); + } +} diff --git a/app/api/chat-cohere/route.ts b/app/api/chat-cohere/route.ts new file mode 100644 index 0000000..db165e7 --- /dev/null +++ b/app/api/chat-cohere/route.ts @@ -0,0 +1,17 @@ +import { cohere } from '@ai-sdk/cohere'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const prompt = convertToModelMessages(messages); + + const result = streamText({ + model: cohere('command-r-plus'), + prompt, + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/chat-google/route.ts b/app/api/chat-google/route.ts new file mode 100644 index 0000000..6791fcb --- /dev/null +++ b/app/api/chat-google/route.ts @@ -0,0 +1,17 @@ +import { google } from '@ai-sdk/google'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const prompt = convertToModelMessages(messages); + + const result = streamText({ + model: google('gemini-2.0-flash'), + prompt, + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/chat-groq/route.ts b/app/api/chat-groq/route.ts new file mode 100644 index 0000000..b13f19f --- /dev/null +++ b/app/api/chat-groq/route.ts @@ -0,0 +1,18 @@ +import { groq } from '@ai-sdk/groq'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const prompt = convertToModelMessages(messages); + + const result = streamText({ + model: groq('llama-3.3-70b-versatile'), + prompt, + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/chat-mistral/route.ts b/app/api/chat-mistral/route.ts new file mode 100644 index 0000000..d4548a1 --- /dev/null +++ b/app/api/chat-mistral/route.ts @@ -0,0 +1,18 @@ +import { mistral } from '@ai-sdk/mistral'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const prompt = convertToModelMessages(messages); + + const result = streamText({ + model: mistral('mistral-small-latest'), + prompt, + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/chat-openai-code-interpreter/route.ts b/app/api/chat-openai-code-interpreter/route.ts new file mode 100644 index 0000000..5e863f2 --- /dev/null +++ b/app/api/chat-openai-code-interpreter/route.ts @@ -0,0 +1,42 @@ +import { openai, OpenAIResponsesProviderOptions } from '@ai-sdk/openai'; +import { + convertToModelMessages, + InferUITools, + streamText, + ToolSet, + UIDataTypes, + UIMessage, + validateUIMessages, +} from 'ai'; + +const tools = { + code_interpreter: openai.tools.codeInterpreter(), +} satisfies ToolSet; + +export type OpenAICodeInterpreterMessage = UIMessage< + never, + UIDataTypes, + InferUITools +>; + +export async function POST(req: Request) { + const { messages } = await req.json(); + const uiMessages = await validateUIMessages({ messages }); + + const result = 
streamText({ + model: openai('gpt-5-nano'), + tools, + messages: convertToModelMessages(uiMessages), + onStepFinish: ({ request }) => { + console.log(JSON.stringify(request.body, null, 2)); + }, + providerOptions: { + openai: { + store: false, + include: ['reasoning.encrypted_content'], + } satisfies OpenAIResponsesProviderOptions, + }, + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/chat-openai-file-search/route.ts b/app/api/chat-openai-file-search/route.ts new file mode 100644 index 0000000..23dd1c5 --- /dev/null +++ b/app/api/chat-openai-file-search/route.ts @@ -0,0 +1,47 @@ +import { openai, OpenAIResponsesProviderOptions } from '@ai-sdk/openai'; +import { + convertToModelMessages, + InferUITools, + streamText, + ToolSet, + UIDataTypes, + UIMessage, + validateUIMessages, +} from 'ai'; + +export const maxDuration = 30; + +const tools = { + file_search: openai.tools.fileSearch({ + vectorStoreIds: ['vs_68caad8bd5d88191ab766cf043d89a18'], + }), +} satisfies ToolSet; + +export type OpenAIFileSearchMessage = UIMessage< + never, + UIDataTypes, + InferUITools +>; + +export async function POST(req: Request) { + const { messages } = await req.json(); + const uiMessages = await validateUIMessages({ messages }); + + const result = streamText({ + model: openai('gpt-5-nano'), + tools, + messages: convertToModelMessages(uiMessages), + onStepFinish: ({ request }) => { + console.log(JSON.stringify(request.body, null, 2)); + }, + providerOptions: { + openai: { + include: ['file_search_call.results'], + } satisfies OpenAIResponsesProviderOptions, + }, + }); + + return result.toUIMessageStreamResponse({ + sendSources: true, + }); +} diff --git a/app/api/chat-openai-image-generation/route.ts b/app/api/chat-openai-image-generation/route.ts new file mode 100644 index 0000000..4c5e63e --- /dev/null +++ b/app/api/chat-openai-image-generation/route.ts @@ -0,0 +1,42 @@ +import { openai } from '@ai-sdk/openai'; +import { + convertToModelMessages, + InferUITools, + streamText, + ToolSet, + UIDataTypes, + UIMessage, + validateUIMessages, +} from 'ai'; + +const tools = { + image_generation: openai.tools.imageGeneration(), +} satisfies ToolSet; + +export type OpenAIImageGenerationMessage = UIMessage< + never, + UIDataTypes, + InferUITools +>; + +export async function POST(req: Request) { + const { messages } = await req.json(); + const uiMessages = await validateUIMessages({ messages }); + + const result = streamText({ + model: openai('gpt-5-nano'), + tools, + messages: convertToModelMessages(uiMessages), + onStepFinish: ({ request }) => { + console.log(JSON.stringify(request.body, null, 2)); + }, + providerOptions: { + // openai: { + // store: false, + // include: ['reasoning.encrypted_content'], + // } satisfies OpenAIResponsesProviderOptions, + }, + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/chat-openai-responses/route.ts b/app/api/chat-openai-responses/route.ts new file mode 100644 index 0000000..f95fa33 --- /dev/null +++ b/app/api/chat-openai-responses/route.ts @@ -0,0 +1,23 @@ +import { openai } from '@ai-sdk/openai'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const prompt = convertToModelMessages(messages); + + const result = streamText({ + model: openai.responses('o3-mini'), + prompt, + providerOptions: { + openai: { + reasoningEffort: 'low', + reasoningSummary: 'auto', + }, 
+ }, + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/chat-openai-web-search/route.ts b/app/api/chat-openai-web-search/route.ts new file mode 100644 index 0000000..f859869 --- /dev/null +++ b/app/api/chat-openai-web-search/route.ts @@ -0,0 +1,52 @@ +import { openai, OpenAIResponsesProviderOptions } from '@ai-sdk/openai'; +import { + convertToModelMessages, + InferUITools, + streamText, + ToolSet, + UIDataTypes, + UIMessage, + validateUIMessages, +} from 'ai'; + +const tools = { + web_search: openai.tools.webSearch({ + searchContextSize: 'low', + userLocation: { + type: 'approximate', + city: 'San Francisco', + region: 'California', + country: 'US', + }, + }), +} satisfies ToolSet; + +export type OpenAIWebSearchMessage = UIMessage< + never, + UIDataTypes, + InferUITools +>; + +export async function POST(req: Request) { + const { messages } = await req.json(); + const uiMessages = await validateUIMessages({ messages }); + + const result = streamText({ + model: openai('gpt-5-nano'), + tools, + messages: convertToModelMessages(uiMessages), + onStepFinish: ({ request }) => { + console.log(JSON.stringify(request.body, null, 2)); + }, + providerOptions: { + openai: { + store: false, + include: ['reasoning.encrypted_content'], + } satisfies OpenAIResponsesProviderOptions, + }, + }); + + return result.toUIMessageStreamResponse({ + sendSources: true, + }); +} diff --git a/app/api/chat-perplexity/route.ts b/app/api/chat-perplexity/route.ts new file mode 100644 index 0000000..2406a67 --- /dev/null +++ b/app/api/chat-perplexity/route.ts @@ -0,0 +1,17 @@ +import { perplexity } from '@ai-sdk/perplexity'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const prompt = convertToModelMessages(messages); + + const result = streamText({ + model: perplexity('sonar-reasoning'), + prompt, + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/chat-shared-context/route.ts b/app/api/chat-shared-context/route.ts new file mode 100644 index 0000000..10f1124 --- /dev/null +++ b/app/api/chat-shared-context/route.ts @@ -0,0 +1,15 @@ +import { openai } from '@ai-sdk/openai'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const result = streamText({ + model: openai('gpt-4o-mini'), + messages: convertToModelMessages(messages), + }); + + return result.toUIMessageStreamResponse({}); +} diff --git a/app/api/chat-xai/route.ts b/app/api/chat-xai/route.ts new file mode 100644 index 0000000..f88d37f --- /dev/null +++ b/app/api/chat-xai/route.ts @@ -0,0 +1,17 @@ +import { xai } from '@ai-sdk/xai'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const prompt = convertToModelMessages(messages); + + const result = streamText({ + model: xai('grok-beta'), + prompt, + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts new file mode 100644 index 0000000..3fbd3d1 --- /dev/null +++ b/app/api/chat/route.ts @@ -0,0 +1,30 @@ +import { openai } from '@ai-sdk/openai'; +import { + consumeStream, + convertToModelMessages, + 
streamText, + UIMessage, +} from 'ai'; + +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const prompt = convertToModelMessages(messages); + + const result = streamText({ + model: openai('gpt-4o'), + prompt, + abortSignal: req.signal, + }); + + return result.toUIMessageStreamResponse({ + onFinish: async ({ isAborted }) => { + if (isAborted) { + console.log('Aborted'); + } + }, + consumeSseStream: consumeStream, // needed for correct abort handling + }); +} diff --git a/app/api/completion/route.ts b/app/api/completion/route.ts new file mode 100644 index 0000000..9946494 --- /dev/null +++ b/app/api/completion/route.ts @@ -0,0 +1,19 @@ +import { openai } from '@ai-sdk/openai'; +import { streamText } from 'ai'; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +export async function POST(req: Request) { + // Extract the `prompt` from the body of the request + const { prompt } = await req.json(); + + // Ask OpenAI for a streaming completion given the prompt + const result = streamText({ + model: openai('gpt-3.5-turbo-instruct'), + prompt, + }); + + // Respond with the stream + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/dynamic-tools/route.ts b/app/api/dynamic-tools/route.ts new file mode 100644 index 0000000..93cb898 --- /dev/null +++ b/app/api/dynamic-tools/route.ts @@ -0,0 +1,84 @@ +import { openai } from '@ai-sdk/openai'; +import { + convertToModelMessages, + dynamicTool, + InferUITools, + stepCountIs, + streamText, + tool, + ToolSet, + UIDataTypes, + UIMessage, +} from 'ai'; +import { z } from 'zod'; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +const getWeatherInformationTool = tool({ + description: 'show the weather in a given city to the user', + inputSchema: z.object({ city: z.string() }), + execute: async ({ city }: { city: string }, { messages }) => { + // count the number of assistant messages. 
throw error if 2 or less + const assistantMessageCount = messages.filter( + message => message.role === 'assistant', + ).length; + + if (assistantMessageCount <= 2) { + throw new Error('could not get weather information'); + } + + // Add artificial delay of 5 seconds + await new Promise(resolve => setTimeout(resolve, 5000)); + + const weatherOptions = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy']; + return weatherOptions[Math.floor(Math.random() * weatherOptions.length)]; + }, +}); + +const staticTools = { + // server-side tool with execute function: + getWeatherInformation: getWeatherInformationTool, +} as const; + +export type ToolsMessage = UIMessage< + never, + UIDataTypes, + InferUITools +>; + +function dynamicTools(): ToolSet { + return { + currentLocation: dynamicTool({ + description: 'Get the current location.', + inputSchema: z.object({}), + execute: async () => { + const locations = ['New York', 'London', 'Paris']; + return { + location: locations[Math.floor(Math.random() * locations.length)], + }; + }, + }), + }; +} + +export async function POST(req: Request) { + const { messages } = await req.json(); + + const result = streamText({ + model: openai('gpt-4o'), + messages: convertToModelMessages(messages), + stopWhen: stepCountIs(5), // multi-steps for server-side tools + tools: { + ...staticTools, + ...dynamicTools(), + }, + }); + + return result.toUIMessageStreamResponse({ + // originalMessages: messages, //add if you want to have correct ids + onFinish: options => { + console.log('onFinish', options); + }, + }); +} diff --git a/app/api/files/route.ts b/app/api/files/route.ts new file mode 100644 index 0000000..e95057e --- /dev/null +++ b/app/api/files/route.ts @@ -0,0 +1,61 @@ +import { handleUpload, type HandleUploadBody } from '@vercel/blob/client'; +import { NextResponse } from 'next/server'; + +/* + * This route is used to upload files to Vercel's Blob Storage. + * Example from https://vercel.com/docs/storage/vercel-blob/client-upload#create-a-client-upload-route + */ +export async function POST(request: Request): Promise { + const body = (await request.json()) as HandleUploadBody; + + try { + const jsonResponse = await handleUpload({ + body, + request, + onBeforeGenerateToken: async ( + pathname, + /* clientPayload */ + ) => { + // Generate a client token for the browser to upload the file + // ⚠️ Authenticate and authorize users before generating the token. + // Otherwise, you're allowing anonymous uploads. 
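+        // A minimal sketch of such a check, assuming a hypothetical
+        // `getSession` helper (replace with your real auth):
+        //   const session = await getSession(request);
+        //   if (!session) throw new Error('Not authenticated');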
+ + return { + allowedContentTypes: [ + 'image/jpeg', + 'image/png', + 'image/gif', + 'application/pdf', + 'text/plain', + ], + tokenPayload: JSON.stringify({ + // optional, sent to your server on upload completion + // you could pass a user id from auth, or a value from clientPayload + }), + }; + }, + onUploadCompleted: async ({ blob, tokenPayload }) => { + // Get notified of client upload completion + // ⚠️ This will not work on `localhost` websites, + // Use ngrok or similar to get the full upload flow + + console.log('file upload completed', blob, tokenPayload); + + try { + // Run any logic after the file upload completed + // const { userId } = JSON.parse(tokenPayload); + // await db.update({ avatar: blob.url, userId }); + } catch (error) { + throw new Error('Could not complete operation'); + } + }, + }); + + return NextResponse.json(jsonResponse); + } catch (error) { + return NextResponse.json( + { error: (error as Error).message }, + { status: 400 }, // The webhook will retry 5 times waiting for a 200 + ); + } +} diff --git a/app/api/generate-image/route.ts b/app/api/generate-image/route.ts new file mode 100644 index 0000000..17be33c --- /dev/null +++ b/app/api/generate-image/route.ts @@ -0,0 +1,20 @@ +import { openai } from '@ai-sdk/openai'; +import { experimental_generateImage as generateImage } from 'ai'; + +// Allow responses up to 60 seconds +export const maxDuration = 60; + +export async function POST(req: Request) { + const { prompt } = await req.json(); + + const { image } = await generateImage({ + model: openai.imageModel('dall-e-3'), + prompt, + size: '1024x1024', + providerOptions: { + openai: { style: 'vivid', quality: 'hd' }, + }, + }); + + return Response.json(image.base64); +} diff --git a/app/api/mcp-zapier/route.ts b/app/api/mcp-zapier/route.ts new file mode 100644 index 0000000..86147d3 --- /dev/null +++ b/app/api/mcp-zapier/route.ts @@ -0,0 +1,33 @@ +import { openai } from '@ai-sdk/openai'; +import { experimental_createMCPClient, stepCountIs, streamText } from 'ai'; + +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages } = await req.json(); + + const mcpClient = await experimental_createMCPClient({ + transport: { + type: 'sse', + url: 'https://actions.zapier.com/mcp/[YOUR_KEY]/sse', + }, + }); + + try { + const zapierTools = await mcpClient.tools(); + + const result = streamText({ + model: openai('gpt-4o'), + messages, + tools: zapierTools, + onFinish: async () => { + await mcpClient.close(); + }, + stopWhen: stepCountIs(10), + }); + + return result.toUIMessageStreamResponse(); + } catch (error) { + return new Response('Internal Server Error', { status: 500 }); + } +} diff --git a/app/api/test-invalid-tool-call/route.ts b/app/api/test-invalid-tool-call/route.ts new file mode 100644 index 0000000..3ae4744 --- /dev/null +++ b/app/api/test-invalid-tool-call/route.ts @@ -0,0 +1,104 @@ +import { openai } from '@ai-sdk/openai'; +import { + convertToModelMessages, + InferUITools, + stepCountIs, + streamText, + tool, + UIDataTypes, + UIMessage, +} from 'ai'; +import { convertArrayToReadableStream, MockLanguageModelV2 } from 'ai/test'; +import { z } from 'zod'; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +const getWeatherInformationTool = tool({ + description: 'show the weather in a given city to the user', + inputSchema: z.object({ city: z.string() }), + execute: async ({ city }: { city: string }) => { + // Add artificial delay of 5 seconds + await new Promise(resolve => 
setTimeout(resolve, 5000)); + + const weatherOptions = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy']; + return weatherOptions[Math.floor(Math.random() * weatherOptions.length)]; + }, +}); + +const tools = { + // server-side tool with execute function: + getWeatherInformation: getWeatherInformationTool, +} as const; + +export type UseChatToolsMessage = UIMessage< + never, + UIDataTypes, + InferUITools +>; + +export async function POST(req: Request) { + const { messages } = await req.json(); + + console.log('messages', JSON.stringify(messages, null, 2)); + + const result = streamText({ + model: openai('gpt-4o'), + messages: convertToModelMessages(messages), + stopWhen: stepCountIs(5), // multi-steps for server-side tools + tools, + prepareStep: async ({ stepNumber }) => { + // inject invalid tool call in first step: + if (stepNumber === 0) { + return { + model: new MockLanguageModelV2({ + doStream: async () => ({ + stream: convertArrayToReadableStream([ + { type: 'stream-start', warnings: [] }, + { + type: 'tool-input-start', + id: 'call-1', + toolName: 'getWeatherInformation', + providerExecuted: true, + }, + { + type: 'tool-input-delta', + id: 'call-1', + delta: `{ "cities": "San Francisco" }`, + }, + { + type: 'tool-input-end', + id: 'call-1', + }, + { + type: 'tool-call', + toolCallType: 'function', + toolCallId: 'call-1', + toolName: 'getWeatherInformation', + // wrong tool call arguments (city vs cities): + input: `{ "cities": "San Francisco" }`, + }, + { + type: 'finish', + finishReason: 'stop', + usage: { + inputTokens: 10, + outputTokens: 20, + totalTokens: 30, + }, + }, + ]), + }), + }), + }; + } + }, + }); + + return result.toUIMessageStreamResponse({ + // originalMessages: messages, //add if you want to have correct ids + onFinish: options => { + console.log('onFinish', options); + }, + }); +} diff --git a/app/api/use-chat-cache/route.ts b/app/api/use-chat-cache/route.ts new file mode 100644 index 0000000..81a5c8e --- /dev/null +++ b/app/api/use-chat-cache/route.ts @@ -0,0 +1,37 @@ +import { openai } from '@ai-sdk/openai'; +import { streamText } from 'ai'; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +// simple cache implementation, use Vercel KV or a similar service for production +const cache = new Map(); + +export async function POST(req: Request) { + const { messages } = await req.json(); + + // come up with a key based on the request: + const key = JSON.stringify(messages); + + // Check if we have a cached response + const cached = cache.get(key); + if (cached != null) { + return new Response(`data: ${cached}\n\n`, { + status: 200, + headers: { 'Content-Type': 'text/plain' }, + }); + } + + // Call the language model: + const result = streamText({ + model: openai('gpt-4o'), + messages, + async onFinish({ text }) { + // Cache the response text: + cache.set(key, text); + }, + }); + + // Respond with the stream + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/use-chat-custom-sources/route.ts b/app/api/use-chat-custom-sources/route.ts new file mode 100644 index 0000000..07caf9f --- /dev/null +++ b/app/api/use-chat-custom-sources/route.ts @@ -0,0 +1,39 @@ +import { openai } from '@ai-sdk/openai'; +import { + convertToModelMessages, + createUIMessageStream, + createUIMessageStreamResponse, + streamText, + UIMessage, +} from 'ai'; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const stream = createUIMessageStream({ + execute: ({ writer }) => { + 
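+      // The merged stream below is created with `sendStart: false`, so the
+      // 'start' chunk must be written manually before any custom parts: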
writer.write({ type: 'start' }); + + // write a custom url source to the stream: + writer.write({ + type: 'source-url', + sourceId: 'source-1', + url: 'https://example.com', + title: 'Example Source', + }); + + const result = streamText({ + model: openai('gpt-4o'), + messages: convertToModelMessages(messages), + }); + + writer.merge(result.toUIMessageStream({ sendStart: false })); + }, + originalMessages: messages, + onFinish: options => { + console.log('onFinish', JSON.stringify(options, null, 2)); + }, + }); + + return createUIMessageStreamResponse({ stream }); +} diff --git a/app/api/use-chat-data-ui-parts/route.ts b/app/api/use-chat-data-ui-parts/route.ts new file mode 100644 index 0000000..4a148f5 --- /dev/null +++ b/app/api/use-chat-data-ui-parts/route.ts @@ -0,0 +1,57 @@ +import { openai } from '@ai-sdk/openai'; +import { delay } from '@ai-sdk/provider-utils'; +import { + convertToModelMessages, + createUIMessageStream, + createUIMessageStreamResponse, + stepCountIs, + streamText, +} from 'ai'; +import { z } from 'zod'; + +export async function POST(req: Request) { + const { messages } = await req.json(); + + const stream = createUIMessageStream({ + execute: ({ writer }) => { + const result = streamText({ + model: openai('gpt-4o'), + stopWhen: stepCountIs(2), + tools: { + weather: { + description: 'Get the weather in a city', + inputSchema: z.object({ + city: z.string(), + }), + execute: async ({ city }, { toolCallId }) => { + // update display + writer.write({ + type: 'data-weather', + id: toolCallId, + data: { city, status: 'loading' }, + }); + + await delay(2000); // fake delay + const weather = 'sunny'; + + // update display + writer.write({ + type: 'data-weather', + id: toolCallId, + data: { city, weather, status: 'success' }, + }); + + // for LLM roundtrip + return { city, weather }; + }, + }, + }, + messages: convertToModelMessages(messages), + }); + + writer.merge(result.toUIMessageStream()); + }, + }); + + return createUIMessageStreamResponse({ stream }); +} diff --git a/app/api/use-chat-human-in-the-loop/route.ts b/app/api/use-chat-human-in-the-loop/route.ts new file mode 100644 index 0000000..4323134 --- /dev/null +++ b/app/api/use-chat-human-in-the-loop/route.ts @@ -0,0 +1,60 @@ +import { openai } from '@ai-sdk/openai'; +import { + createUIMessageStreamResponse, + streamText, + createUIMessageStream, + convertToModelMessages, + stepCountIs, +} from 'ai'; +import { processToolCalls } from './utils'; +import { tools } from './tools'; +import { HumanInTheLoopUIMessage } from './types'; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages }: { messages: HumanInTheLoopUIMessage[] } = + await req.json(); + + const stream = createUIMessageStream({ + originalMessages: messages, + execute: async ({ writer }) => { + // Utility function to handle tools that require human confirmation + // Checks for confirmation in last message and then runs associated tool + const processedMessages = await processToolCalls( + { + messages, + writer, + tools, + }, + { + // type-safe object for tools without an execute function + getWeatherInformation: async ({ city }) => { + const conditions = ['sunny', 'cloudy', 'rainy', 'snowy']; + return `The weather in ${city} is ${ + conditions[Math.floor(Math.random() * conditions.length)] + }.`; + }, + }, + ); + + const result = streamText({ + model: openai('gpt-4o'), + messages: convertToModelMessages(processedMessages), + tools, + stopWhen: stepCountIs(5), + }); + 
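+      // Merge the model's UI message stream into the writer; reusing the
+      // processed messages keeps the message ids consistent: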
+ writer.merge( + result.toUIMessageStream({ originalMessages: processedMessages }), + ); + }, + onFinish: ({}) => { + // save messages here + console.log('Finished!'); + }, + }); + + return createUIMessageStreamResponse({ stream }); +} diff --git a/app/api/use-chat-human-in-the-loop/tools.ts b/app/api/use-chat-human-in-the-loop/tools.ts new file mode 100644 index 0000000..278a465 --- /dev/null +++ b/app/api/use-chat-human-in-the-loop/tools.ts @@ -0,0 +1,24 @@ +import { tool, ToolSet } from 'ai'; +import { z } from 'zod'; + +const getWeatherInformation = tool({ + description: 'show the weather in a given city to the user', + inputSchema: z.object({ city: z.string() }), + outputSchema: z.string(), // must define outputSchema + // no execute function, we want human in the loop +}); + +const getLocalTime = tool({ + description: 'get the local time for a specified location', + inputSchema: z.object({ location: z.string() }), + // including execute function -> no confirmation required + execute: async ({ location }) => { + console.log(`Getting local time for ${location}`); + return '10am'; + }, +}); + +export const tools = { + getWeatherInformation, + getLocalTime, +} satisfies ToolSet; diff --git a/app/api/use-chat-human-in-the-loop/types.ts b/app/api/use-chat-human-in-the-loop/types.ts new file mode 100644 index 0000000..25aa5d9 --- /dev/null +++ b/app/api/use-chat-human-in-the-loop/types.ts @@ -0,0 +1,11 @@ +import { InferUITools, UIDataTypes, UIMessage } from 'ai'; +import { tools } from './tools'; + +export type MyTools = InferUITools; + +// Define custom message type with data part schemas +export type HumanInTheLoopUIMessage = UIMessage< + never, // metadata type + UIDataTypes, + MyTools +>; diff --git a/app/api/use-chat-human-in-the-loop/utils.ts b/app/api/use-chat-human-in-the-loop/utils.ts new file mode 100644 index 0000000..abaf8e3 --- /dev/null +++ b/app/api/use-chat-human-in-the-loop/utils.ts @@ -0,0 +1,129 @@ +import { + convertToModelMessages, + Tool, + ToolCallOptions, + ToolSet, + UIMessageStreamWriter, + getToolName, + isToolUIPart, +} from 'ai'; +import { HumanInTheLoopUIMessage } from './types'; + +// Approval string to be shared across frontend and backend +export const APPROVAL = { + YES: 'Yes, confirmed.', + NO: 'No, denied.', +} as const; + +function isValidToolName( + key: K, + obj: T, +): key is K & keyof T { + return key in obj; +} + +/** + * Processes tool invocations where human input is required, executing tools when authorized. + * + * @param options - The function options + * @param options.tools - Map of tool names to Tool instances that may expose execute functions + * @param options.writer - UIMessageStream writer for sending results back to the client + * @param options.messages - Array of messages to process + * @param executionFunctions - Map of tool names to execute functions + * @returns Promise resolving to the processed messages + */ +export async function processToolCalls< + Tools extends ToolSet, + ExecutableTools extends { + [Tool in keyof Tools as Tools[Tool] extends { execute: Function } + ? never + : Tool]: Tools[Tool]; + }, +>( + { + writer, + messages, + }: { + tools: Tools; // used for type inference + writer: UIMessageStreamWriter; + messages: HumanInTheLoopUIMessage[]; // IMPORTANT: replace with your message type + }, + executeFunctions: { + [K in keyof Tools & keyof ExecutableTools]?: ( + args: ExecutableTools[K] extends Tool ? 
P : never, + context: ToolCallOptions, + ) => Promise; + }, +): Promise { + const lastMessage = messages[messages.length - 1]; + const parts = lastMessage.parts; + if (!parts) return messages; + + const processedParts = await Promise.all( + parts.map(async part => { + // Only process tool invocations parts + if (!isToolUIPart(part)) return part; + + const toolName = getToolName(part); + + // Only continue if we have an execute function for the tool (meaning it requires confirmation) and it's in a 'result' state + if (!(toolName in executeFunctions) || part.state !== 'output-available') + return part; + + let result; + + if (part.output === APPROVAL.YES) { + // Get the tool and check if the tool has an execute function. + if ( + !isValidToolName(toolName, executeFunctions) || + part.state !== 'output-available' + ) { + return part; + } + + const toolInstance = executeFunctions[toolName] as Tool['execute']; + if (toolInstance) { + result = await toolInstance(part.input, { + messages: convertToModelMessages(messages), + toolCallId: part.toolCallId, + }); + } else { + result = 'Error: No execute function found on tool'; + } + } else if (part.output === APPROVAL.NO) { + result = 'Error: User denied access to tool execution'; + } else { + // For any unhandled responses, return the original part. + return part; + } + + // Forward updated tool result to the client. + writer.write({ + type: 'tool-output-available', + toolCallId: part.toolCallId, + output: result, + }); + + // Return updated toolInvocation with the actual result. + return { + ...part, + output: result, + }; + }), + ); + + // Finally return the processed messages + return [...messages.slice(0, -1), { ...lastMessage, parts: processedParts }]; +} + +export function getToolsRequiringConfirmation< + T extends ToolSet, + // E extends { + // [K in keyof T as T[K] extends { execute: Function } ? 
never : K]: T[K]; + // }, +>(tools: T): string[] { + return (Object.keys(tools) as (keyof T)[]).filter(key => { + const maybeTool = tools[key]; + return typeof maybeTool.execute !== 'function'; + }) as string[]; +} diff --git a/app/api/use-chat-image-output/route.ts b/app/api/use-chat-image-output/route.ts new file mode 100644 index 0000000..c1b7b97 --- /dev/null +++ b/app/api/use-chat-image-output/route.ts @@ -0,0 +1,15 @@ +import { google } from '@ai-sdk/google'; +import { streamText, convertToModelMessages } from 'ai'; + +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages } = await req.json(); + + const result = streamText({ + model: google('gemini-2.0-flash-exp'), + messages: convertToModelMessages(messages), + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/use-chat-message-metadata/example-metadata-schema.ts b/app/api/use-chat-message-metadata/example-metadata-schema.ts new file mode 100644 index 0000000..9821f9e --- /dev/null +++ b/app/api/use-chat-message-metadata/example-metadata-schema.ts @@ -0,0 +1,11 @@ +import { z } from 'zod'; + +export const exampleMetadataSchema = z.object({ + createdAt: z.number().optional(), + duration: z.number().optional(), + model: z.string().optional(), + totalTokens: z.number().optional(), + finishReason: z.string().optional(), +}); + +export type ExampleMetadata = z.infer; diff --git a/app/api/use-chat-message-metadata/route.ts b/app/api/use-chat-message-metadata/route.ts new file mode 100644 index 0000000..4be1623 --- /dev/null +++ b/app/api/use-chat-message-metadata/route.ts @@ -0,0 +1,39 @@ +import { openai } from '@ai-sdk/openai'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; +import { ExampleMetadata } from './example-metadata-schema'; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const result = streamText({ + model: openai('gpt-4o'), + prompt: convertToModelMessages(messages), + }); + + return result.toUIMessageStreamResponse({ + messageMetadata: ({ part }): ExampleMetadata | undefined => { + // send custom information to the client on start: + if (part.type === 'start') { + return { + createdAt: Date.now(), + model: 'gpt-4o', // initial model id + }; + } + + // send additional model information on finish-step: + if (part.type === 'finish-step') { + return { + model: part.response.modelId, // update with the actual model id + }; + } + + // when the message is finished, send additional information: + if (part.type === 'finish') { + return { + totalTokens: part.totalUsage.totalTokens, + finishReason: part.finishReason, + }; + } + }, + }); +} diff --git a/app/api/use-chat-persistence-metadata/route.ts b/app/api/use-chat-persistence-metadata/route.ts new file mode 100644 index 0000000..3c3d166 --- /dev/null +++ b/app/api/use-chat-persistence-metadata/route.ts @@ -0,0 +1,25 @@ +import { openai } from '@ai-sdk/openai'; +import { saveChat } from '@util/chat-store'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +export async function POST(req: Request) { + const { messages, chatId }: { messages: UIMessage[]; chatId: string } = + await req.json(); + + const result = streamText({ + model: openai('gpt-4o-mini'), + messages: convertToModelMessages(messages), + }); + + return result.toUIMessageStreamResponse({ + originalMessages: messages, + messageMetadata: ({ part }) => { + if (part.type === 'start') { + return { createdAt: Date.now() }; + } + }, + onFinish: ({ 
messages }) => { + saveChat({ chatId, messages }); + }, + }); +} diff --git a/app/api/use-chat-persistence-single-message/route.ts b/app/api/use-chat-persistence-single-message/route.ts new file mode 100644 index 0000000..93f3062 --- /dev/null +++ b/app/api/use-chat-persistence-single-message/route.ts @@ -0,0 +1,23 @@ +import { openai } from '@ai-sdk/openai'; +import { loadChat, saveChat } from '@util/chat-store'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +export async function POST(req: Request) { + const { message, chatId }: { message: UIMessage; chatId: string } = + await req.json(); + + const previousMessages = await loadChat(chatId); + const messages = [...previousMessages, message]; + + const result = streamText({ + model: openai('gpt-4o-mini'), + messages: convertToModelMessages(messages), + }); + + return result.toUIMessageStreamResponse({ + originalMessages: messages, + onFinish: ({ messages }) => { + saveChat({ chatId, messages }); + }, + }); +} diff --git a/app/api/use-chat-persistence/route.ts b/app/api/use-chat-persistence/route.ts new file mode 100644 index 0000000..bc9c105 --- /dev/null +++ b/app/api/use-chat-persistence/route.ts @@ -0,0 +1,20 @@ +import { openai } from '@ai-sdk/openai'; +import { saveChat } from '@util/chat-store'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +export async function POST(req: Request) { + const { messages, chatId }: { messages: UIMessage[]; chatId: string } = + await req.json(); + + const result = streamText({ + model: openai('gpt-4o-mini'), + messages: convertToModelMessages(messages), + }); + + return result.toUIMessageStreamResponse({ + originalMessages: messages, + onFinish: ({ messages }) => { + saveChat({ chatId, messages }); + }, + }); +} diff --git a/app/api/use-chat-reasoning-tools/route.ts b/app/api/use-chat-reasoning-tools/route.ts new file mode 100644 index 0000000..6ad22f6 --- /dev/null +++ b/app/api/use-chat-reasoning-tools/route.ts @@ -0,0 +1,48 @@ +import { openai, OpenAIResponsesProviderOptions } from '@ai-sdk/openai'; +import { + convertToModelMessages, + InferUITools, + streamText, + UIDataTypes, + UIMessage, +} from 'ai'; + +const tools = { + web_search: openai.tools.webSearch({ + searchContextSize: 'high', + userLocation: { + type: 'approximate', + city: 'San Francisco', + region: 'California', + country: 'US', + }, + }), +} as const; + +export type ReasoningToolsMessage = UIMessage< + never, // could define metadata here + UIDataTypes, // could define data parts here + InferUITools +>; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +export async function POST(req: Request) { + const { messages } = await req.json(); + + console.log(JSON.stringify(messages, null, 2)); + + const result = streamText({ + model: openai('gpt-5'), + messages: convertToModelMessages(messages), + tools, + providerOptions: { + openai: { + reasoningSummary: 'detailed', // 'auto' for condensed or 'detailed' for comprehensive + } satisfies OpenAIResponsesProviderOptions, + }, + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/use-chat-reasoning/route.ts b/app/api/use-chat-reasoning/route.ts new file mode 100644 index 0000000..719ca60 --- /dev/null +++ b/app/api/use-chat-reasoning/route.ts @@ -0,0 +1,24 @@ +import { openai, OpenAIResponsesProviderOptions } from '@ai-sdk/openai'; +import { convertToModelMessages, streamText } from 'ai'; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +export async 
function POST(req: Request) { + const { messages } = await req.json(); + + const result = streamText({ + model: openai('gpt-5-nano'), + messages: convertToModelMessages(messages), + providerOptions: { + openai: { + reasoningSummary: 'detailed', // 'auto' for condensed or 'detailed' for comprehensive + } satisfies OpenAIResponsesProviderOptions, + }, + onFinish: ({ request }) => { + console.dir(request.body, { depth: null }); + }, + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/use-chat-resilient-persistence/route.ts b/app/api/use-chat-resilient-persistence/route.ts new file mode 100644 index 0000000..23862fd --- /dev/null +++ b/app/api/use-chat-resilient-persistence/route.ts @@ -0,0 +1,29 @@ +import { openai } from '@ai-sdk/openai'; +import { saveChat } from '@util/chat-store'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +export async function POST(req: Request) { + const { messages, chatId }: { messages: UIMessage[]; chatId: string } = + await req.json(); + + const result = streamText({ + model: openai('gpt-4o-mini'), + messages: convertToModelMessages(messages), + }); + + // consume the stream to ensure it runs to completion and triggers onFinish + // even when the client response is aborted (e.g. when the browser tab is closed). + // no await + result.consumeStream({ + onError: error => { + console.log('Error during background stream consumption: ', error); // optional error callback + }, + }); + + return result.toUIMessageStreamResponse({ + originalMessages: messages, + onFinish: ({ messages }) => { + saveChat({ chatId, messages }); + }, + }); +} diff --git a/app/api/use-chat-resume/[id]/stream/route.ts b/app/api/use-chat-resume/[id]/stream/route.ts new file mode 100644 index 0000000..d000cdd --- /dev/null +++ b/app/api/use-chat-resume/[id]/stream/route.ts @@ -0,0 +1,43 @@ +import { loadStreams } from '@/util/chat-store'; +import { createUIMessageStream, JsonToSseTransformStream } from 'ai'; +import { after } from 'next/server'; +import { createResumableStreamContext } from 'resumable-stream'; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +export async function GET( + request: Request, + { params }: { params: Promise<{ id: string }> }, +) { + const { id } = await params; + + if (!id) { + return new Response('id is required', { status: 400 }); + } + + const streamIds = await loadStreams(id); + + if (!streamIds.length) { + return new Response(null, { status: 204 }); + } + + const recentStreamId = streamIds.at(-1); + + if (!recentStreamId) { + return new Response(null, { status: 204 }); + } + + const streamContext = createResumableStreamContext({ + waitUntil: after, + }); + + const resumedStream = + await streamContext.resumeExistingStream(recentStreamId); + + if (!resumedStream) { + return new Response(null, { status: 204 }); + } + + return new Response(resumedStream); +} diff --git a/app/api/use-chat-resume/route.ts b/app/api/use-chat-resume/route.ts new file mode 100644 index 0000000..1dea579 --- /dev/null +++ b/app/api/use-chat-resume/route.ts @@ -0,0 +1,54 @@ +import { + appendMessageToChat, + appendStreamId, + saveChat, +} from '@/util/chat-store'; +import { openai } from '@ai-sdk/openai'; +import { + convertToModelMessages, + createUIMessageStream, + generateId, + JsonToSseTransformStream, + streamText, + UIMessage, +} from 'ai'; +import { after } from 'next/server'; +import { createResumableStreamContext } from 'resumable-stream'; + +// Allow streaming responses up to 30 seconds +export 
const maxDuration = 30; + +export async function POST(req: Request) { + const { chatId, messages }: { chatId: string; messages: UIMessage[] } = + await req.json(); + + const streamId = generateId(); + + const recentUserMessage = messages + .filter(message => message.role === 'user') + .at(-1); + + if (!recentUserMessage) { + throw new Error('No recent user message found'); + } + + await appendMessageToChat({ chatId, message: recentUserMessage }); + await appendStreamId({ chatId, streamId }); + + const result = streamText({ + model: openai('gpt-4o'), + messages: convertToModelMessages(messages), + }); + + return result.toUIMessageStreamResponse({ + originalMessages: messages, + onFinish: ({ messages }) => { + saveChat({ chatId, messages }); + }, + async consumeSseStream({ stream }) { + // send the sse stream into a resumable stream sink as well: + const streamContext = createResumableStreamContext({ waitUntil: after }); + await streamContext.createNewResumableStream(streamId, () => stream); + }, + }); +} diff --git a/app/api/use-chat-sources/route.ts b/app/api/use-chat-sources/route.ts new file mode 100644 index 0000000..75e2820 --- /dev/null +++ b/app/api/use-chat-sources/route.ts @@ -0,0 +1,34 @@ +import { anthropic } from '@ai-sdk/anthropic'; +import { + convertToModelMessages, + InferUITool, + streamText, + UIDataTypes, + UIMessage, +} from 'ai'; + +export type SourcesChatMessage = UIMessage< + never, + UIDataTypes, + { + web_search: InferUITool< + ReturnType + >; + } +>; + +export async function POST(req: Request) { + const { messages } = await req.json(); + + const result = streamText({ + model: anthropic('claude-3-5-sonnet-latest'), + tools: { + web_search: anthropic.tools.webSearch_20250305(), + }, + messages: convertToModelMessages(messages), + }); + + return result.toUIMessageStreamResponse({ + sendSources: true, + }); +} diff --git a/app/api/use-chat-streaming-tool-calls/route.ts b/app/api/use-chat-streaming-tool-calls/route.ts new file mode 100644 index 0000000..666cf06 --- /dev/null +++ b/app/api/use-chat-streaming-tool-calls/route.ts @@ -0,0 +1,66 @@ +import { openai } from '@ai-sdk/openai'; +import { convertToModelMessages, streamText, UIDataTypes, UIMessage } from 'ai'; +import { z } from 'zod'; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +export type StreamingToolCallsMessage = UIMessage< + never, + UIDataTypes, + { + showWeatherInformation: { + input: { + city: string; + weather: string; + temperature: number; + typicalWeather: string; + }; + output: string; + }; + } +>; + +export async function POST(req: Request) { + const { messages } = await req.json(); + + const result = streamText({ + model: openai('gpt-4o'), + messages: convertToModelMessages(messages), + system: + 'You are a helpful assistant that answers questions about the weather in a given city.' 
+ + 'You use the showWeatherInformation tool to show the weather information to the user instead of talking about it.', + tools: { + // server-side tool with execute function: + getWeatherInformation: { + description: 'show the weather in a given city to the user', + inputSchema: z.object({ city: z.string() }), + execute: async ({}: { city: string }) => { + const weatherOptions = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy']; + return { + weather: + weatherOptions[Math.floor(Math.random() * weatherOptions.length)], + temperature: Math.floor(Math.random() * 50 - 10), + }; + }, + }, + // client-side tool that displays weather information to the user: + showWeatherInformation: { + description: + 'Show the weather information to the user. Always use this tool to tell weather information to the user.', + inputSchema: z.object({ + city: z.string(), + weather: z.string(), + temperature: z.number(), + typicalWeather: z + .string() + .describe( + '2-3 sentences about the typical weather in the city during spring.', + ), + }), + }, + }, + }); + + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/use-chat-throttle/route.ts b/app/api/use-chat-throttle/route.ts new file mode 100644 index 0000000..641c7a7 --- /dev/null +++ b/app/api/use-chat-throttle/route.ts @@ -0,0 +1,25 @@ +import { createUIMessageStreamResponse, simulateReadableStream } from 'ai'; + +export async function POST(req: Request) { + return createUIMessageStreamResponse({ + stream: simulateReadableStream({ + initialDelayInMs: 0, // Delay before the first chunk + chunkDelayInMs: 0, // Delay between chunks + chunks: [ + { + type: 'start', + }, + { + type: 'start-step', + }, + ...Array(5000).fill({ type: 'text', value: 'T\n' }), + { + type: 'finish-step', + }, + { + type: 'finish', + }, + ], + }), + }); +} diff --git a/app/api/use-chat-tools/route.ts b/app/api/use-chat-tools/route.ts new file mode 100644 index 0000000..b584fed --- /dev/null +++ b/app/api/use-chat-tools/route.ts @@ -0,0 +1,108 @@ +import { openai } from '@ai-sdk/openai'; +import { + convertToModelMessages, + InferUITools, + stepCountIs, + streamText, + tool, + UIDataTypes, + UIMessage, + validateUIMessages, +} from 'ai'; +import { z } from 'zod'; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +const getWeatherInformationTool = tool({ + description: 'show the weather in a given city to the user', + inputSchema: z.object({ city: z.string() }), + async *execute({ city }: { city: string }, { messages }) { + yield { state: 'loading' as const }; + + // count the number of assistant messages. 
throw error if 2 or less + const assistantMessageCount = messages.filter( + message => message.role === 'assistant', + ).length; + + // if (assistantMessageCount <= 2) { + // throw new Error('could not get weather information'); + // } + + // Add artificial delay of 5 seconds + await new Promise(resolve => setTimeout(resolve, 5000)); + + const weatherOptions = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy']; + const weather = + weatherOptions[Math.floor(Math.random() * weatherOptions.length)]; + + yield { + state: 'ready' as const, + temperature: 72, + weather, + }; + }, + + onInputStart: () => { + console.log('onInputStart'); + }, + onInputDelta: ({ inputTextDelta }) => { + console.log('onInputDelta', inputTextDelta); + }, + onInputAvailable: ({ input }) => { + console.log('onInputAvailable', input); + }, +}); + +const askForConfirmationTool = tool({ + description: 'Ask the user for confirmation.', + inputSchema: z.object({ + message: z.string().describe('The message to ask for confirmation.'), + }), + outputSchema: z.string(), +}); + +const getLocationTool = tool({ + description: + 'Get the user location. Always ask for confirmation before using this tool.', + inputSchema: z.object({}), + outputSchema: z.string(), +}); + +const tools = { + // server-side tool with execute function: + getWeatherInformation: getWeatherInformationTool, + // client-side tool that starts user interaction: + askForConfirmation: askForConfirmationTool, + // client-side tool that is automatically executed on the client: + getLocation: getLocationTool, +} as const; + +export type UseChatToolsMessage = UIMessage< + never, + UIDataTypes, + InferUITools +>; + +export async function POST(req: Request) { + const body = await req.json(); + + const messages = await validateUIMessages({ + messages: body.messages, + tools, + }); + + const result = streamText({ + model: openai('gpt-4o'), + messages: convertToModelMessages(messages), + stopWhen: stepCountIs(5), // multi-steps for server-side tools + tools, + }); + + return result.toUIMessageStreamResponse({ + // originalMessages: messages, //add if you want to have correct ids + onFinish: options => { + console.log('onFinish', options); + }, + }); +} diff --git a/app/api/use-completion-server-side-multi-step/route.ts b/app/api/use-completion-server-side-multi-step/route.ts new file mode 100644 index 0000000..3b6aad2 --- /dev/null +++ b/app/api/use-completion-server-side-multi-step/route.ts @@ -0,0 +1,32 @@ +import { openai } from '@ai-sdk/openai'; +import { stepCountIs, streamText, tool } from 'ai'; +import { z } from 'zod'; + +// Allow streaming responses up to 60 seconds +export const maxDuration = 60; + +export async function POST(req: Request) { + // Extract the `prompt` from the body of the request + const { prompt } = await req.json(); + + const result = streamText({ + model: openai('gpt-4-turbo'), + tools: { + weather: tool({ + description: 'Get the weather in a location', + inputSchema: z.object({ + location: z.string().describe('The location to get the weather for'), + }), + execute: async ({ location }) => ({ + location, + temperature: 72 + Math.floor(Math.random() * 21) - 10, + }), + }), + }, + stopWhen: stepCountIs(4), + prompt, + }); + + // Respond with the stream + return result.toUIMessageStreamResponse(); +} diff --git a/app/api/use-completion-throttle/route.ts b/app/api/use-completion-throttle/route.ts new file mode 100644 index 0000000..641c7a7 --- /dev/null +++ b/app/api/use-completion-throttle/route.ts @@ -0,0 +1,25 @@ +import { 
diff --git a/app/api/use-completion-throttle/route.ts b/app/api/use-completion-throttle/route.ts
new file mode 100644
index 0000000..641c7a7
--- /dev/null
+++ b/app/api/use-completion-throttle/route.ts
@@ -0,0 +1,25 @@
+import { createUIMessageStreamResponse, simulateReadableStream } from 'ai';
+
+export async function POST(req: Request) {
+  return createUIMessageStreamResponse({
+    stream: simulateReadableStream({
+      initialDelayInMs: 0, // Delay before the first chunk
+      chunkDelayInMs: 0, // Delay between chunks
+      chunks: [
+        {
+          type: 'start',
+        },
+        {
+          type: 'start-step',
+        },
+        ...Array(5000).fill({ type: 'text', value: 'T\n' }),
+        {
+          type: 'finish-step',
+        },
+        {
+          type: 'finish',
+        },
+      ],
+    }),
+  });
+}
diff --git a/app/api/use-object-expense-tracker/route.ts b/app/api/use-object-expense-tracker/route.ts
new file mode 100644
index 0000000..8a82a75
--- /dev/null
+++ b/app/api/use-object-expense-tracker/route.ts
@@ -0,0 +1,35 @@
+import { openai } from '@ai-sdk/openai';
+import { streamObject } from 'ai';
+import { expenseSchema } from './schema';
+
+// Allow streaming responses up to 30 seconds
+export const maxDuration = 30;
+
+export async function POST(req: Request) {
+  const { expense }: { expense: string } = await req.json();
+
+  const result = streamObject({
+    model: openai('gpt-4o'),
+    system:
+      'You categorize expenses into one of the following categories: ' +
+      'TRAVEL, MEALS, ENTERTAINMENT, OFFICE SUPPLIES, OTHER.' +
+      // provide date (including day of week) for reference:
+      'The current date is: ' +
+      new Date()
+        .toLocaleDateString('en-US', {
+          year: 'numeric',
+          month: 'short',
+          day: '2-digit',
+          weekday: 'short',
+        })
+        .replace(/(\w+), (\w+) (\d+), (\d+)/, '$4-$2-$3 ($1)') +
+      '. When no date is supplied, use the current date.',
+    prompt: `Please categorize the following expense: "${expense}"`,
+    schema: expenseSchema,
+    onFinish({ object }) {
+      // save object to database
+    },
+  });
+
+  return result.toTextStreamResponse();
+}
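A hypothetical client for the expense route above could stream the object with `experimental_useObject` from `@ai-sdk/react`, reusing the schema defined in the next file. A minimal sketch, assuming that hook is available in the installed version (the import path is illustrative):

```tsx
'use client';

import { experimental_useObject as useObject } from '@ai-sdk/react';
import { expenseSchema } from '@/app/api/use-object-expense-tracker/schema';

export default function ExpensePage() {
  const { object, submit, isLoading } = useObject({
    api: '/api/use-object-expense-tracker',
    schema: expenseSchema,
  });

  return (
    <div>
      <button
        disabled={isLoading}
        onClick={() => submit({ expense: 'Team lunch, $120, yesterday' })}
      >
        Categorize
      </button>
      {/* fields may still be undefined while the object streams in */}
      <div>
        {object?.expense?.category} {object?.expense?.amount}
      </div>
    </div>
  );
}
```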
diff --git a/app/api/use-object-expense-tracker/schema.ts b/app/api/use-object-expense-tracker/schema.ts
new file mode 100644
index 0000000..9f7ebd9
--- /dev/null
+++ b/app/api/use-object-expense-tracker/schema.ts
@@ -0,0 +1,21 @@
+import { DeepPartial } from 'ai';
+import { z } from 'zod';
+
+export const expenseSchema = z.object({
+  expense: z.object({
+    category: z
+      .string()
+      .describe(
+        'Category of the expense. Allowed categories: TRAVEL, MEALS, ENTERTAINMENT, OFFICE SUPPLIES, OTHER.',
+      ),
+    amount: z.number().describe('Amount of the expense in USD.'),
+    date: z
+      .string()
+      .describe('Date of the expense. Format yyyy-mmm-dd, e.g. 1952-Feb-19.'),
+    details: z.string().describe('Details of the expense.'),
+  }),
+});
+
+export type Expense = z.infer<typeof expenseSchema>['expense'];
+
+export type PartialExpense = DeepPartial<Expense>;
diff --git a/app/api/use-object/route.ts b/app/api/use-object/route.ts
new file mode 100644
index 0000000..8ce361a
--- /dev/null
+++ b/app/api/use-object/route.ts
@@ -0,0 +1,18 @@
+import { openai } from '@ai-sdk/openai';
+import { streamObject } from 'ai';
+import { notificationSchema } from './schema';
+
+// Allow streaming responses up to 30 seconds
+export const maxDuration = 30;
+
+export async function POST(req: Request) {
+  const context = await req.json();
+
+  const result = streamObject({
+    model: openai('gpt-4o'),
+    prompt: `Generate 3 notifications for a messages app in this context: ${context}`,
+    schema: notificationSchema,
+  });
+
+  return result.toTextStreamResponse();
+}
diff --git a/app/api/use-object/schema.ts b/app/api/use-object/schema.ts
new file mode 100644
index 0000000..e63dc96
--- /dev/null
+++ b/app/api/use-object/schema.ts
@@ -0,0 +1,16 @@
+import { DeepPartial } from 'ai';
+import { z } from 'zod';
+
+// define a schema for the notifications
+export const notificationSchema = z.object({
+  notifications: z.array(
+    z.object({
+      name: z.string().describe('Name of a fictional person.'),
+      message: z.string().describe('Message. Do not use emojis or links.'),
+      minutesAgo: z.number(),
+    }),
+  ),
+});
+
+// define a type for the partial notifications during generation
+export type PartialNotification = DeepPartial<z.infer<typeof notificationSchema>>;
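Because the hook streams `DeepPartial` values, every nested field can be `undefined` while generation is in flight. A minimal consumer sketch for the notifications endpoint (a hypothetical page, not part of this commit) guards with optional chaining:

```tsx
'use client';

import { experimental_useObject as useObject } from '@ai-sdk/react';
import { notificationSchema } from '@/app/api/use-object/schema';

export default function NotificationsPage() {
  const { object, submit } = useObject({
    api: '/api/use-object',
    schema: notificationSchema,
  });

  return (
    <div>
      {/* the route embeds the submitted JSON value in its prompt */}
      <button onClick={() => submit('Messages during finals week.')}>
        Generate notifications
      </button>
      {object?.notifications?.map((notification, index) => (
        <div key={index}>
          <p>{notification?.name}</p>
          <p>{notification?.message}</p>
        </div>
      ))}
    </div>
  );
}
```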
diff --git a/app/bedrock/page.tsx b/app/bedrock/page.tsx
new file mode 100644
index 0000000..2d35f81
--- /dev/null
+++ b/app/bedrock/page.tsx
@@ -0,0 +1,54 @@
+'use client';
+
+import { useChat } from '@ai-sdk/react';
+import { DefaultChatTransport } from 'ai';
+import ChatInput from '@/component/chat-input';
+
+export default function Chat() {
+  const { error, status, sendMessage, messages, regenerate, stop } = useChat({
+    transport: new DefaultChatTransport({ api: '/api/bedrock' }),
+  });
+
+  return (
+    <div>
+      {messages.map(m => (
+        <div key={m.id}>
+          {m.role === 'user' ? 'User: ' : 'AI: '}
+          {m.parts.map(part => {
+            if (part.type === 'text') {
+              return part.text;
+            }
+          })}
+        </div>
+      ))}
+
+      {(status === 'submitted' || status === 'streaming') && (
+        <div>
+          {status === 'submitted' && <div>Loading...</div>}
+          <button type="button" onClick={stop}>
+            Stop
+          </button>
+        </div>
+      )}
+
+      {error && (
+        <div>
+          <div>An error occurred.</div>
+          <button type="button" onClick={() => regenerate()}>
+            Retry
+          </button>
+        </div>
+      )}
+
+      <ChatInput status={status} onSubmit={text => sendMessage({ text })} />
+    </div>
+  );
+}
diff --git a/app/completion-rsc/generate-completion.ts b/app/completion-rsc/generate-completion.ts
new file mode 100644
index 0000000..6b4ebc4
--- /dev/null
+++ b/app/completion-rsc/generate-completion.ts
@@ -0,0 +1,15 @@
+'use server';
+
+import { openai } from '@ai-sdk/openai';
+import { streamText } from 'ai';
+import { createStreamableValue } from '@ai-sdk/rsc';
+
+export async function generateCompletion(prompt: string) {
+  const result = streamText({
+    model: openai('gpt-4-turbo'),
+    maxOutputTokens: 2000,
+    prompt,
+  });
+
+  return createStreamableValue(result.textStream).value;
+}
diff --git a/app/completion-rsc/page.tsx b/app/completion-rsc/page.tsx
new file mode 100644
index 0000000..7c96c2d
--- /dev/null
+++ b/app/completion-rsc/page.tsx
@@ -0,0 +1,41 @@
+'use client';
+
+import { readStreamableValue } from '@ai-sdk/rsc';
+import { useState } from 'react';
+import { generateCompletion } from './generate-completion';
+
+export default function Chat() {
+  const [input, setInput] = useState('');
+  const [completion, setCompletion] = useState('');
+
+  const handleInputChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+    setInput(event.target.value);
+  };
+
+  return (
+    <div>
+      <h4>RSC Completion Example</h4>
+      {completion}
+      <form
+        onSubmit={async e => {
+          e.preventDefault();
+
+          const streamableCompletion = await generateCompletion(input);
+          for await (const text of readStreamableValue(streamableCompletion)) {
+            setCompletion(text ?? '');
+          }
+        }}
+      >
+        <input value={input} onChange={handleInputChange} />
+      </form>
+    </div>
+  );
+}
diff --git a/app/completion/page.tsx b/app/completion/page.tsx
new file mode 100644
index 0000000..b37d88c
--- /dev/null
+++ b/app/completion/page.tsx
@@ -0,0 +1,50 @@
+'use client';
+
+import { useCompletion } from '@ai-sdk/react';
+
+export default function Page() {
+  const {
+    completion,
+    input,
+    handleInputChange,
+    handleSubmit,
+    error,
+    isLoading,
+    stop,
+  } = useCompletion();
+
+  return (
+    <div>
+      <h4>useCompletion Example</h4>
+      {error && <div>{error.message}</div>}
+      {isLoading && (
+        <div>
+          <div>Loading...</div>
+          <button type="button" onClick={stop}>
+            Stop
+          </button>
+        </div>
+      )}
+      {completion}
+      <form onSubmit={handleSubmit}>
+        <input value={input} onChange={handleInputChange} />
+      </form>
+    </div>
+  );
+}
diff --git a/app/dynamic-tools/page.tsx b/app/dynamic-tools/page.tsx
new file mode 100644
index 0000000..a048bce
--- /dev/null
+++ b/app/dynamic-tools/page.tsx
@@ -0,0 +1,85 @@
+'use client';
+
+import ChatInput from '@/component/chat-input';
+import { useChat } from '@ai-sdk/react';
+import { DefaultChatTransport } from 'ai';
+import { ToolsMessage } from '../api/dynamic-tools/route';
+
+export default function Chat() {
+  const { messages, sendMessage, status } = useChat<ToolsMessage>({
+    transport: new DefaultChatTransport({ api: '/api/dynamic-tools' }),
+  });
+
+  return (
+    <div>
+      {messages?.map(message => (
+        <div key={message.id}>
+          {`${message.role}: `}
+          {message.parts.map((part, index) => {
+            switch (part.type) {
+              case 'text':
+                return <div key={index}>{part.text}</div>;
+
+              case 'step-start':
+                return index > 0 ? <div key={index} /> : null;
+
+              case 'dynamic-tool': {
+                switch (part.state) {
+                  case 'input-streaming':
+                  case 'input-available':
+                  case 'output-available':
+                    return (
+                      <pre key={index}>{JSON.stringify(part, null, 2)}</pre>
+                    );
+                  case 'output-error':
+                    return <div key={index}>Error: {part.errorText}</div>;
+                }
+              }
+
+              case 'tool-getWeatherInformation': {
+                switch (part.state) {
+                  // example of pre-rendering streaming tool calls:
+                  case 'input-streaming':
+                    return (
+                      <pre key={index}>
+                        {JSON.stringify(part.input, null, 2)}
+                      </pre>
+                    );
+                  case 'input-available':
+                    return (
+                      <div key={index}>
+                        Getting weather information for {part.input.city}...
+                      </div>
+                    );
+                  case 'output-available':
+                    return (
+                      <div key={index}>
+                        Weather in {part.input.city}: {part.output}
+                      </div>
+                    );
+                  case 'output-error':
+                    return <div key={index}>Error: {part.errorText}</div>;
+                }
+              }
+            }
+          })}
+        </div>
+      ))}
+
+      <ChatInput status={status} onSubmit={text => sendMessage({ text })} />
+    </div>
+  );
+}
diff --git a/app/favicon.ico b/app/favicon.ico
new file mode 100644
index 0000000..718d6fe
Binary files /dev/null and b/app/favicon.ico differ
diff --git a/app/generate-image/page.tsx b/app/generate-image/page.tsx
new file mode 100644
index 0000000..73ecf14
--- /dev/null
+++ b/app/generate-image/page.tsx
@@ -0,0 +1,89 @@
+'use client';
+
+import { useState } from 'react';
+
+export default function Page() {
+  const [inputValue, setInputValue] = useState('');
+  const [imageSrc, setImageSrc] = useState<string | null>(null);
+  const [isLoading, setIsLoading] = useState(false);
+  const [error, setError] = useState<string | null>(null);
+
+  const handleSubmit = async (event: React.FormEvent) => {
+    event.preventDefault();
+
+    setIsLoading(true);
+    setImageSrc(null);
+    setError(null);
+
+    try {
+      const response = await fetch('/api/generate-image', {
+        method: 'POST',
+        headers: { 'Content-Type': 'application/json' },
+        body: JSON.stringify({ prompt: inputValue }),
+      });
+
+      if (response.ok) {
+        const image = await response.json();
+        setImageSrc(`data:image/png;base64,${image}`);
+        return;
+      }
+
+      setError(await response.text());
+    } finally {
+      setIsLoading(false);
+    }
+  };
+
+  return (
+    <div>
+      <div>
+        <h1>Image Generator</h1>
+        <p>Generate images.</p>
+      </div>
+
+      <form onSubmit={handleSubmit}>
+        <input
+          value={inputValue}
+          onChange={e => setInputValue(e.target.value)}
+          disabled={isLoading}
+        />
+        <button type="submit" disabled={isLoading}>
+          Generate
+        </button>
+      </form>
+
+      {error && <div>{error}</div>}
+
+      <div>
+        {isLoading ? (
+          <div>Loading...</div>
+        ) : (
+          imageSrc && <img src={imageSrc} alt="Generated Image" />
+        )}
+      </div>
+    </div>
+  );
+}
diff --git a/app/globals.css b/app/globals.css
new file mode 100644
index 0000000..b5c61c9
--- /dev/null
+++ b/app/globals.css
@@ -0,0 +1,3 @@
+@tailwind base;
+@tailwind components;
+@tailwind utilities;
diff --git a/app/layout.tsx b/app/layout.tsx
new file mode 100644
index 0000000..d29a547
--- /dev/null
+++ b/app/layout.tsx
@@ -0,0 +1,18 @@
+import './globals.css';
+
+export const metadata = {
+  title: 'AI SDK - Next.js OpenAI Examples',
+  description: 'Examples of using the AI SDK with Next.js and OpenAI.',
+};
+
+export default function RootLayout({
+  children,
+}: {
+  children: React.ReactNode;
+}) {
+  return (
+    <html lang="en">
+      <body>{children}</body>
+    </html>
+  );
+}
diff --git a/app/mcp-zapier/page.tsx b/app/mcp-zapier/page.tsx
new file mode 100644
index 0000000..17a139e
--- /dev/null
+++ b/app/mcp-zapier/page.tsx
@@ -0,0 +1,49 @@
+'use client';
+
+import { useChat } from '@ai-sdk/react';
+import { DefaultChatTransport, isToolUIPart } from 'ai';
+import { useState } from 'react';
+
+export default function Page() {
+  const [input, setInput] = useState('');
+  const { messages, sendMessage } = useChat({
+    transport: new DefaultChatTransport({ api: '/api/mcp-zapier' }),
+  });
+
+  return (
+    <div>
+      <h1>My AI Assistant</h1>
+
+      <div>
+        {messages.map(message => (
+          <div key={message.id}>
+            {`${message.role}: `}
+            {message.parts.map((part, index) => {
+              if (part.type === 'text') {
+                return <span key={index}>{part.text}</span>;
+              } else if (isToolUIPart(part)) {
+                return (
+                  <pre key={index}>{JSON.stringify(part, null, 2)}</pre>
+                );
+              }
+            })}
+          </div>
+        ))}
+      </div>
+