Initial commit

Created from https://vercel.com/new
songtianlun 2025-09-26 15:46:29 +00:00
commit baf3fed472
142 changed files with 6435 additions and 0 deletions

17
.env.local.example Normal file
@@ -0,0 +1,17 @@
# You must first activate a Billing Account here: https://platform.openai.com/account/billing/overview
# Then get your OpenAI API Key here: https://platform.openai.com/account/api-keys
OPENAI_API_KEY=xxxxxxx
# You must first create an OpenAI Assistant here: https://platform.openai.com/assistants
# Then get your Assistant ID here: https://platform.openai.com/assistants
ASSISTANT_ID=xxxxxxx
# If you choose to use external files for attachments, you will need to configure a Vercel Blob Store.
# Instructions to create a Vercel Blob Store here: https://vercel.com/docs/storage/vercel-blob
BLOB_READ_WRITE_TOKEN=xxxxxxx
# Required for reasoning example
FIREWORKS_API_KEY=xxxxxxx
# Required for resumable streams. You can create a Redis store here: https://vercel.com/marketplace/redis
REDIS_URL=xxxxxx

39
.gitignore vendored Normal file
@@ -0,0 +1,39 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
/.pnp
.pnp.js
# testing
/coverage
# next.js
/.next/
/out/
# production
/build
# misc
.DS_Store
*.pem
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# local env files
.env*.local
# vercel
.vercel
# typescript
*.tsbuildinfo
next-env.d.ts
# persistence
.chats
.streams

43
README.md Normal file
@@ -0,0 +1,43 @@
# AI SDK, Next.js, and OpenAI Chat Example
This example shows how to use the [AI SDK](https://ai-sdk.dev/docs) with [Next.js](https://nextjs.org/) and [OpenAI](https://openai.com) to create a ChatGPT-like AI-powered streaming chat bot.
## Deploy your own
Deploy the example using [Vercel](https://vercel.com?utm_source=github&utm_medium=readme&utm_campaign=ai-sdk-example):
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fvercel%2Fai%2Ftree%2Fmain%2Fexamples%2Fnext-openai&env=OPENAI_API_KEY&project-name=ai-sdk-next-openai&repository-name=ai-sdk-next-openai)
## How to use
Execute [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app) with [npm](https://docs.npmjs.com/cli/init), [Yarn](https://yarnpkg.com/lang/en/docs/cli/create/), or [pnpm](https://pnpm.io) to bootstrap the example:
```bash
npx create-next-app --example https://github.com/vercel/ai/tree/main/examples/next-openai next-openai-app
```
```bash
yarn create next-app --example https://github.com/vercel/ai/tree/main/examples/next-openai next-openai-app
```
```bash
pnpm create next-app --example https://github.com/vercel/ai/tree/main/examples/next-openai next-openai-app
```
To run the example locally, you need to:
1. Sign up at [OpenAI's Developer Platform](https://platform.openai.com/signup).
2. Go to [OpenAI's dashboard](https://platform.openai.com/account/api-keys) and create an API key.
3. If you choose to use external files for attachments, create a [Vercel Blob Store](https://vercel.com/docs/storage/vercel-blob).
4. Set the required environment variables as shown in [the example env file](./.env.local.example), but in a new file called `.env.local`.
5. `pnpm install` to install the required dependencies.
6. `pnpm dev` to launch the development server.
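For example, you can copy the provided example file and then fill in your own keys:
```bash
cp .env.local.example .env.local
```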
## Learn More
To learn more about OpenAI, Next.js, and the AI SDK, take a look at the following resources:
- [AI SDK docs](https://ai-sdk.dev/docs)
- [Vercel AI Playground](https://ai-sdk.dev/playground)
- [OpenAI Documentation](https://platform.openai.com/docs) - learn about OpenAI features and API.
- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.

@@ -0,0 +1,57 @@
'use client';
import AnthropicWebFetchView from '@/component/anthropic-web-fetch-view';
import ChatInput from '@/component/chat-input';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import { AnthropicWebFetchMessage } from '../api/anthropic-web-fetch/route';
export default function TestAnthropicWebFetch() {
const { status, sendMessage, messages } = useChat<AnthropicWebFetchMessage>({
transport: new DefaultChatTransport({
api: '/api/anthropic-web-fetch',
}),
});
return (
<div className="flex flex-col py-24 mx-auto w-full max-w-md stretch">
<h1 className="mb-4 text-xl font-bold">Anthropic Web Fetch Test</h1>
{messages.map(message => (
<div key={message.id} className="whitespace-pre-wrap">
{message.role === 'user' ? 'User: ' : 'AI: '}
{message.parts.map((part, index) => {
if (part.type === 'text') {
return <div key={index}>{part.text}</div>;
}
if (part.type === 'tool-web_fetch') {
return <AnthropicWebFetchView invocation={part} key={index} />;
}
if (part.type === 'source-url') {
return (
<span key={index}>
[
<a
href={part.url}
target="_blank"
rel="noopener noreferrer"
className="text-sm font-bold text-blue-500 hover:underline"
>
{part.title ?? new URL(part.url).hostname}
</a>
]
</span>
);
}
return null;
})}
</div>
))}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}

@@ -0,0 +1,57 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import ChatInput from '@/component/chat-input';
import { AnthropicWebSearchMessage } from '@/app/api/anthropic-web-search/route';
import AnthropicWebSearchView from '@/component/anthropic-web-search-view';
export default function TestAnthropicWebSearch() {
const { status, sendMessage, messages } = useChat<AnthropicWebSearchMessage>({
transport: new DefaultChatTransport({
api: '/api/anthropic-web-search',
}),
});
return (
<div className="flex flex-col py-24 mx-auto w-full max-w-md stretch">
<h1 className="mb-4 text-xl font-bold">Anthropic Web Search Test</h1>
{messages.map(message => (
<div key={message.id} className="whitespace-pre-wrap">
{message.role === 'user' ? 'User: ' : 'AI: '}
{message.parts.map((part, index) => {
if (part.type === 'text') {
return <div key={index}>{part.text}</div>;
}
if (part.type === 'tool-web_search') {
return <AnthropicWebSearchView invocation={part} key={index} />;
}
if (part.type === 'source-url') {
return (
<span key={index}>
[
<a
href={part.url}
target="_blank"
rel="noopener noreferrer"
className="text-sm font-bold text-blue-500 hover:underline"
>
{part.title ?? new URL(part.url).hostname}
</a>
]
</span>
);
}
return null;
})}
</div>
))}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}

@@ -0,0 +1,38 @@
import { anthropic } from '@ai-sdk/anthropic';
import {
convertToModelMessages,
InferUITools,
streamText,
ToolSet,
UIDataTypes,
UIMessage,
validateUIMessages,
} from 'ai';
const tools = {
web_fetch: anthropic.tools.webFetch_20250910(),
} satisfies ToolSet;
export type AnthropicWebFetchMessage = UIMessage<
never,
UIDataTypes,
InferUITools<typeof tools>
>;
export async function POST(req: Request) {
const { messages } = await req.json();
const uiMessages = await validateUIMessages({ messages });
const result = streamText({
model: anthropic('claude-sonnet-4-0'),
tools,
messages: convertToModelMessages(uiMessages),
onStepFinish: ({ request }) => {
console.log(JSON.stringify(request.body, null, 2));
},
});
return result.toUIMessageStreamResponse({
sendSources: true,
});
}

@@ -0,0 +1,46 @@
import { anthropic } from '@ai-sdk/anthropic';
import {
convertToModelMessages,
InferUITools,
streamText,
ToolSet,
UIDataTypes,
UIMessage,
validateUIMessages,
} from 'ai';
const tools = {
web_search: anthropic.tools.webSearch_20250305({
maxUses: 3,
userLocation: {
type: 'approximate',
city: 'New York',
country: 'US',
timezone: 'America/New_York',
},
}),
} satisfies ToolSet;
export type AnthropicWebSearchMessage = UIMessage<
never,
UIDataTypes,
InferUITools<typeof tools>
>;
export async function POST(req: Request) {
const { messages } = await req.json();
const uiMessages = await validateUIMessages({ messages });
const result = streamText({
model: anthropic('claude-sonnet-4-20250514'),
tools,
messages: convertToModelMessages(uiMessages),
onStepFinish: ({ request }) => {
console.log(JSON.stringify(request.body, null, 2));
},
});
return result.toUIMessageStreamResponse({
sendSources: true,
});
}

29
app/api/bedrock/route.ts Normal file
@@ -0,0 +1,29 @@
import { bedrock } from '@ai-sdk/amazon-bedrock';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
export async function POST(req: Request) {
try {
const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: bedrock('anthropic.claude-3-haiku-20240307-v1:0'),
prompt: convertToModelMessages(messages),
maxOutputTokens: 500,
temperature: 0.7,
});
return result.toUIMessageStreamResponse();
} catch (error) {
console.error('Bedrock API Error:', error);
return new Response(
JSON.stringify({
error: 'Bedrock API failed',
details: error instanceof Error ? error.message : 'Unknown error',
}),
{
status: 500,
headers: { 'Content-Type': 'application/json' },
},
);
}
}

@@ -0,0 +1,17 @@
import { cohere } from '@ai-sdk/cohere';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const prompt = convertToModelMessages(messages);
const result = streamText({
model: cohere('command-r-plus'),
prompt,
});
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,17 @@
import { google } from '@ai-sdk/google';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const prompt = convertToModelMessages(messages);
const result = streamText({
model: google('gemini-2.0-flash'),
prompt,
});
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,18 @@
import { groq } from '@ai-sdk/groq';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const prompt = convertToModelMessages(messages);
const result = streamText({
model: groq('llama-3.3-70b-versatile'),
prompt,
});
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,18 @@
import { mistral } from '@ai-sdk/mistral';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const prompt = convertToModelMessages(messages);
const result = streamText({
model: mistral('mistral-small-latest'),
prompt,
});
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,42 @@
import { openai, OpenAIResponsesProviderOptions } from '@ai-sdk/openai';
import {
convertToModelMessages,
InferUITools,
streamText,
ToolSet,
UIDataTypes,
UIMessage,
validateUIMessages,
} from 'ai';
const tools = {
code_interpreter: openai.tools.codeInterpreter(),
} satisfies ToolSet;
export type OpenAICodeInterpreterMessage = UIMessage<
never,
UIDataTypes,
InferUITools<typeof tools>
>;
export async function POST(req: Request) {
const { messages } = await req.json();
const uiMessages = await validateUIMessages({ messages });
const result = streamText({
model: openai('gpt-5-nano'),
tools,
messages: convertToModelMessages(uiMessages),
onStepFinish: ({ request }) => {
console.log(JSON.stringify(request.body, null, 2));
},
providerOptions: {
openai: {
store: false,
include: ['reasoning.encrypted_content'],
} satisfies OpenAIResponsesProviderOptions,
},
});
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,47 @@
import { openai, OpenAIResponsesProviderOptions } from '@ai-sdk/openai';
import {
convertToModelMessages,
InferUITools,
streamText,
ToolSet,
UIDataTypes,
UIMessage,
validateUIMessages,
} from 'ai';
export const maxDuration = 30;
const tools = {
file_search: openai.tools.fileSearch({
vectorStoreIds: ['vs_68caad8bd5d88191ab766cf043d89a18'],
}),
} satisfies ToolSet;
export type OpenAIFileSearchMessage = UIMessage<
never,
UIDataTypes,
InferUITools<typeof tools>
>;
export async function POST(req: Request) {
const { messages } = await req.json();
const uiMessages = await validateUIMessages({ messages });
const result = streamText({
model: openai('gpt-5-nano'),
tools,
messages: convertToModelMessages(uiMessages),
onStepFinish: ({ request }) => {
console.log(JSON.stringify(request.body, null, 2));
},
providerOptions: {
openai: {
include: ['file_search_call.results'],
} satisfies OpenAIResponsesProviderOptions,
},
});
return result.toUIMessageStreamResponse({
sendSources: true,
});
}

@@ -0,0 +1,42 @@
import { openai } from '@ai-sdk/openai';
import {
convertToModelMessages,
InferUITools,
streamText,
ToolSet,
UIDataTypes,
UIMessage,
validateUIMessages,
} from 'ai';
const tools = {
image_generation: openai.tools.imageGeneration(),
} satisfies ToolSet;
export type OpenAIImageGenerationMessage = UIMessage<
never,
UIDataTypes,
InferUITools<typeof tools>
>;
export async function POST(req: Request) {
const { messages } = await req.json();
const uiMessages = await validateUIMessages({ messages });
const result = streamText({
model: openai('gpt-5-nano'),
tools,
messages: convertToModelMessages(uiMessages),
onStepFinish: ({ request }) => {
console.log(JSON.stringify(request.body, null, 2));
},
providerOptions: {
// openai: {
// store: false,
// include: ['reasoning.encrypted_content'],
// } satisfies OpenAIResponsesProviderOptions,
},
});
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,23 @@
import { openai } from '@ai-sdk/openai';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const prompt = convertToModelMessages(messages);
const result = streamText({
model: openai.responses('o3-mini'),
prompt,
providerOptions: {
openai: {
reasoningEffort: 'low',
reasoningSummary: 'auto',
},
},
});
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,52 @@
import { openai, OpenAIResponsesProviderOptions } from '@ai-sdk/openai';
import {
convertToModelMessages,
InferUITools,
streamText,
ToolSet,
UIDataTypes,
UIMessage,
validateUIMessages,
} from 'ai';
const tools = {
web_search: openai.tools.webSearch({
searchContextSize: 'low',
userLocation: {
type: 'approximate',
city: 'San Francisco',
region: 'California',
country: 'US',
},
}),
} satisfies ToolSet;
export type OpenAIWebSearchMessage = UIMessage<
never,
UIDataTypes,
InferUITools<typeof tools>
>;
export async function POST(req: Request) {
const { messages } = await req.json();
const uiMessages = await validateUIMessages({ messages });
const result = streamText({
model: openai('gpt-5-nano'),
tools,
messages: convertToModelMessages(uiMessages),
onStepFinish: ({ request }) => {
console.log(JSON.stringify(request.body, null, 2));
},
providerOptions: {
openai: {
store: false,
include: ['reasoning.encrypted_content'],
} satisfies OpenAIResponsesProviderOptions,
},
});
return result.toUIMessageStreamResponse({
sendSources: true,
});
}

@@ -0,0 +1,17 @@
import { perplexity } from '@ai-sdk/perplexity';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const prompt = convertToModelMessages(messages);
const result = streamText({
model: perplexity('sonar-reasoning'),
prompt,
});
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,15 @@
import { openai } from '@ai-sdk/openai';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: openai('gpt-4o-mini'),
messages: convertToModelMessages(messages),
});
return result.toUIMessageStreamResponse();
}

17
app/api/chat-xai/route.ts Normal file
@@ -0,0 +1,17 @@
import { xai } from '@ai-sdk/xai';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const prompt = convertToModelMessages(messages);
const result = streamText({
model: xai('grok-beta'),
prompt,
});
return result.toUIMessageStreamResponse();
}

30
app/api/chat/route.ts Normal file
@@ -0,0 +1,30 @@
import { openai } from '@ai-sdk/openai';
import {
consumeStream,
convertToModelMessages,
streamText,
UIMessage,
} from 'ai';
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const prompt = convertToModelMessages(messages);
const result = streamText({
model: openai('gpt-4o'),
prompt,
abortSignal: req.signal,
});
return result.toUIMessageStreamResponse({
onFinish: async ({ isAborted }) => {
if (isAborted) {
console.log('Aborted');
}
},
consumeSseStream: consumeStream, // needed for correct abort handling
});
}

@@ -0,0 +1,19 @@
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
export async function POST(req: Request) {
// Extract the `prompt` from the body of the request
const { prompt } = await req.json();
// Ask OpenAI for a streaming completion given the prompt
const result = streamText({
model: openai('gpt-3.5-turbo-instruct'),
prompt,
});
// Respond with the stream
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,84 @@
import { openai } from '@ai-sdk/openai';
import {
convertToModelMessages,
dynamicTool,
InferUITools,
stepCountIs,
streamText,
tool,
ToolSet,
UIDataTypes,
UIMessage,
} from 'ai';
import { z } from 'zod';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
const getWeatherInformationTool = tool({
description: 'show the weather in a given city to the user',
inputSchema: z.object({ city: z.string() }),
execute: async ({ city }: { city: string }, { messages }) => {
// count the number of assistant messages. throw error if 2 or less
const assistantMessageCount = messages.filter(
message => message.role === 'assistant',
).length;
if (assistantMessageCount <= 2) {
throw new Error('could not get weather information');
}
// Add artificial delay of 5 seconds
await new Promise(resolve => setTimeout(resolve, 5000));
const weatherOptions = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy'];
return weatherOptions[Math.floor(Math.random() * weatherOptions.length)];
},
});
const staticTools = {
// server-side tool with execute function:
getWeatherInformation: getWeatherInformationTool,
} as const;
export type ToolsMessage = UIMessage<
never,
UIDataTypes,
InferUITools<typeof staticTools>
>;
function dynamicTools(): ToolSet {
return {
currentLocation: dynamicTool({
description: 'Get the current location.',
inputSchema: z.object({}),
execute: async () => {
const locations = ['New York', 'London', 'Paris'];
return {
location: locations[Math.floor(Math.random() * locations.length)],
};
},
}),
};
}
export async function POST(req: Request) {
const { messages } = await req.json();
const result = streamText({
model: openai('gpt-4o'),
messages: convertToModelMessages(messages),
stopWhen: stepCountIs(5), // multi-steps for server-side tools
tools: {
...staticTools,
...dynamicTools(),
},
});
return result.toUIMessageStreamResponse({
// originalMessages: messages, //add if you want to have correct ids
onFinish: options => {
console.log('onFinish', options);
},
});
}

61
app/api/files/route.ts Normal file
@@ -0,0 +1,61 @@
import { handleUpload, type HandleUploadBody } from '@vercel/blob/client';
import { NextResponse } from 'next/server';
/*
* This route is used to upload files to Vercel's Blob Storage.
* Example from https://vercel.com/docs/storage/vercel-blob/client-upload#create-a-client-upload-route
*/
export async function POST(request: Request): Promise<NextResponse> {
const body = (await request.json()) as HandleUploadBody;
try {
const jsonResponse = await handleUpload({
body,
request,
onBeforeGenerateToken: async (
pathname,
/* clientPayload */
) => {
// Generate a client token for the browser to upload the file
// ⚠️ Authenticate and authorize users before generating the token.
// Otherwise, you're allowing anonymous uploads.
return {
allowedContentTypes: [
'image/jpeg',
'image/png',
'image/gif',
'application/pdf',
'text/plain',
],
tokenPayload: JSON.stringify({
// optional, sent to your server on upload completion
// you could pass a user id from auth, or a value from clientPayload
}),
};
},
onUploadCompleted: async ({ blob, tokenPayload }) => {
// Get notified of client upload completion
// ⚠️ This will not work on `localhost` websites.
// Use ngrok or similar to test the full upload flow.
console.log('file upload completed', blob, tokenPayload);
try {
// Run any logic after the file upload completed
// const { userId } = JSON.parse(tokenPayload);
// await db.update({ avatar: blob.url, userId });
} catch (error) {
throw new Error('Could not complete operation');
}
},
});
return NextResponse.json(jsonResponse);
} catch (error) {
return NextResponse.json(
{ error: (error as Error).message },
{ status: 400 }, // The webhook will retry 5 times waiting for a 200
);
}
}

@@ -0,0 +1,20 @@
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';
// Allow responses up to 60 seconds
export const maxDuration = 60;
export async function POST(req: Request) {
const { prompt } = await req.json();
const { image } = await generateImage({
model: openai.imageModel('dall-e-3'),
prompt,
size: '1024x1024',
providerOptions: {
openai: { style: 'vivid', quality: 'hd' },
},
});
return Response.json(image.base64);
}

@@ -0,0 +1,33 @@
import { openai } from '@ai-sdk/openai';
import { experimental_createMCPClient, stepCountIs, streamText } from 'ai';
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages } = await req.json();
const mcpClient = await experimental_createMCPClient({
transport: {
type: 'sse',
url: 'https://actions.zapier.com/mcp/[YOUR_KEY]/sse',
},
});
try {
const zapierTools = await mcpClient.tools();
const result = streamText({
model: openai('gpt-4o'),
messages,
tools: zapierTools,
onFinish: async () => {
await mcpClient.close();
},
stopWhen: stepCountIs(10),
});
return result.toUIMessageStreamResponse();
} catch (error) {
return new Response('Internal Server Error', { status: 500 });
}
}

@@ -0,0 +1,104 @@
import { openai } from '@ai-sdk/openai';
import {
convertToModelMessages,
InferUITools,
stepCountIs,
streamText,
tool,
UIDataTypes,
UIMessage,
} from 'ai';
import { convertArrayToReadableStream, MockLanguageModelV2 } from 'ai/test';
import { z } from 'zod';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
const getWeatherInformationTool = tool({
description: 'show the weather in a given city to the user',
inputSchema: z.object({ city: z.string() }),
execute: async ({ city }: { city: string }) => {
// Add artificial delay of 5 seconds
await new Promise(resolve => setTimeout(resolve, 5000));
const weatherOptions = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy'];
return weatherOptions[Math.floor(Math.random() * weatherOptions.length)];
},
});
const tools = {
// server-side tool with execute function:
getWeatherInformation: getWeatherInformationTool,
} as const;
export type UseChatToolsMessage = UIMessage<
never,
UIDataTypes,
InferUITools<typeof tools>
>;
export async function POST(req: Request) {
const { messages } = await req.json();
console.log('messages', JSON.stringify(messages, null, 2));
const result = streamText({
model: openai('gpt-4o'),
messages: convertToModelMessages(messages),
stopWhen: stepCountIs(5), // multi-steps for server-side tools
tools,
prepareStep: async ({ stepNumber }) => {
// inject invalid tool call in first step:
if (stepNumber === 0) {
return {
model: new MockLanguageModelV2({
doStream: async () => ({
stream: convertArrayToReadableStream([
{ type: 'stream-start', warnings: [] },
{
type: 'tool-input-start',
id: 'call-1',
toolName: 'getWeatherInformation',
providerExecuted: true,
},
{
type: 'tool-input-delta',
id: 'call-1',
delta: `{ "cities": "San Francisco" }`,
},
{
type: 'tool-input-end',
id: 'call-1',
},
{
type: 'tool-call',
toolCallType: 'function',
toolCallId: 'call-1',
toolName: 'getWeatherInformation',
// wrong tool call arguments (city vs cities):
input: `{ "cities": "San Francisco" }`,
},
{
type: 'finish',
finishReason: 'stop',
usage: {
inputTokens: 10,
outputTokens: 20,
totalTokens: 30,
},
},
]),
}),
}),
};
}
},
});
return result.toUIMessageStreamResponse({
// originalMessages: messages, //add if you want to have correct ids
onFinish: options => {
console.log('onFinish', options);
},
});
}

@@ -0,0 +1,37 @@
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
// simple cache implementation, use Vercel KV or a similar service for production
const cache = new Map<string, string>();
export async function POST(req: Request) {
const { messages } = await req.json();
// come up with a key based on the request:
const key = JSON.stringify(messages);
// Check if we have a cached response
const cached = cache.get(key);
if (cached != null) {
return new Response(`data: ${cached}\n\n`, {
status: 200,
headers: { 'Content-Type': 'text/plain' },
});
}
// Call the language model:
const result = streamText({
model: openai('gpt-4o'),
messages,
async onFinish({ text }) {
// Cache the response text:
cache.set(key, text);
},
});
// Respond with the stream
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,39 @@
import { openai } from '@ai-sdk/openai';
import {
convertToModelMessages,
createUIMessageStream,
createUIMessageStreamResponse,
streamText,
UIMessage,
} from 'ai';
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const stream = createUIMessageStream({
execute: ({ writer }) => {
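// emit the 'start' chunk manually; the merged stream below is created with sendStart: false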
writer.write({ type: 'start' });
// write a custom url source to the stream:
writer.write({
type: 'source-url',
sourceId: 'source-1',
url: 'https://example.com',
title: 'Example Source',
});
const result = streamText({
model: openai('gpt-4o'),
messages: convertToModelMessages(messages),
});
writer.merge(result.toUIMessageStream({ sendStart: false }));
},
originalMessages: messages,
onFinish: options => {
console.log('onFinish', JSON.stringify(options, null, 2));
},
});
return createUIMessageStreamResponse({ stream });
}

@@ -0,0 +1,57 @@
import { openai } from '@ai-sdk/openai';
import { delay } from '@ai-sdk/provider-utils';
import {
convertToModelMessages,
createUIMessageStream,
createUIMessageStreamResponse,
stepCountIs,
streamText,
} from 'ai';
import { z } from 'zod';
export async function POST(req: Request) {
const { messages } = await req.json();
const stream = createUIMessageStream({
execute: ({ writer }) => {
const result = streamText({
model: openai('gpt-4o'),
stopWhen: stepCountIs(2),
tools: {
weather: {
description: 'Get the weather in a city',
inputSchema: z.object({
city: z.string(),
}),
execute: async ({ city }, { toolCallId }) => {
// update display
writer.write({
type: 'data-weather',
id: toolCallId,
data: { city, status: 'loading' },
});
await delay(2000); // fake delay
const weather = 'sunny';
// update display
writer.write({
type: 'data-weather',
id: toolCallId,
data: { city, weather, status: 'success' },
});
// for LLM roundtrip
return { city, weather };
},
},
},
messages: convertToModelMessages(messages),
});
writer.merge(result.toUIMessageStream());
},
});
return createUIMessageStreamResponse({ stream });
}

@@ -0,0 +1,60 @@
import { openai } from '@ai-sdk/openai';
import {
createUIMessageStreamResponse,
streamText,
createUIMessageStream,
convertToModelMessages,
stepCountIs,
} from 'ai';
import { processToolCalls } from './utils';
import { tools } from './tools';
import { HumanInTheLoopUIMessage } from './types';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages }: { messages: HumanInTheLoopUIMessage[] } =
await req.json();
const stream = createUIMessageStream({
originalMessages: messages,
execute: async ({ writer }) => {
// Utility function to handle tools that require human confirmation
// Checks for confirmation in last message and then runs associated tool
const processedMessages = await processToolCalls(
{
messages,
writer,
tools,
},
{
// type-safe object for tools without an execute function
getWeatherInformation: async ({ city }) => {
const conditions = ['sunny', 'cloudy', 'rainy', 'snowy'];
return `The weather in ${city} is ${
conditions[Math.floor(Math.random() * conditions.length)]
}.`;
},
},
);
const result = streamText({
model: openai('gpt-4o'),
messages: convertToModelMessages(processedMessages),
tools,
stopWhen: stepCountIs(5),
});
writer.merge(
result.toUIMessageStream({ originalMessages: processedMessages }),
);
},
onFinish: ({}) => {
// save messages here
console.log('Finished!');
},
});
return createUIMessageStreamResponse({ stream });
}

@@ -0,0 +1,24 @@
import { tool, ToolSet } from 'ai';
import { z } from 'zod';
const getWeatherInformation = tool({
description: 'show the weather in a given city to the user',
inputSchema: z.object({ city: z.string() }),
outputSchema: z.string(), // must define outputSchema
// no execute function, we want human in the loop
});
const getLocalTime = tool({
description: 'get the local time for a specified location',
inputSchema: z.object({ location: z.string() }),
// including execute function -> no confirmation required
execute: async ({ location }) => {
console.log(`Getting local time for ${location}`);
return '10am';
},
});
export const tools = {
getWeatherInformation,
getLocalTime,
} satisfies ToolSet;

@@ -0,0 +1,11 @@
import { InferUITools, UIDataTypes, UIMessage } from 'ai';
import { tools } from './tools';
export type MyTools = InferUITools<typeof tools>;
// Define custom message type with data part schemas
export type HumanInTheLoopUIMessage = UIMessage<
never, // metadata type
UIDataTypes,
MyTools
>;

@@ -0,0 +1,129 @@
import {
convertToModelMessages,
Tool,
ToolCallOptions,
ToolSet,
UIMessageStreamWriter,
getToolName,
isToolUIPart,
} from 'ai';
import { HumanInTheLoopUIMessage } from './types';
// Approval string to be shared across frontend and backend
export const APPROVAL = {
YES: 'Yes, confirmed.',
NO: 'No, denied.',
} as const;
function isValidToolName<K extends PropertyKey, T extends object>(
key: K,
obj: T,
): key is K & keyof T {
return key in obj;
}
/**
* Processes tool invocations where human input is required, executing tools when authorized.
*
* @param options - The function options
* @param options.tools - Map of tool names to Tool instances that may expose execute functions
* @param options.writer - UIMessageStream writer for sending results back to the client
* @param options.messages - Array of messages to process
 * @param executeFunctions - Map of tool names to execute functions
* @returns Promise resolving to the processed messages
*/
export async function processToolCalls<
Tools extends ToolSet,
ExecutableTools extends {
[Tool in keyof Tools as Tools[Tool] extends { execute: Function }
? never
: Tool]: Tools[Tool];
},
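// ExecutableTools: the subset of Tools without an execute function (their execution is supplied via executeFunctions)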
>(
{
writer,
messages,
}: {
tools: Tools; // used for type inference
writer: UIMessageStreamWriter;
messages: HumanInTheLoopUIMessage[]; // IMPORTANT: replace with your message type
},
executeFunctions: {
[K in keyof Tools & keyof ExecutableTools]?: (
args: ExecutableTools[K] extends Tool<infer P> ? P : never,
context: ToolCallOptions,
) => Promise<any>;
},
): Promise<HumanInTheLoopUIMessage[]> {
const lastMessage = messages[messages.length - 1];
const parts = lastMessage.parts;
if (!parts) return messages;
const processedParts = await Promise.all(
parts.map(async part => {
// Only process tool invocations parts
if (!isToolUIPart(part)) return part;
const toolName = getToolName(part);
// Only continue if we have an execute function for the tool (meaning it requires confirmation) and it's in a 'result' state
if (!(toolName in executeFunctions) || part.state !== 'output-available')
return part;
let result;
if (part.output === APPROVAL.YES) {
// Get the tool and check if the tool has an execute function.
if (
!isValidToolName(toolName, executeFunctions) ||
part.state !== 'output-available'
) {
return part;
}
const toolInstance = executeFunctions[toolName] as Tool['execute'];
if (toolInstance) {
result = await toolInstance(part.input, {
messages: convertToModelMessages(messages),
toolCallId: part.toolCallId,
});
} else {
result = 'Error: No execute function found on tool';
}
} else if (part.output === APPROVAL.NO) {
result = 'Error: User denied access to tool execution';
} else {
// For any unhandled responses, return the original part.
return part;
}
// Forward updated tool result to the client.
writer.write({
type: 'tool-output-available',
toolCallId: part.toolCallId,
output: result,
});
// Return updated toolInvocation with the actual result.
return {
...part,
output: result,
};
}),
);
// Finally return the processed messages
return [...messages.slice(0, -1), { ...lastMessage, parts: processedParts }];
}
export function getToolsRequiringConfirmation<
T extends ToolSet,
// E extends {
// [K in keyof T as T[K] extends { execute: Function } ? never : K]: T[K];
// },
>(tools: T): string[] {
return (Object.keys(tools) as (keyof T)[]).filter(key => {
const maybeTool = tools[key];
return typeof maybeTool.execute !== 'function';
}) as string[];
}

@@ -0,0 +1,15 @@
import { google } from '@ai-sdk/google';
import { streamText, convertToModelMessages } from 'ai';
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages } = await req.json();
const result = streamText({
model: google('gemini-2.0-flash-exp'),
messages: convertToModelMessages(messages),
});
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,11 @@
import { z } from 'zod';
export const exampleMetadataSchema = z.object({
createdAt: z.number().optional(),
duration: z.number().optional(),
model: z.string().optional(),
totalTokens: z.number().optional(),
finishReason: z.string().optional(),
});
export type ExampleMetadata = z.infer<typeof exampleMetadataSchema>;

@@ -0,0 +1,39 @@
import { openai } from '@ai-sdk/openai';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
import { ExampleMetadata } from './example-metadata-schema';
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: openai('gpt-4o'),
prompt: convertToModelMessages(messages),
});
return result.toUIMessageStreamResponse({
messageMetadata: ({ part }): ExampleMetadata | undefined => {
// send custom information to the client on start:
if (part.type === 'start') {
return {
createdAt: Date.now(),
model: 'gpt-4o', // initial model id
};
}
// send additional model information on finish-step:
if (part.type === 'finish-step') {
return {
model: part.response.modelId, // update with the actual model id
};
}
// when the message is finished, send additional information:
if (part.type === 'finish') {
return {
totalTokens: part.totalUsage.totalTokens,
finishReason: part.finishReason,
};
}
},
});
}

@@ -0,0 +1,25 @@
import { openai } from '@ai-sdk/openai';
import { saveChat } from '@util/chat-store';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
export async function POST(req: Request) {
const { messages, chatId }: { messages: UIMessage[]; chatId: string } =
await req.json();
const result = streamText({
model: openai('gpt-4o-mini'),
messages: convertToModelMessages(messages),
});
return result.toUIMessageStreamResponse({
originalMessages: messages,
messageMetadata: ({ part }) => {
if (part.type === 'start') {
return { createdAt: Date.now() };
}
},
onFinish: ({ messages }) => {
saveChat({ chatId, messages });
},
});
}

@@ -0,0 +1,23 @@
import { openai } from '@ai-sdk/openai';
import { loadChat, saveChat } from '@util/chat-store';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
export async function POST(req: Request) {
const { message, chatId }: { message: UIMessage; chatId: string } =
await req.json();
const previousMessages = await loadChat(chatId);
const messages = [...previousMessages, message];
const result = streamText({
model: openai('gpt-4o-mini'),
messages: convertToModelMessages(messages),
});
return result.toUIMessageStreamResponse({
originalMessages: messages,
onFinish: ({ messages }) => {
saveChat({ chatId, messages });
},
});
}

@@ -0,0 +1,20 @@
import { openai } from '@ai-sdk/openai';
import { saveChat } from '@util/chat-store';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
export async function POST(req: Request) {
const { messages, chatId }: { messages: UIMessage[]; chatId: string } =
await req.json();
const result = streamText({
model: openai('gpt-4o-mini'),
messages: convertToModelMessages(messages),
});
return result.toUIMessageStreamResponse({
originalMessages: messages,
onFinish: ({ messages }) => {
saveChat({ chatId, messages });
},
});
}

@@ -0,0 +1,48 @@
import { openai, OpenAIResponsesProviderOptions } from '@ai-sdk/openai';
import {
convertToModelMessages,
InferUITools,
streamText,
UIDataTypes,
UIMessage,
} from 'ai';
const tools = {
web_search: openai.tools.webSearch({
searchContextSize: 'high',
userLocation: {
type: 'approximate',
city: 'San Francisco',
region: 'California',
country: 'US',
},
}),
} as const;
export type ReasoningToolsMessage = UIMessage<
never, // could define metadata here
UIDataTypes, // could define data parts here
InferUITools<typeof tools>
>;
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages } = await req.json();
console.log(JSON.stringify(messages, null, 2));
const result = streamText({
model: openai('gpt-5'),
messages: convertToModelMessages(messages),
tools,
providerOptions: {
openai: {
reasoningSummary: 'detailed', // 'auto' for condensed or 'detailed' for comprehensive
} satisfies OpenAIResponsesProviderOptions,
},
});
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,24 @@
import { openai, OpenAIResponsesProviderOptions } from '@ai-sdk/openai';
import { convertToModelMessages, streamText } from 'ai';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
export async function POST(req: Request) {
const { messages } = await req.json();
const result = streamText({
model: openai('gpt-5-nano'),
messages: convertToModelMessages(messages),
providerOptions: {
openai: {
reasoningSummary: 'detailed', // 'auto' for condensed or 'detailed' for comprehensive
} satisfies OpenAIResponsesProviderOptions,
},
onFinish: ({ request }) => {
console.dir(request.body, { depth: null });
},
});
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,29 @@
import { openai } from '@ai-sdk/openai';
import { saveChat } from '@util/chat-store';
import { convertToModelMessages, streamText, UIMessage } from 'ai';
export async function POST(req: Request) {
const { messages, chatId }: { messages: UIMessage[]; chatId: string } =
await req.json();
const result = streamText({
model: openai('gpt-4o-mini'),
messages: convertToModelMessages(messages),
});
// consume the stream to ensure it runs to completion and triggers onFinish
// even when the client response is aborted (e.g. when the browser tab is closed).
// no await
result.consumeStream({
onError: error => {
console.log('Error during background stream consumption: ', error); // optional error callback
},
});
return result.toUIMessageStreamResponse({
originalMessages: messages,
onFinish: ({ messages }) => {
saveChat({ chatId, messages });
},
});
}

@@ -0,0 +1,43 @@
import { loadStreams } from '@/util/chat-store';
import { createUIMessageStream, JsonToSseTransformStream } from 'ai';
import { after } from 'next/server';
import { createResumableStreamContext } from 'resumable-stream';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
export async function GET(
request: Request,
{ params }: { params: Promise<{ id: string }> },
) {
const { id } = await params;
if (!id) {
return new Response('id is required', { status: 400 });
}
const streamIds = await loadStreams(id);
if (!streamIds.length) {
return new Response(null, { status: 204 });
}
const recentStreamId = streamIds.at(-1);
if (!recentStreamId) {
return new Response(null, { status: 204 });
}
const streamContext = createResumableStreamContext({
waitUntil: after,
});
const resumedStream =
await streamContext.resumeExistingStream(recentStreamId);
if (!resumedStream) {
return new Response(null, { status: 204 });
}
return new Response(resumedStream);
}

@@ -0,0 +1,54 @@
import {
appendMessageToChat,
appendStreamId,
saveChat,
} from '@/util/chat-store';
import { openai } from '@ai-sdk/openai';
import {
convertToModelMessages,
createUIMessageStream,
generateId,
JsonToSseTransformStream,
streamText,
UIMessage,
} from 'ai';
import { after } from 'next/server';
import { createResumableStreamContext } from 'resumable-stream';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
export async function POST(req: Request) {
const { chatId, messages }: { chatId: string; messages: UIMessage[] } =
await req.json();
const streamId = generateId();
const recentUserMessage = messages
.filter(message => message.role === 'user')
.at(-1);
if (!recentUserMessage) {
throw new Error('No recent user message found');
}
await appendMessageToChat({ chatId, message: recentUserMessage });
await appendStreamId({ chatId, streamId });
const result = streamText({
model: openai('gpt-4o'),
messages: convertToModelMessages(messages),
});
return result.toUIMessageStreamResponse({
originalMessages: messages,
onFinish: ({ messages }) => {
saveChat({ chatId, messages });
},
async consumeSseStream({ stream }) {
// send the sse stream into a resumable stream sink as well:
const streamContext = createResumableStreamContext({ waitUntil: after });
await streamContext.createNewResumableStream(streamId, () => stream);
},
});
}

@@ -0,0 +1,34 @@
import { anthropic } from '@ai-sdk/anthropic';
import {
convertToModelMessages,
InferUITool,
streamText,
UIDataTypes,
UIMessage,
} from 'ai';
export type SourcesChatMessage = UIMessage<
never,
UIDataTypes,
{
web_search: InferUITool<
ReturnType<typeof anthropic.tools.webSearch_20250305>
>;
}
>;
export async function POST(req: Request) {
const { messages } = await req.json();
const result = streamText({
model: anthropic('claude-3-5-sonnet-latest'),
tools: {
web_search: anthropic.tools.webSearch_20250305(),
},
messages: convertToModelMessages(messages),
});
return result.toUIMessageStreamResponse({
sendSources: true,
});
}

@@ -0,0 +1,66 @@
import { openai } from '@ai-sdk/openai';
import { convertToModelMessages, streamText, UIDataTypes, UIMessage } from 'ai';
import { z } from 'zod';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
export type StreamingToolCallsMessage = UIMessage<
never,
UIDataTypes,
{
showWeatherInformation: {
input: {
city: string;
weather: string;
temperature: number;
typicalWeather: string;
};
output: string;
};
}
>;
export async function POST(req: Request) {
const { messages } = await req.json();
const result = streamText({
model: openai('gpt-4o'),
messages: convertToModelMessages(messages),
system:
'You are a helpful assistant that answers questions about the weather in a given city. ' +
'You use the showWeatherInformation tool to show the weather information to the user instead of talking about it.',
tools: {
// server-side tool with execute function:
getWeatherInformation: {
description: 'show the weather in a given city to the user',
inputSchema: z.object({ city: z.string() }),
execute: async ({}: { city: string }) => {
const weatherOptions = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy'];
return {
weather:
weatherOptions[Math.floor(Math.random() * weatherOptions.length)],
temperature: Math.floor(Math.random() * 50 - 10),
};
},
},
// client-side tool that displays weather information to the user:
showWeatherInformation: {
description:
'Show the weather information to the user. Always use this tool to tell weather information to the user.',
inputSchema: z.object({
city: z.string(),
weather: z.string(),
temperature: z.number(),
typicalWeather: z
.string()
.describe(
'2-3 sentences about the typical weather in the city during spring.',
),
}),
},
},
});
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,25 @@
import { createUIMessageStreamResponse, simulateReadableStream } from 'ai';
export async function POST(req: Request) {
return createUIMessageStreamResponse({
stream: simulateReadableStream({
initialDelayInMs: 0, // Delay before the first chunk
chunkDelayInMs: 0, // Delay between chunks
chunks: [
{
type: 'start',
},
{
type: 'start-step',
},
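// 5,000 identical text chunks simulate a very long streaming response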
...Array(5000).fill({ type: 'text', value: 'T\n' }),
{
type: 'finish-step',
},
{
type: 'finish',
},
],
}),
});
}

@@ -0,0 +1,108 @@
import { openai } from '@ai-sdk/openai';
import {
convertToModelMessages,
InferUITools,
stepCountIs,
streamText,
tool,
UIDataTypes,
UIMessage,
validateUIMessages,
} from 'ai';
import { z } from 'zod';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
const getWeatherInformationTool = tool({
description: 'show the weather in a given city to the user',
inputSchema: z.object({ city: z.string() }),
async *execute({ city }: { city: string }, { messages }) {
yield { state: 'loading' as const };
// count the number of assistant messages. throw error if 2 or less
const assistantMessageCount = messages.filter(
message => message.role === 'assistant',
).length;
// if (assistantMessageCount <= 2) {
// throw new Error('could not get weather information');
// }
// Add artificial delay of 5 seconds
await new Promise(resolve => setTimeout(resolve, 5000));
const weatherOptions = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy'];
const weather =
weatherOptions[Math.floor(Math.random() * weatherOptions.length)];
yield {
state: 'ready' as const,
temperature: 72,
weather,
};
},
onInputStart: () => {
console.log('onInputStart');
},
onInputDelta: ({ inputTextDelta }) => {
console.log('onInputDelta', inputTextDelta);
},
onInputAvailable: ({ input }) => {
console.log('onInputAvailable', input);
},
});
const askForConfirmationTool = tool({
description: 'Ask the user for confirmation.',
inputSchema: z.object({
message: z.string().describe('The message to ask for confirmation.'),
}),
outputSchema: z.string(),
});
const getLocationTool = tool({
description:
'Get the user location. Always ask for confirmation before using this tool.',
inputSchema: z.object({}),
outputSchema: z.string(),
});
const tools = {
// server-side tool with execute function:
getWeatherInformation: getWeatherInformationTool,
// client-side tool that starts user interaction:
askForConfirmation: askForConfirmationTool,
// client-side tool that is automatically executed on the client:
getLocation: getLocationTool,
} as const;
export type UseChatToolsMessage = UIMessage<
never,
UIDataTypes,
InferUITools<typeof tools>
>;
export async function POST(req: Request) {
const body = await req.json();
const messages = await validateUIMessages<UseChatToolsMessage>({
messages: body.messages,
tools,
});
const result = streamText({
model: openai('gpt-4o'),
messages: convertToModelMessages(messages),
stopWhen: stepCountIs(5), // multi-steps for server-side tools
tools,
});
return result.toUIMessageStreamResponse({
// originalMessages: messages, //add if you want to have correct ids
onFinish: options => {
console.log('onFinish', options);
},
});
}

@@ -0,0 +1,32 @@
import { openai } from '@ai-sdk/openai';
import { stepCountIs, streamText, tool } from 'ai';
import { z } from 'zod';
// Allow streaming responses up to 60 seconds
export const maxDuration = 60;
export async function POST(req: Request) {
// Extract the `prompt` from the body of the request
const { prompt } = await req.json();
const result = streamText({
model: openai('gpt-4-turbo'),
tools: {
weather: tool({
description: 'Get the weather in a location',
inputSchema: z.object({
location: z.string().describe('The location to get the weather for'),
}),
execute: async ({ location }) => ({
location,
temperature: 72 + Math.floor(Math.random() * 21) - 10,
}),
}),
},
stopWhen: stepCountIs(4),
prompt,
});
// Respond with the stream
return result.toUIMessageStreamResponse();
}

@@ -0,0 +1,25 @@
import { createUIMessageStreamResponse, simulateReadableStream } from 'ai';
export async function POST(req: Request) {
return createUIMessageStreamResponse({
stream: simulateReadableStream({
initialDelayInMs: 0, // Delay before the first chunk
chunkDelayInMs: 0, // Delay between chunks
chunks: [
{
type: 'start',
},
{
type: 'start-step',
},
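// 5,000 identical text chunks simulate a very long streaming response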
...Array(5000).fill({ type: 'text', value: 'T\n' }),
{
type: 'finish-step',
},
{
type: 'finish',
},
],
}),
});
}

@@ -0,0 +1,35 @@
import { openai } from '@ai-sdk/openai';
import { streamObject } from 'ai';
import { expenseSchema } from './schema';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
export async function POST(req: Request) {
const { expense }: { expense: string } = await req.json();
const result = streamObject({
model: openai('gpt-4o'),
system:
'You categorize expenses into one of the following categories: ' +
'TRAVEL, MEALS, ENTERTAINMENT, OFFICE SUPPLIES, OTHER. ' +
// provide date (including day of week) for reference:
'The current date is: ' +
new Date()
.toLocaleDateString('en-US', {
year: 'numeric',
month: 'short',
day: '2-digit',
weekday: 'short',
})
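// e.g. "Fri, Sep 26, 2025" becomes "2025-Sep-26 (Fri)" to match the schema's yyyy-mmm-dd format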
.replace(/(\w+), (\w+) (\d+), (\d+)/, '$4-$2-$3 ($1)') +
'. When no date is supplied, use the current date.',
prompt: `Please categorize the following expense: "${expense}"`,
schema: expenseSchema,
onFinish({ object }) {
// save object to database
},
});
return result.toTextStreamResponse();
}

@@ -0,0 +1,21 @@
import { DeepPartial } from 'ai';
import { z } from 'zod';
export const expenseSchema = z.object({
expense: z.object({
category: z
.string()
.describe(
'Category of the expense. Allowed categories: TRAVEL, MEALS, ENTERTAINMENT, OFFICE SUPPLIES, OTHER.',
),
amount: z.number().describe('Amount of the expense in USD.'),
date: z
.string()
.describe('Date of the expense. Format yyyy-mmm-dd, e.g. 1952-Feb-19.'),
details: z.string().describe('Details of the expense.'),
}),
});
export type Expense = z.infer<typeof expenseSchema>['expense'];
export type PartialExpense = DeepPartial<Expense>;

@@ -0,0 +1,18 @@
import { openai } from '@ai-sdk/openai';
import { streamObject } from 'ai';
import { notificationSchema } from './schema';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
export async function POST(req: Request) {
const context = await req.json();
const result = streamObject({
model: openai('gpt-4o'),
prompt: `Generate 3 notifications for a messages app in this context: ${context}`,
schema: notificationSchema,
});
return result.toTextStreamResponse();
}

@@ -0,0 +1,16 @@
import { DeepPartial } from 'ai';
import { z } from 'zod';
// define a schema for the notifications
export const notificationSchema = z.object({
notifications: z.array(
z.object({
name: z.string().describe('Name of a fictional person.'),
message: z.string().describe('Message. Do not use emojis or links.'),
minutesAgo: z.number(),
}),
),
});
// define a type for the partial notifications during generation
export type PartialNotification = DeepPartial<typeof notificationSchema>;

54
app/bedrock/page.tsx Normal file
@@ -0,0 +1,54 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import ChatInput from '@/component/chat-input';
export default function Chat() {
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
transport: new DefaultChatTransport({ api: '/api/bedrock' }),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
{messages.map(m => (
<div key={m.id} className="whitespace-pre-wrap">
{m.role === 'user' ? 'User: ' : 'AI: '}
{m.parts.map(part => {
if (part.type === 'text') {
return part.text;
}
})}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}

@@ -0,0 +1,15 @@
'use server';
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';
import { createStreamableValue } from '@ai-sdk/rsc';
export async function generateCompletion(prompt: string) {
const result = streamText({
model: openai('gpt-4-turbo'),
maxOutputTokens: 2000,
prompt,
});
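// wrap the text stream in a streamable value that the client can read incrementally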
return createStreamableValue(result.textStream).value;
}

@@ -0,0 +1,41 @@
'use client';
import { readStreamableValue } from '@ai-sdk/rsc';
import { useState } from 'react';
import { generateCompletion } from './generate-completion';
export default function Chat() {
const [input, setInput] = useState('');
const [completion, setCompletion] = useState('');
const handleInputChange = (event: React.ChangeEvent<HTMLInputElement>) => {
setInput(event.target.value);
};
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
<h4 className="pb-4 text-xl font-bold text-gray-900 md:text-xl">
RSC Completion Example
</h4>
{completion}
<form
onSubmit={async e => {
e.preventDefault();
const streamableCompletion = await generateCompletion(input);
for await (const text of readStreamableValue(streamableCompletion)) {
setCompletion(text ?? '');
}
}}
>
<input
className="fixed bottom-0 w-full max-w-md p-2 mb-8 border border-gray-300 rounded shadow-xl"
value={input}
placeholder="Say something..."
onChange={handleInputChange}
/>
</form>
</div>
);
}

50
app/completion/page.tsx Normal file
@@ -0,0 +1,50 @@
'use client';
import { useCompletion } from '@ai-sdk/react';
export default function Page() {
const {
completion,
input,
handleInputChange,
handleSubmit,
error,
isLoading,
stop,
} = useCompletion();
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
<h4 className="pb-4 text-xl font-bold text-gray-900 md:text-xl">
useCompletion Example
</h4>
{error && (
<div className="fixed top-0 left-0 w-full p-4 text-center text-white bg-red-500">
{error.message}
</div>
)}
{isLoading && (
<div className="mt-4 text-gray-500">
<div>Loading...</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{completion}
<form onSubmit={handleSubmit}>
<input
className="fixed bottom-0 w-full max-w-md p-2 mb-8 border border-gray-300 rounded shadow-xl"
value={input}
placeholder="Say something..."
onChange={handleInputChange}
disabled={isLoading}
/>
</form>
</div>
);
}

@@ -0,0 +1,85 @@
'use client';
import ChatInput from '@/component/chat-input';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import { ToolsMessage } from '../api/dynamic-tools/route';
export default function Chat() {
const { messages, sendMessage, status } = useChat<ToolsMessage>({
transport: new DefaultChatTransport({ api: '/api/dynamic-tools' }),
});
return (
<div className="flex flex-col py-24 mx-auto w-full max-w-md stretch">
{messages?.map(message => (
<div key={message.id} className="whitespace-pre-wrap">
<strong>{`${message.role}: `}</strong>
{message.parts.map((part, index) => {
switch (part.type) {
case 'text':
return <div key={index}>{part.text}</div>;
case 'step-start':
return index > 0 ? (
<div key={index} className="text-gray-500">
<hr className="my-2 border-gray-300" />
</div>
) : null;
case 'dynamic-tool': {
switch (part.state) {
case 'input-streaming':
case 'input-available':
case 'output-available':
return (
<pre key={index}>{JSON.stringify(part, null, 2)}</pre>
);
case 'output-error':
return (
<div key={index} className="text-red-500">
Error: {part.errorText}
</div>
);
}
}
case 'tool-getWeatherInformation': {
switch (part.state) {
// example of pre-rendering streaming tool calls:
case 'input-streaming':
return (
<pre key={index}>
{JSON.stringify(part.input, null, 2)}
</pre>
);
case 'input-available':
return (
<div key={index} className="text-gray-500">
Getting weather information for {part.input.city}...
</div>
);
case 'output-available':
return (
<div key={index} className="text-gray-500">
Weather in {part.input.city}: {part.output}
</div>
);
case 'output-error':
return (
<div key={index} className="text-red-500">
Error: {part.errorText}
</div>
);
}
}
}
})}
<br />
</div>
))}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}

BIN
app/favicon.ico Normal file (binary, 25 KiB; not shown)


@ -0,0 +1,89 @@
'use client';
import { useState } from 'react';
export default function Page() {
const [inputValue, setInputValue] = useState('');
const [imageSrc, setImageSrc] = useState<string | null>(null);
const [isLoading, setIsLoading] = useState(false);
const [error, setError] = useState<string | null>(null);
const handleSubmit = async (event: React.FormEvent<HTMLFormElement>) => {
event.preventDefault();
setIsLoading(true);
setImageSrc(null);
setError(null);
try {
const response = await fetch('/api/generate-image', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ prompt: inputValue }),
});
if (response.ok) {
const image = await response.json();
setImageSrc(`data:image/png;base64,${image}`);
return;
}
setError(await response.text());
    } catch (err) {
      setError(err instanceof Error ? err.message : String(err));
    } finally {
setIsLoading(false);
}
};
return (
<div className="flex flex-col items-center min-h-screen p-24">
<div className="space-y-2">
<h2 className="text-3xl font-bold tracking-tighter text-center sm:text-4xl md:text-5xl">
Image Generator
</h2>
<p className="max-w-[600px] text-gray-500 md:text-xl/relaxed lg:text-base/relaxed xl:text-xl/relaxed dark:text-gray-400">
Generate images.
</p>
</div>
<div className="w-full max-w-sm pt-6 pb-8 space-y-2">
<form className="flex space-x-2" onSubmit={handleSubmit}>
<input
className="flex-1 max-w-lg px-3 py-2 border border-gray-300 rounded-md focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent disabled:bg-gray-100 disabled:cursor-not-allowed"
placeholder="Describe the image"
type="text"
value={inputValue}
onChange={e => setInputValue(e.target.value)}
disabled={isLoading}
/>
<button
type="submit"
disabled={isLoading}
className="px-4 py-2 text-white bg-blue-500 rounded-md hover:bg-blue-600 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2 disabled:bg-blue-300 disabled:cursor-not-allowed"
>
Generate
</button>
</form>
</div>
{error && (
<div className="p-4 mb-4 text-red-700 bg-red-100 border border-red-400 rounded-lg">
{error}
</div>
)}
<div className="w-[512px] h-[512px] space-y-2">
{isLoading ? (
<div className="h-[512px] w-[512px] animate-pulse bg-gray-200 rounded-lg" />
) : (
imageSrc && (
<img
alt="Generated Image"
className="object-cover overflow-hidden rounded-lg"
src={imageSrc}
/>
)
)}
</div>
</div>
);
}
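
The page above POSTs to an `/api/generate-image` route that is not shown in this excerpt. A plausible sketch using the AI SDK's experimental image generation helper, assuming the route returns the base64 payload as JSON (to match the `response.json()` call above); the model choice is illustrative:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

export async function POST(req: Request) {
  const { prompt } = await req.json();

  // Model choice is illustrative; any AI SDK image model would work here.
  const { image } = await generateImage({
    model: openai.image('dall-e-3'),
    prompt,
  });

  // The client builds a `data:image/png;base64,...` URL from this string.
  return Response.json(image.base64);
}
```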

3
app/globals.css Normal file

@ -0,0 +1,3 @@
@tailwind base;
@tailwind components;
@tailwind utilities;

18
app/layout.tsx Normal file

@ -0,0 +1,18 @@
import './globals.css';
export const metadata = {
title: 'AI SDK - Next.js OpenAI Examples',
description: 'Examples of using the AI SDK with Next.js and OpenAI.',
};
export default function RootLayout({
children,
}: {
children: React.ReactNode;
}) {
return (
<html lang="en">
<body>{children}</body>
</html>
);
}

49
app/mcp-zapier/page.tsx Normal file

@ -0,0 +1,49 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport, isToolUIPart } from 'ai';
import { useState } from 'react';
export default function Page() {
const [input, setInput] = useState('');
const { messages, sendMessage } = useChat({
transport: new DefaultChatTransport({ api: '/api/mcp-zapier' }),
});
return (
<div className="flex flex-col items-center justify-end h-screen gap-4">
<h1 className="p-4 text-xl">My AI Assistant</h1>
<div className="flex flex-col gap-2 p-4 mt-auto">
{messages.map(message => (
<div key={message.id}>
<strong>{`${message.role}: `}</strong>
{message.parts.map((part, index) => {
if (part.type === 'text') {
return <span key={index}>{part.text}</span>;
} else if (isToolUIPart(part)) {
return <pre key={index}>{JSON.stringify(part, null, 2)}</pre>;
}
})}
</div>
))}
</div>
<div className="flex flex-col items-center gap-2 p-4">
<textarea
value={input}
onChange={e => setInput(e.target.value)}
placeholder="Start chatting"
className="h-32 p-2 border-2 border-gray-300 rounded-md w-96"
/>
<button
className="w-full p-2 px-4 text-white bg-blue-500 rounded-md"
type="button"
onClick={() => sendMessage({ text: input })}
>
Send
</button>
</div>
</div>
);
}
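
The `/api/mcp-zapier` route used by this page is not included in this excerpt. A hedged sketch of what it might look like, assuming the Zapier MCP server URL is provided through an environment variable (the variable name and model are assumptions):

```ts
import { openai } from '@ai-sdk/openai';
import {
  convertToModelMessages,
  experimental_createMCPClient,
  stepCountIs,
  streamText,
} from 'ai';

export async function POST(req: Request) {
  const { messages } = await req.json();

  // Assumed env var holding the Zapier MCP SSE endpoint.
  const client = await experimental_createMCPClient({
    transport: { type: 'sse', url: process.env.ZAPIER_MCP_URL! },
  });

  const result = streamText({
    model: openai('gpt-4o'),
    tools: await client.tools(),
    stopWhen: stepCountIs(5),
    messages: convertToModelMessages(messages),
    onFinish: async () => {
      await client.close();
    },
  });

  return result.toUIMessageStreamResponse();
}
```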

47
app/mcp/chat/route.ts Normal file

@ -0,0 +1,47 @@
import { openai } from '@ai-sdk/openai';
import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js';
import {
convertToModelMessages,
experimental_createMCPClient,
stepCountIs,
streamText,
} from 'ai';
export async function POST(req: Request) {
const url = new URL('http://localhost:3000/mcp/server');
const transport = new StreamableHTTPClientTransport(url);
const [client, { messages }] = await Promise.all([
experimental_createMCPClient({
transport,
}),
req.json(),
]);
try {
const tools = await client.tools();
const result = streamText({
model: openai('gpt-4o-mini'),
tools,
stopWhen: stepCountIs(5),
onStepFinish: async ({ toolResults }) => {
console.log(`STEP RESULTS: ${JSON.stringify(toolResults, null, 2)}`);
},
system: 'You are a helpful chatbot capable of basic arithmetic problems',
messages: convertToModelMessages(messages),
onFinish: async () => {
await client.close();
},
    // Optional: enables immediate cleanup of resources, but the connection will not be retained for retries:
// onError: async error => {
// await client.close();
// },
});
return result.toUIMessageStreamResponse();
} catch (error) {
console.error(error);
return Response.json({ error: 'Unexpected error' }, { status: 500 });
}
}

52
app/mcp/page.tsx Normal file

@ -0,0 +1,52 @@
'use client';
import ChatInput from '@/component/chat-input';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
export default function Chat() {
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
transport: new DefaultChatTransport({ api: '/mcp/chat' }),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
{messages.map(m => (
<div key={m.id} className="whitespace-pre-wrap">
{m.role === 'user' ? 'User: ' : 'AI: '}
{m.parts
.map(part => (part.type === 'text' ? part.text : ''))
.join('')}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}

16
app/mcp/server/route.ts Normal file

@ -0,0 +1,16 @@
import { mcpApiHandler } from '@/util/mcp/handler';
import { createServerResponseAdapter } from '@/util/mcp/server-response';
import { NextRequest } from 'next/server';
// This route (/mcp/server) serves the MCP server. It is called by the /mcp/chat route, which useChat uses to connect to the server and fetch its tools:
const requestHandler = (req: NextRequest) => {
return createServerResponseAdapter(req.signal, res => {
mcpApiHandler(req, res);
});
};
export {
requestHandler as DELETE,
requestHandler as GET,
requestHandler as POST,
};
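
The `mcpApiHandler` imported from `@/util/mcp/handler` is not shown in this excerpt. Since the `/mcp/chat` route's system prompt advertises basic arithmetic, here is a sketch of the kind of MCP server it presumably wraps, using the official SDK (the server name and tool are assumptions):

```ts
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { z } from 'zod';

// Hypothetical server; the real handler also connects it to the HTTP
// transport driven by createServerResponseAdapter.
const server = new McpServer({ name: 'example-server', version: '1.0.0' });

server.tool(
  'add',
  'Add two numbers',
  { a: z.number(), b: z.number() },
  async ({ a, b }) => ({
    content: [{ type: 'text', text: String(a + b) }],
  }),
);
```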

51
app/page.tsx Normal file

@ -0,0 +1,51 @@
'use client';
import { useChat } from '@ai-sdk/react';
import ChatInput from '@/component/chat-input';
export default function Chat() {
const { error, status, sendMessage, messages, regenerate, stop } = useChat();
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
{messages.map(m => (
<div key={m.id} className="whitespace-pre-wrap">
{m.role === 'user' ? 'User: ' : 'AI: '}
{m.parts.map(part => {
if (part.type === 'text') {
return part.text;
}
})}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,26 @@
'use server';
import { openai } from '@ai-sdk/openai';
import { streamObject } from 'ai';
import { createStreamableValue } from '@ai-sdk/rsc';
import { PartialNotification, notificationSchema } from './schema';
export async function generateNotifications(context: string) {
const notificationsStream = createStreamableValue<PartialNotification>();
const result = streamObject({
model: openai('gpt-4-turbo'),
prompt: `Generate 3 notifications for a messages app in this context: ${context}`,
schema: notificationSchema,
});
try {
for await (const partialObject of result.partialObjectStream) {
notificationsStream.update(partialObject);
}
} finally {
notificationsStream.done();
}
return notificationsStream.value;
}


@ -0,0 +1,67 @@
'use client';
import { StreamableValue, useStreamableValue } from '@ai-sdk/rsc';
import { useState } from 'react';
import { generateNotifications } from './actions';
import { PartialNotification } from './schema';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
// page component with a button to generate notifications
export default function Page() {
const [notificationStream, setNotificationStream] =
useState<StreamableValue<PartialNotification> | null>(null);
return (
<div className="flex flex-col items-center min-h-screen p-4 m-4">
<button
className="px-4 py-2 mt-4 text-white bg-blue-500 rounded-md"
onClick={async () => {
setNotificationStream(
await generateNotifications('Messages during finals week.'),
);
}}
>
Generate notifications
</button>
{notificationStream && (
<NotificationsView notificationStream={notificationStream} />
)}
</div>
);
}
// separate component that receives the streamable value and displays the notifications:
function NotificationsView({
notificationStream,
}: {
notificationStream: StreamableValue<PartialNotification>;
}) {
const [data, pending, error] = useStreamableValue(notificationStream);
return (
<div className="flex flex-col gap-4 mt-4">
{data?.notifications?.map((notification, index) => (
<div
className="flex items-start gap-4 p-4 bg-gray-100 rounded-md dark:bg-gray-800"
key={index}
>
<div className="flex-1 space-y-1">
<div className="flex items-center justify-between">
<p className="font-medium">{notification?.name}</p>
<p className="text-sm text-gray-500 dark:text-gray-400">
{notification?.minutesAgo}
{notification?.minutesAgo != null ? ' minutes ago' : ''}
</p>
</div>
<p className="text-gray-700 dark:text-gray-300">
{notification?.message}
</p>
</div>
</div>
))}
</div>
);
}


@ -0,0 +1,16 @@
import { DeepPartial } from 'ai';
import { z } from 'zod';
// define a schema for the notifications
export const notificationSchema = z.object({
notifications: z.array(
z.object({
name: z.string().describe('Name of a fictional person.'),
message: z.string().describe('Message. Do not use emojis or links.'),
minutesAgo: z.number(),
}),
),
});
// define a type for the partial notifications during generation
export type PartialNotification = DeepPartial<typeof notificationSchema>;

113
app/stream-ui/actions.tsx Normal file

@ -0,0 +1,113 @@
import { openai } from '@ai-sdk/openai';
import { ModelMessage, generateId } from 'ai';
import {
createAI,
createStreamableValue,
getMutableAIState as $getMutableAIState,
streamUI,
} from '@ai-sdk/rsc';
import { Message, BotMessage } from './message';
import { z } from 'zod';
type AIProviderNoActions = ReturnType<typeof createAI<AIState, UIState>>;
// typed wrapper *without* actions defined to avoid circular dependencies
const getMutableAIState = $getMutableAIState<AIProviderNoActions>;
// mock function to fetch weather data
const fetchWeatherData = async (location: string) => {
await new Promise(resolve => setTimeout(resolve, 1000));
return { temperature: '72°F' };
};
export async function submitUserMessage(content: string) {
'use server';
const aiState = getMutableAIState();
aiState.update({
...aiState.get(),
messages: [
...aiState.get().messages,
{ id: generateId(), role: 'user', content },
],
});
let textStream: undefined | ReturnType<typeof createStreamableValue<string>>;
let textNode: React.ReactNode;
const result = await streamUI({
model: openai('gpt-4-turbo'),
initial: <Message role="assistant">Working on that...</Message>,
system: 'You are a weather assistant.',
messages: aiState
.get()
.messages.map(({ role, content }) => ({ role, content }) as ModelMessage),
text: ({ content, done, delta }) => {
if (!textStream) {
textStream = createStreamableValue('');
textNode = <BotMessage textStream={textStream.value} />;
}
if (done) {
textStream.done();
aiState.update({
...aiState.get(),
messages: [
...aiState.get().messages,
{ id: generateId(), role: 'assistant', content },
],
});
} else {
textStream.append(delta);
}
return textNode;
},
tools: {
get_current_weather: {
description: 'Get the current weather',
inputSchema: z.object({
location: z.string(),
}),
generate: async function* ({ location }) {
yield (
<Message role="assistant">Loading weather for {location}</Message>
);
const { temperature } = await fetchWeatherData(location);
return (
<Message role="assistant">
<span>
The temperature in {location} is{' '}
<span className="font-semibold">{temperature}</span>
</span>
</Message>
);
},
},
},
onFinish: event => {
// your own logic, e.g. for saving the chat history or recording usage
console.log(`[onFinish]: ${JSON.stringify(event, null, 2)}`);
},
});
return {
id: generateId(),
display: result.value,
};
}
export type ClientMessage = ModelMessage & {
id: string;
};
export type AIState = {
chatId: string;
messages: ClientMessage[];
};
export type UIState = {
id: string;
display: React.ReactNode;
}[];

9
app/stream-ui/ai.ts Normal file

@ -0,0 +1,9 @@
import { createAI } from '@ai-sdk/rsc';
import { AIState, submitUserMessage, UIState } from './actions';
import { generateId } from 'ai';
export const AI = createAI({
actions: { submitUserMessage },
initialUIState: [] as UIState,
initialAIState: { chatId: generateId(), messages: [] } as AIState,
});

5
app/stream-ui/layout.tsx Normal file

@ -0,0 +1,5 @@
import { AI } from './ai';
export default function Layout({ children }: { children: React.ReactNode }) {
return <AI>{children}</AI>;
}

25
app/stream-ui/message.tsx Normal file

@ -0,0 +1,25 @@
'use client';
import { StreamableValue, useStreamableValue } from '@ai-sdk/rsc';
export function BotMessage({ textStream }: { textStream: StreamableValue }) {
const [text] = useStreamableValue(textStream);
return <Message role="assistant">{text}</Message>;
}
export function Message({
role,
children,
}: {
role: string;
children: React.ReactNode;
}) {
return (
<div className="flex flex-col gap-1 border-b p-2">
<div className="flex flex-row justify-between">
<div className="text-sm text-zinc-500">{role}</div>
</div>
{children}
</div>
);
}

61
app/stream-ui/page.tsx Normal file

@ -0,0 +1,61 @@
'use client';
import { Fragment, useState } from 'react';
import type { AI } from './ai';
import { useActions } from '@ai-sdk/rsc';
import { useAIState, useUIState } from '@ai-sdk/rsc';
import { generateId } from 'ai';
import { Message } from './message';
export default function Home() {
const [input, setInput] = useState('');
const [messages, setMessages] = useUIState<typeof AI>();
const { submitUserMessage } = useActions<typeof AI>();
const handleSubmission = async () => {
setMessages(currentMessages => [
...currentMessages,
{
id: generateId(),
display: <Message role="user">{input}</Message>,
},
]);
const response = await submitUserMessage(input);
setMessages(currentMessages => [...currentMessages, response]);
setInput('');
};
return (
<div className="flex flex-col-reverse">
<div className="flex flex-row gap-2 p-2 bg-zinc-100 w-full">
<input
className="bg-zinc-100 w-full p-2 outline-none"
value={input}
onChange={event => setInput(event.target.value)}
placeholder="Ask a question"
onKeyDown={event => {
if (event.key === 'Enter') {
handleSubmission();
}
}}
/>
<button
className="p-2 bg-zinc-900 text-zinc-100 rounded-md"
onClick={handleSubmission}
>
Send
</button>
</div>
<div className="flex flex-col h-[calc(100dvh-56px)] overflow-y-scroll">
<div>
{messages.map(message => (
<Fragment key={message.id}>{message.display}</Fragment>
))}
</div>
</div>
</div>
);
}

58
app/test-cohere/page.tsx Normal file

@ -0,0 +1,58 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import ChatInput from '@/component/chat-input';
export default function TestCohere() {
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
transport: new DefaultChatTransport({ api: '/api/chat-cohere' }),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
<h1 className="mb-4 text-xl font-bold">
Cohere Block-Based Streaming Test
</h1>
{messages.map(m => (
<div key={m.id} className="whitespace-pre-wrap">
{m.role === 'user' ? 'User: ' : 'AI: '}
{m.parts.map(part => {
if (part.type === 'text') {
return part.text;
}
})}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}

58
app/test-google/page.tsx Normal file

@ -0,0 +1,58 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import ChatInput from '@/component/chat-input';
export default function TestGoogle() {
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
transport: new DefaultChatTransport({ api: '/api/chat-google' }),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
<h1 className="mb-4 text-xl font-bold">
Google Block-Based Streaming Test
</h1>
{messages.map(m => (
<div key={m.id} className="whitespace-pre-wrap">
{m.role === 'user' ? 'User: ' : 'AI: '}
{m.parts.map(part => {
if (part.type === 'text') {
return part.text;
}
})}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}

58
app/test-groq/page.tsx Normal file

@ -0,0 +1,58 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import ChatInput from '@/component/chat-input';
export default function TestGroq() {
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
transport: new DefaultChatTransport({ api: '/api/chat-groq' }),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
<h1 className="mb-4 text-xl font-bold">
Groq Block-Based Streaming Test
</h1>
{messages.map(m => (
<div key={m.id} className="whitespace-pre-wrap">
{m.role === 'user' ? 'User: ' : 'AI: '}
{m.parts.map(part => {
if (part.type === 'text') {
return part.text;
}
})}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,70 @@
'use client';
import ChatInput from '@/component/chat-input';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import { UseChatToolsMessage } from '../api/test-invalid-tool-call/route';
export default function Chat() {
const { messages, sendMessage, status } = useChat<UseChatToolsMessage>({
transport: new DefaultChatTransport({
api: '/api/test-invalid-tool-call',
}),
});
return (
<div className="flex flex-col py-24 mx-auto w-full max-w-md stretch">
{messages?.map(message => (
<div key={message.id} className="whitespace-pre-wrap">
<strong>{`${message.role}: `}</strong>
{message.parts.map((part, index) => {
switch (part.type) {
case 'text':
return <div key={index}>{part.text}</div>;
case 'step-start':
return index > 0 ? (
<div key={index} className="text-gray-500">
<hr className="my-2 border-gray-300" />
</div>
) : null;
case 'tool-getWeatherInformation': {
switch (part.state) {
// example of pre-rendering streaming tool calls:
case 'input-streaming':
return (
<pre key={index}>
{JSON.stringify(part.input, null, 2)}
</pre>
);
case 'input-available':
return (
<div key={index} className="text-gray-500">
Getting weather information for {part.input.city}...
</div>
);
case 'output-available':
return (
<div key={index} className="text-gray-500">
Weather in {part.input.city}: {part.output}
</div>
);
case 'output-error':
return (
<div key={index} className="text-red-500">
Error: {part.errorText}
</div>
);
}
}
}
})}
<br />
</div>
))}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}

58
app/test-mistral/page.tsx Normal file

@ -0,0 +1,58 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import ChatInput from '@/component/chat-input';
export default function TestMistral() {
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
transport: new DefaultChatTransport({ api: '/api/chat-mistral' }),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
<h1 className="mb-4 text-xl font-bold">
Mistral Block-Based Streaming Test
</h1>
{messages.map(m => (
<div key={m.id} className="whitespace-pre-wrap">
{m.role === 'user' ? 'User: ' : 'AI: '}
{m.parts.map(part => {
if (part.type === 'text') {
return part.text;
}
})}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,38 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import ChatInput from '@/component/chat-input';
import { OpenAICodeInterpreterMessage } from '@/app/api/chat-openai-code-interpreter/route';
import CodeInterpreterView from '@/component/openai-code-interpreter-view';
export default function TestOpenAICodeInterpreter() {
const { status, sendMessage, messages } =
useChat<OpenAICodeInterpreterMessage>({
transport: new DefaultChatTransport({
api: '/api/chat-openai-code-interpreter',
}),
});
return (
<div className="flex flex-col py-24 mx-auto w-full max-w-md stretch">
<h1 className="mb-4 text-xl font-bold">OpenAI Code Interpreter Test</h1>
{messages.map(message => (
<div key={message.id} className="whitespace-pre-wrap">
{message.role === 'user' ? 'User: ' : 'AI: '}
{message.parts.map((part, index) => {
switch (part.type) {
case 'text':
return <div key={index}>{part.text}</div>;
case 'tool-code_interpreter':
return <CodeInterpreterView key={index} invocation={part} />;
}
})}
</div>
))}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,37 @@
'use client';
import ChatInput from '@/component/chat-input';
import FileSearchView from '@/component/openai-file-search-view';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import { OpenAIFileSearchMessage } from '../api/chat-openai-file-search/route';
export default function TestOpenAIFileSearch() {
const { status, sendMessage, messages } = useChat<OpenAIFileSearchMessage>({
transport: new DefaultChatTransport({
api: '/api/chat-openai-file-search',
}),
});
return (
<div className="flex flex-col py-24 mx-auto w-full max-w-md stretch">
<h1 className="mb-4 text-xl font-bold">OpenAI File Search Test</h1>
{messages.map(message => (
<div key={message.id} className="whitespace-pre-wrap">
{message.role === 'user' ? 'User: ' : 'AI: '}
{message.parts.map((part, index) => {
switch (part.type) {
case 'text':
return <div key={index}>{part.text}</div>;
case 'tool-file_search':
return <FileSearchView key={index} invocation={part} />;
}
})}
</div>
))}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,38 @@
'use client';
import { OpenAIImageGenerationMessage } from '@/app/api/chat-openai-image-generation/route';
import ChatInput from '@/component/chat-input';
import ImageGenerationView from '@/component/openai-image-generation-view';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
export default function TestOpenAIImageGeneration() {
const { status, sendMessage, messages } =
useChat<OpenAIImageGenerationMessage>({
transport: new DefaultChatTransport({
api: '/api/chat-openai-image-generation',
}),
});
return (
<div className="flex flex-col py-24 mx-auto w-full max-w-md stretch">
<h1 className="mb-4 text-xl font-bold">OpenAI Image Generation Test</h1>
{messages.map(message => (
<div key={message.id} className="whitespace-pre-wrap">
{message.role === 'user' ? 'User: ' : 'AI: '}
{message.parts.map((part, index) => {
switch (part.type) {
case 'text':
return <div key={index}>{part.text}</div>;
case 'tool-image_generation':
return <ImageGenerationView key={index} invocation={part} />;
}
})}
</div>
))}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,69 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import ChatInput from '@/component/chat-input';
export default function TestOpenAIResponses() {
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
transport: new DefaultChatTransport({ api: '/api/chat-openai-responses' }),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
<h1 className="mb-4 text-xl font-bold">
OpenAI Responses Block-Based Streaming Test
</h1>
{messages.map(m => (
<div key={m.id} className="whitespace-pre-wrap mb-4">
<div className="font-semibold mb-1">
{m.role === 'user' ? 'User:' : 'AI:'}
</div>
{m.parts.map((part, index) => {
if (part.type === 'text') {
return <div key={index}>{part.text}</div>;
} else if (part.type === 'reasoning') {
return (
<div
key={index}
className="mt-2 p-2 bg-blue-50 border-l-2 border-blue-300 text-blue-800 text-sm"
>
<strong>Reasoning:</strong> {part.text}
</div>
);
}
})}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,103 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import ChatInput from '@/component/chat-input';
import { OpenAIWebSearchMessage } from '@/app/api/chat-openai-web-search/route';
export default function TestOpenAIWebSearch() {
const { error, status, sendMessage, messages, regenerate, stop } =
useChat<OpenAIWebSearchMessage>({
transport: new DefaultChatTransport({
api: '/api/chat-openai-web-search',
}),
});
return (
<div className="flex flex-col py-24 mx-auto w-full max-w-md stretch">
<h1 className="mb-4 text-xl font-bold">OpenAI Web Search Test</h1>
{messages.map(message => (
<div key={message.id} className="whitespace-pre-wrap">
{message.role === 'user' ? 'User: ' : 'AI: '}
{message.parts.map((part, index) => {
if (part.type === 'text') {
return <div key={index}>{part.text}</div>;
}
if (part.type === 'tool-web_search') {
if (part.state === 'input-available') {
return (
<pre
key={index}
className="overflow-auto p-2 text-sm bg-gray-100 rounded"
>
{JSON.stringify(part.input, null, 2)}
</pre>
);
}
if (part.state === 'output-available') {
return (
<pre
key={index}
className="overflow-auto p-2 text-sm bg-gray-100 rounded"
>
{JSON.stringify(part.input, null, 2)}
{`\n\nDONE - Web search completed`}
</pre>
);
}
}
if (part.type === 'source-url') {
return (
<span key={index}>
[
<a
href={part.url}
target="_blank"
rel="noopener noreferrer"
className="text-sm font-bold text-blue-500 hover:underline"
>
{part.title ?? new URL(part.url).hostname}
</a>
]
</span>
);
}
return null;
})}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 rounded-md border border-blue-500"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 rounded-md border border-blue-500"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,58 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import ChatInput from '@/component/chat-input';
export default function TestPerplexity() {
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
transport: new DefaultChatTransport({ api: '/api/chat-perplexity' }),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
<h1 className="mb-4 text-xl font-bold">
Perplexity Block-Based Streaming Test
</h1>
{messages.map(m => (
<div key={m.id} className="whitespace-pre-wrap">
{m.role === 'user' ? 'User: ' : 'AI: '}
{m.parts.map(part => {
if (part.type === 'text') {
return part.text;
}
})}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}

56
app/test-xai/page.tsx Normal file

@ -0,0 +1,56 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import ChatInput from '@/component/chat-input';
export default function TestXai() {
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
transport: new DefaultChatTransport({ api: '/api/chat-xai' }),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
      <h1 className="mb-4 text-xl font-bold">xAI Block-Based Streaming Test</h1>
{messages.map(m => (
<div key={m.id} className="whitespace-pre-wrap">
{m.role === 'user' ? 'User: ' : 'AI: '}
{m.parts.map(part => {
if (part.type === 'text') {
return part.text;
}
})}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,103 @@
'use client';
/* eslint-disable @next/next/no-img-element */
import { useChat } from '@ai-sdk/react';
import { useRef, useState } from 'react';
export default function Page() {
const { messages, sendMessage, status } = useChat();
const [input, setInput] = useState('');
const [files, setFiles] = useState<FileList | undefined>(undefined);
const fileInputRef = useRef<HTMLInputElement>(null);
return (
<div className="flex flex-col gap-2">
<div className="flex flex-col gap-2 p-2">
{messages.map(message => (
<div key={message.id} className="flex flex-row gap-2">
<div className="flex-shrink-0 w-24 text-zinc-500">{`${message.role}: `}</div>
<div className="flex flex-col gap-2">
{message.parts.map((part, index) => {
if (part.type === 'text') {
return <div key={index}>{part.text}</div>;
}
if (
part.type === 'file' &&
part.mediaType?.startsWith('image/')
) {
return (
<div key={index}>
<img
className="rounded-md w-60"
src={part.url}
alt={part.filename}
/>
</div>
);
}
})}
</div>
</div>
))}
</div>
<form
onSubmit={async event => {
event.preventDefault();
sendMessage({ text: input, files });
setFiles(undefined);
setInput('');
if (fileInputRef.current) {
fileInputRef.current.value = '';
}
}}
className="fixed bottom-0 flex flex-col w-full gap-2 p-2"
>
<div className="fixed flex flex-row items-end gap-2 right-2 bottom-14">
{files
? Array.from(files).map(attachment => {
const { type } = attachment;
if (type.startsWith('image/')) {
return (
<div key={attachment.name}>
<img
className="w-24 rounded-md"
src={URL.createObjectURL(attachment)}
alt={attachment.name}
/>
<span className="text-sm text-zinc-500">
{attachment.name}
</span>
</div>
);
}
})
: ''}
</div>
<input
type="file"
onChange={event => {
if (event.target.files) {
setFiles(event.target.files);
}
}}
multiple
ref={fileInputRef}
/>
<input
value={input}
placeholder="Send message..."
onChange={e => setInput(e.target.value)}
className="w-full p-2 bg-zinc-100"
disabled={status !== 'ready'}
/>
</form>
</div>
);
}


@ -0,0 +1,119 @@
'use client';
/* eslint-disable @next/next/no-img-element */
import { useChat } from '@ai-sdk/react';
import { upload } from '@vercel/blob/client';
import { FileUIPart } from 'ai';
import { useRef, useState } from 'react';
export default function Page() {
const [input, setInput] = useState('');
const { messages, sendMessage, status } = useChat();
const [files, setFiles] = useState<FileUIPart[]>([]);
const [isUploading, setIsUploading] = useState<boolean>(false);
const fileInputRef = useRef<HTMLInputElement>(null);
return (
<div className="flex flex-col gap-2">
<div className="flex flex-col gap-2 p-2">
{messages.map(message => (
<div key={message.id} className="flex flex-row gap-2">
<div className="flex-shrink-0 w-24 text-zinc-500">{`${message.role}: `}</div>
<div className="flex flex-col gap-2">
{message.parts.map((part, index) => {
if (part.type === 'text') {
return <div key={index}>{part.text}</div>;
}
if (
part.type === 'file' &&
part.mediaType?.startsWith('image/')
) {
return (
<div key={index}>
<img
className="rounded-md w-60"
src={part.url}
alt={part.filename}
/>
</div>
);
}
})}
</div>
</div>
))}
</div>
<form
        onSubmit={event => {
          event.preventDefault();
          if (isUploading) {
alert('Please wait for the files to finish uploading.');
return;
}
sendMessage({ text: input, files });
setInput('');
setFiles([]);
if (fileInputRef.current) {
fileInputRef.current.value = '';
}
}}
className="fixed bottom-0 flex flex-col w-full gap-2 p-2"
>
<div className="fixed flex flex-row items-end gap-2 right-2 bottom-14">
          {files
.filter(file => file.mediaType?.startsWith('image/'))
.map(file => (
<div key={file.url}>
<img
className="w-24 rounded-md"
src={file.url}
alt={file.filename}
/>
<span className="text-sm text-zinc-500">{file.filename}</span>
</div>
))}
</div>
<input
type="file"
onChange={async event => {
if (event.target.files) {
setIsUploading(true);
for (const file of Array.from(event.target.files)) {
const blob = await upload(file.name, file, {
access: 'public',
handleUploadUrl: '/api/files',
});
setFiles(prevFiles => [
...prevFiles,
{
type: 'file' as const,
filename: file.name,
mediaType: blob.contentType ?? '*/*',
url: blob.url,
},
]);
}
setIsUploading(false);
}
}}
multiple
ref={fileInputRef}
/>
<input
value={input}
placeholder="Send message..."
onChange={e => setInput(e.target.value)}
className="w-full p-2 bg-zinc-100"
disabled={status !== 'ready'}
/>
</form>
</div>
);
}
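
Client-side uploads with `@vercel/blob/client` require a token-exchange route at the `handleUploadUrl` passed above (`/api/files`), which is not part of this excerpt. A sketch assuming it uses the library's `handleUpload` helper; the allowed content types are illustrative:

```ts
import { handleUpload, type HandleUploadBody } from '@vercel/blob/client';

export async function POST(request: Request): Promise<Response> {
  const body = (await request.json()) as HandleUploadBody;

  const jsonResponse = await handleUpload({
    body,
    request,
    // Issues the client token; restrict content types as needed.
    onBeforeGenerateToken: async () => ({
      allowedContentTypes: ['image/jpeg', 'image/png', 'text/plain'],
    }),
    onUploadCompleted: async ({ blob }) => {
      console.log('blob upload completed', blob.url);
    },
  });

  return Response.json(jsonResponse);
}
```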


@ -0,0 +1,114 @@
'use client';
/* eslint-disable @next/next/no-img-element */
import { useChat } from '@ai-sdk/react';
import { useRef, useState } from 'react';
export default function Page() {
const [input, setInput] = useState('');
const { messages, sendMessage, status } = useChat();
const [files, setFiles] = useState<FileList | undefined>(undefined);
const fileInputRef = useRef<HTMLInputElement>(null);
return (
<div className="flex flex-col gap-2">
<div className="flex flex-col gap-2 p-2">
{messages.map(message => (
<div key={message.id} className="flex flex-row gap-2">
<div className="flex-shrink-0 w-24 text-zinc-500">{`${message.role}: `}</div>
<div className="flex flex-col gap-2">
{message.parts.map((part, index) => {
if (part.type === 'text') {
return <div key={index}>{part.text}</div>;
}
if (
part.type === 'file' &&
part.mediaType?.startsWith('image/')
) {
return (
<div key={index}>
<img
className="rounded-md w-60"
src={part.url}
alt={part.filename}
/>
<span className="text-sm text-zinc-500">
{part.filename}
</span>
</div>
);
}
})}
</div>
</div>
))}
</div>
<form
onSubmit={e => {
e.preventDefault();
sendMessage({ text: input, files });
setFiles(undefined);
setInput('');
if (fileInputRef.current) {
fileInputRef.current.value = '';
}
}}
className="fixed bottom-0 flex flex-col w-full gap-2 p-2"
>
<div className="fixed flex flex-row items-end gap-2 right-2 bottom-14">
{files
? Array.from(files).map(attachment => {
const { type } = attachment;
if (type.startsWith('image/')) {
return (
<div key={attachment.name}>
<img
className="w-24 rounded-md"
src={URL.createObjectURL(attachment)}
alt={attachment.name}
/>
<span className="text-sm text-zinc-500">
{attachment.name}
</span>
</div>
);
} else if (type.startsWith('text/')) {
return (
<div
key={attachment.name}
className="flex flex-col flex-shrink-0 w-24 gap-1 text-sm text-zinc-500"
>
<div className="w-16 h-20 rounded-md bg-zinc-100" />
{attachment.name}
</div>
);
}
})
: ''}
</div>
<input
type="file"
onChange={event => {
if (event.target.files) {
setFiles(event.target.files);
}
}}
multiple
ref={fileInputRef}
/>
<input
value={input}
placeholder="Send message..."
onChange={e => setInput(e.target.value)}
className="w-full p-2 bg-zinc-100"
disabled={status !== 'ready'}
/>
</form>
</div>
);
}


@ -0,0 +1,73 @@
'use client';
import ChatInput from '@/component/chat-input';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
export default function Chat() {
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
transport: new DefaultChatTransport({
api: '/api/use-chat-custom-sources',
}),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
{messages.map(message => (
<div key={message.id} className="whitespace-pre-wrap">
{message.role === 'user' ? 'User: ' : 'AI: '}
{message.parts
.filter(part => part.type !== 'source-url')
.map((part, index) => {
if (part.type === 'text') {
return <div key={index}>{part.text}</div>;
}
})}
{message.parts
.filter(part => part.type === 'source-url')
.map(part => (
<span key={`source-${part.sourceId}`}>
[
<a
href={part.url}
target="_blank"
className="text-sm font-bold text-blue-500 hover:underline"
>
{part.title ?? new URL(part.url).hostname}
</a>
]
</span>
))}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,100 @@
'use client';
import ChatInput from '@/component/chat-input';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport, UIMessage } from 'ai';
type MyMessage = UIMessage<
never,
{
weather: {
city: string;
weather: string;
status: 'loading' | 'success';
};
}
>;
export default function Chat() {
const { error, status, sendMessage, messages, regenerate, stop } =
useChat<MyMessage>({
transport: new DefaultChatTransport({
api: '/api/use-chat-data-ui-parts',
}),
onData: dataPart => {
console.log('dataPart', JSON.stringify(dataPart, null, 2));
},
});
return (
<div className="flex flex-col py-24 mx-auto w-full max-w-md stretch">
{messages.map(message => (
<div key={message.id} className="whitespace-pre-wrap">
{message.role === 'user' ? 'User: ' : 'AI: '}{' '}
{message.parts
.filter(part => part.type === 'data-weather')
.map((part, index) => (
<span
key={index}
style={{
border: '2px solid red',
padding: '2px',
borderRadius: '4px',
display: 'inline-block',
minWidth: '180px',
}}
>
{part.data.status === 'loading' ? (
<>
Getting weather for <b>{part.data.city}</b>...
</>
) : part.data.status === 'success' ? (
<>
Weather in <b>{part.data.city}</b>:{' '}
<b>{part.data.weather}</b>
</>
) : (
<>Unknown weather state</>
)}
</span>
))}
{message.parts
.filter(part => part.type !== 'data-weather')
.map((part, index) => {
if (part.type === 'text') {
return <div key={index}>{part.text}</div>;
}
})}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 rounded-md border border-blue-500"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 rounded-md border border-blue-500"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}
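
The `data-weather` parts rendered above are emitted by the `/api/use-chat-data-ui-parts` route, which is not shown here. A sketch of how such a route could stream them, assuming the AI SDK's UI message stream API; the city, weather value, and part id are mocked:

```ts
import { createUIMessageStream, createUIMessageStreamResponse } from 'ai';

export async function POST(req: Request) {
  const stream = createUIMessageStream({
    execute: async ({ writer }) => {
      // Writing a second part with the same id replaces the first one,
      // so the client transitions from 'loading' to 'success' in place.
      writer.write({
        type: 'data-weather',
        id: 'weather-1',
        data: { city: 'London', weather: '', status: 'loading' as const },
      });

      await new Promise(resolve => setTimeout(resolve, 1000)); // mock latency

      writer.write({
        type: 'data-weather',
        id: 'weather-1',
        data: { city: 'London', weather: 'sunny', status: 'success' as const },
      });
    },
  });

  return createUIMessageStreamResponse({ stream });
}
```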


@ -0,0 +1,75 @@
'use client';
import { UIMessage, useChat } from '@ai-sdk/react';
import ChatInput from '@/component/chat-input';
import { ChatTransport, convertToModelMessages, streamText } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';
// Note: this needs a client-side OpenAI API key to work.
// DO NOT USE THIS IN ENVIRONMENTS WHERE THE API KEY IS CONFIDENTIAL.
const openai = createOpenAI({
apiKey: process.env.NEXT_PUBLIC_OPENAI_API_KEY,
});
export default function Chat() {
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
transport: {
sendMessages: async ({ messages, abortSignal }) => {
const result = streamText({
model: openai('gpt-4o'),
messages: convertToModelMessages(messages),
abortSignal,
});
return result.toUIMessageStream();
},
reconnectToStream: async ({ chatId }) => {
throw new Error('Not implemented');
},
} satisfies ChatTransport<UIMessage>,
});
return (
<div className="flex flex-col py-24 mx-auto w-full max-w-md stretch">
{messages.map(m => (
<div key={m.id} className="whitespace-pre-wrap">
{m.role === 'user' ? 'User: ' : 'AI: '}
{m.parts.map(part => {
if (part.type === 'text') {
return part.text;
}
})}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 rounded-md border border-blue-500"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 rounded-md border border-blue-500"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,135 @@
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport, getToolName, isToolUIPart } from 'ai';
import { tools } from '../api/use-chat-human-in-the-loop/tools';
import {
APPROVAL,
getToolsRequiringConfirmation,
} from '../api/use-chat-human-in-the-loop/utils';
import { useState } from 'react';
import {
HumanInTheLoopUIMessage,
MyTools,
} from '../api/use-chat-human-in-the-loop/types';
export default function Chat() {
const [input, setInput] = useState('');
const { messages, sendMessage, addToolResult } =
useChat<HumanInTheLoopUIMessage>({
transport: new DefaultChatTransport({
api: '/api/use-chat-human-in-the-loop',
}),
});
const toolsRequiringConfirmation = getToolsRequiringConfirmation(tools);
const pendingToolCallConfirmation = messages.some(m =>
m.parts?.some(
part =>
isToolUIPart(part) &&
part.state === 'input-available' &&
toolsRequiringConfirmation.includes(getToolName(part)),
),
);
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
{messages?.map(m => (
<div key={m.id} className="whitespace-pre-wrap">
<strong>{`${m.role}: `}</strong>
{m.parts?.map((part, i) => {
if (part.type === 'text') {
return <div key={i}>{part.text}</div>;
}
if (isToolUIPart<MyTools>(part)) {
const toolInvocation = part;
const toolName = getToolName(toolInvocation);
const toolCallId = toolInvocation.toolCallId;
const dynamicInfoStyles = 'font-mono bg-zinc-100 p-1 text-sm';
// render confirmation tool (client-side tool with user interaction)
if (
toolsRequiringConfirmation.includes(toolName) &&
toolInvocation.state === 'input-available'
) {
return (
<div key={toolCallId}>
Run <span className={dynamicInfoStyles}>{toolName}</span>{' '}
with args: <br />
<span className={dynamicInfoStyles}>
{JSON.stringify(toolInvocation.input, null, 2)}
</span>
<div className="flex gap-2 pt-2">
<button
className="px-4 py-2 font-bold text-white bg-blue-500 rounded hover:bg-blue-700"
onClick={async () => {
await addToolResult({
toolCallId,
tool: toolName,
output: APPROVAL.YES,
});
// trigger new message
// can also use sendAutomaticallyWhen on useChat
sendMessage();
}}
>
Yes
</button>
<button
className="px-4 py-2 font-bold text-white bg-red-500 rounded hover:bg-red-700"
onClick={async () => {
await addToolResult({
toolCallId,
tool: toolName,
output: APPROVAL.NO,
});
// trigger new message
// can also use sendAutomaticallyWhen on useChat
sendMessage();
}}
>
No
</button>
</div>
</div>
);
}
return (
<div key={toolCallId}>
<div className="font-mono text-sm bg-zinc-100 w-fit">
call
{toolInvocation.state === 'output-available'
? 'ed'
: 'ing'}{' '}
{toolName}
{part.output && (
<div>{JSON.stringify(part.output, null, 2)}</div>
)}
</div>
</div>
);
}
})}
<br />
</div>
))}
<form
onSubmit={e => {
e.preventDefault();
sendMessage({ text: input });
setInput('');
}}
>
<input
disabled={pendingToolCallConfirmation}
className="fixed bottom-0 w-full max-w-md p-2 mb-8 border border-zinc-300 rounded shadow-xl"
value={input}
placeholder="Say something..."
onChange={e => setInput(e.target.value)}
/>
</form>
</div>
);
}
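
`APPROVAL` and `getToolsRequiringConfirmation` come from a utils module that is not shown in this excerpt. A sketch inferred from how they are used above: the APPROVAL values are the strings sent back as tool output, and tools defined without an `execute` function are treated as needing human confirmation (both of which are assumptions):

```ts
import { ToolSet } from 'ai';

export const APPROVAL = {
  YES: 'Yes, confirmed.',
  NO: 'No, denied.',
} as const;

// Tools without a server-side execute function are assumed to require
// a human decision before the model can proceed.
export function getToolsRequiringConfirmation(tools: ToolSet): string[] {
  return Object.entries(tools)
    .filter(([, tool]) => typeof tool.execute !== 'function')
    .map(([name]) => name);
}
```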


@ -0,0 +1,36 @@
'use client';
import ChatInput from '@/component/chat-input';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
export default function Chat() {
const { status, sendMessage, messages } = useChat({
transport: new DefaultChatTransport({ api: '/api/use-chat-image-output' }),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
{messages.map(message => (
<div key={message.id} className="whitespace-pre-wrap">
{message.role === 'user' ? 'User: ' : 'AI: '}
{message.parts.map((part, index) => {
if (part.type === 'text') {
return <div key={index}>{part.text}</div>;
} else if (
part.type === 'file' &&
part.mediaType.startsWith('image/')
) {
return (
// eslint-disable-next-line @next/next/no-img-element
<img key={index} src={part.url} alt="Generated image" />
);
}
})}
</div>
))}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,73 @@
'use client';
import ChatInput from '@/component/chat-input';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport, UIMessage } from 'ai';
import { ExampleMetadata } from '../api/use-chat-message-metadata/example-metadata-schema';
type MyMessage = UIMessage<ExampleMetadata>;
export default function Chat() {
const { error, status, sendMessage, messages, regenerate, stop } =
useChat<MyMessage>({
transport: new DefaultChatTransport({
api: '/api/use-chat-message-metadata',
}),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
{messages.map(message => (
<div key={message.id} className="whitespace-pre-wrap">
{message.role === 'user' ? 'User: ' : 'AI: '}
{message.metadata?.createdAt && (
<div>
Created at:{' '}
{new Date(message.metadata.createdAt).toLocaleString()}
</div>
)}
{message.metadata?.duration && (
<div>Duration: {message.metadata.duration}ms</div>
)}
{message.metadata?.model && (
<div>Model: {message.metadata.model}</div>
)}
{message.metadata?.totalTokens && (
<div>Total tokens: {message.metadata.totalTokens}</div>
)}
{message.metadata?.finishReason && (
<div>Finish reason: {message.metadata.finishReason}</div>
)}
</div>
))}
{(status === 'submitted' || status === 'streaming') && (
<div className="mt-4 text-gray-500">
{status === 'submitted' && <div>Loading...</div>}
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={stop}
>
Stop
</button>
</div>
)}
{error && (
<div className="mt-4">
<div className="text-red-500">An error occurred.</div>
<button
type="button"
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
onClick={() => regenerate()}
>
Retry
</button>
</div>
)}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}
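
`ExampleMetadata` is imported from a schema file that is not part of this excerpt. A sketch inferred purely from the fields the page reads; the actual types may differ:

```ts
import { z } from 'zod';

// All fields optional, since the page guards each one before rendering.
export const exampleMetadataSchema = z.object({
  createdAt: z.number().optional(), // epoch millis, fed into new Date(...)
  duration: z.number().optional(), // milliseconds
  model: z.string().optional(),
  totalTokens: z.number().optional(),
  finishReason: z.string().optional(),
});

export type ExampleMetadata = z.infer<typeof exampleMetadataSchema>;
```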


@ -0,0 +1,50 @@
'use client';
import ChatInput from '@/component/chat-input';
import { zodSchema } from '@ai-sdk/provider-utils';
import { UIMessage, useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import { z } from 'zod';
export default function Chat({
id,
initialMessages,
}: {
id?: string | undefined;
initialMessages?: UIMessage<{ createdAt: string }>[];
} = {}) {
const { sendMessage, status, messages } = useChat({
id, // use the provided chatId
messages: initialMessages,
transport: new DefaultChatTransport({
api: '/api/use-chat-persistence-metadata',
}),
messageMetadataSchema: zodSchema(
z.object({
createdAt: z.string().datetime(),
}),
),
});
return (
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
{messages.map(m => (
<div key={m.id} className="whitespace-pre-wrap">
{m.role === 'user' ? 'User: ' : 'AI: '}
{m.metadata?.createdAt && (
<div>
Created at: {new Date(m.metadata.createdAt).toLocaleString()}
</div>
)}
{m.parts.map((part, index) => {
if (part.type === 'text') {
return <div key={index}>{part.text}</div>;
}
})}
</div>
))}
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
</div>
);
}


@ -0,0 +1,14 @@
import { loadChat } from '@util/chat-store';
import Chat from './chat';
import { UIMessage } from 'ai';
export default async function Page(props: { params: Promise<{ id: string }> }) {
// get the chat ID from the URL:
const { id } = await props.params;
// load the chat messages:
const messages = (await loadChat(id)) as UIMessage<{ createdAt: string }>[];
// display the chat:
return <Chat id={id} initialMessages={messages} />;
}
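
`loadChat` comes from the chat-store utility, which is not included in this excerpt. A file-based sketch, assuming chats are persisted as JSON files in a local `.chats` directory (the directory name and format are assumptions):

```ts
import { UIMessage } from 'ai';
import { readFile } from 'fs/promises';
import path from 'path';

export async function loadChat(id: string): Promise<UIMessage[]> {
  const file = path.join(process.cwd(), '.chats', `${id}.json`);
  try {
    return JSON.parse(await readFile(file, 'utf8'));
  } catch {
    return []; // no stored chat yet
  }
}
```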

Some files were not shown because too many files have changed in this diff.