import { NextRequest, NextResponse } from 'next/server'
import { prisma } from '@/lib/prisma'
// Route context for /api/prompts/[id]/* handlers.
interface RouteParams {
  // Dynamic segment values; a Promise because route params are resolved
  // asynchronously (the handler awaits it before use).
  params: Promise<{ id: string }>
}
// POST /api/prompts/[id]/test - 运行 prompt 测试
|
||
export async function POST(request: NextRequest, { params }: RouteParams) {
|
||
const { id } = await params
|
||
const body = await request.json()
|
||
|
||
try {
|
||
const {
|
||
content,
|
||
model = 'gpt-3.5-turbo',
|
||
temperature = 0.7,
|
||
maxTokens = 1000,
|
||
userId
|
||
} = body
|
||
|
||
if (!userId) {
|
||
return NextResponse.json({ error: 'User ID is required' }, { status: 401 })
|
||
}
|
||
|
||
if (!content) {
|
||
return NextResponse.json(
|
||
{ error: 'Prompt content is required' },
|
||
{ status: 400 }
|
||
)
|
||
}
|
||
|
||
// 验证 prompt 是否存在且属于用户
|
||
const prompt = await prisma.prompt.findFirst({
|
||
where: { id, userId }
|
||
})
|
||
|
||
if (!prompt) {
|
||
return NextResponse.json({ error: 'Prompt not found' }, { status: 404 })
|
||
}
|
||
|
||
// 检查用户积分(这里简化处理,实际应该从用户表获取)
|
||
const estimatedCost = calculateCost(model, content, maxTokens)
|
||
|
||
// 模拟 AI API 调用
|
||
const testResult = await runAITest({
|
||
content,
|
||
model,
|
||
temperature,
|
||
maxTokens
|
||
})
|
||
|
||
// 保存测试记录
|
||
const testRun = await prisma.promptTestRun.create({
|
||
data: {
|
||
promptId: id,
|
||
input: content,
|
||
output: testResult.output,
|
||
success: testResult.success,
|
||
error: testResult.error,
|
||
}
|
||
})
|
||
|
||
return NextResponse.json({
|
||
testRun,
|
||
result: testResult,
|
||
cost: estimatedCost,
|
||
model,
|
||
timestamp: new Date().toISOString()
|
||
})
|
||
|
||
} catch (error) {
|
||
console.error('Error running prompt test:', error)
|
||
|
||
// 保存失败的测试记录
|
||
try {
|
||
await prisma.promptTestRun.create({
|
||
data: {
|
||
promptId: id,
|
||
input: body.content || '',
|
||
output: null,
|
||
success: false,
|
||
error: error instanceof Error ? error.message : 'Unknown error',
|
||
}
|
||
})
|
||
} catch (dbError) {
|
||
console.error('Error saving failed test run:', dbError)
|
||
}
|
||
|
||
return NextResponse.json(
|
||
{ error: 'Failed to run prompt test' },
|
||
{ status: 500 }
|
||
)
|
||
}
|
||
}
// 模拟 AI API 调用
|
||
async function runAITest({
|
||
content,
|
||
model
|
||
}: {
|
||
content: string
|
||
model: string
|
||
temperature: number
|
||
maxTokens: number
|
||
}) {
|
||
// 模拟 API 延迟
|
||
await new Promise(resolve => setTimeout(resolve, 1000 + Math.random() * 2000))
|
||
|
||
// 模拟不同的响应结果
|
||
const scenarios = [
|
||
{
|
||
success: true,
|
||
output: generateMockResponse(content, model),
|
||
error: null
|
||
},
|
||
{
|
||
success: false,
|
||
output: null,
|
||
error: 'Rate limit exceeded. Please try again later.'
|
||
},
|
||
{
|
||
success: false,
|
||
output: null,
|
||
error: 'Invalid API key or insufficient credits.'
|
||
}
|
||
]
|
||
|
||
// 90% 成功率
|
||
const isSuccess = Math.random() > 0.1
|
||
|
||
if (isSuccess) {
|
||
return scenarios[0]
|
||
} else {
|
||
return scenarios[Math.floor(Math.random() * (scenarios.length - 1)) + 1]
|
||
}
|
||
}
// Generate a simulated AI response: picks one of three canned report-style
// templates at random and interpolates the prompt excerpt, the model name,
// and randomized fake metrics. Non-deterministic by design (mock only) —
// repeated calls with the same arguments return different text.
function generateMockResponse(content: string, model: string): string {
  const responses = [
    // Template 1: prompt-quality analysis with fake percentage scores.
    `Based on your prompt: "${content.substring(0, 50)}${content.length > 50 ? '...' : ''}"

Here's a comprehensive response generated by ${model}:

✨ **Key Insights:**
1. Your prompt is well-structured and clear
2. The instructions provide good context
3. The expected output format is defined

📊 **Analysis:**
- Prompt clarity: Excellent (95%)
- Specificity: High (88%)
- Context provided: Good (82%)
- Output format: Well-defined (90%)

🎯 **Suggestions for improvement:**
- Consider adding more specific examples
- Define the target audience more clearly
- Specify the desired tone or style

💡 **Generated Content:**
[This would be the actual AI-generated content based on your prompt. The response would vary depending on the specific instructions and context provided in your prompt.]

📈 **Performance Metrics:**
- Response time: ${(Math.random() * 2 + 0.5).toFixed(2)}s
- Tokens used: ${Math.floor(Math.random() * 500 + 100)}
- Quality score: ${Math.floor(Math.random() * 20 + 80)}%`,

    // Template 2: processing report with fake token counts and cost.
    // Note: "Input tokens" uses content.length / 4, the same rough estimate
    // as calculateCost.
    `Response from ${model}:

Your prompt has been processed successfully. Here are the results:

🔍 **Prompt Analysis:**
The input prompt demonstrates good structure with clear instructions. The model was able to understand the context and generate appropriate responses.

📝 **Generated Output:**
[Simulated AI response content would appear here. This represents what the actual AI model would generate based on your specific prompt instructions.]

⚡ **Performance Data:**
- Model: ${model}
- Processing time: ${(Math.random() * 3 + 1).toFixed(2)} seconds
- Input tokens: ${Math.floor(content.length / 4)}
- Output tokens: ${Math.floor(Math.random() * 400 + 200)}
- Total cost: $${(Math.random() * 0.05 + 0.01).toFixed(4)}

✅ **Quality Indicators:**
- Relevance: High
- Coherence: Excellent
- Completeness: Good
- Accuracy: Very Good`,

    // Template 3: conversational reply with fake technical details and
    // evaluation scores. The "Temperature"/"Max tokens" values shown here are
    // random, not the caller's actual settings.
    `${model} Response:

Thank you for your prompt. I've analyzed your request and generated the following response:

🎯 **Understanding:** Your prompt asks for specific information/action, and I've interpreted it as follows...

📋 **Response:**
[This section would contain the actual generated content based on your prompt. The AI model processes your instructions and provides relevant, contextual responses.]

🔧 **Technical Details:**
- Model used: ${model}
- Temperature: ${Math.random().toFixed(2)}
- Max tokens: ${Math.floor(Math.random() * 1000 + 500)}
- Actual tokens: ${Math.floor(Math.random() * 800 + 200)}

📊 **Evaluation:**
- Prompt effectiveness: ${Math.floor(Math.random() * 15 + 85)}%
- Response quality: ${Math.floor(Math.random() * 10 + 90)}%
- Instruction following: ${Math.floor(Math.random() * 8 + 92)}%`
  ]

  // Uniformly pick one of the three templates.
  return responses[Math.floor(Math.random() * responses.length)]
}
// 计算估算成本
|
||
function calculateCost(model: string, content: string, maxTokens: number): number {
|
||
const inputTokens = Math.ceil(content.length / 4) // 粗略估算
|
||
const outputTokens = Math.min(maxTokens, 500) // 估算输出 tokens
|
||
|
||
// 不同模型的价格(每1000 tokens)
|
||
const pricing: Record<string, { input: number; output: number }> = {
|
||
'gpt-3.5-turbo': { input: 0.001, output: 0.002 },
|
||
'gpt-4': { input: 0.03, output: 0.06 },
|
||
'gpt-4-turbo': { input: 0.01, output: 0.03 },
|
||
'claude-3-sonnet': { input: 0.003, output: 0.015 },
|
||
'claude-3-haiku': { input: 0.00025, output: 0.00125 }
|
||
}
|
||
|
||
const modelPricing = pricing[model] || pricing['gpt-3.5-turbo']
|
||
|
||
const inputCost = (inputTokens / 1000) * modelPricing.input
|
||
const outputCost = (outputTokens / 1000) * modelPricing.output
|
||
|
||
return Number((inputCost + outputCost).toFixed(6))
|
||
}