// NOTE(review): stray VCS metadata (author, commit message, and hash) was pasted
// above the imports and breaks compilation — commented out pending removal:
// Yu Chen — "Add comprehensive question support and enhance example loading logic" (d655736)
import { openai } from '@ai-sdk/openai';
import { generateObject } from 'ai';
import { z } from 'zod';
import { langfuse } from '@/lib/langfuse';
import { PROMPTS, renderTemplate } from '@/prompts/prompt-management';
import { loadCsvExamples, loadComprehensiveExamples } from '@/lib/csvUtils';
import { getExamplesBySubType } from '@/lib/exampleUtils';
// Standardized question payload: every generated question, regardless of type,
// validates against this single shape (stem, four labelled options, answer key).
const QuestionSchema = z.object({
  // Stem / prompt text shown to the student.
  Question: z.string().describe("The question text"),
  // Exactly four free-text choices keyed A through D.
  Options: z
    .object({
      A: z.string(),
      B: z.string(),
      C: z.string(),
      D: z.string(),
    })
    .describe("Multiple choice options"),
  // The key of the correct option.
  Answer: z.enum(["A", "B", "C", "D"]).describe("The correct answer option"),
});
// Category translation map (shared with frontend)
// Keys are the Chinese category names embedded in "comprehensive-<category>"
// type ids; values are the English display labels used by getDisplayName.
// NOTE(review): presumably mirrored in frontend code — keep both in sync.
const COMPREHENSIVE_CATEGORY_TRANSLATIONS: Record<string, string> = {
  '單字與片語': 'Vocabulary & Phrases',
  '文法與句型': 'Grammar & Sentence Patterns',
  '文意理解與應用': 'Text Comprehension & Application',
};
// Resolve a human-readable display name for a question type.
// - "comprehensive-<category>" ids map through the shared category table.
// - Types with a known sub-type get "Label (English sub-type)"; unknown
//   sub-types fall back to the raw sub-type string.
// - Anything else returns the type id unchanged.
const getDisplayName = (type: string, subType?: string) => {
  if (type.startsWith('comprehensive-')) {
    const category = type.replace('comprehensive-', '');
    return COMPREHENSIVE_CATEGORY_TRANSLATIONS[category] || category;
  }

  // Table-driven sub-type labelling: one entry per type that supports sub-types.
  const subTypeTables: Record<string, { label: string; translations: Record<string, string> }> = {
    'paragraph-summary': {
      label: 'Paragraph Summary',
      translations: {
        '主要宗旨': 'Main Purpose',
        '篇章細節': 'Passage Details',
        '篇章推論': 'Passage Inference',
      },
    },
    'paragraph-details': {
      label: 'Paragraph Details',
      translations: {
        '反面推論': 'Reverse Inference',
        '特定細節': 'Specific Details',
        '基本運算': 'Basic Calculation',
        '資訊截取': 'Information Extraction',
      },
    },
  };

  const entry = subTypeTables[type];
  if (entry && subType) {
    return `${entry.label} (${entry.translations[subType] || subType})`;
  }
  return type;
};
export async function POST(req: Request) {
try {
const { type, parameters, sourceArticle } = await req.json();
let prompt = '';
const isComprehensive = type.startsWith('comprehensive-');
// Fetch prompt template from Langfuse (with local fallback)
const langfusePromptName = isComprehensive ? 'question/comprehensive_questions' : `question/${type}`;
const localFallback = isComprehensive
? PROMPTS['comprehensive']
: PROMPTS[type as keyof typeof PROMPTS];
const langfusePrompt = await langfuse.prompt.get(langfusePromptName, {
label: 'production',
fallback: localFallback || '',
});
const promptTemplate = langfusePrompt.prompt;
if (isComprehensive) {
// Comprehensive questions: use CSV examples only, no article
const category = type.replace('comprehensive-', '');
const csvExamples = await loadComprehensiveExamples(category);
const templateParams: Record<string, string | number | boolean | Record<string, string>[]> = {
category,
examples: csvExamples.slice(0, 5).map(ex => ({
question: ex.question,
choices: ex.choices,
answer: ex.answer,
})),
};
prompt = renderTemplate(promptTemplate, templateParams);
} else {
// Reading comprehension types: use article + CSV examples
const templateParams: Record<string, string | number | boolean | Record<string, string>[]> = {
...parameters,
article: sourceArticle || '',
};
// Load CSV examples for types that support sub-types
const csvFileMap: Record<string, string> = {
'paragraph-summary': 'paragraph_summary.csv',
'paragraph-details': 'paragraph_details.csv',
'word-comprehension': 'word_comprehension.csv',
'grammatical-structure': 'grammar_structure.csv',
'paragraph-structure': 'paragraph_structure.csv',
'textual-inference': 'textual_inference.csv',
};
const csvFile = csvFileMap[type];
if (csvFile && parameters.subType) {
try {
const allExamples = await loadCsvExamples(csvFile);
const subTypeExamples = getExamplesBySubType(allExamples, parameters.subType);
// Pre-format examples as text for Langfuse simple {{examples}} variable
const examplesText = subTypeExamples.slice(0, 5).map((example, i) =>
`Example ${i + 1}:\nQuestion: ${example.題幹}\nOptions: ${example.選項}`
).join('\n\n');
templateParams.examples = examplesText;
console.log(`Loaded ${subTypeExamples.length} examples for ${type}/${parameters.subType}`);
} catch (error) {
console.error('Error loading examples:', error);
}
}
prompt = renderTemplate(promptTemplate, templateParams);
}
// All questions use the same JSON format
const result = await generateObject({
model: openai('gpt-4o-mini'),
schema: QuestionSchema,
prompt: prompt,
});
return Response.json({
type: type, // Keep original type ID for matching
displayName: getDisplayName(type, parameters.subType), // Add display name separately
stem: result.object.Question,
content: {
Question: result.object.Question,
Options: result.object.Options,
Answer: result.object.Answer,
},
points: 1,
createdAt: new Date().toISOString(),
}, {
headers: {
'Cache-Control': 'no-store, no-cache, must-revalidate, proxy-revalidate',
'Pragma': 'no-cache',
'Expires': '0',
}
});
} catch (error) {
console.error('Error generating question:', error);
return Response.json(
{ error: 'Failed to generate question' },
{ status: 500 }
);
}
}