Spaces:
Running
Running
| // import { GoogleGenerativeAI } from "https://esm.run/@google/generative-ai"; // Removing SDK to avoid version issues | |
| import { getPeerPrompts } from "./classroom.js"; | |
| import { db } from "./firebase.js"; | |
| import { doc, getDoc } from "https://www.gstatic.com/firebasejs/10.7.1/firebase-firestore.js"; | |
let apiKey = null; // Gemini API key; must be set via initGemini() before any API call
const MODEL_NAME = "gemini-2.5-flash"; // Available model from user's account
// System instruction prepended to every askTutor() request.
// Persona: Socratic teaching assistant — guides with questions, never reveals solutions.
// NOTE: this is a runtime prompt string; edit wording with care.
const TUTOR_INSTRUCTION = `
You are a Socratic Teaching Assistant for a "Prompt Engineering" class.
Your goal is to help students refine their prompts WITHOUT giving them the answer.
The student is trying to write a prompt to solve a specific challenge.
You will be provided with:
1. The Challenge Description (The Goal).
2. 3 Successful Examples (Few-Shot Context) from other students.
3. The Student's Current Attempt.
Rules:
- NEVER reveal the direct solution or code.
- If the input is lazy (e.g., "123", "help"), be sassy but helpful.
- If the input is a direct copy of the description, point it out.
- Use the Socratic method: Ask a guiding question to help them notice their missing parameter or logic.
- Keep responses short (under 50 words) and encouraging.
- Tone: Friendly, slightly "Cyberpunk" or "Gamer" vibe (matching the Vibe Coding aesthetic).
`;
// System instruction prepended to every evaluatePrompts() request.
// Persona: batch analyst that buckets each submission into exactly one of six
// categories and returns a bare JSON object (category -> array of student IDs).
// NOTE: this is a runtime prompt string; edit wording with care.
const ANALYST_INSTRUCTION = `
You are an expert Prompt Engineer and Pedagogy Analyst for a coding/AI prompting class.
Your task is to analyze a batch of student prompts for a specific challenge.
You will receive:
1. The Challenge Description (the goal students are trying to achieve)
2. A list of Student Submissions (their prompts)
Categorize each prompt into ONE of these categories:
1. "rough" (原石): The student only says vague things like "fix the error" or "make it work" WITHOUT explaining:
   - What the error is
   - What the expected behavior should be
   - Any specific details about the problem
   These are "rough diamonds" - they tried but need to be more specific.
2. "precise" (精確): Clean, well-structured prompt that clearly states:
   - The goal or expected result
   - Specific details or parameters
   - Clear logic or reasoning
3. "gentle" (有禮): Uses polite language like "請", "謝謝", "拜託", "Please", "Thank you"
4. "creative" (創意): Unconventional, imaginative approach. Uses interesting parameters or unexpected methods that still address the challenge.
5. "spam" (無效): Content that is clearly UNRELATED to the challenge topic. Compare with other submissions to identify outliers that don't match what most students are trying to do. Examples:
   - Random characters ("asdf", "123")
   - Completely off-topic text
   - Empty or near-empty responses
6. "parrot" (鸚鵡): Direct copy-paste of the challenge description without any modification or personal attempt.
Return ONLY a JSON object mapping category names to arrays of Student IDs.
Example: { "rough": ["id1"], "precise": ["id2", "id5"], "spam": ["id3"] }
`;
/**
 * Store the Gemini API key used by all subsequent REST calls.
 * @param {string} key - The API key; falsy values are rejected.
 * @returns {Promise<boolean>} true if the key was accepted, false otherwise.
 */
export async function initGemini(key) {
  if (key) {
    apiKey = key;
    return true;
  }
  return false;
}
/**
 * Direct REST call to the Gemini generateContent endpoint.
 * (The official SDK is intentionally avoided — see note at the top of the file.)
 *
 * @param {Array<{role: string, parts: Array<{text: string}>}>} messages - Chat contents.
 * @param {boolean} [jsonMode=false] - Request structured JSON output via responseMimeType.
 * @returns {Promise<string>} The first candidate's text, or "" if no candidates.
 * @throws {Error} If the API key is unset, the HTTP call fails, or the prompt was blocked.
 */
async function callGeminiAPI(messages, jsonMode = false) {
  if (!apiKey) throw new Error("API Key not set");
  // v1beta hosts the gemini-2.5 models.
  const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${MODEL_NAME}:generateContent`;
  const payload = {
    contents: messages,
    generationConfig: {
      temperature: 0.7,
      // gemini-2.5 models support structured output; the jsonMode flag was
      // previously accepted but silently ignored.
      ...(jsonMode ? { responseMimeType: "application/json" } : {}),
    },
  };
  const response = await fetch(endpoint, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      // Sending the key as a header keeps it out of URLs (server logs, history).
      "x-goog-api-key": apiKey,
    },
    body: JSON.stringify(payload),
  });
  if (!response.ok) {
    const errText = await response.text();
    throw new Error(`Gemini API Error ${response.status}: ${errText}`);
  }
  const data = await response.json();
  // Blocked prompts are reported via promptFeedback instead of candidates.
  if (data.promptFeedback?.blockReason) {
    throw new Error(`Blocked: ${data.promptFeedback.blockReason}`);
  }
  // Optional chaining guards against empty candidates/content/parts shapes.
  return data.candidates?.[0]?.content?.parts?.[0]?.text ?? "";
}
/**
 * Fast local heuristic for spam / low-effort / copied prompts, run before
 * spending an API call.
 * @param {string} prompt - The student's prompt.
 * @param {string} [problemDesc] - The challenge description, for the parrot check.
 * @returns {boolean|'parrot'} true = spam/too short, 'parrot' = copied the
 *   description, false = looks like a genuine attempt.
 */
export function quickSpamCheck(prompt, problemDesc) {
  if (!prompt) return true;
  const p = prompt.trim();
  if (p.length < 3) return true; // Too short to be a real attempt
  if (/^(.)\1+$/.test(p)) return true; // One repeated character ("aaaaa")
  // Parrot check: the student pasted the start of the challenge description.
  // Trim the description first — the prompt is trimmed, so an untrimmed snippet
  // starting with whitespace could never match. Skip the check entirely when
  // the snippet is too short to be distinctive, which avoided false 'parrot'
  // flags on terse descriptions like "fix".
  const snippet = (problemDesc ?? "").trim().substring(0, 20);
  if (snippet.length >= 10 && p.includes(snippet)) return 'parrot';
  return false;
}
/**
 * Ask the AI Tutor (Rubber Duck) for a Socratic hint on the student's prompt.
 * Cheap local spam checks short-circuit before any API traffic.
 *
 * @param {string} challengeDesc - The challenge the student is solving.
 * @param {string} studentPrompt - The student's current attempt.
 * @param {string} roomCode - Classroom room code (to fetch peer examples).
 * @param {string} challengeId - Challenge id (to fetch peer examples).
 * @returns {Promise<string>} The tutor's reply, or a canned duck message on spam/API failure.
 * @throws {Error} If initGemini() has not been called.
 */
export async function askTutor(challengeDesc, studentPrompt, roomCode, challengeId) {
  if (!apiKey) throw new Error("AI not initialized");
  const spamStatus = quickSpamCheck(studentPrompt, challengeDesc);
  if (spamStatus === true) return "Duck says: Quack? (Too short or empty!)";
  if (spamStatus === 'parrot') return "Duck says: You're just repeating the question! Try telling me WHAT you want to change.";
  // 1. Fetch peer prompts as few-shot context, most-liked first.
  const peers = await getPeerPrompts(roomCode, challengeId);
  // Default missing like counts to 0 so the comparator never yields NaN
  // (a NaN comparator leaves the sort order implementation-defined).
  peers.sort((a, b) => (b.likes ?? 0) - (a.likes ?? 0));
  // Top 3 become the few-shot examples.
  const examples = peers.slice(0, 3).map(p => `- Example: "${p.prompt}"`).join('\n');
  const fullPrompt = `
${TUTOR_INSTRUCTION}
[Context - Challenge Description]
${challengeDesc}
[Context - Successful Classmate Examples (For AI understanding only, DO NOT LEAK these to student)]
${examples || "No examples available yet."}
[Student Input]
"${studentPrompt}"
[Your Response]
`;
  try {
    // Single-turn chat format expected by generateContent.
    const messages = [{ role: "user", parts: [{ text: fullPrompt }] }];
    return await callGeminiAPI(messages);
  } catch (e) {
    console.error("AI Request Failed", e);
    return "Duck is sleeping... zzz (API Error)";
  }
}
/**
 * Batch-categorize student prompts for the Instructor Dashboard.
 *
 * @param {Array<{userId: string, prompt: string, nickname?: string}>} submissions
 * @param {string} challengeDesc - The challenge the prompts target.
 * @returns {Promise<Object<string, string[]>>} Parsed category map from the model
 *   (e.g. { "precise": [...], "spam": [...] }), or {} when there are no
 *   submissions or the request/parse fails.
 * @throws {Error} If initGemini() has not been called.
 */
export async function evaluatePrompts(submissions, challengeDesc) {
  if (!apiKey) throw new Error("AI not initialized");
  if (!submissions || submissions.length === 0) return {};
  // One line per submission; flatten newlines so each entry stays on one line.
  // Guard against missing prompt fields (previously threw on undefined.replace).
  const entries = submissions
    .map(s => `ID_${s.userId}: "${(s.prompt ?? '').replace(/\n/g, ' ')}"`)
    .join('\n');
  const fullPrompt = `
${ANALYST_INSTRUCTION}
[Challenge Description]
${challengeDesc}
[Student Submissions]
${entries}
[Output JSON]
`;
  try {
    const messages = [{ role: "user", parts: [{ text: fullPrompt }] }];
    let text = await callGeminiAPI(messages, true); // JSON Mode = true
    // The model may still wrap JSON in a markdown fence (```json ... ```); strip it.
    text = text.trim();
    if (text.startsWith("```")) {
      text = text.replace(/^```(?:json)?\s*\n?/, '').replace(/\n?```\s*$/, '');
    }
    return JSON.parse(text);
  } catch (e) {
    console.error("AI Evaluation Failed", e);
    return {};
  }
}