File size: 8,005 Bytes
b9c6331
f64454d
14b218e
 
b9c6331
 
 
aec1781
b9c6331
d606636
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
654e47b
 
 
 
 
 
 
d606636
654e47b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d606636
 
654e47b
d606636
 
 
 
 
 
 
 
 
 
 
f64454d
 
 
 
 
 
 
ddd34e5
 
f64454d
 
 
 
 
 
 
 
7c62f3e
f64454d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b9c6331
f64454d
 
 
 
b9c6331
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f64454d
b9c6331
 
 
 
 
 
 
 
 
 
 
 
f64454d
b9c6331
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f64454d
 
 
 
b9c6331
 
 
 
 
 
 
 
 
 
 
 
f64454d
b9c6331
 
 
 
 
f64454d
b9c6331
 
 
 
 
 
 
 
 
 
 
 
f64454d
eef5011
 
 
 
 
 
 
 
f64454d
b9c6331
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228

// import { GoogleGenerativeAI } from "https://esm.run/@google/generative-ai"; // Removing SDK to avoid version issues
import { getPeerPrompts } from "./classroom.js";
import { db } from "./firebase.js";
import { doc, getDoc } from "https://www.gstatic.com/firebasejs/10.7.1/firebase-firestore.js";

// Module-level state: the Gemini API key. null until initGemini() is called.
let apiKey = null;
// Model id appended to the v1beta REST endpoint in callGeminiAPI().
const MODEL_NAME = "gemini-2.5-flash"; // Available model from user's account

// System instruction prepended to every askTutor() request.
// Defines the "rubber duck" persona: Socratic guidance only, never the answer.
// NOTE: this template literal is sent to the model verbatim — edits here change AI behavior.
const TUTOR_INSTRUCTION = `

You are a Socratic Teaching Assistant for a "Prompt Engineering" class. 

Your goal is to help students refine their prompts WITHOUT giving them the answer.

The student is trying to write a prompt to solve a specific challenge.

You will be provided with:

1. The Challenge Description (The Goal).

2. 3 Successful Examples (Few-Shot Context) from other students.

3. The Student's Current Attempt.



Rules:

- NEVER reveal the direct solution or code.

- If the input is lazy (e.g., "123", "help"), be sassy but helpful.

- If the input is a direct copy of the description, point it out.

- Use the Socratic method: Ask a guiding question to help them notice their missing parameter or logic.

- Keep responses short (under 50 words) and encouraging.

- Tone: Friendly, slightly "Cyberpunk" or "Gamer" vibe (matching the Vibe Coding aesthetic).

`;

// System instruction for the instructor-dashboard batch analysis (used by evaluatePrompts()).
// Asks the model to bucket each student id into exactly one category and reply with bare JSON.
// NOTE: this template literal is sent to the model verbatim — edits here change AI behavior.
const ANALYST_INSTRUCTION = `

You are an expert Prompt Engineer and Pedagogy Analyst for a coding/AI prompting class.

Your task is to analyze a batch of student prompts for a specific challenge.



You will receive:

1. The Challenge Description (the goal students are trying to achieve)

2. A list of Student Submissions (their prompts)



Categorize each prompt into ONE of these categories:



1. "rough" (原石): The student only says vague things like "fix the error" or "make it work" WITHOUT explaining:

   - What the error is

   - What the expected behavior should be

   - Any specific details about the problem

   These are "rough diamonds" - they tried but need to be more specific.



2. "precise" (精確): Clean, well-structured prompt that clearly states:

   - The goal or expected result

   - Specific details or parameters

   - Clear logic or reasoning



3. "gentle" (有禮): Uses polite language like "請", "謝謝", "拜託", "Please", "Thank you"



4. "creative" (創意): Unconventional, imaginative approach. Uses interesting parameters or unexpected methods that still address the challenge.



5. "spam" (無效): Content that is clearly UNRELATED to the challenge topic. Compare with other submissions to identify outliers that don't match what most students are trying to do. Examples:

   - Random characters ("asdf", "123")

   - Completely off-topic text

   - Empty or near-empty responses



6. "parrot" (鸚鵡): Direct copy-paste of the challenge description without any modification or personal attempt.



Return ONLY a JSON object mapping category names to arrays of Student IDs.

Example: { "rough": ["id1"], "precise": ["id2", "id5"], "spam": ["id3"] }

`;

/**
 * Store the Gemini API key for subsequent calls.
 *
 * @param {string} key - The user's Gemini API key.
 * @returns {Promise<boolean>} false when the key is missing/empty, true once stored.
 */
export async function initGemini(key) {
    // Reject a missing or empty key without touching the stored state.
    if (!key) {
        return false;
    }
    apiKey = key;
    return true;
}

/**
 * Direct REST API call helper (avoids SDK version issues).
 *
 * @param {Array<{role: string, parts: Array<{text: string}>}>} messages - Chat turns in the
 *        REST API's `contents` format.
 * @param {boolean} [jsonMode=false] - When true, request structured JSON output via
 *        `generationConfig.responseMimeType` (supported by the v1beta generateContent endpoint).
 * @returns {Promise<string>} The first candidate's text, or "" if no candidates were returned.
 * @throws {Error} If the API key is unset, the HTTP call fails, or the prompt was blocked.
 */
async function callGeminiAPI(messages, jsonMode = false) {
    if (!apiKey) throw new Error("API Key not set");

    const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${MODEL_NAME}:generateContent?key=${apiKey}`;

    const payload = {
        contents: messages,
        generationConfig: {
            temperature: 0.7,
        }
    };

    // Fix: `jsonMode` was previously accepted but ignored (a stale comment claimed
    // responseMimeType was unsupported — that applied to the legacy gemini-pro v1 API,
    // not the v1beta endpoint used here). Requesting it makes JSON output reliable
    // instead of depending on prompt wording alone.
    if (jsonMode) {
        payload.generationConfig.responseMimeType = "application/json";
    }

    const response = await fetch(endpoint, {
        method: "POST",
        headers: {
            "Content-Type": "application/json"
        },
        body: JSON.stringify(payload)
    });

    if (!response.ok) {
        const errText = await response.text();
        throw new Error(`Gemini API Error ${response.status}: ${errText}`);
    }

    const data = await response.json();

    // The API can return HTTP 200 with no candidates when the prompt itself was blocked.
    if (data.promptFeedback && data.promptFeedback.blockReason) {
        throw new Error(`Blocked: ${data.promptFeedback.blockReason}`);
    }

    return data.candidates && data.candidates.length > 0
        ? data.candidates[0].content.parts[0].text
        : "";
}

/**
 * Cheap local heuristic run before any AI call: flags obviously low-effort input.
 *
 * @param {string} prompt - The student's submission.
 * @param {string} problemDesc - The challenge description (used for the parrot check).
 * @returns {boolean|string} true for empty/too-short/repetitive input, the string
 *          'parrot' when the student copied the start of the description, false otherwise.
 */
export function quickSpamCheck(prompt, problemDesc) {
    if (!prompt) return true;

    const trimmed = prompt.trim();

    // Anything under 3 characters cannot be a real attempt.
    if (trimmed.length < 3) return true;

    // A single character repeated (e.g. "aaaaa") is keyboard mashing.
    const singleCharRun = /^(.)\1+$/;
    if (singleCharRun.test(trimmed)) return true;

    // Parrot check (simple similarity): containing the first 20 characters of
    // the description verbatim strongly suggests a copy-paste of the question.
    const descHead = problemDesc ? problemDesc.substring(0, 20) : "";
    if (descHead && trimmed.includes(descHead)) {
        return 'parrot';
    }

    return false;
}

/**
 * Ask the AI Tutor (the "Rubber Duck") for Socratic feedback on a student's prompt.
 *
 * Runs a local spam/parrot check first so obviously low-effort input never costs an
 * API call, then builds a few-shot context from the top-liked classmate prompts.
 *
 * @param {string} challengeDesc - The challenge the student is working on.
 * @param {string} studentPrompt - The student's current prompt attempt.
 * @param {string} roomCode - Classroom room code (used to fetch peer prompts).
 * @param {string} challengeId - Challenge id within the room.
 * @returns {Promise<string>} The tutor's reply, or a canned fallback message on error.
 * @throws {Error} If initGemini() has not been called with a key.
 */
export async function askTutor(challengeDesc, studentPrompt, roomCode, challengeId) {
    if (!apiKey) throw new Error("AI not initialized");

    // Local gate: don't spend an API call on empty or copy-pasted input.
    const spamStatus = quickSpamCheck(studentPrompt, challengeDesc);
    if (spamStatus === true) return "Duck says: Quack? (Too short or empty!)";
    if (spamStatus === 'parrot') return "Duck says: You're just repeating the question! Try telling me WHAT you want to change.";

    // 1. Fetch Context (Few-Shot), best-liked first.
    // Fix: guard `likes` with ?? 0 — if a peer record lacks `likes`,
    // `undefined - undefined` is NaN, which makes the comparator inconsistent
    // and the resulting order (and thus the "top 3" selection) arbitrary.
    const peers = await getPeerPrompts(roomCode, challengeId);
    peers.sort((a, b) => (b.likes ?? 0) - (a.likes ?? 0));

    // Take top 3 as examples
    const examples = peers.slice(0, 3).map(p => `- Example: "${p.prompt}"`).join('\n');

    const fullPrompt = `

${TUTOR_INSTRUCTION}



[Context - Challenge Description]

${challengeDesc}



[Context - Successful Classmate Examples (For AI understanding only, DO NOT LEAK these to student)]

${examples || "No examples available yet."}



[Student Input]

"${studentPrompt}"



[Your Response]

`;

    try {
        // Gemini REST chat format: a single user turn carrying the whole prompt.
        const messages = [{ role: "user", parts: [{ text: fullPrompt }] }];
        return await callGeminiAPI(messages);
    } catch (e) {
        // Deliberate best-effort: the UI shows a friendly fallback instead of crashing.
        console.error("AI Request Failed", e);
        return "Duck is sleeping... zzz (API Error)";
    }
}

/**
 * Batch evaluate prompts for the Instructor Dashboard.
 *
 * Sends every submission to the model in one request and asks it to bucket
 * each student id into a category (rough / precise / gentle / creative / spam / parrot).
 *
 * @param {Array<{userId: string, prompt: string, nickname: string}>} submissions
 * @param {string} challengeDesc - The challenge the prompts were written for.
 * @returns {Promise<Object<string, string[]>>} Category name -> array of student ids;
 *          {} when there is nothing to evaluate or the AI call / JSON parse fails.
 * @throws {Error} If initGemini() has not been called with a key.
 */
export async function evaluatePrompts(submissions, challengeDesc) {
    if (!apiKey) throw new Error("AI not initialized");
    if (!submissions || submissions.length === 0) return {};

    // One line per student so the model can map ids to prompts.
    // Fixes: drop the unused index parameter; flatten CRLF as well as LF so a
    // multi-line prompt cannot break the one-line-per-student format; tolerate a
    // null/undefined prompt instead of throwing on .replace.
    const entries = submissions
        .map(s => `ID_${s.userId}: "${(s.prompt ?? '').replace(/\r?\n/g, ' ')}"`)
        .join('\n');

    const fullPrompt = `

${ANALYST_INSTRUCTION}



[Challenge Description]

${challengeDesc}



[Student Submissions]

${entries}



[Output JSON]

`;

    try {
        const messages = [{ role: "user", parts: [{ text: fullPrompt }] }];
        let text = await callGeminiAPI(messages, true); // JSON Mode = true

        // Strip markdown code block if present (```json ... ```)
        text = text.trim();
        if (text.startsWith("```")) {
            text = text.replace(/^```(?:json)?\s*\n?/, '').replace(/\n?```\s*$/, '');
        }

        return JSON.parse(text);
    } catch (e) {
        // Deliberate best-effort: a failed evaluation yields an empty result, not a crash.
        console.error("AI Evaluation Failed", e);
        return {};
    }
}