awellis commited on
Commit
aa9aebe
Β·
1 Parent(s): 65f7070

Replaces mo.ui.button with mo.ui.run_button for consistency across labs, loads environment variables using dotenv, and refactors output variables for clarity. Also restructures output stacking and callouts for lab results and reflections, improving code readability and maintainability.

Browse files
Files changed (2) hide show
  1. app.py +128 -154
  2. app.py.BAK +707 -0
app.py CHANGED
@@ -19,6 +19,11 @@ def _():
19
  from pydantic import BaseModel, Field
20
  from typing import Literal
21
  import os
 
 
 
 
 
22
  return BaseModel, Field, OpenAI, mo, os
23
 
24
 
@@ -85,9 +90,11 @@ def _(BaseModel, Field):
85
 
86
  class SimpleExample(BaseModel):
87
  """Minimal structure for prompt comparison"""
 
88
  problem: str = Field(description="The problem to solve")
89
  solution: str = Field(description="Step-by-step solution")
90
  explanation: str = Field(description="Why this approach works")
 
91
  return (SimpleExample,)
92
 
93
 
@@ -101,7 +108,7 @@ def _(mo):
101
  label="Basic Prompt (no pedagogical grounding):",
102
  value="""Create an example problem about Python for loops and solve it step by step.""",
103
  full_width=True,
104
- rows=3
105
  )
106
 
107
  clt_prompt = mo.ui.text_area(
@@ -115,7 +122,7 @@ def _(mo):
115
 
116
  Keep cognitive load low: avoid technical jargon, use concrete examples.""",
117
  full_width=True,
118
- rows=8
119
  )
120
 
121
  mo.vstack([basic_prompt, clt_prompt])
@@ -126,9 +133,9 @@ def _(mo):
126
  def _(mo):
127
  """Lab 1: Generate button"""
128
 
129
- lab1_button = mo.ui.button(
130
  label="πŸ”¬ Generate Both Examples",
131
- kind="success"
132
  )
133
 
134
  mo.md(f"### Compare the Results\n\n{lab1_button}")
@@ -141,27 +148,24 @@ def _(SimpleExample, basic_prompt, client, clt_prompt, lab1_button, mo):
141
 
142
  lab1_output = None
143
 
144
- # Debug: Show button state
145
- if lab1_button.value:
146
- try:
147
- with mo.status.spinner(title="Generating both examples..."):
148
- # Generate basic example
149
- basic_response = client.responses.parse(
150
- model="gpt-4o-mini",
151
- input=[{"role": "user", "content": basic_prompt.value}],
152
- text_format=SimpleExample
153
- )
154
- basic_example = basic_response.output_parsed
155
-
156
- # Generate CLT-grounded example
157
- clt_response = client.responses.parse(
158
- model="gpt-4o-mini",
159
- input=[{"role": "user", "content": clt_prompt.value}],
160
- text_format=SimpleExample
161
- )
162
- clt_example = clt_response.output_parsed
163
-
164
- _comparison = mo.vstack([
165
  mo.md("### πŸ“Š Basic Prompt Result"),
166
  mo.md(f"**Problem:** {basic_example.problem}"),
167
  mo.md(f"**Solution:** {basic_example.solution}"),
@@ -171,9 +175,8 @@ def _(SimpleExample, basic_prompt, client, clt_prompt, lab1_button, mo):
171
  mo.md(f"**Problem:** {clt_example.problem}"),
172
  mo.md(f"**Solution:** {clt_example.solution}"),
173
  mo.md(f"**Explanation:** {clt_example.explanation}"),
174
- ])
175
-
176
- _reflection = mo.callout(mo.md("""
177
  ### πŸ’­ What Do You Notice?
178
 
179
  - Which problem is clearer and more specific?
@@ -181,35 +184,11 @@ def _(SimpleExample, basic_prompt, client, clt_prompt, lab1_button, mo):
181
  - Which explanation helps you understand WHY, not just WHAT?
182
 
183
  **The prompt IS your pedagogical design!**
184
- """), kind="info")
185
-
186
- lab1_output = mo.vstack([_comparison, _reflection])
187
-
188
- except Exception as e:
189
- import traceback
190
- lab1_output = mo.callout(
191
- mo.md(f"""
192
- ### ⚠️ Error Generating Examples
193
-
194
- **Error type:** {type(e).__name__}
195
-
196
- **Error message:** {str(e)}
197
-
198
- **Full traceback:**
199
- ```
200
- {traceback.format_exc()}
201
- ```
202
-
203
- **Common fixes:**
204
- - Make sure you have a `.env` file with `OPENAI_API_KEY=sk-...`
205
- - Check that your API key is valid
206
- - Ensure you have API credits available
207
- """),
208
- kind="danger"
209
- )
210
- else:
211
- # Show this when button hasn't been clicked yet
212
- lab1_output = mo.md("_Click the button above to generate examples_")
213
 
214
  lab1_output
215
 
@@ -243,13 +222,13 @@ def _(mo):
243
  your_hobby = mo.ui.text(
244
  label="Your hobby or interest:",
245
  placeholder="e.g., photography, cooking, gaming",
246
- full_width=True
247
  )
248
 
249
  your_goal = mo.ui.text(
250
  label="What you want to achieve:",
251
  placeholder="e.g., build a recipe app, automate photo editing",
252
- full_width=True
253
  )
254
 
255
  mo.vstack([your_hobby, your_goal])
@@ -260,9 +239,9 @@ def _(mo):
260
  def _(mo):
261
  """Lab 2: Generate button"""
262
 
263
- lab2_button = mo.ui.button(
264
  label="βš–οΈ Generate A/B Comparison",
265
- kind="success"
266
  )
267
 
268
  mo.md(f"{lab2_button}")
@@ -276,43 +255,44 @@ def _(SimpleExample, client, lab2_button, mo, your_goal, your_hobby):
276
  lab2_output = None
277
 
278
  if lab2_button.value and your_hobby.value and your_goal.value:
279
- try:
280
- with mo.status.spinner(title="Generating generic and personalized examples..."):
281
- # Generic example
282
- generic_prompt = "Create a worked example about Python dictionaries for beginners."
283
- generic_response = client.responses.parse(
284
- model="gpt-4o-mini",
285
- input=[{"role": "user", "content": generic_prompt}],
286
- text_format=SimpleExample
287
- )
288
- generic_example = generic_response.output_parsed
289
-
290
- # Personalized example
291
- personalized_prompt = f"""Create a worked example about Python dictionaries for beginners.
292
 
293
  IMPORTANT: Personalize this example for someone who is interested in {your_hobby.value} and wants to {your_goal.value}.
294
  Use familiar contexts and examples from their interest to make the concept more relatable and reduce cognitive load."""
295
 
296
- personalized_response = client.responses.parse(
297
- model="gpt-4o-mini",
298
- input=[{"role": "user", "content": personalized_prompt}],
299
- text_format=SimpleExample
300
- )
301
- personalized_example = personalized_response.output_parsed
302
 
303
- _comparison = mo.vstack([
 
304
  mo.md("### πŸ“– Generic Example (Standard Textbook Style)"),
305
  mo.md(f"**Problem:** {generic_example.problem}"),
306
  mo.md(f"**Solution:** {generic_example.solution}"),
307
  mo.md(f"**Explanation:** {generic_example.explanation}"),
308
  mo.md("---"),
309
- mo.md(f"### ✨ Personalized Example (Your Context: {your_hobby.value})"),
 
 
310
  mo.md(f"**Problem:** {personalized_example.problem}"),
311
  mo.md(f"**Solution:** {personalized_example.solution}"),
312
  mo.md(f"**Explanation:** {personalized_example.explanation}"),
313
- ])
314
-
315
- _reflection = mo.callout(mo.md("""
316
  ### πŸ’­ How Did That Feel?
317
 
318
  - Which example was more engaging to read?
@@ -320,21 +300,11 @@ Use familiar contexts and examples from their interest to make the concept more
320
  - Could you visualize the personalized example more easily?
321
 
322
  **This is the personalization effect in action!** Familiar contexts reduce extraneous cognitive load.
323
- """), kind="success")
324
-
325
- lab2_output = mo.vstack([_comparison, _reflection])
326
-
327
- except Exception as e:
328
- lab2_output = mo.callout(
329
- mo.md(f"""
330
- ### ⚠️ Error Generating Examples
331
-
332
- **Error:** {str(e)}
333
-
334
- Check your `.env` file and API key.
335
- """),
336
- kind="danger"
337
- )
338
 
339
  lab2_output
340
 
@@ -381,13 +351,18 @@ def _(mo):
381
  "key_insight: str": "Why this approach works",
382
  "code_with_comments: str": "Annotated code",
383
  "common_mistakes: str": "What to avoid",
384
- "connection_to_real_world: str": "Practical relevance"
385
  }
386
 
387
  field_selector = mo.ui.multiselect(
388
  options=list(field_options.keys()),
389
  label="Select fields for YOUR ideal worked example:",
390
- value=["problem: str", "solution_steps: list[str]", "final_answer: str", "key_insight: str"]
 
 
 
 
 
391
  )
392
 
393
  field_selector
@@ -411,7 +386,7 @@ def _(field_selector, mo):
411
 
412
  ```python
413
  class WorkedExample:
414
- {chr(10).join([' ' + f for f in field_selector.value])}
415
  ```
416
 
417
  ### πŸ’­ Design Analysis
@@ -460,13 +435,13 @@ def _(mo):
460
  reasoning_effort = mo.ui.dropdown(
461
  options=["none", "low", "medium", "high"],
462
  value="low",
463
- label="Reasoning Effort (how much thinking?)"
464
  )
465
 
466
  verbosity = mo.ui.dropdown(
467
  options=["low", "medium", "high"],
468
  value="medium",
469
- label="Verbosity (explanation detail)"
470
  )
471
 
472
  mo.vstack([reasoning_effort, verbosity])
@@ -476,7 +451,8 @@ def _(mo):
476
  @app.cell
477
  def _(mo, reasoning_effort, verbosity):
478
  """Lab 4: Display parameter info"""
479
- mo.callout(mo.md(f"""
 
480
  **Current Settings:**
481
 
482
  - Reasoning: {reasoning_effort.value}
@@ -487,7 +463,9 @@ def _(mo, reasoning_effort, verbosity):
487
  **For experts**: Higher reasoning (better solutions), lower verbosity (concise)
488
 
489
  The "best" parameters depend on your learners!
490
- """), kind="info")
 
 
491
  return
492
 
493
 
@@ -514,9 +492,9 @@ def _(mo):
514
 
515
  mo.md("### Generate an Example to Analyze")
516
 
517
- lab5_button = mo.ui.button(
518
  label="🎲 Generate Random Example",
519
- kind="neutral"
520
  )
521
 
522
  lab5_button
@@ -527,46 +505,38 @@ def _(mo):
527
  def _(SimpleExample, client, lab5_button, mo):
528
  """Lab 5: Generate and display example to analyze"""
529
 
530
- lab5_output = None
531
 
532
  if lab5_button.value:
533
- try:
534
- with mo.status.spinner(title="Generating example..."):
535
- response = client.responses.parse(
536
- model="gpt-4o-mini",
537
- input=[{"role": "user", "content": "Create a worked example about Python dictionaries for beginners."}],
538
- text_format=SimpleExample
539
- )
540
- analyze_example = response.output_parsed
541
-
542
- lab5_output = mo.vstack([
 
 
 
 
 
543
  mo.md("### Example to Analyze"),
544
  mo.md(f"**Problem:** {analyze_example.problem}"),
545
  mo.md(f"**Solution:** {analyze_example.solution}"),
546
  mo.md(f"**Explanation:** {analyze_example.explanation}"),
547
- ])
548
-
549
- except Exception as e:
550
- lab5_output = mo.callout(
551
- mo.md(f"""
552
- ### ⚠️ Error Generating Example
553
-
554
- **Error:** {str(e)}
555
-
556
- Check your `.env` file and API key.
557
- """),
558
- kind="danger"
559
- )
560
 
561
- lab5_output
562
 
563
 
564
  @app.cell
565
  def _(mo):
566
  """Lab 5: CLT evaluation checklist"""
567
 
568
- mo.md("### Evaluate Using CLT Principles")
569
-
570
  reduces_extraneous = mo.ui.checkbox(
571
  label="βœ… Reduces extraneous cognitive load (no unnecessary complexity)"
572
  )
@@ -583,22 +553,20 @@ def _(mo):
583
  label="βœ… Is a WORKED example (shows complete solution, not a puzzle)"
584
  )
585
 
586
- clear_steps = mo.ui.checkbox(
587
- label="βœ… Has clear step-by-step progression"
588
- )
589
 
590
- explains_why = mo.ui.checkbox(
591
- label="βœ… Explains WHY, not just WHAT"
592
- )
593
 
594
- mo.vstack([
595
- reduces_extraneous,
596
- manages_intrinsic,
597
- optimizes_germane,
598
- worked_not_problem,
599
- clear_steps,
600
- explains_why
601
- ])
 
 
602
  return (
603
  clear_steps,
604
  explains_why,
@@ -627,13 +595,16 @@ def _(
627
  optimizes_germane.value,
628
  worked_not_problem.value,
629
  clear_steps.value,
630
- explains_why.value
631
  ]
632
 
633
  score = sum(1 for v in checklist_values if v)
634
 
 
 
635
  if score > 0:
636
- mo.callout(f"""
 
637
  ### Score: {score}/6
638
 
639
  {"🌟" * score}
@@ -645,8 +616,11 @@ def _(
645
  - 0: Not yet evaluated
646
 
647
  **Key Skill**: You're developing a CLT-grounded critical lens for evaluating AI tools!
648
- """, kind="success" if score >= 5 else "info")
649
- return
 
 
 
650
 
651
 
652
  @app.cell
 
19
  from pydantic import BaseModel, Field
20
  from typing import Literal
21
  import os
22
+ from dotenv import load_dotenv
23
+
24
+ # Load environment variables from .env file
25
+ load_dotenv()
26
+
27
  return BaseModel, Field, OpenAI, mo, os
28
 
29
 
 
90
 
91
  class SimpleExample(BaseModel):
92
  """Minimal structure for prompt comparison"""
93
+
94
  problem: str = Field(description="The problem to solve")
95
  solution: str = Field(description="Step-by-step solution")
96
  explanation: str = Field(description="Why this approach works")
97
+
98
  return (SimpleExample,)
99
 
100
 
 
108
  label="Basic Prompt (no pedagogical grounding):",
109
  value="""Create an example problem about Python for loops and solve it step by step.""",
110
  full_width=True,
111
+ rows=3,
112
  )
113
 
114
  clt_prompt = mo.ui.text_area(
 
122
 
123
  Keep cognitive load low: avoid technical jargon, use concrete examples.""",
124
  full_width=True,
125
+ rows=8,
126
  )
127
 
128
  mo.vstack([basic_prompt, clt_prompt])
 
133
  def _(mo):
134
  """Lab 1: Generate button"""
135
 
136
+ lab1_button = mo.ui.run_button(
137
  label="πŸ”¬ Generate Both Examples",
138
+ kind="success",
139
  )
140
 
141
  mo.md(f"### Compare the Results\n\n{lab1_button}")
 
148
 
149
  lab1_output = None
150
 
151
+ if lab1_button.value and basic_prompt.value and clt_prompt.value:
152
+ with mo.status.spinner(title="Generating both examples..."):
153
+ basic_response = client.responses.parse(
154
+ model="gpt-5.1",
155
+ input=[{"role": "user", "content": basic_prompt.value}],
156
+ text_format=SimpleExample,
157
+ )
158
+ basic_example = basic_response.output_parsed
159
+
160
+ clt_response = client.responses.parse(
161
+ model="gpt-5.1",
162
+ input=[{"role": "user", "content": clt_prompt.value}],
163
+ text_format=SimpleExample,
164
+ )
165
+ clt_example = clt_response.output_parsed
166
+
167
+ lab1_output = mo.vstack(
168
+ [
 
 
 
169
  mo.md("### πŸ“Š Basic Prompt Result"),
170
  mo.md(f"**Problem:** {basic_example.problem}"),
171
  mo.md(f"**Solution:** {basic_example.solution}"),
 
175
  mo.md(f"**Problem:** {clt_example.problem}"),
176
  mo.md(f"**Solution:** {clt_example.solution}"),
177
  mo.md(f"**Explanation:** {clt_example.explanation}"),
178
+ mo.callout(
179
+ mo.md("""
 
180
  ### πŸ’­ What Do You Notice?
181
 
182
  - Which problem is clearer and more specific?
 
184
  - Which explanation helps you understand WHY, not just WHAT?
185
 
186
  **The prompt IS your pedagogical design!**
187
+ """),
188
+ kind="info",
189
+ ),
190
+ ]
191
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
192
 
193
  lab1_output
194
 
 
222
  your_hobby = mo.ui.text(
223
  label="Your hobby or interest:",
224
  placeholder="e.g., photography, cooking, gaming",
225
+ full_width=True,
226
  )
227
 
228
  your_goal = mo.ui.text(
229
  label="What you want to achieve:",
230
  placeholder="e.g., build a recipe app, automate photo editing",
231
+ full_width=True,
232
  )
233
 
234
  mo.vstack([your_hobby, your_goal])
 
239
  def _(mo):
240
  """Lab 2: Generate button"""
241
 
242
+ lab2_button = mo.ui.run_button(
243
  label="βš–οΈ Generate A/B Comparison",
244
+ kind="success",
245
  )
246
 
247
  mo.md(f"{lab2_button}")
 
255
  lab2_output = None
256
 
257
  if lab2_button.value and your_hobby.value and your_goal.value:
258
+ with mo.status.spinner(title="Generating generic and personalized examples..."):
259
+ generic_prompt = (
260
+ "Create a worked example about Python dictionaries for beginners."
261
+ )
262
+ generic_response = client.responses.parse(
263
+ model="gpt-5.1",
264
+ input=[{"role": "user", "content": generic_prompt}],
265
+ text_format=SimpleExample,
266
+ )
267
+ generic_example = generic_response.output_parsed
268
+
269
+ personalized_prompt = f"""Create a worked example about Python dictionaries for beginners.
 
270
 
271
  IMPORTANT: Personalize this example for someone who is interested in {your_hobby.value} and wants to {your_goal.value}.
272
  Use familiar contexts and examples from their interest to make the concept more relatable and reduce cognitive load."""
273
 
274
+ personalized_response = client.responses.parse(
275
+ model="gpt-5.1",
276
+ input=[{"role": "user", "content": personalized_prompt}],
277
+ text_format=SimpleExample,
278
+ )
279
+ personalized_example = personalized_response.output_parsed
280
 
281
+ lab2_output = mo.vstack(
282
+ [
283
  mo.md("### πŸ“– Generic Example (Standard Textbook Style)"),
284
  mo.md(f"**Problem:** {generic_example.problem}"),
285
  mo.md(f"**Solution:** {generic_example.solution}"),
286
  mo.md(f"**Explanation:** {generic_example.explanation}"),
287
  mo.md("---"),
288
+ mo.md(
289
+ f"### ✨ Personalized Example (Your Context: {your_hobby.value})"
290
+ ),
291
  mo.md(f"**Problem:** {personalized_example.problem}"),
292
  mo.md(f"**Solution:** {personalized_example.solution}"),
293
  mo.md(f"**Explanation:** {personalized_example.explanation}"),
294
+ mo.callout(
295
+ mo.md("""
 
296
  ### πŸ’­ How Did That Feel?
297
 
298
  - Which example was more engaging to read?
 
300
  - Could you visualize the personalized example more easily?
301
 
302
  **This is the personalization effect in action!** Familiar contexts reduce extraneous cognitive load.
303
+ """),
304
+ kind="success",
305
+ ),
306
+ ]
307
+ )
 
 
 
 
 
 
 
 
 
 
308
 
309
  lab2_output
310
 
 
351
  "key_insight: str": "Why this approach works",
352
  "code_with_comments: str": "Annotated code",
353
  "common_mistakes: str": "What to avoid",
354
+ "connection_to_real_world: str": "Practical relevance",
355
  }
356
 
357
  field_selector = mo.ui.multiselect(
358
  options=list(field_options.keys()),
359
  label="Select fields for YOUR ideal worked example:",
360
+ value=[
361
+ "problem: str",
362
+ "solution_steps: list[str]",
363
+ "final_answer: str",
364
+ "key_insight: str",
365
+ ],
366
  )
367
 
368
  field_selector
 
386
 
387
  ```python
388
  class WorkedExample:
389
+ {chr(10).join([" " + f for f in field_selector.value])}
390
  ```
391
 
392
  ### πŸ’­ Design Analysis
 
435
  reasoning_effort = mo.ui.dropdown(
436
  options=["none", "low", "medium", "high"],
437
  value="low",
438
+ label="Reasoning Effort (how much thinking?)",
439
  )
440
 
441
  verbosity = mo.ui.dropdown(
442
  options=["low", "medium", "high"],
443
  value="medium",
444
+ label="Verbosity (explanation detail)",
445
  )
446
 
447
  mo.vstack([reasoning_effort, verbosity])
 
451
  @app.cell
452
  def _(mo, reasoning_effort, verbosity):
453
  """Lab 4: Display parameter info"""
454
+ mo.callout(
455
+ mo.md(f"""
456
  **Current Settings:**
457
 
458
  - Reasoning: {reasoning_effort.value}
 
463
  **For experts**: Higher reasoning (better solutions), lower verbosity (concise)
464
 
465
  The "best" parameters depend on your learners!
466
+ """),
467
+ kind="info",
468
+ )
469
  return
470
 
471
 
 
492
 
493
  mo.md("### Generate an Example to Analyze")
494
 
495
+ lab5_button = mo.ui.run_button(
496
  label="🎲 Generate Random Example",
497
+ kind="neutral",
498
  )
499
 
500
  lab5_button
 
505
  def _(SimpleExample, client, lab5_button, mo):
506
  """Lab 5: Generate and display example to analyze"""
507
 
508
+ example_output = None
509
 
510
  if lab5_button.value:
511
+ with mo.status.spinner(title="Generating example..."):
512
+ response = client.responses.parse(
513
+ model="gpt-5.1",
514
+ input=[
515
+ {
516
+ "role": "user",
517
+ "content": "Create a worked example about Python dictionaries for beginners.",
518
+ }
519
+ ],
520
+ text_format=SimpleExample,
521
+ )
522
+ analyze_example = response.output_parsed
523
+
524
+ example_output = mo.vstack(
525
+ [
526
  mo.md("### Example to Analyze"),
527
  mo.md(f"**Problem:** {analyze_example.problem}"),
528
  mo.md(f"**Solution:** {analyze_example.solution}"),
529
  mo.md(f"**Explanation:** {analyze_example.explanation}"),
530
+ ]
531
+ )
 
 
 
 
 
 
 
 
 
 
 
532
 
533
+ example_output
534
 
535
 
536
  @app.cell
537
  def _(mo):
538
  """Lab 5: CLT evaluation checklist"""
539
 
 
 
540
  reduces_extraneous = mo.ui.checkbox(
541
  label="βœ… Reduces extraneous cognitive load (no unnecessary complexity)"
542
  )
 
553
  label="βœ… Is a WORKED example (shows complete solution, not a puzzle)"
554
  )
555
 
556
+ clear_steps = mo.ui.checkbox(label="βœ… Has clear step-by-step progression")
 
 
557
 
558
+ explains_why = mo.ui.checkbox(label="βœ… Explains WHY, not just WHAT")
 
 
559
 
560
+ mo.vstack(
561
+ [
562
+ reduces_extraneous,
563
+ manages_intrinsic,
564
+ optimizes_germane,
565
+ worked_not_problem,
566
+ clear_steps,
567
+ explains_why,
568
+ ]
569
+ )
570
  return (
571
  clear_steps,
572
  explains_why,
 
595
  optimizes_germane.value,
596
  worked_not_problem.value,
597
  clear_steps.value,
598
+ explains_why.value,
599
  ]
600
 
601
  score = sum(1 for v in checklist_values if v)
602
 
603
+ score_output = None
604
+
605
  if score > 0:
606
+ score_output = mo.callout(
607
+ f"""
608
  ### Score: {score}/6
609
 
610
  {"🌟" * score}
 
616
  - 0: Not yet evaluated
617
 
618
  **Key Skill**: You're developing a CLT-grounded critical lens for evaluating AI tools!
619
+ """,
620
+ kind="success" if score >= 5 else "info",
621
+ )
622
+
623
+ score_output
624
 
625
 
626
  @app.cell
app.py.BAK ADDED
@@ -0,0 +1,707 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ruff: noqa
2
+ """
3
+ Interactive Exploration: Cognitive Load Theory & AI-Generated Worked Examples
4
+ Five hands-on labs to understand how to design educational AI tools
5
+
6
+ Built for embedding in Quarto workshop materials
7
+ """
8
+
9
+ import marimo
10
+
11
+ __generated_with = "0.17.8"
12
+ app = marimo.App(width="medium")
13
+
14
+
15
@app.cell
def _():
    """Shared imports; marimo threads the returned names into the other cells."""
    import marimo as mo
    from openai import OpenAI
    from pydantic import BaseModel, Field
    from typing import Literal  # NOTE(review): Literal is imported but not returned — confirm it is needed
    import os
    return BaseModel, Field, OpenAI, mo, os
23
+
24
+
25
+ @app.cell
26
+ def _(mo):
27
+ mo.md("""
28
+ # πŸ§ͺ Interactive Exploration Lab
29
+ ## Designing AI Tools Grounded in Cognitive Load Theory
30
+
31
+ Welcome to the **interactive exploration**! This isn't a complete toolβ€”it's a laboratory
32
+ where you'll experiment with the key design decisions that make AI educational tools effective.
33
+
34
+ ### What You'll Explore
35
+
36
+ Through 5 hands-on labs, you'll discover:
37
+
38
+ 1. 🎨 **Prompt Design Lab** - How prompt engineering shapes learning
39
+ 2. βš–οΈ **Personalization A/B Test** - Feel the cognitive load difference
40
+ 3. πŸ—οΈ **Data Model Designer** - What makes examples "worked"
41
+ 4. πŸŽ›οΈ **Parameter Playground** - Model settings and pedagogy
42
+ 5. πŸ” **CLT Analyzer** - Evaluate examples with a critical lens
43
+
44
+ ### Why This Matters
45
+
46
+ You could just use a tool. But **understanding the design principles** lets you:
47
+ - Adapt tools to your specific domain
48
+ - Critique and improve existing AI educational tools
49
+ - Design new tools grounded in learning science
50
+
51
+ **Ready to explore?** Let's start with the setup.
52
+ """)
53
+ return
54
+
55
+
56
@app.cell
def _(OpenAI, os):
    """Setup: Initialize OpenAI client.

    Reads OPENAI_API_KEY from the process environment. If the variable is
    unset, the client is built with api_key=None and requests fail later,
    at call time rather than here.
    """
    api_key = os.getenv("OPENAI_API_KEY")
    client = OpenAI(api_key=api_key)
    return (client,)
61
+
62
+
63
+ @app.cell
64
+ def _(mo):
65
+ mo.md("""
66
+ ---
67
+
68
+ ## 🎨 Lab 1: Prompt Design Laboratory
69
+
70
+ **Learning Question**: How does prompt engineering affect the quality of worked examples?
71
+
72
+ ### The Experiment
73
+
74
+ You'll see **two prompts** - a basic one and one grounded in CLT principles.
75
+ Try editing them and see how the outputs change.
76
+
77
+ **Key insight**: The prompt IS your pedagogical design encoded in language.
78
+ """)
79
+ return
80
+
81
+
82
@app.cell
def _(BaseModel, Field):
    """Simple data model for Lab 1"""

    class SimpleExample(BaseModel):
        """Minimal structure for prompt comparison"""
        # Three free-text fields; the Field descriptions steer what the
        # model puts into each slot when used with responses.parse().
        problem: str = Field(description="The problem to solve")
        solution: str = Field(description="Step-by-step solution")
        explanation: str = Field(description="Why this approach works")
    return (SimpleExample,)
92
+
93
+
94
+ @app.cell
95
+ def _(mo):
96
+ """Lab 1: Prompt inputs"""
97
+
98
+ mo.md("### Try These Prompts")
99
+
100
+ basic_prompt = mo.ui.text_area(
101
+ label="Basic Prompt (no pedagogical grounding):",
102
+ value="""Create an example problem about Python for loops and solve it step by step.""",
103
+ full_width=True,
104
+ rows=3
105
+ )
106
+
107
+ clt_prompt = mo.ui.text_area(
108
+ label="CLT-Grounded Prompt (reduces cognitive load):",
109
+ value="""Create a worked example about Python for loops.
110
+
111
+ CRITICAL: This is a WORKED EXAMPLE for novice learners.
112
+ - Problem: Clear, specific, uses familiar context (counting items)
113
+ - Solution: Break into small steps, explain each step's purpose
114
+ - Explanation: Connect to WHY this pattern works (not just WHAT it does)
115
+
116
+ Keep cognitive load low: avoid technical jargon, use concrete examples.""",
117
+ full_width=True,
118
+ rows=8
119
+ )
120
+
121
+ mo.vstack([basic_prompt, clt_prompt])
122
+ return basic_prompt, clt_prompt
123
+
124
+
125
@app.cell
def _(mo):
    """Lab 1: Generate button.

    Uses mo.ui.run_button (not mo.ui.button): run_button's .value becomes
    truthy exactly when clicked, reliably re-triggering the dependent
    generate cell. This also matches the other labs' button style.
    """

    lab1_button = mo.ui.run_button(
        label="πŸ”¬ Generate Both Examples",
        kind="success"
    )

    mo.md(f"### Compare the Results\n\n{lab1_button}")
    return (lab1_button,)
136
+
137
+
138
@app.cell
def _(SimpleExample, basic_prompt, client, clt_prompt, lab1_button, mo):
    """Lab 1: Generate and compare both examples.

    Reacts to lab1_button; calls the OpenAI Responses API once per prompt
    and renders the two parsed SimpleExample results stacked, followed by
    a reflection callout. Errors are surfaced inline with a traceback.
    """

    # Cell output placeholder; the bare `lab1_output` at the end displays it.
    lab1_output = None

    # Debug: Show button state
    if lab1_button.value:
        try:
            with mo.status.spinner(title="Generating both examples..."):
                # Generate basic example
                basic_response = client.responses.parse(
                    model="gpt-4o-mini",
                    input=[{"role": "user", "content": basic_prompt.value}],
                    text_format=SimpleExample
                )
                basic_example = basic_response.output_parsed

                # Generate CLT-grounded example
                clt_response = client.responses.parse(
                    model="gpt-4o-mini",
                    input=[{"role": "user", "content": clt_prompt.value}],
                    text_format=SimpleExample
                )
                clt_example = clt_response.output_parsed

                # Side-by-side (stacked) rendering of both parsed examples.
                _comparison = mo.vstack([
                    mo.md("### πŸ“Š Basic Prompt Result"),
                    mo.md(f"**Problem:** {basic_example.problem}"),
                    mo.md(f"**Solution:** {basic_example.solution}"),
                    mo.md(f"**Explanation:** {basic_example.explanation}"),
                    mo.md("---"),
                    mo.md("### πŸŽ“ CLT-Grounded Prompt Result"),
                    mo.md(f"**Problem:** {clt_example.problem}"),
                    mo.md(f"**Solution:** {clt_example.solution}"),
                    mo.md(f"**Explanation:** {clt_example.explanation}"),
                ])

                _reflection = mo.callout(mo.md("""
### πŸ’­ What Do You Notice?

- Which problem is clearer and more specific?
- Which solution breaks down steps better?
- Which explanation helps you understand WHY, not just WHAT?

**The prompt IS your pedagogical design!**
"""), kind="info")

                lab1_output = mo.vstack([_comparison, _reflection])

        except Exception as e:
            # Broad catch is deliberate here: any API/parse failure is shown
            # to the learner with actionable fixes instead of crashing the cell.
            import traceback
            lab1_output = mo.callout(
                mo.md(f"""
### ⚠️ Error Generating Examples

**Error type:** {type(e).__name__}

**Error message:** {str(e)}

**Full traceback:**
```
{traceback.format_exc()}
```

**Common fixes:**
- Make sure you have a `.env` file with `OPENAI_API_KEY=sk-...`
- Check that your API key is valid
- Ensure you have API credits available
"""),
                kind="danger"
            )
    else:
        # Show this when button hasn't been clicked yet
        lab1_output = mo.md("_Click the button above to generate examples_")

    lab1_output
215
+
216
+
217
+ @app.cell
218
+ def _(mo):
219
+ mo.md("""
220
+ ---
221
+
222
+ ## βš–οΈ Lab 2: Personalization A/B Test
223
+
224
+ **Learning Question**: Can you FEEL the difference in cognitive load?
225
+
226
+ ### The Experiment
227
+
228
+ You'll enter YOUR context (hobby, goal), then see the SAME concept taught:
229
+ - **Generic**: Standard textbook style
230
+ - **Personalized**: Using your context
231
+
232
+ **Hypothesis**: The personalized version should feel more engaging and easier to process.
233
+ """)
234
+ return
235
+
236
+
237
@app.cell
def _(mo):
    """Lab 2: Context inputs.

    Collects the learner's hobby and goal; both must be non-empty before
    the Lab 2 generate cell will run (it checks their .value).
    """

    mo.md("### Your Context")

    your_hobby = mo.ui.text(
        label="Your hobby or interest:",
        placeholder="e.g., photography, cooking, gaming",
        full_width=True
    )

    your_goal = mo.ui.text(
        label="What you want to achieve:",
        placeholder="e.g., build a recipe app, automate photo editing",
        full_width=True
    )

    mo.vstack([your_hobby, your_goal])
    return your_hobby, your_goal
257
+
258
+
259
+ @app.cell
260
+ def _(mo):
261
+ """Lab 2: Generate button"""
262
+
263
+ lab2_button = mo.ui.button(
264
+ label="βš–οΈ Generate A/B Comparison",
265
+ kind="success"
266
+ )
267
+
268
+ mo.md(f"{lab2_button}")
269
+ return (lab2_button,)
270
+
271
+
272
@app.cell
def _(SimpleExample, client, lab2_button, mo, your_goal, your_hobby):
    """Lab 2: Generate A/B comparison.

    Produces a generic worked example and one personalized from the
    learner's hobby/goal, then stacks them with a reflection callout.
    """

    lab2_output = None

    # Only run once the button is clicked AND both context fields are filled.
    if lab2_button.value and your_hobby.value and your_goal.value:
        try:
            with mo.status.spinner(title="Generating generic and personalized examples..."):
                # Generic example
                generic_prompt = "Create a worked example about Python dictionaries for beginners."
                generic_response = client.responses.parse(
                    model="gpt-4o-mini",
                    input=[{"role": "user", "content": generic_prompt}],
                    text_format=SimpleExample
                )
                generic_example = generic_response.output_parsed

                # Personalized example (interpolates the learner's context)
                personalized_prompt = f"""Create a worked example about Python dictionaries for beginners.

IMPORTANT: Personalize this example for someone who is interested in {your_hobby.value} and wants to {your_goal.value}.
Use familiar contexts and examples from their interest to make the concept more relatable and reduce cognitive load."""

                personalized_response = client.responses.parse(
                    model="gpt-4o-mini",
                    input=[{"role": "user", "content": personalized_prompt}],
                    text_format=SimpleExample
                )
                personalized_example = personalized_response.output_parsed

                # Stacked A/B rendering: generic first, then personalized.
                _comparison = mo.vstack([
                    mo.md("### πŸ“– Generic Example (Standard Textbook Style)"),
                    mo.md(f"**Problem:** {generic_example.problem}"),
                    mo.md(f"**Solution:** {generic_example.solution}"),
                    mo.md(f"**Explanation:** {generic_example.explanation}"),
                    mo.md("---"),
                    mo.md(f"### ✨ Personalized Example (Your Context: {your_hobby.value})"),
                    mo.md(f"**Problem:** {personalized_example.problem}"),
                    mo.md(f"**Solution:** {personalized_example.solution}"),
                    mo.md(f"**Explanation:** {personalized_example.explanation}"),
                ])

                _reflection = mo.callout(mo.md("""
### πŸ’­ How Did That Feel?

- Which example was more engaging to read?
- Which one felt easier to process mentally?
- Could you visualize the personalized example more easily?

**This is the personalization effect in action!** Familiar contexts reduce extraneous cognitive load.
"""), kind="success")

                lab2_output = mo.vstack([_comparison, _reflection])

        except Exception as e:
            # Broad catch is deliberate: surface API failures to the learner
            # inline instead of crashing the notebook cell.
            lab2_output = mo.callout(
                mo.md(f"""
### ⚠️ Error Generating Examples

**Error:** {str(e)}

Check your `.env` file and API key.
"""),
                kind="danger"
            )

    lab2_output
340
+
341
+
342
@app.cell
def _(mo):
    # Lab 3 introduction: frames the data-model design exercise below.
    mo.md("""
    ---

    ## πŸ—οΈ Lab 3: Data Model Designer

    **Learning Question**: What makes a worked example "worked"?

    ### The Experiment

    Design the data structure for a worked example. What fields do you need?
    Think about:
    - What cognitive load principle does each field support?
    - How does structure guide the AI's output?

    **Current Model** (you can modify this in your mind):
    ```python
    class WorkedExample:
        problem: str # What they need to solve
        solution_steps: list # Broken into chunks (why a list?)
        final_answer: str # Clear conclusion
        key_insight: str # Schema activation
    ```
    """)
    return
368
+
369
+
370
@app.cell
def _(mo):
    """Lab 3: Interactive field selector.

    Lets the learner pick the fields of their ideal worked-example data
    model. Returns the multiselect so downstream cells can analyze it.
    """

    # NOTE(review): only the keys of this mapping are shown as options;
    # the description values are currently unused -- kept as inline docs.
    field_options = {
        "problem: str": "The problem statement",
        "solution_steps: list[str]": "Steps as a list (chunking!)",
        "solution: str": "Solution as one big block",
        "final_answer: str": "Explicit conclusion",
        "key_insight: str": "Why this approach works",
        "code_with_comments: str": "Annotated code",
        "common_mistakes: str": "What to avoid",
        "connection_to_real_world: str": "Practical relevance"
    }

    field_selector = mo.ui.multiselect(
        options=list(field_options.keys()),
        label="Select fields for YOUR ideal worked example:",
        value=["problem: str", "solution_steps: list[str]", "final_answer: str", "key_insight: str"]
    )

    # Stack the heading with the widget: a bare mo.md(...) mid-cell is
    # discarded because only a cell's last expression is rendered.
    mo.vstack([mo.md("### Which Fields Support Learning?"), field_selector])
    return (field_selector,)
395
+
396
+
397
@app.cell
def _(field_selector, mo):
    """Lab 3: Display selection count"""
    # Name the count first, then render it -- same output, clearer intent.
    _n_selected = len(field_selector.value)
    mo.md(f"**You selected {_n_selected} fields**")
    return
402
+
403
+
404
@app.cell
def _(field_selector, mo):
    """Lab 3: Analysis.

    Renders the learner's chosen fields as a class definition plus a
    CLT-based critique of those choices. Shows nothing until at least
    one field is selected.
    """

    # NOTE(review): the mo.md(...) sits inside the `if`, not as the cell's
    # final top-level expression -- confirm marimo renders it as intended.
    if field_selector.value:
        # chr(10) is "\n": f-string expressions (pre-3.12) cannot contain
        # a backslash, hence the chr(10).join workaround for line breaks.
        mo.md(f"""
    ### Your Selected Structure

    ```python
    class WorkedExample:
    {chr(10).join([' ' + f for f in field_selector.value])}
    ```

    ### πŸ’­ Design Analysis

    **Key Questions:**
    - Did you choose `solution_steps: list[str]` or `solution: str`?
    - **List = chunking** (reduces cognitive load)
    - **String = one big block** (higher load for novices)

    - Did you include `key_insight`?
    - Helps with **schema activation** (connecting to prior knowledge)

    - Did you include `common_mistakes`?
    - **Desirable difficulty**: learning from contrasts

    **The design IS the pedagogy**. Each field choice implements a CLT principle.
    """)
    return
433
+
434
+
435
@app.cell
def _(mo):
    # Lab 4 introduction: conceptual overview of model parameters.
    mo.md("""
    ---

    ## πŸŽ›οΈ Lab 4: Parameter Playground

    **Learning Question**: How do model parameters affect pedagogical quality?

    ### The Experiment

    GPT-5.1 has parameters like `reasoning.effort`. Try different settings and see
    how they affect example quality.

    **Note**: This lab is conceptual---showing the parameters you COULD control.
    """)
    return
452
+
453
+
454
@app.cell
def _(mo):
    """Lab 4: Parameter dropdowns.

    Two conceptual controls (reasoning effort, verbosity); their values
    are echoed by the next cell rather than sent to the API.
    """

    reasoning_effort = mo.ui.dropdown(
        options=["none", "low", "medium", "high"],
        value="low",
        label="Reasoning Effort (how much thinking?)"
    )

    verbosity = mo.ui.dropdown(
        options=["low", "medium", "high"],
        value="medium",
        label="Verbosity (explanation detail)"
    )

    # Stack the heading with the controls: a bare mo.md(...) mid-cell is
    # discarded because only a cell's last expression is rendered.
    mo.vstack([mo.md("### Adjust Parameters"), reasoning_effort, verbosity])
    return reasoning_effort, verbosity
474
+
475
+
476
@app.cell
def _(mo, reasoning_effort, verbosity):
    """Lab 4: Display parameter info.

    Echoes the current dropdown selections with guidance on which
    settings suit novices vs. experts.
    """
    mo.callout(mo.md(f"""
    **Current Settings:**

    - Reasoning: {reasoning_effort.value}
    - Verbosity: {verbosity.value}

    **For novices**: Low reasoning (fast), medium-high verbosity (detailed explanations)

    **For experts**: Higher reasoning (better solutions), lower verbosity (concise)

    The "best" parameters depend on your learners!
    """), kind="info")
    return
492
+
493
+
494
@app.cell
def _(mo):
    # Lab 5 introduction: frames the CLT evaluation exercise below.
    mo.md("""
    ---

    ## πŸ” Lab 5: CLT Analyzer

    **Learning Question**: Can you evaluate examples using CLT principles?

    ### The Experiment

    Read an AI-generated example and evaluate it against CLT criteria.
    This develops your **critical lens** for educational AI.
    """)
    return
509
+
510
+
511
@app.cell
def _(mo):
    """Lab 5: Generate button.

    Uses mo.ui.run_button (not mo.ui.button) so .value is a boolean that
    flips to True on click -- consistent with the other labs. The heading
    is stacked with the button so it actually renders (only a cell's last
    expression is displayed).
    """

    lab5_button = mo.ui.run_button(
        label="🎲 Generate Random Example",
        kind="neutral"
    )

    mo.vstack([mo.md("### Generate an Example to Analyze"), lab5_button])
    return (lab5_button,)
524
+
525
+
526
@app.cell
def _(SimpleExample, client, lab5_button, mo):
    """Lab 5: Generate and display example to analyze.

    Requests one worked example from the OpenAI Responses API, parses it
    into SimpleExample, and renders it for the learner to evaluate with
    the checklist below. On API failure, renders an error callout.
    """

    # Rendered at the end of the cell; stays None until the button fires.
    lab5_output = None

    if lab5_button.value:
        try:
            with mo.status.spinner(title="Generating example..."):
                response = client.responses.parse(
                    model="gpt-4o-mini",
                    input=[{"role": "user", "content": "Create a worked example about Python dictionaries for beginners."}],
                    text_format=SimpleExample
                )
                analyze_example = response.output_parsed

                lab5_output = mo.vstack([
                    mo.md("### Example to Analyze"),
                    mo.md(f"**Problem:** {analyze_example.problem}"),
                    mo.md(f"**Solution:** {analyze_example.solution}"),
                    mo.md(f"**Explanation:** {analyze_example.explanation}"),
                ])

        except Exception as e:
            # Broad catch is deliberate: any API/parse failure becomes a
            # visible callout instead of crashing the notebook cell.
            lab5_output = mo.callout(
                mo.md(f"""
    ### ⚠️ Error Generating Example

    **Error:** {str(e)}

    Check your `.env` file and API key.
    """),
                kind="danger"
            )

    # Last expression of the cell = rendered output (marimo convention).
    lab5_output
563
+
564
@app.cell
def _(mo):
    """Lab 5: CLT evaluation checklist.

    Six yes/no criteria the learner ticks while reading the generated
    example; the next cell turns them into a score out of six.
    """

    mo.md("### Evaluate Using CLT Principles")

    def _criterion(text):
        # Tiny factory so each criterion below reads as a single line.
        return mo.ui.checkbox(label=text)

    reduces_extraneous = _criterion("βœ… Reduces extraneous cognitive load (no unnecessary complexity)")
    manages_intrinsic = _criterion("βœ… Manages intrinsic load (breaks problem into chunks)")
    optimizes_germane = _criterion("βœ… Optimizes germane load (helps build schemas/patterns)")
    worked_not_problem = _criterion("βœ… Is a WORKED example (shows complete solution, not a puzzle)")
    clear_steps = _criterion("βœ… Has clear step-by-step progression")
    explains_why = _criterion("βœ… Explains WHY, not just WHAT")

    mo.vstack([
        reduces_extraneous,
        manages_intrinsic,
        optimizes_germane,
        worked_not_problem,
        clear_steps,
        explains_why,
    ])
    return (
        clear_steps,
        explains_why,
        manages_intrinsic,
        optimizes_germane,
        reduces_extraneous,
        worked_not_problem,
    )
610
+
611
+
612
@app.cell
def _(
    clear_steps,
    explains_why,
    manages_intrinsic,
    mo,
    optimizes_germane,
    reduces_extraneous,
    worked_not_problem,
):
    """Lab 5: Scoring.

    Counts how many checklist criteria are ticked and renders a score
    callout once at least one box is checked.
    """

    # Checkbox values are booleans; summing their truthiness gives the score.
    _checks = (
        reduces_extraneous.value,
        manages_intrinsic.value,
        optimizes_germane.value,
        worked_not_problem.value,
        clear_steps.value,
        explains_why.value,
    )

    score = sum(bool(_c) for _c in _checks)

    if score > 0:
        mo.callout(f"""
    ### Score: {score}/6

    {"🌟" * score}

    **Interpretation:**
    - 5-6: Excellent pedagogical design
    - 3-4: Good, but room for improvement
    - 1-2: Needs significant pedagogical revision
    - 0: Not yet evaluated

    **Key Skill**: You're developing a CLT-grounded critical lens for evaluating AI tools!
    """, kind="success" if score >= 5 else "info")
    return
650
+
651
+
652
@app.cell
def _(mo):
    # Conclusion: recaps the five labs and points to next steps.
    mo.md("""
    ---

    ## 🎯 Conclusion: From Exploration to Creation

    ### What You Discovered

    Through these 5 labs, you explored:

    1. βœ… **Prompts encode pedagogy** - Design drives outputs
    2. βœ… **Personalization reduces load** - Context matters
    3. βœ… **Structure shapes learning** - Data models are pedagogical choices
    4. βœ… **Parameters affect quality** - Settings have learning implications
    5. βœ… **Critical evaluation is a skill** - You can assess AI tools with CLT

    ### What's Next?

    Now that you understand the **design principles**, you're ready to:

    **Option 1: Build Your Own Tool**
    - Use the simplified code from the workshop
    - Apply these design principles
    - Deploy to HuggingFace Spaces

    **Option 2: Use the Complete Tool**
    - [Try the full Worked Example Weaver](https://huggingface.co/spaces/virtuelleakademie/worked-example-weaver-app)
    - See all 5 principles integrated

    **Option 3: Adapt to Your Domain**
    - Take the template
    - Add your concepts
    - Customize for your learners

    ### The Big Idea

    AI tools for education should be **grounded in learning science**, not just technically impressive.

    You now have:
    - 🧠 The theoretical foundation (CLT)
    - πŸ”¬ Hands-on experience (these labs)
    - πŸ› οΈ The technical skills (simple OpenAI API)
    - 🎯 A critical lens (can evaluate tools)

    **Go build something that helps people learn!**

    ---

    *Created by the [Virtual Academy](https://virtuelleakademie.ch/), BFH*
    """)
    return
704
+
705
+
706
+ if __name__ == "__main__":
707
+ app.run()