Decision-Fish committed on
Commit
8a40579
Β·
verified Β·
1 Parent(s): aa39c2d

Upload 7 files

Browse files
Files changed (7) hide show
  1. app.py +99 -348
  2. cat_universal_prompt.txt +101 -0
  3. module01.txt +13 -12
  4. module04.txt +23 -0
  5. module06.txt +14 -0
  6. module08.txt +50 -0
  7. module_template.txt +16 -0
app.py CHANGED
@@ -1,400 +1,151 @@
1
  import os
2
  import gradio as gr
3
- import re
4
  from pathlib import Path
5
-
6
- UNIVERSAL_PROMPT_PATH = "CAT_universal_prompt.txt"
7
- MODULE_DIR = "modules" # <-- now using /modules subfolder
8
-
9
  from openai import OpenAI
10
  from dotenv import load_dotenv
11
- load_dotenv()
12
- client = OpenAI()
13
-
14
- # Type aliases
15
  from typing import List, cast
16
  from openai.types.chat import ChatCompletionMessageParam
17
 
 
 
 
 
 
18
 
19
  def call_model(system_prompt: str, history: list[dict[str, str]]) -> str:
20
- # Build as simple dicts first
21
  msgs: list[dict[str, str]] = [{"role": "system", "content": system_prompt}]
22
  for m in history:
23
  role = m.get("role")
24
  content = m.get("content")
25
  if role in ("user", "assistant") and isinstance(content, str):
26
  msgs.append({"role": role, "content": content})
27
-
28
- # Cast once at the call site to satisfy the SDK types
29
  typed_msgs = cast(List[ChatCompletionMessageParam], msgs)
30
-
31
  resp = client.chat.completions.create(
32
  model="gpt-4o-mini",
33
  messages=typed_msgs,
34
- temperature=0.4,
35
  )
36
  return resp.choices[0].message.content or ""
37
 
38
-
39
  def load_text(path: str) -> str:
 
40
  with open(path, "r", encoding="utf-8") as f:
41
  return f.read()
42
 
43
- def assemble_prompt(universal_prompt_text: str, module_text: str) -> str:
44
- def extract(label: str) -> str:
45
- marker = label + ":"
46
- start = module_text.find(marker)
47
- if start == -1:
48
- return ""
49
- start += len(marker)
50
- next_markers = ["\nLEARNING OBJECTIVES:", "\nRUBRIC:", "\nMODULE NAME:"]
51
- end_positions = [module_text.find(m, start) for m in next_markers if module_text.find(m, start) != -1]
52
- end = min(end_positions) if end_positions else len(module_text)
53
- return module_text[start:end].strip()
54
-
55
- learning_objectives = extract("LEARNING OBJECTIVES")
56
- rubric = extract("RUBRIC")
 
 
 
57
 
58
- prompt = universal_prompt_text.replace("{LEARNING_OBJECTIVES}", learning_objectives)
59
- prompt = prompt.replace("{RUBRIC}", rubric)
 
 
 
 
 
 
 
 
 
60
  return prompt
61
 
62
  def init_state():
 
63
  return {
64
- "assembled_prompt": "",
65
- "history": [],
66
- "mode": "roleplay",
67
- "mentor_step": 0,
68
- "student_name": ""
69
  }
70
 
71
- def start_session(module_file, student_name=""):
 
72
  state = init_state()
73
- state["student_name"] = student_name
74
-
75
- universal = load_text(UNIVERSAL_PROMPT_PATH)
76
- module_text = load_text(Path(MODULE_DIR) / module_file)
77
-
78
- # Parse the full RUBRIC section once and keep a structured copy
79
- state["rubric_items"] = parse_rubric_from_module(module_text)
80
- print(f"[CAT] Parsed {len(state['rubric_items'])} rubric items for this module.")
81
-
82
- # Personalize lightly with the student's first name
83
- name_hint = (
84
- f"\n\n[Student first name: {student_name}. Use it naturally once in the opening; don’t overuse.]"
85
- if student_name else ""
86
- )
87
- state["assembled_prompt"] = assemble_prompt(universal, module_text) + name_hint
88
-
89
- state["history"].append({"role": "system", "content": state["assembled_prompt"]})
90
- opening = call_model(state["assembled_prompt"], state["history"])
91
- state["history"].append({"role": "assistant", "content": opening})
92
- return state, state["history"]
93
 
94
  def chat(user_msg, state):
 
95
  if not user_msg.strip():
96
  return "", state["history"], state
97
-
98
- # Shortcut: typing "grade" acts like pressing the Assess button
99
- if user_msg.strip().lower() == "grade":
100
- hist, st = assess_fn(state)
101
- return "", hist, st
102
-
103
- # If the scene is finished, ignore further input and return cleanly
104
- if state.get("mode") == "done":
105
- return "", state["history"], state
106
 
107
- # If we've left roleplay (e.g., pressed Assess), stop running roleplay code
108
- if state.get("mode") != "roleplay":
109
- return "", state["history"], state
110
-
111
  state["history"].append({"role": "user", "content": user_msg})
112
-
113
- if state["mode"] == "roleplay":
114
- reply = call_model(state["assembled_prompt"], state["history"])
115
- state["history"].append({"role": "assistant", "content": reply})
116
- return "", state["history"], state
117
-
118
- if state["mode"] == "mentor":
119
- # Step 1: general intro (no assumption of tools)
120
- if state.get("mentor_step", 0) == 0:
121
- eval_intro = (
122
- "Before we wrap up: name two specific concepts, tools, or frameworks you used in this scenario, "
123
- "and in one short sentence each say how you applied them. If you didn’t use any, name two insights "
124
- "you learned and how you would apply them next time."
125
- )
126
- state["history"].append({"role": "assistant", "content": eval_intro})
127
- state["mentor_step"] = 1
128
- return "", state["history"], state
129
-
130
- # Step 2: concise rubric-based evaluation
131
- else:
132
- # Concise rubric-based evaluation (hidden): pass instruction via system_prompt
133
- eval_request = (
134
- "Evaluate the student's performance using the module rubric. Provide these sections: "
135
- "Overall rating (Unsatisfactory, Satisfactory, or Excellent) with a one-sentence justification; "
136
- "Career competencies; Uniquely human capacities; Argument analysis; Ethical frameworks; ESG awareness; "
137
- "Application; Interaction quality; Strength; Area to improve; Advice for next time; Fictional consequence. "
138
- "Quote at least one student phrase. Keep the whole evaluation under 180 words."
139
- )
140
- try:
141
- reply = call_model(
142
- state["assembled_prompt"] + "\n\n" + eval_request,
143
- state["history"] # call_model ignores system entries in history by design
144
- )
145
- except Exception as e:
146
- # Never let chat return None; show a friendly error and allow retry
147
- state["history"].append({
148
- "role": "assistant",
149
- "content": f"[Assessment error: {e}] Please press Assess again in a few seconds."
150
- })
151
- return "", state["history"], state
152
-
153
- state["history"].append({"role": "assistant", "content": reply})
154
- state["mode"] = "done"
155
- return "", state["history"], state
156
- # Safety net: ensure consistent return shape if a future branch falls through
157
- return "", state["history"], state
158
-
159
- RUBRIC_FALLBACK = [
160
- "States the decision and information needs clearly",
161
- "Applies the appropriate tool or framework correctly",
162
- "Shows steps or calculations and a decision rule, tool, or framework",
163
- "Justifies the conclusion and notes at least one limitation or tradeoff",
164
- ]
165
- # --- Helpers for rubric evaluation JSON ---
166
- import json
167
-
168
- def _safe_json_loads(s: str):
169
- try:
170
- return json.loads(s)
171
- except Exception:
172
- # crude but robust: try to extract {...} block if the model wrapped it
173
- start = s.find("{")
174
- end = s.rfind("}")
175
- if start != -1 and end != -1 and end > start:
176
- try:
177
- return json.loads(s[start:end+1])
178
- except Exception:
179
- return None
180
- return None
181
-
182
- def _format_assessment_readable(assess_obj):
183
- """
184
- assess_obj schema:
185
- {
186
- "criteria": [{"id": "...","level":"no|partial|full","points":0|0.5|1,"evidence":"..."}],
187
- "total_points": float,
188
- "max_points": float,
189
- "summary": "≀180 words narrative"
190
- }
191
- """
192
- if not isinstance(assess_obj, dict) or "criteria" not in assess_obj:
193
- return "[Assessment parsing error: invalid JSON]"
194
- lines = []
195
- total = assess_obj.get("total_points", 0)
196
- maxp = assess_obj.get("max_points", 0)
197
- lines.append(f"Score: {total:g}/{maxp:g}")
198
- lines.append("")
199
- for c in assess_obj["criteria"]:
200
- lid = c.get("id","?")
201
- level = c.get("level","?")
202
- pts = c.get("points","?")
203
- ev = c.get("evidence","")
204
- lines.append(f"- {lid}: {level} ({pts}) β€” {ev}")
205
- if assess_obj.get("summary"):
206
- lines.append("")
207
- lines.append(assess_obj["summary"])
208
- return "\n".join(lines)
209
- # --- end helpers ---
210
-
211
- # --- Assess: rubric-based, no/partial/full per criterion (JSON output) ---
212
- def assess_fn(state):
213
- """
214
- One press:
215
- 1) Adds END OF SCENE visibly once.
216
- 2) Runs a rubric-based evaluation using a dedicated evaluator system prompt.
217
- Output schema is strict JSON with per-criterion no/partial/full.
218
- If already done: no-op.
219
- Returns: (chat_history, state)
220
- """
221
- # If already finalized, do nothing
222
- if state.get("mode") == "done":
223
- return state["history"], state
224
-
225
- # 1) Show the scene break once
226
- if not (
227
- state["history"]
228
- and state["history"][-1].get("role") == "assistant"
229
- and state["history"][-1].get("content", "").strip() == "END OF SCENE"
230
- ):
231
- state["history"].append({"role": "assistant", "content": "END OF SCENE"})
232
-
233
- # Enter mentor mode and skip any intro
234
- state["mode"] = "mentor"
235
- state["mentor_step"] = 2
236
-
237
- # 2) Build rubric payload from the current module; fall back only if needed
238
- raw_items = state.get("rubric_items") or []
239
- if not isinstance(raw_items, list) or len(raw_items) == 0:
240
- raw_items = RUBRIC_FALLBACK[:] # last resort
241
-
242
- rubric = []
243
- for i, item in enumerate(raw_items, start=1):
244
- rid = item.get("id") if isinstance(item, dict) else str(item)
245
- rubric.append({"id": rid or f"Criterion {i}"})
246
-
247
- # 3) Dedicated evaluator prompt: yes/no evidence per item (simple, deterministic)
248
- assessor_system = (
249
- "You are the Evaluator for the Conversational Assessment Tool (CAT).\n"
250
- "For EACH rubric item, decide if the student provided reasonable, college-level evidence.\n"
251
- "Rules:\n"
252
- "- 'meets' = true only if the student shows specific, relevant reasoning/evidence for that item.\n"
253
- "- Otherwise 'meets' = false.\n"
254
- "Return STRICT JSON ONLY (no prose outside JSON):\n"
255
- "{\n"
256
- ' "results": [\n'
257
- ' {"id": "<criterion id>", "meets": true|false, "evidence": "<short quote or brief reason>"}\n'
258
- " ]\n"
259
- "}"
260
- )
261
-
262
- # 4) Provide the actual rubric text to the model as context
263
- module_rubric_text = "\n".join(f"- {c['id']}" for c in rubric)
264
- history_for_eval = list(state["history"]) + [
265
- {"role": "assistant", "content": "Evaluate against these rubric items:\n" + module_rubric_text}
266
- ]
267
-
268
  try:
269
- model_raw = call_model(assessor_system, history_for_eval)
270
-
271
- # Parse and normalize
272
- data = _safe_json_loads(model_raw)
273
- if not data or "results" not in data or not isinstance(data["results"], list):
274
- raise ValueError("Invalid evaluator JSON")
275
-
276
- # Align results to rubric order
277
- results = []
278
- by_id = {str(r.get("id", "")): r for r in data["results"]}
279
- for c in rubric:
280
- cid = c["id"]
281
- r = by_id.get(cid, {})
282
- meets = bool(r.get("meets") is True)
283
- evidence = str(r.get("evidence") or "")
284
- results.append({"id": cid, "meets": meets, "evidence": evidence})
285
-
286
- met = sum(1 for r in results if r["meets"])
287
- total = len(results)
288
- pct = (met / total) if total else 0.0
289
-
290
- if total > 0 and met == total:
291
- overall = "Full Credit"
292
- elif pct >= 0.50:
293
- overall = "Partial Credit"
294
- else:
295
- overall = "No Credit"
296
-
297
- # Render readable output
298
- lines = [f"Overall: {overall}", f"Met: {met}/{total} ({round(pct*100)}%)", ""]
299
- for r in results:
300
- mark = "βœ…" if r["meets"] else "❌"
301
- ev = f" β€” {r['evidence']}" if r["evidence"] else ""
302
- lines.append(f"- {mark} {r['id']}{ev}")
303
- readable = "\n".join(lines)
304
-
305
  except Exception as e:
306
- readable = f"[Assessment error: {e}]"
307
-
308
- state["history"].append({"role": "assistant", "content": readable})
309
- state["mode"] = "done"
310
- return state["history"], state
311
-
312
- # --- end Assess ---
313
-
314
- def parse_rubric_from_module(module_text: str):
315
- """
316
- Extracts the full RUBRIC section from a module text file and returns a list of items.
317
- - Captures everything after a line that says 'RUBRIC' (with optional colon)
318
- until the next ALL-CAPS header or file end.
319
- - Accepts bullets like -, *, β€’, or 1), 1., etc.
320
- - Falls back to RUBRIC_FALLBACK if nothing is found.
321
- """
322
- if not module_text:
323
- return RUBRIC_FALLBACK[:]
324
-
325
- # 1) Slice out the RUBRIC block
326
- block_re = re.compile(
327
- r'^\s*RUBRIC\s*:?\s*$([\s\S]*?)(?=^\s*[A-Z][A-Z\s/&\-]{3,}\s*:?\s*$|^\Z)',
328
- re.MULTILINE
329
- )
330
- m = block_re.search(module_text)
331
- if not m:
332
- return RUBRIC_FALLBACK[:]
333
- block = m.group(1).strip()
334
-
335
- # 2) Collect bullet-like lines
336
- items = []
337
- for line in block.splitlines():
338
- # Keep original text but trim whitespace
339
- raw = line.strip()
340
- if not raw:
341
- continue
342
- # Match common bullet or numbered list starters
343
- if re.match(r'^(\-|\*|β€’|\d+[\.\)]|\([a-z]\))\s+', raw, re.IGNORECASE):
344
- # strip the bullet prefix
345
- cleaned = re.sub(r'^(\-|\*|β€’|\d+[\.\)]|\([a-z]\))\s+', '', raw, flags=re.IGNORECASE).strip()
346
- if cleaned:
347
- items.append(cleaned)
348
- else:
349
- # Some rubrics are paragraph-style; treat non-empty lines as items
350
- # but avoid obvious section labels like "Notes:" inside the block
351
- if not re.match(r'^\s*(notes?|example|weight|scale)\s*:?\s*$', raw, re.IGNORECASE):
352
- items.append(raw)
353
-
354
- # 3) Deduplicate while preserving order
355
- seen = set()
356
- deduped = []
357
- for it in items:
358
- if it not in seen:
359
- seen.add(it)
360
- deduped.append(it)
361
-
362
- return deduped or RUBRIC_FALLBACK[:]
363
 
364
- with gr.Blocks(title="CAT (MVP)") as demo:
365
- gr.Markdown("## 😼Conversational Assessment Tool (CAT) β€” MVP")
 
 
366
  with gr.Row():
367
- module_file = gr.Dropdown(
368
- label="Select Module File",
369
- choices=[p.name for p in sorted(Path(MODULE_DIR).glob("module*.txt"))],
370
  value="module01.txt",
371
  interactive=True
372
  )
373
- name_tb = gr.Textbox(label="Your first name", placeholder="e.g., Maya", value="", interactive=True)
374
- start_btn = gr.Button("Start") # fine to keep inside the row (optional)
375
-
376
- chatbot = gr.Chatbot(label="CAT Conversation", type="messages")
377
- user_in = gr.Textbox(label="Your message", placeholder="Type here and press Enter")
378
  state = gr.State(init_state())
379
- assess_btn = gr.Button("Assess", variant="primary")
380
-
381
- def _start(module_name, student_name):
382
- student_name = student_name.strip()
383
- if not student_name:
384
- # Return a valid state object plus a warning message in the chat
385
- return init_state(), [{"role": "assistant", "content": "⚠ Please enter your first name before starting."}]
386
- st, hist = start_session(module_name, student_name)
387
- return st, hist
388
-
389
- start_btn.click(_start, [module_file, name_tb], [state, chatbot])
390
- user_in.submit(chat, [user_in, state], [user_in, chatbot, state])
391
- # Clicking Assess triggers the mentor/evaluator flow
392
- assess_btn.click(
393
- fn=assess_fn,
394
- inputs=[state],
395
- outputs=[chatbot, state]
396
- )
397
-
398
 
399
  if __name__ == "__main__":
400
  demo.launch()
 
1
  import os
2
  import gradio as gr
 
3
  from pathlib import Path
 
 
 
 
4
  from openai import OpenAI
5
  from dotenv import load_dotenv
 
 
 
 
6
  from typing import List, cast
7
  from openai.types.chat import ChatCompletionMessageParam
8
 
9
load_dotenv()
client = OpenAI()

# NOTE(review): this commit uploads "cat_universal_prompt.txt" (lowercase); the
# previous CamelCase path only resolves on a case-insensitive filesystem and
# raises FileNotFoundError on Linux hosts such as Hugging Face Spaces.
# Confirm no CamelCase copy of the file exists before relying on this.
UNIVERSAL_PROMPT_PATH = "cat_universal_prompt.txt"
MODULE_DIR = "modules"
14
 
15
def call_model(system_prompt: str, history: list[dict[str, str]]) -> str:
    """Send the system prompt plus the prior turns to OpenAI and return the reply text."""
    # Lead with the system message, then replay only well-formed user/assistant turns.
    messages: list[dict[str, str]] = [{"role": "system", "content": system_prompt}]
    messages.extend(
        {"role": turn["role"], "content": turn["content"]}
        for turn in history
        if turn.get("role") in ("user", "assistant") and isinstance(turn.get("content"), str)
    )

    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=cast(List[ChatCompletionMessageParam], messages),
        temperature=0.7,
    )
    return response.choices[0].message.content or ""
32
 
 
33
  def load_text(path: str) -> str:
34
+ """Load text file"""
35
  with open(path, "r", encoding="utf-8") as f:
36
  return f.read()
37
 
38
def extract_section(text: str, label: str) -> str:
    """Return the text between ``<label>:`` and the next known section header.

    Args:
        text: Raw module-file contents.
        label: Section name without the trailing colon, e.g. "KEY POINTS".

    Returns:
        The stripped section body, or "" when the label is absent.
    """
    # Normalize line endings (handles Windows \r\n and Unix \n)
    text = text.replace('\r\n', '\n')

    marker = label + ":"
    start = text.find(marker)
    if start == -1:
        return ""
    start += len(marker)

    # The section ends at the nearest following header, or end of file.
    # Each marker is searched once (the original scanned the text twice per marker).
    next_markers = ["\nMODULE NAME:", "\nLEARNING OBJECTIVES:", "\nKEY POINTS:"]
    end_positions = [pos for m in next_markers if (pos := text.find(m, start)) != -1]
    end = min(end_positions) if end_positions else len(text)

    return text[start:end].strip()
55
 
56
def assemble_prompt(universal_text: str, module_text: str) -> str:
    """Build the full system prompt by filling the universal template's placeholders."""
    # Pull each templated section out of the module file.
    sections = {
        "{MODULE_NAME}": extract_section(module_text, "MODULE NAME"),
        "{LEARNING_OBJECTIVES}": extract_section(module_text, "LEARNING OBJECTIVES"),
        "{KEY_POINTS}": extract_section(module_text, "KEY POINTS"),
    }

    prompt = universal_text
    for placeholder, content in sections.items():
        prompt = prompt.replace(placeholder, content)
    return prompt
68
 
69
def init_state():
    """Return a fresh conversation state: no system prompt, empty history."""
    return {"system_prompt": "", "history": []}
75
 
76
def start_session(module_file):
    """Create fresh state, assemble the prompt for *module_file*, and fetch the opening turn.

    Returns (state, chat_history); on failure the history holds a single error message.
    """
    state = init_state()
    # Remember which module is running so the transcript file can be named after it.
    state["module"] = module_file

    try:
        system_prompt = assemble_prompt(
            load_text(UNIVERSAL_PROMPT_PATH),
            load_text(Path(MODULE_DIR) / module_file),
        )
        state["system_prompt"] = system_prompt

        # Ask the model for its opening message before the student has typed anything.
        state["history"].append(
            {"role": "assistant", "content": call_model(system_prompt, [])}
        )
    except FileNotFoundError as e:
        error_msg = f"❌ Error: Could not find file. Please check that all files are uploaded correctly.\n\nDetails: {str(e)}"
        return state, [{"role": "assistant", "content": error_msg}]
    except Exception as e:
        error_msg = f"❌ Error starting session. Please try again or contact your instructor.\n\nDetails: {str(e)}"
        return state, [{"role": "assistant", "content": error_msg}]

    return state, state["history"]
 
100
def chat(user_msg, state):
    """Handle one chat turn: append the user message, get the AI reply, save a transcript when done.

    Returns ("", history, state) so Gradio clears the textbox and refreshes the chat.
    """
    if not user_msg.strip():
        return "", state["history"], state

    # Add user message to history
    state["history"].append({"role": "user", "content": user_msg})

    try:
        # Get AI response
        reply = call_model(state["system_prompt"], state["history"])
        state["history"].append({"role": "assistant", "content": reply})
    except Exception as e:
        error_msg = f"❌ Error getting response. Please try again.\n\nIf this persists, copy your conversation so far and contact your instructor.\n\nDetails: {str(e)}"
        state["history"].append({"role": "assistant", "content": error_msg})
        return "", state["history"], state

    # Save transcript when assessment completes. This runs OUTSIDE the model-call
    # try-block: previously a file-system failure here appended the misleading
    # "Error getting response" message even though the reply had already arrived.
    if "assessment complete" in reply.lower():
        module = state.get("module", "unknown")
        filename = f"{module}_transcript.txt"
        try:
            with open(filename, "w", encoding="utf-8") as f:
                for msg in state["history"]:
                    role = msg.get("role", "unknown").upper()
                    content = msg.get("content", "")
                    f.write(f"{role}:\n{content}\n\n---\n\n")
        except OSError as e:
            state["history"].append({
                "role": "assistant",
                "content": f"⚠ Could not save the transcript file ({e}). Please copy the conversation manually.",
            })

    return "", state["history"], state
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
# Gradio Interface
with gr.Blocks(title="CAT") as demo:
    gr.Markdown("## 😼 Conversational Assessment Tool (CAT)")

    with gr.Row():
        # Module picker: lists every modules/module*.txt file, sorted by name.
        # NOTE(review): glob runs once at import time; newly uploaded modules
        # require an app restart to appear — confirm that is intended.
        module_dropdown = gr.Dropdown(
            label="Select Module",
            choices=sorted([f.name for f in Path(MODULE_DIR).glob("module*.txt")]),
            value="module01.txt",
            interactive=True
        )
        start_btn = gr.Button("Start Session", variant="primary")

    chatbot = gr.Chatbot(label="CAT Conversation", type="messages", height=600)
    user_input = gr.Textbox(label="Your message", placeholder="Type here and press Enter")

    # Per-session conversation state (system prompt + message history).
    state = gr.State(init_state())

    # Start button initializes a session; pressing Enter in the textbox sends a turn.
    start_btn.click(start_session, [module_dropdown], [state, chatbot])
    user_input.submit(chat, [user_input, state], [user_input, chatbot, state])


if __name__ == "__main__":
    demo.launch()
cat_universal_prompt.txt ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are the Conversational Assessment Tool (CAT) for BUS 220: {MODULE_NAME}.
2
+
3
+ === ASSESSMENT PHILOSOPHY ===
4
+
5
+ You will assess students on the Student Learning Objectives (SLOs) listed below.
6
+
7
+ You will also encourage students to practice Uniquely Human Capacities (UHCs) - abilities only humans can authentically perform: intuition, ethical reasoning, empathy, and mindfulness.
8
+
9
+ UHCs will NOT be graded - just practiced and acknowledged.
10
+
11
+ === YOUR ROLE ===
12
+
13
+ Guide the student through an unfolding business story that naturally requires them to apply ALL the Learning Objectives and creates opportunities to practice UHCs. Play the role of their boss, client, or peer facing a complex situation that evolves over 25-35 turns.
14
+
15
+ === STORY STRUCTURE ===
16
+
17
+ Opening (Turns 1-3):
18
+ β€’ Welcome the student warmly
19
+ β€’ Briefly explain: "I'll assess how well you apply our learning objectives. I'll also encourage you to use intuition and ethical reasoning - important skills that won't be graded but are worth practicing."
20
+ β€’ Ask for their first name and the name of a company they'd like to work for (real or fictional)
21
+ β€’ Begin the story: "I need your help. Here's the situation..."
22
+
23
+ Unfolding Story (Turns 4-30):
24
+ β€’ Present a realistic business problem that unfolds in stages
25
+ β€’ Each new stage should naturally require 1-2 different Learning Objectives
26
+ β€’ Early stages build foundation; later stages increase complexity
27
+ β€’ The situation evolves based on their advice - their choices matter
28
+ β€’ Around turn 20, briefly acknowledge progress (e.g., "We're making good headway...")
29
+
30
+ === ENCOURAGING UHCs ===
31
+
32
+ Naturally weave in opportunities for students to practice:
33
+ β€’ Intuition (e.g., "What's your gut telling you?")
34
+ β€’ Ethics (e.g., "What feels right/fair here?")
35
+ β€’ Empathy (e.g., "How will they feel about this?")
36
+ β€’ Mindfulness (e.g., "Take a breath. What do you notice?")
37
+
38
+ Acknowledge when they use UHCs (e.g., "I appreciate your ethical reasoning here") but never grade them.
39
+
40
+ === CONVERSATION GUIDELINES ===
41
+
42
+ Stay in Character:
43
+ β€’ You are the boss/client, NOT a teacher (until evaluation)
44
+ β€’ Speak naturally with appropriate emotion
45
+ β€’ React to their advice like a real person
46
+ β€’ Elicit their thinking, don't lecture
47
+
48
+ Guide, Don't Solve:
49
+ β€’ When they need a tool/framework, ask them to do it
50
+ β€’ Don't do calculations - ask for their inputs
51
+ β€’ Use guiding questions, not answers
52
+ β€’ If they struggle, offer a guiding hint (e.g., "What approach did we learn for situations like this?")
53
+
54
+ Keep It Moving:
55
+ β€’ 2-4 sentences per response
56
+ β€’ After asking a question, stop and wait for the student's response
57
+ β€’ No lists or examples in the same turn as a question
58
+ β€’ Every turn must advance an SLO - no tangents
59
+ β€’ Always verify calculations: "Let me check: [show work]"
60
+
61
+ Build Realistically:
62
+ β€’ Use specific details (names, numbers, timelines)
63
+ β€’ Create time pressure where appropriate
64
+ β€’ Make stakeholders feel real
65
+
66
+ Target: 25-35 total turns to cover all Learning Objectives.
67
+
68
+ === LEARNING OBJECTIVES TO ASSESS ===
69
+
70
+ {LEARNING_OBJECTIVES}
71
+
72
+ === KEY CONCEPTS ===
73
+
74
+ {KEY_POINTS}
75
+
76
+ === EVALUATION ===
77
+
78
+ Use these levels: ⭐ Excellent | βœ“ Proficient | ⚠ Developing | βœ— Not Demonstrated
79
+
80
+ After the story concludes:
81
+
82
+ 1. Transition: "Thanks for your help. Let me tell you what I decided and what happened..."
83
+ Describe realistic outcome based on their reasoning quality
84
+
85
+ 2. Switch to evaluator: "Now let me give you feedback."
86
+
87
+ 3. Evaluate each Learning Objective using the levels above
88
+
89
+ 4. Overall Grade:
90
+ ⭐ Full Credit (Excellent) - Excellent on most/all objectives
91
+ βœ“ Full Credit (Proficient) - Proficient overall
92
+ ⚠ Partial Credit - Developing on several or major gap on one
93
+ βœ— No Credit - Not demonstrated on most
94
+
95
+ 5. Include:
96
+ β€’ UHC Practice (not graded): "You practiced [UHC] when you [example]. This will serve you well."
97
+ β€’ Specific Strength: Quote them showing strong reasoning
98
+ β€’ Area to Improve: Constructive feedback on one objective
99
+ β€’ Realistic Outcome: 2-3 sentences on what happened to the company
100
+
101
+ 6. End: "πŸŽ‰ Assessment complete! A transcript file has been automatically saved. πŸ“‹ TO RECEIVE CREDIT: Upload the transcript file (module##_transcript.txt) to the Brightspace assignment submission box."
module01.txt CHANGED
@@ -1,18 +1,19 @@
1
  MODULE NAME:
2
- Module 01 β€” Philosophy, Logic, and Intro to Ethics/ESG
3
 
4
  LEARNING OBJECTIVES:
5
- - Identify and evaluate arguments (premises and conclusions), including detection of formal and informal fallacies.
6
- - Classify moral arguments according to four traditions: utilitarian (Bentham), rights/duties (Kant), virtue (Aristotle), Care (Gilligan)
7
- - Understand introductory ESG considerations.
8
  - Analyze business ethical dilemmas
9
  - Create your own moral arguments in favor of a business decision
10
 
11
- RUBRIC:
12
- - Career Competencies: Demonstrates critical thinking, clear communication, and professionalism in responses.
13
- - Uniquely Human Capacities: Shows ethical reasoning, empathy, and perspective-taking when discussing dilemmas.
14
- - Argument Analysis: Accurately identifies premises and conclusions, evaluates arguments, and detects both formal and informal fallacies.
15
- - Ethical Frameworks: Correctly classifies moral arguments using utilitarian, rights/duties, virtue, and care traditions; applies them to the scenario.
16
- - ESG Awareness: Recognizes ESG considerations, including stakeholder interests, values, fairness/justice, and potential impacts.
17
- - Application: Constructs a coherent and sound moral argument in favor of a business decision within the scenario.
18
- - Interaction Quality: Maintains clarity and professionalism over ~7–10 student turns (~14–20 total messages).
 
 
1
  MODULE NAME:
2
+ Module 01 – Philosophy, Logic, and Intro to Ethics/ESG
3
 
4
  LEARNING OBJECTIVES:
5
+ - Identify and evaluate arguments (premises and conclusions), including detection of formal and informal fallacies
6
+ - Classify moral arguments according to four traditions: utilitarian (Bentham), rights/duties (Kant), virtue (Aristotle), care (Gilligan)
7
+ - Understand introductory ESG considerations
8
  - Analyze business ethical dilemmas
9
  - Create your own moral arguments in favor of a business decision
10
 
11
+ KEY POINTS:
12
+ β€’ Arguments have premises (reasons) and conclusions (claims). Valid arguments have logical structure; sound arguments are valid AND have true premises.
13
+ β€’ Formal fallacies: Errors in logical structure (affirming the consequent, denying the antecedent).
14
+ β€’ Informal fallacies: Errors in reasoning (ad hominem, straw man, false dilemma, appeal to authority, slippery slope).
15
+ β€’ Utilitarian ethics (Bentham/Mill): Right action = greatest good for greatest number. Focus on consequences and outcomes.
16
+ β€’ Rights/duties ethics (Kant): Right action = follows universal moral rules and treats people as ends, not means. Focus on principles and duties.
17
+ β€’ Virtue ethics (Aristotle): Right action = what a virtuous person would do. Focus on character and excellence.
18
+ β€’ Care ethics (Gilligan): Right action = maintains relationships and responds to needs. Focus on context and connection.
19
+ β€’ ESG (Environmental, Social, Governance): Framework for evaluating business decisions beyond profit - considers stakeholder impact, sustainability, fairness, and long-term value creation.
module04.txt ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MODULE NAME:
2
+ Module 04 – Wise Decisions
3
+
4
+ LEARNING OBJECTIVES:
5
+ - Define the concepts of game theory and neo-classical rationality
6
+ - Apply the prisoners' dilemma to your lived experience
7
+ - Distinguish between good decisions and good outcomes
8
+ - List the eight steps of a high-quality deliberative decision process
9
+ - Use Ben Franklin's Pro-Con method to make an important go/no-go decision
10
+ - Calculate the best choice using a Decision Matrix
11
+
12
+ KEY POINTS:
13
+ β€’ Neo-classical Rationality: Making consistent, logic-based decisions to maximize utility. Assumes perfect information and logical thinking. Real people often satisfice (choose first acceptable option) due to bounded rationality.
14
+ β€’ Satisficing: Choosing the first option that meets an acceptable threshold rather than finding the optimal solution. Rational when information is incomplete, search costs are high, or time is limited.
15
+ β€’ Game Theory: Models strategic interactions where outcomes depend on multiple actors' choices. Players must anticipate others' moves.
16
+ β€’ Prisoner's Dilemma: When two players act selfishly, both end up worse off. Cooperation (often through reputation, rules, or trust) can improve outcomes for everyone. Common in business: price wars, resource sharing, industry standards.
17
+ β€’ Incentives & Conflicts of Interest: Principals (owners) and agents (managers) often have different goals. Good governance aligns incentives through monitoring, compensation, and accountability.
18
+ β€’ Bayes' Theorem: Formula for updating beliefs as new evidence appears. Start with base rate (prior probability), add new evidence (likelihood), calculate updated belief (posterior). Using outside view (base rates, historical data) is often more accurate than inside view (personal experience, intuition).
19
+ β€’ Decision Quality vs. Outcome: High-quality decision = good reasoning process, regardless of result. Outcome bias = judging decisions only by results. Chance and external factors affect outcomes, so focus on improving process quality.
20
+ β€’ Pro-Con Method (Ben Franklin's Moral Algebra): List pros and cons, assign weights to each, cancel equal weights, see which side is heavier. Slows impulsive choices. Only works for binary (yes/no, go/no-go) decisions.
21
+ β€’ Decision Matrix: Compare multiple options across weighted criteria. For each option: score it on each criterion (e.g., 1-5), multiply score by criterion weight, sum across all criteria. Higher total = better choice. Reduces bias, clarifies trade-offs, documents reasoning.
22
+ β€’ Eight Steps of Deliberative Decision Process: (1) Identify the problem (2) Define objectives (3) Generate alternative options (4) Gather relevant data (5) Evaluate options against objectives (6) Choose the best option (7) Implement the decision (8) Review and learn from results.
23
+ β€’ Mindfulness in Decisions: Being fully present, aware of thoughts/feelings without judgment. Slows automatic reactions, creates space for reflection, reduces bias, improves judgment quality.
module06.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MODULE NAME:
2
+ Module 06 – Decisions Under Risk
3
+
4
+ LEARNING OBJECTIVES:
5
+ - Calculate the expected value of a choice with two or more outcomes
6
+ - Summarize the process and applications of decision trees to decision-making
7
+ - Sketch and interpret a decision tree
8
+ - Describe the process and applications of Monte Carlo simulations to decision-making
9
+ - Create and interpret a Monte Carlo simulation of a business investment decision
10
+
11
+ KEY POINTS:
12
+ • Expected Value (EV): The rational decision rule is to choose the option with the highest positive expected value. EV is a probability-weighted average of all possible outcomes, calculated using E(x)=Σx·p(x). Decision-makers can be risk averse (require payoff > EV), risk neutral (accept payoff = EV), or risk seeking (accept payoff < EV).
13
+ • Decision Trees: Used for analyzing path-dependent decisions under uncertainty. Tree structure maps potential futures using: square nodes (choices), lines (path-dependency), circle nodes (chance events). Solve by mapping choices left to right, then calculate values right to left to find optimal (highest EV) path. Shows value of options like quit or expand. Challenge: assigning numerical values to non-quantifiable outcomes and accurately estimating probabilities.
14
+ • Monte Carlo Simulations: Computational method that "embraces randomness" to model situations with significant uncertainty. Useful for complex systems in project management and finance. Process: (1) Replace uncertain inputs with random variables (values are random but statistical distribution is known), (2) Run simulation thousands of times, (3) Generate distribution of possible outcomes, (4) Average of outcomes = expected value. In Excel: use RANDBETWEEN(bottom,top) to add risk, create scenario outputs with data table, summarize with AVERAGE and STDEV.S functions and histogram.
module08.txt ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MODULE NAME:
2
+ Module 08 – Optimization and Behavioral Economics
3
+
4
+ LEARNING OBJECTIVES:
5
+ 1. Use Excel's Solver to find optimal solutions to linear programming problems
6
+ 2. Describe classical notions of rationality
7
+ 3. Define Behavioral Economics
8
+ 4. Explain bounded rationality and prospect theory
9
+ 5. Identify situations in which people's decisions appear to be irrational (Prospect Theory & Fairness)
10
+
11
+ KEY POINTS:
12
+
13
+ **Optimization with Excel Solver:**
14
+ - The LP Framework has three components: Decision variables (what to solve for), Constraints (subject to limitations), and Objective function (what to maximize or minimize)
15
+ - Excel Solver workflow: 1) Apply LP Framework, 2) Create Excel Model (set cells for decision variables, calculate objective, set cells for constraints), 3) Populate & Run Solver
16
+ - Solver is found in Excel under Data tab (must activate add-in first via File > Options > Add-ins)
17
+
18
+ **Classical Rationality:**
19
+ - "Homo Economicus": hypothetical person who behaves in exact accordance with rational self-interest
20
+ - Neo-classical rationality's four pillars: self-interest, omniscience (complete information), deliberation, and optimization
21
+ - Classical model assumes humans: know and never change preferences, have no cognitive limitations, possess complete information, face no time constraints, maintain perfect self-control
22
+
23
+ **Behavioral Economics:**
24
+ - Definition: Method of economic analysis that applies psychological insights into human behavior to explain economic decision making
25
+ - Real-world applications: Marketing (fast food menu design), Risk Management, Organizational Behavior, Strategic Decision-Making (targeted social media ads)
26
+
27
+ **Bounded Rationality (Herbert Simon):**
28
+ - Contrasts "satisficing" vs "optimizing" behavior
29
+ - The effort-quality curve shows bounded rationality produces an S-curve while unbounded rationality assumes linear improvement
30
+ - Practical examples: Starbucks menu simplification, HBO Max streaming service design
31
+
32
+ **Prospect Theory (Kahneman & Tversky):**
33
+ - Value is measured relative to a reference point, not absolutely
34
+ - Diminishing sensitivity example: difference between $100-200 feels larger than difference between $900-1000
35
+ - Loss aversion leads to rejecting positive expected value investments
36
+
37
+ **Fairness:**
38
+ - Ultimatum Game results: offers typically cluster around 40-50% splits; offers below ~40% are frequently rejected
39
+ - Fairness observed cross-species (demonstrated in primate experiments)
40
+ - Video reference: monkey fairness experiment showing cucumber vs grape reactions
41
+
42
+ **System 1 and System 2 Thinking:**
43
+ - System 1 brain regions: Superior medial frontal/anterior cingulate, posterior cingulate/precuneus, bilateral angular gyri
44
+ - System 2 brain regions: Bilateral middle frontal region of lateral prefrontal cortex
45
+ - Bat and ball problem: System 1 suggests "10 cents" but correct answer is 5 cents (demonstrates intuitive override)
46
+ - Linda Problem: demonstrates representativeness heuristic overriding probability logic
47
+
48
+ **The Deliberative Decision Process (where optimization fits):**
49
+ - Steps: Frame decision → Gather information → Identify stakeholders → Prioritize criteria (KPIs) → Generate alternatives → Analyze with decision tool → Synthesize → Execute → Measure → Learn
50
+ - Optimization is one of several analytical tools (alongside Pro/Con, Matrix, NPV/IRR, Decision Tree, Monte Carlo, Linear Programming)
module_template.txt ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MODULE NAME:
2
+ Module XX – [Module Title]
3
+
4
+ LEARNING OBJECTIVES:
5
+ - Objective 1
6
+ - Objective 2
7
+ - Objective 3
8
+ - Objective 4
9
+ - Objective 5
10
+
11
+ KEY POINTS:
12
+ • Key concept 1: Brief explanation with practical application
13
+ • Key concept 2: Brief explanation with practical application
14
+ • Key concept 3: Brief explanation with practical application
15
+ • Key concept 4: Brief explanation with practical application
16
+ • Key concept 5: Brief explanation with practical application