Decision-Fish committed on
Commit
44fada6
·
verified ·
1 Parent(s): 5f7bb05

Upload app.py; module01.txt; requirements.txt

Browse files
Files changed (3) hide show
  1. app.py +180 -0
  2. module01.txt +18 -0
  3. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import gradio as gr
from pathlib import Path

# Shared (module-independent) system prompt template; contains the
# {LEARNING_OBJECTIVES} and {RUBRIC} placeholders filled by assemble_prompt().
UNIVERSAL_PROMPT_PATH = "CAT_universal_prompt.txt"
MODULE_DIR = "modules" # <-- now using /modules subfolder

from openai import OpenAI
from dotenv import load_dotenv
# Load OPENAI_API_KEY (and any other settings) from a local .env file
# before constructing the client.
load_dotenv()
client = OpenAI()

# Type aliases
from typing import List, cast
from openai.types.chat import ChatCompletionMessageParam
def call_model(system_prompt: str, history: list[dict[str, str]], *,
               model: str = "gpt-4o-mini", temperature: float = 0.4) -> str:
    """Send the system prompt plus conversation history to the chat model.

    Args:
        system_prompt: Full system prompt, placed first in the message list.
        history: Conversation turns as {"role", "content"} dicts. Entries
            whose role is not "user"/"assistant" (e.g. the stored "system"
            entry) or whose content is not a string are silently skipped.
        model: Chat model name (keyword-only; default keeps prior behavior).
        temperature: Sampling temperature (keyword-only; default keeps
            prior behavior).

    Returns:
        The assistant's reply text, or "" if the API returned no content.
    """
    # Build as simple dicts first
    msgs: list[dict[str, str]] = [{"role": "system", "content": system_prompt}]
    for m in history:
        role = m.get("role")
        content = m.get("content")
        if role in ("user", "assistant") and isinstance(content, str):
            msgs.append({"role": role, "content": content})

    # Cast once at the call site to satisfy the SDK types
    typed_msgs = cast(List[ChatCompletionMessageParam], msgs)

    resp = client.chat.completions.create(
        model=model,
        messages=typed_msgs,
        temperature=temperature,
    )
    return resp.choices[0].message.content or ""
36
+
37
+
def load_text(path: str) -> str:
    """Return the full contents of *path*, decoded as UTF-8 text."""
    return Path(path).read_text(encoding="utf-8")
41
+
def assemble_prompt(universal_prompt_text: str, module_text: str) -> str:
    """Fill the universal prompt template with sections from a module file.

    Replaces the "{LEARNING_OBJECTIVES}" and "{RUBRIC}" placeholders in
    *universal_prompt_text* with the correspondingly labeled sections of
    *module_text*. A section that is missing from the module file is
    substituted as an empty string.
    """
    def extract(label: str) -> str:
        # A section runs from "LABEL:" to the next known section header
        # (headers must start at the beginning of a line) or end of text.
        marker = label + ":"
        start = module_text.find(marker)
        if start == -1:
            return ""
        start += len(marker)
        next_markers = ["\nLEARNING OBJECTIVES:", "\nRUBRIC:", "\nMODULE NAME:"]
        # Single find() per marker (the original scanned twice); keep only
        # headers that actually appear after the section start.
        end_positions = [pos for m in next_markers
                         if (pos := module_text.find(m, start)) != -1]
        end = min(end_positions) if end_positions else len(module_text)
        return module_text[start:end].strip()

    learning_objectives = extract("LEARNING OBJECTIVES")
    rubric = extract("RUBRIC")

    prompt = universal_prompt_text.replace("{LEARNING_OBJECTIVES}", learning_objectives)
    prompt = prompt.replace("{RUBRIC}", rubric)
    return prompt
60
+
def init_state():
    """Return a fresh, empty per-session state.

    Keys: assembled_prompt (system prompt built at session start), history
    (full message list), mode ("roleplay" -> "mentor" -> "done"),
    student_turns, invited_wrap, mentor_step, student_name.
    """
    return dict(
        assembled_prompt="",
        history=[],
        mode="roleplay",
        student_turns=0,
        invited_wrap=False,
        mentor_step=0,
        student_name="",
    )
71
+
def start_session(module_file, student_name=""):
    """Build a new session state and fetch the model's opening message.

    Loads the universal prompt and the selected module file, assembles the
    system prompt (optionally personalized with the student's first name),
    records it in the history, and asks the model for its opening turn.

    Returns:
        (state, history) — matching the Gradio outputs [state, chatbot].
    """
    state = init_state()
    state["student_name"] = student_name

    universal_text = load_text(UNIVERSAL_PROMPT_PATH)
    module_body = load_text(Path(MODULE_DIR) / module_file)

    # Give the model the name so it can personalize the dialogue (without overusing it)
    name_hint = ""
    if student_name:
        name_hint = (f"\n\n[Student first name: {student_name}. Use it naturally "
                     "once in the opening; don’t overuse.]")
    state["assembled_prompt"] = assemble_prompt(universal_text, module_body) + name_hint

    state["history"].append({"role": "system", "content": state["assembled_prompt"]})
    opening_msg = call_model(state["assembled_prompt"], state["history"])
    state["history"].append({"role": "assistant", "content": opening_msg})
    return state, state["history"]
86
+
def chat(user_msg, state):
    """Gradio submit handler: advance the conversation one turn.

    State machine: "roleplay" (scenario dialogue, counted in student_turns)
    -> "mentor" (two-step wrap-up: reflection question, then rubric-based
    evaluation) -> "done" (conversation over; further input is recorded but
    not answered).

    Returns:
        ("", history, state) so Gradio clears the input box and refreshes
        the chatbot and session state.
    """
    # Ignore blank submissions entirely.
    if not user_msg.strip():
        return "", state["history"], state

    # Only roleplay turns count toward the wrap-up thresholds below.
    if state["mode"] == "roleplay":
        state["student_turns"] += 1
    state["history"].append({"role": "user", "content": user_msg})

    invite_now = False  # offer the "anything else?" wrap-up invitation
    force_now = False   # hard stop: move toward mentor mode regardless

    if state["mode"] == "roleplay":
        if state["student_turns"] >= 10:
            force_now = True
        elif (7 <= state["student_turns"] <= 9) and not state["invited_wrap"]:
            invite_now = True
        elif state["invited_wrap"]:
            # FIX: once the invitation has been sent, the student's next
            # message must trigger the mentor transition. Previously turns
            # 8-9 after the invitation matched no branch and the function
            # fell through, returning None and breaking the Gradio handler.
            force_now = True

    if state["mode"] == "roleplay" and (invite_now or force_now):
        if not state["invited_wrap"]:
            # First time here: ask for any final input, then wait one turn.
            invite = ('Thanks for walking through this with me. We’ve covered a lot. '
                      'Before we wrap up, is there anything else you’d like me to consider '
                      'before I give you a preliminary assessment?')
            state["history"].append({"role": "assistant", "content": invite})
            state["invited_wrap"] = True
            return "", state["history"], state
        else:
            # Invitation already made: acknowledge the student's final input
            # and fall through into mentor mode below.
            ack = "I appreciate you sharing that. I’ll take it into account. Let’s step back and review how you approached this situation."
            state["history"].append({"role": "assistant", "content": ack})
            state["mode"] = "mentor"

    # Normal roleplay exchange: get the model's in-character reply.
    if state["mode"] == "roleplay" and not state["invited_wrap"]:
        reply = call_model(state["assembled_prompt"], state["history"])
        state["history"].append({"role": "assistant", "content": reply})
        return "", state["history"], state

    if state["mode"] == "mentor":
        # Step 1: general intro (no assumption of tools)
        if state.get("mentor_step", 0) == 0:
            eval_intro = (
                "Before we wrap up: name two specific concepts, tools, or frameworks you used in this scenario, "
                "and in one short sentence each say how you applied them. If you didn’t use any, name two insights "
                "you learned and how you would apply them next time."
            )
            state["history"].append({"role": "assistant", "content": eval_intro})
            state["mentor_step"] = 1
            return "", state["history"], state

        # Step 2: concise rubric-based evaluation
        else:
            eval_request = (
                "Evaluate the student's performance using the module rubric. Provide these sections: "
                "Overall rating (Unsatisfactory, Satisfactory, or Excellent) with a one-sentence justification; "
                'Career competencies; Uniquely human capacities; Argument analysis; Ethical frameworks; ESG awareness; '
                "Application; Interaction quality; Strength; Area to improve; Advice for next time; Fictional consequence. "
                "Quote at least one student phrase. Keep the whole evaluation under 180 words and end with 'END OF SCENE'."
            )
            state["history"].append({"role": "user", "content": eval_request})
            reply = call_model(state["assembled_prompt"], state["history"])
            # Guarantee the terminator the flow expects.
            if "END OF SCENE" not in reply:
                reply += "\n\nEND OF SCENE"
            state["history"].append({"role": "assistant", "content": reply})
            state["mode"] = "done"
            return "", state["history"], state

    # FIX: fallback for mode "done" (and any unhandled path). Previously a
    # message sent after the session ended returned None, which crashes the
    # Gradio output unpacking.
    return "", state["history"], state
150
+
151
+
# Gradio UI: module picker + name field, a Start button, the chat transcript,
# and the per-session state object threaded through every callback.
with gr.Blocks(title="CAT (MVP)") as demo:
    gr.Markdown("## 😼Conversational Assessment Tool (CAT) — MVP")
    with gr.Row():
        # Module choices are discovered from modules/module*.txt at build time.
        module_file = gr.Dropdown(
            label="Select Module File",
            choices=[p.name for p in sorted(Path(MODULE_DIR).glob("module*.txt"))],
            value="module01.txt",
            interactive=True
        )
        name_tb = gr.Textbox(label="Your first name", placeholder="e.g., Maya", value="", interactive=True)
        start_btn = gr.Button("Start")  # fine to keep inside the row (optional)

    # type="messages" expects a list of {"role", "content"} dicts — the same
    # shape as state["history"].
    chatbot = gr.Chatbot(label="CAT Conversation", type="messages")
    user_in = gr.Textbox(label="Your message", placeholder="Type here and press Enter")
    state = gr.State(init_state())

    def _start(module_name, student_name):
        # Start handler: require a non-empty first name before opening a session.
        student_name = student_name.strip()
        if not student_name:
            # Return a valid state object plus a warning message in the chat
            return init_state(), [{"role": "assistant", "content": "⚠ Please enter your first name before starting."}]
        st, hist = start_session(module_name, student_name)
        return st, hist

    # Wire the controls: Start builds a session; Enter in the textbox sends a turn.
    start_btn.click(_start, [module_file, name_tb], [state, chatbot])
    user_in.submit(chat, [user_in, state], [user_in, chatbot, state])
178
+
# Script entry point: launch the Gradio app when run directly.
if __name__ == "__main__":
    demo.launch()
module01.txt ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MODULE NAME:
2
+ Module 01 — Philosophy, Logic, and Intro to Ethics/ESG
3
+
4
+ LEARNING OBJECTIVES:
5
+ - Identify and evaluate arguments (premises and conclusions), including detection of formal and informal fallacies.
6
+ - Classify moral arguments according to four traditions: utilitarian (Bentham), rights/duties (Kant), virtue (Aristotle), care (Gilligan).
7
+ - Understand introductory ESG considerations.
8
+ - Analyze business ethical dilemmas
9
+ - Create your own moral arguments in favor of a business decision
10
+
11
+ RUBRIC:
12
+ - Career Competencies: Demonstrates critical thinking, clear communication, and professionalism in responses.
13
+ - Uniquely Human Capacities: Shows ethical reasoning, empathy, and perspective-taking when discussing dilemmas.
14
+ - Argument Analysis: Accurately identifies premises and conclusions, evaluates arguments, and detects both formal and informal fallacies.
15
+ - Ethical Frameworks: Correctly classifies moral arguments using utilitarian, rights/duties, virtue, and care traditions; applies them to the scenario.
16
+ - ESG Awareness: Recognizes ESG considerations, including stakeholder interests, values, fairness/justice, and potential impacts.
17
+ - Application: Constructs a coherent and sound moral argument in favor of a business decision within the scenario.
18
+ - Interaction Quality: Maintains clarity and professionalism over ~7–10 student turns (~14–20 total messages).
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio
2
+ openai
3
+ python-dotenv