# cat / app.py
# Decision-Fish's picture
# change back to gpt 5 mini.
# 82bad00 verified
import os
import gradio as gr
from pathlib import Path
from openai import OpenAI
from dotenv import load_dotenv
from typing import List, cast
from openai.types.chat import ChatCompletionMessageParam
# Log the installed Gradio version at startup (debug aid for deploy issues).
print(f"--- Gradio Version Installed: {gr.__version__} ---")
# Load environment variables (e.g. the OpenAI API key) from a local .env file.
load_dotenv()
# OpenAI() picks up its API key from the environment populated above.
client = OpenAI()
# Universal prompt template; contains {MODULE_NAME}, {LEARNING_OBJECTIVES},
# {KEY_POINTS} (and {TURN_COUNT}) placeholders filled in at session time.
UNIVERSAL_PROMPT_PATH = "CAT_universal_prompt.txt"
# Directory holding the per-module content files (module*.txt).
MODULE_DIR = "modules"
def call_model(system_prompt: str, history: list[dict[str, str]]) -> str:
    """Call the OpenAI chat-completions API with the conversation so far.

    Args:
        system_prompt: Fully assembled system prompt for this session.
        history: Prior turns as {"role": ..., "content": ...} dicts. Entries
            whose role is not user/assistant, or whose content is not a
            string, are silently skipped.

    Returns:
        The assistant's reply text, or "" if the API returned no content.
    """
    msgs: list[dict[str, str]] = [{"role": "system", "content": system_prompt}]
    for m in history:
        role = m.get("role")
        content = m.get("content")
        # Forward only well-formed user/assistant turns to the API.
        if role in ("user", "assistant") and isinstance(content, str):
            msgs.append({"role": role, "content": content})
    typed_msgs = cast(List[ChatCompletionMessageParam], msgs)
    # BUG FIX: gpt-5 family models only support the default temperature (1);
    # supplying temperature=0.7 made the API reject every request, so no
    # temperature argument is sent.
    resp = client.chat.completions.create(
        model="gpt-5-mini",
        messages=typed_msgs,
    )
    return resp.choices[0].message.content or ""
def load_text(path: str) -> str:
    """Return the entire contents of a UTF-8 text file at *path*."""
    return Path(path).read_text(encoding="utf-8")
def extract_section(text: str, label: str) -> str:
    """Extract the body of a labeled section from a module file.

    A section starts at "LABEL:" and runs until the next known section
    header ("MODULE NAME:", "LEARNING OBJECTIVES:", "KEY POINTS:" at the
    start of a line) or end of text.

    Args:
        text: Raw module file contents.
        label: Section label without the trailing colon, e.g. "KEY POINTS".

    Returns:
        The stripped section body, or "" when the label is not present.
    """
    # Normalize line endings (handles Windows \r\n and Unix \n)
    text = text.replace('\r\n', '\n')
    marker = label + ":"
    start = text.find(marker)
    if start == -1:
        return ""
    start += len(marker)
    # Find next section marker
    next_markers = ["\nMODULE NAME:", "\nLEARNING OBJECTIVES:", "\nKEY POINTS:"]
    # Walrus binding avoids the original double text.find() per marker.
    end_positions = [pos for m in next_markers if (pos := text.find(m, start)) != -1]
    end = min(end_positions) if end_positions else len(text)
    return text[start:end].strip()
def assemble_prompt(universal_text: str, module_text: str) -> str:
    """Build the complete system prompt from the universal template.

    Extracts the three known sections from the module file and substitutes
    each into its matching {PLACEHOLDER} slot of the universal prompt.
    """
    substitutions = {
        "{MODULE_NAME}": extract_section(module_text, "MODULE NAME"),
        "{LEARNING_OBJECTIVES}": extract_section(module_text, "LEARNING OBJECTIVES"),
        "{KEY_POINTS}": extract_section(module_text, "KEY POINTS"),
    }
    prompt = universal_text
    for placeholder, value in substitutions.items():
        prompt = prompt.replace(placeholder, value)
    return prompt
def init_state():
    """Return a fresh per-session state dict: empty prompt, no history."""
    return dict(system_prompt="", history=[])
def start_session(module_file):
    """Start a new CAT session for the selected module.

    Args:
        module_file: Filename of the module inside MODULE_DIR (e.g. "module01.txt").

    Returns:
        (state, chatbot history, DownloadButton update). On failure the
        history holds a single assistant-role error message and state keeps
        whatever was populated before the failure.
    """
    state = init_state()
    state["module"] = module_file  # Store module name for transcript filename
    try:
        # Load and assemble prompt
        universal = load_text(UNIVERSAL_PROMPT_PATH)
        module_text = load_text(Path(MODULE_DIR) / module_file)
        # BUG FIX: chat() re-assembles the prompt every turn from
        # state["module_text"]; previously it was never stored here, so every
        # chat turn hit a KeyError and surfaced as a generic error message.
        state["module_text"] = module_text
        state["system_prompt"] = assemble_prompt(universal, module_text)
        # Get opening message from AI
        opening = call_model(state["system_prompt"], [])
        state["history"].append({"role": "assistant", "content": opening})
        return state, state["history"], gr.DownloadButton(visible=False)
    except FileNotFoundError as e:
        error_msg = f"❌ Error: Could not find file. Please check that all files are uploaded correctly.\n\nDetails: {str(e)}"
        return state, [{"role": "assistant", "content": error_msg}], gr.DownloadButton(visible=False)
    except Exception as e:
        error_msg = f"❌ Error starting session. Please try again or contact your instructor.\n\nDetails: {str(e)}"
        return state, [{"role": "assistant", "content": error_msg}], gr.DownloadButton(visible=False)
def chat(user_msg: str, state: dict):
    """Handle a chat message with Turn-Counting and Reasoning Effort

    One chat turn: append the user message, rebuild the system prompt with
    the current turn number, call the model, and — when the model says the
    assessment is complete — write a transcript file and reveal the
    download button.

    Returns:
        (cleared textbox value, chatbot history, state, DownloadButton update).
    """
    # Ignore blank/whitespace-only submissions; UI stays unchanged.
    if not user_msg.strip():
        return "", state["history"], state, gr.DownloadButton(visible=False)
    # Add user message to history
    state["history"].append({"role": "user", "content": user_msg})
    try:
        # === NEW LOGIC: START OF TURN COUNTING ===
        # We count 'pairs' of messages (User + AI).
        # (len/2) + 1 tells the AI which turn it is currently acting on.
        turn_count = (len(state["history"]) // 2) + 1
        # Load the universal text and insert the turn count into the placeholder
        universal_text = load_text(UNIVERSAL_PROMPT_PATH)
        system_prompt_with_count = universal_text.replace("{TURN_COUNT}", str(turn_count))
        # Now we assemble the full prompt using your existing extraction logic
        # (Assuming your code later replaces {LEARNING_OBJECTIVES} and {KEY_POINTS})
        # NOTE(review): state["module_text"] is read here, but start_session as
        # shown in this file never stores that key — a KeyError would be caught
        # below and shown as a generic error. Confirm it is populated upstream.
        final_prompt = assemble_prompt(system_prompt_with_count, state["module_text"])
        # === END OF TURN COUNTING LOGIC ===
        # Get AI response using the new GPT-5 Mini model
        # Note: We now pass the 'final_prompt' which contains the turn count
        reply = call_model(final_prompt, state["history"])
        state["history"].append({"role": "assistant", "content": reply})
        # Save transcript when assessment completes (your existing logic)
        # Completion is detected by a phrase match on the model's reply.
        if "assessment complete" in reply.lower():
            module = state.get("module", "unknown")
            filename = f"{module}_transcript.txt"
            with open(filename, "w", encoding="utf-8") as f:
                for msg in state["history"]:
                    role = msg.get("role", "unknown").upper()
                    content = msg.get("content", "")
                    f.write(f"{role}:\n{content}\n\n---\n\n")
            return "", state["history"], state, gr.DownloadButton(value=filename, visible=True)
    except Exception as e:
        error_msg = f"❌ Error getting response. Details: {str(e)}"
        state["history"].append({"role": "assistant", "content": error_msg})
    return "", state["history"], state, gr.DownloadButton(visible=False)
# Gradio Interface
# Layout: module selector + start button, then the chat area, an input box,
# and a (initially hidden) transcript download button.
with gr.Blocks(title="CAT") as demo:
    gr.Markdown("## 🐱 Conversational Assessment Tool (CAT)")
    gr.Markdown("""
**Instructions:** Select your module below, then click "Start Session" and apply your new skills and knowledge. Continue until CAT wraps up the conversation with feedback. Then click the download button to get your transcript and submit it to Brightspace.
""")
    with gr.Row():
        # Dropdown is populated once at startup from module*.txt files on disk.
        module_dropdown = gr.Dropdown(
            label="Select Module",
            choices=sorted([f.name for f in Path(MODULE_DIR).glob("module*.txt")]),
            value="module01.txt",
            interactive=True
        )
        start_btn = gr.Button("Start Session", variant="primary")
    chatbot = gr.Chatbot(label="CAT Conversation", type="messages", height=600)
    user_input = gr.Textbox(label="Your message", placeholder="Type here and press Enter")
    download_btn = gr.DownloadButton("📥 Download Transcript", visible=False)
    # Per-session conversation state seeded from init_state().
    # NOTE(review): gr.State is expected to copy this initial value per
    # browser session rather than share one dict — confirm for this Gradio version.
    state = gr.State(init_state())
    # Wiring: start button (re)initializes a session; Enter in the textbox sends a turn.
    start_btn.click(start_session, [module_dropdown], [state, chatbot, download_btn])
    user_input.submit(chat, [user_input, state], [user_input, chatbot, state, download_btn])
if __name__ == "__main__":
    demo.launch()