import os
import gradio as gr
from pathlib import Path
from openai import OpenAI
from dotenv import load_dotenv
from typing import List, cast
from openai.types.chat import ChatCompletionMessageParam
# Log the installed Gradio version at startup to aid debugging version-specific UI issues.
print(f"--- Gradio Version Installed: {gr.__version__} ---")
# Load OPENAI_API_KEY (and any other settings) from a local .env file into the environment.
load_dotenv()
# Single shared OpenAI client; picks up the API key from the environment.
client = OpenAI()
# Universal system-prompt template containing {MODULE_NAME}/{LEARNING_OBJECTIVES}/{KEY_POINTS}/{TURN_COUNT} placeholders.
UNIVERSAL_PROMPT_PATH = "CAT_universal_prompt.txt"
# Directory holding the per-module content files (moduleNN.txt).
MODULE_DIR = "modules"
def call_model(system_prompt: str, history: list[dict[str, str]]) -> str:
    """Send the system prompt plus the conversation history to OpenAI and return the reply text."""
    # Start with the system message, then keep only well-formed user/assistant turns.
    messages: list[dict[str, str]] = [{"role": "system", "content": system_prompt}]
    messages.extend(
        {"role": entry["role"], "content": entry["content"]}
        for entry in history
        if entry.get("role") in ("user", "assistant") and isinstance(entry.get("content"), str)
    )
    # NOTE(review): some GPT-5-family endpoints reject a non-default temperature —
    # confirm 0.7 is accepted by gpt-5-mini on this API version.
    response = client.chat.completions.create(
        model="gpt-5-mini",
        messages=cast(List[ChatCompletionMessageParam], messages),
        temperature=0.7,
    )
    return response.choices[0].message.content or ""
def load_text(path: str) -> str:
    """Read and return the entire UTF-8 contents of *path* (text mode, universal newlines)."""
    return Path(path).read_text(encoding="utf-8")
def extract_section(text: str, label: str) -> str:
    """Extract the body of a labeled section (e.g. "KEY POINTS") from a module file.

    Returns the text between "<label>:" and the nearest following section
    marker (or end of text), stripped of surrounding whitespace. Returns ""
    when the label is not present.
    """
    # Normalize line endings so markers match on Windows (\r\n) and Unix (\n) files alike.
    text = text.replace('\r\n', '\n')
    marker = label + ":"
    start = text.find(marker)
    if start == -1:
        return ""
    start += len(marker)
    # The section runs until the nearest following section header, if any.
    next_markers = ("\nMODULE NAME:", "\nLEARNING OBJECTIVES:", "\nKEY POINTS:")
    # Compute each position once (original called text.find twice per marker)
    # and drop the -1 "not found" sentinels.
    end_positions = [pos for m in next_markers if (pos := text.find(m, start)) != -1]
    end = min(end_positions) if end_positions else len(text)
    return text[start:end].strip()
def assemble_prompt(universal_text: str, module_text: str) -> str:
    """Build the full system prompt by filling the universal template with module sections."""
    # Map each template placeholder to the matching section of the module file.
    replacements = {
        "{MODULE_NAME}": extract_section(module_text, "MODULE NAME"),
        "{LEARNING_OBJECTIVES}": extract_section(module_text, "LEARNING OBJECTIVES"),
        "{KEY_POINTS}": extract_section(module_text, "KEY POINTS"),
    }
    prompt = universal_text
    for placeholder, section in replacements.items():
        prompt = prompt.replace(placeholder, section)
    return prompt
def init_state():
    """Return a fresh per-session state: empty system prompt and empty chat history."""
    return {"system_prompt": "", "history": []}
def start_session(module_file):
    """Start a new CAT session for *module_file*.

    Returns (state, chat history, download-button update). On failure the
    returned history holds a user-facing error message instead of the AI's
    opening turn.
    """
    state = init_state()
    state["module"] = module_file  # Used later to name the transcript file.
    try:
        # Load the universal template and the selected module, then assemble the prompt.
        universal = load_text(UNIVERSAL_PROMPT_PATH)
        module_text = load_text(Path(MODULE_DIR) / module_file)
        # BUGFIX: cache the raw module text — chat() reads state["module_text"]
        # to rebuild the prompt each turn; without this every turn raised KeyError.
        state["module_text"] = module_text
        state["system_prompt"] = assemble_prompt(universal, module_text)
        # Ask the model for its opening message before the student types anything.
        opening = call_model(state["system_prompt"], [])
        state["history"].append({"role": "assistant", "content": opening})
        return state, state["history"], gr.DownloadButton(visible=False)
    except FileNotFoundError as e:
        error_msg = f"❌ Error: Could not find file. Please check that all files are uploaded correctly.\n\nDetails: {str(e)}"
        return state, [{"role": "assistant", "content": error_msg}], gr.DownloadButton(visible=False)
    except Exception as e:
        error_msg = f"❌ Error starting session. Please try again or contact your instructor.\n\nDetails: {str(e)}"
        return state, [{"role": "assistant", "content": error_msg}], gr.DownloadButton(visible=False)
def chat(user_msg, state):
    """Handle one chat turn: append the user message, call the model, update history.

    Returns (cleared textbox, history, state, download-button update). When the
    model's reply contains "assessment complete", a transcript file is written
    and the download button is revealed.
    """
    if not user_msg.strip():
        # Ignore empty/whitespace submissions; just clear the textbox.
        return "", state["history"], state, gr.DownloadButton(visible=False)
    # Add user message to history
    state["history"].append({"role": "user", "content": user_msg})
    try:
        # Turn counting: history holds (user, assistant) pairs, so
        # (len // 2) + 1 is the turn the model is now acting on.
        turn_count = (len(state["history"]) // 2) + 1
        # Insert the current turn number into the universal template.
        universal_text = load_text(UNIVERSAL_PROMPT_PATH)
        system_prompt_with_count = universal_text.replace("{TURN_COUNT}", str(turn_count))
        # BUGFIX/robustness: state["module_text"] may be missing (it was never
        # stored by older session-start code) — fall back to re-reading the
        # module file recorded at session start, then cache it.
        module_text = state.get("module_text")
        if module_text is None:
            module_text = load_text(Path(MODULE_DIR) / state.get("module", ""))
            state["module_text"] = module_text
        final_prompt = assemble_prompt(system_prompt_with_count, module_text)
        # Get the AI response; the prompt now carries the turn count.
        reply = call_model(final_prompt, state["history"])
        state["history"].append({"role": "assistant", "content": reply})
        # When the AI declares the assessment finished, save a transcript and
        # expose it via the download button.
        if "assessment complete" in reply.lower():
            module = state.get("module", "unknown")
            filename = f"{module}_transcript.txt"
            with open(filename, "w", encoding="utf-8") as f:
                for msg in state["history"]:
                    role = msg.get("role", "unknown").upper()
                    content = msg.get("content", "")
                    f.write(f"{role}:\n{content}\n\n---\n\n")
            return "", state["history"], state, gr.DownloadButton(value=filename, visible=True)
        # BUGFIX: explicit normal-path return — the original had no return for
        # turns where the assessment was not yet complete.
        return "", state["history"], state, gr.DownloadButton(visible=False)
    except Exception as e:
        error_msg = f"❌ Error getting response. Details: {str(e)}"
        state["history"].append({"role": "assistant", "content": error_msg})
        return "", state["history"], state, gr.DownloadButton(visible=False)
# Gradio Interface — declarative UI; component order defines the page layout.
with gr.Blocks(title="CAT") as demo:
    gr.Markdown("## 🐱 Conversational Assessment Tool (CAT)")
    gr.Markdown("""
**Instructions:** Select your module below, then click "Start Session" and apply your new skills and knowledge. Continue until CAT wraps up the conversation with feedback. Then click the download button to get your transcript and submit it to Brightspace.
""")
    with gr.Row():
        module_dropdown = gr.Dropdown(
            label="Select Module",
            # Module files live in MODULE_DIR and follow the "moduleNN.txt" naming scheme.
            choices=sorted([f.name for f in Path(MODULE_DIR).glob("module*.txt")]),
            value="module01.txt",
            interactive=True
        )
        start_btn = gr.Button("Start Session", variant="primary")
    chatbot = gr.Chatbot(label="CAT Conversation", type="messages", height=600)
    user_input = gr.Textbox(label="Your message", placeholder="Type here and press Enter")
    # Hidden until a transcript file exists (revealed by chat() on completion).
    download_btn = gr.DownloadButton("📥 Download Transcript", visible=False)
    # Per-session conversation state.
    # NOTE(review): gr.State is expected to copy its initial value per session
    # so sessions don't share this dict — confirm for the installed Gradio version.
    state = gr.State(init_state())
    # Start button: (dropdown) -> (state, chat history, download-button visibility).
    start_btn.click(start_session, [module_dropdown], [state, chatbot, download_btn])
    # Enter in the textbox sends the message and clears the box.
    user_input.submit(chat, [user_input, state], [user_input, chatbot, state, download_btn])
if __name__ == "__main__":
    demo.launch()