import os
import time
import base64
from collections import defaultdict
from typing import List, Dict
import gradio as gr
from langsmith import Client # LangSmith 客户端
from config import (
DEFAULT_MODEL,
DEFAULT_COURSE_TOPICS,
LEARNING_MODES,
DOC_TYPES,
)
from clare_core import (
update_weaknesses_from_message,
update_cognitive_state_from_message,
render_session_status,
find_similar_past_question,
detect_language,
chat_with_clare,
export_conversation,
generate_quiz_from_history,
get_empty_input_prompt,
summarize_conversation,
)
from rag_engine import (
build_rag_chunks_from_file,
retrieve_relevant_chunks,
)
from syllabus_utils import extract_course_topics_from_file
# ================== Assets ==================
CLARE_LOGO_PATH = "clare_mascot.png"
CLARE_RUN_PATH = "Clare_Run.png"
CLARE_READING_PATH = "Clare_reading.png" # 确保存在
# ================== Base64 Helper ==================
def image_to_base64(image_path: str) -> str:
if not os.path.exists(image_path):
return ""
with open(image_path, "rb") as img_file:
encoded_string = base64.b64encode(img_file.read()).decode("utf-8")
if image_path.lower().endswith(".png"):
mime = "image/png"
elif image_path.lower().endswith((".jpg", ".jpeg")):
mime = "image/jpeg"
else:
mime = "image/png"
return f"data:{mime};base64,{encoded_string}"
# ================== User Guide Content ==================
USER_GUIDE_SECTIONS = {
"getting_started": """
Welcome to **Clare — Your Personalized AI Tutor**.
For this controlled experiment, Clare is already pre-loaded with:
📘 **Module 10 Reading – Responsible AI (Alto, 2024, Chapter 12)**
You do **NOT** need to upload any materials.
You may optionally upload extra files, but Clare will always include the Module 10 reading as core context.
**To begin:**
1. Log in with your **Student Name + Email/ID** on the right.
2. Select your **Learning Mode** on the left.
3. (Optional) Upload additional Module 10 slides / notes at the top.
4. Ask Clare any question about **Module 10 – Responsible AI**.
""",
"mode_definition": """
Clare offers different teaching modes to match how you prefer to learn.
### Concept Explainer
Clear, structured explanations with examples — ideal for learning new topics.
### Socratic Tutor
Clare asks guiding questions instead of giving direct answers.
Helps you build reasoning and problem-solving skills.
### Exam Prep / Quiz
Generates short practice questions aligned with your course week.
Useful for self-testing and preparing for exams.
### Assignment Helper
Helps you interpret assignment prompts, plan structure, and understand requirements.
❗ Clare does **not** produce full assignment answers (academic integrity).
### Quick Summary
Gives brief summaries of slides, reading materials, or long questions.
""",
"how_clare_works": """
Clare combines **course context + learning science + AI reasoning** to generate answers.
For this experiment, Clare always includes:
- Module 10 Reading – Responsible AI (Alto, 2024, Chapter 12)
- Any additional Module 10 files you upload
Clare uses:
- **Learning Mode**: tone, depth, and interaction style.
- **Reinforcement model**: may prioritize concepts you’re likely to forget.
- **Responsible AI principles**: avoids harmful output and preserves academic integrity.
""",
"memory_line": """
**Memory Line** is a visualization of your *learning reinforcement cycle*.
Based on the **forgetting-curve model**, Clare organizes your review topics into:
- **T+0 (Current Week)** – new concepts
- **T+7** – first spaced review
- **T+14** – reinforcement review
- **T+30** – long-term consolidation
In this experiment, Memory Line should be interpreted as your **Module 10** reinforcement status.
""",
"learning_progress": """
The Learning Progress Report highlights:
- **Concepts mastered**
- **Concepts in progress**
- **Concepts due for review**
- Your recent **micro-quiz results**
- Suggested **next-step topics**
""",
"how_files": """
Your uploaded materials help Clare:
- Align explanations with your exact course (here: **Module 10 – Responsible AI**)
- Use terminology consistent with your professor
- Improve factual accuracy
🔒 **Privacy**
- Files are used only within your session
- They are not kept as permanent training data
Accepted formats: **.docx / .pdf / .pptx**
For this experiment, Clare is **already pre-loaded** with the Module 10 reading. Uploads are optional.
""",
"micro_quiz": """
The **Micro-Quiz** function provides a:
- 1-minute self-check
- 1–3 questions about **Module 10 – Responsible AI**
- Instant feedback inside the main chat
**How it works:**
1. Click “Let’s Try (Micro-Quiz)” on the right.
2. Clare will send the **first quiz question** in the main chat.
3. Type your answer in the chat box.
4. Clare will:
- Judge correctness
- Give a brief explanation
- Ask if you want another question
5. You can continue or say “stop” at any time.
""",
"summarization": """
Clare can summarize:
- Module 10 reading
- Uploaded slides / notes
- Long conversation threads
""",
"export_conversation": """
You can export your chat session for:
- Study review
- Exam preparation
- Saving important explanations
Export format: **Markdown / plain text**.
""",
"faq": """
**Q: Does Clare give assignment answers?**
No. Clare assists with understanding and planning but does **not** generate full solutions.
**Q: Does Clare replace lectures or TA office hours?**
No. Clare supplements your learning by providing on-demand guidance.
**Q: What languages does Clare support?**
Currently: English & 简体中文.
"""
}
# ================== CSS 样式表 ==================
CUSTOM_CSS = """
/* --- Main Header --- */
.header-container { padding: 10px 20px; background-color: #ffffff; border-bottom: 2px solid #f3f4f6; margin-bottom: 15px; display: flex; align-items: center; }
/* --- Sidebar Login Panel --- */
.login-panel {
background-color: #e5e7eb;
padding: 15px;
border-radius: 8px;
text-align: center;
margin-bottom: 20px;
}
.login-panel img {
display: block;
margin: 0 auto 10px auto;
height: 80px;
object-fit: contain;
}
.login-main-btn {
background-color: #ffffff !important;
color: #000 !important;
border: 1px solid #000 !important;
font-weight: bold !important;
}
.logout-btn {
background-color: #6b2828 !important;
color: #fff !important;
border: none !important;
font-weight: bold !important;
}
/* User Guide */
.main-user-guide { border: none !important; background: transparent !important; box-shadow: none !important; }
.main-user-guide > .label-wrap { border: none !important; background: transparent !important; padding: 10px 0 !important; }
.main-user-guide > .label-wrap span { font-size: 1.3rem !important; font-weight: 800 !important; color: #111827 !important; }
.clean-accordion { border: none !important; background: transparent !important; box-shadow: none !important; margin-bottom: 0px !important; padding: 0 !important; border-radius: 0 !important; }
.clean-accordion > .label-wrap { padding: 8px 5px !important; border: none !important; background: transparent !important; border-bottom: 1px solid #e5e7eb !important; }
.clean-accordion > .label-wrap span { font-size: 0.9rem !important; font-weight: 500 !important; color: #374151 !important; }
.clean-accordion > .label-wrap:hover { background-color: #f9fafb !important; }
/* Action Buttons */
.action-btn { font-weight: bold !important; font-size: 0.9rem !important; position: relative; overflow: visible !important; }
.action-btn:hover::before { content: "See User Guide for details"; position: absolute; bottom: 110%; left: 50%; transform: translateX(-50%); background-color: #333; color: #fff; padding: 5px 10px; border-radius: 5px; font-size: 12px; white-space: nowrap; z-index: 1000; pointer-events: none; opacity: 0; animation: fadeIn 0.2s forwards; }
.action-btn:hover::after { content: ""; position: absolute; bottom: 100%; left: 50%; margin-left: -5px; border-width: 5px; border-style: solid; border-color: #333 transparent transparent transparent; opacity: 0; animation: fadeIn 0.2s forwards; }
/* Tooltips & Memory Line */
.html-tooltip { border-bottom: 1px dashed #999; cursor: help; position: relative; }
.html-tooltip:hover::before { content: attr(data-tooltip); position: absolute; bottom: 120%; left: 0; background-color: #333; color: #fff; padding: 5px 8px; border-radius: 4px; font-size: 11px; white-space: nowrap; z-index: 100; pointer-events: none; }
.memory-line-box { border: 1px solid #e5e7eb; padding: 12px; border-radius: 8px; background-color: #f9fafb; height: 100%; display: flex; flex-direction: column; justify-content: space-between; }
/* Results Box Style */
.result-box { border: 1px solid #e5e7eb; background: #ffffff; padding: 10px; border-radius: 8px; height: 100%; }
.result-box .prose { font-size: 0.9rem; }
@keyframes fadeIn { to { opacity: 1; } }
"""
# ========== Preload Module 10 PDF ==========
MODULE10_PATH = "module10_responsible_ai.pdf"
MODULE10_DOC_TYPE = "Literature Review / Paper"
preloaded_topics: List[str] = []
preloaded_chunks: List[Dict] = []
if os.path.exists(MODULE10_PATH):
try:
preloaded_topics = extract_course_topics_from_file(
MODULE10_PATH, MODULE10_DOC_TYPE
)
preloaded_chunks = build_rag_chunks_from_file(
MODULE10_PATH, MODULE10_DOC_TYPE
)
print("Module 10 PDF preloaded successfully.")
except Exception as e:
print("Module 10 preload failed:", e)
else:
print("Module 10 PDF not found at path:", MODULE10_PATH)
# ===== LangSmith logging =====
ls_client = Client()
LS_DATASET_NAME = "clare_user_events"
def log_event(data: Dict):
"""
把日志写入 LangSmith Dataset (clare_user_events)
"""
try:
inputs = {
"question": data.get("question"),
"student_id": data.get("student_id"),
}
# ✅ event_type 等字段作为 metadata,这样在 Dataset 列表里能直接看到 / 过滤
metadata = {k: v for k, v in data.items() if k not in ("question", "answer")}
ls_client.create_example(
inputs=inputs,
outputs={"answer": data.get("answer")},
metadata=metadata,
dataset_name=LS_DATASET_NAME,
)
except Exception as e:
print("LangSmith log failed:", e)
# ===== Reference Formatting Helper =====
def format_references(
rag_chunks: List[Dict], max_files: int = 2, max_sections_per_file: int = 3
) -> str:
if not rag_chunks:
return ""
refs_by_file: Dict[str, List[str]] = defaultdict(list)
for chunk in rag_chunks:
file_name = chunk.get("source_file") or "module10_responsible_ai.pdf"
section = chunk.get("section") or "Related section"
if section not in refs_by_file[file_name]:
refs_by_file[file_name].append(section)
if not refs_by_file:
return ""
lines = ["**References (RAG context used):**"]
for i, (file_name, sections) in enumerate(refs_by_file.items()):
if i >= max_files:
break
short_sections = sections[:max_sections_per_file]
if short_sections:
section_str = "; ".join(short_sections)
lines.append(f"- *{file_name}* — {section_str}")
else:
lines.append(f"- *{file_name}*")
if len(lines) == 1:
return ""
return "\n".join(lines)
def is_academic_query(message: str) -> bool:
if not message:
return False
m = message.strip().lower()
if not m:
return False
m = " ".join(m.split())
smalltalk_tokens = {
"hi", "hello", "hey", "yo",
"thanks", "thank", "thank you",
"ok", "okay",
"bye", "goodbye", "see you",
"haha", "lol"
}
tokens = m.split()
if "?" not in m and all(t in smalltalk_tokens for t in tokens):
return False
meta_phrases = [
"who are you",
"what are you",
"what is your name",
"introduce yourself",
"tell me about yourself",
"what can you do",
"how can you help",
"how do you help",
"how do i use",
"how to use this",
"what is this app",
"what is this tool",
"what is clare",
"who is clare",
]
if any(p in m for p in meta_phrases):
return False
if len(tokens) <= 2 and "?" not in m:
return False
return True
# ================== Gradio App ==================
with gr.Blocks(
title="Clare – Hanbridge AI Teaching Assistant", css=CUSTOM_CSS
) as demo:
# 全局状态
course_outline_state = gr.State(preloaded_topics or DEFAULT_COURSE_TOPICS)
weakness_state = gr.State([])
cognitive_state_state = gr.State({"confusion": 0, "mastery": 0})
rag_chunks_state = gr.State(preloaded_chunks or [])
last_question_state = gr.State("")
last_answer_state = gr.State("")
user_name_state = gr.State("")
user_id_state = gr.State("")
# ✅ 当前“最近一次回答”是否已经被点赞/点踩(只允许一次)
feedback_used_state = gr.State(False)
# --- Header ---
with gr.Row(elem_classes="header-container"):
with gr.Column(scale=3):
gr.HTML(
f"""
Clare
Your Personalized AI Tutor
Personalized guidance, review, and intelligent reinforcement
"""
)
# --- Main Layout ---
with gr.Row():
# === Left Sidebar ===
with gr.Column(scale=1, min_width=200):
clear_btn = gr.Button(
"Reset Conversation", variant="stop", interactive=False
)
gr.Markdown("### Model Settings")
model_name = gr.Textbox(
label="Model",
value="gpt-4.1-mini",
interactive=False,
lines=1,
)
language_preference = gr.Radio(
choices=["Auto", "English", "简体中文"],
value="Auto",
label="Language",
interactive=False,
)
learning_mode = gr.Radio(
choices=LEARNING_MODES,
value="Concept Explainer",
label="Learning Mode",
info="See User Guide for mode definition details.",
interactive=False,
)
with gr.Accordion(
"User Guide", open=True, elem_classes="main-user-guide"
):
with gr.Accordion(
"Getting Started",
open=False,
elem_classes="clean-accordion",
):
gr.Markdown(USER_GUIDE_SECTIONS["getting_started"])
with gr.Accordion(
"Mode Definition",
open=False,
elem_classes="clean-accordion",
):
gr.Markdown(USER_GUIDE_SECTIONS["mode_definition"])
with gr.Accordion(
"How Clare Works",
open=False,
elem_classes="clean-accordion",
):
gr.Markdown(USER_GUIDE_SECTIONS["how_clare_works"])
with gr.Accordion(
"What is Memory Line",
open=False,
elem_classes="clean-accordion",
):
gr.Markdown(USER_GUIDE_SECTIONS["memory_line"])
with gr.Accordion(
"Learning Progress Report",
open=False,
elem_classes="clean-accordion",
):
gr.Markdown(USER_GUIDE_SECTIONS["learning_progress"])
with gr.Accordion(
"How Clare Uses Your Files",
open=False,
elem_classes="clean-accordion",
):
gr.Markdown(USER_GUIDE_SECTIONS["how_files"])
with gr.Accordion(
"Micro-Quiz", open=False, elem_classes="clean-accordion"
):
gr.Markdown(USER_GUIDE_SECTIONS["micro_quiz"])
with gr.Accordion(
"Summarization",
open=False,
elem_classes="clean-accordion",
):
gr.Markdown(USER_GUIDE_SECTIONS["summarization"])
with gr.Accordion(
"Export Conversation",
open=False,
elem_classes="clean-accordion",
):
gr.Markdown(USER_GUIDE_SECTIONS["export_conversation"])
with gr.Accordion(
"FAQ", open=False, elem_classes="clean-accordion"
):
gr.Markdown(USER_GUIDE_SECTIONS["faq"])
gr.Markdown("---")
gr.Button("System Settings", size="sm", variant="secondary", interactive=False)
gr.HTML(
"""
"""
)
# === Center Main ===
with gr.Column(scale=3):
gr.Markdown(
"""
✦ Instruction: This prototype is pre-loaded with Module 10 – Responsible AI (Alto, 2024, Chapter 12).
✦ You do not need to upload files (uploads are optional).
✦ Please log in on the right before chatting with Clare.
"""
)
chatbot = gr.Chatbot(
label="",
height=450,
avatar_images=(None, CLARE_LOGO_PATH),
show_label=False,
type="tuples",
)
# Rating bar (last answer)
gr.Markdown("#### Rate Clare’s last answer")
with gr.Row():
thumb_up_btn = gr.Button(
"👍 Helpful", size="sm", interactive=False
)
thumb_down_btn = gr.Button(
"👎 Not helpful", size="sm", interactive=False
)
feedback_toggle_btn = gr.Button(
"Give detailed feedback", size="sm", variant="secondary", interactive=False
)
feedback_text = gr.Textbox(
label="What worked well or what was wrong?",
placeholder="Optional: describe what you liked / what was confusing or incorrect.",
lines=3,
visible=False,
)
feedback_submit_btn = gr.Button(
"Submit Feedback", size="sm", variant="primary", visible=False, interactive=False
)
user_input = gr.Textbox(
label="Your Input",
placeholder="Please log in on the right before asking Clare anything...",
show_label=False,
container=True,
autofocus=False,
interactive=False,
)
with gr.Row():
with gr.Column(scale=2):
syllabus_file = gr.File(
file_types=[".docx", ".pdf", ".pptx"],
file_count="single",
height=160,
label="Upload additional Module 10 file (.docx/.pdf/.pptx) — optional",
interactive=False,
)
with gr.Column(scale=1):
doc_type = gr.Dropdown(
choices=DOC_TYPES,
value="Syllabus",
label="File type",
container=True,
interactive=False,
)
gr.HTML("")
docs_btn = gr.Button(
"📂 Loaded Docs",
size="sm",
variant="secondary",
interactive=False,
)
with gr.Column(scale=2):
with gr.Group(elem_classes="memory-line-box"):
gr.HTML(
f"""
Memory Line
Next Review: T+7
Report ⬇️
"""
)
review_btn = gr.Button(
"Review Now",
size="sm",
variant="primary",
interactive=False,
)
session_status = gr.Markdown(visible=False)
# === Right Sidebar ===
with gr.Column(scale=1, min_width=180):
with gr.Group(elem_classes="login-panel"):
gr.HTML(f"
")
with gr.Group(visible=True) as login_state_1:
login_start_btn = gr.Button(
"Student Login", elem_classes="login-main-btn"
)
with gr.Group(visible=False) as login_state_2:
name_input = gr.Textbox(
label="Student Name", placeholder="Name", container=True
)
id_input = gr.Textbox(
label="Email/ID", placeholder="ID", container=True
)
login_confirm_btn = gr.Button(
"Enter", variant="primary", size="sm"
)
with gr.Group(visible=False) as login_state_3:
student_info_html = gr.HTML()
logout_btn = gr.Button(
"Log out", elem_classes="logout-btn", size="sm"
)
gr.Markdown("### Actions")
export_btn = gr.Button(
"Export Conversation", size="sm", elem_classes="action-btn", interactive=False
)
quiz_btn = gr.Button(
"Let's Try (Micro-Quiz)", size="sm", elem_classes="action-btn", interactive=False
)
summary_btn = gr.Button(
"Summarization", size="sm", elem_classes="action-btn", interactive=False
)
gr.Markdown("### Results")
with gr.Group(elem_classes="result-box"):
result_display = gr.Markdown(
value="Results (export / summary) will appear here...",
label="Generated Content",
)
# ================== Login Flow ==================
def show_inputs():
return {
login_state_1: gr.update(visible=False),
login_state_2: gr.update(visible=True),
login_state_3: gr.update(visible=False),
}
login_start_btn.click(
show_inputs, outputs=[login_state_1, login_state_2, login_state_3]
)
def confirm_login(name, id_val):
if not name or not id_val:
return {
login_state_1: gr.update(),
login_state_2: gr.update(),
login_state_3: gr.update(),
student_info_html: gr.update(
value="Please enter both Name and Email/ID to start.
"
),
user_name_state: gr.update(),
user_id_state: gr.update(),
feedback_used_state: False,
user_input: gr.update(interactive=False),
clear_btn: gr.update(interactive=False),
export_btn: gr.update(interactive=False),
quiz_btn: gr.update(interactive=False),
summary_btn: gr.update(interactive=False),
syllabus_file: gr.update(interactive=False),
doc_type: gr.update(interactive=False),
review_btn: gr.update(interactive=False),
language_preference: gr.update(interactive=False),
learning_mode: gr.update(interactive=False),
model_name: gr.update(interactive=False),
docs_btn: gr.update(interactive=False),
thumb_up_btn: gr.update(interactive=False, value="👍 Helpful"),
thumb_down_btn: gr.update(interactive=False, value="👎 Not helpful"),
feedback_toggle_btn: gr.update(interactive=False),
feedback_text: gr.update(visible=False, value=""),
feedback_submit_btn: gr.update(interactive=False, visible=False),
}
info_html = f"""
"""
return {
login_state_1: gr.update(visible=False),
login_state_2: gr.update(visible=False),
login_state_3: gr.update(visible=True),
student_info_html: gr.update(value=info_html),
user_name_state: name,
user_id_state: id_val,
feedback_used_state: False,
user_input: gr.update(
interactive=True,
placeholder="Ask about Module 10 concepts, Responsible AI, or let Clare test you...",
),
clear_btn: gr.update(interactive=True),
export_btn: gr.update(interactive=True),
quiz_btn: gr.update(interactive=True),
summary_btn: gr.update(interactive=True),
syllabus_file: gr.update(interactive=True),
doc_type: gr.update(interactive=True),
review_btn: gr.update(interactive=True),
language_preference: gr.update(interactive=True),
learning_mode: gr.update(interactive=True),
model_name: gr.update(interactive=False),
docs_btn: gr.update(interactive=True),
# ✅ 登录后仍然不允许点赞点踩,必须“有回答”才解锁
thumb_up_btn: gr.update(interactive=False, value="👍 Helpful"),
thumb_down_btn: gr.update(interactive=False, value="👎 Not helpful"),
feedback_toggle_btn: gr.update(interactive=True),
feedback_text: gr.update(visible=False, value=""),
feedback_submit_btn: gr.update(interactive=True, visible=False),
}
login_confirm_btn.click(
confirm_login,
inputs=[name_input, id_input],
outputs=[
login_state_1,
login_state_2,
login_state_3,
student_info_html,
user_name_state,
user_id_state,
feedback_used_state,
user_input,
clear_btn,
export_btn,
quiz_btn,
summary_btn,
syllabus_file,
doc_type,
review_btn,
language_preference,
learning_mode,
model_name,
docs_btn,
thumb_up_btn,
thumb_down_btn,
feedback_toggle_btn,
feedback_text,
feedback_submit_btn,
],
)
def logout():
return {
login_state_1: gr.update(visible=True),
login_state_2: gr.update(visible=False),
login_state_3: gr.update(visible=False),
name_input: gr.update(value=""),
id_input: gr.update(value=""),
user_name_state: "",
user_id_state: "",
feedback_used_state: False,
student_info_html: gr.update(value=""),
user_input: gr.update(
value="",
interactive=False,
placeholder="Please log in on the right before asking Clare anything...",
),
clear_btn: gr.update(interactive=False),
export_btn: gr.update(interactive=False),
quiz_btn: gr.update(interactive=False),
summary_btn: gr.update(interactive=False),
syllabus_file: gr.update(interactive=False),
doc_type: gr.update(interactive=False),
review_btn: gr.update(interactive=False),
language_preference: gr.update(interactive=False),
learning_mode: gr.update(interactive=False),
docs_btn: gr.update(interactive=False),
thumb_up_btn: gr.update(interactive=False, value="👍 Helpful"),
thumb_down_btn: gr.update(interactive=False, value="👎 Not helpful"),
feedback_toggle_btn: gr.update(interactive=False),
feedback_text: gr.update(visible=False, value=""),
feedback_submit_btn: gr.update(interactive=False, visible=False),
}
logout_btn.click(
logout,
outputs=[
login_state_1,
login_state_2,
login_state_3,
name_input,
id_input,
user_name_state,
user_id_state,
feedback_used_state,
student_info_html,
user_input,
clear_btn,
export_btn,
quiz_btn,
summary_btn,
syllabus_file,
doc_type,
review_btn,
language_preference,
learning_mode,
docs_btn,
thumb_up_btn,
thumb_down_btn,
feedback_toggle_btn,
feedback_text,
feedback_submit_btn,
],
)
# ================== Main Logic ==================
def update_course_and_rag(file, doc_type_val):
local_topics = preloaded_topics or []
local_chunks = preloaded_chunks or []
if file is not None:
try:
topics = extract_course_topics_from_file(file, doc_type_val)
except Exception:
topics = []
try:
chunks = build_rag_chunks_from_file(file, doc_type_val)
except Exception:
chunks = []
local_topics = (preloaded_topics or []) + (topics or [])
local_chunks = (preloaded_chunks or []) + (chunks or [])
status_md = (
f"✅ **Loaded Module 10 base reading + uploaded {doc_type_val} file.**\n\n"
"Both will be used for explanations and quizzes."
)
else:
status_md = (
"✅ **Using pre-loaded Module 10 reading only.**\n\n"
"You may optionally upload additional Module 10 materials."
)
return local_topics, local_chunks, status_md
syllabus_file.change(
update_course_and_rag,
[syllabus_file, doc_type],
[course_outline_state, rag_chunks_state, session_status],
)
def show_loaded_docs(doc_type_val):
gr.Info(
f"For this experiment, Clare always includes the pre-loaded Module 10 reading.\n"
f"Additional uploaded {doc_type_val} files will be used as supplementary context.",
title="Loaded Documents",
)
docs_btn.click(show_loaded_docs, inputs=[doc_type])
def respond(
message,
chat_history,
course_outline,
weaknesses,
cognitive_state,
rag_chunks,
model_name_val,
lang_pref,
mode_val,
doc_type_val,
user_id_val,
feedback_used,
):
# 未登录:不解锁按钮
if not user_id_val:
out_msg = (
"🔒 Please log in with your Student Name and Email/ID on the right "
"before using Clare."
)
new_history = (chat_history or []) + [[message, out_msg]]
new_status = render_session_status(
mode_val or "Concept Explainer",
weaknesses or [],
cognitive_state or {"confusion": 0, "mastery": 0},
)
return (
"",
new_history,
weaknesses,
cognitive_state,
new_status,
"",
"",
feedback_used,
gr.update(interactive=False, value="👍 Helpful"),
gr.update(interactive=False, value="👎 Not helpful"),
)
resolved_lang = detect_language(message or "", lang_pref)
# 空输入:不改变按钮状态
if not message or not message.strip():
new_status = render_session_status(
mode_val or "Concept Explainer",
weaknesses or [],
cognitive_state or {"confusion": 0, "mastery": 0},
)
return (
"",
chat_history,
weaknesses,
cognitive_state,
new_status,
"",
"",
feedback_used,
gr.update(),
gr.update(),
)
weaknesses = update_weaknesses_from_message(message, weaknesses or [])
cognitive_state = update_cognitive_state_from_message(message, cognitive_state)
if is_academic_query(message):
rag_context_text, rag_used_chunks = retrieve_relevant_chunks(
message, rag_chunks or []
)
else:
rag_context_text, rag_used_chunks = "", []
start_ts = time.time()
answer, new_history = chat_with_clare(
message=message,
history=chat_history,
model_name=model_name_val,
language_preference=resolved_lang,
learning_mode=mode_val,
doc_type=doc_type_val,
course_outline=course_outline,
weaknesses=weaknesses,
cognitive_state=cognitive_state,
rag_context=rag_context_text,
)
end_ts = time.time()
latency_ms = (end_ts - start_ts) * 1000.0
if is_academic_query(message) and rag_used_chunks:
ref_text = format_references(rag_used_chunks)
else:
ref_text = ""
if ref_text and new_history:
last_user, last_assistant = new_history[-1]
if "References (RAG context used):" not in (last_assistant or ""):
last_assistant = f"{last_assistant}\n\n{ref_text}"
new_history[-1] = [last_user, last_assistant]
answer = last_assistant
student_id = user_id_val or "ANON"
experiment_id = "RESP_AI_W10"
try:
log_event(
{
"experiment_id": experiment_id,
"student_id": student_id,
"event_type": "chat_turn",
"timestamp": end_ts,
"latency_ms": latency_ms,
"question": message,
"answer": answer,
"model_name": model_name_val,
"language": resolved_lang,
"learning_mode": mode_val,
}
)
except Exception as e:
print("log_event error:", e)
new_status = render_session_status(mode_val, weaknesses, cognitive_state)
# ✅ 有新回答:重置 feedback_used=False,并解锁按钮(恢复文案)
return (
"",
new_history,
weaknesses,
cognitive_state,
new_status,
message,
answer,
False,
gr.update(interactive=True, value="👍 Helpful"),
gr.update(interactive=True, value="👎 Not helpful"),
)
user_input.submit(
respond,
[
user_input,
chatbot,
course_outline_state,
weakness_state,
cognitive_state_state,
rag_chunks_state,
model_name,
language_preference,
learning_mode,
doc_type,
user_id_state,
feedback_used_state,
],
[
user_input,
chatbot,
weakness_state,
cognitive_state_state,
session_status,
last_question_state,
last_answer_state,
feedback_used_state,
thumb_up_btn,
thumb_down_btn,
],
)
# ===== Micro-Quiz =====
def start_micro_quiz(
chat_history,
course_outline,
weaknesses,
cognitive_state,
rag_chunks,
model_name_val,
lang_pref,
mode_val,
doc_type_val,
user_id_val,
):
if not user_id_val:
gr.Info("Please log in first to start a micro-quiz.", title="Login required")
return (
chat_history,
weaknesses,
cognitive_state,
render_session_status(
mode_val or "Concept Explainer",
weaknesses or [],
cognitive_state or {"confusion": 0, "mastery": 0},
),
)
quiz_instruction = (
"We are running a short micro-quiz session based ONLY on **Module 10 – "
"Responsible AI (Alto, 2024, Chapter 12)** and the pre-loaded materials.\n\n"
"Step 1 – Before asking any content question:\n"
"• First ask me which quiz style I prefer right now:\n"
" - (1) Multiple-choice questions\n"
" - (2) Short-answer / open-ended questions\n"
"• Ask me explicitly: \"Which quiz style do you prefer now: 1) Multiple-choice or 2) Short-answer? "
"Please reply with 1 or 2.\"\n"
"• Do NOT start a content question until I have answered 1 or 2.\n\n"
"Step 2 – After I choose the style:\n"
"• If I choose 1 (multiple-choice):\n"
" - Ask ONE multiple-choice question at a time, based on Module 10 concepts "
"(Responsible AI definition, risk types, mitigation layers, EU AI Act, etc.).\n"
" - Provide 3–4 options (A, B, C, D) and make only one option clearly correct.\n"
"• If I choose 2 (short-answer):\n"
" - Ask ONE short-answer question at a time, also based on Module 10 concepts.\n"
" - Do NOT show the answer when you ask the question.\n\n"
"Step 3 – For each answer I give:\n"
"• Grade my answer (correct / partially correct / incorrect).\n"
"• Give a brief explanation and the correct answer.\n"
"• Then ask if I want another question of the SAME style.\n"
"• Continue this pattern until I explicitly say to stop.\n\n"
"Please start by asking me which quiz style I prefer (1 = multiple-choice, 2 = short-answer). "
"Do not ask any content question before I choose."
)
resolved_lang = lang_pref
start_ts = time.time()
quiz_ctx_text, _quiz_ctx_chunks = retrieve_relevant_chunks(
"Module 10 quiz", rag_chunks or []
)
answer, new_history = chat_with_clare(
message=quiz_instruction,
history=chat_history,
model_name=model_name_val,
language_preference=resolved_lang,
learning_mode=mode_val,
doc_type=doc_type_val,
course_outline=course_outline,
weaknesses=weaknesses,
cognitive_state=cognitive_state,
rag_context=quiz_ctx_text,
)
end_ts = time.time()
latency_ms = (end_ts - start_ts) * 1000.0
student_id = user_id_val or "ANON"
experiment_id = "RESP_AI_W10"
try:
log_event(
{
"experiment_id": experiment_id,
"student_id": student_id,
"event_type": "micro_quiz_start",
"timestamp": end_ts,
"latency_ms": latency_ms,
"question": quiz_instruction,
"answer": answer,
"model_name": model_name_val,
"language": resolved_lang,
"learning_mode": mode_val,
}
)
except Exception as e:
print("log_event error:", e)
new_status = render_session_status(mode_val, weaknesses, cognitive_state)
return new_history, weaknesses, cognitive_state, new_status
quiz_btn.click(
start_micro_quiz,
[
chatbot,
course_outline_state,
weakness_state,
cognitive_state_state,
rag_chunks_state,
model_name,
language_preference,
learning_mode,
doc_type,
user_id_state,
],
[chatbot, weakness_state, cognitive_state_state, session_status],
)
# ===== Feedback Handlers (thumb + detailed) =====
def show_feedback_box():
return {
feedback_text: gr.update(visible=True),
feedback_submit_btn: gr.update(visible=True),
}
feedback_toggle_btn.click(
show_feedback_box,
None,
[feedback_text, feedback_submit_btn],
)
def send_thumb_up(
last_q,
last_a,
user_id_val,
mode_val,
model_name_val,
lang_pref,
feedback_used,
):
# 没有可评价回答:保持禁用
if not last_q and not last_a:
print("No last QA to log for thumbs_up.")
return (
feedback_used,
gr.update(interactive=False, value="👍 Helpful"),
gr.update(interactive=False, value="👎 Not helpful"),
)
# 已经反馈过:直接禁用
if feedback_used:
print("Feedback already sent for this answer (thumb_up).")
return (
feedback_used,
gr.update(interactive=False),
gr.update(interactive=False),
)
try:
log_event(
{
"experiment_id": "RESP_AI_W10",
"student_id": user_id_val or "ANON",
"event_type": "like",
"timestamp": time.time(),
"question": last_q,
"answer": last_a,
"model_name": model_name_val,
"language": lang_pref,
"learning_mode": mode_val,
}
)
print("[Feedback] thumbs_up logged to LangSmith.")
except Exception as e:
print("thumb_up log error:", e)
# 点完一次:置 True + 按钮置灰 + 文案 sent
return (
True,
gr.update(interactive=False, value="👍 Helpful (sent)"),
gr.update(interactive=False),
)
def send_thumb_down(
last_q,
last_a,
user_id_val,
mode_val,
model_name_val,
lang_pref,
feedback_used,
):
if not last_q and not last_a:
print("No last QA to log for thumbs_down.")
return (
feedback_used,
gr.update(interactive=False, value="👍 Helpful"),
gr.update(interactive=False, value="👎 Not helpful"),
)
if feedback_used:
print("Feedback already sent for this answer (thumb_down).")
return (
feedback_used,
gr.update(interactive=False),
gr.update(interactive=False),
)
try:
log_event(
{
"experiment_id": "RESP_AI_W10",
"student_id": user_id_val or "ANON",
"event_type": "dislike",
"timestamp": time.time(),
"question": last_q,
"answer": last_a,
"model_name": model_name_val,
"language": lang_pref,
"learning_mode": mode_val,
}
)
print("[Feedback] thumbs_down logged to LangSmith.")
except Exception as e:
print("thumb_down log error:", e)
return (
True,
gr.update(interactive=False),
gr.update(interactive=False, value="👎 Not helpful (sent)"),
)
thumb_up_btn.click(
send_thumb_up,
[
last_question_state,
last_answer_state,
user_id_state,
learning_mode,
model_name,
language_preference,
feedback_used_state,
],
[feedback_used_state, thumb_up_btn, thumb_down_btn],
)
thumb_down_btn.click(
send_thumb_down,
[
last_question_state,
last_answer_state,
user_id_state,
learning_mode,
model_name,
language_preference,
feedback_used_state,
],
[feedback_used_state, thumb_up_btn, thumb_down_btn],
)
def submit_detailed_feedback(
text, last_q, last_a, user_id_val, mode_val, model_name_val, lang_pref
):
if not text or not text.strip():
return gr.update(
value="",
placeholder="Please enter some feedback before submitting.",
)
try:
log_event(
{
"experiment_id": "RESP_AI_W10",
"student_id": user_id_val or "ANON",
"event_type": "detailed_feedback",
"timestamp": time.time(),
"question": last_q,
"answer": last_a,
"feedback_text": text.strip(),
"model_name": model_name_val,
"language": lang_pref,
"learning_mode": mode_val,
}
)
print("[Feedback] detailed_feedback logged to LangSmith.")
except Exception as e:
print("detailed_feedback log error:", e)
return gr.update(
value="",
placeholder="Thanks! Your feedback has been recorded.",
)
feedback_submit_btn.click(
submit_detailed_feedback,
[
feedback_text,
last_question_state,
last_answer_state,
user_id_state,
learning_mode,
model_name,
language_preference,
],
[feedback_text],
)
# ===== Export / Summary =====
export_btn.click(
lambda h, c, m, w, cog: export_conversation(h, c, m, w, cog),
[chatbot, course_outline_state, learning_mode, weakness_state, cognitive_state_state],
[result_display],
)
summary_btn.click(
lambda h, c, w, cog, m, l: summarize_conversation(
h, c, w, cog, m, l
),
[
chatbot,
course_outline_state,
weakness_state,
cognitive_state_state,
model_name,
language_preference,
],
[result_display],
)
# ===== Reset Conversation =====
def clear_all():
empty_state = {"confusion": 0, "mastery": 0}
default_status = render_session_status("Concept Explainer", [], empty_state)
return (
[],
[],
empty_state,
[],
"",
default_status,
"",
"",
False,
gr.update(interactive=False, value="👍 Helpful"),
gr.update(interactive=False, value="👎 Not helpful"),
)
clear_btn.click(
clear_all,
None,
[
chatbot,
weakness_state,
cognitive_state_state,
rag_chunks_state,
result_display,
session_status,
last_question_state,
last_answer_state,
feedback_used_state,
thumb_up_btn,
thumb_down_btn,
],
queue=False,
)
if __name__ == "__main__":
demo.launch(
share=True,
server_name="0.0.0.0",
server_port=7860,
)