from langchain_core.prompts import ChatPromptTemplate
from langgraph.prebuilt import create_react_agent
from agents.states import Quiz
from agents.prompts import SUPERVISOR_SYSTEM_PROMPT, SUPERVISOR_USER_PROMPT, SUPERVISOR_CHAT_PROMPT
from agents.tools import Docs
from agents.model import llm
from typing import List, Optional, Tuple
def escape_template_braces(text: str) -> str:
    """Double every curly brace in *text* so ChatPromptTemplate reads it literally.

    Mathematical notation such as {X ∈ A|Y = y} would otherwise be parsed
    as a template placeholder.

    Args:
        text: Input text that may contain curly braces (may be None).

    Returns:
        The text with each ``{`` and ``}`` doubled, or ``""`` when *text*
        is None.
    """
    if text is None:
        return ""
    escaped_chars = []
    for ch in text:
        escaped_chars.append(ch * 2 if ch in "{}" else ch)
    return "".join(escaped_chars)
def create_supervisor_agent(docs: Docs):
    """Build a ReAct agent for Socratic tutoring over *docs*.

    Args:
        docs: Docs instance with a loaded document.

    Returns:
        A LangGraph ReAct agent wired to the document's search tool.
    """
    # Single tool: semantic search over the loaded document.
    return create_react_agent(
        model=llm,
        tools=[docs.as_search_tool()],
    )
def format_quiz_results(quiz: "Quiz", user_answers: List[str]) -> str:
    """Format quiz results for the supervisor to review.

    Answers are compared case-insensitively after stripping surrounding
    whitespace. Extra answers beyond ``quiz.tasks`` (or vice versa) are
    ignored by ``zip``.

    Args:
        quiz: The quiz that was taken; tasks expose ``task_id``, ``task``,
            ``task_type``, ``answer_options`` and ``correct_answer``.
        user_answers: The student's answers, aligned with ``quiz.tasks``.

    Returns:
        A "Score: x/y (p%)" header followed by a per-question breakdown.
    """
    if not quiz.tasks:
        # Guard: the percentage below would raise ZeroDivisionError.
        return "Score: 0/0 (0%)\n"
    results = []
    correct_count = 0
    for task, user_answer in zip(quiz.tasks, user_answers):
        correct = task.correct_answer or ""  # correct_answer may be None
        is_correct = user_answer.strip().lower() == correct.strip().lower()
        if is_correct:
            correct_count += 1
        result = f"""
Question {task.task_id}: {task.task}
Type: {task.task_type}
"""
        if task.answer_options:
            result += f"Options: {', '.join(task.answer_options)}\n"
        # Use the None-safe `correct` so a missing answer renders as "" rather
        # than the literal string "None".
        result += f"""Student Answer: {user_answer}
Correct Answer: {correct}
Result: {'CORRECT' if is_correct else 'INCORRECT'}
"""
        results.append(result)
    header = f"Score: {correct_count}/{len(quiz.tasks)} ({100 * correct_count / len(quiz.tasks):.0f}%)\n"
    return header + "\n".join(results)
def provide_feedback(
    docs: Docs,
    summary: str,
    quiz: Quiz,
    user_answers: List[str]
) -> str:
    """
    Provide Socratic feedback on quiz performance.

    Args:
        docs: Docs instance with loaded document
        summary: Summary of the document
        quiz: The quiz that was taken
        user_answers: User's answers to the quiz

    Returns:
        Feedback string from the supervisor
    """
    quiz_results = format_quiz_results(quiz, user_answers)
    # Pull the most relevant document chunks as grounding context.
    context_docs = docs.similarity_search("main concepts explanation", k=3)
    context = "\n\n".join(doc.page_content for doc in context_docs)
    prompt = ChatPromptTemplate.from_messages([
        ("system", SUPERVISOR_SYSTEM_PROMPT),
        ("human", SUPERVISOR_USER_PROMPT + "\n\nRelevant Document Context:\n{context}")
    ])
    chain = prompt | llm
    # NOTE: values supplied via .invoke() are substituted verbatim — the
    # template's str.format does NOT re-parse substituted values, so they
    # must not be brace-escaped here. Pre-escaping them (as the code
    # previously did) leaves literal '{{' / '}}' in the prompt the model
    # sees. escape_template_braces() is only for text that becomes part of
    # the template string itself (see chat_with_supervisor).
    response = chain.invoke({
        "summary": summary or "",
        "quiz_results": quiz_results,
        "context": context
    })
    return response.content
def chat_with_supervisor(
    docs: Docs,
    summary: str,
    user_message: str,
    conversation_history: Optional[List[dict]] = None
) -> str:
    """Continue the tutoring conversation with the supervisor.

    Args:
        docs: Docs instance with loaded document.
        summary: Summary of the document.
        user_message: The user's message/question.
        conversation_history: Prior turns as {"role": ..., "content": ...}
            dicts, oldest first.

    Returns:
        The supervisor's response text.
    """
    relevant = docs.similarity_search(user_message, k=3)
    context_text = "\n\n".join(d.page_content for d in relevant)
    # Every string below becomes template source for ChatPromptTemplate,
    # so literal braces must be escaped before the template is parsed.
    system_text = (
        f"You are a Socratic tutor. Document Summary: {escape_template_braces(summary)}"
        f"\n\nRelevant Context:\n{escape_template_braces(context_text)}"
    )
    transcript: List[Tuple[str, str]] = [("system", system_text)]
    for msg in conversation_history or []:
        transcript.append((msg["role"], escape_template_braces(msg["content"])))
    transcript.append(("human", escape_template_braces(user_message)))
    chain = ChatPromptTemplate.from_messages(transcript) | llm
    return chain.invoke({}).content
|