Spaces:
Sleeping
Sleeping
whymath
committed on
Commit
·
e04643d
1
Parent(s):
df206fd
Adding tutee-led AI student style and updating settings
Browse files
app.py
CHANGED
|
@@ -10,10 +10,13 @@ load_dotenv()
|
|
| 10 |
|
| 11 |
start_msg = "Hello! I'm Teach2Learn VirtualStudent, a virtual student peer by Jerry Chiang and Yohan Mathew\n\nYou can choose to upload a PDF, or just start chatting\n"
|
| 12 |
base_instructions = """
|
| 13 |
-
Assume you are a
|
| 14 |
You should always first let the user know if they are correct or not, and then ask them questions to help them learn by teaching rather than explaining things to them.
|
| 15 |
If they ask for feedback, you should provide constructive feedback on the whole conversation instead of asking another question.
|
| 16 |
"""
|
|
|
|
|
|
|
|
|
|
| 17 |
openai_chat_model = ChatOpenAI(model="gpt-3.5-turbo")
|
| 18 |
base_chain = utils.create_base_chain(openai_chat_model, base_instructions)
|
| 19 |
|
|
@@ -24,14 +27,15 @@ async def start_chat():
|
|
| 24 |
|
| 25 |
# Set the user session settings
|
| 26 |
settings = {
|
| 27 |
-
"
|
| 28 |
}
|
| 29 |
cl.user_session.set("settings", settings)
|
| 30 |
|
| 31 |
# Send a welcome message with action buttons
|
| 32 |
actions = [
|
| 33 |
-
cl.Action(name="
|
| 34 |
-
cl.Action(name="
|
|
|
|
| 35 |
]
|
| 36 |
await cl.Message(content=start_msg, actions=actions).send()
|
| 37 |
|
|
@@ -43,11 +47,16 @@ async def main(message: cl.Message):
|
|
| 43 |
settings = cl.user_session.get("settings")
|
| 44 |
|
| 45 |
# Generate the response from the chain
|
| 46 |
-
if settings["
|
| 47 |
print("\nUsing RAG chain to answer query", user_query)
|
| 48 |
rag_chain = settings["rag_chain"]
|
| 49 |
query_response = rag_chain.invoke({"question" : user_query})
|
| 50 |
query_answer = query_response["response"].content
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 51 |
else:
|
| 52 |
print("\nUsing base chain to answer query", user_query)
|
| 53 |
query_response = base_chain.invoke({"question" : user_query})
|
|
@@ -73,16 +82,16 @@ async def upload_pdf_fn(action: cl.Action):
|
|
| 73 |
timeout=180,
|
| 74 |
).send()
|
| 75 |
file_uploaded = files[0]
|
| 76 |
-
print("\nUploaded file:", file_uploaded, "\n")
|
| 77 |
|
| 78 |
# Create the RAG chain and store it in the user session
|
| 79 |
rag_chain = utils.create_rag_chain_from_file(openai_chat_model, base_instructions, file_uploaded.path, file_uploaded.name)
|
| 80 |
settings = cl.user_session.get("settings")
|
| 81 |
settings["rag_chain"] = rag_chain
|
| 82 |
-
settings["
|
| 83 |
cl.user_session.set("settings", settings)
|
| 84 |
|
| 85 |
-
msg = cl.Message(content="
|
| 86 |
await msg.send()
|
| 87 |
|
| 88 |
|
|
@@ -96,3 +105,17 @@ async def switch_default_fn(action: cl.Action):
|
|
| 96 |
|
| 97 |
msg = cl.Message(content="Okay, I'm back to answering general questions. What would you like to try teaching me next?")
|
| 98 |
await msg.send()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
# Greeting shown when a new chat session starts.
start_msg = "Hello! I'm Teach2Learn VirtualStudent, a virtual student peer by Jerry Chiang and Yohan Mathew\n\nYou can choose to upload a PDF, or just start chatting\n"

# System prompt for the default persona: first tell the user whether they are
# correct, then ask questions rather than explaining the material to them.
base_instructions = """
Assume you are a student and that the user is your teacher. Your goal is to ensure that the user understands the concept they are explaining.
You should always first let the user know if they are correct or not, and then ask them questions to help them learn by teaching rather than explaining things to them.
If they ask for feedback, you should provide constructive feedback on the whole conversation instead of asking another question.
"""

# System prompt for the user-led "AI student" persona: the model plays a
# student and asks clarifying questions while the user teaches.
userled_instructions = """
Pretend you are a student and that the user is your teacher. Your goal is to get the user to teach you about a topic or concept, and you can ask clarifying questions to help them teach better.
"""

# Shared chat model plus the default conversation chain built from it.
openai_chat_model = ChatOpenAI(model="gpt-3.5-turbo")
base_chain = utils.create_base_chain(openai_chat_model, base_instructions)
|
| 22 |
|
|
|
|
| 27 |
|
| 28 |
# Set the user session settings
|
| 29 |
settings = {
|
| 30 |
+
"current_mode": "base_chain"
|
| 31 |
}
|
| 32 |
cl.user_session.set("settings", settings)
|
| 33 |
|
| 34 |
# Send a welcome message with action buttons
|
| 35 |
actions = [
|
| 36 |
+
cl.Action(name="switch_default", value="switch_default_value", label="Switch back to default mode (or for feedback)", description="Switch back to default mode (or for feedback)"),
|
| 37 |
+
cl.Action(name="switch_ai_student", value="switch_ai_student_value", label="Switch to AI student mode", description="Switch to AI student mode"),
|
| 38 |
+
cl.Action(name="upload_pdf", value="upload_pdf_value", label="Upload a PDF", description="Upload a PDF")
|
| 39 |
]
|
| 40 |
await cl.Message(content=start_msg, actions=actions).send()
|
| 41 |
|
|
|
|
| 47 |
settings = cl.user_session.get("settings")
|
| 48 |
|
| 49 |
# Generate the response from the chain
|
| 50 |
+
if settings["current_mode"] == "rag_chain":
|
| 51 |
print("\nUsing RAG chain to answer query", user_query)
|
| 52 |
rag_chain = settings["rag_chain"]
|
| 53 |
query_response = rag_chain.invoke({"question" : user_query})
|
| 54 |
query_answer = query_response["response"].content
|
| 55 |
+
elif settings["current_mode"] == "ai_student_chain":
|
| 56 |
+
print("\nUsing AI student chain to answer query", user_query)
|
| 57 |
+
ai_student_chain = settings["ai_student_chain"]
|
| 58 |
+
query_response = ai_student_chain.invoke({"question" : user_query})
|
| 59 |
+
query_answer = query_response.content
|
| 60 |
else:
|
| 61 |
print("\nUsing base chain to answer query", user_query)
|
| 62 |
query_response = base_chain.invoke({"question" : user_query})
|
|
|
|
| 82 |
timeout=180,
|
| 83 |
).send()
|
| 84 |
file_uploaded = files[0]
|
| 85 |
+
# print("\nUploaded file:", file_uploaded, "\n")
|
| 86 |
|
| 87 |
# Create the RAG chain and store it in the user session
|
| 88 |
rag_chain = utils.create_rag_chain_from_file(openai_chat_model, base_instructions, file_uploaded.path, file_uploaded.name)
|
| 89 |
settings = cl.user_session.get("settings")
|
| 90 |
settings["rag_chain"] = rag_chain
|
| 91 |
+
settings["current_mode"] = "rag_chain"
|
| 92 |
cl.user_session.set("settings", settings)
|
| 93 |
|
| 94 |
+
msg = cl.Message(content="Okay, I'm ready for you to teach me from the uploaded PDF file.")
|
| 95 |
await msg.send()
|
| 96 |
|
| 97 |
|
|
|
|
| 105 |
|
| 106 |
msg = cl.Message(content="Okay, I'm back to answering general questions. What would you like to try teaching me next?")
|
| 107 |
await msg.send()
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
@cl.action_callback("switch_ai_student")
async def switch_ai_student_fn(action: cl.Action):
    """Action handler that puts the session into user-led "AI student" mode.

    Builds a fresh chain from ``userled_instructions``, stores it in the
    session settings, and flips ``current_mode`` to ``"ai_student_chain"``
    so the message handler routes follow-up queries to it.
    """
    print("\nSwitching to AI student mode")

    settings = cl.user_session.get("settings")
    # Reuse the same helper that builds the default chain, only with the
    # user-led system prompt.
    student_chain = utils.create_base_chain(openai_chat_model, userled_instructions)
    settings["current_mode"] = "ai_student_chain"
    settings["ai_student_chain"] = student_chain
    cl.user_session.set("settings", settings)

    await cl.Message(content="Okay, I will take on the role of a student. What would you like to try teaching me next?").send()
|
utils.py
CHANGED
|
@@ -26,10 +26,10 @@ def chunk_documents(docs, tiktoken_len):
|
|
| 26 |
return split_chunks
|
| 27 |
|
| 28 |
|
| 29 |
-
def create_base_chain(openai_chat_model,
|
| 30 |
human_template = "{question}"
|
| 31 |
base_prompt = ChatPromptTemplate.from_messages([
|
| 32 |
-
("system",
|
| 33 |
("human", human_template)
|
| 34 |
])
|
| 35 |
base_chain = base_prompt | openai_chat_model
|
|
@@ -58,7 +58,8 @@ def create_rag_chain_from_file(openai_chat_model, base_instructions, file_path,
|
|
| 58 |
|
| 59 |
# Define the RAG prompt template
|
| 60 |
RAG_PROMPT = """
|
| 61 |
-
Use the provided context while replying to the user query. Only use the provided context to
|
|
|
|
| 62 |
|
| 63 |
QUERY:
|
| 64 |
{question}
|
|
|
|
| 26 |
return split_chunks
|
| 27 |
|
| 28 |
|
| 29 |
+
def create_base_chain(openai_chat_model, system_prompt):
|
| 30 |
human_template = "{question}"
|
| 31 |
base_prompt = ChatPromptTemplate.from_messages([
|
| 32 |
+
("system", system_prompt),
|
| 33 |
("human", human_template)
|
| 34 |
])
|
| 35 |
base_chain = base_prompt | openai_chat_model
|
|
|
|
| 58 |
|
| 59 |
# Define the RAG prompt template
|
| 60 |
RAG_PROMPT = """
|
| 61 |
+
Use the provided context while replying to the user query. Only use the provided context to respond to the query.
|
| 62 |
+
If the context is not sufficient, you can respond with "I cannot seem to find this topic in the PDF. Would you like to switch to another mode?".
|
| 63 |
|
| 64 |
QUERY:
|
| 65 |
{question}
|