Spaces:
Sleeping
Sleeping
whymath committed on
Commit ·
5768c9a
1
Parent(s): 3a1e28f
Adding few shot examples and refining prompts
Browse files
app.py
CHANGED
|
@@ -10,12 +10,18 @@ load_dotenv()
|
|
| 10 |
|
| 11 |
start_msg = "Hello! I'm Teach2Learn VirtualStudent, a virtual student peer by Jerry Chiang and Yohan Mathew\n\nYou can choose to upload a PDF, or just start chatting\n"
|
| 12 |
base_instructions = """
|
| 13 |
-
Assume you
|
| 14 |
-
|
|
|
|
| 15 |
If they ask for feedback, you should provide constructive feedback on the whole conversation instead of asking another question.
|
| 16 |
"""
|
| 17 |
-
|
| 18 |
-
Pretend you are a student
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
"""
|
| 20 |
openai_chat_model = ChatOpenAI(model="gpt-3.5-turbo")
|
| 21 |
base_chain = utils.create_base_chain(openai_chat_model, base_instructions)
|
|
@@ -33,8 +39,8 @@ async def start_chat():
|
|
| 33 |
|
| 34 |
# Send a welcome message with action buttons
|
| 35 |
actions = [
|
| 36 |
-
cl.Action(name="switch_default", value="switch_default_value", label="Switch back to default mode
|
| 37 |
-
cl.Action(name="switch_ai_student", value="switch_ai_student_value", label="Switch to
|
| 38 |
cl.Action(name="upload_pdf", value="upload_pdf_value", label="Upload a PDF", description="Upload a PDF")
|
| 39 |
]
|
| 40 |
await cl.Message(content=start_msg, actions=actions).send()
|
|
@@ -88,7 +94,7 @@ async def upload_pdf_fn(action: cl.Action):
|
|
| 88 |
|
| 89 |
# Create the RAG chain and store it in the user session
|
| 90 |
if settings["current_mode"] == "ai_student_chain":
|
| 91 |
-
rag_instructions =
|
| 92 |
else:
|
| 93 |
rag_instructions = base_instructions
|
| 94 |
rag_chain = utils.create_rag_chain_from_file(openai_chat_model, rag_instructions, file_uploaded, file_uploaded.name)
|
|
@@ -108,7 +114,7 @@ async def switch_default_fn(action: cl.Action):
|
|
| 108 |
settings["rag_chain_available"] = False
|
| 109 |
cl.user_session.set("settings", settings)
|
| 110 |
|
| 111 |
-
msg = cl.Message(content="Okay, I'm back to
|
| 112 |
await msg.send()
|
| 113 |
|
| 114 |
|
|
@@ -117,10 +123,10 @@ async def switch_ai_student_fn(action: cl.Action):
|
|
| 117 |
print("\nSwitching to AI student mode")
|
| 118 |
settings = cl.user_session.get("settings")
|
| 119 |
|
| 120 |
-
ai_student_chain = utils.
|
| 121 |
settings["ai_student_chain"] = ai_student_chain
|
| 122 |
settings["current_mode"] = "ai_student_chain"
|
| 123 |
cl.user_session.set("settings", settings)
|
| 124 |
|
| 125 |
-
msg = cl.Message(content="Okay, I will take on the role of
|
| 126 |
await msg.send()
|
|
|
|
| 10 |
|
| 11 |
start_msg = "Hello! I'm Teach2Learn VirtualStudent, a virtual student peer by Jerry Chiang and Yohan Mathew\n\nYou can choose to upload a PDF, or just start chatting\n"
|
| 12 |
base_instructions = """
|
| 13 |
+
Assume you have mastery in the topic and that the user is someone who is trying to ensure they have a solid understanding by teaching and explaining the material to you.
|
| 14 |
+
Your goal is to ensure that the user understands the concept they are explaining by asking questions to help them learn by teaching rather than explaining things directly to them.
|
| 15 |
+
Let the user know if they are correct. If the user is wrong or off track, you should challenge the user by asking them Socratic questions to guide them back.
|
| 16 |
If they ask for feedback, you should provide constructive feedback on the whole conversation instead of asking another question.
|
| 17 |
"""
|
| 18 |
+
ai_student_instructions = """
|
| 19 |
+
Pretend you are a bumbling student with a poor grasp of the topic, are prone to make mistakes, and the user is your teacher.
|
| 20 |
+
Your goal is to get the user to teach you about a topic or concept, and you can ask clarifying questions to help them teach better.
|
| 21 |
+
You may lay out a scenario for the teacher to help you through, such as a homework problem, a scenario you need to resolve, or a piece of text you need help deciphering.
|
| 22 |
+
Do not explain the material to them except when they ask you to, and when you do as a bumbling student, you may make mistakes and say something unclear or false.
|
| 23 |
+
If they ask for feedback, instead of asking another question, you should provide constructive feedback on how well they grasped the content and did in their teaching, including ways they can improve.
|
| 24 |
+
When you make a mistake, if the user does not catch or correct you, make sure you let the user know during the feedback at the end of the session.
|
| 25 |
"""
|
| 26 |
openai_chat_model = ChatOpenAI(model="gpt-3.5-turbo")
|
| 27 |
base_chain = utils.create_base_chain(openai_chat_model, base_instructions)
|
|
|
|
| 39 |
|
| 40 |
# Send a welcome message with action buttons
|
| 41 |
actions = [
|
| 42 |
+
cl.Action(name="switch_default", value="switch_default_value", label="Switch back to default mode", description="Switch back to default mode"),
|
| 43 |
+
cl.Action(name="switch_ai_student", value="switch_ai_student_value", label="Switch to bumbling student mode", description="Switch to bumbling student mode"),
|
| 44 |
cl.Action(name="upload_pdf", value="upload_pdf_value", label="Upload a PDF", description="Upload a PDF")
|
| 45 |
]
|
| 46 |
await cl.Message(content=start_msg, actions=actions).send()
|
|
|
|
| 94 |
|
| 95 |
# Create the RAG chain and store it in the user session
|
| 96 |
if settings["current_mode"] == "ai_student_chain":
|
| 97 |
+
rag_instructions = ai_student_instructions
|
| 98 |
else:
|
| 99 |
rag_instructions = base_instructions
|
| 100 |
rag_chain = utils.create_rag_chain_from_file(openai_chat_model, rag_instructions, file_uploaded, file_uploaded.name)
|
|
|
|
| 114 |
settings["rag_chain_available"] = False
|
| 115 |
cl.user_session.set("settings", settings)
|
| 116 |
|
| 117 |
+
msg = cl.Message(content="Okay, I'm back to my default mode. What would you like to try teaching me next?")
|
| 118 |
await msg.send()
|
| 119 |
|
| 120 |
|
|
|
|
| 123 |
print("\nSwitching to AI student mode")
|
| 124 |
settings = cl.user_session.get("settings")
|
| 125 |
|
| 126 |
+
ai_student_chain = utils.create_ai_student_chain(openai_chat_model, ai_student_instructions)
|
| 127 |
settings["ai_student_chain"] = ai_student_chain
|
| 128 |
settings["current_mode"] = "ai_student_chain"
|
| 129 |
cl.user_session.set("settings", settings)
|
| 130 |
|
| 131 |
+
msg = cl.Message(content="Okay, I will take on the role of an unsure student. What would you like to try teaching me next?")
|
| 132 |
await msg.send()
|
utils.py
CHANGED
|
@@ -27,6 +27,7 @@ def chunk_documents(docs, tiktoken_len):
|
|
| 27 |
print('len(split_chunks) =', len(split_chunks))
|
| 28 |
return split_chunks
|
| 29 |
|
|
|
|
| 30 |
def process_file(file: AskFileResponse):
|
| 31 |
import tempfile
|
| 32 |
|
|
@@ -44,6 +45,30 @@ def create_base_chain(openai_chat_model, system_prompt):
|
|
| 44 |
human_template = "{question}"
|
| 45 |
base_prompt = ChatPromptTemplate.from_messages([
|
| 46 |
("system", system_prompt),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
("human", human_template)
|
| 48 |
])
|
| 49 |
base_chain = base_prompt | openai_chat_model
|
|
@@ -51,6 +76,35 @@ def create_base_chain(openai_chat_model, system_prompt):
|
|
| 51 |
return base_chain
|
| 52 |
|
| 53 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
def create_rag_chain_from_file(openai_chat_model, base_instructions, file_response, file_name):
|
| 55 |
|
| 56 |
# Load the documents from a PDF file using PyMuPDFLoader
|
|
@@ -74,7 +128,7 @@ def create_rag_chain_from_file(openai_chat_model, base_instructions, file_respon
|
|
| 74 |
# Define the RAG prompt template
|
| 75 |
RAG_PROMPT = """
|
| 76 |
Use the provided context while replying to the user query. Only use the provided context to respond to the query.
|
| 77 |
-
If the context is not sufficient, you can respond with "I cannot seem to find this topic in the PDF. Would you like to switch to
|
| 78 |
|
| 79 |
QUERY:
|
| 80 |
{question}
|
|
|
|
| 27 |
print('len(split_chunks) =', len(split_chunks))
|
| 28 |
return split_chunks
|
| 29 |
|
| 30 |
+
|
| 31 |
def process_file(file: AskFileResponse):
|
| 32 |
import tempfile
|
| 33 |
|
|
|
|
| 45 |
human_template = "{question}"
|
| 46 |
base_prompt = ChatPromptTemplate.from_messages([
|
| 47 |
("system", system_prompt),
|
| 48 |
+
# Example 1
|
| 49 |
+
# ("human", "I want to teach you about the Pythagorean Theorem. Can you pretend to know the topic well and give me feedback on how well I explain it?"),
|
| 50 |
+
# ("ai", "That sounds great! I’m here to learn about the Pythagorean Theorem from you. Can you explain what the Pythagorean Theorem is and how to apply it?"),
|
| 51 |
+
# Example 2
|
| 52 |
+
# ("human", "The Pythagorean Theorem is a theorem that relates the lengths of right triangles. More specifically, if a triangle has 3 sides - a, b and c, with c being the hypotenuse - then the theorem tells us a^2+b^2 = c^2. This helps us calculate distances in 2-D space and has applications in math, science, engineering, and architecture."),
|
| 53 |
+
# ("ai", "Great! That makes sense. Can you walk me through an example of how to apply the Pythagorean Theorem to a real world problem?"),
|
| 54 |
+
# Example 3
|
| 55 |
+
# ("human", "The Pythagorean Theorem has something to do with triangles and the lengths of their sides. I'm not sure what though."),
|
| 56 |
+
# ("ai", "Okay, I see. What kind of triangles does it deal with? And what is the relationship between the three sides? Maybe this site can help us: https://byjus.com/maths/pythagoras-theorem"),
|
| 57 |
+
# Example 4
|
| 58 |
+
("human", "I'd like to end the session"),
|
| 59 |
+
("ai", "No worries. Would you like me to share some feedback with you?"),
|
| 60 |
+
# Example 5
|
| 61 |
+
# ("human", "I don't want to discuss the Pythagorean Theorem anymore. Instead, I want to talk more about circles."),
|
| 62 |
+
# ("ai", "That's fine. Would you like for me to first give you some feedback on this lesson before we switch to another topic?"),
|
| 63 |
+
# Example 6
|
| 64 |
+
("human", "Can you tell me how I did?"),
|
| 65 |
+
("ai", "Sure! Shall I first give you some feedback on how well you covered the content, and then some feedback on your approach to teaching?"),
|
| 66 |
+
# Example 7
|
| 67 |
+
("human", "Can you tell me the answer?"),
|
| 68 |
+
("ai", "Hmm, maybe we can figure it out together? If I passed you some references to look up, can you help me figure it out?"),
|
| 69 |
+
# Example 8a (mistake)
|
| 70 |
+
("human", "So using the Pythagorean Theorem, given the hypotenuse is 13 and one of the legs is 5, we know the length of the other leg is going to equal sqrt(13^2 - 6^2) = sqrt(169 - 36) = sqrt(133) which is almost 12?"),
|
| 71 |
+
("ai", "Hmm, can you explain to me why you have written 6^2 rather than 5^2?"),
|
| 72 |
("human", human_template)
|
| 73 |
])
|
| 74 |
base_chain = base_prompt | openai_chat_model
|
|
|
|
| 76 |
return base_chain
|
| 77 |
|
| 78 |
|
| 79 |
+
def create_ai_student_chain(openai_chat_model, system_prompt):
|
| 80 |
+
human_template = "{question}"
|
| 81 |
+
base_prompt = ChatPromptTemplate.from_messages([
|
| 82 |
+
("system", system_prompt),
|
| 83 |
+
# Example 3
|
| 84 |
+
# ("human", "The Pythagorean Theorem has something to do with triangles and the lengths of their sides. I'm not sure what though."),
|
| 85 |
+
# ("ai", "Okay, I see. What kind of triangles does it deal with? And what is the relationship between the three sides? Maybe this site can help us: https://byjus.com/maths/pythagoras-theorem"),
|
| 86 |
+
# Example 4
|
| 87 |
+
("human", "I'd like to end the session"),
|
| 88 |
+
("ai", "No worries. Would you like me to share some feedback with you?"),
|
| 89 |
+
# Example 5
|
| 90 |
+
# ("human", "I don't want to discuss the Pythagorean Theorem anymore. Instead, I want to talk more about circles."),
|
| 91 |
+
# ("ai", "That's fine. Would you like for me to first give you some feedback on this lesson before we switch to another topic?"),
|
| 92 |
+
# Example 6
|
| 93 |
+
("human", "Can you tell me how I did?"),
|
| 94 |
+
("ai", "Sure! Shall I first give you some feedback on how well you covered the content, and then some feedback on your approach to teaching?"),
|
| 95 |
+
# Example 7
|
| 96 |
+
("human", "Can you tell me the answer?"),
|
| 97 |
+
("ai", "Hmm, maybe we can figure it out together? If I passed you some references to look up, can you help me figure it out?"),
|
| 98 |
+
# Example 8b (mistake)
|
| 99 |
+
("human", "So can you show me how you would apply the Pythagorean Theorem to solve this next problem? Let's say you are building a 8 feet tall vertical structure and you'd like to add support beams all around it 6 feet away from its base. Can you help me calculate how long these support beams should be?"),
|
| 100 |
+
("ai", "Because the structure is vertical and the support beams are on the ground, we see this forms a right triangle. So we can use the Pythagorean Theorem to calculate the length of the support beam. Let's call the length of the support beam 'c', while the height of the vertical structure is 'a' and the distance the support beam is away from the structure is 'b'. Hence, if c^2 = a^2 + b^2, I think we need to solve for c = sqrt(8^2 + 6^2) = sqrt(16+12) = sqrt (28) = 5.3? Did I do that right?"),
|
| 101 |
+
("human", human_template)
|
| 102 |
+
])
|
| 103 |
+
ai_student_chain = base_prompt | openai_chat_model
|
| 104 |
+
print("Created base chain\n")
|
| 105 |
+
return ai_student_chain
|
| 106 |
+
|
| 107 |
+
|
| 108 |
def create_rag_chain_from_file(openai_chat_model, base_instructions, file_response, file_name):
|
| 109 |
|
| 110 |
# Load the documents from a PDF file using PyMuPDFLoader
|
|
|
|
| 128 |
# Define the RAG prompt template
|
| 129 |
RAG_PROMPT = """
|
| 130 |
Use the provided context while replying to the user query. Only use the provided context to respond to the query.
|
| 131 |
+
If the context is not sufficient, you can respond with "I cannot seem to find this topic in the PDF. Would you like to switch back to the default or bumbling student mode?".
|
| 132 |
|
| 133 |
QUERY:
|
| 134 |
{question}
|