# TutorGPT / app.py
# utkuarslan5
# app
# f30a8b8
# from dotenv import find_dotenv, load_dotenv
import warnings

import streamlit as st
from langchain.chains import (ConversationChain,
                              LLMMathChain,
                              SequentialChain,
                              SimpleSequentialChain)
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.prompts.prompt import PromptTemplate

warnings.filterwarnings("ignore")
# Load environment variables
# load_dotenv(find_dotenv())
q = """Let the triangle ABC with the vertices A, B and C be given on the Bessele ellipsoid. The mean latitude is 48° 30'. The constants of the Bessele ellipsoid are given by c = 6398786.849 m and e'² = 0.00671922. Let the length (of the geodesic line) between the vertices B and C be 58461.234 m. Furthermore the angles (= 62° 26' 54'.21) and (= 52° 12' 06'.99) are known.
Determine (in this order and independently)
a) the side AC of the triangle ABC using Legendre's theorem,
b) the side AB of the triangle ABC according to Soldner's additament method as well as
c) the angle using Ehlert's formulas.
d) control the side BC using the spherical cosine theorem.
"""
# --------------------------------------------------------------
# LLM: Tutors you in your homeworks
# --------------------------------------------------------------
# Low temperature keeps tutoring output mostly deterministic and factual.
temperature = 0.3

# "gpt-3.5-turbo" is a chat-completion model; the completion-style `OpenAI`
# wrapper mis-routes it, and the chains below feed a ChatPromptTemplate,
# so the chat wrapper is the correct one.
llm = ChatOpenAI(model_name="gpt-3.5-turbo",
                 temperature=temperature,
                 max_retries=15,   # retry aggressively on rate limits
                 max_tokens=2048)
# Template to use for the system message prompt: defines the Tutor persona
# and the required Markdown output structure (Summary / Formulas / Steps).
# Fixed the typo/grammar "Format your output readible Markdown".
SYSTEM_PROMPT_TEMPLATE = """You are a Tutor for human students. Humans will ask you questions and you will provide which calculations to make. Keep your answers simple. Format your output in readable Markdown.
Output:
Summary: <A summary of the answer> \n
Formulas: <A list of all the formulas to use> \n
Steps: <step-by-step list of answer> \n
"""
system_message_prompt = SystemMessagePromptTemplate.from_template(SYSTEM_PROMPT_TEMPLATE)
# Human question prompt: carries the running transcript plus the new turn.
# ConversationChain requires exactly the {history} and {input} variables.
# Fixed the grammar "a educational conversation with a Student and a Tutor".
USER_PROMPT_TEMPLATE = """The following is an educational conversation between a Student and a Tutor. Tutor outputs step-by-step which calculations to make to answer the question based on the context. If the Tutor is unsure of the steps, it truthfully says it does not know.
Current conversation:
{history}
Student: {input}
Tutor:"""
human_message_prompt = HumanMessagePromptTemplate.from_template(USER_PROMPT_TEMPLATE)

# Full chat prompt: system persona first, then the human turn.
_CHAT_PROMPT = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
# Conversation chain: chat prompt + buffer memory. The prefixes make the
# replayed history render as "Student:" / "Tutor:" lines, matching the
# labels used inside USER_PROMPT_TEMPLATE.
_memory = ConversationBufferMemory(ai_prefix="Tutor", human_prefix="Student")
conversation_chain = ConversationChain(
    llm=llm,
    prompt=_CHAT_PROMPT,
    memory=_memory,
    verbose=True,
)
# Math prompt template: few-shot instructions telling the model to emit
# numpy-based Python code (with a worked 37593 * 67 example) instead of
# computing answers in-text.
MATH_PROMPT_TEMPLATE = """You are GPT-3, and you can't do math.
You can do basic math, and your memorization abilities are impressive, but you can't do any complex calculations that a human could not do in their head. You also have an annoying tendency to just make up highly specific, but wrong, answers.
So we hooked you up to a Python 3 kernel, and now you can execute code. If you execute code, you must print out the final answer using the print function. You MUST use the python package numpy to answer your question. You must import numpy as np. Output the question, your code, output of your code, and your answer.
Question: ${{Question with hard calculation.}}
```python
${{Code that prints what you need to know}}
print(${{code}})
```
```output
${{Output of your code}}
```
Answer: ${{Answer}}
Begin.
Question: What is 37593 * 67?
```python
import numpy as np
print(np.multiply(37593, 67))
```
```output
2518731
```
Answer: 2518731
Question: {question}"""
# NOTE(review): verify this PROMPT is actually passed into the math chain
# below — if the chain is built without `prompt=PROMPT`, LLMMathChain falls
# back to its library-default template and this one is dead code.
PROMPT = PromptTemplate(input_variables=["question"], template=MATH_PROMPT_TEMPLATE)
# Math chain: wire in the custom numpy-based PROMPT defined above.
# Previously the template was constructed but never passed to the chain,
# so LLMMathChain silently used its library-default prompt instead.
math_chain = LLMMathChain(
    llm=llm,
    prompt=PROMPT,
    verbose=True,
)
# --------------------------------------------------------------
# Setup UI components
# --------------------------------------------------------------
# Two tabs: free-form tutoring chat and a one-line math calculator.
tutor, math = st.tabs(["Tutor", "Math"])

with tutor:
    st.title("TutorGPT")
    st.caption("May produce inaccurate results. Always verify your answers.")
    chat = st.text_area(label='Ask your question')
    ask = st.button(label='Ask!')
    if ask:
        # Guard against sending an empty prompt to the LLM (previously a
        # blank question was forwarded as-is and billed as a request).
        if not chat.strip():
            st.warning("Please enter a question first.")
        else:
            with st.spinner("Thinking..."):
                response = conversation_chain.predict(input=chat)
            st.write(response)
with math:
    # Math tab: evaluates a single expression through the math chain.
    calculation = st.text_input(label='Calculate')
    ask2 = st.button(label='Calculate!')
    st.text('Accepts only one-liner expressions.')
    if ask2:
        # Guard against running the chain on an empty expression
        # (previously a blank input was forwarded to the LLM as-is).
        if not calculation.strip():
            st.warning("Please enter an expression first.")
        else:
            # Spinner for consistency with the Tutor tab's feedback.
            with st.spinner("Calculating..."):
                response = math_chain.run(calculation)
            st.write(response)