# ui/til_feedback.py
# Streamlit page: collects "Today I Learnt" notes and shows crew feedback.
import streamlit as st
from dotenv import load_dotenv
from crew.til import TilCrew
from streamlit_extras.capture import stdout

# Load variables from a local .env file (presumably API keys for the crew's
# LLM backend — TODO confirm) before any crew code runs.
load_dotenv()
def main():
    """Render the "Today I Learnt" feedback page.

    Shows a text area pre-filled with sample TIL bullet points. When the
    user presses "Get Feedback", runs ``TilCrew`` over the content while
    mirroring its stdout into the page, then renders one feedback section
    per TIL entry.
    """
    st.markdown("<div class='container'>", unsafe_allow_html=True)
    st.markdown(
        """
        <div class="centered">
        <p class="title">Today I Learnt Feedback</p>
        <p class="description">Feedback on Today I Learnt</p>
        </div>
        """,
        unsafe_allow_html=True
    )
    # Sample content mixes sound TILs with a factually dubious one
    # ("Go Routines ... in React"), giving the crew something to flag.
    til_content = st.text_area(
        'Enter what you learnt today:',
        "* Quantization is the process of reducing the size of LLM models by reducing the underlying weights.\n"
        "* The weights are reduced by scaling down the datatypes from a datatype that takes larger space to a data type that takes a smaller space, this is also known as downcasting.\n"
        "* Quantization offers benefits such as reduced storage space usage and faster computation.\n"
        "* Disadvantages: Answers are less precise\n"
        "* I learnt how to use Go Routines to handle concurrency in React.\n",
        key='til_content', help='Enter what you learnt today')
    if st.button("Get Feedback"):
        with st.status(
            "🤖 **Analysing TIL...**", state="running", expanded=True
        ) as status:
            # Fixed-height scrollable area that streams the crew's stdout so
            # the user can watch the agents' progress while they run.
            with st.container(height=500, border=False):
                log_container = st.empty()
                with stdout(log_container.code, terminator=""):
                    feedback = TilCrew()
                    inputs = {"content": til_content}
                    # NOTE(review): assumes kickoff() returns a mapping whose
                    # "feedback" value is a list of dicts — confirm in TilCrew.
                    results = feedback.kickoff(inputs=inputs)["feedback"]
            status.update(
                label="✅ Feedback ready!",
                state="complete",
                expanded=False,
            )
        # Render one section per TIL; detailed critique only for "not_ok".
        for result in results:
            st.markdown(f"#### TIL: {result['til']}")
            st.markdown(f"**Feedback:** {result['feedback']}")
            if result['feedback'] == "not_ok":
                st.markdown(f"**Criteria:** {result['feedback_criteria']}")
                st.markdown(f"**Reason:** {result['reason']}")
                if result.get('suggestion') is not None:
                    st.markdown(f"**Suggestion:** {result['suggestion']}")
# Standard entry-point guard: run the page only when executed as a script,
# not when imported by another module.
if __name__ == "__main__":
    main()