File size: 4,971 Bytes
3fc3c48
888953a
1f39bb8
 
b1753a9
1f39bb8
a9f3aed
01c715d
7b4a850
353f348
94686a0
 
 
 
 
 
 
 
 
 
 
 
 
35d4946
94686a0
 
 
 
 
c327216
 
 
 
 
3f49c4a
 
c327216
 
 
 
a581cfb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c327216
3f49c4a
01c715d
 
 
 
 
 
a78cc33
01c715d
 
 
 
 
 
 
 
 
 
 
 
 
a016754
 
01c715d
 
 
 
 
35d4946
a016754
01c715d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35d4946
01c715d
 
 
 
 
 
35d4946
01c715d
 
 
 
 
 
a9f3aed
7b4a850
c327216
35d4946
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
import streamlit as st
from dotenv import load_dotenv
from app.workflows.til.analyse_til import TilCrew
from app.workflows.til.analyse_til_v2 import AnalyseTilV2
from streamlit_extras.capture import stdout
from app.workflows.utils.feedback import Feedback

# Load environment variables (e.g. API keys) from a local .env file at import time.
load_dotenv()

def feedback_main():
    """Render the 'Today I Learnt' feedback page.

    Collects the user's TIL notes in a text area, runs them through
    ``AnalyseTilV2`` to produce per-takeaway feedback, displays the
    results, and lets the user rate each piece of feedback as
    helpful / not helpful (forwarded via :func:`give_feedback`).
    """
    # Page-wide CSS: background image plus transparent header/toolbar.
    page_bg_img = '''
        <style>
        [data-testid="stAppViewContainer"]{
            background-image:url("https://www.shutterstock.com/image-vector/abstract-technology-communication-concept-vector-600nw-1914443275.jpg");
            background-size:cover;
        }
        [data-testid="stHeader"]{
            background: rgba(0,0,0,0);
        }
        [data-testid="stToolbar"]{
            right: 2rem;
        }


        </style>
        '''

    st.markdown(page_bg_img, unsafe_allow_html=True)
    st.markdown("<div class='container'>", unsafe_allow_html=True)

    st.markdown(
        """
        <div class="centered">
            <p class="title">Today I Learnt Feedback</p>
            <p class="description">Feedback on Today I Learnt</p>
        </div>
        """,
        unsafe_allow_html=True
    )

    default_content = (
        "* Quantization is the process of reducing the size of LLM models by reducing the underlying weights.\n"
        "* The weights are reduced by scaling down the datatypes from a datatype that takes larger space to a data type that takes a smaller space, this is also known as downcasting.\n"
        "* Quantization offers benefits such as reduced storage space usage and faster computation.\n"
        "* Disadvantages: Answers are less precise\n"
        "* I learnt how to use Go Routines to handle concurrency in React.\n"
    )

    # Seed the widget's session state exactly once; afterwards the key alone
    # drives the widget. Passing both `value=` and a pre-existing session-state
    # key makes Streamlit warn that the widget "was created with a default
    # value but also had its value set via the Session State API".
    if 'til_content' not in st.session_state:
        st.session_state.til_content = default_content

    til_content = st.text_area(
        'Enter what you learnt today:',
        key='til_content',
        help='Enter what you learnt today'
    )

    if st.button("Get Feedback"):
        with st.status(
            "🤖 **Analysing TIL...**", state="running", expanded=True
        ) as status:
            with st.container(height=500, border=False):
                log_container = st.empty()
                # Mirror the workflow's stdout into the UI while it runs.
                with stdout(log_container.code, terminator=""):
                    feedback = AnalyseTilV2()
                    inputs = {"content": til_content}
                    results = feedback.kickoff(inputs=inputs)
            status.update(
                label="✅ Feedback ready!",
                state="complete",
                expanded=False,
            )
        # Persist results so they survive Streamlit reruns, and reset any
        # stale per-takeaway rating flags from a previous run.
        st.session_state.results = results
        st.session_state.run_id = results["run_id"]
        clear_feedback_state(results)

    if 'results' in st.session_state:
        results = st.session_state.results
        # NOTE(review): assumes results["til"] is a list of dicts with keys
        # 'takeaway', 'feedback', 'reason', and optionally 'suggestion' —
        # confirm against AnalyseTilV2's output schema.
        for result in results["til"]:
            st.markdown(f"#### TIL: {result['takeaway']}")
            st.markdown(f"**Feedback:** {result['feedback']}")
            if result['feedback'] == "not_ok":
                st.markdown(f"**Reason:** {result['reason']}")
                if result.get('suggestion') is not None:
                    st.markdown(f"**Suggestion:** {result['suggestion']}")

            # One pair of rating buttons per takeaway, keyed by the takeaway
            # text so each has a stable, unique widget key.
            feedback_key = result['takeaway'].replace(' ', '_')
            feedback_given_key = f"{feedback_key}_feedback_given"

            if feedback_given_key not in st.session_state:
                st.session_state[feedback_given_key] = False

            if not st.session_state[feedback_given_key]:
                if st.button("helpful", key=f"helpful_{feedback_key}"):
                    give_feedback(feedback_key, True)
                    st.session_state[feedback_given_key] = True

                if st.button("not_helpful", key=f"not_helpful_{feedback_key}"):
                    give_feedback(feedback_key, False)
                    st.session_state[feedback_given_key] = True


def give_feedback(feedback_key, is_helpful):
    """Submit a helpful/not-helpful rating for one takeaway's feedback.

    Posts a ``Feedback`` record to the backend via ``TilCrew.post_feedback``
    using the run id stored in session state, then marks the takeaway as
    rated so its buttons are hidden on the next rerun.
    """
    run_id = st.session_state.run_id

    # Map the boolean rating onto the backend's metric fields.
    if is_helpful:
        metric_type, metric_score = "helpful", 1
    else:
        metric_type, metric_score = "not_helpful", 0

    payload = Feedback(
        metric_type=metric_type,
        metric_score=metric_score,
        feedback_on=feedback_key.replace('_', ' ').title()
    )

    try:
        TilCrew.post_feedback(run_id, payload)
        st.success("Feedback submitted successfully!")
    except Exception as e:
        # Surface the failure in the UI rather than crashing the page.
        st.error(f"Failed to submit feedback: {e}")

    st.session_state[f"{feedback_key}_feedback_given"] = True

def clear_feedback_state(results):
    """Reset every per-takeaway '*_feedback_given' flag to False.

    Called after a new analysis run so rating buttons reappear for the
    fresh results.

    Args:
        results: The new analysis results. Currently unused; kept so the
            existing positional call in feedback_main stays valid.
    """
    # Snapshot the keys so we never iterate session state while touching it.
    for key in list(st.session_state.keys()):
        if key.endswith("_feedback_given"):
            st.session_state[key] = False


# Script entry point: render the feedback page when run directly
# (e.g. `streamlit run <this file>`).
if __name__ == "__main__":
    feedback_main()