File size: 2,651 Bytes
952b7e0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
# app.py
import streamlit as st
from function import Head_Agent

def init_chatbot():
    """Build the Head_Agent from locally stored API keys.

    Reads the OpenAI key from ``open_ai_key.txt`` and the Pinecone key
    from ``pinecone_api.txt`` (first line of each file, whitespace
    stripped), then wires both to the hard-coded Pinecone index name.

    Returns:
        Head_Agent: the fully configured chatbot orchestrator.
    """
    def _first_line(path):
        # Each key file stores the secret on its first line only.
        with open(path, 'r', encoding='utf-8') as fh:
            return fh.readline().strip()

    return Head_Agent(
        _first_line('open_ai_key.txt'),
        _first_line('pinecone_api.txt'),
        'machine-learning-index',
    )


# ------------- Streamlit -------------------
# Top-level Streamlit script: every rerun re-executes this whole section,
# so the chatbot is cached in st.session_state to survive reruns.
st.title("My Streamlit Chatbot with Greetings")

if "chatbot" not in st.session_state:
    st.session_state["chatbot"] = init_chatbot()

chatbot = st.session_state["chatbot"]

user_query = st.text_input("Please enter your question:")

# Exact-match greetings are short-circuited with a canned reply instead of
# going through the retrieval pipeline.
greeting_keywords = {
    "hi", "hello", "hey", "how are you", "how r u", "yo", "good morning", "good evening", "good afternoon"
}

# Fix: button label was misspelled "Sent" — the action is "Send".
if st.button("Send"):
    if not user_query.strip():
        # Reject empty / whitespace-only input before touching any agent.
        st.warning("Please enter valid content.")
    else:
        # Case-insensitive greeting check against the keyword set above.
        normalized_input = user_query.lower().strip()
        if normalized_input in greeting_keywords:
            greet_response = "Hello there! How can I assist you today?"
            st.write("Robot: ", greet_response)

            chatbot.conv_history.append(f"User: {user_query}")
            chatbot.conv_history.append(f"Robot: {greet_response}")

        else:
            # Moderation gate: inappropriate queries are refused outright.
            if chatbot.obnoxious_agent.check_query(user_query):
                st.write("Robot: Your question is inappropriate, please try another one.")

            else:
                # Retrieve supporting documents from the vector store,
                # then generate an answer conditioned on them + history.
                docs = chatbot.query_agent.query_vector_store(user_query)
                matches = docs["matches"]

                response = chatbot.answering_agent.generate_response(
                    user_query,
                    matches,
                    chatbot.conv_history
                )

                chatbot.conv_history.append(f"User: {user_query}")
                chatbot.conv_history.append(f"Robot: {response}")

                # 3. Post-hoc relevance check: the relevant_agent judges
                # whether the generated answer actually addresses the query.
                conversation_context = (
                    f"User query: {user_query}\n"
                    f"Retrieve document summaries: {response}"
                )
                relevance = chatbot.relevant_agent.get_relevance(conversation_context)

                # NOTE(review): assumes relevant_agent returns "yes"/"no" —
                # only an explicit "no" triggers the low-confidence banner.
                if relevance.strip().lower() == "no":
                    st.write("【Robot: generated answer, but not sure if it's relevant:】", response)
                else:
                    st.write("Robot:", response)

st.write("---")
st.subheader("Conversation History")
for msg in chatbot.conv_history:
    st.write(msg)