Caseyrmorrison committed on
Commit
51c300f
·
verified ·
1 Parent(s): e77b0bc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +168 -165
app.py CHANGED
@@ -1,165 +1,168 @@
1
- import os
2
- import streamlit as st
3
- import openai
4
- from openai import OpenAI
5
- from langchain_openai import OpenAIEmbeddings
6
- from langchain_openai import ChatOpenAI
7
-
8
- from pinecone import Pinecone
9
- from langchain_pinecone import PineconeVectorStore
10
-
11
- from agents import Obnoxious_Agent, Query_Agent, Relevant_Documents_Agent, Answering_Agent
12
-
13
- client = None
14
-
15
- # **Streamlit UI Elements**
16
- st.title("Mini Project 2: Streamlit Chatbot")
17
-
18
- # Pinecone Setup**
19
- pc = Pinecone(api_key=st.secrets['PINECONE_API_KEY'])
20
- index = pc.Index('openaiembedding')
21
-
22
- # Session State for API key and Chat history
23
- if "is_valid" not in st.session_state:
24
- st.session_state["is_valid"] = False
25
- if 'api_key' not in st.session_state:
26
- st.session_state["api_key"] = ""
27
- if "messages" not in st.session_state:
28
- st.session_state['messages'] = []
29
-
30
- def check_openai_api_key():
31
- client = OpenAI(api_key=st.session_state["api_key"])
32
- st.session_state["is_valid"] = False
33
- try:
34
- client.models.list()
35
- except openai.APIError as e:
36
- st.session_state["is_valid"] = False
37
- except openai.APIConnectionError as e:
38
- st.session_state["is_valid"] = False
39
- except openai.AuthenticationError as e:
40
- st.session_state["is_valid"] = False
41
- else:
42
- st.session_state["is_valid"] = True
43
- st.success("Valid OpenAI API key entered successfully!")
44
-
45
- if not st.session_state["is_valid"]:
46
- st.session_state["api_key"] = st.text_input(label="Enter your OpenAI API Key", type="password")
47
- check_openai_api_key()
48
-
49
- class Head_Agent:
50
- def __init__(self, client, index, embeddings, api_key) -> None:
51
- # TODO: Initialize the Head_Agent
52
- # OpenAI client + embeddings
53
- self.client = client
54
- self.model = ChatOpenAI(api_key=api_key)
55
- self.embeddings = embeddings
56
- # Pinecone Index + vector store
57
- self.index = index
58
- self.vectorstore = PineconeVectorStore(index_name='openaiembedding', embedding=self.embeddings, namespace="embedding_2048_256", text_key="text")
59
- self.retriever = self.vectorstore.as_retriever()
60
- # Filter user queries
61
- self.obnoxious_phrase = "Please do not ask inappropriate or obnoxious questions."
62
- self.non_relevant_phrase = "No relevant documents found in the documents. Please ask a question relevant to the book on Machine Learning"
63
- # chat history prompt
64
- self.memory_prompt = """Given a chat history and the latest user question \
65
- which might reference context in the chat history, formulate a standalone question \
66
- which can be understood without the chat history. Do NOT answer the question, \
67
- just reformulate it if needed and otherwise return it as is.
68
- Chat_History: {chat_history}
69
- """
70
-
71
- def setup_sub_agents(self):
72
- self.OA = Obnoxious_Agent(self.client)
73
- self.QA = Query_Agent(self.client, self.embeddings, self.index, self.vectorstore)
74
- self.RDA = Relevant_Documents_Agent(self.client, self.embeddings)
75
- self.AA = Answering_Agent(self.model, self.retriever)
76
- print("Agents Initialized")
77
-
78
- def query_gpt(self, prompt) -> str:
79
- # TODO: Get if the returned documents are relevant
80
- try:
81
- response = self.client.chat.completions.create(model="gpt-3.5-turbo",
82
- temperature=0,
83
- messages=[{
84
- "role": "system",
85
- "content": prompt
86
- }]
87
- )
88
- return response.choices[0].message.content
89
- except Exception as e:
90
- print(f"Error while checking with gpt: {e}")
91
- return None
92
-
93
- def set_chat_history(self, chat_history):
94
- self.memory_prompt = self.memory_prompt.format(chat_history=chat_history)
95
-
96
- def main_loop(self, query, chat_history):
97
- # TODO: Get Query from Streamlit App
98
- # print("Query:", query)
99
-
100
- # Query for greeting
101
- if query.lower() in ["hello", "hi", "hey"]:
102
- return "Hello! How can I help you today?"
103
-
104
- ## 1. Obnoxious Agent
105
- # Return yes/True = end
106
- # Return no/False = continue
107
- OA_prompt = self.OA.set_prompt(query)
108
- # print("OA Prompt:", OA_prompt)
109
- OA_response = self.query_gpt(OA_prompt)
110
- print("Obnoxious Response:", OA_response)
111
- OA_action = self.OA.extract_action(OA_response)
112
- # print("OA Action:", OA_action)
113
- if OA_action:
114
- print("OA Message:", self.obnoxious_phrase)
115
- return self.obnoxious_phrase
116
-
117
- self.set_chat_history(chat_history)
118
- updated_prompt = self.query_gpt(self.memory_prompt)
119
-
120
- ## 2. Pinecone Query - Get Documents
121
- pinecone_docs_response = self.QA.query_vector_store(updated_prompt)
122
- # print("Documents:", pinecone_docs_response)
123
-
124
- ## 3. Relevant
125
- # If relevant continue, else
126
- RDA_prompt = self.RDA.set_prompt(pinecone_docs_response, updated_prompt)
127
- # print("RDA Prompt:", RDA_prompt)
128
- relevance_response = self.query_gpt(RDA_prompt)
129
- print("Relevance Response:", relevance_response)
130
- RDA_action = self.RDA.extract_action(relevance_response)
131
- # print("RDA Action:", RDA_action)
132
- if not RDA_action:
133
- print("RDA Message:", self.non_relevant_phrase)
134
- return self.non_relevant_phrase
135
-
136
- ## 4. Answering
137
- AA_prompt = self.AA.set_prompt(updated_prompt)
138
- # print("AA Prompt:", AA_prompt)
139
- AA_response = self.AA.generate_response()
140
- # print("AA Response:", AA_response)
141
- return AA_response
142
-
143
- # Initialize Head_Agent if the API key is valid
144
- if st.session_state["is_valid"]:
145
- api_key = st.session_state["api_key"]
146
- client = OpenAI(api_key=api_key)
147
- embeddings = OpenAIEmbeddings(api_key=api_key, model='text-embedding-ada-002')
148
-
149
- HA = Head_Agent(client, index, embeddings, api_key)
150
- HA.setup_sub_agents()
151
-
152
- # Display existing chat messages
153
- for message in st.session_state['messages']:
154
- st.chat_message(message['role']).write(message['content'])
155
-
156
- # Main chat interaction loop
157
- if user_prompt := st.chat_input("What would you like to ask?"):
158
- st.session_state['messages'].append({"role": "user", "content": user_prompt})
159
- st.chat_message(st.session_state['messages'][-1]['role']).write(st.session_state['messages'][-1]['content'])
160
-
161
- print("Chat History:", st.session_state['messages'])
162
- response = HA.main_loop(query=user_prompt, chat_history=st.session_state['messages'])
163
-
164
- st.session_state['messages'].append({"role": "assistant", "content": response})
165
- st.chat_message("assistant").write(response)
 
 
 
 
1
+ import os
2
+ import streamlit as st
3
+ import openai
4
+ from openai import OpenAI
5
+ from langchain_openai import OpenAIEmbeddings
6
+ from langchain_openai import ChatOpenAI
7
+
8
+ from pinecone import Pinecone
9
+ from langchain_pinecone import PineconeVectorStore
10
+
11
+ from agents import Obnoxious_Agent, Query_Agent, Relevant_Documents_Agent, Answering_Agent
12
+
13
# Module-level OpenAI client; populated later once a valid API key is entered.
client = None

# **Streamlit UI Elements**
st.title("Mini Project 2: Streamlit Chatbot")

# Pinecone Setup: connect with the key from Streamlit secrets and open the
# index that holds the book embeddings.
pc = Pinecone(api_key=st.secrets['PINECONE_API_KEY'])
index = pc.Index('openaiembedding')
21
+
22
# Session State for API key validity, the key itself, and chat history.
# Seed each key with its default only on the first run of the script.
for _key, _default in (("is_valid", False), ("api_key", ""), ("messages", [])):
    if _key not in st.session_state:
        st.session_state[_key] = _default
29
+
30
def check_openai_api_key():
    """Validate the API key stored in st.session_state["api_key"].

    Attempts a cheap authenticated call (listing models); on success sets
    st.session_state["is_valid"] to True and shows a success banner,
    otherwise leaves it False.
    """
    client = OpenAI(api_key=st.session_state["api_key"])
    st.session_state["is_valid"] = False
    try:
        client.models.list()
    except openai.APIError:
        # openai.APIError is the common base class of AuthenticationError,
        # APIConnectionError, etc., so a single handler covers them all.
        # (The original listed APIError first, making its two subsequent
        # except clauses unreachable dead code.)
        st.session_state["is_valid"] = False
    else:
        st.session_state["is_valid"] = True
        st.success("Valid OpenAI API key entered successfully!")
44
+
45
# Until a valid key is on file, prompt for one and re-validate on each rerun.
if not st.session_state["is_valid"]:
    st.session_state["api_key"] = st.text_input(label="Enter your OpenAI API Key", type="password")
    check_openai_api_key()
48
+
49
class Head_Agent:
    """Top-level orchestrator for the chatbot.

    Routes each user query through the sub-agent pipeline:
    obnoxiousness filter -> history condensation -> Pinecone retrieval ->
    relevance check -> answering agent.
    """

    def __init__(self, client, index, embeddings, api_key) -> None:
        # OpenAI client, chat model, and embedding model
        self.client = client
        self.model = ChatOpenAI(api_key=api_key)
        self.embeddings = embeddings
        # Pinecone index + LangChain vector store / retriever
        self.index = index
        self.vectorstore = PineconeVectorStore(index_name='openaiembedding', embedding=self.embeddings, namespace="embedding_2048_256", text_key="text")
        self.retriever = self.vectorstore.as_retriever()
        # Canned refusal messages for filtered queries
        self.obnoxious_phrase = "Please do not ask inappropriate or obnoxious questions."
        self.non_relevant_phrase = "No relevant documents found in the documents. Please ask a question relevant to the book on Machine Learning"
        # Template for condensing chat history + latest question into a
        # standalone question. Kept pristine: set_chat_history() writes the
        # filled-in copy to self.formatted_memory_prompt so the
        # {chat_history} placeholder survives across turns.
        self.memory_prompt = """Given a chat history and the latest user question \
which might reference context in the chat history, formulate a standalone question \
which can be understood without the chat history. Do NOT answer the question, \
just reformulate it if needed and otherwise return it as is.
Chat_History: {chat_history}
"""
        self.formatted_memory_prompt = self.memory_prompt

    def setup_sub_agents(self):
        """Instantiate the four sub-agents; must be called before main_loop()."""
        self.OA = Obnoxious_Agent(self.client)
        self.QA = Query_Agent(self.client, self.embeddings, self.index, self.vectorstore)
        self.RDA = Relevant_Documents_Agent(self.client, self.embeddings)
        self.AA = Answering_Agent(self.model, self.retriever)
        print("Agents Initialized")

    def query_gpt(self, prompt) -> str:
        """Send `prompt` as a system message to gpt-3.5-turbo (temperature 0).

        Returns the completion text, a placeholder string when the API
        returns an empty message, or None when the call raises.
        """
        try:
            response = self.client.chat.completions.create(
                model="gpt-3.5-turbo",
                temperature=0,
                messages=[{
                    "role": "system",
                    "content": prompt
                }]
            )
            resp = response.choices[0].message.content
            if resp is None:
                resp = "Return none API response empty - possible latency issues."
            return resp
        except Exception as e:
            print(f"Error while checking with gpt: {e}")
            return None

    def set_chat_history(self, chat_history):
        """Fill the memory-prompt template with `chat_history`.

        Writes the result to self.formatted_memory_prompt rather than
        overwriting the template. (The original assigned the .format()
        result back onto self.memory_prompt, which consumed the
        {chat_history} placeholder and silently froze the chat history
        at whatever it was on the first turn.)
        """
        self.formatted_memory_prompt = self.memory_prompt.format(chat_history=chat_history)

    def main_loop(self, query, chat_history):
        """Route one user query through the agent pipeline and return the reply text."""
        # Fast path for simple greetings — no agents involved.
        if query.lower() in ["hello", "hi", "hey"]:
            return "Hello! How can I help you today?"

        # 1. Obnoxiousness filter: refuse inappropriate questions outright.
        OA_prompt = self.OA.set_prompt(query)
        OA_response = self.query_gpt(OA_prompt)
        print("Obnoxious Response:", OA_response)
        if self.OA.extract_action(OA_response):
            print("OA Message:", self.obnoxious_phrase)
            return self.obnoxious_phrase

        # Condense chat history + query into a standalone question.
        self.set_chat_history(chat_history)
        updated_prompt = self.query_gpt(self.formatted_memory_prompt)

        # 2. Retrieve candidate documents from the Pinecone vector store.
        pinecone_docs_response = self.QA.query_vector_store(updated_prompt)

        # 3. Relevance check: bail out when the documents don't match the question.
        RDA_prompt = self.RDA.set_prompt(pinecone_docs_response, updated_prompt)
        relevance_response = self.query_gpt(RDA_prompt)
        print("Relevance Response:", relevance_response)
        if not self.RDA.extract_action(relevance_response):
            print("RDA Message:", self.non_relevant_phrase)
            return self.non_relevant_phrase

        # 4. Answer using the retriever-backed answering agent.
        AA_prompt = self.AA.set_prompt(updated_prompt)
        AA_response = self.AA.generate_response()
        return AA_response
145
+
146
# Initialize Head_Agent and run the chat UI only once the API key is valid.
# NOTE(review): indentation reconstructed from the diff view; the chat UI is
# assumed to live under this guard since HA is only defined here — confirm.
if st.session_state["is_valid"]:
    api_key = st.session_state["api_key"]
    client = OpenAI(api_key=api_key)
    # NOTE(review): embedding model presumably matches the one used to build
    # the 'openaiembedding' Pinecone index — verify.
    embeddings = OpenAIEmbeddings(api_key=api_key, model='text-embedding-ada-002')

    HA = Head_Agent(client, index, embeddings, api_key)
    HA.setup_sub_agents()

    # Display existing chat messages
    for message in st.session_state['messages']:
        st.chat_message(message['role']).write(message['content'])

    # Main chat interaction loop
    if user_prompt := st.chat_input("What would you like to ask?"):
        st.session_state['messages'].append({"role": "user", "content": user_prompt})
        st.chat_message(st.session_state['messages'][-1]['role']).write(st.session_state['messages'][-1]['content'])

        print("Chat History:", st.session_state['messages'])
        response = HA.main_loop(query=user_prompt, chat_history=st.session_state['messages'])

        st.session_state['messages'].append({"role": "assistant", "content": response})
        st.chat_message("assistant").write(response)