ecarr-bend committed on
Commit
e762673
·
1 Parent(s): 569ec5f

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -160
app.py DELETED
@@ -1,160 +0,0 @@
import os
from queue import Queue, Empty
from threading import Thread
from typing import Any

import gradio as gr
import openai
import pinecone

from langchain.agents import AgentExecutor
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import AgentTokenBufferMemory
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import MessagesPlaceholder
from langchain.schema.messages import SystemMessage
from langchain.vectorstores import Pinecone
22
print("CHECK - Pinecone vector db setup")

# Credentials for OpenAI plus the embedding model used to query the index.
openai.api_key = os.environ.get("OPENAI_API_KEY")
embeddings = OpenAIEmbeddings()

# Name of the pre-built Pinecone index holding the Kellogg site content.
index_name = "kellogg-course-assistant"

# Connect to Pinecone; credentials are read from the environment.
pinecone.init(
    api_key=os.getenv("PINECONE_API_KEY"),  # find at app.pinecone.io
    environment=os.getenv("PINECONE_ENV"),  # next to api key in console
)

# Attach to the existing index and expose it as a LangChain retriever.
vectorsearch = Pinecone.from_existing_index(index_name, embeddings)
retriever = vectorsearch.as_retriever()

print("CHECK - setting up conversational retrieval agent")
# callback handler for streaming
class QueueCallback(BaseCallbackHandler):
    """Callback handler for streaming LLM responses to a queue.

    Each generated token is pushed onto the shared queue so the consumer
    loop in ``predict`` can yield it to the UI incrementally.
    """

    def __init__(self, q: Queue) -> None:
        # Queue shared with the consumer loop in predict().
        self.q = q

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Push each streamed token onto the queue as it arrives."""
        # Fix: annotations used the builtin `any` where `typing.Any` was meant.
        self.q.put(token)

    def on_llm_end(self, *args: Any, **kwargs: Any) -> None:
        # NOTE(review): returns q.empty() as in the original; callers ignore
        # the value, so this is kept only for parity with the callback API.
        return self.q.empty()
55
# create retrieval tool the agent can invoke to search the indexed Kellogg site
tool = create_retriever_tool(
    retriever,
    "search_kellogg_site",
    "Searches and returns content from within the Kellogg website."
)
tools = [tool]

# Key under which conversation history is stored; it is used by both the
# per-request memory and the prompt placeholder built inside predict().
memory_key = "history"
# BUGFIX: the original constructed AgentTokenBufferMemory(memory_key=...,
# llm=llm) here, but `llm` is not defined at module scope, which raised a
# NameError at import time.  Memory is created per request inside predict(),
# so the broken module-level instance is removed.

# System prompt: sets the assistant persona and the required HTML output
# formatting.  (Fixed the unclosed <i> tag in the formatting instructions.)
system_message = SystemMessage(
    content=(
        "You are a helpful educational expert providing advice to students of the Northwestern business school Kellogg. "
        "Use both your knowledge and the Kellogg site search tool to generate helpful answers for questions about courses and providing a list of suggested web course articles for more information. "
        "Format your answer with distinct <h3>titles</h3> and <h3>subtitles</h3>, <b>emphasis</b>, <b>bold</b>, <i>italic</i>, <li>lists</li>, and tables *use html code*. For lists, or bullet points, always start them by having a topic in <b>emphasis</b> before going into the description. Ensure to frequently take concepts and break them down into bullet points or lists following the emphasis directions that were just laid out."
        "Do not include details of your intermediate steps in the final response. "
        "At the end of your response, provide links to relevant web course articles returned by the retriever."
    )
)

print("CHECK - setting up gradio chatbot UI")
# RAG agent function
def predict(message, model_type):
    """Run the retrieval agent on `message`, streaming tokens as they arrive.

    Args:
        message: The user's question (plain text).
        model_type: 1 selects gpt-3.5-turbo-16k; any other value selects gpt-4.

    Yields:
        (next_token, content) tuples, where `content` is the answer
        accumulated so far.
    """
    # Queue carries streamed tokens from the LLM callback to this generator;
    # `job_done` is a unique sentinel object marking the end of the run.
    q = Queue()
    job_done = object()

    # Choose the chat model; both stream tokens into the queue via QueueCallback.
    if model_type == 1:
        llm = ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo-16k", streaming=True, callbacks=[QueueCallback(q)])
    else:
        llm = ChatOpenAI(temperature=0.1, model_name="gpt-4", streaming=True, callbacks=[QueueCallback(q)])

    # Per-request memory, prompt, agent, and executor construction.
    memory_key = "history"
    memory = AgentTokenBufferMemory(memory_key=memory_key, llm=llm)

    prompt = OpenAIFunctionsAgent.create_prompt(
        system_message=system_message,
        extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)]
    )

    agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
    agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory, verbose=False, return_intermediate_steps=True)

    # Function to run the agent in a worker thread; pushes the sentinel
    # onto the queue when the agent finishes.
    def task():
        agent_executor({"input": message})
        q.put(job_done)

    t = Thread(target=task)
    t.start()

    content = ""

    # Drain tokens from the queue and yield them until the sentinel appears.
    while True:
        try:
            next_token = q.get(True, timeout=1)
        except Empty:
            # BUGFIX: was a bare `except: pass`, which silently swallowed
            # every exception.  Only a queue timeout means "keep polling".
            continue
        if next_token is job_done:
            break
        content += next_token
        yield next_token, content
131
def ask_llm(message, history, model_type):
    """Gradio ChatInterface callback: yield the growing answer text.

    `history` is supplied by ChatInterface but unused; memory lives in the
    agent itself.
    """
    for _token, partial_answer in predict(message, model_type):
        yield partial_answer
135
# BUGFIX: `model_type` was referenced in additional_inputs below but never
# defined, raising a NameError at import time.  Define the selector here;
# predict() treats the value 1 as gpt-3.5-turbo-16k and anything else as gpt-4.
model_type = gr.Radio(choices=[1, 2], value=1, label="Model (1 = GPT-3.5 16k, 2 = GPT-4)")

# set up and run chat interface
kellogg_agent = gr.ChatInterface(
    fn=ask_llm,
    chatbot=gr.Chatbot(height=500),
    textbox=gr.Textbox(placeholder="Ask me a question", container=False, scale=7),
    title="Kellogg Course AI Assistant",
    description="Please provide your questions about courses offered by Kellogg.",
    additional_inputs=[model_type],
    additional_inputs_accordion_name="AI Assistant Options:",
    examples=[["Can you tell me about a marketing major? What would I want from my career if I went that way instead of say strategy?"],
              ["I'm interested in strategy. Can you give me a recommendation of courses I should consider over the next year?"],
              ["I'm wanting to know more about advertising. Can you recommend some courses on that subject?"],
              ["How many credits do I need to graduate?"],
              ["I loved the Competitive Strategy and industrial structure class. Can you tell me others like that one?"]],
    # cache_examples=True,
    # retry_btn=None,
    # undo_btn="Delete Previous",
    clear_btn="Clear",
)
154
-
155
def main():
    """Launch the Gradio chat UI; queue() enables streaming generators."""
    kellogg_agent.queue().launch()

# start UI
if __name__ == "__main__":
    main()