Spaces:
Paused
Paused
Commit ·
dd5cfa4
1
Parent(s): 9a2769e
Delete app.py
Browse files
app.py
DELETED
|
@@ -1,117 +0,0 @@
|
|
| 1 |
-
import gradio as gr
|
| 2 |
-
import os
|
| 3 |
-
import pinecone
|
| 4 |
-
import openai
|
| 5 |
-
|
| 6 |
-
from langchain.embeddings.openai import OpenAIEmbeddings
|
| 7 |
-
from langchain.chat_models import ChatOpenAI
|
| 8 |
-
from langchain.vectorstores import Pinecone
|
| 9 |
-
|
| 10 |
-
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import AgentTokenBufferMemory
|
| 11 |
-
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
|
| 12 |
-
from langchain.schema.messages import SystemMessage
|
| 13 |
-
from langchain.prompts import MessagesPlaceholder
|
| 14 |
-
from langchain.agents import AgentExecutor
|
| 15 |
-
from langchain.agents.agent_toolkits import create_retriever_tool
|
| 16 |
-
|
| 17 |
-
# --- Pinecone vector store setup -------------------------------------------
print("CHECK - Pinecone vector db setup")

# OpenAI credentials and the embedding model used to encode documents/queries.
openai.api_key = os.environ.get("OPENAI_API_KEY")
embeddings = OpenAIEmbeddings()

# Connect to the pre-built Pinecone index holding the MarkStrat content.
index_name = "kellogg-markstrat"
pinecone.init(
    api_key=os.getenv("PINECONE_API_KEY"),  # find at app.pinecone.io
    environment=os.getenv("PINECONE_ENV"),  # next to api key in console
)

# Wrap the existing index as a LangChain vector store and expose a retriever
# for the agent's search tool.
vectorsearch = Pinecone.from_existing_index(index_name, embeddings)
retriever = vectorsearch.as_retriever()
| 35 |
-
print("CHECK - setting up conversational retrieval agent")

# --- LLM -------------------------------------------------------------------
# gpt-3.5-turbo-16k chosen for its larger context window; temperature 0 keeps
# answers deterministic.
# llm4 = ChatOpenAI(temperature = 0.1, model_name="gpt-4")
llm35 = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-16k")
llm = llm35

# --- Retrieval tool --------------------------------------------------------
# Exposes the Pinecone retriever to the agent as an OpenAI function tool.
tool = create_retriever_tool(
    retriever,
    "search_markstrat",
    "Searches and returns information about the MarkStrat simulation program."
)
tools = [tool]

# --- Memory, prompt, agent, executor ---------------------------------------
# memory_key is shared between the token-buffer memory and the prompt
# placeholder so conversation history flows into each call.
memory_key = "history"
memory = AgentTokenBufferMemory(memory_key=memory_key, llm=llm)
# memory = AgentTokenBufferMemory(memory_key=memory_key, llm=llm, max_history=0, max_token_limit= 4000)

# System prompt steering tone, HTML formatting, and scope of answers.
# Fixes vs. previous revision: the unclosed "<i>italic<i>" tag is now
# "<i>italic</i>", and the refusal message's closing apostrophe was added.
system_message = SystemMessage(
    content=(
        "You are an AI bot marketing professor at a business school helping students understand how to play the markstrat simulation. For every question or comment compose a well-structured response to the user's question, using context and conversation information. Your tone of voice will be conversational and engaging, while still being to the point and direct. "
        "Use the MarkStrat search tool to generate helpful answers for the user question. "
        "Format your answer with distinct <h3>titles</h3> and <h3>subtitles</h3>, <b>emphasis</b>, <b>bold</b>, <i>italic</i>, <li>lists</li>, and tables *use html code*. For lists, or bullet points, always start them by having a topic in <b>emphasis</b> before going into the description. Ensure to frequently take concepts and break them down into bullet points or lists following the emphasis directions that were just laid out. "
        "If its a simple question that asks for a quantitative answer, then provide a much more succinct response. "
        "Respond to questions that are at least 80% similar to the content within the specified context being sent to you, if the question has nothing to do with the additional information supplied to you, then reply with 'I can only answer questions related to MarkStrat.'"
    )
)

prompt = OpenAIFunctionsAgent.create_prompt(
    system_message=system_message,
    extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)]
)

agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    memory=memory,
    verbose=True,
    return_intermediate_steps=True,
)
| 74 |
-
print("CHECK - setting up gradio chatbot UI")

# Selectable model options for the chat UI. Currently inert: the dropdown is
# only wired in via the commented-out additional_inputs further down.
model_type = gr.Dropdown(
    choices=["gpt-4 + rag", "gpt-3.5-turbo + rag"],
    value="gpt-4 + rag",
    type="index",
    label="LLM Models",
)
# gradio chatbot UI
def predict(message, history):
    """Gradio chat callback: run the agent on *message* and return its answer.

    *history* is supplied by gr.ChatInterface but unused here — the agent
    executor carries its own conversation memory.
    """
    # clearing RAG memory
    # memory.clear()
    response = agent_executor({"input": message})
    return response["output"]
|
| 93 |
-
# set up and run chat interface
kellogg_agent = gr.ChatInterface(
    fn=predict,
    chatbot=gr.Chatbot(height=500),
    textbox=gr.Textbox(placeholder="Ask me a question", container=False, scale=7),
    title="Kellogg MarkStrat Chatbot - BETA ",
    description="ChatBot here to help provide advice on how to play MarkStrat",
    # additional_inputs=[model_type],
    # additional_inputs_accordion_name="AI Assistant Options:",
    examples=[["What is the max number of sonites you can have?"], ["What should I consider when targeting the professional segment?"]],
    # cache_examples=True,
    retry_btn=None,
    undo_btn=None,
    clear_btn=None,
    #undo_btn="Delete Previous",
    #clear_btn="Clear",
)

# Basic-auth credentials for the public UI, read from the environment.
user_cred = os.environ.get("USER_CRED")
pass_cred = os.environ.get("PASS_CRED")

# start UI
if __name__ == "__main__":
    # Fail fast with a clear message instead of passing auth=(None, None)
    # to gradio, which previously produced an opaque failure when either
    # credential env var was unset.
    if not (user_cred and pass_cred):
        raise RuntimeError(
            "USER_CRED and PASS_CRED environment variables must be set "
            "to launch the authenticated chatbot UI."
        )
    kellogg_agent.queue().launch(auth=(user_cred, pass_cred))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|