# AIBotGoogleHF / app.py — conversational agent demo (Hugging Face Space by iShare, commit 6d44216)
import os
import requests
from langchain.agents import Tool
from langchain.tools import BaseTool
from langchain.agents import load_tools
from langchain.memory import ConversationBufferMemory
from langchain.memory import ConversationBufferWindowMemory
#from langchain.chat_models import ChatOpenAI
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.agents import initialize_agent
import gradio as gr
from langchain.chains.question_answering import load_qa_chain
from langchain import PromptTemplate, LLMChain
from langchain import HuggingFaceHub
from pathlib import Path
from time import sleep
from langchain.agents import AgentType
#from langchain.llms import OpenAI
from langchain.agents import AgentOutputParser
from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.output_parsers.json import parse_json_markdown
from langchain.schema import AgentAction, AgentFinish
import os
import random
import string
from dotenv import load_dotenv
load_dotenv()
# Credentials are read from the environment (populated by load_dotenv() above).
# NOTE(review): OPENAI_API_KEY is read but never used in this file — the
# ChatOpenAI model below is commented out; confirm it is still needed.
OPENAI_API_KEY =os.getenv("OPENAI_API_KEY")
# GoogleSearchAPIWrapper reads GOOGLE_API_KEY / GOOGLE_CSE_ID from the
# environment itself; these module-level copies are not referenced again here.
GOOGLE_API_KEY =os.getenv("GOOGLE_API_KEY")
GOOGLE_CSE_ID =os.getenv("GOOGLE_CSE_ID")
#HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')
#repo_id = os.getenv('repo_id')
# os.environ.get and os.getenv are equivalent; both token and model repo id
# come from the environment ('repo_id' selects the Hugging Face Hub model).
HUGGINGFACEHUB_API_TOKEN = os.environ.get('HUGGINGFACEHUB_API_TOKEN')
repo_id = os.environ.get('repo_id')
# Google Custom Search client used by the single tool below.
g_search = GoogleSearchAPIWrapper()
# Initial tool list for the agent. NOTE(review): this list is reassigned by
# the load_tools(...) call further down — see the comment there.
tools = [
Tool(
name="Google Search",
func=g_search.run,
description="useful when you need to answer questions about current events."
),
]
# Unbounded conversation memory; return_messages=True yields chat-message
# objects (required by the chat-conversational agent) instead of one string.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
#memory = ConversationBufferWindowMemory(memory_key="chat_history", k=5, return_messages=True, output_key="output")
#llm=ChatOpenAI(temperature=0)
# LLM backed by the Hugging Face Inference API for the model named in repo_id.
llm=HuggingFaceHub(repo_id=repo_id)
class OutputParser(AgentOutputParser):
    """Lenient output parser for the chat-conversational agent.

    The model is instructed (see the system prompt below) to reply with a
    JSON object containing "action" and "action_input".  When the reply is
    valid JSON we dispatch on the action; when it is not — which frequently
    happens on the model's final turn — the raw text is treated as the
    final answer instead of raising a parsing error.
    """

    def get_format_instructions(self) -> str:
        """Return the standard JSON format instructions for the agent."""
        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> AgentAction | AgentFinish:
        """Turn raw LLM text into an AgentAction or AgentFinish."""
        try:
            decoded = parse_json_markdown(text)
            action = decoded["action"]
            payload = decoded["action_input"]
        except Exception:
            # Not valid action JSON (or keys missing) — assume the agent
            # is done and hand the raw text back as the final output.
            return AgentFinish({"output": text}, text)
        if action == "Final Answer":
            # Explicit終 marker from the model: finish with its answer.
            return AgentFinish({"output": payload}, text)
        # Any other action means "invoke that tool with this input".
        return AgentAction(action, payload, text)

    @property
    def _type(self) -> str:
        return "conversational_chat"
# initialize output parser for agent
parser = OutputParser()
# Extend the tool list with the calculator ("llm-math") tool.
# BUG FIX: the original line *reassigned* `tools`, silently discarding the
# Google Search tool constructed above; appending keeps both tools available
# to the agent (and to create_prompt further down).
tools = tools + load_tools(["llm-math"], llm=llm)
# initialize agent
# Chat-conversational ReAct agent wired to the Hugging Face LLM, the tool
# list, and the buffer memory above.  The custom OutputParser is injected via
# agent_kwargs so malformed JSON from the model doesn't crash the loop.
agent = initialize_agent(
agent="chat-conversational-react-description",
tools=tools,
llm=llm,
verbose=True,
# On hitting max iterations, ask the LLM to generate a final answer rather
# than stopping with a forced "max iterations" message.
early_stopping_method="generate",
memory=memory,
agent_kwargs={"output_parser": parser}
)
# NOTE(review): the alternatives below are earlier experiments kept for
# reference; consider deleting them once the configuration is settled.
#my_agent = initialize_agent(tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory, handle_parsing_errors=True)
#my_agent = initialize_agent(tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)
#my_agent = initialize_agent(tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,verbose=True)
#handle_parsing_errors="You must take care of the user input query and if you cannot find an answer by yourself, use the tools and then response based on the search results by the tools and make a perfect response to user query.")
#self_ask_with_search = initialize_agent(tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True)
# Llama-2 chat prompt delimiters: [INST]...[/INST] wraps a user turn and
# <<SYS>>...<</SYS>> wraps the system message inside the first turn.
B_INST, E_INST = "[INST]", "[/INST]"
# BUG FIX: the system-message markers had been mangled to "<>" (the literal
# "<SYS>"/"</SYS>" text was stripped, most likely by HTML rendering); restore
# the canonical Llama-2 tokens so the model recognizes the system block.
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
# Few-shot system prompt teaching the model to always answer in the
# {"action": ..., "action_input": ...} JSON format.  The doubled braces
# ({{ }}) escape literal braces for LangChain's prompt templating.
sys_msg = B_SYS + """Assistant is a expert JSON builder designed to assist with a wide range of tasks.
Assistant is able to respond to the User and use tools using JSON strings that contain "action" and "action_input" parameters.
All of Assistant's communication is performed using this JSON format.
Assistant can also use tools by responding to the user with tool use instructions in the same "action" and "action_input" JSON format. Tools available to Assistant are:
- "Calculator": Useful for when you need to answer questions about math.
  - To use the calculator tool, Assistant should write like so:
    ```json
    {{"action": "Calculator",
      "action_input": "sqrt(4)"}}
    ```
Here are some previous conversations between the Assistant and User:
User: Hey how are you today?
Assistant: ```json
{{"action": "Final Answer",
 "action_input": "I'm good thanks, how are you?"}}
```
User: I'm great, what is the square root of 4?
Assistant: ```json
{{"action": "Calculator",
 "action_input": "sqrt(4)"}}
```
User: 2.0
Assistant: ```json
{{"action": "Final Answer",
 "action_input": "It looks like the answer is 2!"}}
```
User: Thanks could you tell me what 4 to the power of 2 is?
Assistant: ```json
{{"action": "Calculator",
 "action_input": "4**2"}}
```
User: 16.0
Assistant: ```json
{{"action": "Final Answer",
 "action_input": "It looks like the answer is 16!"}}
```
Here is the latest conversation between Assistant and User.""" + E_SYS
# Rebuild the agent's prompt so the custom few-shot system message (and the
# current tool list) replace the default conversational-chat prompt.
new_prompt = agent.agent.create_prompt(
system_message=sys_msg,
tools=tools
)
# Swap the prompt on the agent's underlying LLM chain in place.
agent.agent.llm_chain.prompt = new_prompt
# Wrap each user turn in Llama-2 [INST] ... [/INST] instruction markers,
# reminding the model to answer in the action/action_input JSON format.
instruction = B_INST + " Respond to the following in JSON with 'action' and 'action_input' values " + E_INST
human_msg = instruction + "\nUser: {input}"
# NOTE(review): messages[2] is assumed to be the human-message template in
# the prompt built by create_prompt — this indexes LangChain internals and
# will break silently if the prompt layout changes; verify on upgrade.
agent.agent.llm_chain.prompt.messages[2].prompt.template = human_msg
# One smoke-test turn at startup so prompt-wiring problems surface in the
# Space logs before any user interacts with the UI.
result = agent("hey how are you today?")
print(f"Result: {result}")
def chat_response(input_text):
    """Run one user turn through the conversational agent.

    Args:
        input_text: Raw user message from the Gradio textbox.

    Returns:
        The agent's final answer as a string.
    """
    print("START PRINTING")
    answer = agent.run(input=input_text)
    print(answer)
    print("END PRINTING")
    return answer
# Minimal Gradio UI: one text input, one text output, backed by chat_response.
interface = gr.Interface(fn=chat_response, inputs="text", outputs="text", description="Chat with a conversational agent")
# launch() starts the web server — the blocking entry point of this Space.
interface.launch()