|
|
from langchain_community.llms import HuggingFaceEndpoint |
|
|
from langchain_community.chat_models.huggingface import ChatHuggingFace |
|
|
from composio_langchain import ComposioToolset, App |
|
|
from langchain import hub |
|
|
from langchain.agents import AgentExecutor, load_tools |
|
|
from langchain.agents.format_scratchpad import format_log_to_str |
|
|
from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser |
|
|
from langchain.tools.render import render_text_description |
|
|
from langchain_community.utilities import SerpAPIWrapper |
|
|
import os |
|
|
import gradio as gr |
|
|
from composio import Composio |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import subprocess |
|
|
|
|
|
|
|
|
# Register the GitHub integration with Composio via its CLI.
# NOTE(review): this runs at import time on every start; presumably a one-time
# setup step — consider guarding it or moving it to a separate setup script.
result = subprocess.run(
    ['composio-cli', 'add', 'github'],
    capture_output=True,
    text=True,
)

print(result.stdout)

# Surface failures instead of silently ignoring them: the original never
# inspected the exit code or stderr, so a failed registration went unnoticed.
if result.returncode != 0:
    print(f"composio-cli exited with code {result.returncode}: {result.stderr}")
|
|
|
|
|
def _require_env(name):
    """Return the value of environment variable *name*, failing fast.

    The original code did ``os.environ[name] = os.getenv(name)``, which raises
    an opaque ``TypeError`` when the variable is unset (environ values may not
    be ``None``). Raise a clear, actionable error instead.
    """
    value = os.getenv(name)
    if value is None:
        raise RuntimeError(f"Required environment variable {name} is not set")
    return value


# Both keys must be present before any search / LLM calls are made.
os.environ["SERPAPI_API_KEY"] = _require_env("SERPAPI_API_KEY")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = _require_env("HUGGINGFACEHUB_API_TOKEN")
|
|
def setup_llm(repo_id):
    """Build a HuggingFace Inference Endpoint client for the given *repo_id*."""
    endpoint = HuggingFaceEndpoint(repo_id=repo_id)
    return endpoint
|
|
|
|
|
def setup_chat_model(llm):
    """Wrap the raw *llm* endpoint in a chat-style model interface."""
    chat = ChatHuggingFace(llm=llm)
    return chat
|
|
|
|
|
def setup_tools(llm):
    """Load the web-search, calculator and StackExchange tools for *llm*."""
    tool_names = ["serpapi", "llm-math", "stackexchange"]
    return load_tools(tool_names, llm=llm)
|
|
|
|
|
def setup_prompt(tools):
    """Pull the ReAct JSON prompt from the hub and fill in tool details.

    The prompt template is partially bound with a rendered text description
    of *tools* and a comma-separated list of their names.
    """
    rendered_tools = render_text_description(tools)
    names = ", ".join(tool.name for tool in tools)
    base_prompt = hub.pull("hwchase17/react-json")
    return base_prompt.partial(tools=rendered_tools, tool_names=names)
|
|
|
|
|
def setup_agent(chat_model_with_stop, tools, prompt):
    """Assemble the ReAct agent pipeline: input mapping -> prompt -> model -> parser.

    ``tools`` is accepted for interface compatibility but not used here; the
    tool list is already baked into *prompt*.
    """
    # Map the executor's invocation dict onto the variables the prompt expects.
    input_mapping = {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
    }
    return (
        input_mapping
        | prompt
        | chat_model_with_stop
        | ReActJsonSingleInputOutputParser()
    )
|
|
|
|
|
def execute_agent(agent, tools, input_text):
    """Run *agent* with *tools* on *input_text* and return the result dict.

    The executor is configured to return intermediate steps alongside the
    final output, so callers can inspect the agent's reasoning trace.
    """
    executor = AgentExecutor(
        agent=agent,
        tools=tools,
        verbose=True,
        handle_parsing_errors=True,
        return_intermediate_steps=True,
    )
    return executor.invoke({"input": input_text})
|
|
|
|
|
|
|
|
# --- Wire the agent together at import time --------------------------------
zephyr_llm = setup_llm(repo_id="HuggingFaceH4/zephyr-7b-beta")
tools = setup_tools(zephyr_llm)
react_prompt = setup_prompt(tools)

# Bind a stop sequence so generation halts when the marker appears.
# NOTE(review): "\nInvalidStop" is unusual for ReAct agents (commonly
# "\nObservation") — confirm this is intentional.
chat_model_with_stop = setup_chat_model(zephyr_llm).bind(stop=["\nInvalidStop"])

agent = setup_agent(chat_model_with_stop, tools, react_prompt)
|
|
|
|
|
def response(input, history=None):
    """Gradio chat callback: run the agent on *input* and return a text reply.

    Fixes two defects in the original:

    - ``res["intermediate_steps"]`` entries are ``(AgentAction, observation)``
      tuples, not strings, so the original ``tuple + "\\n"`` concatenation
      raised ``TypeError`` whenever the agent used a tool.
    - mutable default argument ``history=[]`` replaced with ``None``
      (``history`` is unused; Gradio supplies it on every call).
    """
    res = execute_agent(agent, tools, input)
    steps = res["intermediate_steps"]
    if not steps:
        return res["output"]
    # Render the first (action, observation) pair as text before joining,
    # matching the original's intent of prefixing the reply with the step.
    return str(steps[0]) + "\n" + res["output"]
|
|
|
|
|
|
|
|
# Serve the agent through a Gradio chat UI: public share link, debug logging.
chat_ui = gr.ChatInterface(response)
chat_ui.launch(share=True, debug=True)