# NOTE: removed non-Python scraper artifacts (file-size header, commit-hash and
# line-number gutters) that would have been a SyntaxError at the top of the file.
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.chat_models.huggingface import ChatHuggingFace
from composio_langchain import ComposioToolset, App
from langchain import hub
from langchain.agents import AgentExecutor, load_tools
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
from langchain.tools.render import render_text_description
from langchain_community.utilities import SerpAPIWrapper
import os
import gradio as gr
from composio import Composio
#client = Composio("blwrvdoq4jjmwn7d2qr0h")
#integration = client.get_integration(os.getenv("GIT_TOKEN"))
#connected_account = integration.initiate_connection(entity_id = None)
#print("Complete the auth flow, link: ", connected_account.redirectUrl)
#integration = client.get_integration(os.getenv("GMAIL_TOKEN"))
#connected_account = integration.initiate_connection(entity_id = None)
#print("Complete the auth flow, link: ", connected_account.redirectUrl)
import subprocess
# Register the GitHub app with composio via its CLI; capture output instead of
# letting it stream to the console.
result = subprocess.run(['composio-cli', 'add', 'github'], capture_output=True, text=True)
# Surface the CLI output (auth link / status) in the logs.
print(result.stdout)
# Re-export the API keys so libraries that read os.environ directly find them.
# os.getenv returns None when a key is unset, and assigning None into
# os.environ raises TypeError — only re-export keys that are actually present,
# so a missing credential fails at first use rather than crashing import.
for _key in ("SERPAPI_API_KEY", "HUGGINGFACEHUB_API_TOKEN"):
    _value = os.getenv(_key)
    if _value is not None:
        os.environ[_key] = _value
def setup_llm(repo_id):
    """Create a HuggingFace inference endpoint client for the given model repo."""
    endpoint = HuggingFaceEndpoint(repo_id=repo_id)
    return endpoint
def setup_chat_model(llm):
    """Wrap a raw LLM endpoint in a chat-message interface."""
    chat = ChatHuggingFace(llm=llm)
    return chat
def setup_tools(llm):
    """Load the built-in web-search, math, and StackExchange tools for *llm*."""
    tool_names = ["serpapi", "llm-math", "stackexchange"]
    return load_tools(tool_names, llm=llm)
def setup_prompt(tools):
    """Pull the ReAct-JSON prompt from the hub and fill in the tool metadata."""
    tool_names = ", ".join(tool.name for tool in tools)
    base_prompt = hub.pull("hwchase17/react-json")
    return base_prompt.partial(
        tools=render_text_description(tools),
        tool_names=tool_names,
    )
def setup_agent(chat_model_with_stop, tools, prompt):
    """Compose the ReAct agent runnable: input mapping -> prompt -> model -> parser.

    Note: *tools* is accepted for signature parity with the other setup helpers
    but is not used here; tools are attached later by the AgentExecutor.
    """
    # The dict of extractors is coerced into a parallel runnable when piped
    # into the prompt.
    input_map = {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
    }
    pipeline = input_map | prompt | chat_model_with_stop
    return pipeline | ReActJsonSingleInputOutputParser()
def execute_agent(agent, tools, input_text):
    """Run *agent* with *tools* on *input_text*; return the full result dict.

    The result includes "output" and, because intermediate steps are enabled,
    an "intermediate_steps" list of the tool calls the agent made.
    """
    executor = AgentExecutor(
        agent=agent,
        tools=tools,
        verbose=True,
        handle_parsing_errors=True,
        return_intermediate_steps=True,
    )
    return executor.invoke({"input": input_text})
# --- Module-level wiring: build the LLM, tools, prompt, and agent once at import ---
llm = setup_llm(repo_id="HuggingFaceH4/zephyr-7b-beta")
tools = setup_tools(llm)
prompt = setup_prompt(tools)
chat_model = setup_chat_model(llm)
#tools = ComposioToolset(apps=[App.GITHUB, App.GMAIL])
# Bind a stop sequence so the model halts generation at this marker instead of
# continuing past the end of a step.
chat_model_with_stop = chat_model.bind(stop=["\nInvalidStop"])
# These globals (agent, tools) are consumed by response() below.
agent = setup_agent(chat_model_with_stop, tools, prompt)
def response(input, history=None):
    """Gradio chat handler: run the agent on *input* and format the answer.

    Args:
        input: The user's chat message. (Name shadows the builtin but is kept
            for interface compatibility.)
        history: Chat history supplied by gr.ChatInterface; unused. Default is
            None rather than a mutable [] to avoid the shared-default pitfall.

    Returns:
        The agent's final answer, prefixed with the first intermediate step
        when any tool calls were made.
    """
    res = execute_agent(agent, tools, input)
    if not res["intermediate_steps"]:
        return res["output"]
    # Each intermediate step is an (AgentAction, observation) tuple, not a
    # string — stringify it before concatenating to avoid a TypeError.
    return str(res["intermediate_steps"][0]) + "\n" + res["output"]
# Launch the Gradio chat UI with a public share link and debug logging.
# (Removed a trailing " |" gutter artifact that made this line a SyntaxError.)
gr.ChatInterface(response).launch(share=True, debug=True)