rafaaa2105 commited on
Commit
5cdc083
·
verified ·
1 Parent(s): 049939a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -48
app.py CHANGED
@@ -1,9 +1,13 @@
 
1
  import requests
2
  import chainlit as cl
3
- from langchain.prompts import PromptTemplate
4
- from langchain.chains import LLMChain
 
 
 
5
  from langchain_community.llms import HuggingFaceHub
6
- import os
7
 
8
# Define the API endpoint URL
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1"

# Replace this with your actual Hugging Face API token
# Read from the environment so the secret never lives in source control.
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
13
 
14
# Function to query the Hugging Face Inference API
def query_hf_api(inputs):
    """Send a text-generation request to the hosted Mixtral endpoint.

    Args:
        inputs: The prompt string forwarded as the model input.

    Returns:
        The decoded JSON response body from the Inference API.

    Raises:
        requests.HTTPError: If the API responds with a non-2xx status.
        requests.Timeout: If the endpoint does not answer within 60s.
    """
    headers = {"Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}"}
    payload = {
        "inputs": inputs,
        "parameters": {"temperature": 0.7},
        "options": {"use_cache": False},
    }
    # BUG FIX: the original call had no timeout (requests waits forever by
    # default, hanging the chat handler if the endpoint stalls) and never
    # checked the HTTP status, so auth failures / 5xx error bodies were
    # silently decoded and returned as if they were model output.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    response.raise_for_status()
    return response.json()
24
-
25
# Example usage
def get_response(question):
    """Return the first generation candidate for *question*, or "" if none."""
    candidates = query_hf_api(question)
    # On success the API returns a non-empty list of generations; anything
    # else (e.g. an error dict) falls through to the empty-string default.
    if not isinstance(candidates, list) or not candidates:
        return ""
    return candidates[0]
33
-
34
  # Load the LLM (Hugging Face Hub)
35
  llm = HuggingFaceHub(
36
  repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
@@ -43,43 +27,45 @@ llm = HuggingFaceHub(
43
  },
44
  )
45
 
46
# Define the prompt template
# NOTE(review): the literal "Answer:" marker below is what the message
# handler splits on to extract the model's reply — keep the two in sync.
template = """
You are a helpful AI assistant. Provide the answer for the following question:

Question: {question}

Answer:
"""
# PromptTemplate substitutes the user's question into the template above.
prompt = PromptTemplate(template=template, input_variables=["question"])

# Create the LLMChain
# One module-level chain is shared by every chat session (see on_chat_start).
llm_chain = LLMChain(llm=llm, prompt=prompt)
58
 
59
@cl.on_chat_start
async def main():
    """Initialize a new chat session.

    Stores the shared module-level LLMChain in the per-user session so the
    message handler can retrieve it later.
    """
    cl.user_session.set("llm_chain", llm_chain)
    # NOTE(review): Chainlit ignores the return value of on_chat_start —
    # this string looks like a leftover label; confirm before removing.
    return "API-based Mixtral-Instruct"
63
 
64
@cl.on_message
async def run(message: cl.Message):
    """Handle an incoming chat message by running it through the LLMChain.

    Args:
        message: The incoming Chainlit message carrying the user's question.
    """
    cb = cl.AsyncLangchainCallbackHandler(
        stream_final_answer=True, answer_prefix_tokens=["Answer:"]
    )

    # Retrieve the LLMChain from the user session
    llm_chain = cl.user_session.get("llm_chain")

    question = message.content
    # BUG FIX: the callback handler above was created but never passed to
    # the chain (so streaming never happened), and the synchronous .run()
    # blocked the event loop. Use the async variant and wire the handler in.
    result = await llm_chain.arun(question, callbacks=[cb])

    # The prompt ends with "Answer:", so when the model echoes the prompt
    # back, keep only the text after that marker.
    if "Answer:" in result:
        answer = result.split("Answer:")[1].strip()
    else:
        answer = result

    if answer:
        await cl.Message(content=answer).send()
    else:
        await cl.Message(content="I'm sorry, I couldn't find an answer.").send()
 
1
+ import os
2
  import requests
3
  import chainlit as cl
4
+ from langchain import hub
5
+ from langchain.agents import AgentExecutor, load_tools
6
+ from langchain.agents.format_scratchpad import format_log_to_str
7
+ from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
8
+ from langchain.tools.render import render_text_description
9
  from langchain_community.llms import HuggingFaceHub
10
+ from langchain_community.utilities import SerpAPIWrapper
11
 
12
# Define the API endpoint URL
# NOTE(review): in this version the model is consumed via HuggingFaceHub
# (repo_id) below; API_URL appears unused after the refactor — confirm
# whether it can be removed.
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1"

# Replace this with your actual Hugging Face API token
# Read from the environment so the secret never lives in source control.
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  # Load the LLM (Hugging Face Hub)
19
  llm = HuggingFaceHub(
20
  repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
 
27
  },
28
  )
29
 
30
# Setup tools
# serpapi gives the agent web search; llm-math gives it a calculator
# backed by the same LLM.
tools = load_tools(["serpapi", "llm-math"], llm=llm)

# Setup ReAct style prompt
# Pull the community ReAct-JSON prompt and pre-fill the tool description
# and names, leaving only {input} and {agent_scratchpad} per call.
prompt = hub.pull("hwchase17/react-json")
prompt = prompt.partial(
    tools=render_text_description(tools),
    tool_names=", ".join([t.name for t in tools]),
)

# Define the agent
# Stop generation at "\nObservation" so the model does not hallucinate
# tool output; the executor injects the real observation instead.
chat_model_with_stop = llm.bind(stop=["\nObservation"])
agent = (
    {
        "input": lambda x: x["input"],
        # Replay prior tool calls/results into the prompt as plain text.
        "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
    }
    | prompt
    | chat_model_with_stop
    | ReActJsonSingleInputOutputParser()
)

# Instantiate AgentExecutor
# Shared by every chat session (see on_chat_start); verbose=True logs the
# intermediate reasoning steps to stdout.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
54
 
55
  @cl.on_chat_start
56
  async def main():
57
+ cl.user_session.set("agent_executor", agent_executor)
58
  return "API-based Mixtral-Instruct"
59
 
60
@cl.on_message
async def run(message: cl.Message):
    """Handle an incoming chat message by running it through the ReAct agent.

    Args:
        message: The incoming Chainlit message carrying the user's question.
    """
    # Retrieve the AgentExecutor from the user session
    agent_executor = cl.user_session.get("agent_executor")

    question = message.content
    # BUG FIX: .invoke() is synchronous and can take many seconds (LLM +
    # tool calls); wrap it so it runs in a worker thread instead of
    # blocking Chainlit's event loop for every connected user.
    result = await cl.make_async(agent_executor.invoke)({"input": question})

    # BUG FIX: use .get() — indexing result["output"] raised KeyError on a
    # malformed result instead of reaching the apology fallback below.
    answer = result.get("output")
    if answer:
        await cl.Message(content=answer).send()
    else:
        await cl.Message(content="I'm sorry, I couldn't find an answer.").send()