rafaaa2105 committed on
Commit
87bdcd7
·
verified ·
1 Parent(s): cb4d6ea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -30
app.py CHANGED
@@ -1,6 +1,3 @@
1
- import os
2
- import requests
3
- import chainlit as cl
4
  from langchain import hub
5
  from langchain.agents import AgentExecutor, load_tools
6
  from langchain.agents.format_scratchpad import format_log_to_str
@@ -8,14 +5,9 @@ from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
8
  from langchain.tools.render import render_text_description
9
  from langchain_community.llms import HuggingFaceHub
10
  from langchain_community.utilities import SerpAPIWrapper
 
11
 
12
- # Define the API endpoint URL
13
- API_URL = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1"
14
-
15
- # Replace this with your actual Hugging Face API token
16
- HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
17
-
18
- # Load the LLM (Hugging Face Hub)
19
  llm = HuggingFaceHub(
20
  repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
21
  task="text-generation",
@@ -38,7 +30,7 @@ prompt = prompt.partial(
38
  )
39
 
40
  # Define the agent
41
- chat_model_with_stop = llm.bind(stop=["\nObservation"])
42
  agent = (
43
  {
44
  "input": lambda x: x["input"],
@@ -50,22 +42,15 @@ agent = (
50
  )
51
 
52
  # Instantiate AgentExecutor
53
- agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
54
-
55
- @cl.on_chat_start
56
- async def main():
57
- cl.user_session.set("agent_executor", agent_executor)
58
- return "API-based Mixtral-Instruct"
59
-
60
- @cl.on_message
61
- async def run(message: cl.Message):
62
- # Retrieve the AgentExecutor from the user session
63
- agent_executor = cl.user_session.get("agent_executor")
64
-
65
- question = message.content
66
- result = agent_executor.invoke({"input": question})
67
-
68
- if result["output"]:
69
- await cl.Message(content=result["output"]).send()
70
- else:
71
- await cl.Message(content="I'm sorry, I couldn't find an answer.").send()
 
 
 
 
1
  from langchain import hub
2
  from langchain.agents import AgentExecutor, load_tools
3
  from langchain.agents.format_scratchpad import format_log_to_str
 
5
  from langchain.tools.render import render_text_description
6
  from langchain_community.llms import HuggingFaceHub
7
  from langchain_community.utilities import SerpAPIWrapper
8
+ import chainlit as cl
9
 
10
+ # Instantiate the LLM
 
 
 
 
 
 
11
  llm = HuggingFaceHub(
12
  repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
13
  task="text-generation",
 
30
  )
31
 
32
  # Define the agent
33
+ chat_model_with_stop = ChatHuggingFace(llm=llm).bind(stop=["\nObservation"])
34
  agent = (
35
  {
36
  "input": lambda x: x["input"],
 
42
  )
43
 
44
  # Instantiate AgentExecutor
45
+ agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
46
+
47
+ # Create the Chainlit app
48
+ @cl.chainlit_app
49
+ def app():
50
+ question = cl.get_text_input("Enter your question")
51
+ if question:
52
+ response = agent_executor.invoke({"input": question})
53
+ cl.display_output(response["output"])
54
+
55
+ # Run the app
56
+ app()