lopera47 commited on
Commit
a9f000e
·
verified ·
1 Parent(s): 0cb611c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -2
app.py CHANGED
@@ -9,6 +9,7 @@ from langchain.chat_models import ChatOpenAI
9
  from langchain.tools import DuckDuckGoSearchRun
10
  from langchain_community.llms import HuggingFaceHub
11
  from langchain_huggingface import HuggingFaceEndpoint
 
12
 
13
  # (Keep Constants as is)
14
  # --- Constants ---
@@ -31,8 +32,16 @@ class BasicAgent: # Sometimes inheritance is needed
31
 
32
  # # Create the LLM # Temperature set to 0 because we need exact match
33
  # llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo") # ChatML understands roles (user, assistant, system)
 
 
 
 
 
 
 
34
 
35
- llm = HuggingFaceHub(
 
36
  repo_id="tiiuae/falcon-7b-instruct",
37
  huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
38
  model_kwargs={"temperature": 0.5, "max_new_tokens": 512}
@@ -66,7 +75,8 @@ class BasicAgent: # Sometimes inheritance is needed
66
  def __call__(self, question: str) -> str:
67
  print(f"Agent received question (first 50 chars): {question[:50]}...")
68
  # Do not return intermediate steps or thoughts
69
- response = self.agent.run(question)
 
70
  print(f"Agent response: {response}")
71
  return response
72
 
 
9
  from langchain.tools import DuckDuckGoSearchRun
10
  from langchain_community.llms import HuggingFaceHub
11
  from langchain_huggingface import HuggingFaceEndpoint
12
+ from langchain_community.chat_models import ChatHuggingFace
13
 
14
  # (Keep Constants as is)
15
  # --- Constants ---
 
32
 
33
  # # Create the LLM # Temperature set to 0 because we need exact match
34
  # llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo") # ChatML understands roles (user, assistant, system)
35
+
36
+ # # Define the LLM
37
+ # llm = HuggingFaceHub(
38
+ # repo_id="tiiuae/falcon-7b-instruct",
39
+ # huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
40
+ # model_kwargs={"temperature": 0.5, "max_new_tokens": 512}
41
+ # )
42
 
43
+ # Define the LLM
44
+ llm = ChatHuggingFace(
45
  repo_id="tiiuae/falcon-7b-instruct",
46
  huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
47
  model_kwargs={"temperature": 0.5, "max_new_tokens": 512}
 
75
  def __call__(self, question: str) -> str:
76
  print(f"Agent received question (first 50 chars): {question[:50]}...")
77
  # Do not return intermediate steps or thoughts
78
+ # response = self.agent.run(question)
79
+ response = self.agent.invoke({"input": question})
80
  print(f"Agent response: {response}")
81
  return response
82