TBao-THUer committed on
Commit
a9abd7c
·
verified ·
1 Parent(s): 5d59f10

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +8 -10
agent.py CHANGED
@@ -706,27 +706,25 @@ tools = [
706
 
707
 
708
  # Build graph function
709
- def build_graph(provider: str = "groq"):
710
  """Build the graph"""
711
  # Load environment variables from .env file
712
- if provider == "groq":
 
 
 
713
  # Groq https://console.groq.com/docs/models
714
- llm = ChatGroq(model="qwen/qwen3-32b", temperature=0)
715
  elif provider == "huggingface":
716
  # TODO: Add huggingface endpoint
717
  llm = ChatHuggingFace(
718
  llm=HuggingFaceEndpoint(
719
- repo_id="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
720
- task="text-generation", # for chat-style use "text-generation"
721
- max_new_tokens=1024,
722
- do_sample=False,
723
- repetition_penalty=1.03,
724
  temperature=0,
725
  ),
726
- verbose=True,
727
  )
728
  else:
729
- raise ValueError("Invalid provider. Choose 'groq' or 'huggingface'.")
730
  # Bind tools to LLM
731
  llm_with_tools = llm.bind_tools(tools)
732
 
 
706
 
707
 
708
  # Build graph function
709
+ def build_graph(provider: str = "google"):
710
  """Build the graph"""
711
  # Load environment variables from .env file
712
+ if provider == "google":
713
+ # Google Gemini
714
+ llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
715
+ elif provider == "groq":
716
  # Groq https://console.groq.com/docs/models
717
+ llm = ChatGroq(model="qwen-qwq-32b", temperature=0) # optional : qwen-qwq-32b gemma2-9b-it
718
  elif provider == "huggingface":
719
  # TODO: Add huggingface endpoint
720
  llm = ChatHuggingFace(
721
  llm=HuggingFaceEndpoint(
722
+ url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
 
 
 
 
723
  temperature=0,
724
  ),
 
725
  )
726
  else:
727
+ raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")
728
  # Bind tools to LLM
729
  llm_with_tools = llm.bind_tools(tools)
730