Umer797 committed on
Commit
2cac8b2
·
verified ·
1 Parent(s): 2e99435

Update llm_node.py

Browse files
Files changed (1) hide show
  1. llm_node.py +8 -7
llm_node.py CHANGED
@@ -2,11 +2,11 @@ import os
2
  from huggingface_hub import InferenceClient
3
 
4
  def llm_node(question, search_result):
5
- # Initialize the Hugging Face Inference Client
6
- client = InferenceClient(
7
- repo_id="HuggingFaceH4/zephyr-7b-beta", # ✅ You can swap in another, e.g., mistralai/Mistral-7B-Instruct-v0.2
8
- token=os.getenv("HUGGINGFACEHUB_API_TOKEN")
9
- )
10
 
11
  # Craft the prompt carefully
12
  prompt = f"""You are solving a GAIA benchmark evaluation question.
@@ -24,9 +24,10 @@ Here’s retrieved information:
24
 
25
  Your answer:"""
26
 
27
- # Call the model
28
  response = client.text_generation(
29
- prompt,
 
30
  max_new_tokens=500,
31
  temperature=0.1,
32
  top_p=0.95,
 
2
  from huggingface_hub import InferenceClient
3
 
4
  def llm_node(question, search_result):
5
+ # Initialize the client (no repo_id here!)
6
+ client = InferenceClient(token=os.getenv("HUGGINGFACEHUB_API_TOKEN"))
7
+
8
+ # Define the model you want to use
9
+ model_id = "HuggingFaceH4/zephyr-7b-beta" # You can swap this with e.g., mistralai/Mistral-7B-Instruct-v0.2
10
 
11
  # Craft the prompt carefully
12
  prompt = f"""You are solving a GAIA benchmark evaluation question.
 
24
 
25
  Your answer:"""
26
 
27
+ # Call the model (pass model ID here)
28
  response = client.text_generation(
29
+ model=model_id,
30
+ prompt=prompt,
31
  max_new_tokens=500,
32
  temperature=0.1,
33
  top_p=0.95,