mujtabarizvi committed on
Commit
b83c856
·
verified ·
1 Parent(s): 8629b10

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -6,7 +6,7 @@ import pandas as pd
6
  import re # For parsing LLM output
7
 
8
  # --- HF Inference API for LLM ---
9
- from huggingface_hub import HfInference
10
  # You can choose a different model, but make sure it's good at instruction following and ReAct-style prompting.
11
  # Zephyr-7B-beta or Mistral-7B-Instruct are good choices available on the free inference API.
12
  # Starling-LM-7B-beta is also excellent if available and performant enough.
@@ -16,7 +16,7 @@ LLM_MODEL = "HuggingFaceH4/zephyr-7b-beta" # or "mistralai/Mistral-7B-Instruct-v
16
  # Name: HF_TOKEN, Value: your_hf_token_here (with read access is usually enough for inference)
17
  try:
18
  hf_token = os.getenv("HF_TOKEN")
19
- llm_client = HfInference(model=LLM_MODEL, token=hf_token)
20
  except Exception as e:
21
  print(f"Error initializing HfInference client: {e}")
22
  llm_client = None
 
6
  import re # For parsing LLM output
7
 
8
  # --- HF Inference API for LLM ---
9
+ from huggingface_hub import InferenceClient
10
  # You can choose a different model, but make sure it's good at instruction following and ReAct-style prompting.
11
  # Zephyr-7B-beta or Mistral-7B-Instruct are good choices available on the free inference API.
12
  # Starling-LM-7B-beta is also excellent if available and performant enough.
 
16
  # Name: HF_TOKEN, Value: your_hf_token_here (with read access is usually enough for inference)
17
  try:
18
  hf_token = os.getenv("HF_TOKEN")
19
+ llm_client = InferenceClient(model=LLM_MODEL, token=hf_token)
20
  except Exception as e:
21
  print(f"Error initializing HfInference client: {e}")
22
  llm_client = None