Vijayrathank committed on
Commit
e94a8e8
·
verified ·
1 Parent(s): 1f3efef

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -6
app.py CHANGED
@@ -3,6 +3,12 @@ import gradio as gr
3
  from huggingface_hub import InferenceClient
4
  from transformers import pipeline
5
 
 
 
 
 
 
 
6
  # --- Emissions factors --------------------------------------------------------
7
  EMISSIONS_FACTORS = {
8
  "transportation": {"car": 2.3, "bus": 0.1, "train": 0.04, "plane": 0.25},
@@ -45,7 +51,6 @@ pipe = pipeline("text-generation", model="google/gemma-3-270m-it")
45
  def respond(
46
  message,
47
  history: list[dict[str, str]],
48
- system_message,
49
  car_km,
50
  bus_km,
51
  train_km,
@@ -83,13 +88,9 @@ def respond(
83
  return
84
 
85
  # --- Remote branch --------------------------------------------------------
86
- token = (hf_token_ui or "").strip() or (os.getenv("HF_TOKEN") or "").strip()
87
- if not token:
88
- yield "⚠️ Please provide a Hugging Face token in the 'HF Token' box or set HF_TOKEN in the environment."
89
- return
90
 
91
  model_id = "openai/gpt-oss-20b"
92
- client = InferenceClient(model=model_id, token=token)
93
 
94
  response = ""
95
  for chunk in client.chat_completion(
 
3
  from huggingface_hub import InferenceClient
4
  from transformers import pipeline
5
 
6
+
7
+ HF_TOKEN = os.getenv("HF_TOKEN")
8
+ if not HF_TOKEN:
9
+ raise RuntimeError("HF_TOKEN not found. In Spaces, add it under Settings → Repository secrets.")
10
+
11
+ login(token=HF_TOKEN)
12
  # --- Emissions factors --------------------------------------------------------
13
  EMISSIONS_FACTORS = {
14
  "transportation": {"car": 2.3, "bus": 0.1, "train": 0.04, "plane": 0.25},
 
51
  def respond(
52
  message,
53
  history: list[dict[str, str]],
 
54
  car_km,
55
  bus_km,
56
  train_km,
 
88
  return
89
 
90
  # --- Remote branch --------------------------------------------------------
 
 
 
 
91
 
92
  model_id = "openai/gpt-oss-20b"
93
+ client = InferenceClient(model=model_id, token=HF_TOKEN)
94
 
95
  response = ""
96
  for chunk in client.chat_completion(