Vijayrathank committed on
Commit
cc20004
·
verified ·
1 Parent(s): 772f7d2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -57
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import os
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
 
4
 
5
  # --- Emissions factors --------------------------------------------------------
6
  EMISSIONS_FACTORS = {
@@ -23,9 +24,9 @@ def calculate_footprint(car_km, bus_km, train_km, air_km,
23
  )
24
  total_emissions = transport_emissions + food_emissions
25
  stats = {
26
- "trees": round(total_emissions / 21),
27
- "flights": round(total_emissions / 500),
28
- "driving100km": round(total_emissions / 230)
29
  }
30
  return total_emissions, stats
31
 
@@ -37,12 +38,15 @@ while keeping a supportive and positive tone. Prefer actionable steps over theor
37
  Reasoning: medium
38
  """
39
 
 
 
 
40
  # --- Chat callback ------------------------------------------------------------
41
  def respond(
42
  message,
43
  history: list[dict[str, str]],
44
- hf_token_ui, # from password textbox (optional)
45
- system_message, # from textbox
46
  car_km,
47
  bus_km,
48
  train_km,
@@ -50,27 +54,8 @@ def respond(
50
  meat_meals,
51
  vegetarian_meals,
52
  vegan_meals,
 
53
  ):
54
- """
55
- Streams a response from openai/gpt-oss-20b via Hugging Face Inference API.
56
- Token priority: UI textbox > HF_TOKEN env var.
57
- """
58
- # Resolve token from UI or env
59
- token = (hf_token_ui or "").strip() or (os.getenv("HF_TOKEN") or "").strip()
60
- if not token:
61
- yield "⚠️ Please provide a valid Hugging Face token in the 'HF Token' box or set HF_TOKEN in the environment."
62
- return
63
-
64
- # Correct, namespaced repo id
65
- model_id = "openai/gpt-oss-20b"
66
-
67
- # Build client
68
- try:
69
- client = InferenceClient(model=model_id, token=token)
70
- except Exception as e:
71
- yield f"Failed to initialize InferenceClient: {e}"
72
- return
73
-
74
  # Compute personalized footprint summary
75
  footprint, stats = calculate_footprint(
76
  car_km, bus_km, train_km, air_km,
@@ -85,43 +70,49 @@ def respond(
85
  f"{system_message}"
86
  )
87
 
88
- # Construct messages in OpenAI-style format; providers map this to the model's chat template.
89
- messages = [{"role": "system", "content": custom_prompt}]
90
- messages.extend(history or [])
91
- messages.append({"role": "user", "content": message})
 
 
92
 
93
- # Stream from HF Inference API
94
- try:
95
- response = ""
96
- for chunk in client.chat_completion(
97
- messages,
98
- max_tokens=3000,
99
- temperature=0.7,
100
- top_p=0.95,
101
- stream=True,
102
- ):
103
- try:
104
- # Some providers return choices[0].delta.content during streaming
105
- if chunk.choices and getattr(chunk.choices[0], "delta", None):
106
- token_piece = chunk.choices[0].delta.content or ""
107
- else:
108
- # Fallback: some providers may use 'message' at the end
109
- token_piece = getattr(chunk, "message", {}).get("content", "") or ""
110
- except Exception:
111
- token_piece = ""
112
 
113
- if token_piece:
114
- response += token_piece
115
- yield response
116
- except Exception as e:
117
- # Common causes: 401 (bad token), 404 (wrong repo id), provider downtime
118
- yield f"Inference error with '{model_id}': {e}\n"
119
  return
120
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  # --- UI -----------------------------------------------------------------------
122
  demo = gr.ChatInterface(
123
  fn=respond,
124
- type="messages", # fixes 'tuples' deprecation warning
125
  additional_inputs=[
126
  gr.Textbox(label="HF Token (prefer env var HF_TOKEN)", type="password", placeholder="hf_..."),
127
  gr.Textbox(value=DEFAULT_SYSTEM_PROMPT, label="System Prompt"),
@@ -132,13 +123,15 @@ demo = gr.ChatInterface(
132
  gr.Slider(0, 21, value=7, step=1, label="Meat meals/week"),
133
  gr.Slider(0, 21, value=7, step=1, label="Vegetarian meals/week"),
134
  gr.Slider(0, 21, value=7, step=1, label="Vegan meals/week"),
 
135
  ],
136
  title="🌱 Sustainable.ai (gpt-oss-20b)",
137
  description=(
138
  "Chat with an AI that helps you understand and reduce your carbon footprint. "
139
- "Provide a Hugging Face token in the UI or via HF_TOKEN. Uses openai/gpt-oss-20b."
 
140
  ),
141
  )
142
 
143
  if __name__ == "__main__":
144
- demo.launch()
 
1
  import os
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
+ from transformers import pipeline
5
 
6
  # --- Emissions factors --------------------------------------------------------
7
  EMISSIONS_FACTORS = {
 
24
  )
25
  total_emissions = transport_emissions + food_emissions
26
  stats = {
27
+ "trees": round(total_emissions / 21),
28
+ "flights": round(total_emissions / 500),
29
+ "driving100km": round(total_emissions / 230)
30
  }
31
  return total_emissions, stats
32
 
 
38
  Reasoning: medium
39
  """
40
 
41
+ # --- Local pipeline (initialized once) ----------------------------------------
42
+ pipe = pipeline("text-generation", model="google/gemma-3-270m-it")
43
+
44
  # --- Chat callback ------------------------------------------------------------
45
  def respond(
46
  message,
47
  history: list[dict[str, str]],
48
+ hf_token_ui,
49
+ system_message,
50
  car_km,
51
  bus_km,
52
  train_km,
 
54
  meat_meals,
55
  vegetarian_meals,
56
  vegan_meals,
57
+ use_local_model, # checkbox
58
  ):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  # Compute personalized footprint summary
60
  footprint, stats = calculate_footprint(
61
  car_km, bus_km, train_km, air_km,
 
70
  f"{system_message}"
71
  )
72
 
73
+ # Build chat context
74
+ chat_context = custom_prompt + "\n"
75
+ for turn in (history or []):
76
+ role, content = turn["role"], turn["content"]
77
+ chat_context += f"{role.upper()}: {content}\n"
78
+ chat_context += f"USER: {message}\nASSISTANT:"
79
 
80
+ # --- Local branch ---------------------------------------------------------
81
+ if use_local_model:
82
+ out = pipe(chat_context, max_new_tokens=300, do_sample=True)
83
+ yield out[0]["generated_text"]
84
+ return
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
 
86
+ # --- Remote branch --------------------------------------------------------
87
+ token = (hf_token_ui or "").strip() or (os.getenv("HF_TOKEN") or "").strip()
88
+ if not token:
89
+ yield "⚠️ Please provide a Hugging Face token in the 'HF Token' box or set HF_TOKEN in the environment."
 
 
90
  return
91
 
92
+ model_id = "openai/gpt-oss-20b"
93
+ client = InferenceClient(model=model_id, token=token)
94
+
95
+ response = ""
96
+ for chunk in client.chat_completion(
97
+ [{"role": "system", "content": custom_prompt}] + (history or []) + [{"role": "user", "content": message}],
98
+ max_tokens=3000,
99
+ temperature=0.7,
100
+ top_p=0.95,
101
+ stream=True,
102
+ ):
103
+ token_piece = ""
104
+ if chunk.choices and getattr(chunk.choices[0], "delta", None):
105
+ token_piece = chunk.choices[0].delta.content or ""
106
+ else:
107
+ token_piece = getattr(chunk, "message", {}).get("content", "") or ""
108
+ if token_piece:
109
+ response += token_piece
110
+ yield response
111
+
112
  # --- UI -----------------------------------------------------------------------
113
  demo = gr.ChatInterface(
114
  fn=respond,
115
+ type="messages",
116
  additional_inputs=[
117
  gr.Textbox(label="HF Token (prefer env var HF_TOKEN)", type="password", placeholder="hf_..."),
118
  gr.Textbox(value=DEFAULT_SYSTEM_PROMPT, label="System Prompt"),
 
123
  gr.Slider(0, 21, value=7, step=1, label="Meat meals/week"),
124
  gr.Slider(0, 21, value=7, step=1, label="Vegetarian meals/week"),
125
  gr.Slider(0, 21, value=7, step=1, label="Vegan meals/week"),
126
+ gr.Checkbox(label="Use Local Model (google/gemma-3-270m-it)", value=False),
127
  ],
128
  title="🌱 Sustainable.ai (gpt-oss-20b)",
129
  description=(
130
  "Chat with an AI that helps you understand and reduce your carbon footprint. "
131
+ "Toggle 'Use Local Model' to run locally with google/gemma-3-270m-it, or leave it off "
132
+ "to call Hugging Face Inference API (gpt-oss-20b)."
133
  ),
134
  )
135
 
136
  if __name__ == "__main__":
137
+ demo.launch()