import os
import gradio as gr
from huggingface_hub import InferenceClient, login
from transformers import pipeline
# --- Authentication -----------------------------------------------------------
# Read the Hugging Face access token from the environment and log in once at
# startup; respond() reuses HF_TOKEN for the remote InferenceClient.
HF_TOKEN = os.environ.get("HF_TOKEN")
if HF_TOKEN is None or HF_TOKEN == "":
    raise RuntimeError(
        "HF_TOKEN not found. In Spaces, add it under Settings → Repository secrets."
    )
login(token=HF_TOKEN)
# --- Emissions factors --------------------------------------------------------
# Approximate kg of CO2 emitted per km travelled (transportation) or per meal
# eaten (food).  Consumed by calculate_footprint() below.
EMISSIONS_FACTORS = {
    "transportation": {
        "car": 2.3,
        "bus": 0.1,
        "train": 0.04,
        "plane": 0.25,
    },
    "food": {
        "meat": 6.0,
        "vegetarian": 1.5,
        "vegan": 1.0,
    },
}
def calculate_footprint(car_km, bus_km, train_km, air_km,
                        meat_meals, vegetarian_meals, vegan_meals,
                        factors=None):
    """Estimate a weekly carbon footprint from travel and diet habits.

    Args:
        car_km, bus_km, train_km, air_km: distance travelled per week (km)
            by each transport mode.
        meat_meals, vegetarian_meals, vegan_meals: number of meals per week
            of each diet type.
        factors: optional mapping with the same shape as the module-level
            ``EMISSIONS_FACTORS`` (``{"transportation": {...}, "food": {...}}``).
            Defaults to ``EMISSIONS_FACTORS``; exposed as a parameter so the
            factors can be swapped (e.g. per-region values) without editing
            this function.

    Returns:
        A ``(total_emissions, stats)`` tuple: ``total_emissions`` is the
        estimated weekly emissions in kg CO2, and ``stats`` maps relatable
        equivalence labels ("trees", "flights", "driving100km") to rounded
        integer counts.
    """
    if factors is None:
        factors = EMISSIONS_FACTORS

    # Rough equivalences used to translate kg CO2 into relatable quantities.
    kg_co2_per_tree_year = 21      # kg CO2 absorbed by one tree in a year
    kg_co2_per_short_flight = 500  # kg CO2 for one short-haul flight
    kg_co2_per_100km_driven = 230  # kg CO2 for 100 km by car (2.3 kg/km * 100)

    transport = factors["transportation"]
    food = factors["food"]

    transport_emissions = (
        car_km * transport["car"]
        + bus_km * transport["bus"]
        + train_km * transport["train"]
        + air_km * transport["plane"]
    )
    food_emissions = (
        meat_meals * food["meat"]
        + vegetarian_meals * food["vegetarian"]
        + vegan_meals * food["vegan"]
    )
    total_emissions = transport_emissions + food_emissions

    stats = {
        "trees": round(total_emissions / kg_co2_per_tree_year),
        "flights": round(total_emissions / kg_co2_per_short_flight),
        "driving100km": round(total_emissions / kg_co2_per_100km_driven),
    }
    return total_emissions, stats
# --- Default system prompt ----------------------------------------------------
# Base persona instructions; respond() appends this to the per-user footprint
# summary that forms the system/context prompt for both model branches.
# NOTE(review): the trailing "Reasoning: medium" line is sent to the model as
# prompt text — presumably a model-specific control hint; confirm intentional.
system_message = """
You are Sustainable.ai, a friendly, encouraging, and knowledgeable AI assistant.
Always provide practical sustainability suggestions that are easy to adopt,
while keeping a supportive and positive tone. Prefer actionable steps over theory.
Reasoning: medium
"""
# --- Local pipeline (initialized once) ----------------------------------------
# Loaded at import time so the small local model is downloaded/initialized
# exactly once per process; respond() reuses it when "Use Local Model" is on.
pipe = pipeline("text-generation", model="google/gemma-3-270m-it")
# --- Chat callback ------------------------------------------------------------
def respond(
    message,
    history: list[dict[str, str]],
    car_km,
    bus_km,
    train_km,
    air_km,
    meat_meals,
    vegetarian_meals,
    vegan_meals,
    use_local_model,  # checkbox
):
    """Stream a chat reply personalized with the user's carbon footprint.

    Generator used by gr.ChatInterface: yields progressively longer strings
    (Gradio's streaming convention). The slider values are turned into a
    footprint summary that is prepended to the system prompt.

    Args:
        message: the user's latest chat message.
        history: prior turns as ``{"role": ..., "content": ...}`` dicts.
        car_km..vegan_meals: weekly habit values from the UI sliders.
        use_local_model: when True, run the local transformers pipeline;
            otherwise stream from the Hugging Face Inference API.
    """
    # Personalized footprint summary for this user's slider values.
    footprint, stats = calculate_footprint(
        car_km, bus_km, train_km, air_km,
        meat_meals, vegetarian_meals, vegan_meals,
    )
    custom_prompt = (
        f"This user’s estimated weekly footprint is **{footprint:.1f} kg CO2**.\n"
        f"That’s roughly planting {stats['trees']} trees 🌳 or taking {stats['flights']} short flights ✈️.\n"
        f"Breakdown includes transportation and food choices.\n"
        f"Your job is to give practical, friendly suggestions to lower this footprint.\n"
        f"{system_message}"
    )

    # --- Local branch ---------------------------------------------------------
    if use_local_model:
        # Flatten the structured history into a plain-text transcript for the
        # local text-generation pipeline.
        lines = [custom_prompt]
        for turn in (history or []):
            lines.append(f"{turn['role'].upper()}: {turn['content']}")
        lines.append(f"USER: {message}")
        chat_context = "\n".join(lines) + "\nASSISTANT:"
        # FIX: return_full_text=False makes the pipeline return only the newly
        # generated continuation; without it, the whole constructed prompt is
        # echoed back to the user as part of the reply.
        out = pipe(
            chat_context,
            max_new_tokens=300,
            do_sample=True,
            return_full_text=False,
        )
        yield out[0]["generated_text"]
        return

    # --- Remote branch --------------------------------------------------------
    model_id = "openai/gpt-oss-20b"
    # NOTE(review): a new client per call is lightweight, but could be hoisted
    # to module level if call volume grows.
    client = InferenceClient(model=model_id, token=HF_TOKEN)
    messages = (
        [{"role": "system", "content": custom_prompt}]
        + (history or [])
        + [{"role": "user", "content": message}]
    )
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=3000,
        temperature=0.7,
        top_p=0.95,
        stream=True,
    ):
        token_piece = ""
        if chunk.choices and getattr(chunk.choices[0], "delta", None):
            token_piece = chunk.choices[0].delta.content or ""
        else:
            # FIX: only treat chunk.message as a dict when it actually is one;
            # the old getattr(...).get(...) raised AttributeError for object
            # payloads.
            msg = getattr(chunk, "message", None)
            if isinstance(msg, dict):
                token_piece = msg.get("content", "") or ""
        if token_piece:
            response += token_piece
            yield response
# --- UI -----------------------------------------------------------------------
# Extra inputs feed respond()'s additional parameters in positional order:
# four travel sliders, three meal sliders, then the local-model checkbox.
_extra_inputs = [
    gr.Slider(0, 500, value=50, step=10, label="Car km/week"),
    gr.Slider(0, 500, value=20, step=10, label="Bus km/week"),
    gr.Slider(0, 500, value=20, step=10, label="Train km/week"),
    gr.Slider(0, 5000, value=200, step=50, label="Air km/week"),
    gr.Slider(0, 21, value=7, step=1, label="Meat meals/week"),
    gr.Slider(0, 21, value=7, step=1, label="Vegetarian meals/week"),
    gr.Slider(0, 21, value=7, step=1, label="Vegan meals/week"),
    gr.Checkbox(label="Use Local Model (google/gemma-3-270m-it)", value=False),
]

demo = gr.ChatInterface(
    fn=respond,
    type="messages",
    additional_inputs=_extra_inputs,
    title="🌱 Sustainable.ai",
    description=(
        "Chat with an AI that helps you understand and reduce your carbon footprint. "
        "Toggle 'Use Local Model' to run locally with google/gemma-3-270m-it, or leave it off "
        "to call Hugging Face Inference API (gpt-oss-20b)."
    ),
)

if __name__ == "__main__":
    demo.launch()