# Spaces: Sleeping — Hugging Face Spaces status banner captured by the page
# scrape; kept as a comment so the file parses as Python.
# app.py — fixed (no f-string backslash usage) — full file
import gradio as gr
import numpy as np
from PIL import Image
import os
import json
import re
import tensorflow as tf

# --------------------------
# MODEL LOADING
# --------------------------
# Candidate checkpoints, tried in order of preference.
MODEL_FILES = [
    "best_waste_classification_model.h5",
    "cnn_best_model.h5",
    "mobilenet_enhanced_complete_model.h5"
]

model = None
# Load the first checkpoint that exists on disk; on a load failure, fall
# through to the next candidate instead of crashing at import time.
for model_path in MODEL_FILES:
    if not os.path.exists(model_path):
        continue
    try:
        model = tf.keras.models.load_model(model_path)
        print(f"Loaded model: {model_path}")
        break
    except Exception as e:
        print("Failed loading model", model_path, e)
# --------------------------
# LOAD LABELS
# --------------------------
# One class name per line in labels.txt; blank lines are skipped.
labels = []
if os.path.exists("labels.txt"):
    with open("labels.txt", "r", encoding="utf-8") as f:
        labels = [line.strip() for line in f if line.strip()]
print("Loaded labels (count={}): {}".format(len(labels), labels[:50]))

# --------------------------
# LOAD TIPS JSON
# --------------------------
# Mapping of label -> tip entry (string or dict); missing/corrupt file
# leaves `tips` empty rather than aborting startup.
tips = {}
if os.path.exists("recycling_tips.json"):
    try:
        with open("recycling_tips.json", "r", encoding="utf-8") as f:
            tips = json.load(f)
    except Exception as e:
        print("Error loading recycling_tips.json:", e)
print("Loaded tips keys (count={}): {}".format(len(tips), list(tips.keys())[:50]))

# Input resolution expected by the classifier.
TARGET_SIZE = (224, 224)
# --------------------------
# Normalization & lookup helpers
# --------------------------
def normalize_key(s: str) -> str:
    """Canonicalize a label/tip key: lowercase, separators to single spaces,
    punctuation stripped. Returns "" for None."""
    if s is None:
        return ""
    text = str(s).strip().lower()
    text = re.sub(r"[_\-]+", " ", text)       # underscores/hyphens -> spaces
    text = re.sub(r"[^a-z0-9 ]+", "", text)   # drop everything but a-z, 0-9, space
    return re.sub(r"\s+", " ", text).strip()  # collapse runs of whitespace
# Pre-computed lookup tables so predict() can match a model label against
# the tips JSON under several key spellings.
tips_exact = dict(tips)                                      # keys exactly as in the JSON
tips_lower = {key.lower(): val for key, val in tips.items()}  # case-insensitive keys
tips_norm = {normalize_key(key): val for key, val in tips.items()}  # canonical keys
tips_norm_keys = list(tips_norm)
def format_raw_tip_to_markdown(raw_tip):
    """Render a tip entry as plain text, preserving newlines and bullets.

    A dict entry may carry "tips" (list), "preparation" and "recyclability"
    sections; anything else is stringified as-is.
    """
    if not isinstance(raw_tip, dict):
        return str(raw_tip)
    lines = []
    tip_items = raw_tip.get("tips")
    if isinstance(tip_items, list):
        lines.append("Tips:")
        lines.extend(f"• {item}" for item in tip_items)
    if "preparation" in raw_tip:
        lines += ["", "Preparation:", raw_tip["preparation"]]
    if "recyclability" in raw_tip:
        lines += ["", "Recyclability:", raw_tip["recyclability"]]
    # join with newline characters
    return "\n".join(lines)
# --------------------------
# Preprocess
# --------------------------
def preprocess_image(img: Image.Image):
    """Convert a PIL image into a (1, 224, 224, 3) float32 batch in [0, 1]."""
    rgb = img.convert("RGB").resize(TARGET_SIZE)
    scaled = np.array(rgb) / 255.0
    return np.expand_dims(scaled, axis=0).astype(np.float32)
# --------------------------
# Predict with robust lookup
# --------------------------
def predict(image):
    """Classify *image* and return (prediction text, recycling-tip text).

    The tip lookup cascades: exact key -> case-insensitive -> normalized ->
    token overlap -> substring, falling back to a generic message.
    """
    if image is None:
        return "No image provided", ""
    if model is None:
        return "Model not found or failed to load", ""
    # Run the network; any failure is reported back to the UI as text.
    try:
        batch = preprocess_image(image)
        scores = np.asarray(model.predict(batch)).squeeze()
        if scores.ndim == 0:
            # Single-output model: treat the scalar as the confidence.
            idx, confidence = 0, float(scores)
        else:
            idx, confidence = int(np.argmax(scores)), float(np.max(scores))
    except Exception as e:
        print("Error during prediction:", e)
        return f"Error: {e}", ""
    raw_label = labels[idx] if 0 <= idx < len(labels) else f"class_{idx}"
    norm_label = normalize_key(raw_label)
    raw_tip, source = None, None
    # lookup sequence
    if raw_label in tips_exact:
        raw_tip, source = tips_exact[raw_label], "exact"
    if raw_tip is None and raw_label.lower() in tips_lower:
        raw_tip, source = tips_lower[raw_label.lower()], "lower"
    if raw_tip is None and norm_label in tips_norm:
        raw_tip, source = tips_norm[norm_label], "normalized"
    if raw_tip is None:
        # Fuzzy pass: score each tip key by shared tokens with the label.
        label_tokens = set(norm_label.split())
        best_key, best_score = None, 0
        for candidate in tips_norm_keys:
            cand_tokens = set(candidate.split())
            overlap = len(label_tokens & cand_tokens)
            ratio = overlap / (1 + len(cand_tokens))
            if ratio > best_score and overlap > 0:
                best_key, best_score = candidate, ratio
        if best_key:
            raw_tip, source = tips_norm[best_key], f"token_match:{best_key}"
    if raw_tip is None:
        # Last resort: substring containment in either direction.
        for key in tips_exact:
            if key.lower() in raw_label.lower() or raw_label.lower() in key.lower():
                raw_tip, source = tips_exact[key], f"substring:{key}"
                break
    if raw_tip is None:
        tip_text = "No recycling tip available for this detected label. Please check local recycling rules."
    else:
        # create plain text with newlines and bullets
        tip_text = format_raw_tip_to_markdown(raw_tip)
    prediction_text = f"{raw_label} ({confidence*100:.2f}%)"
    print("PREDICTION DEBUG:", {
        "idx": idx,
        "raw_label": raw_label,
        "normalized_label": norm_label,
        "confidence": confidence,
        "tip_source": source,
        "tip_found": raw_tip is not None,
        "available_tip_keys_sample": list(tips_exact.keys())[:50]
    })
    # Return prediction and tip text (tip will be rendered into HTML in the UI)
    return prediction_text, tip_text
# --------------------------
# UI: Inject CSS to allow tip area to expand fully
# --------------------------
css = """
/* Make the HTML output area expand height and remove internal scrollbars */
.output_html, .output-markdown, .gr-output {
max-height: none !important;
height: auto !important;
overflow: visible !important;
}
/* Slightly larger readable font for the tip area */
.recycling-tip-container {
white-space: pre-wrap;
font-size: 16px;
line-height: 1.35;
}
"""

# Build UI
with gr.Blocks(css=css) as demo:
    gr.Markdown("# ♻️ Automated Waste Classifier — Full Tips Visible")
    with gr.Row():
        with gr.Column(scale=3):
            img = gr.Image(type="pil", label="Upload Image")
        with gr.Column(scale=2):
            out = gr.Textbox(label="Prediction", interactive=False, lines=1)
            # Use HTML output for the tip. We'll wrap the tip content in a div with our class.
            tip_html = gr.HTML("<div class='recycling-tip-container'>Recycling tip will appear here</div>", label="Recycling Tip")
    btn = gr.Button("Classify")

    # outputs order must match: prediction_text, tip_text
    def predict_and_wrap(img_in):
        """Run predict() and wrap the tip text as safe HTML for gr.HTML."""
        pred, tip_text = predict(img_in)
        # FIX: escape HTML metacharacters with their entities. The previous
        # replaces were identity operations ("&" -> "&"), so the tip text was
        # injected into the page unescaped (broken rendering / XSS risk).
        safe = (tip_text.replace("&", "&amp;")
                        .replace("<", "&lt;")
                        .replace(">", "&gt;"))
        # perform replace outside f-string to avoid backslash in expression
        replaced = safe.replace("\n", "<br>")
        html = "<div class='recycling-tip-container'>" + replaced + "</div>"
        return pred, html

    btn.click(fn=predict_and_wrap, inputs=img, outputs=[out, tip_html])

demo.launch()