# Automated_waste / flask_app.py
# (stray Hugging Face page header converted to a comment so the file parses)
import gradio as gr
import numpy as np
from PIL import Image
import os
import json
import re
import tensorflow as tf
# --------------------------
# MODEL LOADING
# --------------------------
# Candidate weight files, tried in order; the first one that loads wins.
MODEL_FILES = [
    "best_waste_classification_model.h5",
    "cnn_best_model.h5",
    "mobilenet_enhanced_complete_model.h5"
]
model = None
for candidate in MODEL_FILES:
    if not os.path.exists(candidate):
        continue
    try:
        model = tf.keras.models.load_model(candidate)
    except Exception as e:
        print("Failed loading model", candidate, e)
    else:
        print(f"Loaded model: {candidate}")
        break
# --------------------------
# LOAD LABELS
# --------------------------
# One class name per line; blank lines are skipped. Missing file -> empty list.
labels = []
if os.path.exists("labels.txt"):
    with open("labels.txt", "r", encoding="utf-8") as f:
        labels = [entry.strip() for entry in f if entry.strip()]
print("Loaded labels (count={}): {}".format(len(labels), labels[:50]))
# --------------------------
# LOAD TIPS JSON
# --------------------------
# Mapping of label -> tip entry (dict or string). Missing/broken file -> {}.
tips = {}
if os.path.exists("recycling_tips.json"):
    try:
        with open("recycling_tips.json", "r", encoding="utf-8") as fh:
            tips = json.load(fh)
    except Exception as err:
        print("Error loading recycling_tips.json:", err)
print("Loaded tips keys (count={}): {}".format(len(tips), list(tips.keys())[:50]))
# Image side length fed to the model (224x224 — typical MobileNet input).
TARGET_SIZE = (224, 224)
# --------------------------
# Normalization & lookup helpers
# --------------------------
def normalize_key(s: str) -> str:
    """Canonicalize a label for forgiving dictionary lookup.

    Lowercases, turns underscores/dashes into spaces, drops every
    character outside [a-z0-9 ], and collapses whitespace runs.
    ``None`` maps to the empty string.
    """
    if s is None:
        return ""
    text = str(s).strip().lower()
    for pattern, replacement in ((r"[_\-]+", " "), (r"[^a-z0-9 ]+", ""), (r"\s+", " ")):
        text = re.sub(pattern, replacement, text)
    return text.strip()
# Lookup tables keyed three ways, so a predicted label can match a tip
# by exact key, lowercased key, or fully normalized key.
tips_exact = dict(tips)  # untouched copy of the original mapping
tips_lower = {key.lower(): value for key, value in tips.items()}
tips_norm = {normalize_key(key): value for key, value in tips.items()}
tips_norm_keys = list(tips_norm)
def format_raw_tip_to_markdown(raw_tip):
    """Render a tip entry (dict or plain value) as a Markdown string.

    Dict entries may carry "tips" (list -> bulleted section),
    "preparation" and "recyclability" (bolded paragraphs); anything
    else is shown verbatim as a single paragraph.
    """
    if not isinstance(raw_tip, dict):
        # Plain string (or other scalar) -> one paragraph.
        return str(raw_tip)
    sections = []
    bullet_items = raw_tip.get("tips")
    if isinstance(bullet_items, list):
        sections.append("### Tips")
        sections.extend(f"- {item}" for item in bullet_items)
    if "preparation" in raw_tip:
        sections += ["", "**Preparation:**", raw_tip["preparation"]]
    if "recyclability" in raw_tip:
        sections += ["", "**Recyclability:**", raw_tip["recyclability"]]
    # Double newlines keep each piece a separate Markdown paragraph.
    return "\n\n".join(part for part in sections if part is not None)
# --------------------------
# Preprocess
# --------------------------
def preprocess_image(img: Image.Image):
    """Convert to RGB, resize to TARGET_SIZE, scale to [0, 1], add batch dim.

    Returns a float32 array of shape (1, H, W, 3) ready for model.predict.
    """
    resized = img.convert("RGB").resize(TARGET_SIZE)
    scaled = np.array(resized) / 255.0
    return scaled[np.newaxis, ...].astype(np.float32)
# --------------------------
# Main predict with robust lookup
# --------------------------
def predict(image):
    """Classify `image`; return (prediction_text, tip_markdown).

    The tip lookup falls through a chain of progressively fuzzier
    strategies: exact key, lowercase key, normalized key, token-overlap
    score, then substring containment in either direction.
    """
    # Guard clauses — the UI always expects exactly two outputs.
    if image is None:
        return "No image provided", ""
    if model is None:
        return "Model not found or failed to load", ""

    try:
        batch = preprocess_image(image)
        scores = np.asarray(model.predict(batch)).squeeze()
        if scores.ndim == 0:
            # Scalar output: treat it as the confidence of class 0.
            idx, confidence = 0, float(scores)
        else:
            idx = int(np.argmax(scores))
            confidence = float(np.max(scores))
    except Exception as e:
        print("Error during prediction:", e)
        return f"Error: {e}", ""

    raw_label = labels[idx] if 0 <= idx < len(labels) else f"class_{idx}"
    norm_label = normalize_key(raw_label)

    raw_tip, source = None, None
    # 1) exact original key
    if raw_label in tips_exact:
        raw_tip, source = tips_exact[raw_label], "exact"
    # 2) exact lowercase
    if raw_tip is None and raw_label.lower() in tips_lower:
        raw_tip, source = tips_lower[raw_label.lower()], "lower"
    # 3) normalized match
    if raw_tip is None and norm_label in tips_norm:
        raw_tip, source = tips_norm[norm_label], "normalized"
    # 4) token-overlap fuzzy match: most shared tokens wins, with longer
    #    tip keys penalized by the 1/(1+len) factor.
    if raw_tip is None:
        label_tokens = set(norm_label.split())
        best_key, best_score = None, 0
        for key in tips_norm_keys:
            key_tokens = set(key.split())
            shared = len(label_tokens & key_tokens)
            score = shared / (1 + len(key_tokens))
            if shared > 0 and score > best_score:
                best_key, best_score = key, score
        if best_key:
            raw_tip, source = tips_norm[best_key], f"token_match:{best_key}"
    # 5) substring fallback (either string contained in the other)
    if raw_tip is None:
        for key in tips_exact:
            if key.lower() in raw_label.lower() or raw_label.lower() in key.lower():
                raw_tip, source = tips_exact[key], f"substring:{key}"
                break

    if raw_tip is None:
        tip_markdown = "No recycling tip available for this detected label. Please check local recycling rules."
    else:
        tip_markdown = format_raw_tip_to_markdown(raw_tip)

    prediction_text = f"{raw_label} ({confidence*100:.2f}%)"
    # Debugging log line (useful if something still fails)
    print("PREDICTION DEBUG:", {
        "idx": idx,
        "raw_label": raw_label,
        "normalized_label": norm_label,
        "confidence": confidence,
        "tip_source": source,
        "tip_found": raw_tip is not None,
        "available_tip_keys_sample": list(tips_exact.keys())[:50]
    })
    return prediction_text, tip_markdown
# --------------------------
# UI - use Markdown for tips so it expands cleanly
# --------------------------
with gr.Blocks() as demo:
    gr.Markdown("# ♻️ Automated Waste Classifier — Tips (expanded view)")
    with gr.Row():
        image_input = gr.Image(type="pil", label="Upload Image")
        prediction_box = gr.Textbox(label="Prediction", interactive=False, lines=1)
    # A Markdown component renders the tip nicely and grows with its content.
    tip_output = gr.Markdown("**Recycling tip will appear here**", label="Recycling Tip")
    classify_button = gr.Button("Classify")
    # Output order mirrors predict()'s return: (prediction_text, tip_markdown).
    classify_button.click(fn=predict, inputs=image_input, outputs=[prediction_box, tip_output])

if __name__ == "__main__":
    demo.launch()