# Uploaded to Hugging Face by Iris314 ("Upload app.py", commit 918758a, verified)
# -*- coding: utf-8 -*-
"""Untitled0.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1irh48fSmmTfb6dgVI1PbTuzZGFja9htU
"""
import os # For reading environment variables
import shutil # For directory cleanup
import zipfile # For extracting model archives
import pathlib # For path manipulations
import tempfile # For creating temporary files/directories
import gradio # For interactive UI
import pandas # For tabular data handling
import PIL.Image # For image I/O
import huggingface_hub # For downloading model assets
import autogluon.multimodal # For loading AutoGluon image classifier
from datasets import load_dataset
# Hardcoded Hub model (native zip): the Hugging Face model repo that holds the
# zipped AutoGluon predictor directory produced by predictor.save().
MODEL_REPO_ID = "SebastianAndreu/2025-24679-HW1-Part2-image-autogluon-predictor"
ZIP_FILENAME = "autogluon_image_predictor_dir.zip"
# Local cache/extract dirs: the zip is downloaded into CACHE_DIR and unpacked
# into EXTRACT_DIR (recreated fresh on each startup).
CACHE_DIR = pathlib.Path("hf_assets")
EXTRACT_DIR = CACHE_DIR / "predictor_native"
# Download & load the native predictor
def _prepare_predictor_dir() -> str:
    """Fetch the zipped predictor from the Hub, unpack it, and return its root.

    Returns
    -------
    str
        Path to the directory that MultiModalPredictor.load() should consume.
    """
    CACHE_DIR.mkdir(parents=True, exist_ok=True)
    zip_path = huggingface_hub.hf_hub_download(
        repo_id=MODEL_REPO_ID,
        filename=ZIP_FILENAME,
        repo_type="model",
        local_dir=str(CACHE_DIR),
        local_dir_use_symlinks=False,
    )
    # Recreate the extraction dir from scratch so stale files never mix in.
    if EXTRACT_DIR.exists():
        shutil.rmtree(EXTRACT_DIR)
    EXTRACT_DIR.mkdir(parents=True, exist_ok=True)
    with zipfile.ZipFile(zip_path) as archive:
        archive.extractall(str(EXTRACT_DIR))
    # Archives often wrap everything in a single top-level folder; unwrap it.
    entries = list(EXTRACT_DIR.iterdir())
    if len(entries) == 1 and entries[0].is_dir():
        return str(entries[0])
    return str(EXTRACT_DIR)
# Download/extract the predictor once at import time, then load it into memory.
# Module-level so a single predictor instance serves all Gradio requests.
PREDICTOR_DIR = _prepare_predictor_dir()
PREDICTOR = autogluon.multimodal.MultiModalPredictor.load(PREDICTOR_DIR)
CLASS_LABELS = {0: "no_duo", 1: "duo"}
def _human_label(c):
try:
ci = int(c)
return CLASS_LABELS.get(ci, str(c))
except Exception:
return CLASS_LABELS.get(c, str(c))
# Prediction function
def do_predict(pil_img: "PIL.Image.Image"):
    """Run the AutoGluon predictor on a single image.

    Parameters
    ----------
    pil_img : PIL.Image.Image or None
        Image delivered by the Gradio input component (None when cleared).

    Returns
    -------
    dict
        Mapping of class name -> probability for gradio.Label. Empty when no
        image was provided. (The original returned a 3-tuple on the None path,
        which breaks the single-output wiring of the Label component.)
    """
    if pil_img is None:
        # One output component is bound to this callback, so return one value.
        return {}
    tmpdir = pathlib.Path(tempfile.mkdtemp())
    try:
        img_path = tmpdir / "input.png"
        pil_img.save(img_path)
        # MultiModalPredictor consumes a DataFrame of image file paths.
        df = pandas.DataFrame({"image": [str(img_path)]})
        proba_df = PREDICTOR.predict_proba(df)
        proba_df = proba_df.rename(columns={0: "no_duo (0)", 1: "duo (1)"})
        row = proba_df.iloc[0]
        return {
            "no_duo": float(row.get("no_duo (0)", 0.0)),
            "duo": float(row.get("duo (1)", 0.0)),
        }
    finally:
        # Don't leak one temp directory per prediction.
        shutil.rmtree(tmpdir, ignore_errors=True)
# Load the dataset from Hugging Face
DATASET_REPO_ID = "scottymcgee/duo-image-dataset"
dataset = load_dataset(DATASET_REPO_ID, split="original")

# Persist a handful of samples to disk so gradio.Examples can point at them
# (the Examples component takes file paths, one per row).
tmpdir = pathlib.Path(tempfile.mkdtemp())
EXAMPLES = []
for idx in range(5):
    sample_path = tmpdir / f"example_{idx}.png"
    dataset[idx]["image"].save(sample_path)
    EXAMPLES.append([str(sample_path)])
# Define Gradio interface. Component creation order inside Blocks fixes the
# vertical layout: title, description, input image, probability label, examples.
with gradio.Blocks() as demo:
    # Title and introduction
    gradio.Markdown("# Duo Detection")
    gradio.Markdown("""
This app demonstrates an **AutoGluon multimodal image predictor**.
Upload an image, and the model will predict whether it belongs to the class
`duo` or `no_duo`.
""")
    # Image input (upload or webcam); type="pil" hands do_predict a PIL image
    image_in = gradio.Image(type="pil", label="Input image", sources=["upload", "webcam"])
    # Output: class probabilities rendered as a ranked label widget
    proba_pretty = gradio.Label(num_top_classes=2, label="Class probabilities")
    # Run prediction whenever the image value changes (upload, webcam, clear)
    image_in.change(fn=do_predict, inputs=[image_in], outputs=[proba_pretty])
    # Representative examples from the dataset (paths prepared in EXAMPLES above)
    gradio.Examples(
        examples=EXAMPLES,
        inputs=[image_in],
        label="Representative examples",
        examples_per_page=5,
        cache_examples=False,
    )
# Start the Gradio server only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()