elhamb's picture
Update app.py
2a20d23 verified
raw
history blame
3.71 kB
import traceback

import gradio as gr
import numpy as np
import tensorflow as tf
from PIL import Image
from tensorflow import keras
# --- Configuration ---
# Keras model file expected to sit next to this script in the Space repo.
MODEL_PATH = "cats-vs-dogs-finetuned.keras"
IMAGE_SIZE = (180, 180) # Adjust this to match the input size your model expects!
# NOTE(review): CLASS_LABELS is not referenced anywhere in this file; the
# prediction dict hard-codes "cat"/"dog" keys instead. Verify before removing.
CLASS_LABELS = ['Cat', 'Dog']
# --- Load the Model ---
# Best-effort load at import time: a missing or corrupt model file must not
# prevent the Space from starting, so failures fall back to model = None and
# predict_image() reports the problem to the user instead.
try:
    model = keras.models.load_model(MODEL_PATH)
except Exception as load_err:
    print(f"Error loading model: {load_err}. Using a placeholder function.")
    model = None
else:
    print(f"Model loaded successfully from {MODEL_PATH}")
# --- Prediction Function ---
def predict_image(input_img_pil):
    """Classify a PIL image as cat vs. dog for the Gradio Label output.

    Args:
        input_img_pil: PIL.Image.Image coming from the gr.Image component,
            or None when the user submits without uploading anything.

    Returns:
        dict mapping label -> confidence (floats in [0, 1]) on success.
        On missing input, missing model, or any runtime error, the dict
        instead carries a human-readable message so the UI shows it.
    """
    # Wrap everything so any failure is surfaced in the UI, not swallowed.
    try:
        # Guard: Gradio passes None when no image was uploaded.
        if input_img_pil is None:
            return {"Please upload an image first.": 1.0}

        # Guard: model loading failed during startup.
        if model is None:
            return {"MODEL NOT FOUND": 1.0,
                    f"Please check if {MODEL_PATH} exists.": 0.0}

        # 1. Preprocess: resize to the input size the model was trained on.
        print(f"Original image size: {input_img_pil.size}")
        img_resized = input_img_pil.resize(IMAGE_SIZE)
        img_array = keras.preprocessing.image.img_to_array(img_resized)

        # 2. Normalize pixel values to [0, 1] and add the batch dimension.
        img_array = img_array / 255.0
        img_array = np.expand_dims(img_array, axis=0)

        # 3. Predict. The output mapping below treats predictions[0][0] as a
        #    single sigmoid score where higher means "dog" -- assumed from
        #    the original code; TODO confirm against the training label order.
        print(f"Array shape for model input: {img_array.shape}")
        predictions = model.predict(img_array)
        print(f"Raw model predictions: {predictions}")

        # 4. Convert numpy scalars to plain Python floats so Gradio's Label
        #    component serializes them cleanly.
        dog_prob = float(predictions[0][0])
        return {"dog": dog_prob, "cat": 1.0 - dog_prob}
    except Exception as e:
        # Log the full traceback server-side and echo the message to the UI.
        error_message = f"CRITICAL RUNTIME ERROR: {str(e)}"
        detailed_trace = traceback.format_exc()
        print("\n--- DETAILED RUNTIME ERROR LOG ---")
        print(error_message)
        print(detailed_trace)
        print("------------------------------------\n")
        # A single-key dict forces Gradio to display the specific error.
        return {f"💥 {error_message}": 1.0}
# --- Gradio Interface Setup ---
# One image-upload input feeding a two-class probability label.
image_input = gr.Image(type="pil", label="Upload a Cat or Dog Image")
label_output = gr.Label(num_top_classes=2, label="Prediction")

# Example images for users to try: upload files to the Space and list their
# filenames here (e.g. 'example_cat.jpg', 'example_dog.jpg').
examples = []

# Create the Gradio interface. The description references MODEL_PATH so it
# stays correct if the model filename ever changes.
demo = gr.Interface(
    fn=predict_image,
    inputs=image_input,
    outputs=label_output,
    title="Keras Cat vs Dog Classifier",
    description=(
        "Upload an image of a cat or dog to see the model's prediction. "
        f"The model is loaded from {MODEL_PATH}."
    ),
    theme=gr.themes.Soft(),
    # Optional: pass examples=examples once example files are uploaded.
)

# Launch only when executed as a script, not when imported.
if __name__ == "__main__":
    demo.launch()