File size: 4,193 Bytes
55b44d3
9587e24
 
55b44d3
 
9587e24
 
55b44d3
9587e24
55b44d3
 
 
 
 
 
 
 
 
9587e24
 
55b44d3
 
9587e24
55b44d3
 
 
 
 
 
 
 
 
9587e24
55b44d3
 
 
 
 
 
 
9587e24
 
55b44d3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9587e24
 
55b44d3
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
import gradio as gr
import tensorflow as tf
import numpy as np
from PIL import Image
import os
import io

# --- Configuration ---
PHOTO_SIZE = 224  # VGG16 expects 224x224 RGB input
MODEL_FILE_NAME = 'vgg_model50.h5'  # Must match the model file uploaded to the Space
# Index 0 -> non-autistic, index 1 -> autistic (matches the training notebook's
# label order). Spelling fixed ("Autstic" -> "Autistic") so the user-facing
# labels agree with the interface description text.
CLASS_NAMES = ["Non-Autistic", "Autistic"]

# --- Load the Keras model ---
# The model file must sit next to this script in the Space repository.
model_path = os.path.join(os.path.dirname(__file__), MODEL_FILE_NAME)
if not os.path.exists(model_path):
    # Fail fast with a clear message if the model was never uploaded.
    raise FileNotFoundError(f"Model file '{MODEL_FILE_NAME}' not found. Please upload it to the Space.")

try:
    model = tf.keras.models.load_model(model_path)
    print("Model loaded successfully.")
except Exception as e:
    # Keep the module importable even if deserialization fails;
    # predict_autism reports the failure to the user instead of crashing here.
    print(f"Error loading model: {e}")
    model = None

def preprocess_image(pil_image, target_size=None):
    """
    Prepare a PIL image for the VGG16 model.

    Converts to RGB, resizes to a square of ``target_size`` pixels, scales
    pixel values to [0, 1], and prepends a batch dimension of 1.

    Args:
        pil_image: PIL.Image.Image provided by the Gradio Image component
            (any object exposing ``convert``/``resize`` works).
        target_size: Edge length in pixels; defaults to PHOTO_SIZE when None,
            preserving the original hard-coded behavior.

    Returns:
        A float32 numpy array of shape (1, target_size, target_size, 3),
        or None if preprocessing fails.
    """
    size = PHOTO_SIZE if target_size is None else target_size
    try:
        img = pil_image.convert('RGB')  # normalize palette/alpha modes to 3 channels
        img = img.resize((size, size))
        np_image = np.array(img).astype('float32') / 255.0  # [0, 255] -> [0, 1]
        # Expand dimensions to create batch size of 1
        np_image = np.expand_dims(np_image, axis=0)
        print(f"Image preprocessed successfully. Shape: {np_image.shape}")
        return np_image
    except Exception as e:
        # Best-effort contract: the caller treats None as "preprocessing failed".
        print(f"Error preprocessing image: {e}")
        return None

def predict_autism(image_input):
    """
    Classify a facial image using the module-level VGG16 model.

    Args:
        image_input: PIL image delivered by the Gradio Image component.

    Returns:
        The predicted class name from CLASS_NAMES, or an error message
        string when the model is unavailable or preprocessing fails.
    """
    # Guard: the model may have failed to load at import time.
    if model is None:
        return "Error: Model not loaded."

    print(f"Received image of type: {type(image_input)}")  # expected: PIL Image

    batch = preprocess_image(image_input)
    if batch is None:
        return "Error: Image preprocessing failed."

    print("Making prediction...")
    scores = model.predict(batch)
    best_index = np.argmax(scores, axis=1)[0]
    label = CLASS_NAMES[best_index]
    confidence = float(np.max(scores))  # top score across classes

    print(
        f"Prediction result index: {best_index}, "
        f"Class: {label}, Confidence: {confidence:.4f}"
    )

    # A plain string suits the Textbox output component; a Label component
    # would instead want a {class_name: probability} mapping.
    return label

# --- Create Gradio Interface ---
# Wires predict_autism into a simple upload -> text UI; flagging is disabled.
# To ship example images, add an 'examples' folder to the Space repo, then
# uncomment the two lines below and the `examples=` argument.
# examples_folder = os.path.join(os.path.dirname(__file__), "examples")
# example_images = [os.path.join(examples_folder, img) for img in os.listdir(examples_folder)] if os.path.exists(examples_folder) else None

iface = gr.Interface(
    fn=predict_autism,
    inputs=gr.Image(type="pil", label="Upload Image"), # type="pil" hands the fn a PIL.Image
    outputs=gr.Textbox(label="Prediction Result"), # plain-text class name
    # outputs=gr.Label(num_top_classes=2), # Alternative: Label output shows confidences
    title="Autism Classification from Facial Images (VGG16)",
    description="Upload a facial image to classify as Autistic or Non-Autistic using a VGG16 model.",
    allow_flagging="never", # NOTE(review): deprecated in Gradio 4+ in favor of flagging_mode — confirm installed version
    # examples=example_images # Uncomment if you add example images
)

# --- Launch the Gradio app ---
# On Hugging Face Spaces the server binds to the Space's URL automatically;
# share=True is only needed for local tunneling.
if __name__ == "__main__":
    iface.launch()