# Hugging Face Space (status at scrape time: Sleeping)
| import gradio as gr | |
| import tensorflow as tf | |
| import numpy as np | |
| from tensorflow.keras.models import load_model | |
| from tensorflow.keras.preprocessing import image | |
| from PIL import Image | |
# --- CONFIGURATION (MUST MATCH YOUR TRAINING) ---
# Path to the trained model; the file ships with the Space's file system.
MODEL_PATH = 'weapon_classifier_final_tuned.keras'
# Input resolution the network was trained on (VGG16-style 224x224).
IMG_SIZE = (224, 224)
# Class labels in generator order: index 0 = Not_Weapon, index 1 = Weapon.
CLASS_NAMES = ['Not a Weapon', 'Weapon']
# --- END CONFIGURATION ---
# 1. LOAD MODEL GLOBALLY
# Loading here (module import time) means the model is read from disk exactly
# once when the app starts; every request then reuses the same instance.
try:
    classifier_model = load_model(MODEL_PATH)
    print("Model loaded successfully for Gradio interface.")
except Exception as err:
    print(f"Error loading model: {err}")
    # Keep the app alive; classify_weapon reports the failure per request.
    classifier_model = None
# 2. DEFINE THE PREDICTION FUNCTION
# Gradio passes the uploaded image as a NumPy array (type="numpy" below).
def classify_weapon(input_img_array):
    """Classify an uploaded image as 'Weapon' or 'Not a Weapon'.

    Parameters
    ----------
    input_img_array : numpy.ndarray or None
        Image as an (H, W, C) uint8 array with values 0-255, as supplied by
        the gr.Image component. None when the user submits with no image.

    Returns
    -------
    dict or str
        Mapping of class name -> confidence for the gr.Label component,
        or an error string when the model is unavailable / no image given.
    """
    if classifier_model is None:
        return "Error: Model failed to load."
    if input_img_array is None:
        # gr.Image delivers None when nothing was uploaded; the original
        # code crashed here with AttributeError on None.astype(...).
        return "Error: No image provided."

    # a) Force 3-channel RGB (RGBA PNGs and grayscale uploads would otherwise
    #    produce 4- or 1-channel arrays the model cannot accept), then resize
    #    to the model's required input size (224x224).
    img = Image.fromarray(input_img_array.astype('uint8')).convert('RGB')
    img = img.resize(IMG_SIZE)

    # b) Convert to float array and add the batch dimension: (1, 224, 224, 3).
    img_array = np.expand_dims(np.array(img).astype('float32'), axis=0)

    # c) Normalize (rescale) to [0, 1] — must match the 1./255 scaling used
    #    during training.
    processed_image = img_array / 255.0

    # d) Predict. Sigmoid output has shape (1, 1): P(class 1 == 'Weapon').
    probability = float(classifier_model.predict(processed_image)[0][0])

    # e) gr.Label expects a dict mapping labels to probabilities; derive both
    #    class confidences from the single sigmoid output.
    return {
        CLASS_NAMES[1]: probability,        # Weapon confidence
        CLASS_NAMES[0]: 1.0 - probability,  # Not a Weapon confidence
    }
# 3. CREATE THE GRADIO INTERFACE
# gr.Interface wires the components to classify_weapon and handles the
# 'Submit' button click automatically.

# Input: image widget; type="numpy" hands the function an (H, W, 3) array.
image_input = gr.Image(
    label="Upload Image for Classification",
    type="numpy",
)

# Output: label widget showing both class names with confidence scores.
label_output = gr.Label(
    num_top_classes=2,
    label="Classification Result",
)

gr_interface = gr.Interface(
    fn=classify_weapon,
    inputs=image_input,
    outputs=label_output,
    title="Weapon or Fruit Detector",
    description="Upload an image to classify it as 'Weapon' or 'Not a Weapon'.",
)

# 4. LAUNCH THE INTERFACE
# On a Hugging Face Space the environment serves the app; locally this call
# starts the web server itself.
gr_interface.launch()