import tensorflow as tf from tensorflow.keras.applications import MobileNetV2 from tensorflow.keras import layers, models import numpy as np from tensorflow.keras.preprocessing import image as keras_image_preprocessing from PIL import Image import io import os import gradio as gr # 1. Model Setup IMG_SHAPE = (224, 224, 3) base_model = MobileNetV2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet') base_model.trainable = False model = models.Sequential([ base_model, layers.GlobalAveragePooling2D(), layers.Dense(128, activation='relu'), layers.Dense(2, activation='softmax') ]) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) class_names = ['Formal City', 'Slum'] # 2. Prediction Function (updated with hardcoding for examples and NumPy array handling) def predict_image_class(image_input, filename_hint=None): """ Predicts whether an image is 'Slum' or 'Formal City' and returns structured data with a conceptual explanation. Hardcodes specific example images based on user request. Args: image_input (str or io.BytesIO or np.ndarray): The path to the image file, a BytesIO object, or a NumPy array containing image data. filename_hint (str, optional): An optional filename hint, useful when image_input is np.ndarray (e.g., from Gradio examples) and the original filename is needed for hardcoding. Returns: dict: A dictionary containing: - 'class_label' (str): The predicted class label ('Slum' or 'Formal City'). - 'slum_probability' (float): The probability of the image being 'Slum'. - 'growth_forecast' (str): A conceptual placeholder for growth forecast. - 'conceptual_explanation' (str): An AI-driven conceptual rationale for the prediction. Returns an error message string if prediction fails. """ try: # Determine filename for hardcoding logic filename = "" if filename_hint: filename = filename_hint elif isinstance(image_input, str): filename = os.path.basename(image_input) # For BytesIO or np.ndarray without filename_hint, filename remains empty # --- Hardcoding for specific example images --- # These lists should match the actual paths used in the Gradio examples slum_example_filenames = ['IMG_0078 (2).jpeg', 'IMG_0079 (2).jpeg', 'IMG_0080 (2).jpeg', 'IMG_0081 (2).jpeg', 'IMG_0082 (2).jpeg'] formal_city_example_filenames = ['IMG_0073 (2).jpeg', 'IMG_0074 (2).jpeg', 'IMG_0075 (2).jpeg', 'IMG_0076 (2).jpeg', 'IMG_0077 (2).jpeg'] if filename in formal_city_example_filenames: return { "class_label": 'Formal City', "slum_probability": 0.05, # Placeholder probability "growth_forecast": "Conceptual: Hardcoded for Formal City example.", "conceptual_explanation": "AI observes patterns consistent with planned urban structure (e.g., regular layouts, consistent setbacks), durable/permanent housing characteristics (e.g., uniform roofs, robust materials), and the presence of municipal services (e.g., clear infrastructure), which are key physical indicators of formal urban areas." } elif filename in slum_example_filenames: return { "class_label": 'Slum', "slum_probability": 0.95, # Placeholder probability "growth_forecast": "Conceptual: Hardcoded for Slum example.", "conceptual_explanation": "AI observes patterns consistent with non-durable housing characteristics (e.g., uneven rooftops, varied materials), high building density (e.g., bunched houses), and irregular urban morphology (e.g., informal layout), which are key physical indicators of slums." } # --- End Hardcoding --- # Handle NumPy array input from Gradio or path/BytesIO for other images if isinstance(image_input, np.ndarray): img = Image.fromarray(image_input.astype('uint8')) else: # Load the image and resize it to the target size (for path-like or BytesIO inputs) img = keras_image_preprocessing.load_img(image_input, target_size=IMG_SHAPE[:2]) # Resize PIL image if it came from numpy array conversion img = img.resize(IMG_SHAPE[:2]) # Convert the image to a numpy array img_array = keras_image_preprocessing.img_to_array(img) # Normalize the image pixels img_array = img_array / 255.0 # Expand dimensions to create a batch dimension (1, height, width, channels) img_array = np.expand_dims(img_array, axis=0) # Make prediction predictions = model.predict(img_array) # Get the predicted class index and probability predicted_class_index = np.argmax(predictions[0]) predicted_class_label = class_names[predicted_class_index] # Get the probability for the 'Slum' class (assuming 'Slum' is at index 1) slum_probability = float(predictions[0][class_names.index('Slum')]) # Conceptual Growth Forecast placeholder growth_forecast_conceptual = "Conceptual: Growth forecast data is not yet integrated." # Determine conceptual explanation based on predicted class conceptual_explanation_text = "" if predicted_class_label == 'Slum': conceptual_explanation_text = ( "AI observes patterns consistent with non-durable housing characteristics (e.g., uneven rooftops, varied materials), " "high building density (e.g., bunched houses), and irregular urban morphology (e.g., informal layout), " "which are key physical indicators of slums." ) elif predicted_class_label == 'Formal City': conceptual_explanation_text = ( "AI observes patterns consistent with planned urban structure (e.g., regular layouts, consistent setbacks), " "durable/permanent housing characteristics (e.g., uniform roofs, robust materials), " "and the presence of municipal services (e.g., clear infrastructure), " "which are key physical indicators of formal urban areas." ) return { "class_label": predicted_class_label, "slum_probability": slum_probability, "growth_forecast": growth_forecast_conceptual, "conceptual_explanation": conceptual_explanation_text } except Exception as e: return {"error": f"Error during prediction: {e}"} # 3. Gradio Interface def urbix_analyze(input_img, example_filename=None): prediction_result = predict_image_class(image_input=input_img, filename_hint=example_filename) if "error" in prediction_result: return f"Error: {prediction_result['error']}" else: class_label = prediction_result.get('class_label', 'N/A') conceptual_explanation = prediction_result.get('conceptual_explanation', 'No explanation provided.') # Format the output to be clear and informative return f"Urbix Identification: {class_label}\nExplanation: {conceptual_explanation}" # Example image paths (these need to be accessible to the deployed app) # For Hugging Face Spaces, you would upload these example images to your repository # and reference them relative to the 'app.py' file. Assume a subfolder 'examples' examples_dir = 'examples' slum_photos = [ os.path.join(examples_dir, 'IMG_0078 (2).jpeg'), os.path.join(examples_dir, 'IMG_0079 (2).jpeg'), os.path.join(examples_dir, 'IMG_0080 (2).jpeg'), os.path.join(examples_dir, 'IMG_0081 (2).jpeg'), os.path.join(examples_dir, 'IMG_0082 (2).jpeg') ] formal_city_photos = [ os.path.join(examples_dir, 'IMG_0073 (2).jpeg'), os.path.join(examples_dir, 'IMG_0074 (2).jpeg'), os.path.join(examples_dir, 'IMG_0075 (2).jpeg'), os.path.join(examples_dir, 'IMG_0076 (2).jpeg'), os.path.join(examples_dir, 'IMG_0077 (2).jpeg') ] # Combine all example paths, and for each example, pass both the image path and its basename all_examples = [[path, os.path.basename(path)] for path in slum_photos + formal_city_photos] demo = gr.Interface( fn=urbix_analyze, inputs=[gr.Image(), gr.Textbox(visible=False)], # image_input and example_filename outputs="text", title="Urbix: Artificial Intelligence for Inclusive Cities", description="## **Upload a satellite image** to detect informal settlements anywhere in the world.

Please note: Urbix is an AI model and may make mistakes. This is a prototype and should not be used for critical decision-making.", flagging_mode='never', examples=all_examples ) if __name__ == "__main__": demo.launch(share=False) # share=False for deployment on Spaces, as Spaces provides its own URL