# Source: Hugging Face Space by bulentsoykan (commit 6b8794b, verified upload).
"""
NATO Advanced Study Institute - AI for Disaster Management
Interactive Gradio Application
This state-of-the-art Gradio app provides an interactive interface for participants
to explore the curriculum, try AI models, and visualize disaster response scenarios.
"""
import gradio as gr
import numpy as np
from PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageEnhance
import io
import random
from datetime import datetime
# ============================================================================
# CONSTANTS AND CONFIGURATION
# ============================================================================
# Day-by-day course metadata rendered by the "Curriculum" tab. Each entry maps
# an accordion heading to its title, blurb, companion notebook filename, key
# concept bullets, and a display icon.
CURRICULUM_DAYS = {
    "Day 0: Setup Check": {
        "title": "Environment Setup & Pre-Flight Check",
        "description": "Verify Google Colab environment, GPU availability, and library imports",
        "notebook": "00_Setup_Check.ipynb",
        "key_concepts": ["Environment verification", "GPU testing", "Dependency checks"],
        "icon": "πŸ”§"
    },
    "Day 1: Foundations": {
        "title": "Introduction to AI and Imagery",
        "description": "Understanding digital images, RGB vs multispectral imagery, geospatial data formats",
        "notebook": "01_Intro_to_AI_and_Imagery.ipynb",
        "key_concepts": ["Image arrays", "GeoTIFF", "Coordinate systems", "Vector overlays"],
        "icon": "🌍"
    },
    "Day 2: CNN Basics": {
        "title": "Image Classification with CNNs",
        "description": "Build CNNs from scratch for binary classification (damaged vs undamaged buildings)",
        "notebook": "02_Image_Classification_CNN_Basics.ipynb",
        "key_concepts": ["Conv2D layers", "MaxPooling", "Binary classification", "Feature visualization"],
        "icon": "πŸ—οΈ"
    },
    "Day 3: Production Systems": {
        "title": "End-to-End Workflow for Damage Detection",
        "description": "Multi-class damage classification with data augmentation and class balancing",
        "notebook": "03_End_to_End_Workflow_Damage_Detection.ipynb",
        "key_concepts": ["Multi-class classification", "Data augmentation", "Class weights", "F1-score"],
        "icon": "🎯"
    },
    "Day 4-5: Segmentation": {
        "title": "Semantic Segmentation for Flood Mapping",
        "description": "U-Net architecture for pixel-level flood detection from satellite imagery",
        "notebook": "04_Semantic_Segmentation_Flood_Mapping.ipynb",
        "key_concepts": ["U-Net", "Encoder-decoder", "IoU metric", "Pixel-wise classification"],
        "icon": "🌊"
    },
    "Day 6: Transfer Learning": {
        "title": "Transfer Learning for Efficiency",
        "description": "Leverage pre-trained models (ResNet50, VGG16, EfficientNet) for rapid development",
        "notebook": "05_Transfer_Learning_for_Efficiency.ipynb",
        "key_concepts": ["Feature extraction", "Fine-tuning", "Pre-trained models", "Limited data"],
        "icon": "πŸš€"
    },
    "Day 7: Deployment": {
        "title": "Deployment Considerations & Ethics",
        "description": "Model optimization, deployment strategies, human-in-the-loop, responsible AI",
        "notebook": "06_Deployment_Considerations.ipynb",
        "key_concepts": ["Model optimization", "TFLite", "Edge deployment", "AI ethics"],
        "icon": "βš–οΈ"
    }
}
# Four-level damage taxonomy used by the damage-detection demo. "color" is the
# hex colour of the bounding box / label background drawn on result images.
DAMAGE_LEVELS = {
    0: {"name": "No Damage", "color": "#00FF00", "description": "Building intact"},
    1: {"name": "Minor Damage", "color": "#FFFF00", "description": "Minor structural issues"},
    2: {"name": "Major Damage", "color": "#FFA500", "description": "Significant structural damage"},
    3: {"name": "Destroyed", "color": "#FF0000", "description": "Building destroyed"}
}
# ============================================================================
# UTILITY FUNCTIONS
# ============================================================================
def create_sample_building_image(damage_level=0):
    """Create a synthetic 256x256 RGB building scene for a damage level.

    Args:
        damage_level: Integer matching DAMAGE_LEVELS keys — 0 intact,
            1 minor, 2 major; any other value renders "destroyed".

    Returns:
        PIL.Image.Image: the rendered scene (sky, ground, building/rubble).

    Note:
        Levels 1-3 draw from the module-level ``random`` generator, so the
        output varies between calls unless the caller seeds ``random``.
    """
    img = Image.new('RGB', (256, 256), color='skyblue')
    draw = ImageDraw.Draw(img)
    # Ground strip along the bottom of the frame.
    draw.rectangle([(0, 180), (256, 256)], fill='#228B22')
    if damage_level == 0:  # No damage: intact facade, full window grid, roof.
        draw.rectangle([(80, 100), (176, 180)], fill='#8B4513', outline='black', width=2)
        for i in range(3):
            for j in range(2):
                draw.rectangle([(95 + i*25, 115 + j*25), (110 + i*25, 135 + j*25)],
                               fill='lightblue', outline='black', width=1)
        draw.polygon([(70, 100), (128, 60), (186, 100)], fill='#A0522D', outline='black')
    elif damage_level == 1:  # Minor damage: roughly half the windows broken.
        draw.rectangle([(80, 100), (176, 180)], fill='#8B4513', outline='black', width=2)
        for i in range(3):
            for j in range(2):
                window_fill = 'lightblue' if random.random() > 0.5 else 'gray'
                draw.rectangle([(95 + i*25, 115 + j*25), (110 + i*25, 135 + j*25)],
                               fill=window_fill, outline='black', width=1)
        # Slightly sagging roof line (apex lower than the intact roof).
        draw.polygon([(70, 100), (128, 65), (186, 100)], fill='#8B4513', outline='black')
    elif damage_level == 2:  # Major damage: collapsed outline plus debris.
        draw.polygon([(80, 180), (80, 120), (140, 110), (176, 140), (176, 180)],
                     fill='#654321', outline='black', width=2)
        for _ in range(10):
            x, y = random.randint(70, 180), random.randint(160, 180)
            draw.rectangle([(x, y), (x+10, y+10)], fill='gray')
        draw.polygon([(70, 120), (110, 85), (150, 110)], fill='#654321', outline='black')
    else:  # Destroyed: rubble pile plus scattered round debris.
        for _ in range(30):
            x, y = random.randint(60, 190), random.randint(140, 180)
            size = random.randint(5, 20)
            color = random.choice(['#696969', '#808080', '#A9A9A9', '#654321'])
            draw.rectangle([(x, y), (x+size, y+size)], fill=color)
        for _ in range(20):
            x, y = random.randint(40, 210), random.randint(120, 185)
            # Use ellipse() rather than ImageDraw.circle(): circle() was only
            # added in Pillow 10.4.0 and crashes on older installations.
            r = random.randint(2, 8)
            draw.ellipse([(x - r, y - r), (x + r, y + r)], fill='#404040')
    return img
def create_flood_map_sample(flood_percentage=30):
    """Render a synthetic 512x512 satellite-style scene with a flood overlay.

    Args:
        flood_percentage: Rough flood coverage in percent (0-100); every full
            10% adds one semi-transparent blue ellipse.

    Returns:
        PIL.Image.Image: RGB scene with terrain noise, two roads, random
        buildings, and flooded regions.
    """
    # Light-green base with reproducible per-pixel terrain noise.
    canvas = Image.new('RGB', (512, 512), color='#90EE90')
    np.random.seed(42)
    terrain = np.array(canvas).astype(int)
    terrain = terrain + np.random.randint(-30, 30, terrain.shape)
    canvas = Image.fromarray(np.clip(terrain, 0, 255).astype(np.uint8))
    painter = ImageDraw.Draw(canvas, 'RGBA')
    # One vertical and one horizontal road.
    painter.rectangle([(100, 0), (120, 512)], fill='#696969')
    painter.rectangle([(0, 250), (512, 270)], fill='#696969')
    # Scatter 15 brown buildings (positions from the module-level RNG).
    for _ in range(15):
        bx, by = random.randint(0, 480), random.randint(0, 480)
        bw, bh = random.randint(20, 40), random.randint(20, 40)
        painter.rectangle([(bx, by), (bx + bw, by + bh)],
                          fill='#8B4513', outline='black')
    # Semi-transparent blue ellipses approximate the requested coverage.
    region_count = int((flood_percentage / 100) * 10)
    for _ in range(region_count):
        fx, fy = random.randint(0, 400), random.randint(0, 400)
        fw, fh = random.randint(80, 150), random.randint(80, 150)
        painter.ellipse([(fx, fy), (fx + fw, fy + fh)], fill=(0, 100, 255, 120))
    return canvas
def simulate_damage_detection(image, confidence_threshold=0.7):
    """Simulate building damage detection on an uploaded image.

    Demo stand-in for a real CNN classifier: the damage level is picked at
    random and the confidence is sampled uniformly from
    [confidence_threshold, 0.99].

    Args:
        image: PIL image to annotate (the original is not modified).
        confidence_threshold: Lower bound for the simulated confidence.

    Returns:
        Tuple of (annotated PIL image, damage level int 0-3, confidence float).
    """
    damage_level = random.randint(0, 3)
    confidence = random.uniform(confidence_threshold, 0.99)
    result_img = image.copy()
    draw = ImageDraw.Draw(result_img)
    # Full-frame bounding box in the damage level's colour.
    width, height = result_img.size
    margin = 20
    color = DAMAGE_LEVELS[damage_level]["color"]
    draw.rectangle([(margin, margin), (width-margin, height-margin)],
                   outline=color, width=5)
    label = f"{DAMAGE_LEVELS[damage_level]['name']}: {confidence:.2%}"
    try:
        font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 20)
    except OSError:
        # Font file missing on this host (e.g. non-Debian systems); fall back
        # to Pillow's built-in bitmap font. Previously a bare `except:` which
        # also swallowed KeyboardInterrupt/SystemExit.
        font = ImageFont.load_default()
    # Filled background behind the label so the text stays readable.
    bbox = draw.textbbox((margin, margin-30), label, font=font)
    draw.rectangle(bbox, fill=color)
    draw.text((margin, margin-30), label, fill='black', font=font)
    return result_img, damage_level, confidence
def simulate_flood_segmentation(image):
    """Simulate flood segmentation on an uploaded image.

    Uses a simple colour heuristic (blue-dominant pixels) rather than an
    actual ML model; the IoU score is randomly sampled for demo purposes.

    Args:
        image: PIL image. Converted to RGB internally, so RGBA/grayscale
            uploads are handled instead of crashing on channel indexing.

    Returns:
        Tuple of (blended overlay image, flood coverage percent, fake IoU).
    """
    # Normalize mode: channel indexing and Image.blend below both require a
    # 3-channel RGB image; Gradio uploads can be RGBA or grayscale.
    image = image.convert('RGB')
    img_array = np.array(image)
    # Compare in int, not uint8: with uint8 `red + 20` wraps around (e.g.
    # 250 + 20 == 14), which falsely flagged bright red/white pixels as flood.
    blue_channel = img_array[:, :, 2].astype(int)
    red_channel = img_array[:, :, 0].astype(int)
    # Heuristic mask: pixels where blue is strong and clearly dominant.
    flood_mask = (blue_channel > 100) & (blue_channel > red_channel + 20)
    # Paint masked pixels blue, then blend with the original for the overlay.
    overlay_array = np.array(image)
    overlay_array[flood_mask] = [0, 150, 255]
    result = Image.blend(image, Image.fromarray(overlay_array), alpha=0.4)
    flood_percentage = (np.sum(flood_mask) / flood_mask.size) * 100
    iou_score = random.uniform(0.75, 0.95)  # simulated metric for the demo
    return result, flood_percentage, iou_score
# ============================================================================
# GRADIO INTERFACE COMPONENTS
# ============================================================================
def create_welcome_tab():
    """Create the welcome/overview tab.

    Static content only: one long Markdown overview plus three call-to-action
    buttons. NOTE(review): the buttons have no click handlers wired, so they
    are currently decorative — confirm whether tab-switching was intended.
    """
    with gr.Column():
        gr.Markdown("""
# 🌍 NATO Advanced Study Institute
## AI for Disaster Management: Interactive Learning Platform
Welcome to the interactive demonstration platform for the **NATO ASI on AI for Disaster Management**!
### 🎯 What is this platform?
This application provides hands-on demonstrations of the AI techniques covered in our 7-day curriculum
on **Geospatial AI for Humanitarian Response**. You can:
- πŸ“š **Explore the Curriculum**: Navigate through each day's content and learning objectives
- πŸ€– **Try AI Models**: Interactive demos of damage detection, flood mapping, and more
- πŸ“Š **Visualize Results**: See how AI analyzes satellite imagery for disaster response
- πŸš€ **Learn by Doing**: Upload your own images and experiment with the models
### πŸ† Learning Philosophy
> *"What I cannot create, I do not understand."* β€” Richard Feynman
This curriculum emphasizes **practical implementation** over theoretical knowledge. Every concept
is taught through hands-on coding, building real systems that address actual disaster management challenges.
### πŸ“ˆ Course Overview
Our 7-day curriculum takes you from fundamentals to deployment:
| Day | Topic | Key Technology |
|-----|-------|----------------|
| **0** | Environment Setup | Google Colab, GPU, Libraries |
| **1** | Foundations | Image Processing, Geospatial Data |
| **2** | CNN Basics | Convolutional Neural Networks |
| **3** | Production Systems | Multi-class Classification, Augmentation |
| **4-5** | Segmentation | U-Net, Flood Mapping |
| **6** | Transfer Learning | ResNet50, EfficientNet |
| **7** | Deployment & Ethics | TFLite, Responsible AI |
### 🌟 Real-World Impact
The techniques you'll learn are used by humanitarian organizations worldwide:
- **πŸ—οΈ Building Damage Assessment**: Classify 100,000+ buildings in hours after earthquakes
- **🌊 Flood Mapping**: Identify inundated areas to direct rescue operations
- **πŸ›£οΈ Infrastructure Analysis**: Locate blocked roads and damaged bridges
- **πŸ“ Resource Allocation**: Prioritize areas for relief distribution
### πŸš€ Get Started
1. **Explore Curriculum**: Check out the detailed day-by-day breakdown
2. **Try Demos**: Experiment with damage detection and flood mapping models
3. **Upload Images**: Test the models on your own satellite/aerial imagery
4. **Review Notebooks**: Access the complete Jupyter notebooks for hands-on coding
---
**Built with ❀️ for humanitarian AI applications**
*Making the world more resilient to disasters, one model at a time.*
""")
        # Call-to-action row (buttons are not wired to any event handlers).
        with gr.Row():
            gr.Button("πŸ“š View Curriculum", size="lg", variant="primary")
            gr.Button("πŸ€– Try Damage Detection", size="lg", variant="secondary")
            gr.Button("🌊 Try Flood Mapping", size="lg", variant="secondary")
def create_curriculum_tab():
    """Build the curriculum explorer tab: one accordion per course day."""
    with gr.Column():
        gr.Markdown("# πŸ“š Curriculum Explorer")
        gr.Markdown("Detailed breakdown of each day's content, learning objectives, and key concepts.")
        for day, info in CURRICULUM_DAYS.items():
            # Pre-render the concept bullet list for the Markdown body.
            concept_bullets = "\n".join(f"- {concept}" for concept in info['key_concepts'])
            with gr.Accordion(f"{info['icon']} {day}: {info['title']}", open=False):
                gr.Markdown(f"""
### {info['title']}
**{info['description']}**
**πŸ““ Notebook**: `{info['notebook']}`
**πŸ”‘ Key Concepts**:
{concept_bullets}
**πŸ’‘ Learning Outcomes**:
By the end of this module, you will be able to apply these concepts to real-world
disaster management scenarios and build production-ready systems.
""")
def create_damage_detection_tab():
    """Build the building-damage-detection demo tab (UI plus event wiring)."""
    with gr.Column():
        gr.Markdown("""
# πŸ—οΈ Building Damage Detection
This demo simulates the CNN-based damage classification system taught in **Days 2-3**.
## How it works:
1. **Upload** an image of a building or generate a sample
2. The AI model classifies damage into 4 levels: None, Minor, Major, Destroyed
3. **View** the results with confidence scores and visualizations
## Damage Classification Levels:
- 🟒 **No Damage**: Building structurally intact
- 🟑 **Minor Damage**: Superficial damage, building functional
- 🟠 **Major Damage**: Significant structural damage, unsafe
- πŸ”΄ **Destroyed**: Building collapsed or completely destroyed
""")
        with gr.Row():
            with gr.Column():
                sample_choice = gr.Radio(
                    choices=["No Damage", "Minor Damage", "Major Damage", "Destroyed"],
                    label="Generate Sample Building",
                    value="No Damage"
                )
                btn_generate = gr.Button("🎨 Generate Sample Image", variant="secondary")
                gr.Markdown("### Or Upload Your Own Image")
                image_in = gr.Image(type="pil", label="Upload Building Image")
                threshold_slider = gr.Slider(
                    minimum=0.5, maximum=0.99, value=0.7, step=0.05,
                    label="Confidence Threshold"
                )
                btn_detect = gr.Button("πŸ” Detect Damage", variant="primary", size="lg")
            with gr.Column():
                image_out = gr.Image(type="pil", label="Detection Results")
                with gr.Row():
                    level_box = gr.Textbox(label="Predicted Damage Level", interactive=False)
                    score_box = gr.Textbox(label="Confidence Score", interactive=False)
        gr.Markdown("""
### πŸ“Š Model Performance Metrics
In the actual training notebooks, you'll achieve:
- **Accuracy**: 85-92% on validation set
- **F1-Score**: 0.82-0.89 (balanced across classes)
- **Inference Time**: ~50ms per image on GPU
### πŸŽ“ What You'll Learn
**Day 2**: Build basic CNNs for binary classification
**Day 3**: Implement production systems with:
- Multi-class classification (4 damage levels)
- Data augmentation for robustness
- Class balancing techniques
- Comprehensive evaluation metrics
""")

        # Radio labels in damage-level order; list position == DAMAGE_LEVELS key.
        level_names = ["No Damage", "Minor Damage", "Major Damage", "Destroyed"]

        def make_sample(choice):
            """Render a synthetic building image for the selected level."""
            return create_sample_building_image(level_names.index(choice))

        def run_detection(img, thresh):
            """Run the simulated classifier and format its outputs."""
            if img is None:
                return None, "No image provided", "N/A"
            annotated, level, score = simulate_damage_detection(img, thresh)
            return annotated, DAMAGE_LEVELS[level]["name"], f"{score:.2%}"

        btn_generate.click(
            fn=make_sample,
            inputs=[sample_choice],
            outputs=[image_in]
        )
        btn_detect.click(
            fn=run_detection,
            inputs=[image_in, threshold_slider],
            outputs=[image_out, level_box, score_box]
        )
def create_flood_mapping_tab():
    """Build the flood-segmentation demo tab (UI plus event wiring)."""
    with gr.Column():
        gr.Markdown("""
# 🌊 Flood Mapping with Semantic Segmentation
This demo simulates the **U-Net segmentation** system taught in **Days 4-5**.
## How it works:
1. **Upload** satellite imagery or generate a sample flood scenario
2. The U-Net model performs pixel-wise classification
3. **View** the flood extent map with percentage coverage and IoU scores
## Why Segmentation?
Unlike classification (which gives one label per image), **semantic segmentation**
provides a label for *every pixel*, enabling:
- Precise flood extent mapping
- Area calculations for affected regions
- Integration with GIS systems for rescue planning
""")
        with gr.Row():
            with gr.Column():
                coverage_slider = gr.Slider(
                    minimum=0, maximum=80, value=30, step=5,
                    label="Flood Coverage % (for sample generation)"
                )
                btn_make_scene = gr.Button("🌊 Generate Flood Scenario", variant="secondary")
                gr.Markdown("### Or Upload Satellite Imagery")
                satellite_in = gr.Image(type="pil", label="Upload Satellite Image")
                btn_segment = gr.Button("πŸ—ΊοΈ Map Flood Extent", variant="primary", size="lg")
            with gr.Column():
                segmentation_out = gr.Image(type="pil", label="Flood Segmentation Results")
                with gr.Row():
                    coverage_box = gr.Textbox(label="Flood Coverage %", interactive=False)
                    iou_box = gr.Textbox(label="IoU Score", interactive=False)
        gr.Markdown("""
### πŸ“Š U-Net Architecture
```
Encoder (Downsampling) Decoder (Upsampling)
↓ ↑
Conv β†’ Pool β†’ Conv β†’ Pool Upconv β†’ Concat β†’ Conv
↓ ↓ ↑
Bottleneck Skip Connections
```
**Key Innovation**: Skip connections preserve spatial information
### πŸ“ˆ Model Performance
- **IoU (Intersection over Union)**: 0.78-0.92
- **Dice Coefficient**: 0.85-0.94
- **Pixel Accuracy**: 88-95%
### πŸŽ“ What You'll Learn
**Days 4-5**: Implement U-Net for flood mapping
- Encoder-decoder architecture
- Skip connections for spatial preservation
- Custom loss functions (Dice loss, Focal loss)
- Post-processing techniques
### 🌍 Real-World Applications
- **Hurricane Harvey (2017)**: Mapped 300+ sq km of flooding
- **Kerala Floods (2018)**: Prioritized rescue operations
- **Mozambique Cyclone (2019)**: Infrastructure damage assessment
""")

        def run_segmentation(img):
            """Run the simulated segmenter and format its outputs."""
            if img is None:
                return None, "No image provided", "N/A"
            overlay, coverage, iou = simulate_flood_segmentation(img)
            return overlay, f"{coverage:.2f}%", f"{iou:.3f}"

        # The sample generator needs no wrapper: the slider value maps
        # directly onto create_flood_map_sample's single parameter.
        btn_make_scene.click(
            fn=create_flood_map_sample,
            inputs=[coverage_slider],
            outputs=[satellite_in]
        )
        btn_segment.click(
            fn=run_segmentation,
            inputs=[satellite_in],
            outputs=[segmentation_out, coverage_box, iou_box]
        )
def create_transfer_learning_tab():
    """Build the transfer-learning comparison tab (UI plus event wiring)."""
    with gr.Column():
        gr.Markdown("""
# πŸš€ Transfer Learning: Standing on the Shoulders of Giants
This demo illustrates the power of **transfer learning** taught in **Day 6**.
## Why Transfer Learning?
Training deep neural networks from scratch requires:
- ❌ Millions of labeled images
- ❌ Days/weeks of training time
- ❌ Expensive GPU resources
Transfer learning allows you to:
- βœ… Use pre-trained models (ImageNet, COCO, etc.)
- βœ… Train with 10x less data
- βœ… Achieve better accuracy in less time
## Available Pre-trained Models
""")
        with gr.Row():
            with gr.Column():
                backbone_choice = gr.Radio(
                    choices=["ResNet50", "VGG16", "MobileNetV2", "EfficientNetB0"],
                    label="Select Pre-trained Model",
                    value="ResNet50"
                )
                mode_choice = gr.Radio(
                    choices=["Feature Extraction", "Fine-Tuning"],
                    label="Transfer Learning Mode",
                    value="Feature Extraction"
                )
                size_slider = gr.Slider(
                    minimum=100, maximum=10000, value=1000, step=100,
                    label="Training Dataset Size"
                )
                btn_compare = gr.Button("πŸ“Š Compare Models", variant="primary", size="lg")
            with gr.Column():
                gr.Markdown("### πŸ“ˆ Model Comparison Results")
                results_md = gr.Markdown()
        gr.Markdown("""
### 🎯 Model Characteristics
| Model | Parameters | Speed | Accuracy | Best For |
|-------|-----------|-------|----------|----------|
| **ResNet50** | 25.6M | Medium | High | General purpose |
| **VGG16** | 138M | Slow | High | Feature extraction |
| **MobileNetV2** | 3.5M | Fast | Medium | Edge devices |
| **EfficientNetB0** | 5.3M | Medium | Very High | Best trade-off |
### πŸ”§ Two Approaches
**Feature Extraction**:
- Freeze pre-trained layers
- Only train final classifier
- ⚑ Fast training (minutes)
- Works with small datasets
**Fine-Tuning**:
- Unfreeze some layers
- Adjust pre-trained weights
- 🎯 Higher accuracy
- Needs more data & time
""")

        def run_comparison(model, mode, size):
            """Produce the simulated transfer-learning comparison Markdown.

            Accuracy/time figures are synthetic: per-backbone baselines are
            adjusted for mode and (log-scaled) dataset size.
            """
            # (baseline accuracy, training minutes) at a 1000-image dataset.
            profiles = {
                "ResNet50": (0.87, 45),
                "VGG16": (0.85, 80),
                "MobileNetV2": (0.82, 25),
                "EfficientNetB0": (0.90, 50),
            }
            accuracy, minutes_per_1k = profiles[model]
            fine_tuning = mode == "Fine-Tuning"
            if fine_tuning:
                accuracy += 0.03
            # Log-scale dataset bonus, capped at 98%.
            accuracy *= (1 + np.log10(size / 1000) * 0.05)
            accuracy = min(accuracy, 0.98)
            training_time = minutes_per_1k * (size / 1000)
            if fine_tuning:
                training_time *= 2.5
            # Synthetic from-scratch baseline for contrast.
            scratch_accuracy = accuracy - 0.15
            scratch_time = training_time * 5
            if not fine_tuning:
                insight = "**Feature Extraction** is ideal for this dataset size. You're freezing the convolutional base and only training the classification head, which is fast and prevents overfitting."
            else:
                insight = "**Fine-Tuning** can achieve higher accuracy but requires more data and training time. Consider this if you have >5000 labeled examples."
            return f"""
### πŸ† Results for {model} ({mode})
**Transfer Learning Performance**:
- βœ… **Accuracy**: {accuracy:.1%}
- ⏱️ **Training Time**: {training_time:.1f} minutes
- πŸ“Š **Dataset Size**: {size:,} images
**vs. Training from Scratch**:
- ⚠️ **Accuracy**: {scratch_accuracy:.1%} ({(accuracy - scratch_accuracy):.1%} worse)
- ⏱️ **Training Time**: {scratch_time:.1f} minutes ({scratch_time/training_time:.1f}x slower)
- πŸ“Š **Dataset Needed**: {size * 10:,} images (10x more data)
---
### πŸ’‘ Insights
{insight}
### πŸ“ˆ Improvement Over Baseline
Transfer learning gives you a **{(accuracy - scratch_accuracy) / scratch_accuracy * 100:.1f}% accuracy boost** while training **{scratch_time/training_time:.1f}x faster**!
"""

        btn_compare.click(
            fn=run_comparison,
            inputs=[backbone_choice, mode_choice, size_slider],
            outputs=[results_md]
        )
def create_deployment_tab():
    """Create the deployment and ethics tab.

    Static Markdown only: a two-column deployment-pipeline overview followed
    by the ethics principles and responsible-deployment checklist.
    """
    with gr.Column():
        gr.Markdown("""
# βš–οΈ Deployment Considerations & Responsible AI
Day 7 focuses on the **critical final step**: deploying AI systems that work reliably
in real disaster scenarios while maintaining ethical standards.
## πŸš€ Deployment Pipeline
""")
        with gr.Row():
            with gr.Column():
                gr.Markdown("""
### 1️⃣ Model Optimization
**TensorFlow Lite Conversion**:
```python
converter = tf.lite.TFLiteConverter.from_saved_model(model_path)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
```
**Benefits**:
- πŸ“‰ 4x smaller model size
- ⚑ 3x faster inference
- πŸ“± Runs on mobile/edge devices
### 2️⃣ Quantization
- **Float16**: 50% size reduction, minimal accuracy loss
- **Int8**: 75% size reduction, 1-2% accuracy loss
- **Dynamic Range**: Best balance for most cases
""")
            with gr.Column():
                gr.Markdown("""
### 3️⃣ Deployment Strategies
| Strategy | Latency | Scale | Cost | Best For |
|----------|---------|-------|------|----------|
| **Cloud** | 100-500ms | High | $$$ | Central processing |
| **Edge** | 10-50ms | Medium | $ | Field operations |
| **Hybrid** | Variable | Very High | $$ | Resilient systems |
### 4️⃣ Human-in-the-Loop
```python
if confidence < threshold:
    # Route to human expert
    queue_for_review(image, prediction)
else:
    # Auto-approve high-confidence predictions
    process_automatically(image, prediction)
```
""")
        gr.Markdown("""
---
## βš–οΈ Ethical AI Principles
### 🎯 Core Tenets for Disaster Response AI
**1. Transparency**
- Explain model decisions to stakeholders
- Document limitations and failure modes
- Make confidence scores visible
**2. Fairness & Bias**
- Test on diverse geographies and building types
- Avoid over-representing certain regions
- Monitor for demographic disparities in accuracy
**3. Privacy & Security**
- Protect personally identifiable information (PII)
- Secure data transmission and storage
- Comply with data protection regulations
**4. Accountability**
- Maintain audit trails
- Enable human oversight
- Design fail-safes for critical decisions
**5. Beneficence**
- Prioritize humanitarian impact
- Avoid dual-use concerns
- Share knowledge with disaster response community
---
## πŸ›‘οΈ Responsible Deployment Checklist
Before deploying your disaster management AI system:
- [ ] **Validation**: Test on held-out disaster events
- [ ] **Robustness**: Evaluate on different sensors, seasons, lighting
- [ ] **Calibration**: Ensure confidence scores reflect true accuracy
- [ ] **Monitoring**: Set up performance tracking in production
- [ ] **Fallback**: Design graceful degradation when AI fails
- [ ] **Documentation**: Create user guides for non-technical operators
- [ ] **Ethics Review**: Assess potential harms and mitigation strategies
- [ ] **Stakeholder Buy-in**: Get approval from response teams who'll use it
---
## πŸ’­ Final Reflection
> *"The best AI model is one that helps people, not one that achieves
> the highest accuracy on a benchmark."*
In disaster management, a model that:
- βœ… Works reliably in the field
- βœ… Integrates with human workflows
- βœ… Earns trust through transparency
- βœ… Fails gracefully
...is far more valuable than one with perfect test set performance.
**This curriculum emphasizes practical deployment, ethical considerations,
and human oversightβ€”because when lives are at stake, responsible AI is
the only acceptable approach.**
""")
def create_resources_tab():
    """Create the resources and documentation tab (single static Markdown)."""
    with gr.Column():
        gr.Markdown("""
# πŸ“š Resources & Further Learning
## πŸŽ“ Course Materials
All notebooks are available in this repository:
- `00_Setup_Check.ipynb` - Environment verification
- `01_Intro_to_AI_and_Imagery.ipynb` - Image fundamentals
- `02_Image_Classification_CNN_Basics.ipynb` - CNN basics
- `03_End_to_End_Workflow_Damage_Detection.ipynb` - Production systems
- `04_Semantic_Segmentation_Flood_Mapping.ipynb` - U-Net segmentation
- `05_Transfer_Learning_for_Efficiency.ipynb` - Transfer learning
- `06_Deployment_Considerations.ipynb` - Deployment & ethics
---
## πŸ“Š Datasets for Practice
### Building Damage Assessment
- **xView2 (xBD)**: 850,000+ building annotations across multiple disaster types
- [https://xview2.org/](https://xview2.org/)
- **RescueNet**: Damaged buildings from natural disasters
### Flood Mapping
- **SEN12-FLOOD**: Global flood detection dataset
- **FloodNet**: High-resolution flood imagery
### Satellite Imagery
- **Sentinel-2**: Free 10m resolution multispectral imagery
- [https://scihub.copernicus.eu/](https://scihub.copernicus.eu/)
- **Landsat**: Historical satellite archive (30m resolution)
- **Planet Labs**: High-frequency imaging (commercial)
### Geospatial Data
- **OpenStreetMap**: Building footprints, roads, infrastructure
- **USGS Earth Explorer**: Topography and elevation data
---
## πŸŽ₯ Online Courses
### Deep Learning Fundamentals
- **fast.ai**: Practical Deep Learning for Coders
- **Stanford CS231n**: Convolutional Neural Networks for Visual Recognition
- **Coursera Deeplearning.ai**: Deep Learning Specialization (Andrew Ng)
### Computer Vision
- **PyImageSearch**: Extensive CV tutorials and courses
- **Udacity**: Computer Vision Nanodegree
### Geospatial Analysis
- **NASA ARSET**: Applied Remote Sensing Training
- **Geo For Good**: Google Earth Engine tutorials
---
## 🏒 Organizations & Communities
### Humanitarian Tech
- **Humanitarian OpenStreetMap Team (HOT)**: Community mapping for disasters
- **Missing Maps**: Mapping vulnerable places before disasters
- **UN OCHA Centre for Humanitarian Data**: Data-driven disaster response
### Space Agencies
- **European Space Agency (ESA)**: Copernicus Emergency Management Service
- **NASA Disasters Program**: Earth observation for disaster response
- **JAXA**: Asian disaster monitoring
### Research Initiatives
- **AI for Good**: UN initiative for AI in humanitarian applications
- **Data Science for Social Good**: Fellowship programs
---
## πŸ“– Academic Papers
### Building Damage Detection
- **"xBD: A Dataset for Assessing Building Damage from Satellite Imagery"** (Gupta et al., 2019)
- **"RescueNet: Joint Building Segmentation and Damage Detection"** (Weber & KanΓ©, 2020)
### Semantic Segmentation
- **"U-Net: Convolutional Networks for Biomedical Image Segmentation"** (Ronneberger et al., 2015)
- **"DeepLabv3+: Encoder-Decoder with Atrous Separable Convolution"** (Chen et al., 2018)
### Transfer Learning
- **"A Survey on Transfer Learning"** (Pan & Yang, 2010)
- **"EfficientNet: Rethinking Model Scaling for CNNs"** (Tan & Le, 2019)
### AI for Disaster Management
- **"Artificial Intelligence for Humanitarian Assistance and Disaster Response"** (Sun et al., 2020)
- **"Deep Learning for Multi-temporal Satellite Image Change Detection"** (Daudt et al., 2018)
---
## πŸ› οΈ Software & Tools
### Deep Learning Frameworks
- **TensorFlow / Keras**: Industry-standard framework
- **PyTorch**: Research-friendly framework
- **FastAI**: High-level library built on PyTorch
### Geospatial Libraries
- **Rasterio**: Read/write geospatial raster data
- **GeoPandas**: Geospatial data manipulation
- **GDAL**: Geospatial data abstraction library
- **Shapely**: Geometric operations
### Visualization
- **Matplotlib / Seaborn**: Statistical visualization
- **Folium**: Interactive maps
- **Plotly**: Interactive plots
### Deployment
- **Gradio**: Rapid ML demos (used for this app!)
- **Streamlit**: Data app framework
- **TensorFlow Serving**: Production ML serving
- **ONNX**: Model interoperability
---
## πŸ“ž Getting Help
- **GitHub Issues**: [Report bugs or request features](https://github.com/AI4DM/Geospatial-AI-for-Humanitarian-Response/issues)
- **Stack Overflow**: Use tags `tensorflow`, `computer-vision`, `geospatial`
- **Reddit**: r/MachineLearning, r/learnmachinelearning, r/gis
---
## πŸ“œ Citation
If you use this curriculum in your research or teaching:
```bibtex
@misc{ai4dm_nato_asi_2025,
title={AI for Disaster Management: NATO ASI Curriculum},
author={Bulent Soykan},
year={2025},
publisher={GitHub},
url={https://github.com/AI4DM/Geospatial-AI-for-Humanitarian-Response}
}
```
---
## 🌟 Stay Connected
- ⭐ **Star** this repository for updates
- πŸ‘€ **Watch** for new releases
- 🍴 **Fork** to customize for your courses
**Built with ❀️ for humanitarian AI applications**
""")
# ============================================================================
# MAIN APPLICATION
# ============================================================================
def create_app():
    """Assemble and return the full Gradio Blocks application."""
    # (tab label, builder function) pairs, in display order.
    tab_specs = [
        ("🏠 Welcome", create_welcome_tab),
        ("πŸ“š Curriculum", create_curriculum_tab),
        ("πŸ—οΈ Damage Detection", create_damage_detection_tab),
        ("🌊 Flood Mapping", create_flood_mapping_tab),
        ("πŸš€ Transfer Learning", create_transfer_learning_tab),
        ("βš–οΈ Deployment & Ethics", create_deployment_tab),
        ("πŸ“š Resources", create_resources_tab),
    ]
    with gr.Blocks(
        title="NATO ASI - AI for Disaster Management"
    ) as demo:
        gr.Markdown("""
<div style="text-align: center; padding: 20px;">
<h1>🌍 NATO Advanced Study Institute</h1>
<h2>AI for Disaster Management: Interactive Learning Platform</h2>
<p style="font-size: 18px; color: #666;">
A comprehensive hands-on curriculum on Geospatial AI for Humanitarian Response
</p>
</div>
""")
        with gr.Tabs():
            for label, build_tab in tab_specs:
                with gr.Tab(label):
                    build_tab()
        gr.Markdown("""
---
<div style="text-align: center; padding: 20px; color: #666;">
<p><strong>Built with ❀️ for humanitarian AI applications</strong></p>
<p><em>Making the world more resilient to disasters, one model at a time.</em></p>
<p>Β© 2025 NATO Advanced Study Institute | Developed by Bulent Soykan</p>
</div>
""")
    return demo
if __name__ == "__main__":
    # Bind on all interfaces so the app is reachable inside containers/Spaces.
    create_app().launch(server_name="0.0.0.0", server_port=7860, ssr_mode=False)