Spaces: Sleeping
| # app.py | |
| import os | |
| import time | |
| from PIL import Image | |
| import torch | |
| import torch.nn as nn | |
| import numpy as np | |
| import pandas as pd | |
| import gradio as gr | |
| from torchvision import transforms | |
| # Reference to original uploaded files (for your review) | |
| ORIGINAL_SCRIPT_1 = "/mnt/data/final_hackethon_project.py" | |
| ORIGINAL_SCRIPT_2 = "/mnt/data/flood_risk_dashboard_Version1.py" | |
| # ---------------- Model (same as streamlit version) ---------------- | |
class MultimodalFloodModel(nn.Module):
    """Fuse tabular weather readings with a CNN image embedding into one flood score.

    Submodule names (``numeric_branch``, ``image_branch``, ``fusion``) and the
    layer order inside each ``nn.Sequential`` are part of the checkpoint
    contract — ``load_state_dict`` keys depend on them.
    """

    def __init__(self, num_numeric_features=4, image_feature_dim=64):
        super().__init__()
        # MLP over the numeric (tabular) inputs -> 16-dim feature vector.
        self.numeric_branch = nn.Sequential(
            nn.Linear(num_numeric_features, 32),
            nn.ReLU(),
            nn.Linear(32, 16),
            nn.ReLU(),
        )
        # Small CNN: three conv/pool stages take 128x128 down to 16x16,
        # then flatten and project to image_feature_dim.
        self.image_branch = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2, 2),
            nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2, 2),
            nn.Flatten(),
            nn.Linear(64 * 16 * 16, image_feature_dim),
            nn.ReLU(),
        )
        # Fusion head: concatenated features -> single sigmoid probability.
        self.fusion = nn.Sequential(
            nn.Linear(16 + image_feature_dim, 64),
            nn.ReLU(),
            nn.Linear(64, 1),
            nn.Sigmoid(),
        )

    def forward(self, numeric_input, image_input):
        """Return a (batch, 1) flood probability in [0, 1]."""
        tabular = self.numeric_branch(numeric_input)
        visual = self.image_branch(image_input)
        fused = torch.cat([tabular, visual], dim=1)
        return self.fusion(fused)
| # ---------------- Load model if present ---------------- | |
# ---------------- Load model if present ----------------
# Weights are optional: if the file is missing or fails to load, real_model
# stays None and predict_fn falls back to the heuristic.
MODEL_PATH = "models/flood_model.pth"  # expected checkpoint location
device = torch.device("cpu")  # CPU-only inference
real_model = None
if os.path.exists(MODEL_PATH):
    try:
        real_model = MultimodalFloodModel(num_numeric_features=4).to(device)
        real_model.load_state_dict(torch.load(MODEL_PATH, map_location=device))
        real_model.eval()  # disable dropout/batchnorm training behavior
        print("Loaded model:", MODEL_PATH)
    except Exception as e:
        # Broad catch is deliberate: a corrupt/incompatible checkpoint should
        # degrade to the heuristic, not crash app startup.
        print("Model load failed:", e)
        real_model = None
else:
    print("No model found at", MODEL_PATH, "- using heuristic")
| # ---------------- Heuristic predictor ---------------- | |
| def heuristic_predict(numeric_input): | |
| rainfall = float(numeric_input.get("rainfall_24h", 0)) | |
| river = float(numeric_input.get("river_level", 0)) | |
| humidity = float(numeric_input.get("humidity", 0)) | |
| score = 0.0 | |
| score += min(rainfall / 200.0, 1.0) * 0.6 | |
| score += min(river / 10.0, 1.0) * 0.3 | |
| score += min(humidity / 100.0, 1.0) * 0.1 | |
| return float(score) | |
| # ---------------- Utilities ---------------- | |
# ---------------- Utilities ----------------
# Preprocessing for uploaded photos: resize to the 128x128 input the CNN
# branch expects, then convert to a CHW float tensor in [0, 1].
image_transforms = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.ToTensor()
])
def get_risk_recommendations(risk_level):
    """Return the advisory message and preparedness resources for a risk tier.

    Args:
        risk_level: "High", "Medium" or "Low"; any other value falls back
            to the "Low" entry.

    Returns:
        dict with a "message" string and a "resources" list of strings.
    """
    low_tier = {
        "message": "Low risk. Continue normal monitoring.",
        "resources": [
            "Stay updated with local alerts",
            "Check drainage systems and clear blockages",
            "Keep emergency contacts handy",
        ],
    }
    by_tier = {
        "High": {
            "message": "Immediate action required. High risk of severe flooding.",
            "resources": [
                "Evacuation routes and shelters",
                "Emergency contact list (police, fire, medical)",
                "Pre-packed Go-Bags with essentials",
            ],
        },
        "Medium": {
            "message": "Monitor conditions closely. Prepare to act quickly.",
            "resources": [
                "Sandbags and property protection measures",
                "Move valuables to higher floors",
                "Secure outdoor items",
            ],
        },
        "Low": low_tier,
    }
    return by_tier.get(risk_level, low_tier)
def get_resource_allocation_df(risk_level):
    """Return the full per-tier resource allocation table as a DataFrame.

    The whole High/Medium/Low table is returned regardless of
    *risk_level*; the parameter only mirrors the caller's signature.
    The index is named "Risk".
    """
    tiers = ["High", "Medium", "Low"]
    columns = ["Water (Liters)", "Food (Kits)", "Medical (Kits)", "Rescue Teams"]
    quantities = [
        [10000, 500, 150, 10],
        [3000, 150, 50, 3],
        [500, 20, 5, 0],
    ]
    table = pd.DataFrame(quantities, index=tiers, columns=columns)
    table.index.name = "Risk"
    return table
| # ---------------- Prediction wrapper ---------------- | |
def _risk_label(prob):
    """Map a probability to a risk tier (shared thresholds: >0.6 High, >0.35 Medium)."""
    if prob > 0.6:
        return "High"
    if prob > 0.35:
        return "Medium"
    return "Low"


def _build_response(prob, label, uploaded_image):
    """Assemble the 6-tuple the Gradio callback expects:
    (probability, label, message, resources text, allocation df, echoed image).
    """
    recs = get_risk_recommendations(label)
    df = get_resource_allocation_df(label)
    return (prob, label, recs["message"], "\n".join(recs["resources"]), df, uploaded_image)


# ---------------- Prediction wrapper ----------------
def predict_fn(rainfall, river_level, temperature, humidity, uploaded_image, manual_risk):
    """Predict flood risk and suggested resources.

    Resolution order:
      1. If *manual_risk* is a non-empty label, use it verbatim with a
         canned probability (High=1.0, Medium=0.5, otherwise 0.1).
      2. Else, if trained weights were loaded, run the multimodal model
         (an all-zeros 128x128 image stands in when none is uploaded).
      3. Else — or if inference raises — fall back to the heuristic.

    Returns:
        (prob, label, message, resources, allocation_df, uploaded_image)
    """
    numeric = {
        "rainfall_24h": rainfall,
        "river_level": river_level,
        "temperature": temperature,
        "humidity": humidity,
    }

    # Manual override skips the model entirely.
    if manual_risk:
        prob = 1.0 if manual_risk == "High" else (0.5 if manual_risk == "Medium" else 0.1)
        return _build_response(prob, manual_risk, uploaded_image)

    # Try the trained model when available.
    if real_model is not None:
        try:
            numeric_tensor = torch.tensor(
                [[
                    float(numeric["rainfall_24h"]),
                    float(numeric["river_level"]),
                    float(numeric["temperature"]),
                    float(numeric["humidity"]),
                ]],
                dtype=torch.float32,
            ).to(device)
            if uploaded_image is not None:
                # Gradio may hand us a numpy array or a file path.
                if isinstance(uploaded_image, np.ndarray):
                    img = Image.fromarray(uploaded_image).convert('RGB')
                else:
                    img = Image.open(uploaded_image).convert('RGB')
                image_tensor = image_transforms(img).unsqueeze(0).to(device)
            else:
                # No photo supplied: feed a black image so the CNN branch
                # contributes a fixed baseline.
                image_tensor = torch.zeros((1, 3, 128, 128), dtype=torch.float32).to(device)
            with torch.no_grad():
                prob = float(real_model(numeric_tensor, image_tensor).item())
            return _build_response(prob, _risk_label(prob), uploaded_image)
        except Exception as e:
            # Deliberate best-effort: any inference failure degrades to the
            # heuristic instead of surfacing an error in the UI.
            print("Model inference failed, falling back to heuristic:", e)

    # Heuristic fallback.
    prob = heuristic_predict(numeric)
    return _build_response(prob, _risk_label(prob), uploaded_image)
| # ---------------- Gradio UI ---------------- | |
# ---------------- Gradio UI ----------------
# Two-column layout: inputs + predict button on the left, model outputs on
# the right. The click handler wraps predict_fn and converts the pandas
# DataFrame into a plain dict for the gr.Dataframe component.
with gr.Blocks(title="Flood Risk & Resource Allocation Assistant") as demo:
    gr.Markdown("## 🌊 Flood Risk & Resource Allocation Assistant")
    with gr.Row():
        with gr.Column(scale=1):
            # Input widgets; defaults are plausible mid-range readings.
            rainfall = gr.Number(label="Rainfall (24h) — mm", value=20.0)
            river_level = gr.Number(label="River level — m", value=1.2)
            temperature = gr.Number(label="Temperature — °C", value=30.0)
            humidity = gr.Number(label="Humidity — %", value=60.0)
            # type="filepath": predict_fn receives a path string, not an array.
            uploaded_image = gr.Image(type="filepath", label="Upload image (optional)")
            # Empty string = no override; predict_fn treats "" as falsy.
            manual_risk = gr.Dropdown(choices=["", "Low", "Medium", "High"], value="", label="Manual Risk (optional)")
            predict_btn = gr.Button("Predict Risk & Suggest Resources")
        with gr.Column(scale=1):
            # Read-only output widgets, in the same order predict_fn returns.
            prob_out = gr.Number(label="Predicted Probability", interactive=False)
            label_out = gr.Textbox(label="Risk Label", interactive=False)
            message_out = gr.Textbox(label="Recommendation Message", interactive=False)
            resources_out = gr.Textbox(label="Resources (line-separated)", interactive=False)
            allocation_out = gr.Dataframe(headers=["Water (Liters)", "Food (Kits)", "Medical (Kits)", "Rescue Teams"], datatype=["number","number","number","number"])
            image_out = gr.Image(label="Uploaded Image", interactive=False)

    def run_and_display(rainfall, river_level, temperature, humidity, uploaded_image, manual_risk):
        # Thin adapter between predict_fn's tuple and the Gradio components.
        prob, label, message, resources, df, img = predict_fn(rainfall, river_level, temperature, humidity, uploaded_image, manual_risk)
        # gr requires dataframe or list-of-lists
        return prob, label, message, resources, df.reset_index().to_dict(orient="list"), img

    predict_btn.click(fn=run_and_display, inputs=[rainfall, river_level, temperature, humidity, uploaded_image, manual_risk],
                      outputs=[prob_out, label_out, message_out, resources_out, allocation_out, image_out])
    gr.Markdown("---")
    gr.Markdown("**Notes:** Place trained weights at `models/flood_model.pth` to enable model inference. Original scripts: " + ORIGINAL_SCRIPT_1 + " , " + ORIGINAL_SCRIPT_2)

if __name__ == "__main__":
    demo.launch()