# (Removed hosting-page residue: a "Spaces:" header and duplicated "Runtime error"
# status banners captured from the Hugging Face Spaces UI — not part of the source.)
# app.py
import math
import os
import random

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
import torch.nn as nn
from sklearn.metrics import (
    auc,
    confusion_matrix,
    precision_recall_fscore_support,
    roc_auc_score,
    roc_curve,
)
# ====================================================
# 1. SETUP
# ====================================================
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Directory holding the trained model weights (*.pth files).
ARTIFACT_DIR = "./artifacts"

# === Hardcoded preprocessing details (must match the training notebook) ===
WINDOW_SIZE = 48   # samples per sliding window
STRIDE = 5         # window stride used during training
DT = 1.0           # sampling interval
in_channels = 9    # features per timestep

# Identity normalisation (mean 0, std 1) — presumably stand-ins for the real
# training statistics; TODO confirm against the training notebook.
mean = np.zeros(in_channels)
std = np.ones(in_channels)
# ====================================================
# 2. MODEL DEFINITIONS (copy from your training code)
# ====================================================
class PositionalEncoding(nn.Module):
    """Additive sinusoidal positional encoding (Transformer-style).

    Precomputes a (1, max_len, d_model) table of interleaved sin/cos values
    and adds the first ``seq_len`` rows to the input. ``d_model`` is assumed
    even (true for every model in this file).
    """

    def __init__(self, d_model, max_len=1000):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        pos = torch.arange(0, max_len).unsqueeze(1)
        div = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(pos * div)  # even indices: sin
        pe[:, 1::2] = torch.cos(pos * div)  # odd indices: cos
        # Fix: register as a non-persistent buffer so ``module.to(device)``
        # moves the table with the parameters (the plain attribute stayed on
        # CPU and was copied to the device on every forward). persistent=False
        # keeps it out of state_dict, so existing checkpoints still load.
        self.register_buffer("pe", pe.unsqueeze(0), persistent=False)

    def forward(self, x):
        """x: (batch, seq, d_model) -> same shape, with encoding added."""
        # .to(x.device) kept as a safety net for inputs on another device.
        return x + self.pe[:, :x.size(1)].to(x.device)
class TransformerForecaster(nn.Module):
    """Transformer encoder that forecasts the next ``pred_len`` (x, y) steps,
    emitting a per-step Gaussian: a mean and a log-variance head.

    Submodule attribute names are preserved so trained state_dicts load.
    """

    def __init__(self, in_channels=9, d_model=192, nhead=4, num_layers=2, pred_len=8):
        super().__init__()
        self.pred_len = pred_len
        self.input_linear = nn.Linear(in_channels, d_model)
        self.pos_enc = PositionalEncoding(d_model)
        layer = nn.TransformerEncoderLayer(
            d_model, nhead, d_model * 2, batch_first=True
        )
        self.enc = nn.TransformerEncoder(layer, num_layers=num_layers)
        self.out_mean = nn.Linear(d_model, pred_len * 2)
        self.out_logvar = nn.Linear(d_model, pred_len * 2)

    def forward(self, x):
        """x: (batch, seq, in_channels) -> (mu, logvar), each (batch, pred_len, 2)."""
        encoded = self.enc(self.pos_enc(self.input_linear(x)))
        summary = encoded[:, -1, :]  # summarise the window by its final timestep
        mu = self.out_mean(summary).view(-1, self.pred_len, 2)
        logvar = self.out_logvar(summary).view(-1, self.pred_len, 2)
        return mu, logvar
class ConvAutoencoder(nn.Module):
    """1-D convolutional autoencoder; anomaly score = reconstruction error.

    I/O layout is (batch, seq_len, channels); data is transposed to
    (batch, channels, seq_len) internally for Conv1d. The ``enc``/``dec``
    Sequential layouts are preserved so trained state_dicts load.
    """

    def __init__(self, in_channels=9, latent=128, seq_len=48):
        super().__init__()
        self.in_channels = in_channels
        self.seq_len = seq_len
        # Encoder: two convs, global average pool, project to latent code.
        self.enc = nn.Sequential(
            nn.Conv1d(in_channels, 64, 3, padding=1), nn.ReLU(),
            nn.Conv1d(64, 128, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool1d(1), nn.Flatten(),
            nn.Linear(128, latent), nn.ReLU(),
        )
        # Decoder: MLP straight back to the flattened window.
        self.dec = nn.Sequential(
            nn.Linear(latent, 128), nn.ReLU(),
            nn.Linear(128, in_channels * seq_len),
        )

    def forward(self, x):
        """x: (batch, seq_len, channels) -> reconstruction, same shape."""
        code = self.enc(x.transpose(1, 2))
        flat = self.dec(code)
        return flat.view(-1, self.in_channels, self.seq_len).transpose(1, 2)
class CNNClassifier(nn.Module):
    """Small 1-D CNN producing per-window class logits.

    The ``net`` Sequential layout is preserved so trained state_dicts load.
    """

    def __init__(self, in_channels=9, num_classes=2):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv1d(in_channels, 64, 3, padding=1),
            nn.ReLU(),
            nn.Conv1d(64, 128, 3, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool1d(1),
            nn.Flatten(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, num_classes),
        )

    def forward(self, x):
        """x: (batch, seq, channels) -> logits of shape (batch, num_classes)."""
        # Transpose to (batch, channels, seq) as Conv1d expects.
        return self.net(x.transpose(1, 2))
# ====================================================
# 3. LOAD MODELS (use your trained weights)
# ====================================================
modelA = TransformerForecaster()
modelB = ConvAutoencoder()
modelC = CNNClassifier()

# Load checkpoints. weights_only=True restricts unpickling to tensors and
# plain containers — all a state_dict needs — closing the arbitrary-code
# pickle-loading hole (requires torch >= 1.13).
for _model, _fname in (
    (modelA, "modelA_transformer.pth"),
    (modelB, "modelB_ae.pth"),
    (modelC, "modelC_cls.pth"),
):
    _path = os.path.join(ARTIFACT_DIR, _fname)
    if not os.path.exists(_path):
        # Fail fast with a clear message instead of an opaque unpickling error.
        raise FileNotFoundError(f"Missing model checkpoint: {_path}")
    _model.load_state_dict(torch.load(_path, map_location=device, weights_only=True))
    _model.to(device).eval()
# ====================================================
# 4. SCORING FUNCTIONS
# ====================================================
def score_transformer(model, X):
    """Gaussian negative log-likelihood of the window tail under model A.

    Higher score = the forecaster is more surprised = more anomalous.
    Returns a numpy array with one score per window.
    """
    with torch.no_grad():
        batch = torch.tensor(X, dtype=torch.float32, device=device)
        mu, logvar = model(batch)
        # Ground truth: the (x, y) coordinates of the last pred_len steps.
        target = batch[:, -model.pred_len:, :2]
        nll = 0.5 * (logvar + (target - mu) ** 2 / (torch.exp(logvar) + 1e-9))
        return nll.sum(dim=(1, 2)).cpu().numpy()
def score_autoencoder(model, X):
    """Per-window mean squared reconstruction error from model B (numpy array)."""
    with torch.no_grad():
        batch = torch.tensor(X, dtype=torch.float32, device=device)
        err = model(batch) - batch
        return err.pow(2).mean(dim=(1, 2)).cpu().numpy()
def score_classifier_prob(model, X):
    """Softmax probability of class 1 (anomalous) from model C (numpy array)."""
    with torch.no_grad():
        batch = torch.tensor(X, dtype=torch.float32, device=device)
        return torch.softmax(model(batch), dim=1)[:, 1].cpu().numpy()
# ====================================================
# 5. VISUAL FUNCTIONS
# ====================================================
def plot_confusion_matrix(cm):
    """Render a confusion matrix as an annotated heatmap.

    Args:
        cm: square integer confusion matrix (rows = true, cols = predicted).
    Returns:
        The matplotlib Figure, for display via gr.Plot.
    """
    fig, ax = plt.subplots()
    # Fix: draw explicitly on this figure's axes (ax=ax) instead of relying
    # on pyplot's implicit "current axes" global state.
    sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", cbar=False, ax=ax)
    ax.set_xlabel("Predicted")
    ax.set_ylabel("True")
    fig.tight_layout()
    return fig
def plot_roc_curve(y_true, scores):
    """Plot the ROC curve with its AUC, plus the chance diagonal.

    Returns the matplotlib Figure, for display via gr.Plot.
    """
    fpr, tpr, _ = roc_curve(y_true, scores)
    area = auc(fpr, tpr)
    fig, ax = plt.subplots()
    ax.plot(fpr, tpr, label=f"AUC = {area:.3f}")
    ax.plot([0, 1], [0, 1], "--", color="gray")
    ax.legend()
    fig.tight_layout()
    return fig
# ====================================================
# 6. SIMULATION FUNCTION
# ====================================================
def _synth_dataset(n=300, T=48, anomaly_rate=0.3):
    """Generate ``n`` noisy circular trajectories; ~``anomaly_rate`` of them
    get a sudden positional jump. Returns (X, y): X of shape (n, T, 9) with
    the 9 kinematic feature channels, y the 0/1 anomaly labels."""
    X, y = [], []
    for _ in range(n):
        t = np.linspace(0, 2 * np.pi, T)
        xs = np.cos(t) + np.random.randn(T) * 0.05
        ys = np.sin(t) + np.random.randn(T) * 0.05
        w = np.stack([xs, ys], axis=1)
        if random.random() < anomaly_rate:
            # Inject an anomaly: shift the tail of the path by a random offset.
            idx = random.randint(10, T - 10)
            w[idx:] += np.random.randn() * 0.8
            y.append(1)
        else:
            y.append(0)
        # Kinematic features derived from the (x, y) path.
        dx = np.vstack([np.zeros(2), w[1:] - w[:-1]])
        speed = np.linalg.norm(dx, axis=1)
        acc = np.concatenate([[0.0], np.diff(speed)])
        heading = np.arctan2(dx[:, 1], dx[:, 0])
        turn = np.concatenate([[0.0], np.diff(heading)])
        mask = np.zeros(T)  # all-zero "mask" channel, matching training layout
        feat = np.stack(
            [w[:, 0], w[:, 1], dx[:, 0], dx[:, 1], speed, acc, heading, turn, mask],
            axis=1,
        )
        X.append(feat)
    return np.array(X, dtype=np.float32), np.array(y, dtype=np.int64)


def simulate(model_choice, w1, w2):
    """Run one synthetic evaluation round.

    Args:
        model_choice: "Transformer" | "Autoencoder" | "Classifier" | anything
            else (e.g. "Ensemble") = weighted combination of all three.
        w1, w2: ensemble weights for the transformer and autoencoder scores;
            the classifier gets max(0, 1 - w1 - w2).
    Returns:
        (accuracy, precision, recall, f1, roc_auc) as formatted strings,
        plus the confusion-matrix and ROC figures.
    """
    X, y = _synth_dataset(n=300, T=WINDOW_SIZE)
    X_norm = (X - mean[None, None, :]) / (std[None, None, :] + 1e-9)

    if model_choice == "Transformer":
        scores = score_transformer(modelA, X_norm)
    elif model_choice == "Autoencoder":
        scores = score_autoencoder(modelB, X_norm)
    elif model_choice == "Classifier":
        scores = score_classifier_prob(modelC, X_norm)
    else:  # Ensemble: weighted sum; remaining weight mass goes to the classifier.
        sA = score_transformer(modelA, X_norm)
        sB = score_autoencoder(modelB, X_norm)
        sC = score_classifier_prob(modelC, X_norm)
        w3 = max(0.0, 1 - w1 - w2)
        scores = w1 * sA + w2 * sB + w3 * sC

    # Standardise the scores, then flag the top 15% as anomalies.
    scores = (scores - scores.mean()) / (scores.std() + 1e-9)
    thr = np.percentile(scores, 85)
    preds = (scores > thr).astype(int)

    # Fix: zero_division=0 silences the ill-defined-precision warning (and
    # pins the value) when the thresholded predictions contain no positives.
    p, r, f, _ = precision_recall_fscore_support(
        y, preds, average="binary", zero_division=0
    )
    acc = (preds == y).mean()
    aucv = roc_auc_score(y, scores)
    cm = confusion_matrix(y, preds)
    return (
        f"{acc:.3f}", f"{p:.3f}", f"{r:.3f}", f"{f:.3f}", f"{aucv:.3f}",
        plot_confusion_matrix(cm), plot_roc_curve(y, scores),
    )
# ====================================================
# 7. GRADIO UI
# ====================================================
with gr.Blocks(title="Victim Localisation") as demo:
    gr.Markdown("## Victim Localisation and Rescue Support — Deep Learning Demo")

    # Controls.
    model_choice = gr.Radio(
        ["Transformer", "Autoencoder", "Classifier", "Ensemble"], value="Ensemble"
    )
    w1 = gr.Slider(0.0, 1.0, 0.5, step=0.1, label="Transformer weight")
    w2 = gr.Slider(0.0, 1.0, 0.3, step=0.1, label="Autoencoder weight")
    run_btn = gr.Button("Simulate")

    # Metric read-outs.
    acc_box = gr.Textbox(label="Accuracy")
    prec_box = gr.Textbox(label="Precision")
    rec_box = gr.Textbox(label="Recall")
    f1_box = gr.Textbox(label="F1-score")
    auc_box = gr.Textbox(label="ROC-AUC")

    # Figures.
    cm_plot = gr.Plot(label="Confusion Matrix")
    roc_plot = gr.Plot(label="ROC Curve")

    run_btn.click(
        simulate,
        inputs=[model_choice, w1, w2],
        outputs=[acc_box, prec_box, rec_box, f1_box, auc_box, cm_plot, roc_plot],
    )

demo.launch()