AbhayVG committed on
Commit
e2151fd
·
verified ·
1 Parent(s): b549dd4

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +96 -0
app.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# --- Dependencies ---------------------------------------------------------
# Standard library
from io import BytesIO

# Third-party
import requests
import streamlit as st
import torch
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from PIL import Image

# --- Streamlit page setup -------------------------------------------------
st.set_page_config(page_title="Adversarial Self-Driving Test", layout="centered")

# Header and intro copy shown at the top of the app.
st.title("💥 Adversarial Self-Driving Car Tester")
st.markdown("Upload a traffic sign, and we'll **confuse the AI model** into causing a virtual accident!")
# Load model + labels.
# NOTE(review): this block runs on every Streamlit rerun; if reload latency
# becomes a problem, wrap it in st.cache_resource (available in recent
# Streamlit versions — confirm the deployed version first).
model = torchvision.models.resnet18(pretrained=True)  # ImageNet-pretrained classifier
model.eval()  # inference mode: freeze batch-norm / dropout behavior

LABELS_URL = "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt"
# Fetch the 1000 ImageNet class names. A timeout prevents the app from
# hanging forever on a dead network, and raise_for_status() surfaces an
# HTTP error instead of silently splitting an error page into "labels".
_labels_resp = requests.get(LABELS_URL, timeout=10)
_labels_resp.raise_for_status()
labels = _labels_resp.text.strip().split("\n")
# Preprocessing: resize to the 224x224 input ResNet-18 expects, then convert
# to a float tensor. (No ImageNet mean/std normalization is applied here —
# pixel values stay in [0, 1].)
_preprocess_steps = [
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
]
transform = transforms.Compose(_preprocess_steps)
# Choose image source: a bundled default stop sign or a user upload.
# Either path leaves `image` as an RGB PIL image, or None if nothing is ready.
use_default = st.checkbox("🚦 Use Default Stop Sign")
if use_default:
    img_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/6/6b/Stop_sign_light_red.svg/512px-Stop_sign_light_red.svg.png"
    try:
        # Timeout so the UI doesn't hang indefinitely on a dead network;
        # raise_for_status() turns an HTTP error into an exception we handle.
        resp = requests.get(img_url, timeout=10)
        resp.raise_for_status()
        image = Image.open(BytesIO(resp.content)).convert('RGB')
    except (requests.RequestException, OSError) as exc:
        # Show a friendly error instead of a raw traceback in the app.
        st.error(f"Could not download the default image: {exc}")
        image = None
    else:
        st.image(image, caption="Stop Sign", use_column_width=True)
else:
    uploaded_file = st.file_uploader("📷 Upload a traffic sign image", type=["jpg", "jpeg", "png"])
    if uploaded_file:
        image = Image.open(uploaded_file).convert('RGB')
        st.image(image, caption="Uploaded Image", use_column_width=True)
    else:
        image = None
# Attack strength: the maximum per-pixel change (L-infinity budget) for FGSM.
epsilon = st.slider("⚡ Perturbation Strength (epsilon)", 0.001, 0.1, 0.01, step=0.001)

# Candidate (class-id, display-name) pairs the user can aim the attack at.
_TARGET_OPTIONS = [
    (919, "Stop Sign"),
    (717, "Speed Limit 60"),
    (718, "Speed Limit 80"),
    (400, "Speedboat (LOL why?)"),
]
target_class = st.selectbox(
    "🎯 Confuse the model into predicting:",
    options=_TARGET_OPTIONS,
    format_func=lambda opt: f"{opt[0]} - {opt[1]}",
)
# Unpack the selected (id, label) pair for use downstream.
target_class_id, target_class_label = target_class
# Process: classify the clean image, then craft a targeted FGSM example.
if image is not None:  # explicit None check; PIL images are always truthy
    input_tensor = transform(image).unsqueeze(0)  # (1, 3, 224, 224), values in [0, 1]
    input_tensor.requires_grad = True  # we need d(loss)/d(pixels) for FGSM

    # Original (clean) prediction
    with torch.no_grad():
        orig_out = model(input_tensor)
    orig_pred_idx = orig_out.argmax().item()
    orig_pred = labels[orig_pred_idx]
    st.markdown(f"✅ **Original Prediction:** `{orig_pred}`")

    # Targeted FGSM attack: one signed-gradient step that *decreases* the
    # cross-entropy toward the chosen target class. The previous version
    # ADDED the perturbation (gradient ascent on the target-class loss),
    # which pushes the prediction away from the selected target — the
    # opposite of what the "Confuse the model into predicting" UI promises.
    output = model(input_tensor)
    loss = F.cross_entropy(output, torch.tensor([target_class_id]))
    loss.backward()
    perturb = epsilon * input_tensor.grad.sign()
    # Descend toward the target; clamp keeps pixels a valid image.
    # detach() drops autograd history so the tensor can be shown/converted.
    adv_tensor = torch.clamp(input_tensor - perturb, 0, 1).detach()

    # Adversarial prediction
    with torch.no_grad():
        adv_out = model(adv_tensor)
    adv_pred_idx = adv_out.argmax().item()
    adv_pred = labels[adv_pred_idx]
    st.markdown(f"⚠️ **Adversarial Prediction:** `{adv_pred}`")

    # Show the perturbed image (detached above so ToPILImage can convert it).
    st.image(transforms.ToPILImage()(adv_tensor.squeeze()), caption="Adversarial Image", use_column_width=True)

    # Accident message, shown only when the attack changed the model's mind.
    if orig_pred != adv_pred:
        st.markdown("#### 🚨 Accident Report")
        st.error(f"🤯 The car thought a `{orig_pred}` was a `{adv_pred}`. That's a full-on self-driving fail!")
        st.markdown("😬 *Hope nobody was crossing the road...*")
        st.markdown("💀 *Insurance premium just went up!*")
        st.markdown("🧠 *Back to AI driving school, buddy.*")