elanuk commited on
Commit
ae2aa11
·
verified ·
1 Parent(s): 99e4ef8

Upload 5 files

Browse files
Files changed (5) hide show
  1. generate_eq.py +98 -0
  2. generate_reports.py +96 -0
  3. requirements.txt +124 -0
  4. streamlit_app.py +412 -0
  5. update_bayesian.py +163 -0
generate_eq.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pandas as pd
3
+
4
+ DAMAGE_STATES = ['none', 'minor', 'severe', 'collapse']
5
+
6
def generate_buildings(n_buildings, diameter_km, seed=42):
    """Create a synthetic building inventory scattered around the epicenter.

    Buildings are placed at a uniform random angle and a uniform random
    radial distance in [0.5, diameter_km / 2] km from the origin (the
    epicenter). Each building gets a construction type and an occupancy
    drawn from a type-specific uniform range.

    Args:
        n_buildings: number of buildings to generate.
        diameter_km: diameter of the affected area; buildings lie within
            half this distance of the epicenter.
        seed: value passed to np.random.seed for reproducibility.

    Returns:
        pandas.DataFrame with columns building_id, x, y, distance_km,
        building_type, occupancy.
    """
    np.random.seed(seed)
    half_extent = diameter_km / 2

    # Occupancy range (low, high) per construction type.
    occupancy_ranges = {
        'wood': (10, 100),
        'concrete': (50, 300),
        'steel': (100, 500),
    }

    records = []
    for bid in range(n_buildings):
        theta = np.random.uniform(0, 2 * np.pi)
        rho = np.random.uniform(0.5, half_extent)

        btype = np.random.choice(
            ['wood', 'concrete', 'steel'],
            p=[0.5, 0.3, 0.2]
        )
        low, high = occupancy_ranges[btype]
        occupants = int(np.random.uniform(low, high))

        records.append({
            'building_id': bid,
            'x': round(rho * np.cos(theta), 2),
            'y': round(rho * np.sin(theta), 2),
            'distance_km': round(rho, 2),
            'building_type': btype,
            'occupancy': occupants
        })

    return pd.DataFrame(records)
43
+
44
def compute_damage_probabilities(distance, building_type, magnitude, alpha_params):
    """Compute the prior damage-state distribution for one building.

    Damage decays exponentially with distance from the epicenter; the
    decay length and overall intensity scale with magnitude. The three
    damaged-state probabilities are capped so they total at most 0.95,
    leaving at least 0.05 mass on 'none'.

    Args:
        distance: distance from the epicenter in km.
        building_type: key into alpha_params ('wood', 'concrete', 'steel').
        magnitude: earthquake magnitude (Richter).
        alpha_params: per-type vulnerability multiplier.

    Returns:
        np.ndarray of probabilities [p_none, p_minor, p_severe, p_collapse]
        summing to 1.
    """
    # Map magnitude onto a severity multiplier, clamped to [0.1, 2.0].
    severity = min(2.0, max(0.1, (magnitude - 5.0) / 3.0))

    # Exponential attenuation with a magnitude-dependent decay length.
    attenuation = np.exp(-distance / (15.0 * severity))

    vulnerability = alpha_params[building_type]

    p_collapse = vulnerability * attenuation * 0.30 * severity
    p_severe = vulnerability * attenuation * 0.25 * severity
    p_minor = vulnerability * attenuation * 0.20 * severity

    # Rescale so the damaged states never exceed 0.95 combined.
    damaged_total = p_collapse + p_severe + p_minor
    if damaged_total > 0.95:
        shrink = 0.95 / damaged_total
        p_collapse, p_severe, p_minor = (p_collapse * shrink,
                                         p_severe * shrink,
                                         p_minor * shrink)

    p_none = 1 - (p_collapse + p_severe + p_minor)
    return np.array([p_none, p_minor, p_severe, p_collapse])
66
+
67
def simulate_damage(buildings_df, magnitude, alpha_params, seed=42):
    """Draw a ground-truth damage state for every building.

    For each building the prior damage distribution is computed and one
    state is sampled from it; both the sampled state and the (rounded)
    prior probabilities are written back onto a copy of the input frame.

    Args:
        buildings_df: frame from generate_buildings.
        magnitude: earthquake magnitude (Richter).
        alpha_params: per-type vulnerability multipliers.
        seed: value passed to np.random.seed for reproducibility.

    Returns:
        Copy of buildings_df with added columns true_damage, p_none,
        p_minor, p_severe, p_collapse.
    """
    np.random.seed(seed)

    result = buildings_df.copy()

    for idx, row in result.iterrows():
        probs = compute_damage_probabilities(
            row['distance_km'],
            row['building_type'],
            magnitude,
            alpha_params
        )

        # Sample the hidden ground truth from the prior.
        result.at[idx, 'true_damage'] = np.random.choice(DAMAGE_STATES, p=probs)

        # Store the prior itself (rounded) alongside the truth.
        for col, value in zip(('p_none', 'p_minor', 'p_severe', 'p_collapse'), probs):
            result.at[idx, col] = round(float(value), 4)

    return result
89
+
90
def create_scenario(n_buildings=100, diameter_km=40, magnitude=6.5,
                    alpha_params=None, seed=42):
    """Build a complete earthquake scenario: buildings plus true damage.

    Args:
        n_buildings: number of buildings to generate.
        diameter_km: diameter of the affected area in km.
        magnitude: earthquake magnitude (Richter).
        alpha_params: optional per-type vulnerability multipliers; defaults
            to wood=1.5, concrete=1.0, steel=0.7.
        seed: seed used for both placement and damage sampling.

    Returns:
        DataFrame of buildings with sampled true_damage and prior columns.
    """
    vulnerability = alpha_params if alpha_params is not None else {
        'wood': 1.5, 'concrete': 1.0, 'steel': 0.7
    }

    inventory = generate_buildings(n_buildings, diameter_km, seed)
    return simulate_damage(inventory, magnitude, vulnerability, seed)
generate_reports.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pandas as pd
3
+
4
+ DAMAGE_STATES = ['none', 'minor', 'severe', 'collapse']
5
+
6
+ REPORT_SOURCES = {
7
+ 'automated_sensor': {'reliability': 0.85, 'weight': 0.25},
8
+ 'phone_call': {'reliability': 0.60, 'weight': 0.50},
9
+ 'social_media': {'reliability': 0.40, 'weight': 0.20},
10
+ 'inspector': {'reliability': 0.95, 'weight': 0.05}
11
+ }
12
+
13
def generate_noisy_report(true_state, reliability):
    """Return the damage state an observer reports for a building.

    With probability `reliability` the truth is reported. Otherwise a
    wrong state is drawn, with misreports favouring states ordinally
    close to the truth (weight 1/(distance+1)).

    Args:
        true_state: actual damage state (one of DAMAGE_STATES).
        reliability: probability of a truthful report, in [0, 1].

    Returns:
        A damage-state string.
    """
    if np.random.random() < reliability:
        return true_state

    anchor = DAMAGE_STATES.index(true_state)
    candidates = [s for s in DAMAGE_STATES if s != true_state]

    # Closer (ordinally) wrong states are more plausible misreports.
    raw = np.array([1.0 / (abs(DAMAGE_STATES.index(s) - anchor) + 1)
                    for s in candidates])

    return np.random.choice(candidates, p=raw / sum(raw))
29
+
30
def generate_building_reports(building, lambda_rates, max_time_hours=3, seed=None):
    """Simulate a Poisson stream of damage reports for one building.

    Report inter-arrival times are exponential with a rate determined by
    the building's true damage state; each report comes from a randomly
    chosen source type and carries a (possibly noisy) damage state.

    Args:
        building: dict-like with true_damage, building_id, building_type.
        lambda_rates: reports-per-hour keyed by true damage state.
        max_time_hours: simulation horizon.
        seed: optional np.random.seed value (None = leave RNG state alone).

    Returns:
        List of report dicts ordered by arrival time.
    """
    if seed is not None:
        np.random.seed(seed)

    actual_state = building['true_damage']
    # Hoisted loop invariant: convert the hourly rate to a per-minute rate.
    rate_per_minute = lambda_rates[actual_state] / 60.0
    horizon = max_time_hours * 60

    source_names = list(REPORT_SOURCES.keys())
    source_probs = [REPORT_SOURCES[s]['weight'] for s in source_names]

    stream = []
    clock = 0
    while clock < horizon:
        clock += np.random.exponential(1.0 / rate_per_minute)
        if clock >= horizon:
            break

        channel = np.random.choice(source_names, p=source_probs)
        observed = generate_noisy_report(actual_state,
                                         REPORT_SOURCES[channel]['reliability'])

        stream.append({
            'time_minutes': round(clock, 2),
            'source': channel,
            'reported_state': observed,
            'building_id': building['building_id'],
            'building_type': building['building_type'],
            'true_damage': building['true_damage']
        })

    return stream
66
+
67
def generate_all_reports(buildings_df, lambda_rates=None, max_time_hours=3, seed=42):
    """Generate the full report stream for every building.

    Args:
        buildings_df: scenario frame with true_damage per building.
        lambda_rates: optional reports-per-hour by damage state; defaults
            to collapse=8.0, severe=3.0, minor=0.8, none=0.2.
        max_time_hours: simulation horizon per building.
        seed: base seed; each building is seeded with seed + row index so
            streams are individually reproducible.

    Returns:
        DataFrame sorted by time_minutes with columns time_minutes, source,
        reported_state, building_id, building_type, true_damage — even when
        no reports were generated.
    """
    if lambda_rates is None:
        lambda_rates = {
            'collapse': 8.0,
            'severe': 3.0,
            'minor': 0.8,
            'none': 0.2
        }

    np.random.seed(seed)
    all_reports = []

    for idx, building in buildings_df.iterrows():
        building_seed = seed + idx if seed is not None else None
        reports = generate_building_reports(
            building.to_dict(),
            lambda_rates,
            max_time_hours,
            building_seed
        )
        all_reports.extend(reports)

    # BUG FIX: always materialize the full schema. pd.DataFrame([]) has no
    # columns, so downstream filters like reports['time_minutes'] <= t or
    # reports['building_id'].unique() raised KeyError when no reports arrived
    # (a case the UI explicitly anticipates).
    columns = ['time_minutes', 'source', 'reported_state',
               'building_id', 'building_type', 'true_damage']
    reports_df = pd.DataFrame(all_reports, columns=columns)

    return reports_df.sort_values('time_minutes').reset_index(drop=True)
94
+
95
def get_report_reliability(source):
    """Look up the truth probability configured for a report source type."""
    entry = REPORT_SOURCES[source]
    return entry['reliability']
requirements.txt ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ altair==5.4.1
2
+ annotated-types==0.7.0
3
+ anyio==3.7.1
4
+ asyncio==3.4.3
5
+ attrs==24.2.0
6
+ beautifulsoup4==4.13.4
7
+ blinker==1.8.2
8
+ cachetools==5.5.0
9
+ certifi==2024.8.30
10
+ charset-normalizer==3.3.2
11
+ click==8.1.7
12
+ contourpy==1.3.2
13
+ cycler==0.12.1
14
+ distlib==0.3.9
15
+ distro==1.9.0
16
+ dotenv==0.9.9
17
+ exceptiongroup==1.2.2
18
+ fastapi==0.104.1
19
+ filelock==3.18.0
20
+ fonttools==4.60.1
21
+ fsspec==2025.5.1
22
+ gitdb==4.0.11
23
+ GitPython==3.1.43
24
+ google==3.0.0
25
+ google-ai-generativelanguage==0.6.10
26
+ google-api-core==2.20.0
27
+ google-api-python-client==2.148.0
28
+ google-auth==2.35.0
29
+ google-auth-httplib2==0.2.0
30
+ google-auth-oauthlib==1.2.1
31
+ google-generativeai==0.8.3
32
+ google_search_results==2.4.2
33
+ googleapis-common-protos==1.65.0
34
+ greenlet==3.2.1
35
+ grpcio==1.66.2
36
+ grpcio-status==1.66.2
37
+ gspread==6.1.4
38
+ h11==0.14.0
39
+ hf-xet==1.1.5
40
+ httpcore==1.0.7
41
+ httplib2==0.22.0
42
+ httpx==0.28.1
43
+ httpx-sse==0.4.1
44
+ huggingface-hub==0.33.2
45
+ idna==3.10
46
+ Jinja2==3.1.2
47
+ jiter==0.9.0
48
+ joblib==1.5.1
49
+ jsonschema==4.23.0
50
+ jsonschema-specifications==2024.10.1
51
+ kiwisolver==1.4.9
52
+ mangum==0.19.0
53
+ markdown-it-py==3.0.0
54
+ MarkupSafe==3.0.2
55
+ matplotlib==3.10.7
56
+ mcp==1.12.4
57
+ mdurl==0.1.2
58
+ mpmath==1.3.0
59
+ narwhals==2.12.0
60
+ networkx==3.4.2
61
+ numpy==2.1.2
62
+ oauth2client==4.1.3
63
+ oauthlib==3.2.2
64
+ openai==1.3.5
65
+ packaging==24.1
66
+ pandas==2.2.3
67
+ pillow==10.4.0
68
+ platformdirs==4.3.8
69
+ playwright==1.52.0
70
+ plotly==6.5.0
71
+ proto-plus==1.24.0
72
+ protobuf==5.28.2
73
+ pyarrow==17.0.0
74
+ pyasn1==0.6.1
75
+ pyasn1_modules==0.4.1
76
+ pydantic==2.5.0
77
+ pydantic-settings==2.10.1
78
+ pydantic_core==2.14.1
79
+ pydeck==0.9.1
80
+ pyee==13.0.0
81
+ Pygments==2.18.0
82
+ pyparsing==3.1.4
83
+ PyPDF2==3.0.1
84
+ python-dateutil==2.9.0.post0
85
+ python-dotenv==1.0.0
86
+ python-multipart==0.0.6
87
+ pytz==2024.2
88
+ PyYAML==6.0.2
89
+ referencing==0.35.1
90
+ regex==2024.11.6
91
+ requests==2.32.3
92
+ requests-oauthlib==2.0.0
93
+ rich==13.9.2
94
+ rpds-py==0.20.0
95
+ rsa==4.9
96
+ safetensors==0.5.3
97
+ scikit-learn==1.7.0
98
+ scipy==1.15.3
99
+ seaborn==0.13.2
100
+ sentence-transformers==5.0.0
101
+ six==1.16.0
102
+ smmap==5.0.1
103
+ sniffio==1.3.1
104
+ soupsieve==2.7
105
+ sse-starlette==3.0.2
106
+ starlette==0.27.0
107
+ streamlit==1.39.0
108
+ sympy==1.14.0
109
+ tenacity==9.0.0
110
+ threadpoolctl==3.6.0
111
+ tiktoken==0.9.0
112
+ tokenizers==0.21.2
113
+ toml==0.10.2
114
+ torch==2.7.1
115
+ tornado==6.4.1
116
+ tqdm==4.66.5
117
+ transformers==4.53.1
118
+ typing-inspection==0.4.1
119
+ typing_extensions==4.12.2
120
+ tzdata==2024.2
121
+ uritemplate==4.1.1
122
+ urllib3==2.2.3
123
+ uvicorn==0.24.0
124
+ virtualenv==20.31.2
streamlit_app.py ADDED
@@ -0,0 +1,412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import pandas as pd
3
+ import numpy as np
4
+ import plotly.express as px
5
+ import plotly.graph_objects as go
6
+ from generate_eq import create_scenario
7
+ from generate_reports import generate_all_reports
8
+ from update_bayesian import run_inference, compute_decision_metrics
9
+
10
# --- Page chrome and sidebar: all scenario knobs live here. ---
st.set_page_config(layout="wide", page_title="Earthquake Response Simulator", page_icon="🌍")

st.title("Bayesian Earthquake Response Simulator")
st.markdown("Design your own earthquake scenario and watch Bayesian inference optimize rescue decisions")

with st.sidebar:
    st.header("Scenario Configuration")

    st.subheader("Earthquake Parameters")
    magnitude = st.slider("Magnitude (Richter)", 5.0, 8.0, 6.5, 0.1,
                          help="Higher magnitude = more energy, wider damage radius")
    diameter = st.slider("Affected Area Diameter (km)", 10, 100, 40, 5)
    n_buildings = st.slider("Number of Buildings", 20, 2000, 100, 10,
                            help="More buildings = more realistic but slower computation")
    n_teams = st.slider("Rescue Teams Available", 1, 50, 5, 1)

    st.subheader("Building Vulnerability")
    with st.expander("Advanced: Vulnerability Parameters"):
        # Per-type alpha multipliers consumed by create_scenario.
        alpha_wood = st.slider("Wood Buildings (α)", 0.5, 2.5, 1.5, 0.1)
        alpha_concrete = st.slider("Concrete Buildings (α)", 0.5, 2.5, 1.0, 0.1)
        alpha_steel = st.slider("Steel Buildings (α)", 0.5, 2.5, 0.7, 0.1)

    st.subheader("Report Arrival Rates")
    with st.expander("Advanced: Poisson Rates (per hour)"):
        # Reports-per-hour by true damage state, consumed by generate_all_reports.
        lambda_collapse = st.slider("Collapsed Buildings", 1.0, 15.0, 8.0, 0.5)
        lambda_severe = st.slider("Severe Damage", 0.5, 8.0, 3.0, 0.5)
        lambda_minor = st.slider("Minor Damage", 0.1, 3.0, 0.8, 0.1)
        lambda_none = st.slider("No Damage", 0.05, 1.0, 0.2, 0.05)

    st.divider()

    sim_seed = st.number_input("Random Seed", 0, 9999, 42, 1)

    # Clearing session_state forces the cached scenario below to be rebuilt
    # with the current widget values on the next run.
    if st.button("Generate New Scenario", type="primary"):
        st.session_state.clear()
        st.rerun()

# Collect the widget values into the parameter dicts the simulation expects.
alpha_params = {'wood': alpha_wood, 'concrete': alpha_concrete, 'steel': alpha_steel}
lambda_rates = {'collapse': lambda_collapse, 'severe': lambda_severe, 'minor': lambda_minor, 'none': lambda_none}
49
+
50
# Build the scenario once per session and cache it in session_state.
# NOTE(review): changing sidebar widgets alone does NOT regenerate this data
# (including n_teams used in metrics) — the user must press "Generate New
# Scenario", which clears session_state. Confirm this is intended UX.
if 'scenario' not in st.session_state:
    with st.spinner("Generating earthquake scenario..."):
        # Ground truth: building inventory + sampled damage states.
        st.session_state.scenario = create_scenario(
            n_buildings=n_buildings,
            diameter_km=diameter,
            magnitude=magnitude,
            alpha_params=alpha_params,
            seed=sim_seed
        )
        # Noisy Poisson report stream over a 3-hour horizon.
        st.session_state.reports = generate_all_reports(
            st.session_state.scenario,
            lambda_rates=lambda_rates,
            max_time_hours=3,
            seed=sim_seed
        )
        # Posterior beliefs after processing all reports.
        st.session_state.beliefs = run_inference(
            st.session_state.scenario,
            st.session_state.reports
        )
        # Naive-vs-Bayesian allocation comparison.
        st.session_state.metrics = compute_decision_metrics(
            st.session_state.scenario,
            st.session_state.beliefs,
            n_teams
        )

# Local aliases for the cached artifacts used throughout the tabs.
scenario = st.session_state.scenario
reports = st.session_state.reports
beliefs = st.session_state.beliefs
metrics = st.session_state.metrics
79
+
80
tab1, tab2, tab3 = st.tabs(["Live Simulation", "Building Analysis", "Results & Impact"])

# --- Tab 1: time-scrubbing map of beliefs as reports arrive. ---
with tab1:

    # --- Create placeholders so we control layout order ---
    # The metrics depend on the slider value, but must render above it, so
    # both get empty placeholders first and are filled out of order below.
    metrics_placeholder = st.empty()
    st.divider()
    slider_placeholder = st.empty()

    # --- SLIDER RENDERED BELOW METRICS ---
    with slider_placeholder:
        current_time = st.slider(
            "Simulation Time (minutes)",
            min_value=0.0,
            max_value=180.0,
            value=0.0,
            step=1.0,
            help="Slide to watch how beliefs update as reports arrive over time"
        )

    # --- Now compute everything based on current_time ---
    # Only reports that have "arrived" by the scrubbed time are visible.
    current_reports = reports[reports['time_minutes'] <= current_time]
    num_current_reports = len(current_reports)

    buildings_reported = current_reports['building_id'].nunique()

    # Average posterior entropy over buildings that have at least one report.
    if num_current_reports > 0:
        buildings_with_reports = current_reports['building_id'].unique()
        current_beliefs_subset = beliefs[beliefs['building_id'].isin(buildings_with_reports)]
        avg_entropy = current_beliefs_subset['entropy'].mean()
    else:
        avg_entropy = None

    # --- RENDER METRICS ABOVE SLIDER ---
    with metrics_placeholder.container():
        col2, col3, col4 = st.columns(3)

        with col2:
            st.metric("Reports Received", num_current_reports)

        with col3:
            st.metric("Buildings Reported", buildings_reported)

        with col4:
            if avg_entropy is not None:
                st.metric("Avg Uncertainty", f"{avg_entropy:.2f}",
                          help="0 = certain, 2 = maximum uncertainty")
            else:
                st.metric("Avg Uncertainty", "—")

    from update_bayesian import bayesian_update
    from generate_reports import get_report_reliability

    # Re-run the sequential Bayesian update per building using only the
    # reports visible at current_time (recomputed on every slider move).
    beliefs_at_time = []
    for _, building in scenario.iterrows():
        prior = np.array([
            building['p_none'],
            building['p_minor'],
            building['p_severe'],
            building['p_collapse']
        ])

        building_reports = current_reports[current_reports['building_id'] == building['building_id']]
        current_belief = prior.copy()

        for _, report in building_reports.iterrows():
            reliability = get_report_reliability(report['source'])
            current_belief = bayesian_update(current_belief, report['reported_state'], reliability)

        beliefs_at_time.append({
            'building_id': building['building_id'],
            'p_collapse_current': current_belief[3],
            'has_reports': len(building_reports) > 0
        })

    beliefs_current_df = pd.DataFrame(beliefs_at_time)

    map_data = scenario[['building_id', 'x', 'y', 'distance_km', 'building_type', 'occupancy', 'true_damage']].merge(
        beliefs_current_df,
        on='building_id'
    )

    # Bubble map: size = occupancy, color = current P(collapse).
    fig_map = px.scatter(
        map_data,
        x='x',
        y='y',
        size='occupancy',
        color='p_collapse_current',
        color_continuous_scale='RdYlGn_r',
        range_color=[0, 1],
        hover_data={
            'building_id': True,
            'building_type': True,
            'distance_km': ':.1f',
            'occupancy': True,
            'p_collapse_current': ':.3f',
            'true_damage': True,
            'has_reports': True,
            'x': False,
            'y': False
        },
        labels={'p_collapse_current': 'P(Collapse)', 'has_reports': 'Received Reports'},
        title=f"Building Risk Assessment at t={int(current_time)} minutes"
    )

    # Epicenter marker at the origin.
    fig_map.add_trace(go.Scatter(
        x=[0], y=[0],
        mode='markers+text',
        marker=dict(size=20, color='red', symbol='star'),
        text=['Epicenter'],
        textposition='top center',
        showlegend=False,
        hoverinfo='skip'
    ))

    # Dashed distance rings, only those inside the affected area.
    for radius in [5, 10, 15, 20]:
        if radius < diameter / 2:
            theta = np.linspace(0, 2*np.pi, 100)
            x_circle = radius * np.cos(theta)
            y_circle = radius * np.sin(theta)
            fig_map.add_trace(go.Scatter(
                x=x_circle, y=y_circle,
                mode='lines',
                line=dict(color='gray', width=1, dash='dash'),
                showlegend=False,
                hoverinfo='skip'
            ))

    # NOTE(review): anchoring x to y AND y to x is circular; one anchor is
    # enough for an equal-aspect map — confirm plotly accepts this silently.
    fig_map.update_layout(
        height=600,
        xaxis_title="Distance East-West (km)",
        yaxis_title="Distance North-South (km)",
        xaxis=dict(scaleanchor="y", scaleratio=1),
        yaxis=dict(scaleanchor="x", scaleratio=1)
    )

    st.plotly_chart(fig_map, use_container_width=True)

    if num_current_reports > 0:
        with st.expander("Recent Reports", expanded=False):
            # NOTE(review): nsmallest selects the 10 EARLIEST reports; a
            # "Recent Reports" panel presumably wants the latest 10
            # (nlargest) — confirm intent.
            recent = current_reports.nsmallest(10, 'time_minutes', keep='last')
            recent = recent.sort_values('time_minutes', ascending=False)

            for _, report in recent.iterrows():
                st.text(
                    f"t={report['time_minutes']:6.1f}m | "
                    f"Building {report['building_id']:3.0f} | "
                    f"{report['source']:20s} | "
                    f"Reports: {report['reported_state']:8s} | "
                    f"True: {report['true_damage']}"
                )
    else:
        st.info("Move the time slider forward to see reports arrive...")
234
+
235
# --- Tab 2: drill-down into a single building's belief evolution. ---
with tab2:
    buildings_with_reports = reports['building_id'].unique()

    if len(buildings_with_reports) > 0:
        selected_building = st.selectbox(
            "Select Building",
            buildings_with_reports,
            format_func=lambda x: f"Building {x}"
        )

        building_info = scenario[scenario['building_id'] == selected_building].iloc[0]
        building_beliefs = beliefs[beliefs['building_id'] == selected_building].iloc[0]
        building_reports = reports[reports['building_id'] == selected_building]

        col1, col2 = st.columns([1, 2])

        with col1:
            st.subheader("Building Information")
            st.metric("Type", building_info['building_type'].title())
            st.metric("Distance", f"{building_info['distance_km']:.1f} km")
            st.metric("Occupancy", f"{building_info['occupancy']} people")
            st.metric("True Damage", building_info['true_damage'].title())

            st.divider()

            st.subheader("Inference Results")
            # NOTE(review): this reads p_collapse_ci_lower/upper (and
            # p_collapse_std) from the beliefs frame, but run_inference as
            # written in update_bayesian.py never emits the CI columns —
            # verify, this looks like it would raise KeyError.
            st.metric(
                "P(Collapse)",
                f"{building_beliefs['p_collapse']:.3f}",
                help=f"95% CI: [{building_beliefs['p_collapse_ci_lower']:.3f}, {building_beliefs['p_collapse_ci_upper']:.3f}]"
            )
            # NOTE(review): ci_width is computed but never used.
            ci_width = building_beliefs['p_collapse_ci_upper'] - building_beliefs['p_collapse_ci_lower']
            st.metric("Std Dev", f"{building_beliefs['p_collapse_std']:.3f}")

            st.metric("Entropy", f"{building_beliefs['entropy']:.2f}")
            st.metric("Reports Received", int(building_beliefs['num_reports']))

        with col2:
            st.subheader("Belief Evolution")

            prior = np.array([
                building_info['p_none'],
                building_info['p_minor'],
                building_info['p_severe'],
                building_info['p_collapse']
            ])

            # Replay the update sequence to chart P(collapse) after each report.
            evolution = [{'time': 0, 'p_collapse': prior[3], 'event': 'Prior'}]
            current = prior.copy()

            from update_bayesian import bayesian_update
            from generate_reports import get_report_reliability

            for _, report in building_reports.iterrows():
                reliability = get_report_reliability(report['source'])
                current = bayesian_update(current, report['reported_state'], reliability)
                evolution.append({
                    'time': report['time_minutes'],
                    'p_collapse': current[3],
                    'event': f"{report['source']}: {report['reported_state']}"
                })

            evolution_df = pd.DataFrame(evolution)

            fig_evolution = go.Figure()

            fig_evolution.add_trace(go.Scatter(
                x=evolution_df['time'],
                y=evolution_df['p_collapse'],
                mode='lines+markers',
                name='P(Collapse)',
                line=dict(color='red', width=3),
                hovertemplate='%{text}<br>P(Collapse): %{y:.3f}<extra></extra>',
                text=evolution_df['event']
            ))

            fig_evolution.add_hline(
                y=building_beliefs['p_collapse_ci_lower'],
                line_dash="dash",
                line_color="gray",
                annotation_text="95% CI Lower"
            )

            fig_evolution.add_hline(
                y=building_beliefs['p_collapse_ci_upper'],
                line_dash="dash",
                line_color="gray",
                annotation_text="95% CI Upper"
            )

            fig_evolution.update_layout(
                title=f"Building {selected_building} - Belief Updates Over Time",
                xaxis_title="Time (minutes)",
                yaxis_title="P(Collapse)",
                yaxis=dict(range=[0, 1]),
                height=400
            )

            st.plotly_chart(fig_evolution, use_container_width=True)

            st.subheader("Report Timeline")
            for _, report in building_reports.iterrows():
                reliability = get_report_reliability(report['source'])
                st.text(
                    f"t={report['time_minutes']:6.1f}m | "
                    f"{report['source']:20s} ({reliability:.0%}) | "
                    f"Reports: {report['reported_state']:8s} | "
                    f"True: {report['true_damage']}"
                )
    else:
        st.info("No buildings received reports in this simulation. Try increasing Poisson rates.")
346
+
347
# --- Tab 3: naive vs Bayesian allocation outcome comparison. ---
with tab3:
    st.header("Decision Quality Comparison")

    improvement = metrics['improvement']
    improvement_pct = metrics['improvement_pct']

    col1, col2, col3 = st.columns(3)

    with col1:
        st.metric(
            "Naive Approach",
            f"{metrics['naive_lives_saved']} people",
            help="Using only distance-based priors"
        )

    with col2:
        # NOTE(review): delta=f"+{improvement}" renders "+-3" when the
        # improvement is negative — consider f"{improvement:+d}".
        st.metric(
            "Bayesian Approach",
            f"{metrics['bayesian_lives_saved']} people",
            delta=f"+{improvement}",
            help="Using updated posterior beliefs"
        )

    with col3:
        st.metric(
            "Improvement",
            f"{improvement_pct:.1f}%",
            delta=f"+{improvement} lives"
        )

    st.divider()

    if improvement > 0:
        st.success(
            f"By incorporating uncertain information via Bayesian inference, "
            f"we can reach {improvement} more people ({improvement_pct:.1f}% improvement) "
            f"with the same {n_teams} rescue teams."
        )
    elif improvement < 0:
        st.warning(
            f"In this scenario, the naive approach performed slightly better. "
            f"This can happen when reports are very noisy or when the prior is already well-calibrated."
        )
    else:
        st.info("Both approaches performed equally in this scenario.")

    st.subheader("Performance Metrics")

    # Columns shared by both frames (p_none..p_collapse) get _prior /
    # _posterior suffixes in the merge.
    map_data_full = scenario.merge(beliefs, on='building_id', suffixes=('_prior', '_posterior'))

    # Accuracy is only scored on buildings that actually received reports.
    buildings_with_reports = reports['building_id'].unique()
    comparison = map_data_full[map_data_full['building_id'].isin(buildings_with_reports)].copy()

    if len(comparison) > 0:
        # Binary collapse classification at a 0.5 threshold for each approach.
        comparison['true_collapsed'] = (comparison['true_damage'] == 'collapse').astype(int)
        comparison['naive_pred'] = (comparison['p_collapse_prior'] > 0.5).astype(int)
        comparison['bayesian_pred'] = (comparison['p_collapse_posterior'] > 0.5).astype(int)

        naive_acc = (comparison['true_collapsed'] == comparison['naive_pred']).mean()
        bayesian_acc = (comparison['true_collapsed'] == comparison['bayesian_pred']).mean()

        st.metric("Naive Accuracy", f"{naive_acc:.1%}")
        st.metric("Bayesian Accuracy", f"{bayesian_acc:.1%}", delta=f"{bayesian_acc - naive_acc:+.1%}")

    st.divider()
update_bayesian.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pandas as pd
3
+ from generate_reports import get_report_reliability
4
+
5
+ DAMAGE_STATES = ['none', 'minor', 'severe', 'collapse']
6
+
7
def compute_likelihood(reported_state, true_state, reliability):
    """P(report = reported_state | true = true_state) under the noise model.

    BUG FIX: the previous ad-hoc multipliers (2x / 1x / 0.5x of
    (1-reliability)/3) did not match the actual report generator
    (generate_noisy_report), which misreports with probability
    (1 - reliability) and picks a wrong state with weight 1/(distance+1)
    normalized over the wrong states. They were also not a proper
    conditional distribution (rows did not sum to 1), which biased the
    posterior toward interior states. This version mirrors the generator
    exactly, so the likelihood is both normalized and well-specified.

    Args:
        reported_state: state named in the report.
        true_state: hypothesized true damage state.
        reliability: source's probability of reporting the truth.

    Returns:
        Probability in [0, 1].
    """
    if reported_state == true_state:
        return reliability

    true_idx = DAMAGE_STATES.index(true_state)

    # Same 1/(distance+1) weighting the generator uses, normalized over
    # the three wrong states for this true state.
    weights = {
        state: 1.0 / (abs(DAMAGE_STATES.index(state) - true_idx) + 1)
        for state in DAMAGE_STATES if state != true_state
    }
    return (1 - reliability) * weights[reported_state] / sum(weights.values())
23
+
24
def bayesian_update(prior, reported_state, reliability):
    """One Bayes step: fold a single report into the belief vector.

    Args:
        prior: length-4 probability array over DAMAGE_STATES.
        reported_state: state named in the incoming report.
        reliability: source reliability used by the likelihood model.

    Returns:
        Normalized posterior array (or the prior unchanged if the
        evidence annihilates all probability mass).
    """
    evidence = np.array([
        compute_likelihood(reported_state, state, reliability)
        for state in DAMAGE_STATES
    ])

    unnormalized = evidence * prior
    normalizer = np.sum(unnormalized)

    # Degenerate case: evidence wiped out every state the prior supported.
    # Keep the prior instead of dividing by ~zero.
    if normalizer < 1e-10:
        return prior

    return unnormalized / normalizer
38
+
39
def entropy(probs):
    """Shannon entropy (bits) of a distribution; zero entries contribute nothing."""
    distribution = np.asarray(probs, dtype=float)
    support = distribution[distribution > 0]
    return -np.sum(support * np.log2(support))
43
+
44
def bootstrap_beliefs(reports, prior, n_bootstrap=100):
    """Estimate posterior uncertainty by bootstrapping the report list.

    Each replicate resamples the reports with replacement, replays the
    sequential Bayesian update from the prior, and records the resulting
    posterior; the spread across replicates measures sensitivity to the
    particular reports observed.

    Args:
        reports: list of report dicts (with 'source' and 'reported_state').
        prior: length-4 prior probability array.
        n_bootstrap: number of bootstrap replicates.

    Returns:
        Dict with 'mean' and 'std_dev' length-4 arrays; with no reports,
        the prior itself with zero spread.
    """
    if len(reports) == 0:
        return {
            'mean': prior,
            'std_dev': np.zeros_like(prior)
        }

    n = len(reports)
    replicates = []

    for _ in range(n_bootstrap):
        picks = np.random.choice(n, size=n, replace=True)

        belief = prior.copy()
        for i in picks:
            chosen = reports[i]
            belief = bayesian_update(
                belief,
                chosen['reported_state'],
                get_report_reliability(chosen['source'])
            )
        replicates.append(belief)

    stacked = np.array(replicates)
    return {
        'mean': stacked.mean(axis=0),
        'std_dev': stacked.std(axis=0)
    }
71
+
72
def process_building(building, all_reports, prior):
    """Compute one building's posterior belief row from its reports.

    Replays the building's reports in time order through bayesian_update,
    then bootstraps the report list to quantify uncertainty.

    BUG FIX: the bootstrap result was previously computed and then
    discarded, while the UI (streamlit_app tab 2) reads
    p_collapse_std / p_collapse_ci_lower / p_collapse_ci_upper from the
    beliefs frame. Those fields are now populated from the bootstrap
    (normal-approximation 95% CI, clipped to [0, 1]).

    Args:
        building: row (dict-like) with building_id.
        all_reports: full reports DataFrame; filtered here by building_id.
        prior: length-4 prior probability array.

    Returns:
        Flat dict suitable as one row of the beliefs DataFrame.
    """
    building_id = building['building_id']
    building_reports = all_reports[all_reports['building_id'] == building_id]
    building_reports = building_reports.sort_values('time_minutes')

    current_belief = prior.copy()
    report_list = []

    for _, report in building_reports.iterrows():
        reliability = get_report_reliability(report['source'])
        current_belief = bayesian_update(
            current_belief,
            report['reported_state'],
            reliability
        )
        report_list.append(report.to_dict())

    # Uncertainty of the posterior under resampling of the observed reports.
    bootstrap_result = bootstrap_beliefs(report_list, prior, n_bootstrap=50)
    p_collapse_std = float(bootstrap_result['std_dev'][3])
    p_collapse = float(current_belief[3])
    ci_half_width = 1.96 * p_collapse_std  # normal-approximation 95% interval

    return {
        'building_id': building_id,
        'p_none': current_belief[0],
        'p_minor': current_belief[1],
        'p_severe': current_belief[2],
        'p_collapse': current_belief[3],
        'entropy': entropy(current_belief),
        'num_reports': len(building_reports),
        'p_collapse_std': p_collapse_std,
        'p_collapse_ci_lower': max(0.0, p_collapse - ci_half_width),
        'p_collapse_ci_upper': min(1.0, p_collapse + ci_half_width)
    }
100
+
101
def run_inference(buildings_df, reports_df, n_samples=5000):
    """Run per-building Bayesian inference over the full report stream.

    IMPROVEMENTS:
    - The previous code estimated the collapse-indicator spread by drawing
      n_samples unseeded Monte Carlo samples from the posterior, which was
      nondeterministic and merely approximated the closed-form Bernoulli
      std sqrt(p * (1 - p)); the exact value is used instead.
    - The UI reads p_collapse_ci_lower / p_collapse_ci_upper from this
      frame; they are supplied here (via setdefault, so values computed
      upstream by process_building are respected if present).

    Args:
        buildings_df: scenario frame with prior p_* columns.
        reports_df: full reports frame.
        n_samples: kept for backward compatibility; no longer used for
            sampling.

    Returns:
        DataFrame with one belief row per building.
    """
    results = []

    for _, building in buildings_df.iterrows():
        prior = np.array([
            building['p_none'],
            building['p_minor'],
            building['p_severe'],
            building['p_collapse']
        ])

        result = process_building(building, reports_df, prior)

        # Renormalize to guard against numeric drift before deriving stats.
        posterior = np.array([
            result['p_none'],
            result['p_minor'],
            result['p_severe'],
            result['p_collapse']
        ])
        posterior = posterior / posterior.sum()
        p_collapse = float(posterior[3])

        # Exact std of the collapse indicator under the posterior.
        result.setdefault('p_collapse_std',
                          float(np.sqrt(p_collapse * (1.0 - p_collapse))))
        # Normal-approximation 95% CI, clipped to [0, 1], for the UI.
        half_width = 1.96 * result['p_collapse_std']
        result.setdefault('p_collapse_ci_lower', max(0.0, p_collapse - half_width))
        result.setdefault('p_collapse_ci_upper', min(1.0, p_collapse + half_width))

        results.append(result)

    return pd.DataFrame(results)
130
+
131
+
132
def compute_decision_metrics(buildings_df, beliefs_df, n_teams):
    """Compare naive (prior-only) vs Bayesian (posterior) team allocation.

    Each strategy sends its n_teams rescue teams to the buildings it
    considers most at risk — the naive strategy ranks by the prior
    P(collapse), the Bayesian strategy by the posterior-expected number of
    at-risk occupants — and is scored by the true number of at-risk
    occupants it reaches.

    IMPROVEMENT: the ground-truth at-risk column previously rebuilt a
    4-entry dict inside a per-row apply; replaced with a vectorized
    Series.map over the same fraction table (identical values).

    Args:
        buildings_df: scenario frame (true_damage, occupancy, prior p_*).
        beliefs_df: posterior frame from run_inference.
        n_teams: number of buildings each strategy can cover.

    Returns:
        Dict with bayesian_lives_saved, naive_lives_saved, improvement
        (ints) and improvement_pct (float; 0 when the naive score is 0).
    """
    # Fraction of occupants assumed at risk for each true damage state.
    at_risk_fraction = {'collapse': 0.9, 'severe': 0.4, 'minor': 0.05, 'none': 0.0}

    merged = buildings_df[['building_id', 'true_damage', 'occupancy', 'p_none', 'p_minor', 'p_severe', 'p_collapse']].merge(
        beliefs_df[['building_id', 'p_none', 'p_minor', 'p_severe', 'p_collapse']],
        on='building_id',
        suffixes=('_prior', '_posterior')
    )

    # Ground-truth payoff of rescuing each building.
    merged['at_risk_true'] = merged['true_damage'].map(at_risk_fraction) * merged['occupancy']

    # Posterior-expected payoff used by the Bayesian ranking.
    merged['expected_at_risk'] = (
        merged['p_collapse_posterior'] * 0.9 * merged['occupancy'] +
        merged['p_severe_posterior'] * 0.4 * merged['occupancy'] +
        merged['p_minor_posterior'] * 0.05 * merged['occupancy']
    )

    bayesian_saved = merged.nlargest(n_teams, 'expected_at_risk')['at_risk_true'].sum()
    naive_saved = merged.nlargest(n_teams, 'p_collapse_prior')['at_risk_true'].sum()

    return {
        'bayesian_lives_saved': int(bayesian_saved),
        'naive_lives_saved': int(naive_saved),
        'improvement': int(bayesian_saved - naive_saved),
        'improvement_pct': (bayesian_saved - naive_saved) / naive_saved * 100 if naive_saved > 0 else 0
    }