Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -139,6 +139,8 @@ st.markdown("""
|
|
| 139 |
""", unsafe_allow_html=True)
|
| 140 |
|
| 141 |
# 3. DYNAMIC GEOCODING ENGINE WITH PERSISTENT JSON
|
|
|
|
|
|
|
| 142 |
@st.cache_data(show_spinner=False)
|
| 143 |
def fetch_coordinates_batch(unique_locations):
|
| 144 |
"""
|
|
@@ -161,7 +163,7 @@ def fetch_coordinates_batch(unique_locations):
|
|
| 161 |
d, s = k.split("|")
|
| 162 |
coords_map[(d, s)] = tuple(v)
|
| 163 |
except json.JSONDecodeError:
|
| 164 |
-
pass
|
| 165 |
|
| 166 |
# 2. Add Hardcoded Pre-fills (High Priority Redundancy)
|
| 167 |
# These override if missing, but usually JSON is preferred source if present
|
|
@@ -188,70 +190,77 @@ def fetch_coordinates_batch(unique_locations):
|
|
| 188 |
for k, v in prefills.items():
|
| 189 |
if k not in coords_map:
|
| 190 |
coords_map[k] = v
|
| 191 |
-
|
| 192 |
# 3. Identify missing locations
|
| 193 |
missing_locs = [loc for loc in unique_locations if loc not in coords_map]
|
| 194 |
-
|
| 195 |
if not missing_locs:
|
| 196 |
return coords_map
|
| 197 |
|
| 198 |
# 4. Dynamic Fetching for missing
|
| 199 |
progress_text = "π‘ New locations found. Fetching coordinates..."
|
| 200 |
my_bar = st.progress(0, text=progress_text)
|
| 201 |
-
|
| 202 |
-
headers = {
|
|
|
|
| 203 |
updated = False
|
| 204 |
-
|
| 205 |
for i, (district, state) in enumerate(missing_locs):
|
| 206 |
try:
|
| 207 |
# Update Progress
|
| 208 |
-
my_bar.progress((i + 1) / len(missing_locs),
|
| 209 |
-
|
|
|
|
| 210 |
# API Call
|
| 211 |
query = f"{district}, {state}, India"
|
| 212 |
url = "https://nominatim.openstreetmap.org/search"
|
| 213 |
params = {'q': query, 'format': 'json', 'limit': 1}
|
| 214 |
-
|
| 215 |
-
response = requests.get(
|
| 216 |
-
|
|
|
|
| 217 |
if response.status_code == 200 and response.json():
|
| 218 |
data = response.json()[0]
|
| 219 |
-
coords_map[(district, state)] = (
|
|
|
|
| 220 |
updated = True
|
| 221 |
else:
|
| 222 |
-
pass
|
| 223 |
-
|
| 224 |
# Respect Rate Limiting (1 request per second)
|
| 225 |
-
time.sleep(1.1)
|
| 226 |
-
|
| 227 |
except Exception as e:
|
| 228 |
continue
|
| 229 |
-
|
| 230 |
my_bar.empty()
|
| 231 |
-
|
| 232 |
# 5. Save back to JSON if new data fetched
|
| 233 |
if updated:
|
| 234 |
# Convert keys to string "District|State" for JSON compatibility
|
| 235 |
save_data = {f"{k[0]}|{k[1]}": v for k, v in coords_map.items()}
|
| 236 |
with open(json_file, 'w') as f:
|
| 237 |
json.dump(save_data, f)
|
| 238 |
-
|
| 239 |
return coords_map
|
| 240 |
|
| 241 |
# 4. MAIN DATA LOADER
|
|
|
|
|
|
|
| 242 |
@st.cache_data(ttl=300)
|
| 243 |
def load_data():
|
| 244 |
try:
|
| 245 |
df = pd.read_csv('analyzed_aadhaar_data.csv')
|
| 246 |
except FileNotFoundError:
|
| 247 |
-
return pd.DataFrame()
|
|
|
|
|
|
|
|
|
|
| 248 |
|
| 249 |
-
if 'date' in df.columns: df['date'] = pd.to_datetime(df['date'])
|
| 250 |
-
|
| 251 |
# Clean Data
|
| 252 |
df['district'] = df['district'].astype(str).str.strip()
|
| 253 |
df['state'] = df['state'].astype(str).str.strip()
|
| 254 |
-
|
| 255 |
# --- FIX DUPLICATE STATES ---
|
| 256 |
# Standardize State Names to remove variations (e.g., J&K)
|
| 257 |
state_mapping = {
|
|
@@ -263,13 +272,14 @@ def load_data():
|
|
| 263 |
'Pondicherry': 'Puducherry'
|
| 264 |
}
|
| 265 |
df['state'] = df['state'].replace(state_mapping)
|
| 266 |
-
|
| 267 |
# Get Unique Locations
|
| 268 |
-
unique_locs = list(
|
| 269 |
-
|
|
|
|
| 270 |
# Fetch Coordinates (Cached + Persistent JSON)
|
| 271 |
coords_db = fetch_coordinates_batch(unique_locs)
|
| 272 |
-
|
| 273 |
# Fallback Centers (State Capitals)
|
| 274 |
state_centers = {
|
| 275 |
'Andaman and Nicobar Islands': (11.7401, 92.6586), 'Andhra Pradesh': (15.9129, 79.7400),
|
|
@@ -285,31 +295,33 @@ def load_data():
|
|
| 285 |
'Telangana': (18.1124, 79.0193), 'Tripura': (23.9408, 91.9882), 'Uttar Pradesh': (26.8467, 80.9462),
|
| 286 |
'Uttarakhand': (30.0668, 79.0193), 'West Bengal': (22.9868, 87.8550)
|
| 287 |
}
|
| 288 |
-
|
| 289 |
def get_lat_lon(row):
|
| 290 |
key = (row['district'], row['state'])
|
| 291 |
-
|
| 292 |
# 1. Check Exact Match from API/Cache
|
| 293 |
if key in coords_db:
|
| 294 |
lat, lon = coords_db[key]
|
| 295 |
# Tiny jitter to separate stacked points
|
| 296 |
return pd.Series({'lat': lat + np.random.normal(0, 0.002), 'lon': lon + np.random.normal(0, 0.002)})
|
| 297 |
-
|
| 298 |
# 2. Fallback to State Center
|
| 299 |
center = state_centers.get(row['state'], (20.5937, 78.9629))
|
| 300 |
np.random.seed(hash(key) % 2**32)
|
| 301 |
return pd.Series({
|
| 302 |
-
'lat': center[0] + np.random.uniform(-0.5, 0.5),
|
| 303 |
'lon': center[1] + np.random.uniform(-0.5, 0.5)
|
| 304 |
})
|
| 305 |
|
| 306 |
coords = df.apply(get_lat_lon, axis=1)
|
| 307 |
df['lat'] = coords['lat']
|
| 308 |
df['lon'] = coords['lon']
|
| 309 |
-
|
| 310 |
-
df['risk_category'] = pd.cut(
|
|
|
|
| 311 |
return df
|
| 312 |
|
|
|
|
| 313 |
with st.spinner('Initializing S.T.A.R.K AI & Geocoding...'):
|
| 314 |
df = load_data()
|
| 315 |
|
|
@@ -317,30 +329,40 @@ with st.spinner('Initializing S.T.A.R.K AI & Geocoding...'):
|
|
| 317 |
with st.sidebar:
|
| 318 |
st.markdown("### π‘οΈ S.T.A.R.K AI Control")
|
| 319 |
st.markdown("---")
|
| 320 |
-
|
| 321 |
if not df.empty:
|
| 322 |
if 'date' in df.columns:
|
| 323 |
min_d, max_d = df['date'].min().date(), df['date'].max().date()
|
| 324 |
-
dr = st.date_input("Date Range", value=(
|
| 325 |
-
|
| 326 |
-
|
|
|
|
|
|
|
|
|
|
| 327 |
state_list = ['All'] + sorted(df['state'].unique().tolist())
|
| 328 |
sel_state = st.selectbox("State", state_list)
|
| 329 |
-
filtered_df = df[df['state'] ==
|
| 330 |
-
|
|
|
|
| 331 |
dist_list = ['All'] + sorted(filtered_df['district'].unique().tolist())
|
| 332 |
sel_dist = st.selectbox("District", dist_list)
|
| 333 |
-
if sel_dist != 'All':
|
| 334 |
-
|
|
|
|
| 335 |
st.markdown("---")
|
| 336 |
-
risk_filter = st.multiselect(
|
| 337 |
-
|
|
|
|
|
|
|
|
|
|
| 338 |
else:
|
| 339 |
filtered_df = pd.DataFrame()
|
| 340 |
-
|
| 341 |
st.markdown("---")
|
| 342 |
-
st.link_button("π Open Analysis Notebook",
|
| 343 |
-
|
|
|
|
|
|
|
| 344 |
|
| 345 |
# 6. HEADER & METRICS
|
| 346 |
col1, col2 = st.columns([3, 1])
|
|
@@ -348,62 +370,81 @@ with col1:
|
|
| 348 |
st.title("π‘οΈ S.T.A.R.K AI Dashboard")
|
| 349 |
st.markdown("**Context-Aware Fraud Detection & Prevention System**")
|
| 350 |
with col2:
|
| 351 |
-
st.markdown(
|
|
|
|
| 352 |
|
| 353 |
st.markdown("---")
|
| 354 |
|
| 355 |
if not filtered_df.empty:
|
| 356 |
m1, m2, m3, m4, m5 = st.columns(5)
|
| 357 |
-
total, high, crit = len(filtered_df), len(filtered_df[filtered_df['RISK_SCORE'] > 75]), len(
|
|
|
|
| 358 |
m1.metric("Total Centers", f"{total:,}", border=True)
|
| 359 |
-
m2.metric("High Risk", f"{high}", delta="Review",
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 363 |
else:
|
| 364 |
-
st.error(
|
|
|
|
| 365 |
|
| 366 |
st.markdown("##")
|
| 367 |
|
| 368 |
# 7. TABS
|
| 369 |
-
tab_map, tab_list, tab_charts, tab_insights = st.tabs(
|
|
|
|
| 370 |
|
| 371 |
with tab_map:
|
| 372 |
c_map, c_det = st.columns([3, 1])
|
| 373 |
with c_map:
|
| 374 |
if not filtered_df.empty:
|
| 375 |
# Dynamic Zoom based on selection
|
| 376 |
-
if sel_dist != 'All':
|
| 377 |
-
|
| 378 |
-
|
|
|
|
|
|
|
|
|
|
| 379 |
|
| 380 |
fig = px.scatter_mapbox(filtered_df, lat="lat", lon="lon", color="RISK_SCORE", size="total_activity",
|
| 381 |
-
|
| 382 |
-
|
| 383 |
-
|
| 384 |
-
|
| 385 |
-
|
| 386 |
-
|
|
|
|
| 387 |
st.plotly_chart(fig, use_container_width=True)
|
| 388 |
-
else:
|
| 389 |
-
|
|
|
|
| 390 |
with c_det:
|
| 391 |
st.subheader("π₯ Top Hotspots")
|
| 392 |
if not filtered_df.empty:
|
| 393 |
-
top = filtered_df.groupby('district').agg(
|
|
|
|
| 394 |
for i, (d, r) in enumerate(top.iterrows(), 1):
|
| 395 |
-
clr, bdg = ("#ef4444", "CRITICAL") if r['RISK_SCORE'] > 85 else (
|
| 396 |
-
|
|
|
|
|
|
|
| 397 |
|
| 398 |
with tab_list:
|
| 399 |
st.subheader("π― Priority Investigation")
|
| 400 |
if not filtered_df.empty:
|
| 401 |
-
targets = filtered_df[filtered_df['RISK_SCORE'] >
|
|
|
|
| 402 |
csv = targets.to_csv(index=False).encode('utf-8')
|
| 403 |
-
st.download_button("π₯ Export CSV", data=csv,
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
|
|
|
|
|
|
|
| 407 |
|
| 408 |
with tab_charts:
|
| 409 |
c1, c2 = st.columns(2)
|
|
@@ -411,32 +452,40 @@ with tab_charts:
|
|
| 411 |
st.markdown("**Ghost ID Detection**")
|
| 412 |
if not filtered_df.empty:
|
| 413 |
fig = px.scatter(filtered_df, x="total_activity", y="ratio_deviation", color="risk_category", size="RISK_SCORE",
|
| 414 |
-
|
| 415 |
fig.add_hline(y=0.2, line_dash="dash", line_color="red")
|
| 416 |
st.plotly_chart(fig, use_container_width=True)
|
| 417 |
with c2:
|
| 418 |
st.markdown("**Weekend Activity Analysis**")
|
| 419 |
if not filtered_df.empty:
|
| 420 |
-
wk_counts = filtered_df.groupby(
|
| 421 |
-
|
| 422 |
-
|
|
|
|
|
|
|
|
|
|
| 423 |
st.plotly_chart(fig, use_container_width=True)
|
| 424 |
|
| 425 |
with tab_insights:
|
| 426 |
st.subheader("π AI Detective Insights")
|
| 427 |
if not filtered_df.empty:
|
| 428 |
anom = filtered_df[filtered_df['ratio_deviation'] > 0.4]
|
| 429 |
-
st.info(
|
| 430 |
-
|
|
|
|
| 431 |
c_i1, c_i2 = st.columns(2)
|
| 432 |
with c_i1:
|
| 433 |
st.markdown("#### π¨ Primary Risk Factors")
|
| 434 |
-
st.markdown(
|
| 435 |
-
|
|
|
|
|
|
|
| 436 |
with c_i2:
|
| 437 |
st.markdown("#### π‘ Recommended Actions")
|
| 438 |
-
st.markdown(
|
| 439 |
-
|
|
|
|
|
|
|
| 440 |
|
| 441 |
st.markdown("---")
|
| 442 |
-
st.markdown("""<div style="text-align: center; font-size: 13px; color: #94a3b8;"><b>Project S.T.A.R.K AI</b> | UIDAI Hackathon 2026</div>""", unsafe_allow_html=True)
|
|
|
|
| 139 |
""", unsafe_allow_html=True)
|
| 140 |
|
| 141 |
# 3. DYNAMIC GEOCODING ENGINE WITH PERSISTENT JSON
|
| 142 |
+
|
| 143 |
+
|
| 144 |
@st.cache_data(show_spinner=False)
|
| 145 |
def fetch_coordinates_batch(unique_locations):
|
| 146 |
"""
|
|
|
|
| 163 |
d, s = k.split("|")
|
| 164 |
coords_map[(d, s)] = tuple(v)
|
| 165 |
except json.JSONDecodeError:
|
| 166 |
+
pass # File corrupted, start fresh
|
| 167 |
|
| 168 |
# 2. Add Hardcoded Pre-fills (High Priority Redundancy)
|
| 169 |
# These override if missing, but usually JSON is preferred source if present
|
|
|
|
| 190 |
for k, v in prefills.items():
|
| 191 |
if k not in coords_map:
|
| 192 |
coords_map[k] = v
|
| 193 |
+
|
| 194 |
# 3. Identify missing locations
|
| 195 |
missing_locs = [loc for loc in unique_locations if loc not in coords_map]
|
| 196 |
+
|
| 197 |
if not missing_locs:
|
| 198 |
return coords_map
|
| 199 |
|
| 200 |
# 4. Dynamic Fetching for missing
|
| 201 |
progress_text = "π‘ New locations found. Fetching coordinates..."
|
| 202 |
my_bar = st.progress(0, text=progress_text)
|
| 203 |
+
|
| 204 |
+
headers = {
|
| 205 |
+
'User-Agent': 'StarkDashboard/1.0 (Government Research Project)'}
|
| 206 |
updated = False
|
| 207 |
+
|
| 208 |
for i, (district, state) in enumerate(missing_locs):
|
| 209 |
try:
|
| 210 |
# Update Progress
|
| 211 |
+
my_bar.progress((i + 1) / len(missing_locs),
|
| 212 |
+
text=f"π Locating: {district}, {state}")
|
| 213 |
+
|
| 214 |
# API Call
|
| 215 |
query = f"{district}, {state}, India"
|
| 216 |
url = "https://nominatim.openstreetmap.org/search"
|
| 217 |
params = {'q': query, 'format': 'json', 'limit': 1}
|
| 218 |
+
|
| 219 |
+
response = requests.get(
|
| 220 |
+
url, params=params, headers=headers, timeout=5)
|
| 221 |
+
|
| 222 |
if response.status_code == 200 and response.json():
|
| 223 |
data = response.json()[0]
|
| 224 |
+
coords_map[(district, state)] = (
|
| 225 |
+
float(data['lat']), float(data['lon']))
|
| 226 |
updated = True
|
| 227 |
else:
|
| 228 |
+
pass # Fail silently, will fall back to state center logic later
|
| 229 |
+
|
| 230 |
# Respect Rate Limiting (1 request per second)
|
| 231 |
+
time.sleep(1.1)
|
| 232 |
+
|
| 233 |
except Exception as e:
|
| 234 |
continue
|
| 235 |
+
|
| 236 |
my_bar.empty()
|
| 237 |
+
|
| 238 |
# 5. Save back to JSON if new data fetched
|
| 239 |
if updated:
|
| 240 |
# Convert keys to string "District|State" for JSON compatibility
|
| 241 |
save_data = {f"{k[0]}|{k[1]}": v for k, v in coords_map.items()}
|
| 242 |
with open(json_file, 'w') as f:
|
| 243 |
json.dump(save_data, f)
|
| 244 |
+
|
| 245 |
return coords_map
|
| 246 |
|
| 247 |
# 4. MAIN DATA LOADER
|
| 248 |
+
|
| 249 |
+
|
| 250 |
@st.cache_data(ttl=300)
|
| 251 |
def load_data():
|
| 252 |
try:
|
| 253 |
df = pd.read_csv('analyzed_aadhaar_data.csv')
|
| 254 |
except FileNotFoundError:
|
| 255 |
+
return pd.DataFrame() # Return empty to trigger external error check
|
| 256 |
+
|
| 257 |
+
if 'date' in df.columns:
|
| 258 |
+
df['date'] = pd.to_datetime(df['date'])
|
| 259 |
|
|
|
|
|
|
|
| 260 |
# Clean Data
|
| 261 |
df['district'] = df['district'].astype(str).str.strip()
|
| 262 |
df['state'] = df['state'].astype(str).str.strip()
|
| 263 |
+
|
| 264 |
# --- FIX DUPLICATE STATES ---
|
| 265 |
# Standardize State Names to remove variations (e.g., J&K)
|
| 266 |
state_mapping = {
|
|
|
|
| 272 |
'Pondicherry': 'Puducherry'
|
| 273 |
}
|
| 274 |
df['state'] = df['state'].replace(state_mapping)
|
| 275 |
+
|
| 276 |
# Get Unique Locations
|
| 277 |
+
unique_locs = list(
|
| 278 |
+
df[['district', 'state']].drop_duplicates().itertuples(index=False, name=None))
|
| 279 |
+
|
| 280 |
# Fetch Coordinates (Cached + Persistent JSON)
|
| 281 |
coords_db = fetch_coordinates_batch(unique_locs)
|
| 282 |
+
|
| 283 |
# Fallback Centers (State Capitals)
|
| 284 |
state_centers = {
|
| 285 |
'Andaman and Nicobar Islands': (11.7401, 92.6586), 'Andhra Pradesh': (15.9129, 79.7400),
|
|
|
|
| 295 |
'Telangana': (18.1124, 79.0193), 'Tripura': (23.9408, 91.9882), 'Uttar Pradesh': (26.8467, 80.9462),
|
| 296 |
'Uttarakhand': (30.0668, 79.0193), 'West Bengal': (22.9868, 87.8550)
|
| 297 |
}
|
| 298 |
+
|
| 299 |
def get_lat_lon(row):
    """Resolve one dataframe row to a pd.Series with 'lat'/'lon'.

    Resolution order:
      1. Exact (district, state) hit in the geocoding cache, plus a tiny
         Gaussian jitter so stacked markers stay visually separable.
      2. State-capital fallback with a deterministic per-district offset,
         so ungeocoded districts land at a stable pseudo-position.
      3. Country centroid (India) when even the state is unknown.
    """
    import zlib  # stdlib; local import keeps this nested helper self-contained

    key = (row['district'], row['state'])

    # 1. Exact match from API/cache: jitter ~0.002 deg to separate stacked points.
    if key in coords_db:
        lat, lon = coords_db[key]
        return pd.Series({'lat': lat + np.random.normal(0, 0.002),
                          'lon': lon + np.random.normal(0, 0.002)})

    # 2. Fallback to state center (country centroid if the state is unknown).
    center = state_centers.get(row['state'], (20.5937, 78.9629))

    # BUG FIX: the original seeded NumPy's *global* RNG with `hash(key)`.
    # Python salts str/tuple hashes per process (PYTHONHASHSEED), so the
    # supposedly deterministic placement changed on every run, and reseeding
    # the global RNG clobbered unrelated random state. zlib.crc32 yields a
    # stable 32-bit seed, and a local Generator leaves global state alone.
    seed = zlib.crc32(f"{key[0]}|{key[1]}".encode('utf-8'))
    rng = np.random.default_rng(seed)
    return pd.Series({
        'lat': center[0] + rng.uniform(-0.5, 0.5),
        'lon': center[1] + rng.uniform(-0.5, 0.5)
    })
|
| 315 |
|
| 316 |
coords = df.apply(get_lat_lon, axis=1)
|
| 317 |
df['lat'] = coords['lat']
|
| 318 |
df['lon'] = coords['lon']
|
| 319 |
+
|
| 320 |
+
df['risk_category'] = pd.cut(
|
| 321 |
+
df['RISK_SCORE'], bins=[-1, 50, 75, 85, 100], labels=['Low', 'Medium', 'High', 'Critical'])
|
| 322 |
return df
|
| 323 |
|
| 324 |
+
|
| 325 |
with st.spinner('Initializing S.T.A.R.K AI & Geocoding...'):
|
| 326 |
df = load_data()
|
| 327 |
|
|
|
|
| 329 |
with st.sidebar:
    # Sidebar filter controls — every widget below narrows `filtered_df`,
    # which the rest of the dashboard renders from.
    st.markdown("### π‘οΈ S.T.A.R.K AI Control")
    st.markdown("---")

    if df.empty:
        # Nothing loaded: hand downstream sections an empty frame.
        filtered_df = pd.DataFrame()
    else:
        # Optional date-range filter (only when a 'date' column exists).
        if 'date' in df.columns:
            min_d = df['date'].min().date()
            max_d = df['date'].max().date()
            dr = st.date_input("Date Range", value=(min_d, max_d),
                               min_value=min_d, max_value=max_d)
            # A half-picked range has length 1; apply only once complete.
            if len(dr) == 2:
                start, end = dr
                in_range = (df['date'].dt.date >= start) & (df['date'].dt.date <= end)
                df = df[in_range]

        # State selector.
        state_list = ['All'] + sorted(df['state'].unique().tolist())
        sel_state = st.selectbox("State", state_list)
        if sel_state == 'All':
            filtered_df = df.copy()
        else:
            filtered_df = df[df['state'] == sel_state]

        # District selector (choices depend on the state picked above).
        dist_list = ['All'] + sorted(filtered_df['district'].unique().tolist())
        sel_dist = st.selectbox("District", dist_list)
        if sel_dist != 'All':
            filtered_df = filtered_df[filtered_df['district'] == sel_dist]

        st.markdown("---")
        # Risk-level filter; an empty selection means "show everything".
        risk_filter = st.multiselect(
            "Risk Level", ['Low', 'Medium', 'High', 'Critical'],
            default=['High', 'Critical'])
        if risk_filter:
            filtered_df = filtered_df[filtered_df['risk_category'].isin(risk_filter)]

    st.markdown("---")
    st.link_button("π Open Analysis Notebook",
                   "https://colab.research.google.com/drive/1YAQ4nfxltvG_cts3fmGc_zi2JQc4oPOT?usp=sharing",
                   use_container_width=True)
    st.info(
        f"**User:** UIDAI_Officer\n\n**Team:** UIDAI_4571\n\n**Update:** {datetime.now().strftime('%H:%M:%S')}")
|
| 366 |
|
| 367 |
# 6. HEADER & METRICS
|
| 368 |
col1, col2 = st.columns([3, 1])
|
|
|
|
| 370 |
st.title("π‘οΈ S.T.A.R.K AI Dashboard")
|
| 371 |
st.markdown("**Context-Aware Fraud Detection & Prevention System**")
|
| 372 |
with col2:
|
| 373 |
+
st.markdown(
|
| 374 |
+
f"""<div style="text-align: right; padding-top: 20px;"><span class="status-badge bg-green">β System Online</span><div style="font-size: 12px; color: #64748b; margin-top: 8px;">{datetime.now().strftime('%d %b %Y')}</div></div>""", unsafe_allow_html=True)
|
| 375 |
|
| 376 |
st.markdown("---")
|
| 377 |
|
| 378 |
# KPI metric row — five headline counters for the current filter selection.
if not filtered_df.empty:
    m1, m2, m3, m4, m5 = st.columns(5)
    # Risk buckets: "high" is any score above 75, "critical" above 85.
    total = len(filtered_df)
    high = len(filtered_df[filtered_df['RISK_SCORE'] > 75])
    crit = len(filtered_df[filtered_df['RISK_SCORE'] > 85])
    m1.metric("Total Centers", f"{total:,}", border=True)
    m2.metric("High Risk", f"{high}", delta="Review",
              delta_color="inverse", border=True)
    m3.metric("Critical", f"{crit}", delta="Urgent",
              delta_color="inverse", border=True)
    # FIX: removed the dead `if not filtered_df.empty else "0"` ternary —
    # this branch already guarantees a non-empty frame.
    m4.metric("Avg Risk", f"{filtered_df['RISK_SCORE'].mean():.1f}/100", border=True)
    weekend_spikes = len(filtered_df[(filtered_df['is_weekend'] == 1)
                                     & (filtered_df['RISK_SCORE'] > 70)])
    m5.metric("Weekend Spikes", f"{weekend_spikes}",
              delta="Suspicious", delta_color="off", border=True)
else:
    # FIX: the old message blamed a missing CSV, but this branch also fires
    # when the sidebar filters simply exclude every row.
    st.error(
        "β No records to display: 'analyzed_aadhaar_data.csv' is missing "
        "or the current filters exclude all rows.")
|
| 394 |
|
| 395 |
st.markdown("##")
|
| 396 |
|
| 397 |
# 7. TABS
|
| 398 |
+
tab_map, tab_list, tab_charts, tab_insights = st.tabs(
|
| 399 |
+
["πΊοΈ Geographic Risk", "π Priority List", "π Patterns", "π AI Insights"])
|
| 400 |
|
| 401 |
with tab_map:
    # Geographic risk view: interactive heatmap (left) + top-5 hotspots (right).
    c_map, c_det = st.columns([3, 1])

    with c_map:
        if filtered_df.empty:
            st.info("Waiting for data...")
        else:
            # Zoom in as the selection narrows: country -> state -> district.
            if sel_dist != 'All':
                zoom_lvl = 10
            elif sel_state != 'All':
                zoom_lvl = 6
            else:
                zoom_lvl = 3.8

            # Re-center on the filtered points unless showing all of India.
            if sel_state == 'All':
                map_center = None
            else:
                map_center = {"lat": filtered_df['lat'].mean(),
                              "lon": filtered_df['lon'].mean()}

            fig = px.scatter_mapbox(
                filtered_df, lat="lat", lon="lon",
                color="RISK_SCORE", size="total_activity",
                color_continuous_scale=["#22c55e", "#fbbf24", "#f97316", "#ef4444"],
                size_max=25, zoom=zoom_lvl, center=map_center,
                hover_name="district",
                hover_data={"state": True, "pincode": True, "lat": False, "lon": False},
                mapbox_style="carto-positron", height=650,
                title="<b>Live Fraud Risk Heatmap</b>")
            fig.update_layout(margin={"r": 0, "t": 40, "l": 0, "b": 0})
            st.plotly_chart(fig, use_container_width=True)

    with c_det:
        st.subheader("π₯ Top Hotspots")
        if not filtered_df.empty:
            # Five worst districts by mean risk (activity summed for display).
            top = (filtered_df.groupby('district')
                   .agg({'RISK_SCORE': 'mean', 'total_activity': 'sum'})
                   .sort_values('RISK_SCORE', ascending=False)
                   .head(5))
            for rank, (district, r) in enumerate(top.iterrows(), 1):
                # FIX: the original also built an unused badge label ("CRITICAL"/
                # "HIGH") here; only the color is rendered.
                clr = "#ef4444" if r['RISK_SCORE'] > 85 else "#f97316"
                st.markdown(
                    f"""<div class="hotspot-card" style="border-left-color: {clr};"><b>#{rank} {district}</b><br><span style="font-size:12px;color:#64748b">Risk: <b style="color:{clr}">{r['RISK_SCORE']:.1f}</b> | Act: {int(r['total_activity'])}</span></div>""", unsafe_allow_html=True)
|
| 435 |
|
| 436 |
with tab_list:
    # Priority list: every center above risk 75, worst first, exportable.
    st.subheader("π― Priority Investigation")
    if filtered_df.empty:
        st.info("Waiting for data...")
    else:
        high_risk_mask = filtered_df['RISK_SCORE'] > 75
        targets = filtered_df[high_risk_mask].sort_values('RISK_SCORE', ascending=False)

        # Offer the full target table as a CSV download.
        csv = targets.to_csv(index=False).encode('utf-8')
        st.download_button("π₯ Export CSV", data=csv,
                           file_name="stark_priority.csv",
                           mime="text/csv", type="primary")

        display_cols = ['date', 'state', 'district', 'pincode',
                        'enrol_adult', 'total_activity', 'RISK_SCORE']
        risk_col = st.column_config.ProgressColumn(
            "Risk", format="%.1f%%", min_value=0, max_value=100)
        st.dataframe(targets[display_cols],
                     column_config={"RISK_SCORE": risk_col},
                     use_container_width=True, hide_index=True)
|
| 448 |
|
| 449 |
with tab_charts:
|
| 450 |
c1, c2 = st.columns(2)
|
|
|
|
| 452 |
st.markdown("**Ghost ID Detection**")
|
| 453 |
if not filtered_df.empty:
|
| 454 |
fig = px.scatter(filtered_df, x="total_activity", y="ratio_deviation", color="risk_category", size="RISK_SCORE",
|
| 455 |
+
color_discrete_map={'Critical': '#ef4444', 'High': '#f97316', 'Medium': '#eab308', 'Low': '#22c55e'}, height=350)
|
| 456 |
fig.add_hline(y=0.2, line_dash="dash", line_color="red")
|
| 457 |
st.plotly_chart(fig, use_container_width=True)
|
| 458 |
with c2:
|
| 459 |
st.markdown("**Weekend Activity Analysis**")
|
| 460 |
if not filtered_df.empty:
|
| 461 |
+
wk_counts = filtered_df.groupby(
|
| 462 |
+
'is_weekend')['total_activity'].sum().reset_index()
|
| 463 |
+
wk_counts['Type'] = wk_counts['is_weekend'].map(
|
| 464 |
+
{0: 'Weekday', 1: 'Weekend'})
|
| 465 |
+
fig = px.bar(wk_counts, x='Type', y='total_activity', color='Type', color_discrete_map={
|
| 466 |
+
'Weekday': '#3b82f6', 'Weekend': '#ef4444'}, height=350)
|
| 467 |
st.plotly_chart(fig, use_container_width=True)
|
| 468 |
|
| 469 |
with tab_insights:
    # Narrative summary panel driven by the filtered data.
    st.subheader("π AI Detective Insights")
    if not filtered_df.empty:
        # Centers whose enrollment ratio deviates strongly from the norm.
        anom = filtered_df[filtered_df['ratio_deviation'] > 0.4]
        st.info(
            f"π€ **AI Analysis:** Detected {len(anom)} centers with statistically significant enrollment deviations (> 2Ο from mean).")

        c_i1, c_i2 = st.columns(2)
        with c_i1:
            st.markdown("#### π¨ Primary Risk Factors")
            st.markdown("- **High Volume on Weekends:** 28% correlation with fraud")
            st.markdown("- **Adult Enrollment Spikes:** 45% correlation with ghost IDs")
        with c_i2:
            st.markdown("#### π‘ Recommended Actions")
            audit_count = len(filtered_df[filtered_df['RISK_SCORE'] > 90])
            st.markdown(
                f"1. Immediate audit of {audit_count} centers with >90 Risk Score")
            st.markdown("2. Deploy biometric re-verification for 'Rural A' cluster")
|
| 489 |
|
| 490 |
st.markdown("---")
|
| 491 |
+
st.markdown("""<div style="text-align: center; font-size: 13px; color: #94a3b8;"><b>Project S.T.A.R.K AI</b> | UIDAI Hackathon 2026</div>""", unsafe_allow_html=True)
|