# smart-election-verification / streamlit_app.py
# Uploaded by selvaneyas (commit 50a9866, "Upload 5 files")
import streamlit as st
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
from src.siamese_model import build_siamese_model
from src.inference import verify_images
# ==============================
# PAGE CONFIG
# ==============================
st.set_page_config(
page_title="Smart Election Verification System",
page_icon="πŸ—³οΈ",
layout="centered"
)
st.title("πŸ—³οΈ Multimodal Biometric verification System")
# ==============================
# GLOBAL CONSTANTS
# ==============================
IMG_SIZE = 128
THRESHOLD = 0.5
# ==============================
# MODE SELECTION (MUST BE FIRST)
# ==============================
MODE = st.selectbox(
"Select Verification Mode",
["Iris Verification", "Fingerprint Verification"]
)
# ==============================
# MODEL LOADING
# ==============================
# Pick the weight file matching the selected biometric modality.
model_path = (
    "models/iris_siamese.keras"
    if MODE == "Iris Verification"
    else "models/finger_siamese.keras"
)
@st.cache_resource
def load_model(path):
    """Build the Siamese network and load the weights stored at *path*.

    Cached by Streamlit so the model is constructed once per weight file,
    not on every rerun of the script.
    """
    net = build_siamese_model()
    net.load_weights(path)
    return net


model = load_model(model_path)
# ==============================
# SIDEBAR – DATASET INSPECTOR
# ==============================
st.sidebar.header("📂 Dataset Inspector")
csv_path = (
    "pairs/iris_pairs.csv"
    if MODE == "Iris Verification"
    else "pairs/finger_pairs.csv"
)
if st.sidebar.checkbox("View Pair Dataset"):
    df = pd.read_csv(csv_path)
    st.subheader("📊 Full Pair Dataset")
    st.write(f"Total Pairs: {len(df)}")
    st.write(
        f"Genuine: {df['label'].sum()} | "
        f"Impostor: {len(df) - df['label'].sum()}"
    )
    # ===== INTERACTIVE TABLE =====
    # on_select="rerun" makes st.dataframe return the current selection state.
    selection = st.dataframe(
        df,
        use_container_width=True,
        selection_mode="single-row",
        on_select="rerun"
    )
    # ===== ROW CLICK → IMAGE VIEW =====
    if selection and selection["selection"]["rows"]:
        row_idx = selection["selection"]["rows"][0]
        row = df.iloc[row_idx]
        img1 = cv2.imread(row["img1"], cv2.IMREAD_GRAYSCALE)
        img2 = cv2.imread(row["img2"], cv2.IMREAD_GRAYSCALE)
        # cv2.imread returns None (no exception) when a path is missing or
        # unreadable; guard so a stale CSV row does not crash st.image.
        if img1 is None or img2 is None:
            st.error(
                "Could not load one or both images for this pair - "
                "check the file paths in the CSV."
            )
        else:
            st.subheader("🖼️ Selected Pair Visualization")
            st.image(
                [img1, img2],
                caption=["Image 1", "Image 2"],
                width=220
            )
            st.info(
                "Label: "
                + ("✅ Same Person" if row["label"] == 1 else "❌ Different Person")
            )
# st.sidebar.header("πŸ“‚ Dataset Inspector")
# df = None # SAFE DEFAULT
# if st.sidebar.checkbox("View Pair Dataset"):
# csv_path = (
# "pairs/iris_pairs.csv"
# if MODE == "Iris Verification"
# else "pairs/finger_pairs.csv"
# )
# df = pd.read_csv(csv_path)
# st.subheader("πŸ” Dataset Preview")
# st.write(f"Total Pairs: {len(df)}")
# st.write(f"Genuine: {df['label'].sum()} | Impostor: {len(df) - df['label'].sum()}")
# st.dataframe(df.head(20), use_container_width=True)
# if df is not None and st.sidebar.checkbox("View Sample Image Pairs"):
# sample = df.sample(1).iloc[0]
# img1 = cv2.imread(sample["img1"], cv2.IMREAD_GRAYSCALE)
# img2 = cv2.imread(sample["img2"], cv2.IMREAD_GRAYSCALE)
# st.image([img1, img2], caption=["Image 1", "Image 2"], width=200)
# st.write("Label:", "Same Person" if sample["label"] == 1 else "Different Person")
# ==============================
# SIDEBAR – MODEL INFO
# ==============================
def get_model_summary(model):
    """Capture the text emitted by ``model.summary()`` as one string.

    Keras prints the summary line-by-line through ``print_fn``; collecting
    each line and joining with newlines yields the full report.
    """
    captured = []
    model.summary(print_fn=captured.append)
    return "\n".join(captured)
st.divider()
st.subheader("🧠 Model Information")
show_model_info = st.checkbox("Show Model Information")
if show_model_info:
    # ===== Model Architecture =====
    with st.expander("📐 Architecture Summary", expanded=False):
        st.code(get_model_summary(model), language="text")
    # ===== Model Statistics =====
    with st.expander("📊 Model Statistics", expanded=True):
        col1, col2 = st.columns(2)
        with col1:
            st.metric("Input Shape", "128 × 128 × 1")
            st.metric("Embedding Size", "128")
        with col2:
            st.metric("Total Parameters", f"{model.count_params():,}")
            st.metric(
                "Trainable Parameters",
                f"{sum(tf.size(w).numpy() for w in model.trainable_weights):,}"
            )
if st.sidebar.checkbox("Show Model Stats"):
    st.write("**Input Shape:**", model.input_shape)
    st.write("**Output Shape:**", model.output_shape)
    st.write("**Parameters:**", model.count_params())
if st.checkbox("Show Model Diagram"):
    # plot_model needs the optional pydot + graphviz packages and raises
    # ImportError when they are missing; keep this feature best-effort
    # instead of crashing the whole app over an optional diagram.
    try:
        tf.keras.utils.plot_model(
            model,
            show_shapes=True,
            expand_nested=True,
            to_file="siamese_architecture.png"
        )
        st.image("siamese_architecture.png", caption="Siamese Network Architecture")
    except ImportError as e:
        st.warning(f"Model diagram unavailable: {e}. Install pydot and graphviz.")
# ==============================
# SIDEBAR – SYSTEM STATUS
# ==============================
st.sidebar.header("βš™οΈ System Status")
st.sidebar.write("Mode:", MODE)
st.sidebar.write("Threshold:", THRESHOLD)
st.sidebar.write("Model Loaded:", "βœ… Yes")
st.sidebar.write("Framework:", "TensorFlow + Streamlit")
# ==============================
# IMAGE UPLOAD & VERIFICATION
# ==============================
st.subheader("πŸ” Identity Verification")
img1_file = st.file_uploader("Upload Registered Image", type=["jpg", "png"])
img2_file = st.file_uploader("Upload Live Image", type=["jpg", "png"])
def preprocess(img):
    """Prepare a grayscale image for the Siamese network.

    Resizes to (IMG_SIZE, IMG_SIZE), scales pixel values into [0, 1],
    then adds batch and channel axes -> shape (1, IMG_SIZE, IMG_SIZE, 1).
    """
    resized = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
    normalized = resized / 255.0
    return normalized[np.newaxis, :, :, np.newaxis]
if img1_file and img2_file:
    img1 = cv2.imdecode(
        np.frombuffer(img1_file.read(), np.uint8),
        cv2.IMREAD_GRAYSCALE
    )
    img2 = cv2.imdecode(
        np.frombuffer(img2_file.read(), np.uint8),
        cv2.IMREAD_GRAYSCALE
    )
    # cv2.imdecode returns None (no exception) for corrupt or unsupported
    # files; bail out explicitly instead of crashing inside cv2.resize.
    if img1 is None or img2 is None:
        st.error(
            "❌ Could not decode one or both uploaded images. "
            "Please upload valid JPG/PNG files."
        )
    else:
        p1 = preprocess(img1)
        p2 = preprocess(img2)
        # Siamese model emits a single similarity score in a (1, 1) array.
        similarity = model.predict([p1, p2])[0][0]
        verified = similarity > THRESHOLD
        st.image([img1, img2], caption=["Registered", "Live"], width=200)
        st.metric("Similarity Score", f"{similarity:.4f}")
        if verified:
            st.success("✅ VERIFIED – VOTE ALLOWED")
        else:
            st.error("❌ NOT VERIFIED – ACCESS DENIED")