File size: 4,894 Bytes
69105f1
23931f0
 
69105f1
 
 
 
23931f0
 
 
d21f501
 
 
 
 
 
23931f0
2096ae9
2bf0a6d
 
 
 
 
 
2096ae9
 
 
 
2bf0a6d
2096ae9
2bf0a6d
2096ae9
 
 
 
 
 
2bf0a6d
 
 
 
 
 
69105f1
2096ae9
d21f501
 
 
69105f1
 
 
 
 
 
 
d21f501
69105f1
d21f501
69105f1
 
 
 
 
23931f0
69105f1
 
 
 
 
 
 
 
 
 
23931f0
d21f501
69105f1
23931f0
 
69105f1
 
 
 
 
 
 
23931f0
 
69105f1
 
 
 
 
23931f0
d21f501
23931f0
d21f501
 
 
 
 
 
 
 
23931f0
d21f501
 
 
2096ae9
 
d21f501
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23931f0
d21f501
 
23931f0
d21f501
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons, make_circles, make_blobs
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.keras import layers, models

# -------------------------------
# Page Config
# -------------------------------
# NOTE: st.set_page_config must be the first Streamlit command executed;
# "wide" layout uses the full browser width for the plots.
st.set_page_config(page_title="Neural Network Playground", layout="wide")

# -------------------------------
# Styling
# -------------------------------
# Inject custom CSS for the app background and the title/subtitle classes
# referenced by the st.markdown headers in the main section below.
st.markdown(
    """
    <style>
    .stApp {
        background-color: #f5f5f5;
    }
    .main-title {
        color: #202020;
        font-size: 40px;
        font-weight: 800;
        font-family: 'Segoe UI', sans-serif;
        margin-bottom: 10px;
    }
    .subtitle {
        color: #333333;
        font-size: 18px;
        font-weight: 400;
        margin-top: -10px;
        font-family: 'Segoe UI', sans-serif;
    }
    </style>
    """,
    unsafe_allow_html=True
)



# -------------------------------
# Helper Functions
# -------------------------------
def generate_data(dataset, test_size):
    """Generate a 2-D synthetic binary-classification dataset and split it.

    Parameters
    ----------
    dataset : str
        "moons" or "circles"; any other value falls back to blobs.
    test_size : float
        NOTE(review): despite the name, this receives the TRAIN fraction —
        the caller passes the "Train-Test Split" slider value and the split
        uses ``1 - test_size`` as the held-out share. Name kept to preserve
        the signature.

    Returns
    -------
    tuple
        (X_train, X_test, y_train, y_test), features standardized.
    """
    if dataset == "moons":
        X, y = make_moons(n_samples=1000, noise=0.2, random_state=42)
    elif dataset == "circles":
        X, y = make_circles(n_samples=1000, noise=0.2, factor=0.5, random_state=42)
    else:
        X, y = make_blobs(n_samples=1000, centers=2, cluster_std=1.5, random_state=42)

    # Split BEFORE scaling, then fit the scaler on the training data only:
    # fitting StandardScaler on the full dataset leaks test-set statistics
    # (mean/variance) into training.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=1 - test_size, random_state=42
    )
    scaler = StandardScaler().fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test), y_train, y_test

def build_model(activation, learning_rate):
    """Construct and compile a small fully-connected binary classifier.

    Architecture: 2 inputs -> Dense(10) -> Dense(10) -> Dense(1, sigmoid),
    optimized with Adam at the given learning rate using binary
    cross-entropy loss and accuracy as the reported metric.
    """
    net = models.Sequential()
    net.add(layers.Dense(10, input_shape=(2,), activation=activation))
    net.add(layers.Dense(10, activation=activation))
    net.add(layers.Dense(1, activation='sigmoid'))
    net.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss='binary_crossentropy',
        metrics=['accuracy'],
    )
    return net

def plot_decision_boundary(model, X, y):
    """Render the model's predicted class surface over the 2-D data.

    Evaluates the model on a 200x200 grid padded 1 unit beyond the data
    extent, draws the probability field as a filled contour, overlays the
    samples colored by label, and pushes the figure to Streamlit.
    """
    pad = 1
    axis_x = np.linspace(X[:, 0].min() - pad, X[:, 0].max() + pad, 200)
    axis_y = np.linspace(X[:, 1].min() - pad, X[:, 1].max() + pad, 200)
    xx, yy = np.meshgrid(axis_x, axis_y)
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    probs = model.predict(grid_points, verbose=0).reshape(xx.shape)

    plt.contourf(xx, yy, probs, cmap="RdBu", alpha=0.6)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap="RdBu", edgecolors='white')
    plt.title("Decision Boundary")
    st.pyplot(plt.gcf())
    plt.clf()  # reset pyplot's implicit figure for the next plot

def plot_loss(history):
    """Plot training vs. validation loss curves from a Keras History object."""
    for key, label in (("loss", 'Train Loss'), ("val_loss", 'Test Loss')):
        plt.plot(history.history[key], label=label)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.title("Training vs Testing Error")
    st.pyplot(plt.gcf())
    plt.clf()  # clear the implicit figure so later plots start fresh

# -------------------------------
# Sidebar Inputs
# -------------------------------
# All hyperparameters are collected in the sidebar; training only starts
# when the button below is pressed (see the `if train_button:` gate).
with st.sidebar:
    st.header("πŸ”§ Hyperparameters")
    dataset = st.selectbox("Select Dataset", ["moons", "circles", "blobs"])
    learning_rate = st.number_input("Learning Rate", value=0.01, format="%.4f")
    activation = st.selectbox("Activation Function", ["relu", "sigmoid", "tanh"])
    # Fraction of the data used for TRAINING (0.5–0.9, default 0.7);
    # generate_data converts it to a test fraction via 1 - split_ratio.
    split_ratio = st.slider("Train-Test Split", 0.5, 0.9, 0.7)
    batch_size = st.number_input("Batch Size", value=32, step=16)
    train_button = st.button("πŸš€ Train Model")

# -------------------------------
# Main App
# -------------------------------
# Title/subtitle use the CSS classes injected by the styling block above.
st.markdown("<h1 class='main-title'>🧠 Neural Network Playground</h1>", unsafe_allow_html=True)
st.markdown("<p class='subtitle'>Interactively explore how neural networks learn decision boundaries with different hyperparameters and synthetic datasets.</p>", unsafe_allow_html=True)

# Streamlit reruns the whole script on every widget change; training is
# gated on the button so it only happens on an explicit click.
if train_button:
    with st.spinner("Training the neural network..."):
        # Generate data
        X_train, X_test, y_train, y_test = generate_data(dataset, split_ratio)

        # Build and train model (fixed 50 epochs; validation on the test set)
        model = build_model(activation, learning_rate)
        history = model.fit(X_train, y_train, epochs=50, batch_size=batch_size,
                            validation_data=(X_test, y_test), verbose=0)

        # Evaluation
        loss, accuracy = model.evaluate(X_test, y_test, verbose=0)

        # Display accuracy
        st.metric("πŸ“Š Test Accuracy", f"{accuracy * 100:.2f}%")

        # Tabs for output
        tab1, tab2 = st.tabs(["🧭 Decision Boundary", "πŸ“‰ Training vs Testing Loss"])
        with tab1:
            # Plot the boundary over the full dataset (train + test combined)
            X_all = np.vstack((X_train, X_test))
            y_all = np.concatenate((y_train, y_test))
            plot_decision_boundary(model, X_all, y_all)

        with tab2:
            plot_loss(history)

        # Expandable section for model summary
        with st.expander("πŸ“œ View Model Summary"):
            # Redirect Keras's summary printer into the Streamlit page
            model.summary(print_fn=lambda x: st.text(x))