# Neural Network Playground — interactive Streamlit app.
# (Removed "Spaces: / Sleeping / Sleeping" residue scraped from the hosting page UI.)
# Third-party dependencies, grouped per PEP 8: app framework, scientific
# stack, then the ML libraries. (Original lines were wrapped in markdown
# table pipes, which made the file invalid Python — restored here.)
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
import tensorflow as tf
from sklearn.datasets import make_blobs, make_circles, make_moons
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras import layers, models
# -------------------------------
# Page Config
# -------------------------------
st.set_page_config(page_title="Neural Network Playground", layout="wide")

# -------------------------------
# Styling
# -------------------------------
# Inject a small CSS theme: a light app background plus the .main-title /
# .subtitle classes used by the HTML headers rendered further down.
st.markdown(
    """
    <style>
    .stApp {
        background-color: #f5f5f5;
    }
    .main-title {
        color: #202020;
        font-size: 40px;
        font-weight: 800;
        font-family: 'Segoe UI', sans-serif;
        margin-bottom: 10px;
    }
    .subtitle {
        color: #333333;
        font-size: 18px;
        font-weight: 400;
        margin-top: -10px;
        font-family: 'Segoe UI', sans-serif;
    }
    </style>
    """,
    unsafe_allow_html=True,
)
| # ------------------------------- | |
| # Helper Functions | |
| # ------------------------------- | |
def generate_data(dataset, train_size):
    """Create a 2-D synthetic binary-classification dataset and split it.

    Parameters
    ----------
    dataset : str
        "moons" or "circles"; any other value falls back to two blobs.
    train_size : float
        Fraction of the 1000 samples used for training (the sidebar
        slider value). The original parameter was named ``test_size`` but
        was used as a train fraction (``test_size=1 - test_size``) —
        renamed for clarity; the caller passes it positionally.

    Returns
    -------
    tuple
        ``(X_train, X_test, y_train, y_test)`` with standardized features.
    """
    if dataset == "moons":
        X, y = make_moons(n_samples=1000, noise=0.2, random_state=42)
    elif dataset == "circles":
        X, y = make_circles(n_samples=1000, noise=0.2, factor=0.5, random_state=42)
    else:
        X, y = make_blobs(n_samples=1000, centers=2, cluster_std=1.5, random_state=42)
    # Split BEFORE scaling and fit the scaler on the training set only:
    # fitting StandardScaler on the full dataset (as the original did)
    # leaks test-set statistics into training.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=train_size, random_state=42
    )
    scaler = StandardScaler().fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test), y_train, y_test
def build_model(activation, learning_rate):
    """Build and compile a small 2-10-10-1 binary classifier.

    Parameters
    ----------
    activation : str
        Hidden-layer activation ("relu", "sigmoid" or "tanh").
    learning_rate : float
        Learning rate for the Adam optimizer.

    Returns
    -------
    tf.keras.Model
        A compiled Sequential model with sigmoid output and
        binary-crossentropy loss.
    """
    # Explicit Input layer instead of the deprecated `input_shape=` kwarg
    # on Dense (removed in Keras 3); the architecture is identical.
    model = models.Sequential([
        layers.Input(shape=(2,)),
        layers.Dense(10, activation=activation),
        layers.Dense(10, activation=activation),
        layers.Dense(1, activation='sigmoid'),
    ])
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(optimizer=optimizer, loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
def plot_decision_boundary(model, X, y):
    """Render the model's decision surface over the 2-D feature plane.

    Evaluates the model on a 200x200 grid spanning the data (with a
    1-unit margin), draws the predicted probability as a filled contour,
    and overlays the labeled points. Renders into the Streamlit app.
    """
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
                         np.linspace(y_min, y_max, 200))
    grid = np.c_[xx.ravel(), yy.ravel()]
    preds = model.predict(grid, verbose=0).reshape(xx.shape)
    # Use an explicit Figure instead of pyplot's implicit global figure:
    # st.pyplot() on the global figure is deprecated in Streamlit and the
    # global state is not safe across script reruns.
    fig, ax = plt.subplots()
    ax.contourf(xx, yy, preds, cmap="RdBu", alpha=0.6)
    ax.scatter(X[:, 0], X[:, 1], c=y, cmap="RdBu", edgecolors='white')
    ax.set_title("Decision Boundary")
    st.pyplot(fig)
    plt.close(fig)  # release the figure rather than clearing global state
def plot_loss(history):
    """Plot training vs. validation loss curves from a Keras History.

    Parameters
    ----------
    history : tf.keras.callbacks.History
        The object returned by ``model.fit`` (must contain 'loss' and
        'val_loss' — i.e. fit was called with validation_data).
    """
    # Explicit Figure for the same reason as plot_decision_boundary:
    # st.pyplot() with the implicit global figure is deprecated.
    fig, ax = plt.subplots()
    ax.plot(history.history['loss'], label='Train Loss')
    ax.plot(history.history['val_loss'], label='Test Loss')
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Loss')
    ax.legend()
    ax.set_title("Training vs Testing Error")
    st.pyplot(fig)
    plt.close(fig)
# -------------------------------
# Sidebar Inputs
# -------------------------------
# NOTE(review): the emoji in the labels below were mojibake ("π§", "π")
# in the scraped source — restored to plausible glyphs; confirm against
# the live app before shipping.
with st.sidebar:
    st.header("🔧 Hyperparameters")
    dataset = st.selectbox("Select Dataset", ["moons", "circles", "blobs"])
    learning_rate = st.number_input("Learning Rate", value=0.01, format="%.4f")
    activation = st.selectbox("Activation Function", ["relu", "sigmoid", "tanh"])
    # Slider value is the TRAIN fraction (0.5–0.9, default 0.7).
    split_ratio = st.slider("Train-Test Split", 0.5, 0.9, 0.7)
    batch_size = st.number_input("Batch Size", value=32, step=16)
    train_button = st.button("🚀 Train Model")
# -------------------------------
# Main App
# -------------------------------
# NOTE(review): emoji below were mojibake in the scraped source and the
# original indentation was lost — glyphs and block structure restored to
# the most plausible reading; confirm against the live app.
st.markdown("<h1 class='main-title'>🧠 Neural Network Playground</h1>", unsafe_allow_html=True)
st.markdown("<p class='subtitle'>Interactively explore how neural networks learn decision boundaries with different hyperparameters and synthetic datasets.</p>", unsafe_allow_html=True)

if train_button:
    with st.spinner("Training the neural network..."):
        # Generate data (split_ratio is the train fraction).
        X_train, X_test, y_train, y_test = generate_data(dataset, split_ratio)

        # Build and train model.
        model = build_model(activation, learning_rate)
        history = model.fit(X_train, y_train, epochs=50, batch_size=batch_size,
                            validation_data=(X_test, y_test), verbose=0)

        # Evaluate on the held-out split.
        loss, accuracy = model.evaluate(X_test, y_test, verbose=0)

    # Display accuracy.
    st.metric("📊 Test Accuracy", f"{accuracy * 100:.2f}%")

    # Tabs for output.
    tab1, tab2 = st.tabs(["🧠 Decision Boundary", "📉 Training vs Testing Loss"])
    with tab1:
        # Plot over the full dataset so every labeled point is shown
        # against the learned boundary.
        X_all = np.vstack((X_train, X_test))
        y_all = np.concatenate((y_train, y_test))
        plot_decision_boundary(model, X_all, y_all)
    with tab2:
        plot_loss(history)

    # Expandable section for model summary.
    with st.expander("📄 View Model Summary"):
        model.summary(print_fn=lambda x: st.text(x))