TensorPlayground / src /streamlit_app.py
Pasham123's picture
Update src/streamlit_app.py
170f9f3 verified
import altair as alt
import pandas as pd
import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_classification, make_moons, make_circles
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from mlxtend.plotting import plot_decision_regions
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
# Streamlit page configuration
st.set_page_config(page_title="DeepNet Playground", layout="wide", page_icon="πŸš€")
# Apply seaborn styling
sns.set_style("whitegrid")
# Helper function
def create_dataset(dataset_type, n_samples, noise, random_state=42,
n_features=2, n_informative=2, n_redundant=0,
n_clusters_per_class=1, class_sep=1.0):
if dataset_type == 'Classification':
X, y = make_classification(
n_samples=n_samples,
n_features=n_features,
n_informative=n_informative,
n_redundant=n_redundant,
n_clusters_per_class=n_clusters_per_class,
class_sep=class_sep,
n_classes=2,
random_state=random_state
)
elif dataset_type == 'Moons':
X, y = make_moons(n_samples=n_samples, noise=noise, random_state=random_state)
elif dataset_type == 'Circles':
X, y = make_circles(n_samples=n_samples, noise=noise, factor=0.5, random_state=random_state)
else:
X, y = None, None
return X, y
def build_model(input_dim, layer_configs):
model = Sequential()
model.add(Dense(layer_configs[0]['neurons'], activation=layer_configs[0]['activation'], input_dim=input_dim))
for config in layer_configs[1:]:
model.add(Dense(config['neurons'], activation=config['activation']))
model.add(Dense(1, activation='sigmoid'))
return model
# Title Section
st.markdown("<h1 style='text-align: center; color: #ff4b4b;'> DeepNet Playground </h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center;'>Create your own dataset, build a neural network, train it live and visualize!</p>", unsafe_allow_html=True)
st.markdown("---")
# Sidebar
with st.sidebar:
st.markdown("## πŸŽ›οΈ Playground Control")
if st.button("πŸ”„ Reset Playground"):
for key in list(st.session_state.keys()):
del st.session_state[key]
st.experimental_rerun()
st.header("πŸ”§ Configuration Panel")
with st.expander("πŸ› οΈ Dataset Settings"):
dataset_type = st.selectbox("Dataset Type", ["Classification", "Moons", "Circles"])
n_samples = st.slider("Number of Samples", 100, 10000, 300, step=50)
noise = st.slider("Noise Level", 0.0, 0.5, 0.2, step=0.01)
if dataset_type == 'Classification':
st.markdown("#### βš™οΈ Advanced Classification Settings")
n_features = st.slider("Total Features", 2, 20, 5)
n_informative = st.slider("Informative Features", 1, n_features, min(2, n_features))
max_redundant = n_features - n_informative
if max_redundant > 0:
n_redundant = st.slider("Redundant Features", 0, max_redundant, 0)
else:
n_redundant = 0
st.info("ℹ️ No redundant features possible with current settings.")
n_clusters_per_class = st.slider("Clusters per Class", 1, 3, 1)
class_sep = st.slider("Class Separation", 0.5, 2.0, 1.0, step=0.1)
with st.expander("πŸ—οΈ Model Architecture"):
n_layers = st.slider("Number of Hidden Layers", 1, 5, 2)
layer_configs = []
for i in range(n_layers):
neurons = st.number_input(f"Neurons in Layer {i+1}", min_value=1, max_value=512, value=8)
activation = st.selectbox(f"Activation for Layer {i+1}", ["relu", "tanh", "sigmoid"], key=f"act_{i}")
layer_configs.append({"neurons": neurons, "activation": activation})
with st.expander("πŸš€ Training Settings"):
epochs = st.slider("Epochs", 10, 500, 100, step=10)
batch_size = st.selectbox("Batch Size", [8, 16, 32, 64], index=2)
learning_rate = st.selectbox("Learning Rate", [0.001, 0.01, 0.1], index=1)
# Main Content
col1, col2 = st.columns([2, 3])
with col1:
st.subheader("πŸ“Š 1. Generate Dataset")
if st.button("🎲 Generate Dataset"):
with st.spinner('✨ Creating Dataset...'):
if dataset_type == "Classification":
X, y = create_dataset(dataset_type, n_samples, noise, n_features=n_features,
n_informative=n_informative, n_redundant=n_redundant,
n_clusters_per_class=n_clusters_per_class, class_sep=class_sep)
else:
X, y = create_dataset(dataset_type, n_samples, noise)
if X is not None:
st.success(f"Generated {dataset_type} dataset with {n_samples} samples.")
fig, ax = plt.subplots()
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y, palette="Set2", s=40, ax=ax)
ax.set_title(f"{dataset_type} Dataset")
st.pyplot(fig)
st.session_state.X = X
st.session_state.y = y
else:
st.error("Error generating dataset.")
with col2:
st.subheader("🧐 2. Build & Train Model")
if 'X' in st.session_state and 'y' in st.session_state:
if st.button("⚑ Train Model"):
with st.spinner('Training the neural net...'):
X, y = st.session_state.X, st.session_state.y
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
X_train, X_val, y_train, y_val = train_test_split(X_scaled, y, test_size=0.2, random_state=42)
model = build_model(input_dim=X_scaled.shape[1], layer_configs=layer_configs)
optimizer = SGD(learning_rate=learning_rate)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=0,
validation_data=(X_val, y_val))
st.session_state.model = model
st.session_state.history = history
st.session_state.scaler = scaler
st.session_state.X_train, st.session_state.X_val = X_train, X_val
st.session_state.y_train, st.session_state.y_val = y_train, y_val
st.success("Model trained successfully! πŸŽ‰")
st.markdown("---")
if 'history' in st.session_state:
st.subheader("πŸ“ˆ 3. Training Performance")
history = st.session_state.history
perf_col1, perf_col2 = st.columns(2)
with perf_col1:
fig_loss, ax_loss = plt.subplots()
ax_loss.plot(history.history['loss'], label='Train Loss', color='blue')
ax_loss.plot(history.history['val_loss'], label='Validation Loss', color='red')
ax_loss.set_title('Loss Curve', fontsize=13)
ax_loss.set_xlabel('Epochs')
ax_loss.set_ylabel('Loss')
ax_loss.legend()
st.pyplot(fig_loss)
with perf_col2:
fig_acc, ax_acc = plt.subplots()
ax_acc.plot(history.history['accuracy'], label='Train Accuracy', color='green')
ax_acc.plot(history.history['val_accuracy'], label='Validation Accuracy', color='orange')
ax_acc.set_title('Accuracy Curve', fontsize=13)
ax_acc.set_xlabel('Epochs')
ax_acc.set_ylabel('Accuracy')
ax_acc.legend()
st.pyplot(fig_acc)
st.markdown("---")
st.subheader("🌈 4. Decision Boundary Visualization")
class KerasClassifierWrapper:
def __init__(self, model, scaler):
self.model = model
self.scaler = scaler
def predict(self, X):
X_scaled = self.scaler.transform(X)
pred_probs = self.model.predict(X_scaled)
return np.where(pred_probs >= 0.5, 1, 0).flatten()
model_wrapper = KerasClassifierWrapper(st.session_state.model, st.session_state.scaler)
fig_db, ax_db = plt.subplots(figsize=(8, 6))
plot_decision_regions(X=st.session_state.scaler.inverse_transform(
np.vstack((st.session_state.X_train, st.session_state.X_val))),
y=np.hstack((st.session_state.y_train, st.session_state.y_val)),
clf=model_wrapper, legend=2, ax=ax_db)
ax_db.set_title('Decision Boundary', fontsize=14)
st.pyplot(fig_db)
st.success("🎯 You have completed the full cycle: Dataset βž” Model βž” Train βž” Visualize!")