import streamlit as st
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
import numpy as np
import matplotlib.pyplot as plt
import io
import time

st.set_page_config(page_title="ANN Visualizer", layout="wide")
st.title("🧠 Interactive ANN Visualizer")
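
# Sidebar: interactive controls for the dataset, network architecture, and training setup.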
with st.sidebar:
    st.header("⚙️ Configure Your Model")
    dataset_type = st.selectbox("Dataset", ["moons", "circles", "classification"])
    n_samples = st.slider("Data Points", 100, 20000, 500, step=100)
    noise = st.slider("Noise Level", 0.0, 1.0, 0.2, step=0.05)
    num_hidden = st.number_input("Hidden Layers", 1, 10, 2)

    hidden_layers, activations, dropout_rates = [], [], []
    activation_map = {"ReLU": nn.ReLU(), "Tanh": nn.Tanh(), "Sigmoid": nn.Sigmoid()}
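    # One block of controls per hidden layer: width, activation function, and dropout rate.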
    for i in range(num_hidden):
        st.markdown(f"### Hidden Layer {i+1}")
        units = st.number_input(f"Units (Layer {i+1})", 1, 512, 8, key=f"units_{i}")
        act = st.selectbox(f"Activation (Layer {i+1})", list(activation_map.keys()), key=f"act_{i}")
        drop = st.slider(f"Dropout (Layer {i+1})", 0.0, 0.9, 0.0, 0.05, key=f"dropout_{i}")
        hidden_layers.append(units)
        activations.append(act)
        dropout_rates.append(drop)
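
    # Global training hyperparameters and early-stopping settings.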
    lr = st.number_input("Learning Rate", 0.0001, 1.0, 0.01, format="%f")
    epochs = st.slider("Epochs", 100, 5000, 500, step=100)
    weight_decay = st.number_input("L2 Regularization (Weight Decay)", 0.0, 0.1, 0.0001, format="%f")
    early_stop = st.checkbox("Enable Early Stopping", True)
    patience = st.slider("Patience", 1, 20, 5) if early_stop else None
    min_delta = st.number_input("Min Improvement Delta", 0.0001, 0.1, 0.001, format="%f") if early_stop else None
    start = st.button("Start Training")
if start:
    # Data Generator
    def generate_data():
        if dataset_type == "moons":
            return make_moons(n_samples=n_samples, noise=noise, random_state=42)
        elif dataset_type == "circles":
            return make_circles(n_samples=n_samples, noise=noise, factor=0.5, random_state=42)
        else:
            return make_classification(n_samples=n_samples, n_features=2, n_informative=2,
                                        n_redundant=0, n_clusters_per_class=1, random_state=42)
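
    # Generate the dataset, standardize the two features, and hold out 30% of points for evaluation.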
    X, y = generate_data()
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
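    # Convert the NumPy splits to PyTorch tensors (long dtype for CrossEntropyLoss targets).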
    X_train_tensor = torch.tensor(X_train, dtype=torch.float32)
    y_train_tensor = torch.tensor(y_train, dtype=torch.long)
    X_test_tensor = torch.tensor(X_test, dtype=torch.float32)
    y_test_tensor = torch.tensor(y_test, dtype=torch.long)
    # Build Model Dynamically
    layers = []
    input_dim = X.shape[1]
    for h, act, dr in zip(hidden_layers, activations, dropout_rates):
        layers.append(nn.Linear(input_dim, h))
        layers.append(activation_map[act])
        if dr > 0:
            layers.append(nn.Dropout(dr))
        input_dim = h
    layers.append(nn.Linear(input_dim, 2))
    model = nn.Sequential(*layers)
    # Loss, Optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
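    # weight_decay adds an L2 penalty to every parameter through the Adam update.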

    # Plotting Setup
    st.subheader("Initial Decision Boundary")
    x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
    y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 400), np.linspace(y_min, y_max, 400))
    grid = np.c_[xx.ravel(), yy.ravel()]
    grid_tensor = torch.tensor(grid, dtype=torch.float32)
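    # Class-1 probabilities evaluated over this 400x400 grid give the decision surface; the same grid is reused for every plot.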
    model.eval()
    with torch.no_grad():
        probs = torch.softmax(model(grid_tensor), dim=1)[:, 1].numpy().reshape(xx.shape)
    fig, ax = plt.subplots()
    ax.contourf(xx, yy, probs, levels=50, cmap="Spectral", alpha=0.8)
    ax.scatter(X[:, 0], X[:, 1], c=y, edgecolor='k', s=15, cmap="Spectral")
    ax.set_title("Initial Decision Surface")
    ax.set_xticks([]); ax.set_yticks([])
    st.pyplot(fig)
    # Training Loop
    st.subheader("⏳ Training Progress")
    progress = st.progress(0)
    best_loss = float('inf')
    patience_counter = 0
    train_losses, test_losses = [], []
    plot_interval = max(1, epochs // 10)
    start_time = time.time()
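
    # Full-batch training: each epoch runs one forward/backward pass over the entire training set.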
    for epoch in range(1, epochs + 1):
        model.train()
        optimizer.zero_grad()
        output = model(X_train_tensor)
        loss = criterion(output, y_train_tensor)
        loss.backward()
        optimizer.step()
        train_losses.append(loss.item())

        model.eval()
        with torch.no_grad():
            val_output = model(X_test_tensor)
            val_loss = criterion(val_output, y_test_tensor)
        test_losses.append(val_loss.item())
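        # The per-epoch validation loss feeds both the loss curves and the early-stopping check below.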
        # Early Stopping
        if early_stop:
            if val_loss.item() < best_loss - min_delta:
                best_loss = val_loss.item()
                patience_counter = 0
            else:
                patience_counter += 1
                if patience_counter >= patience:
                    st.warning(f"Early stopping at epoch {epoch}")
                    break
        # Plot decision surface every few epochs
        if epoch % plot_interval == 0 or epoch == epochs:
            st.markdown(f"### Epoch {epoch}")
            with torch.no_grad():
                probs = torch.softmax(model(grid_tensor), dim=1)[:, 1].numpy().reshape(xx.shape)
            fig, ax = plt.subplots()
            ax.contourf(xx, yy, probs, levels=50, cmap="Spectral", alpha=0.8)
            ax.scatter(X[:, 0], X[:, 1], c=y, edgecolor='k', s=15, cmap="Spectral")
            ax.set_title(f"Decision Surface at Epoch {epoch}")
            ax.set_xticks([]); ax.set_yticks([])
            st.pyplot(fig)

        progress.progress(epoch / epochs)
    duration = time.time() - start_time
    st.success(f"Training finished in {duration:.2f} seconds")

    # Plot Loss Curves
    st.subheader("Final Loss Curves")
    fig1, ax1 = plt.subplots()
    ax1.plot(train_losses, label="Train Loss")
    ax1.plot(test_losses, label="Test Loss")
    ax1.set_xlabel("Epochs")
    ax1.set_ylabel("Loss")
    ax1.legend()
    ax1.grid(True)
    st.pyplot(fig1)
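    # Serialize the figure to an in-memory PNG buffer so it can be offered as a download.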
    buf_loss = io.BytesIO()
    fig1.savefig(buf_loss, format="png")
    st.download_button("Download Loss Curve", buf_loss.getvalue(), file_name="loss_curve.png", mime="image/png")
    # Final Decision Surface
    st.subheader("Final Decision Boundary")
    with torch.no_grad():
        probs = torch.softmax(model(grid_tensor), dim=1)[:, 1].numpy().reshape(xx.shape)
    fig2, ax2 = plt.subplots()
    ax2.contourf(xx, yy, probs, levels=50, cmap="Spectral", alpha=0.8)
    ax2.scatter(X[:, 0], X[:, 1], c=y, edgecolor='k', s=15, cmap="Spectral")
    ax2.set_title("Final Decision Surface")
    ax2.set_xticks([]); ax2.set_yticks([])
    st.pyplot(fig2)

    buf_boundary = io.BytesIO()
    fig2.savefig(buf_boundary, format="png")
    st.download_button("Download Decision Surface", buf_boundary.getvalue(), file_name="decision_surface.png", mime="image/png")
    # Accuracy
    with torch.no_grad():
        train_preds = model(X_train_tensor).argmax(dim=1).numpy()
        test_preds = model(X_test_tensor).argmax(dim=1).numpy()
    st.metric("Train Accuracy", f"{accuracy_score(y_train, train_preds)*100:.2f}%")
    st.metric("Test Accuracy", f"{accuracy_score(y_test, test_preds)*100:.2f}%")

    st.info("Change settings from the sidebar to retrain with different configurations!")
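
# To try this locally (assuming the file is saved as app.py and streamlit, torch,
# scikit-learn, and matplotlib are installed):
#   streamlit run app.py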