import streamlit as st
import numpy as np
from tensorflow import keras
import matplotlib.pyplot as plt
import networkx as nx
from sklearn.datasets import make_circles, make_blobs
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
# Custom CSS. Note: these hashed class names (e.g. .css-12oz5g7) are generated
# by a specific Streamlit build and may stop matching after an upgrade.
st.markdown("""
<style>
.sidebar .css-12oz5g7 {
    background-color: #f0f2f6;
    padding: 20px;
    border-radius: 10px;
}
.sidebar .css-1xarl3l {
    font-size: 20px;
    color: #333333;
}
.sidebar .css-19rxjzo {
    background-color: #e9ecef;
    border: 1px solid #ced4da;
    border-radius: 4px;
    color: #495057;
    font-size: 14px;
    padding: 10px;
    margin-top: 10px;
}
.sidebar .css-14qlr5i {
    background-color: #6c757d;
    color: white;
    border-radius: 20px;
    padding: 5px 10px;
    font-size: 16px;
}
</style>
""", unsafe_allow_html=True)
# Helper functions
def draw_neural_network(model):
    """Render the network topology as a layered directed graph."""
    G = nx.DiGraph()
    pos = {}

    # Input layer: two features
    input_nodes = ["X1", "X2"]
    for i, node in enumerate(input_nodes):
        G.add_node(node, layer=0)
        pos[node] = (0, -i)

    # Hidden layers: every Dense layer except the output layer
    hidden_nodes = []
    for layer_idx, layer in enumerate(model.layers[:-1]):
        if isinstance(layer, keras.layers.Dense):
            layer_nodes = [f"H{layer_idx + 1}_{i + 1}" for i in range(layer.units)]
            hidden_nodes.append(layer_nodes)
            for i, node in enumerate(layer_nodes):
                G.add_node(node, layer=layer_idx + 1)
                pos[node] = (layer_idx + 1, -i)

    # Single output node
    output_node = "Y"
    G.add_node(output_node, layer=len(hidden_nodes) + 1)
    pos[output_node] = (len(hidden_nodes) + 1, -0.5)

    # Node colors follow insertion order: inputs, then hidden layers, then output
    colors = (["lightblue"] * len(input_nodes)
              + ["lightcoral"] * sum(len(layer) for layer in hidden_nodes)
              + ["lightgreen"])

    # Fully connect consecutive layers
    for inp in input_nodes:
        for hid in hidden_nodes[0]:
            G.add_edge(inp, hid)
    for layer_idx in range(len(hidden_nodes) - 1):
        for node1 in hidden_nodes[layer_idx]:
            for node2 in hidden_nodes[layer_idx + 1]:
                G.add_edge(node1, node2)
    for hid in hidden_nodes[-1]:
        G.add_edge(hid, output_node)

    fig, ax = plt.subplots(figsize=(10, 8))
    nx.draw(G, pos, with_labels=True, node_color=colors, edgecolors="black",
            node_size=1500, font_size=12, ax=ax, width=2, edge_color="gray",
            arrowsize=20)
    ax.set_title("Neural Network Architecture", fontsize=16)
    st.pyplot(fig)

def plot_decision_boundary(X, y, model):
    """Shade the input plane by predicted class and overlay the data points."""
    # Only meaningful for classification; skip silently otherwise
    if task_type == "Regression":
        return
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
                         np.linspace(y_min, y_max, 200))
    # Predict over the grid and threshold the sigmoid output at 0.5
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = (Z > 0.5).astype(int).reshape(xx.shape)
    fig, ax = plt.subplots(figsize=(10, 8))
    contour = ax.contourf(xx, yy, Z, alpha=0.8, cmap="coolwarm")
    ax.scatter(X[:, 0], X[:, 1], c=y, edgecolors="k", cmap="coolwarm", s=100)
    ax.set_xlabel("X1")
    ax.set_ylabel("X2")
    ax.set_title("Decision Boundary")
    fig.colorbar(contour, ax=ax, label="Class")
    st.pyplot(fig)

def plot_regression_surface(X, y, model):
    """Plot the model's predicted value over the input plane in 3D."""
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
                         np.linspace(y_min, y_max, 200))
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(111, projection="3d")
    ax.plot_surface(xx, yy, Z, alpha=0.7, cmap="viridis")
    ax.scatter(X[:, 0], X[:, 1], y, c=y, cmap="viridis", s=20)
    ax.set_xlabel("X1")
    ax.set_ylabel("X2")
    ax.set_zlabel("Predicted Value")
    ax.set_title("Regression Surface")
    st.pyplot(fig)

def plot_learning_curves(history):
    """Plot the training and validation metric per epoch."""
    fig, ax = plt.subplots(figsize=(10, 6))
    if task_type == "Classification":
        ax.plot(history.history["accuracy"], label="Train Acc")
        ax.plot(history.history["val_accuracy"], label="Val Acc")
        ax.set_ylabel("Accuracy")
    else:
        ax.plot(history.history["mae"], label="Train MAE")
        ax.plot(history.history["val_mae"], label="Val MAE")
        ax.set_ylabel("MAE")
    ax.set_title("Learning Curves")
    ax.set_xlabel("Epoch")
    ax.legend()
    st.pyplot(fig)
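
# ---- App layout: task, dataset, and hyperparameter controls ----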
st.title("Neural Network Playground")

task_type = st.selectbox("Task Type", ["Classification", "Regression"])
dataset = st.selectbox("Choose Dataset", ["Circles", "Exclusive OR", "Gaussian", "Spiral"])
epochs = st.slider("Epochs", 1, 200, 50)

col1, col2, col3 = st.columns(3)
with col1:
    learning_rate = st.slider("Learning Rate", 0.001, 1.0, 0.03, 0.001)
with col2:
    hidden_layers = st.slider("Hidden Layers", 1, 5, 3)
    neuron_counts = [st.slider(f"Neurons in Layer {i + 1}", 1, 20, 5)
                     for i in range(hidden_layers)]
with col3:
    activation = st.selectbox("Activation", ["relu", "sigmoid", "tanh", "linear"])
    regularization = st.selectbox("Regularization", ["None", "L1", "L2"])
    reg_rate = (st.slider("Reg. Rate", 0.0, 0.1, 0.01, 0.001)
                if regularization != "None" else 0.0)

def generate_dataset(name):
    """Return a two-feature dataset (X, y) for the selected option."""
    if name == "Circles":
        return make_circles(n_samples=1000, noise=0.1, factor=0.5)
    elif name == "Exclusive OR":
        X = np.random.randn(1000, 2) * 2
        y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0).astype(np.float32)
        return X, y
    elif name == "Gaussian":
        X, y = make_blobs(n_samples=1000, centers=2, n_features=2)
        return X, y.astype(np.float32)
    elif name == "Spiral":
        # Two interleaved spirals, one per class
        n = 1000
        X = np.zeros((n * 2, 2))
        y = np.zeros(n * 2)
        for j in range(2):
            ix = range(n * j, n * (j + 1))
            r = np.linspace(0, 1, n)  # radius grows outward along the arm
            t = np.linspace(j * 4, (j + 1) * 4, n) + np.random.randn(n) * 0.2  # noisy angle
            X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
            y[ix] = j
        return shuffle(X, y)
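
# Prepare the data: generate the chosen dataset, standardize the features
# (zero mean, unit variance), and hold out 30% for testing.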
X, y = generate_dataset(dataset)
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
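
# Build a fully connected network from the sidebar settings.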
model = keras.Sequential()
first = True
for count in neuron_counts:
    layer_args = dict(units=count, activation=activation)
    if first:
        # The first layer declares the input shape (two features)
        layer_args["input_shape"] = (2,)
    if regularization == "L1":
        layer_args["kernel_regularizer"] = keras.regularizers.l1(reg_rate)
    elif regularization == "L2":
        layer_args["kernel_regularizer"] = keras.regularizers.l2(reg_rate)
    model.add(keras.layers.Dense(**layer_args))
    first = False

# Output head and loss depend on the selected task
if task_type == "Classification":
    model.add(keras.layers.Dense(1, activation="sigmoid"))
    loss = "binary_crossentropy"
    metrics = ["accuracy"]
else:
    model.add(keras.layers.Dense(1, activation="linear"))
    loss = "mse"
    metrics = ["mae"]

model.compile(optimizer=keras.optimizers.Adam(learning_rate), loss=loss, metrics=metrics)
history = model.fit(X_train, y_train, epochs=epochs, batch_size=32,
                    verbose=0, validation_split=0.2)
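
# ---- Render the results ----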
st.subheader("Network Structure")
draw_neural_network(model)

if task_type == "Classification":
    st.subheader("Decision Boundary")
    plot_decision_boundary(X, y, model)
else:
    st.subheader("Regression Surface")
    plot_regression_surface(X, y, model)

st.subheader("Learning Curves")
plot_learning_curves(history)

if st.checkbox("Show Model Summary"):
    st.subheader("Model Summary")
    model.summary(print_fn=lambda x: st.text(x))
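
# Run locally with `streamlit run app.py` (assuming this script is saved as app.py).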