File size: 3,923 Bytes
2bf2ce1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a3b182e
2bf2ce1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a3b182e
2bf2ce1
 
 
 
 
 
 
 
 
 
 
 
a3b182e
2bf2ce1
 
a3b182e
2bf2ce1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import streamlit as st
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons, make_circles, make_blobs
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras import models, layers, optimizers

# --- Dataset Generator ---
# --- Dataset Generator ---
def get_dataset(name, n_samples=1000, noise=0.2, random_state=42):
    """Generate a 2-D binary-classification toy dataset.

    Args:
        name: "Moons", "Circles", or anything else (falls back to blobs).
        n_samples: Total number of points to generate.
        noise: Gaussian noise level for moons/circles (ignored by blobs).
        random_state: Seed for reproducible generation. Previously only the
            blobs branch was seeded, so moons/circles changed on every rerun
            of the app; seeding all three keeps results comparable across
            hyperparameter tweaks.

    Returns:
        Tuple (X, y) with X of shape (n_samples, 2) and y of shape (n_samples,).
    """
    if name == "Moons":
        return make_moons(n_samples=n_samples, noise=noise,
                          random_state=random_state)
    elif name == "Circles":
        return make_circles(n_samples=n_samples, noise=noise,
                            random_state=random_state)
    else:
        return make_blobs(n_samples=n_samples, centers=2, cluster_std=1.5,
                          random_state=random_state)

# --- Model Builder ---
# --- Model Builder ---
def build_model(activation, lr, num_layers, num_neurons):
    """Construct and compile a dense binary classifier.

    Stacks ``num_layers`` hidden Dense layers of ``num_neurons`` units each
    (with the given activation) on a 2-feature input, topped by a single
    sigmoid output unit. Compiled with Adam at learning rate ``lr`` using
    binary cross-entropy loss and an accuracy metric.

    Returns:
        A compiled ``tf.keras`` Sequential model, ready for ``fit``.
    """
    net = models.Sequential()
    net.add(layers.Input(shape=(2,)))
    for _ in range(num_layers):
        net.add(layers.Dense(num_neurons, activation=activation))
    # Single sigmoid unit: output is P(class == 1).
    net.add(layers.Dense(1, activation='sigmoid'))
    net.compile(
        optimizer=optimizers.Adam(learning_rate=lr),
        loss='binary_crossentropy',
        metrics=['accuracy'],
    )
    return net

# --- Decision Boundary Plotter ---
# --- Decision Boundary Plotter ---
def visualize_decision_boundary(model, X, y):
    """Plot the model's predicted probability surface with the data overlaid.

    Evaluates the model on a 300x300 grid spanning the data (padded by one
    unit on each side), draws the predictions as filled contours, and
    scatters the original points colored by class label.

    Args:
        model: Trained Keras model with a ``predict`` method over (n, 2) input.
        X: Feature array of shape (n_samples, 2).
        y: Class labels used only for point coloring.

    Returns:
        The matplotlib Figure containing the plot.
    """
    pad = 1
    x_lo, x_hi = X[:, 0].min() - pad, X[:, 0].max() + pad
    y_lo, y_hi = X[:, 1].min() - pad, X[:, 1].max() + pad
    xx, yy = np.meshgrid(np.linspace(x_lo, x_hi, 300),
                         np.linspace(y_lo, y_hi, 300))
    # Flatten the mesh into (90000, 2) query points for a single predict call.
    grid_points = np.column_stack((xx.ravel(), yy.ravel()))
    probs = model.predict(grid_points, verbose=0).reshape(xx.shape)

    fig, ax = plt.subplots(figsize=(7, 6))
    ax.contourf(xx, yy, probs, cmap='RdBu', alpha=0.6)
    ax.scatter(X[:, 0], X[:, 1], c=y, cmap='RdBu', edgecolors='k', s=25)
    ax.set(title="Decision Boundary", xlabel="Feature 1", ylabel="Feature 2")
    return fig

# --- Loss Plotter ---
# --- Loss Plotter ---
def plot_loss(history):
    """Plot training vs. validation loss curves from a Keras History object.

    Args:
        history: The object returned by ``model.fit``; its ``history`` dict
            must contain 'loss' and 'val_loss' lists (one entry per epoch).

    Returns:
        The matplotlib Figure containing the two loss curves.
    """
    fig, ax = plt.subplots(figsize=(7, 5))
    # (history key, legend label) pairs — plotted in a fixed order.
    for key, label in (('loss', 'Train Loss'), ('val_loss', 'Test Loss')):
        ax.plot(history.history[key], label=label)
    ax.set(title="Training vs Testing Loss", xlabel="Epoch", ylabel="Loss")
    ax.legend()
    ax.grid(True, linestyle='--', alpha=0.7)
    return fig

# --- Streamlit UI ---
# Page chrome: title, layout, and an intro line shown above the controls.
st.set_page_config(page_title="TensorFlow Playground", layout="centered")
st.title(" TensorFlow Playground")
st.write("Explore how hyperparameters affect decision boundaries in neural networks.")

# --- Sidebar Controls ---
# Each widget returns its current value on every script rerun; these module-level
# names are read by the training section below when the button is pressed.
st.sidebar.header("🔧 Hyperparameters")
dataset = st.sidebar.selectbox("Select Dataset", ["Moons", "Circles", "Blobs"])
noise = st.sidebar.slider("Dataset Noise", 0.0, 1.0, 0.2, 0.01)  # min, max, default, step
activation = st.sidebar.selectbox("Activation Function", ["relu", "sigmoid", "tanh"])
lr = st.sidebar.slider("Learning Rate", 0.001, 0.1, 0.01, 0.001)
split = st.sidebar.slider("Train-Test Split", 0.1, 0.9, 0.7, 0.05)  # fraction used for training
batch = st.sidebar.select_slider("Batch Size", options=list(range(8, 129, 8)), value=32)
epochs = st.sidebar.slider("Epochs", 10, 300, 100, 10)
num_neurons = st.sidebar.slider("Neurons/Layer", 1, 100, 10)
num_layers = st.sidebar.slider("Hidden Layers", 1, 5, 2)

# --- Train Button ---
# Runs only on the rerun triggered by the button click; all sidebar values
# above are captured at that moment.
if st.button(" Train Model"):
    with st.spinner("Training the model..."):
        # Generate, standardize, then split the selected dataset.
        # NOTE(review): scaling is fit on the full dataset before the split,
        # so test statistics leak into the scaler — acceptable for a toy
        # visualization, but not a pattern for real pipelines.
        X, y = get_dataset(dataset, noise=noise)
        X = StandardScaler().fit_transform(X)
        X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=split, random_state=42)

        # Build and fit with the chosen hyperparameters; the test split doubles
        # as the validation set so val_loss can be plotted per epoch.
        model = build_model(activation, lr, num_layers, num_neurons)
        history = model.fit(X_train, y_train, validation_data=(X_test, y_test),
                            batch_size=batch, epochs=epochs, verbose=0)

        # Report held-out accuracy, then render both diagnostic figures.
        test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
        st.success(f"✅ Trained Successfully! Test Accuracy: {test_acc:.2f}")

        st.subheader(" Decision Boundary")
        st.pyplot(visualize_decision_boundary(model, X, y))

        st.subheader(" Loss Over Epochs")
        st.pyplot(plot_loss(history))