Anshini committed on
Commit
23931f0
·
verified ·
1 Parent(s): 69105f1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -20
app.py CHANGED
@@ -1,23 +1,15 @@
1
  import streamlit as st
2
-
3
- # UI Inputs
4
- dataset = st.selectbox("Choose a Dataset", ["moons", "circles", "blobs"])
5
- learning_rate = st.number_input("Learning Rate", value=0.01, format="%.4f")
6
- activation = st.selectbox("Activation Function", ["relu", "sigmoid", "tanh"])
7
- split_ratio = st.slider("Train-Test Split Ratio", 0.5, 0.9, 0.7)
8
- batch_size = st.number_input("Batch Size", value=32, step=16)
9
-
10
- if st.button("Train Model"):
11
- # Call your training and plotting function
12
- train_and_visualize(dataset, learning_rate, activation, split_ratio, batch_size)
13
-
14
  from sklearn.datasets import make_moons, make_circles, make_blobs
15
  from sklearn.model_selection import train_test_split
16
  from sklearn.preprocessing import StandardScaler
17
  import tensorflow as tf
18
- from tensorflow.keras import models, layers
19
- import matplotlib.pyplot as plt
20
- import numpy as np
 
 
21
 
22
  def generate_data(dataset, test_size):
23
  if dataset == "moons":
@@ -34,7 +26,7 @@ def build_model(activation, learning_rate):
34
  model = models.Sequential([
35
  layers.Dense(10, input_shape=(2,), activation=activation),
36
  layers.Dense(10, activation=activation),
37
- layers.Dense(1, activation="sigmoid")
38
  ])
39
  optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
40
  model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
@@ -45,11 +37,12 @@ def plot_decision_boundary(model, X, y):
45
  y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
46
  xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
47
  np.linspace(y_min, y_max, 200))
48
- preds = model.predict(np.c_[xx.ravel(), yy.ravel()])
 
49
  preds = preds.reshape(xx.shape)
50
 
51
- plt.contourf(xx, yy, preds, alpha=0.5)
52
- plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.RdBu)
53
  plt.title("Decision Boundary")
54
  st.pyplot(plt.gcf())
55
  plt.clf()
@@ -57,6 +50,8 @@ def plot_decision_boundary(model, X, y):
57
  def plot_loss(history):
58
  plt.plot(history.history['loss'], label='Train Loss')
59
  plt.plot(history.history['val_loss'], label='Test Loss')
 
 
60
  plt.legend()
61
  plt.title("Training vs Testing Error")
62
  st.pyplot(plt.gcf())
@@ -67,5 +62,23 @@ def train_and_visualize(dataset, lr, act, split, batch):
67
  model = build_model(act, lr)
68
  history = model.fit(X_train, y_train, epochs=50, batch_size=batch,
69
  validation_data=(X_test, y_test), verbose=0)
70
- plot_decision_boundary(model, np.vstack((X_train, X_test)), np.hstack((y_train, y_test)))
 
 
71
  plot_loss(history)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
 
 
 
 
 
 
 
 
 
 
4
  from sklearn.datasets import make_moons, make_circles, make_blobs
5
  from sklearn.model_selection import train_test_split
6
  from sklearn.preprocessing import StandardScaler
7
  import tensorflow as tf
8
+ from tensorflow.keras import layers, models
9
+
10
+ # -------------------------------
11
+ # Helper Functions
12
+ # -------------------------------
13
 
14
  def generate_data(dataset, test_size):
15
  if dataset == "moons":
 
26
  model = models.Sequential([
27
  layers.Dense(10, input_shape=(2,), activation=activation),
28
  layers.Dense(10, activation=activation),
29
+ layers.Dense(1, activation='sigmoid')
30
  ])
31
  optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
32
  model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
 
37
  y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
38
  xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
39
  np.linspace(y_min, y_max, 200))
40
+ grid = np.c_[xx.ravel(), yy.ravel()]
41
+ preds = model.predict(grid)
42
  preds = preds.reshape(xx.shape)
43
 
44
+ plt.contourf(xx, yy, preds, cmap="RdBu", alpha=0.6)
45
+ plt.scatter(X[:, 0], X[:, 1], c=y, cmap="RdBu", edgecolors='white')
46
  plt.title("Decision Boundary")
47
  st.pyplot(plt.gcf())
48
  plt.clf()
 
50
  def plot_loss(history):
51
  plt.plot(history.history['loss'], label='Train Loss')
52
  plt.plot(history.history['val_loss'], label='Test Loss')
53
+ plt.xlabel('Epoch')
54
+ plt.ylabel('Loss')
55
  plt.legend()
56
  plt.title("Training vs Testing Error")
57
  st.pyplot(plt.gcf())
 
62
  model = build_model(act, lr)
63
  history = model.fit(X_train, y_train, epochs=50, batch_size=batch,
64
  validation_data=(X_test, y_test), verbose=0)
65
+ X_combined = np.vstack((X_train, X_test))
66
+ y_combined = np.concatenate((y_train, y_test))
67
+ plot_decision_boundary(model, X_combined, y_combined)
68
  plot_loss(history)
69
+
70
# -------------------------------
# Streamlit UI
# -------------------------------

st.title("🧠 Neural Network Playground")

# Collect the experiment configuration from the user.
dataset = st.selectbox("Choose Dataset", ["moons", "circles", "blobs"])
learning_rate = st.number_input("Learning Rate", value=0.01, format="%.4f")
activation = st.selectbox("Activation Function", ["relu", "sigmoid", "tanh"])
split_ratio = st.slider("Train-Test Split Ratio", 0.5, 0.9, 0.7)
batch_size = st.number_input("Batch Size", value=32, step=16)

# Training only starts on an explicit button press; the spinner gives
# feedback while the model fits (verbose=0 elsewhere means no console output).
if st.button("Train Model"):
    with st.spinner("Training in progress..."):
        train_and_visualize(dataset, learning_rate, activation, split_ratio, batch_size)