Upload 4 files
Browse files- .gitattributes +1 -0
- ann.jpeg +3 -0
- app.py +58 -0
- pages/Tensorflow.py +96 -0
- requirements.txt +0 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
ann.jpeg filter=lfs diff=lfs merge=lfs -text
|
ann.jpeg
ADDED
|
Git LFS Details
|
app.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import base64
|
| 3 |
+
|
| 4 |
+
# Set page config
# Configure the browser-tab title and use the full-width ("wide") layout.
st.set_page_config(page_title="🧠 Neural Network Playground", layout="wide")
|
| 6 |
+
|
| 7 |
+
# Background image function
def set_background(image_path):
    """Set a full-page CSS background image for the Streamlit app.

    Reads the file at *image_path*, embeds it as a base64 data URI, and
    injects fixed, cover-sized background CSS into the page.

    Parameters
    ----------
    image_path : str
        Path to a local image file (e.g. PNG or JPEG).
    """
    # Guess the MIME type from the extension so the data URI is correct
    # for JPEGs too — the original hard-coded "image/png" even though the
    # app passes "ann.jpeg". Fall back to the original value if unknown.
    import mimetypes
    mime, _ = mimetypes.guess_type(image_path)
    if mime is None:
        mime = "image/png"

    with open(image_path, "rb") as image_file:
        encoded = base64.b64encode(image_file.read()).decode()

    st.markdown(
        f"""
        <style>
        .stApp {{
            background-image: url("data:{mime};base64,{encoded}");
            background-size: cover;
            background-repeat: no-repeat;
            background-position: center;
            background-attachment: fixed;
        }}
        </style>
        """,
        unsafe_allow_html=True
    )
|
| 26 |
+
|
| 27 |
+
# Set background image
set_background(r"ann.jpeg")


# --- Project README/Intro Section ---
# Fixed user-facing typos: "Hidden Layes" -> "Hidden Layers",
# stray colon after "**Epochs**".
st.markdown("""
# 🧠 Interactive Neural Network Playground
A Python app that lets users explore how neural networks learn by adjusting hyperparameters and visualizing the results.
---
### 🚀 What It Does
- Lets you choose:
  - **Dataset**: moons, circles, blobs, classification
  - **Learning rate**
  - **Activation**: ReLU, Sigmoid, Tanh
  - **Train-test split ratio**
  - **Batch size**
  - **Epochs**
  - **Hidden Layers**
  - **Number of Neurons**


- Builds & trains a TensorFlow/Keras neural network on synthetic data.
- Visualizes:
  - 🌈 **Decision boundaries** (how the model classifies the space)
  - 📈 **Training vs testing error** across epochs
---
### 🎯 Why It’s Useful
✅ Understand hyperparameter effects
✅ See overfitting vs underfitting visually
✅ Learn neural network behavior interactively
""")
|
pages/Tensorflow.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import tensorflow as tf
|
| 3 |
+
from tensorflow.keras.layers import Input, Dense
|
| 4 |
+
from tensorflow.keras.models import Sequential
|
| 5 |
+
import pandas as pd
|
| 6 |
+
import matplotlib.pyplot as plt
|
| 7 |
+
import seaborn as sns
|
| 8 |
+
from sklearn.datasets import make_moons, make_circles, make_blobs, make_classification
|
| 9 |
+
from sklearn.model_selection import train_test_split
|
| 10 |
+
from sklearn.preprocessing import StandardScaler
|
| 11 |
+
from mlxtend.plotting import plot_decision_regions
|
| 12 |
+
import numpy as np
|
| 13 |
+
|
| 14 |
+
st.title("Tensorflow Playground")

# Map each user-facing dataset name to the sklearn generator that builds it.
# Key order also fixes the order of options shown in the sidebar radio below.
data_sets = {
    "make_classification": make_classification,
    "make_moons": make_moons,
    "make_circles": make_circles,
    "make_blobs": make_blobs
}
|
| 22 |
+
|
| 23 |
+
# --- Sidebar hyperparameter controls ---
# Adam optimizer step size; fine 0.0001 steps, displayed with 4 decimals.
learning_rate = st.sidebar.slider('Learning Rate', min_value=0.0001, max_value=0.1, value=0.001, step=0.0001, format="%.4f")
# Noise passed to make_moons / make_circles (unused by the other datasets).
noise = st.sidebar.slider('Noise Level', min_value=0.0, max_value=1.0, value=0.1, step=0.01, format="%.2f")
epochs = st.sidebar.slider("Epochs", 1, 100, 10)
num_hidden_layers = st.sidebar.slider('Number of Hidden Layers', min_value=1, max_value=10, value=2, step=1)
# All hidden layers share this width.
neurons_per_layer = st.sidebar.slider("Neurons per Layer", 1, 512, 32)
activation_function = st.sidebar.radio("Select the activation function", ["relu", "sigmoid", "tanh"])
batch_size = st.sidebar.slider('Batch Size', min_value=8, max_value=512, value=32, step=8)
# Fraction of the data held out as the test split by train_test_split below.
test_size = st.sidebar.slider('test_size:', min_value=0.1, max_value=0.9, value=0.3, step=0.05)

# Which synthetic dataset generator to use (keys of data_sets above).
data_choice = st.sidebar.radio("Select the dataset", list(data_sets.keys()))
|
| 33 |
+
|
| 34 |
+
# Generate the selected synthetic dataset. make_classification and
# make_blobs do not accept a `noise` argument, so they are special-cased;
# make_moons / make_circles fall through to the generic call with `noise`.
if data_choice == "make_classification":
    X, y = make_classification(n_samples=2000, n_features=2, n_redundant=0, n_clusters_per_class=1, random_state=27)
elif data_choice == "make_blobs":
    # centers=2 keeps the labels binary: the model built below ends in a
    # single sigmoid unit trained with binary cross-entropy, so the
    # make_blobs default of 3 centers would silently produce a third
    # class the network cannot represent.
    X, y = make_blobs(n_samples=2000, n_features=2, centers=2, random_state=27)
else:
    X, y = data_sets[data_choice](n_samples=2000, noise=noise, random_state=27)
|
| 40 |
+
|
| 41 |
+
# Preview the raw (unscaled) dataset as a scatter plot, colored by label.
if st.button("Start"):
    st.subheader("📍 Input Data")
    scatter_fig, scatter_ax = plt.subplots(figsize=(8, 6))
    sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y, palette='Set2', ax=scatter_ax)
    st.pyplot(scatter_fig)
|
| 46 |
+
|
| 47 |
+
if st.button("Train Model"):
    # --- Build the network: num_hidden_layers dense layers of
    # neurons_per_layer units each, ending in one sigmoid unit for
    # binary classification. ---
    model = Sequential()
    model.add(Input(shape=(2,)))
    for i in range(num_hidden_layers):
        model.add(Dense(units=neurons_per_layer, activation=activation_function))
    model.add(Dense(units=1, activation="sigmoid"))

    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"])

    # Split first, then scale: the scaler is fit on the training data
    # only, so no test-set information leaks into preprocessing.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=27)
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)

    # A further 20% of the training split is held out as validation data.
    history = model.fit(X_train_scaled, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2, verbose=0)

    st.write(f"🧮 Final Training Loss: **{history.history['loss'][-1]:.4f}**")
    st.write(f"✅ Final Validation Loss: **{history.history['val_loss'][-1]:.4f}**")

    # Evaluate on the held-out test split — it was previously computed
    # (X_test_scaled) but never used, even though the app's intro promises
    # a training-vs-testing comparison.
    test_loss, test_accuracy = model.evaluate(X_test_scaled, y_test, verbose=0)
    st.write(f"🧪 Test Loss: **{test_loss:.4f}** — Test Accuracy: **{test_accuracy:.4f}**")

    col1, col2 = st.columns(2)

    with col1:
        st.markdown("#### Training vs Validation Loss Plot")
        fig1, ax1 = plt.subplots(figsize=(8, 6))
        ax1.plot(history.history["loss"], label="Training Loss", color="green")
        ax1.plot(history.history["val_loss"], label="Validation Loss", color="red")
        ax1.set_xlabel("Epoch", fontsize=14, fontweight='bold')
        ax1.set_ylabel("Loss", fontsize=14, fontweight='bold')
        ax1.legend()
        st.pyplot(fig1)

    with col2:
        st.markdown("#### Decision Boundary Plot")

        class KerasClassifierWrapper:
            """Adapter exposing the scikit-learn-style `predict` interface
            that mlxtend's plot_decision_regions expects, scaling inputs
            exactly as was done during training."""

            def __init__(self, model, scaler):
                self.model = model
                self.scaler = scaler

            def predict(self, X):
                X_scaled = self.scaler.transform(X)
                # verbose=0 stops Keras from printing a progress bar for
                # every grid batch plot_decision_regions evaluates.
                preds = self.model.predict(X_scaled, verbose=0)
                # Threshold the sigmoid output at 0.5 into hard 0/1 labels.
                return np.where(preds > 0.5, 1, 0).flatten()

        keras_clf = KerasClassifierWrapper(model, scaler)

        fig2, ax2 = plt.subplots(figsize=(8, 6))
        # The grid is drawn in the original (unscaled) feature space; the
        # wrapper scales each grid point before predicting.
        plot_decision_regions(X=X_train, y=y_train, clf=keras_clf, ax=ax2)
        st.pyplot(fig2)
|
requirements.txt
ADDED
|
File without changes
|