Spaces:
Sleeping
Sleeping
| import streamlit as st | |
| import numpy as np | |
| import random | |
| from matplotlib import pyplot as plt | |
| from matplotlib.animation import FuncAnimation,PillowWriter | |
| from sklearn.datasets import make_classification | |
| import tempfile | |
| import os | |
| from PIL import Image | |
| # Set the page configuration | |
| st.set_page_config(page_title="Perceptron Animation", layout="centered") | |
| # Title of the app | |
| # Create a sidebar for navigation | |
| page = st.sidebar.radio("Select a Page", ["Introduction", "Perceptron Animation"]) | |
| # Page 1: Introduction - How Perceptron Works | |
| if page == "Introduction": | |
| st.markdown(""" | |
| ## How the Perceptron Decision Boundary Works: | |
| The Perceptron is a simple linear classifier that learns by adjusting its decision boundary over multiple iterations (epochs). Initially, the decision boundary (the line separating the two classes) is randomly placed. Through each epoch, the Perceptron updates the weights based on the error in classification. These updates modify the slope and intercept of the boundary, causing it to shift and improve its ability to classify the data points correctly. | |
| ### Key Components: | |
| - **Training Data:** The dataset consists of points in 2D space, with each point belonging to one of two classes (visualized with different colors). | |
| - **Initial Boundary:** The decision boundary is randomly initialized and might misclassify some points at the beginning. | |
| - **Training Process:** The Perceptron updates its weights using the error from the previous epoch. The weight update rule is:""") | |
| image = Image.open("perceptron.png") | |
| st.image(image, caption=" ", use_container_width=True) | |
| st.markdown(""" | |
| - **Decision Boundary Evolution:** As the algorithm trains, the decision boundary (the red line) evolves. The slope \(m\) and intercept \(c\) of the line change with each epoch, gradually improving the model's ability to classify the data points. | |
| - **Final Convergence:** After enough epochs, the Perceptron converges, and the decision boundary stabilizes. Ideally, it will perfectly separate the two classes if the data is linearly separable. | |
| ### Visualizing the Perceptron: | |
| The graph on the next page shows how the Perceptron’s decision boundary evolves across multiple epochs. The red line moves closer to an optimal separation between the blue and green points as the algorithm learns. | |
| """) | |
| # Page 2: Perceptron Animation Graph | |
| elif page == "Perceptron Animation": | |
| # Custom CSS for styling the app | |
| st.markdown(""" | |
| <style> | |
| body { | |
| font-family: 'Helvetica', sans-serif; | |
| } | |
| h1 { | |
| font-size: 40px; | |
| color: #ff5733; | |
| font-weight: bold; | |
| text-align: center; | |
| } | |
| h2 { | |
| font-size: 30px; | |
| color: #333333; | |
| text-align: center; | |
| } | |
| .stSlider { | |
| margin-bottom: 20px; | |
| } | |
| .stImage { | |
| border: 3px solid #ff5733; | |
| border-radius: 10px; | |
| padding: 10px; | |
| } | |
| .stButton { | |
| background-color: #ff5733; | |
| color: white; | |
| font-size: 18px; | |
| font-weight: bold; | |
| border-radius: 5px; | |
| padding: 10px; | |
| } | |
| .stButton:hover { | |
| background-color: #ff704d; | |
| } | |
| .stText { | |
| font-size: 18px; | |
| color: #333333; | |
| } | |
| .stMarkdown { | |
| font-size: 16px; | |
| } | |
| .stAlert { | |
| background-color: #f8f9fa; | |
| border-left: 5px solid #ff5733; | |
| } | |
| </style> | |
| """, unsafe_allow_html=True) | |
| # Streamlit layout | |
| st.title("🧠 Perceptron Decision Boundary Animation") | |
| st.write("Visualizing how the Perceptron updates its decision boundary over time on a 2D dataset.") | |
| # Generate data | |
| fv, cv = make_classification(n_samples=100, n_features=2, n_informative=1, | |
| n_redundant=0, n_classes=2, n_clusters_per_class=1, | |
| class_sep=10, random_state=41, hypercube=False) | |
| # Step function | |
| def step(z): | |
| return 1 if z > 0 else 0 | |
| # Perceptron training loop | |
| def train_perceptron(fv, cv, epochs=200, lr=0.1): | |
| m_vals, c_vals = [], [] | |
| fv = np.insert(fv, 0, 1, axis=1) | |
| weights = np.ones(fv.shape[1]) | |
| for _ in range(epochs): | |
| j = np.random.randint(0, 100) | |
| y_hat = step(np.dot(fv[j], weights)) | |
| weights += lr * (cv[j] - y_hat) * fv[j] | |
| m_vals.append(-weights[1] / weights[2]) | |
| c_vals.append(-weights[0] / weights[2]) | |
| return m_vals, c_vals | |
| # UI: Epoch slider | |
| epochs = st.slider("Training Epochs", 20, 300, 100, 10) | |
| m_vals, c_vals = train_perceptron(fv, cv, epochs=epochs) | |
| # Plot setup | |
| x_vals = np.linspace(-3, 3, 100) | |
| fig, ax = plt.subplots(figsize=(8, 5)) | |
| ax.scatter(fv[:, 0], fv[:, 1], c=cv, cmap='winter', s=100) | |
| line, = ax.plot([], [], 'r-', linewidth=2) | |
| ax.set_xlim(-3, 3) | |
| ax.set_ylim(-3, 3) | |
| # Animation functions | |
| def init(): | |
| line.set_data([], []) | |
| return line, | |
| def animate(i): | |
| y_vals = m_vals[i] * x_vals + c_vals[i] | |
| line.set_data(x_vals, y_vals) | |
| ax.set_xlabel(f"Epoch {i+1}") | |
| return line, | |
| anim = FuncAnimation(fig, animate, init_func=init, frames=epochs, interval=100) | |
| # Save as temporary GIF | |
| with tempfile.NamedTemporaryFile(delete=False, suffix=".gif") as temp_gif: | |
| anim.save(temp_gif.name, writer=PillowWriter(fps=10)) | |
| gif_path = temp_gif.name | |
| # Display animation with styling | |
| st.image(gif_path, caption="Perceptron Decision Boundary Evolution", use_container_width=True) | |
| # Cleanup on rerun | |
| if os.path.exists(gif_path): | |
| os.remove(gif_path) | |
| # Final footer text with bigger font | |
| st.markdown("---") | |
| st.markdown("<p style='font-size: 18px; text-align: center;'>👩💻 Created by <b>Anshini Kumbhare</b></p>", unsafe_allow_html=True) | |