<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Complete Deep Learning & Computer Vision Curriculum</title>
<!-- Shared Design System -->
<link rel="stylesheet" href="../shared/css/design-system.css">
<link rel="stylesheet" href="../shared/css/components.css">
<!-- MathJax 3.x -->
<script>
MathJax = { tex: { inlineMath: [['$', '$'], ['\\(', '\\)']] } };
</script>
<script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
<style>
* {
  margin: 0;
  padding: 0;
  box-sizing: border-box;
}
:root {
  --bg: #0f1419;
  --surface: #1a1f2e;
  --text: #e4e6eb;
  --text-dim: #b0b7c3;
  --cyan: #00d4ff;
  --orange: #ff6b35;
  --green: #00ff88;
  --yellow: #ffa500;
}
body {
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
  background: var(--bg);
  color: var(--text);
  line-height: 1.6;
  overflow-x: hidden;
}
.container {
  max-width: 1400px;
  margin: 0 auto;
  padding: 20px;
}
header {
  text-align: center;
  margin-bottom: 40px;
  padding: 30px 0;
  border-bottom: 2px solid var(--cyan);
}
h1 {
  font-size: 2.5em;
  background: linear-gradient(135deg, var(--cyan), var(--orange));
  background-clip: text;
  -webkit-background-clip: text;
  -webkit-text-fill-color: transparent;
  margin-bottom: 10px;
}
.subtitle {
  color: var(--text-dim);
  font-size: 1.1em;
}
.dashboard {
  display: none;
}
.dashboard.active {
  display: block;
}
.grid {
  display: grid;
  grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
  gap: 25px;
  margin: 40px 0;
}
.card {
  background: linear-gradient(135deg, rgba(0, 212, 255, 0.1), rgba(255, 107, 53, 0.1));
  border: 2px solid var(--cyan);
  border-radius: 12px;
  padding: 30px;
  cursor: pointer;
  transition: all 0.3s ease;
  text-align: center;
}
.card:hover {
  transform: translateY(-5px);
  box-shadow: 0 10px 30px rgba(0, 212, 255, 0.2);
  border-color: var(--orange);
}
.card-icon {
  font-size: 3em;
  margin-bottom: 15px;
}
.card h3 {
  color: var(--cyan);
  font-size: 1.5em;
  margin-bottom: 10px;
}
.card p {
  color: var(--text-dim);
  font-size: 0.95em;
}
.category-label {
  display: inline-block;
  margin-top: 10px;
  padding: 5px 12px;
  background: rgba(0, 212, 255, 0.2);
  border-radius: 20px;
  font-size: 0.85em;
  color: var(--green);
}
.module {
  display: none;
}
.module.active {
  display: block;
  animation: fadeIn 0.3s ease;
}
@keyframes fadeIn {
  from {
    opacity: 0;
  }
  to {
    opacity: 1;
  }
}
.btn-back {
  padding: 10px 20px;
  background: var(--orange);
  color: var(--bg);
  border: none;
  border-radius: 6px;
  cursor: pointer;
  font-weight: 600;
  margin-bottom: 25px;
  transition: all 0.3s ease;
}
.btn-back:hover {
  background: var(--cyan);
}
.tabs {
  display: flex;
  gap: 10px;
  margin-bottom: 30px;
  flex-wrap: wrap;
  justify-content: center;
  border-bottom: 1px solid rgba(0, 212, 255, 0.2);
  padding-bottom: 15px;
  overflow-x: auto;
}
.tab-btn {
  padding: 10px 20px;
  background: var(--surface);
  color: var(--text);
  border: 2px solid transparent;
  border-radius: 6px;
  cursor: pointer;
  font-size: 0.95em;
  transition: all 0.3s ease;
  font-weight: 500;
  white-space: nowrap;
}
.tab-btn:hover {
  background: rgba(0, 212, 255, 0.1);
  border-color: var(--cyan);
}
.tab-btn.active {
  background: var(--cyan);
  color: var(--bg);
  border-color: var(--cyan);
}
.tab {
  display: none;
}
.tab.active {
  display: block;
  animation: fadeIn 0.3s ease;
}
.section {
  background: var(--surface);
  border: 1px solid rgba(0, 212, 255, 0.2);
  border-radius: 10px;
  padding: 30px;
  margin-bottom: 25px;
  transition: all 0.3s ease;
}
.section:hover {
  border-color: var(--cyan);
  box-shadow: 0 0 20px rgba(0, 212, 255, 0.1);
}
h2 {
  color: var(--cyan);
  font-size: 1.8em;
  margin-bottom: 15px;
}
h3 {
  color: var(--orange);
  font-size: 1.3em;
  margin-top: 20px;
  margin-bottom: 12px;
}
h4 {
  color: var(--green);
  font-size: 1.1em;
  margin-top: 15px;
  margin-bottom: 10px;
}
p {
  margin-bottom: 15px;
  line-height: 1.8;
}
ul {
  margin-left: 20px;
  margin-bottom: 15px;
}
ul li {
  margin-bottom: 8px;
}
.info-box {
  background: linear-gradient(135deg, rgba(0, 212, 255, 0.1), rgba(255, 107, 53, 0.1));
  border: 1px solid var(--cyan);
  border-radius: 8px;
  padding: 20px;
  margin: 20px 0;
}
.box-title {
  color: var(--orange);
  font-weight: 700;
  margin-bottom: 10px;
  font-size: 1.1em;
}
.box-content {
  color: var(--text-dim);
  line-height: 1.7;
}
.formula {
  background: rgba(0, 212, 255, 0.1);
  border: 1px solid var(--cyan);
  border-radius: 8px;
  padding: 20px;
  margin: 20px 0;
  font-family: 'Courier New', monospace;
  overflow-x: auto;
  line-height: 1.8;
  color: var(--cyan);
}
.code-block {
  background: #0d1117;
  border: 1px solid #30363d;
  border-radius: 8px;
  padding: 20px;
  margin: 20px 0;
  overflow-x: auto;
  font-family: 'Fira Code', 'Consolas', monospace;
  font-size: 0.88em;
  line-height: 1.7;
  white-space: pre-wrap;
  color: #e6edf3;
}
.code-block .comment {
  color: #8b949e;
}
.code-block .keyword {
  color: #ff7b72;
}
.code-block .string {
  color: #a5d6ff;
}
.code-block .function {
  color: #d2a8ff;
}
.code-block .number {
  color: #79c0ff;
}
.code-block .class-name {
  color: #ffa657;
}
.code-block .builtin {
  color: #79c0ff;
}
.code-title {
  display: block;
  background: #161b22;
  color: #8b949e;
  padding: 8px 16px;
  border-radius: 8px 8px 0 0;
  border: 1px solid #30363d;
  border-bottom: none;
  font-size: 0.85em;
  font-family: 'Fira Code', monospace;
  margin-top: 20px;
}
.code-title+.code-block {
  margin-top: 0;
  border-radius: 0 0 8px 8px;
}
.callout {
  border-left: 4px solid;
  padding: 15px;
  margin: 20px 0;
  border-radius: 6px;
}
.callout.tip {
  border-left-color: var(--green);
  background: rgba(0, 255, 136, 0.05);
}
.callout.warning {
  border-left-color: var(--yellow);
  background: rgba(255, 165, 0, 0.05);
}
.callout.insight {
  border-left-color: var(--cyan);
  background: rgba(0, 212, 255, 0.05);
}
.callout-title {
  font-weight: 700;
  margin-bottom: 8px;
}
.list-item {
  display: flex;
  gap: 12px;
  margin: 12px 0;
  padding: 12px;
  background: rgba(0, 212, 255, 0.05);
  border-left: 3px solid var(--cyan);
  border-radius: 4px;
}
.list-num {
  color: var(--orange);
  font-weight: 700;
  min-width: 30px;
}
table {
  width: 100%;
  border-collapse: collapse;
  margin: 20px 0;
}
th,
td {
  padding: 12px;
  text-align: left;
  border: 1px solid rgba(0, 212, 255, 0.2);
}
th {
  background: rgba(0, 212, 255, 0.1);
  color: var(--cyan);
  font-weight: 700;
}
.viz-container {
  background: rgba(0, 212, 255, 0.02);
  border: 1px solid rgba(0, 212, 255, 0.2);
  border-radius: 8px;
  padding: 20px;
  margin: 20px 0;
  display: flex;
  justify-content: center;
  overflow-x: auto;
}
.viz-controls {
  display: flex;
  gap: 10px;
  margin-top: 20px;
  justify-content: center;
  flex-wrap: wrap;
}
.btn-viz {
  padding: 10px 20px;
  background: var(--cyan);
  color: var(--bg);
  border: none;
  border-radius: 6px;
  font-weight: 600;
  cursor: pointer;
  font-size: 0.95em;
  transition: all 0.3s ease;
}
.btn-viz:hover {
  background: var(--orange);
  transform: scale(1.05);
}
canvas {
  max-width: 100%;
  height: auto;
}
@media (max-width: 768px) {
  h1 {
    font-size: 1.8em;
  }
  .tabs {
    flex-direction: column;
  }
  .tab-btn {
    width: 100%;
  }
  .grid {
    grid-template-columns: 1fr;
  }
  canvas {
    width: 100% !important;
    height: auto !important;
  }
}
</style>
</head>
<body>
<div class="container">
  <!-- MAIN DASHBOARD -->
  <div id="dashboard" class="dashboard active">
    <header>
      <h1>🧠 Complete Deep Learning & Computer Vision</h1>
      <p class="subtitle">Comprehensive Curriculum | Foundations to Advanced Applications</p>
    </header>
    <div style="text-align: center; margin-bottom: 40px;">
      <p style="color: var(--text-dim); font-size: 1.1em;">
        Master all aspects of deep learning and computer vision. 40+ modules covering neural networks, CNNs,
        object detection, GANs, and more.
      </p>
    </div>
    <div class="grid" id="modulesGrid"></div>
  </div>
  <!-- MODULES CONTAINER -->
  <div id="modulesContainer"></div>
</div>
<script>
const modules = [
  // Module 1: Deep Learning Foundations
  {
    id: "nn-basics",
    title: "Introduction to Neural Networks",
    icon: "🧬",
    category: "Foundations",
    color: "#0088ff",
    description: "Biological vs. Artificial neurons and network architecture"
  },
  {
    id: "perceptron",
    title: "The Perceptron",
    icon: "⚙️",
    category: "Foundations",
    color: "#0088ff",
    description: "Single layer networks and their limitations"
  },
  {
    id: "mlp",
    title: "Multi-Layer Perceptron (MLP)",
    icon: "🏗️",
    category: "Foundations",
    color: "#0088ff",
    description: "Hidden layers and deep architectures"
  },
  {
    id: "activation",
    title: "Activation Functions",
    icon: "⚡",
    category: "Foundations",
    color: "#0088ff",
    description: "Sigmoid, ReLU, Tanh, Leaky ReLU, ELU, Softmax"
  },
  {
    id: "weight-init",
    title: "Weight Initialization",
    icon: "🎯",
    category: "Foundations",
    color: "#0088ff",
    description: "Xavier, He, Random initialization strategies"
  },
  {
    id: "loss",
    title: "Loss Functions",
    icon: "📉",
    category: "Foundations",
    color: "#0088ff",
    description: "MSE, Binary Cross-Entropy, Categorical Cross-Entropy"
  },
  {
    id: "optimizers",
    title: "Optimizers",
    icon: "🎯",
    category: "Training",
    color: "#00ff00",
    description: "SGD, Momentum, Adam, Adagrad, RMSprop"
  },
  {
    id: "backprop",
    title: "Forward & Backpropagation",
    icon: "⬅️",
    category: "Training",
    color: "#00ff00",
    description: "Chain rule and gradient computation"
  },
  {
    id: "regularization",
    title: "Regularization",
    icon: "🛡️",
    category: "Training",
    color: "#00ff00",
    description: "L1/L2, Dropout, Early Stopping, Batch Norm"
  },
  {
    id: "batch-norm",
    title: "Batch Normalization",
    icon: "⚙️",
    category: "Training",
    color: "#00ff00",
    description: "Stabilizing and speeding up training"
  },
  // Module 2: Computer Vision Fundamentals
  {
    id: "cv-intro",
    title: "CV Fundamentals",
    icon: "👁️",
    category: "Computer Vision",
    color: "#ff6b35",
    description: "Why ANNs fail with images, parameter explosion"
  },
  {
    id: "conv-layer",
    title: "Convolutional Layers",
    icon: "🖼️",
    category: "Computer Vision",
    color: "#ff6b35",
    description: "Kernels, filters, feature maps, stride, padding"
  },
  {
    id: "pooling",
    title: "Pooling Layers",
    icon: "📦",
    category: "Computer Vision",
    color: "#ff6b35",
    description: "Max pooling, average pooling, spatial reduction"
  },
  {
    id: "cnn-basics",
    title: "CNN Architecture",
    icon: "🏗️",
    category: "Computer Vision",
    color: "#ff6b35",
    description: "Combining conv, pooling, and fully connected layers"
  },
  {
    id: "viz-filters",
    title: "Visualizing CNNs",
    icon: "🔍",
    category: "Computer Vision",
    color: "#ff6b35",
    description: "What filters learn: edges → shapes → objects"
  },
  // Module 3: Advanced CNN Architectures
  {
    id: "lenet",
    title: "LeNet-5",
    icon: "🔢",
    category: "CNN Architectures",
    color: "#ff00ff",
    description: "Classic digit recognizer (MNIST)"
  },
  {
    id: "alexnet",
    title: "AlexNet",
    icon: "🌟",
    category: "CNN Architectures",
    color: "#ff00ff",
    description: "The breakthrough in deep computer vision (2012)"
  },
  {
    id: "vgg",
    title: "VGGNet",
    icon: "📊",
    category: "CNN Architectures",
    color: "#ff00ff",
    description: "VGG-16/19: Deep networks with small filters"
  },
  {
    id: "resnet",
    title: "ResNet",
    icon: "🌉",
    category: "CNN Architectures",
    color: "#ff00ff",
    description: "Skip connections, solving vanishing gradients"
  },
  {
    id: "inception",
    title: "InceptionNet (GoogLeNet)",
    icon: "🎯",
    category: "CNN Architectures",
    color: "#ff00ff",
    description: "1x1 convolutions, multi-scale feature extraction"
  },
  {
    id: "mobilenet",
    title: "MobileNet",
    icon: "📱",
    category: "CNN Architectures",
    color: "#ff00ff",
    description: "Depth-wise separable convolutions for efficiency"
  },
  {
    id: "transfer-learning",
    title: "Transfer Learning",
    icon: "🔄",
    category: "CNN Architectures",
    color: "#ff00ff",
    description: "Fine-tuning and leveraging pre-trained models"
  },
  // Module 4: Object Detection & Segmentation
  {
    id: "localization",
    title: "Object Localization",
    icon: "📍",
    category: "Detection",
    color: "#00ff00",
    description: "Bounding boxes and classification together"
  },
  {
    id: "rcnn",
    title: "R-CNN Family",
    icon: "🎯",
    category: "Detection",
    color: "#00ff00",
    description: "R-CNN, Fast R-CNN, Faster R-CNN"
  },
  {
    id: "yolo",
    title: "YOLO",
    icon: "⚡",
    category: "Detection",
    color: "#00ff00",
    description: "Real-time object detection (v3, v5, v8)"
  },
  {
    id: "ssd",
    title: "SSD",
    icon: "🚀",
    category: "Detection",
    color: "#00ff00",
    description: "Single Shot MultiBox Detector"
  },
  {
    id: "semantic-seg",
    title: "Semantic Segmentation",
    icon: "🖌️",
    category: "Segmentation",
    color: "#00ff00",
    description: "Pixel-level classification (U-Net)"
  },
  {
    id: "instance-seg",
    title: "Instance Segmentation",
    icon: "👥",
    category: "Segmentation",
    color: "#00ff00",
    description: "Mask R-CNN and separate object instances"
  },
  {
    id: "face-recog",
    title: "Face Recognition",
    icon: "👤",
    category: "Segmentation",
    color: "#00ff00",
    description: "Siamese networks and triplet loss"
  },
  // Module 5: Generative Models
  {
    id: "autoencoders",
    title: "Autoencoders",
    icon: "🔀",
    category: "Generative",
    color: "#ffaa00",
    description: "Encoder-decoder, latent space, denoising"
  },
  {
    id: "gans",
    title: "GANs (Generative Adversarial Networks)",
    icon: "🎮",
    category: "Generative",
    color: "#ffaa00",
    description: "Generator vs. Discriminator, DCGAN"
  },
  {
    id: "diffusion",
    title: "Diffusion Models",
    icon: "🌊",
    category: "Generative",
    color: "#ffaa00",
    description: "Foundation of Stable Diffusion and DALL-E"
  },
  // Additional Advanced Topics
  {
    id: "rnn",
    title: "RNNs & LSTMs",
    icon: "🔄",
    category: "Sequence",
    color: "#ff6b35",
    description: "Recurrent networks for sequential data"
  },
  {
    id: "transformers",
    title: "Transformers",
    icon: "🔗",
    category: "Sequence",
    color: "#ff6b35",
    description: "\"Attention Is All You Need\" - Complete paper breakdown with math"
  },
  {
    id: "bert",
    title: "BERT & NLP Transformers",
    icon: "📚",
    category: "NLP",
    color: "#ff6b35",
    description: "Bidirectional transformers for language"
  },
  {
    id: "gpt",
    title: "GPT & Language Models",
    icon: "💬",
    category: "NLP",
    color: "#ff6b35",
    description: "Autoregressive models and text generation"
  },
  {
    id: "vit",
    title: "Vision Transformers (ViT)",
    icon: "🎨",
    category: "Vision",
    color: "#ff6b35",
    description: "Transformers applied to image data"
  },
  {
    id: "gnn",
    title: "Graph Neural Networks",
    icon: "🕸️",
    category: "Advanced",
    color: "#9900ff",
    description: "Deep learning on non-Euclidean graph data"
  },
  {
    id: "seq2seq",
    title: "Seq2Seq & Attention",
    icon: "➡️",
    category: "NLP",
    color: "#ff6b35",
    description: "Encoder-Decoder models and Attention mechanics"
  },
  {
    id: "research-papers",
    title: "Research Library",
    icon: "🎓",
    category: "Advanced",
    color: "#9900ff",
    description: "Curated collection of seminal deep learning papers"
  },
  // Module 7: GenAI & LLM Engineering
  {
    id: "vector-db",
    title: "Vector Databases",
    icon: "🧲",
    category: "GenAI",
    color: "#00c9a7",
    description: "Embeddings, similarity search, FAISS, Pinecone, ChromaDB"
  },
  {
    id: "rag",
    title: "RAG Pipelines",
    icon: "🔗",
    category: "GenAI",
    color: "#00c9a7",
    description: "Retrieval-Augmented Generation for grounded AI"
  },
  {
    id: "advanced-llm",
    title: "Fine-Tuning & Quantization",
    icon: "⚙️",
    category: "GenAI",
    color: "#00c9a7",
    description: "LoRA, QLoRA, PEFT, GGUF, and deployment strategies"
  }
];
// Comprehensive content for all modules
const MODULE_CONTENT = {
  "nn-basics": {
    overview: `
<p>A neural network is a computing system <strong>inspired by biological brains</strong> — but it's not a brain simulation. It's a mathematical function that learns patterns from data by adjusting millions of numbers called <strong>weights</strong>.</p>
<div class="callout insight">
<div class="callout-title">🧠 The Core Intuition</div>
Think of a neural network like a factory assembly line. Raw materials (data) enter one end. At each station (layer), workers (neurons) examine the material, make a small decision, and pass the result forward. By the end, the factory produces a finished product (prediction). During training, a quality inspector (loss function) checks the output and sends feedback backward through the line so each worker can improve.
</div>
<h3>How Does a Single Neuron Work?</h3>
<p>A neuron does three things:</p>
<div class="list-item">
<div class="list-num">1</div>
<div><strong>Weighted Sum:</strong> Multiply each input by a weight and add them up: $$z = w_1 x_1 + w_2 x_2 + ... + w_n x_n + b$$</div>
</div>
<div class="list-item">
<div class="list-num">2</div>
<div><strong>Bias:</strong> Add a bias term $b$ — this shifts the decision boundary (like the y-intercept in y = mx + b)</div>
</div>
<div class="list-item">
<div class="list-num">3</div>
<div><strong>Activation:</strong> Pass the result through a non-linear function: $$a = \\sigma(z)$$</div>
</div>
<h3>The Training Process</h3>
<p>Training a neural network follows a simple loop repeated thousands of times:</p>
<div class="info-box">
<div class="box-title">🔄 The Training Loop</div>
<div class="box-content">
<strong>1. Forward Pass:</strong> Feed data through the network, get a prediction<br>
<strong>2. Compute Loss:</strong> Measure how wrong the prediction is (e.g., MSE, Cross-Entropy)<br>
<strong>3. Backward Pass:</strong> Calculate how each weight contributed to the error (gradients via chain rule)<br>
<strong>4. Update Weights:</strong> Nudge each weight slightly to reduce the error: $$w_{new} = w_{old} - \\eta \\cdot \\frac{\\partial L}{\\partial w}$$<br>
<strong>5. Repeat</strong> until the loss is small enough
</div>
</div>
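<p>As a concrete illustration, here is that exact loop in PyTorch. This is a minimal sketch: the toy data and the tiny model below are invented purely for demonstration.</p>
<span class="code-title">📄 training_loop.py</span><div class="code-block"><span class="keyword">import</span> torch
<span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn
<span class="comment"># Toy data: 100 samples, 4 features, binary labels (illustrative only)</span>
X = torch.randn(<span class="number">100</span>, <span class="number">4</span>)
y = torch.randint(<span class="number">0</span>, <span class="number">2</span>, (<span class="number">100</span>, <span class="number">1</span>)).float()
model = nn.Sequential(nn.Linear(<span class="number">4</span>, <span class="number">8</span>), nn.ReLU(), nn.Linear(<span class="number">8</span>, <span class="number">1</span>))
loss_fn = nn.BCEWithLogitsLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=<span class="number">0.1</span>)
<span class="keyword">for</span> epoch <span class="keyword">in</span> <span class="builtin">range</span>(<span class="number">100</span>):
    pred = model(X) <span class="comment"># 1. Forward pass</span>
    loss = loss_fn(pred, y) <span class="comment"># 2. Compute loss</span>
    optimizer.zero_grad() <span class="comment"># Clear stale gradients</span>
    loss.backward() <span class="comment"># 3. Backward pass (chain rule)</span>
    optimizer.step() <span class="comment"># 4. Update weights: w -= lr * grad</span></div>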
<h3>Types of Layers</h3>
<table style="width:100%; border-collapse: collapse; margin: 15px 0;">
<tr style="border-bottom: 2px solid var(--cyan);">
<th style="padding: 10px; text-align: left; color: var(--cyan);">Layer Type</th>
<th style="padding: 10px; text-align: left; color: var(--cyan);">Purpose</th>
<th style="padding: 10px; text-align: left; color: var(--cyan);">Example</th>
</tr>
<tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Dense/Linear</td><td>Every neuron connects to every input</td><td>nn.Linear(784, 128)</td></tr>
<tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Convolutional</td><td>Detect spatial patterns (edges, textures)</td><td>nn.Conv2d(3, 32, 3)</td></tr>
<tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Recurrent (LSTM/GRU)</td><td>Process sequences (text, time series)</td><td>nn.LSTM(128, 256)</td></tr>
<tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Attention</td><td>Focus on relevant parts of input</td><td>nn.MultiheadAttention()</td></tr>
<tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Dropout</td><td>Regularization (randomly zero neurons)</td><td>nn.Dropout(0.5)</td></tr>
<tr><td style="padding: 8px;">BatchNorm</td><td>Stabilize and speed up training</td><td>nn.BatchNorm1d(128)</td></tr>
</table>
    `,
    concepts: `
<h3>Core Components</h3>
<div class="list-item">
<div class="list-num">01</div>
<div><strong>Neurons (Nodes):</strong> Basic computational units that receive inputs, apply weights, add bias, and apply activation function</div>
</div>
<div class="list-item">
<div class="list-num">02</div>
<div><strong>Layers:</strong> Input layer (receives data), Hidden layers (feature extraction), Output layer (predictions)</div>
</div>
<div class="list-item">
<div class="list-num">03</div>
<div><strong>Weights:</strong> Parameters learned during training that determine connection strength</div>
</div>
<div class="list-item">
<div class="list-num">04</div>
<div><strong>Bias:</strong> Allows shifting the activation function for better fitting</div>
</div>
<div class="list-item">
<div class="list-num">05</div>
<div><strong>Activation Function:</strong> Introduces non-linearity (ReLU, Sigmoid, Tanh)</div>
</div>
    `,
    applications: `
<h3>Real-World Applications</h3>
<div class="info-box">
<div class="box-title">🏥 Healthcare</div>
<div class="box-content">Disease diagnosis, medical image analysis, drug discovery, patient risk prediction</div>
</div>
<div class="info-box">
<div class="box-title">💰 Finance</div>
<div class="box-content">Fraud detection, algorithmic trading, credit scoring, portfolio optimization</div>
</div>
<div class="info-box">
<div class="box-title">🛒 E-commerce</div>
<div class="box-content">Recommendation systems, demand forecasting, customer segmentation, price optimization</div>
</div>
    `,
    math: `
<h3>The Fundamental Equations of a Neuron</h3>
<p>A single neuron performs a weighted sum followed by an activation function. This is the atomic building block of all neural networks.</p>
<div class="formula" style="font-size: 1.2rem; text-align: center; margin: 20px 0; background: rgba(0, 212, 255, 0.08); padding: 25px; border-radius: 8px;">
<strong>z = Σ(wᵢxᵢ) + b = w₁x₁ + w₂x₂ + ... + wₙxₙ + b</strong><br>
<strong>a = σ(z)</strong>
</div>
<h4>Step-by-Step: Single Neuron Forward Pass</h4>
<div class="list-item">
<div class="list-num">01</div>
<div>
<strong>Weighted Sum (Linear):</strong><br>
z = wᵀx + b = Σᵢ wᵢxᵢ + b<br>
<span class="formula-caption">This is a dot product plus bias - pure linear algebra</span>
</div>
</div>
<div class="list-item">
<div class="list-num">02</div>
<div>
<strong>Activation (Non-Linear):</strong><br>
a = σ(z) where σ can be ReLU, Sigmoid, Tanh, etc.<br>
<span class="formula-caption">This introduces non-linearity, enabling complex functions</span>
</div>
</div>
<div class="callout insight">
<div class="callout-title">📝 Paper & Pain: Manual Forward Pass</div>
<strong>Inputs:</strong> x = [2, 3], <strong>Weights:</strong> w = [0.5, -0.3], <strong>Bias:</strong> b = 0.1<br><br>
<strong>Step 1 - Weighted Sum:</strong><br>
z = (0.5 × 2) + (-0.3 × 3) + 0.1<br>
z = 1.0 - 0.9 + 0.1 = <strong>0.2</strong><br><br>
<strong>Step 2 - ReLU Activation:</strong><br>
a = max(0, 0.2) = <strong>0.2</strong><br><br>
<strong>Step 2 (alt) - Sigmoid Activation:</strong><br>
a = 1 / (1 + e⁻⁰·²) = 1 / 1.819 ≈ <strong>0.55</strong>
</div>
<h3>Network Layer in Matrix Form</h3>
<p>For a layer with n inputs and m neurons, we use matrices for efficiency:</p>
<div class="formula">
<strong>Z = WX + b</strong><br>
<strong>A = σ(Z)</strong><br><br>
Where:<br>
• W ∈ ℝᵐˣⁿ (weight matrix: m neurons, n inputs)<br>
• X ∈ ℝⁿˣ¹ (input vector)<br>
• b ∈ ℝᵐˣ¹ (bias vector)<br>
• Z ∈ ℝᵐˣ¹ (pre-activation)<br>
• A ∈ ℝᵐˣ¹ (activation output)
</div>
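<p>The matrix form is easy to sanity-check in NumPy. A minimal sketch, with the layer sizes chosen arbitrarily for illustration:</p>
<span class="code-title">📄 layer_matrix_form.py</span><div class="code-block"><span class="keyword">import</span> numpy <span class="keyword">as</span> np
n, m = <span class="number">4</span>, <span class="number">3</span> <span class="comment"># 4 inputs, 3 neurons (arbitrary)</span>
W = np.random.randn(m, n) <span class="comment"># W ∈ ℝ^(m×n)</span>
X = np.random.randn(n, <span class="number">1</span>) <span class="comment"># X ∈ ℝ^(n×1)</span>
b = np.zeros((m, <span class="number">1</span>)) <span class="comment"># b ∈ ℝ^(m×1)</span>
Z = W @ X + b <span class="comment"># Pre-activation, shape (3, 1)</span>
A = np.maximum(<span class="number">0</span>, Z) <span class="comment"># ReLU activation, shape (3, 1)</span>
<span class="keyword">print</span>(Z.shape, A.shape) <span class="comment"># (3, 1) (3, 1)</span></div>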
<h3>Parameter Count Formula</h3>
<div class="formula">
For a layer: n_in → n_out<br>
<strong>Parameters = n_in × n_out + n_out</strong><br>
(weights) + (biases)<br><br>
Example: Layer 784 → 128<br>
Params = 784 × 128 + 128 = 100,480
</div>
    `,
    code: `
<h3>Build a Neuron from Scratch (NumPy)</h3>
<p>Let's implement the fundamental unit of a neural network — a single neuron — using only NumPy. This helps you understand what happens <strong>under the hood</strong> before using frameworks like PyTorch.</p>
<span class="code-title">📄 neuron_numpy.py</span><div class="code-block"><span class="keyword">import</span> numpy <span class="keyword">as</span> np
<span class="comment"># --- A Single Neuron from Scratch ---</span>
<span class="keyword">def</span> <span class="function">sigmoid</span>(z):
    <span class="string">"""Activation function: squashes output to (0, 1)"""</span>
    <span class="keyword">return</span> <span class="number">1</span> / (<span class="number">1</span> + np.exp(-z))
<span class="comment"># Input features (e.g., hours studied, hours slept)</span>
X = np.array([<span class="number">6.0</span>, <span class="number">8.0</span>])
<span class="comment"># Learnable parameters</span>
weights = np.array([<span class="number">0.5</span>, <span class="number">-0.3</span>])
bias = <span class="number">0.1</span>
<span class="comment"># Forward pass: z = w·x + b</span>
z = np.dot(weights, X) + bias <span class="comment"># 0.5*6 + (-0.3)*8 + 0.1 = 0.7</span>
output = sigmoid(z) <span class="comment"># σ(0.7) ≈ 0.668</span>
<span class="keyword">print</span>(f<span class="string">"Linear output z = {z:.3f}"</span>)
<span class="keyword">print</span>(f<span class="string">"Activated output a = {output:.3f}"</span>)</div>
<h3>The Same Neuron in PyTorch</h3>
<p>PyTorch wraps this into <code>nn.Linear</code>. One line replaces all the manual math above — but the computation is identical.</p>
<span class="code-title">📄 neuron_pytorch.py</span><div class="code-block"><span class="keyword">import</span> torch
<span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn
<span class="comment"># A single neuron: 2 inputs → 1 output</span>
neuron = nn.Linear(in_features=<span class="number">2</span>, out_features=<span class="number">1</span>)
<span class="comment"># Input tensor</span>
X = torch.tensor([[<span class="number">6.0</span>, <span class="number">8.0</span>]])
<span class="comment"># Forward pass</span>
z = neuron(X) <span class="comment"># Linear: z = Wx + b</span>
a = torch.sigmoid(z) <span class="comment"># Activation</span>
<span class="keyword">print</span>(f<span class="string">"Output: {a.item():.4f}"</span>)
<span class="comment"># Check learnable parameters</span>
<span class="keyword">print</span>(f<span class="string">"Weights: {neuron.weight.data}"</span>)
<span class="keyword">print</span>(f<span class="string">"Bias: {neuron.bias.data}"</span>)
<span class="keyword">print</span>(f<span class="string">"Total params: {sum(p.numel() for p in neuron.parameters())}"</span>) <span class="comment"># 3</span></div>
<h3>Build a Simple Neural Network</h3>
<span class="code-title">📄 simple_network.py</span><div class="code-block"><span class="keyword">import</span> torch
<span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn
<span class="comment"># 3-layer network: 784 → 128 → 64 → 10</span>
model = nn.Sequential(
    nn.Linear(<span class="number">784</span>, <span class="number">128</span>), <span class="comment"># Hidden layer 1</span>
    nn.ReLU(),
    nn.Linear(<span class="number">128</span>, <span class="number">64</span>), <span class="comment"># Hidden layer 2</span>
    nn.ReLU(),
    nn.Linear(<span class="number">64</span>, <span class="number">10</span>), <span class="comment"># Output layer (10 classes)</span>
)
<span class="comment"># Count parameters</span>
total = <span class="builtin">sum</span>(p.numel() <span class="keyword">for</span> p <span class="keyword">in</span> model.parameters())
<span class="keyword">print</span>(f<span class="string">"Total parameters: {total:,}"</span>) <span class="comment"># 109,386</span>
<span class="comment"># Forward pass with random MNIST-like input</span>
X = torch.randn(<span class="number">1</span>, <span class="number">784</span>)
output = model(X)
<span class="keyword">print</span>(f<span class="string">"Output shape: {output.shape}"</span>) <span class="comment"># [1, 10]</span></div>
    `
  },
  "activation": {
    overview: `
<p>Activation functions are the <strong>secret sauce</strong> that gives neural networks their power. Without them, a neural network — no matter how many layers — would just be a fancy linear regression. They introduce <strong>non-linearity</strong>, which is what allows networks to learn complex patterns like recognizing faces, understanding language, or playing games.</p>
<div class="callout insight">
<div class="callout-title">🧠 The Big Idea</div>
Imagine trying to separate red and blue dots on a page using only straight lines. Without activation functions, that's all a neural network can do — draw straight lines. With activation functions, the network can bend, curve, and twist those lines into any shape needed to perfectly separate the dots.
</div>
<h3>Why Do We Need Activation Functions?</h3>
<p>Consider stacking two linear layers:</p>
<div class="formula">
Layer 1: $$z_1 = W_1 x + b_1$$
Layer 2: $$z_2 = W_2 z_1 + b_2 = W_2(W_1 x + b_1) + b_2 = (W_2 W_1)x + (W_2 b_1 + b_2)$$
</div>
<p>The result is still just <strong>one linear transformation</strong>! Adding 100 more layers changes nothing — it collapses into a single matrix multiplication. Activation functions break this linearity.</p>
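<p>You can verify the collapse numerically. A minimal sketch with arbitrary random weights: the stacked layers and the single combined layer produce identical outputs.</p>
<span class="code-title">📄 linear_collapse.py</span><div class="code-block"><span class="keyword">import</span> numpy <span class="keyword">as</span> np
x = np.random.randn(<span class="number">3</span>)
W1, b1 = np.random.randn(<span class="number">4</span>, <span class="number">3</span>), np.random.randn(<span class="number">4</span>)
W2, b2 = np.random.randn(<span class="number">2</span>, <span class="number">4</span>), np.random.randn(<span class="number">2</span>)
two_layers = W2 @ (W1 @ x + b1) + b2 <span class="comment"># Layer 2 applied to Layer 1</span>
one_layer = (W2 @ W1) @ x + (W2 @ b1 + b2) <span class="comment"># Single collapsed layer</span>
<span class="keyword">print</span>(np.allclose(two_layers, one_layer)) <span class="comment"># True: no extra expressive power</span></div>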
| <div class="info-box"> | |
| <div class="box-title">Learning Objectives</div> | |
| <div class="box-content"> | |
| ✓ Understand <strong>why</strong> non-linearity is essential for deep learning<br> | |
| ✓ Master 7 activation functions: Sigmoid, Tanh, ReLU, Leaky ReLU, ELU, Softmax, GELU<br> | |
| ✓ Know the <strong>vanishing gradient problem</strong> and which activations solve it<br> | |
| ✓ Learn when to use which activation (hidden layers vs. output layers)<br> | |
| ✓ Implement all activations in PyTorch | |
| </div> | |
| </div> | |
| <h3>Quick Comparison Table</h3> | |
| <table style="width:100%; border-collapse: collapse; margin: 15px 0;"> | |
| <tr style="border-bottom: 2px solid var(--cyan);"> | |
| <th style="padding: 10px; text-align: left; color: var(--cyan);">Function</th> | |
| <th style="padding: 10px; text-align: left; color: var(--cyan);">Range</th> | |
| <th style="padding: 10px; text-align: left; color: var(--cyan);">Best For</th> | |
| <th style="padding: 10px; text-align: left; color: var(--cyan);">Issue</th> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"> | |
| <td style="padding: 8px;">Sigmoid</td><td>(0, 1)</td><td>Binary output</td><td>Vanishing gradient</td> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"> | |
| <td style="padding: 8px;">Tanh</td><td>(-1, 1)</td><td>Hidden layers (older models)</td><td>Vanishing gradient</td> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"> | |
| <td style="padding: 8px;">ReLU</td><td>[0, ∞)</td><td>Default for hidden layers</td><td>Dying neurons</td> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"> | |
| <td style="padding: 8px;">Leaky ReLU</td><td>(-∞, ∞)</td><td>When ReLU neurons die</td><td>Extra hyperparameter</td> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"> | |
| <td style="padding: 8px;">GELU</td><td>≈(-0.17, ∞)</td><td>Transformers (BERT, GPT)</td><td>Slightly slower</td> | |
| </tr> | |
| <tr> | |
| <td style="padding: 8px;">Softmax</td><td>(0, 1) sums to 1</td><td>Multi-class output</td><td>Only for output layer</td> | |
| </tr> | |
| </table> | |
| `, | |
| concepts: ` | |
| <h3>1. Sigmoid — The Classic Gate</h3> | |
| <p>Sigmoid squashes any input into the range (0, 1). Think of it as a <strong>confidence score</strong> — how sure is the network that something is true?</p> | |
| <div class="formula" style="text-align:center;">$$\\sigma(z) = \\frac{1}{1 + e^{-z}}$$</div> | |
| <div class="callout tip"> | |
| <div class="callout-title">📌 When to Use</div> | |
| <strong>Only</strong> for the output layer of binary classification (yes/no, spam/not-spam). <strong>Never</strong> use in hidden layers — it causes the vanishing gradient problem. | |
| </div> | |
| <p><strong>Why it vanishes:</strong> The maximum derivative of sigmoid is just 0.25 (at z=0). When you multiply gradients across 10 layers: 0.25¹⁰ ≈ 0.00000095. The gradient essentially disappears, and the network stops learning.</p> | |
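<p>A tiny sketch of that shrinkage, pretending (for illustration) that every layer contributes the maximum sigmoid derivative of 0.25:</p>
<span class="code-title">📄 gradient_shrinkage.py</span><div class="code-block">max_grad = <span class="number">0.25</span> <span class="comment"># Maximum of sigmoid'(z), reached at z = 0</span>
<span class="keyword">for</span> depth <span class="keyword">in</span> (<span class="number">1</span>, <span class="number">5</span>, <span class="number">10</span>, <span class="number">20</span>):
    <span class="keyword">print</span>(depth, max_grad ** depth) <span class="comment"># 10 layers → ~9.5e-07</span></div>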
<h3>2. Tanh — Centered Sigmoid</h3>
<p>Tanh is sigmoid's improvement — it outputs values between (-1, 1), which means it's <strong>zero-centered</strong>. This helps with optimization because gradients don't all push in the same direction.</p>
<div class="formula" style="text-align:center;">$$\\tanh(z) = \\frac{e^z - e^{-z}}{e^z + e^{-z}} = 2\\sigma(2z) - 1$$</div>
<p>Fun fact: Tanh is just a rescaled sigmoid! It's a better choice for hidden layers than sigmoid, but its gradient still vanishes for large |z|.</p>
<h3>3. ReLU — The Game Changer</h3>
<p>ReLU (Rectified Linear Unit) is beautifully simple: output the input if positive, else output zero. This simplicity made deep learning practical.</p>
<div class="formula" style="text-align:center;">$$\\text{ReLU}(z) = \\max(0, z)$$</div>
<div class="callout insight">
<div class="callout-title">🎯 Why ReLU Changed Everything</div>
<strong>1. No vanishing gradient:</strong> Gradient is exactly 1 for positive inputs — constant, never shrinking.<br>
<strong>2. Sparse activation:</strong> ~50% of neurons output 0. This sparsity makes networks efficient.<br>
<strong>3. Computationally cheap:</strong> Just a comparison — no exponentials like sigmoid/tanh.<br>
<strong>4. Biologically plausible:</strong> Real neurons either fire or don't (all-or-nothing).
</div>
<p><strong>The Dying ReLU Problem:</strong> If a neuron's input is always negative (e.g., after a bad weight update), its gradient is permanently 0. That neuron is "dead" — it will never activate again. Solution → Leaky ReLU.</p>
<h3>4. Leaky ReLU & Parametric ReLU</h3>
<p>Instead of outputting 0 for negative inputs, Leaky ReLU allows a small slope (typically α=0.01):</p>
<div class="formula" style="text-align:center;">$$\\text{LeakyReLU}(z) = \\begin{cases} z & z > 0 \\\\ \\alpha z & z \\leq 0 \\end{cases}$$</div>
<p><strong>Parametric ReLU (PReLU)</strong> makes α a <em>learnable parameter</em> — the network decides the optimal slope for negative values.</p>
<h3>5. GELU — The Transformer's Choice</h3>
<p>GELU (Gaussian Error Linear Unit) is used in GPT, BERT, and almost all modern transformers. Unlike ReLU's hard cutoff at 0, GELU provides a <strong>smooth curve</strong>:</p>
<div class="formula" style="text-align:center;">$$\\text{GELU}(z) = z \\cdot \\Phi(z) \\approx 0.5z\\left(1 + \\tanh\\left[\\sqrt{\\frac{2}{\\pi}}(z + 0.044715z^3)\\right]\\right)$$</div>
<p>Where Φ(z) is the cumulative distribution function of the standard normal. GELU "softly gates" each input according to how statistically likely it is to be positive.</p>
<h3>6. Swish / SiLU</h3>
<p>Discovered by Google through automated search. It's simply x times sigmoid(x):</p>
<div class="formula" style="text-align:center;">$$\\text{Swish}(z) = z \\cdot \\sigma(z) = \\frac{z}{1 + e^{-z}}$$</div>
<p>Swish is smooth, non-monotonic, and consistently outperforms ReLU in deep networks (>40 layers). Used in EfficientNet.</p>
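<p>All of these variants ship as built-in PyTorch modules. A minimal sketch (the layer sizes are arbitrary):</p>
<span class="code-title">📄 relu_variants.py</span><div class="code-block"><span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn
model = nn.Sequential(
    nn.Linear(<span class="number">784</span>, <span class="number">256</span>),
    nn.PReLU(), <span class="comment"># Learnable negative slope (α is a parameter)</span>
    nn.Linear(<span class="number">256</span>, <span class="number">128</span>),
    nn.GELU(), <span class="comment"># Smooth gating, the Transformer default</span>
    nn.Linear(<span class="number">128</span>, <span class="number">64</span>),
    nn.SiLU(), <span class="comment"># Swish: x * sigmoid(x)</span>
    nn.Linear(<span class="number">64</span>, <span class="number">10</span>),
)</div>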
<h3>7. Softmax — Probability Distribution</h3>
<p>Softmax converts a vector of raw scores (logits) into a <strong>probability distribution</strong>. All outputs sum to 1.</p>
<div class="formula" style="text-align:center;">$$\\text{Softmax}(z_i) = \\frac{e^{z_i}}{\\sum_{j=1}^{K} e^{z_j}}$$</div>
<p><strong>Example:</strong> Logits [2.0, 1.0, 0.1] → Softmax [0.659, 0.242, 0.099]. The network is 65.9% confident in class 0.</p>
<div class="callout warning">
<div class="callout-title">⚠️ Common Mistakes</div>
1. Using Softmax in hidden layers (it's for the final output layer only)<br>
2. Using Sigmoid for multi-class (use Softmax instead)<br>
3. Applying Softmax manually before CrossEntropyLoss in PyTorch (it already includes softmax!)
</div>
<h3>Decision Guide: Which Activation to Use?</h3>
<div class="info-box">
<div class="box-title">🎯 The Simple Rules</div>
<div class="box-content">
<strong>Hidden layers:</strong> Start with ReLU. If you see dying neurons, try Leaky ReLU. For Transformers, use GELU.<br><br>
<strong>Output layer — Binary classification:</strong> Sigmoid (one neuron, output = probability)<br>
<strong>Output layer — Multi-class:</strong> Softmax (K neurons, outputs = probabilities summing to 1)<br>
<strong>Output layer — Regression:</strong> No activation (or Linear) — let the output be any real number
</div>
</div>
    `,
    applications: `
<div class="info-box">
<div class="box-title">🧠 Neural Network Design</div>
<div class="box-content">
Critical choice for every neural network - affects training speed, convergence, and final accuracy
</div>
</div>
<div class="info-box">
<div class="box-title">🎯 Task-Specific Selection</div>
<div class="box-content">
Different tasks need different outputs: Sigmoid for binary, Softmax for multi-class, Linear for regression
</div>
</div>
| <div class="callout tip"> | |
| <div class="callout-title">🎤 Probable Interview Questions</div> | |
| 1. Why do we need activation functions?<br> | |
| 2. What is vanishing gradient?<br> | |
| 3. Why is ReLU preferred over sigmoid?<br> | |
| 4. What are dead neurons?<br> | |
| 5. Difference between ReLU and Leaky ReLU?<br> | |
| 6. Why softmax instead of sigmoid for multiclass?<br> | |
| 7. Why linear activation for regression output?<br> | |
| 8. Why GELU is used in transformers?<br> | |
| 9. Can activation function affect convergence speed?<br> | |
| 10. What happens if we remove activation functions? | |
| </div> | |
    `,
    math: `
<h3>Derivatives: The Backprop Fuel</h3>
<p>Activation functions must be differentiable for backpropagation to work. Here are the key formulas rendered with proper math notation:</p>
<div class="formula" style="text-align:center;">
<strong>Sigmoid:</strong> $$\\sigma(z) = \\frac{1}{1 + e^{-z}}, \\quad \\sigma'(z) = \\sigma(z)(1 - \\sigma(z))$$
<strong>Tanh:</strong> $$\\tanh(z) = \\frac{e^z - e^{-z}}{e^z + e^{-z}}, \\quad \\tanh'(z) = 1 - \\tanh^2(z)$$
<strong>ReLU:</strong> $$\\text{ReLU}(z) = \\max(0, z), \\quad \\text{ReLU}'(z) = \\begin{cases} 1 & z > 0 \\\\ 0 & z \\leq 0 \\end{cases}$$
<strong>Leaky ReLU:</strong> $$\\text{LeakyReLU}(z) = \\begin{cases} z & z > 0 \\\\ \\alpha z & z \\leq 0 \\end{cases}, \\quad \\alpha = 0.01$$
<strong>Softmax:</strong> $$\\text{Softmax}(z_i) = \\frac{e^{z_i}}{\\sum_{j=1}^{K} e^{z_j}}$$
</div>
<div class="callout insight">
<div class="callout-title">📝 The Vanishing Gradient Problem (Proof)</div>
Each layer multiplies the gradient by $\\sigma'(z)$. For Sigmoid, $\\max(\\sigma') = 0.25$.<br>
For 10 Sigmoid layers: $$\\text{Total gradient} \\approx (0.25)^{10} \\approx 0.00000095$$
This is why <strong>ReLU</strong> ($\\text{gradient} = 1$) solved deep learning!
</div>
<h3>Python: Activation Functions</h3>
<span class="code-title">📄 activations.py</span><div class="code-block"><span class="keyword">import</span> torch
<span class="keyword">import</span> torch.nn.functional <span class="keyword">as</span> F
x = torch.linspace(-<span class="number">5</span>, <span class="number">5</span>, <span class="number">100</span>)
<span class="comment"># All common activations</span>
relu = F.relu(x)
sigmoid = torch.sigmoid(x)
tanh = torch.tanh(x)
leaky = F.leaky_relu(x, negative_slope=<span class="number">0.01</span>)
gelu = F.gelu(x)
swish = x * torch.sigmoid(x) <span class="comment"># Swish = x * σ(x)</span>
<span class="comment"># Softmax (on logits)</span>
logits = torch.tensor([<span class="number">2.0</span>, <span class="number">1.0</span>, <span class="number">0.1</span>])
probs = F.softmax(logits, dim=<span class="number">0</span>)
<span class="keyword">print</span>(f<span class="string">"Softmax: {probs}"</span>) <span class="comment"># [0.659, 0.242, 0.099] → sums to 1</span>
<span class="comment"># Custom activation in a model</span>
model = torch.nn.Sequential(
    torch.nn.Linear(<span class="number">784</span>, <span class="number">256</span>),
    torch.nn.GELU(), <span class="comment"># Used in Transformers</span>
    torch.nn.Linear(<span class="number">256</span>, <span class="number">10</span>),
)</div>
    `,
    code: `
<h3>All Activation Functions in Python</h3>
<p>Let's implement every activation function from scratch using NumPy, then see the PyTorch equivalents.</p>
<span class="code-title">📄 activations_numpy.py</span><div class="code-block"><span class="keyword">import</span> numpy <span class="keyword">as</span> np
<span class="comment"># --- Activation Functions from Scratch ---</span>
<span class="keyword">def</span> <span class="function">sigmoid</span>(z):
    <span class="string">"""Squashes input to (0, 1). Used for binary output."""</span>
    <span class="keyword">return</span> <span class="number">1</span> / (<span class="number">1</span> + np.exp(-z))
<span class="keyword">def</span> <span class="function">sigmoid_derivative</span>(z):
    s = sigmoid(z)
    <span class="keyword">return</span> s * (<span class="number">1</span> - s) <span class="comment"># Max = 0.25 at z=0</span>
<span class="keyword">def</span> <span class="function">tanh_fn</span>(z):
    <span class="string">"""Squashes input to (-1, 1). Zero-centered."""</span>
    <span class="keyword">return</span> np.tanh(z)
<span class="keyword">def</span> <span class="function">relu</span>(z):
    <span class="string">"""Max(0, z). Default for hidden layers."""</span>
    <span class="keyword">return</span> np.maximum(<span class="number">0</span>, z)
<span class="keyword">def</span> <span class="function">leaky_relu</span>(z, alpha=<span class="number">0.01</span>):
    <span class="string">"""Small gradient when z < 0. Fixes dying ReLU."""</span>
    <span class="keyword">return</span> np.where(z > <span class="number">0</span>, z, alpha * z)
<span class="keyword">def</span> <span class="function">gelu</span>(z):
    <span class="string">"""Used in BERT/GPT. Smooth approximation."""</span>
    <span class="keyword">return</span> <span class="number">0.5</span> * z * (<span class="number">1</span> + np.tanh(np.sqrt(<span class="number">2</span>/np.pi) * (z + <span class="number">0.044715</span> * z**<span class="number">3</span>)))
<span class="keyword">def</span> <span class="function">swish</span>(z):
    <span class="string">"""z * sigmoid(z). Self-gated. Used in EfficientNet."""</span>
    <span class="keyword">return</span> z * sigmoid(z)
<span class="keyword">def</span> <span class="function">softmax</span>(z):
    <span class="string">"""Converts logits to probabilities summing to 1."""</span>
    exp_z = np.exp(z - np.max(z)) <span class="comment"># Numerical stability</span>
    <span class="keyword">return</span> exp_z / exp_z.sum()
<span class="comment"># --- Demo ---</span>
z = np.linspace(-<span class="number">5</span>, <span class="number">5</span>, <span class="number">100</span>)
<span class="keyword">print</span>(<span class="string">"Sigmoid(0) ="</span>, sigmoid(<span class="number">0</span>)) <span class="comment"># 0.5</span>
<span class="keyword">print</span>(<span class="string">"ReLU(-3) ="</span>, relu(-<span class="number">3</span>)) <span class="comment"># 0</span>
<span class="keyword">print</span>(<span class="string">"Leaky ReLU(-3) ="</span>, leaky_relu(-<span class="number">3</span>)) <span class="comment"># -0.03</span>
<span class="keyword">print</span>(<span class="string">"Softmax([2,1,0.1]) ="</span>, softmax(np.array([<span class="number">2</span>, <span class="number">1</span>, <span class="number">0.1</span>])))
</div>
<h3>PyTorch Equivalents</h3>
<span class="code-title">📄 activations_pytorch.py</span><div class="code-block"><span class="keyword">import</span> torch
<span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn
<span class="keyword">import</span> torch.nn.functional <span class="keyword">as</span> F
x = torch.randn(<span class="number">5</span>)
<span class="comment"># Functional API</span>
<span class="keyword">print</span>(<span class="string">"Sigmoid:"</span>, torch.sigmoid(x))
<span class="keyword">print</span>(<span class="string">"ReLU:"</span>, F.relu(x))
<span class="keyword">print</span>(<span class="string">"GELU:"</span>, F.gelu(x))
<span class="keyword">print</span>(<span class="string">"Softmax:"</span>, F.softmax(x, dim=<span class="number">0</span>))
<span class="comment"># In a model (Module API)</span>
model = nn.Sequential(
    nn.Linear(<span class="number">784</span>, <span class="number">256</span>),
    nn.ReLU(), <span class="comment"># Default for hidden layers</span>
    nn.Linear(<span class="number">256</span>, <span class="number">128</span>),
    nn.GELU(), <span class="comment"># Used in Transformers</span>
    nn.Linear(<span class="number">128</span>, <span class="number">10</span>), <span class="comment"># No activation — CrossEntropyLoss includes Softmax</span>
)
</div>
    `
  },
  "conv-layer": {
    overview: `
<p>Convolution is the <strong>fundamental operation</strong> that makes CNNs work. Instead of connecting every input pixel to every neuron (which would be billions of parameters for an image), a small filter (kernel) <strong>slides across the image</strong>, detecting local patterns like edges, corners, and textures.</p>
<div class="callout insight">
<div class="callout-title">🔍 The Flashlight Analogy</div>
Imagine examining a large painting with a small flashlight in a dark room. You can only see a small patch at a time (the receptive field). As you move the flashlight across the painting (the sliding window), you notice patterns — edges, curves, colors. A convolutional layer does exactly this, but with <strong>many flashlights simultaneously</strong>, each looking for a different pattern.
</div>
<h3>Key Concepts</h3>
<div class="list-item"><div class="list-num">1</div><div><strong>Kernel/Filter:</strong> A small matrix (typically 3×3 or 5×5) of learnable weights. Each filter detects one type of pattern.</div></div>
<div class="list-item"><div class="list-num">2</div><div><strong>Stride:</strong> How many pixels the filter moves at each step. Stride=1 means move 1 pixel, stride=2 means skip every other position (halves output size).</div></div>
<div class="list-item"><div class="list-num">3</div><div><strong>Padding:</strong> Adding zeros around the border so the output has the same spatial size as the input. 'same' padding with 3×3 kernel means pad=1.</div></div>
<div class="list-item"><div class="list-num">4</div><div><strong>Feature Map:</strong> The output of applying one filter to the entire input. 32 filters = 32 feature maps = 32 channels in the output.</div></div>
<h3>Output Size Formula</h3>
<div class="formula" style="text-align:center;">
$$W_{out} = \\left\\lfloor \\frac{W_{in} + 2p - k}{s} \\right\\rfloor + 1$$
Where: $W_{in}$ = input size, $p$ = padding, $k$ = kernel size, $s$ = stride
</div>
<p><strong>Example:</strong> Input 32×32, kernel 3×3, stride 1, padding 1: Output = (32 + 2 - 3)/1 + 1 = <strong>32×32</strong> (same size!)</p>
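<p>The formula is worth scripting so you can check layer configurations quickly. A small helper (hypothetical, for illustration):</p>
<span class="code-title">📄 conv_output_size.py</span><div class="code-block"><span class="keyword">def</span> <span class="function">conv_out</span>(w_in, k, s=<span class="number">1</span>, p=<span class="number">0</span>):
    <span class="string">"""Output width of a conv layer: floor((W + 2p - k) / s) + 1"""</span>
    <span class="keyword">return</span> (w_in + <span class="number">2</span> * p - k) // s + <span class="number">1</span>
<span class="keyword">print</span>(conv_out(<span class="number">32</span>, k=<span class="number">3</span>, s=<span class="number">1</span>, p=<span class="number">1</span>)) <span class="comment"># 32 ('same' padding)</span>
<span class="keyword">print</span>(conv_out(<span class="number">32</span>, k=<span class="number">3</span>, s=<span class="number">2</span>, p=<span class="number">1</span>)) <span class="comment"># 16 (stride-2 downsampling)</span>
<span class="keyword">print</span>(conv_out(<span class="number">28</span>, k=<span class="number">5</span>, s=<span class="number">1</span>, p=<span class="number">0</span>)) <span class="comment"># 24 ('valid' padding)</span></div>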
<h3>Parameter Efficiency: Why CNNs Beat Dense Layers</h3>
<div class="info-box">
<div class="box-title">📊 Parameter Comparison (32×32 RGB Image → 64 outputs)</div>
<div class="box-content">
<strong>Dense Layer:</strong> 32 × 32 × 3 × 64 = <span style="color:#ff6b35">196,608 parameters</span><br>
<strong>Conv Layer (3×3):</strong> 3 × 3 × 3 × 64 + 64 = <span style="color:#00ff88">1,792 parameters</span><br>
<strong>Savings: 110x fewer parameters!</strong> This is thanks to weight sharing and local connectivity.
</div>
</div>
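<p>You can confirm both counts directly in PyTorch (the extra 64 in the dense count below is its bias vector, which the comparison above omits):</p>
<span class="code-title">📄 param_comparison.py</span><div class="code-block"><span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn
dense = nn.Linear(<span class="number">32</span> * <span class="number">32</span> * <span class="number">3</span>, <span class="number">64</span>)
conv = nn.Conv2d(in_channels=<span class="number">3</span>, out_channels=<span class="number">64</span>, kernel_size=<span class="number">3</span>)
<span class="keyword">def</span> <span class="function">count</span>(m):
    <span class="keyword">return</span> <span class="builtin">sum</span>(p.numel() <span class="keyword">for</span> p <span class="keyword">in</span> m.parameters())
<span class="keyword">print</span>(f<span class="string">"Dense: {count(dense):,}"</span>) <span class="comment"># 196,672 (196,608 weights + 64 biases)</span>
<span class="keyword">print</span>(f<span class="string">"Conv: {count(conv):,}"</span>) <span class="comment"># 1,792 (1,728 weights + 64 biases)</span></div>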
    `,
    concepts: `
<h3>Key Hyperparameters</h3>
<div class="list-item">
<div class="list-num">01</div>
<div><strong>Kernel/Filter Size:</strong> Typically 3×3 or 5×5. Smaller = more layers needed, larger = more parameters</div>
</div>
<div class="list-item">
<div class="list-num">02</div>
<div><strong>Stride:</strong> Step size when sliding filter. Stride=1 (preserves size), Stride=2 (downsamples by 2×)</div>
</div>
<div class="list-item">
<div class="list-num">03</div>
<div><strong>Padding:</strong> Add zeros around borders. 'SAME' keeps size, 'VALID' shrinks output</div>
</div>
<div class="list-item">
<div class="list-num">04</div>
<div><strong>Number of Filters:</strong> Each filter learns different features. More filters = more capacity but slower</div>
</div>
<div class="list-item">
<div class="list-num">05</div>
<div><strong>Dilation:</strong> Spacing between kernel elements. Increases receptive field without adding parameters</div>
</div>
<div class="formula">
Output Size Formula:<br>
W_out = floor((W_in + 2×padding - kernel_size) / stride) + 1<br>
H_out = floor((H_in + 2×padding - kernel_size) / stride) + 1
</div>
    `,
    math: `
<h3>The Mathematical Operation: Cross-Correlation</h3>
<p>In deep learning, what we call "convolution" is mathematically "cross-correlation". It is a local dot product of the kernel and image patch.</p>
<div class="formula">
S(i, j) = (I * K)(i, j) = Σ_m Σ_n I(i+m, j+n) K(m, n)
</div>
<div class="callout insight">
<div class="callout-title">📝 Paper & Pain: Manual Convolution</div>
<strong>Input (3×3):</strong><br>
[1 2 0]<br>
[0 1 1]<br>
[1 0 2]<br>
<br>
<strong>Kernel (2×2):</strong><br>
[1 0]<br>
[0 1]<br>
<br>
<strong>Calculation:</strong><br>
Step 1 (Top-Left): (1×1) + (2×0) + (0×0) + (1×1) = <strong>2</strong><br>
Step 2 (Top-Right): (2×1) + (0×0) + (1×0) + (1×1) = <strong>3</strong><br>
... Output is a 2×2 matrix.
</div>
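<p>The same calculation in NumPy, as a quick check of the hand-worked numbers (a from-scratch sketch, not an optimized implementation):</p>
<span class="code-title">📄 manual_conv_check.py</span><div class="code-block"><span class="keyword">import</span> numpy <span class="keyword">as</span> np
I = np.array([[<span class="number">1</span>, <span class="number">2</span>, <span class="number">0</span>], [<span class="number">0</span>, <span class="number">1</span>, <span class="number">1</span>], [<span class="number">1</span>, <span class="number">0</span>, <span class="number">2</span>]])
K = np.array([[<span class="number">1</span>, <span class="number">0</span>], [<span class="number">0</span>, <span class="number">1</span>]])
h = I.shape[<span class="number">0</span>] - K.shape[<span class="number">0</span>] + <span class="number">1</span> <span class="comment"># Valid-mode output height</span>
w = I.shape[<span class="number">1</span>] - K.shape[<span class="number">1</span>] + <span class="number">1</span> <span class="comment"># Valid-mode output width</span>
out = np.zeros((h, w))
<span class="keyword">for</span> i <span class="keyword">in</span> <span class="builtin">range</span>(h):
    <span class="keyword">for</span> j <span class="keyword">in</span> <span class="builtin">range</span>(w):
        out[i, j] = (I[i:i+<span class="number">2</span>, j:j+<span class="number">2</span>] * K).sum() <span class="comment"># Local dot product</span>
<span class="keyword">print</span>(out) <span class="comment"># [[2. 3.] [0. 3.]] (top row matches the manual steps)</span></div>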
<h3>Backprop through Conv</h3>
<p>The gradient with respect to the input is computed with the same sliding-window operation, but with the kernel flipped vertically and horizontally (that is, a true convolution)!</p>
    `,
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🔍 Feature Extraction</div> | |
| <div class="box-content"> | |
| Early layers learn edges (Gabor-like filters), middle layers learn textures, deep layers learn specific object parts (eyes, wheels). | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🎨 Image Processing</div> | |
| <div class="box-content"> | |
| Blurring, sharpening, and edge detection in Photoshop/GIMP are all done with 2D convolutions using fixed kernels. | |
| </div> | |
| </div> | |
| `, | |
| code: ` | |
| <h3>Convolution Math</h3> | |
| <div class="formula" style="text-align:center;"> | |
| <strong>Output Size:</strong> $$W_{out} = \\left\\lfloor \\frac{W_{in} + 2p - k}{s} \\right\\rfloor + 1$$ | |
| Where $p$ = padding, $k$ = kernel size, $s$ = stride. | |
| </div> | |
| <h3>Complete CNN Classifier (CIFAR-10)</h3> | |
| <span class="code-title">📄 cnn_cifar10.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| <span class="keyword">import</span> torchvision | |
| <span class="keyword">import</span> torchvision.transforms <span class="keyword">as</span> T | |
| <span class="comment"># --- Data Loading ---</span> | |
| transform = T.Compose([T.ToTensor(), T.Normalize((<span class="number">0.5</span>,<span class="number">0.5</span>,<span class="number">0.5</span>), (<span class="number">0.5</span>,<span class="number">0.5</span>,<span class="number">0.5</span>))]) | |
| trainset = torchvision.datasets.CIFAR10(root=<span class="string">"./data"</span>, train=<span class="keyword">True</span>, download=<span class="keyword">True</span>, transform=transform) | |
| trainloader = torch.utils.data.DataLoader(trainset, batch_size=<span class="number">64</span>, shuffle=<span class="keyword">True</span>) | |
| <span class="comment"># --- CNN Model ---</span> | |
| <span class="keyword">class</span> <span class="class-name">CNN</span>(nn.Module): | |
| <span class="keyword">def</span> <span class="function">__init__</span>(self): | |
| <span class="builtin">super</span>().__init__() | |
| self.features = nn.Sequential( | |
| nn.Conv2d(<span class="number">3</span>, <span class="number">32</span>, <span class="number">3</span>, padding=<span class="number">1</span>), <span class="comment"># 32x32 → 32x32</span> | |
| nn.BatchNorm2d(<span class="number">32</span>), nn.ReLU(), | |
| nn.MaxPool2d(<span class="number">2</span>), <span class="comment"># → 16x16</span> | |
| nn.Conv2d(<span class="number">32</span>, <span class="number">64</span>, <span class="number">3</span>, padding=<span class="number">1</span>), <span class="comment"># → 16x16</span> | |
| nn.BatchNorm2d(<span class="number">64</span>), nn.ReLU(), | |
| nn.MaxPool2d(<span class="number">2</span>), <span class="comment"># → 8x8</span> | |
| nn.Conv2d(<span class="number">64</span>, <span class="number">128</span>, <span class="number">3</span>, padding=<span class="number">1</span>), <span class="comment"># → 8x8</span> | |
| nn.BatchNorm2d(<span class="number">128</span>), nn.ReLU(), | |
| nn.AdaptiveAvgPool2d(<span class="number">1</span>), <span class="comment"># → 1x1</span> | |
| ) | |
| self.classifier = nn.Linear(<span class="number">128</span>, <span class="number">10</span>) | |
| <span class="keyword">def</span> <span class="function">forward</span>(self, x): | |
| x = self.features(x) | |
| x = x.view(x.size(<span class="number">0</span>), -<span class="number">1</span>) | |
| <span class="keyword">return</span> self.classifier(x) | |
| model = CNN() | |
| <span class="keyword">print</span>(f<span class="string">"Parameters: {sum(p.numel() for p in model.parameters()):,}"</span>)</div> | |
| ` | |
| }, | |
| "yolo": { | |
| overview: ` | |
| <h3>What is YOLO?</h3> | |
| <p>YOLO (You Only Look Once) treats object detection as a single regression problem, going directly from image pixels to bounding box coordinates and class probabilities in one forward pass.</p> | |
| <h3>Why YOLO Over R-CNN?</h3> | |
| <ul> | |
| <li><strong>Speed:</strong> 45+ FPS (real-time) vs R-CNN's ~0.05 FPS</li> | |
| <li><strong>Global Context:</strong> Sees entire image during training (fewer background errors)</li> | |
| <li><strong>One Network:</strong> Unlike R-CNN's multi-stage pipeline</li> | |
| <li><strong>End-to-End Training:</strong> Optimize detection directly</li> | |
| </ul> | |
| <div class="callout tip"> | |
| <div class="callout-title">✅ Advantages</div> | |
| • <strong>Lightning Fast:</strong> Real-time inference (YOLOv8 at 100+ FPS)<br> | |
| • <strong>Simple Architecture:</strong> Single network, easy to train<br> | |
| • <strong>Generalizes Well:</strong> Works on natural images and artwork<br> | |
| • <strong>Small Model Size:</strong> Can run on edge devices (mobile, IoT) | |
| </div> | |
| <div class="callout warning"> | |
| <div class="callout-title">⚠️ Disadvantages</div> | |
| • <strong>Struggles with Small Objects:</strong> Grid limitation affects tiny items<br> | |
| • <strong>Localization Errors:</strong> Less precise than two-stage detectors<br> | |
| • <strong>Limited Objects per Cell:</strong> Can't detect many close objects<br> | |
| • <strong>Aspect Ratio Issues:</strong> Struggles with unusual object shapes | |
| </div> | |
| <h3>YOLO Evolution</h3> | |
| <table> | |
| <tr> | |
| <th>Version</th> | |
| <th>Year</th> | |
| <th>Key Innovation</th> | |
| <th>mAP</th> | |
| </tr> | |
| <tr> | |
| <td>YOLOv1</td> | |
| <td>2015</td> | |
| <td>Original single-shot detector</td> | |
| <td>63.4%</td> | |
| </tr> | |
| <tr> | |
| <td>YOLOv3</td> | |
| <td>2018</td> | |
| <td>Multi-scale predictions</td> | |
| <td>57.9% (faster)</td> | |
| </tr> | |
| <tr> | |
| <td>YOLOv5</td> | |
| <td>2020</td> | |
| <td>PyTorch, Auto-augment</td> | |
| <td>~50% (optimized)</td> | |
| </tr> | |
| <tr> | |
| <td>YOLOv8</td> | |
| <td>2023</td> | |
| <td>Anchor-free, SOTA speed</td> | |
| <td>53.9% (real-time)</td> | |
| </tr> | |
| </table> | |
| `, | |
| concepts: ` | |
| <h3>How YOLO Works (3 Steps)</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Grid Division:</strong> Divide image into S×S grid (e.g., 7×7). Each cell predicts B bounding boxes</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Predictions Per Cell:</strong> Each box predicts (x, y, w, h, confidence) + class probabilities</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Non-Max Suppression:</strong> Remove duplicate detections, keep highest confidence boxes</div> | |
| </div> | |
| <div class="formula"> | |
| Output Tensor Shape (YOLOv1):<br> | |
| S × S × (B × 5 + C)<br> | |
| Example: 7 × 7 × (2 × 5 + 20) = 7 × 7 × 30<br> | |
| <br> | |
| Where:<br> | |
| • S = grid size (7)<br> | |
| • B = boxes per cell (2)<br> | |
| • 5 = (x, y, w, h, confidence)<br> | |
| • C = number of classes (20 for PASCAL VOC) | |
| </div> | |
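| <p>The same arithmetic in code, plus a check of which grid cell "owns" an object. The center coordinates here are hypothetical example values:</p> | |
| <span class="code-title">📄 yolo_grid.py</span><div class="code-block">S, B, C = 7, 2, 20 | |
| depth = B * 5 + C | |
| print(f"Output tensor: {S} x {S} x {depth}")  <span class="comment"># 7 x 7 x 30</span> | |
| <span class="comment"># The cell containing a box center is responsible for detecting it</span> | |
| cx, cy = 0.52, 0.31  <span class="comment"># normalized center coordinates in [0, 1]</span> | |
| col, row = int(cx * S), int(cy * S) | |
| print(f"Responsible cell: row {row}, col {col}")  <span class="comment"># row 2, col 3</span></div> | |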
| `, | |
| applications: ` | |
| <h3>Industry Applications</h3> | |
| <div class="info-box"> | |
| <div class="box-title">🚗 Autonomous Vehicles</div> | |
| <div class="box-content"> | |
| Real-time detection of pedestrians, vehicles, traffic signs, and lane markings for self-driving cars | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🏭 Manufacturing</div> | |
| <div class="box-content"> | |
| Quality control, defect detection on assembly lines, robot guidance, inventory management | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🛡️ Security & Surveillance</div> | |
| <div class="box-content"> | |
| Intrusion detection, crowd monitoring, suspicious behavior analysis, license plate recognition | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🏥 Medical Imaging</div> | |
| <div class="box-content"> | |
| Tumor localization, cell counting, anatomical structure detection in X-rays/CT scans | |
| </div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Intersection over Union (IoU)</h3> | |
| <p>How do we measure if a predicted box is correct? We use the geometric ratio of intersection and union.</p> | |
| <div class="formula"> | |
| IoU = Area of Overlap / Area of Union | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Manual IoU</div> | |
| <strong>Box A (GT):</strong> [0,0,10,10] (Area=100)<br> | |
| <strong>Box B (Pred):</strong> [5,5,15,15] (Area=100)<br> | |
| 1. <strong>Intersection:</strong> Area between [5,5] and [10,10] = 5×5 = 25<br> | |
| 2. <strong>Union:</strong> Area A + Area B - Intersection = 100 + 100 - 25 = 175<br> | |
| 3. <strong>IoU:</strong> 25 / 175 ≈ <strong>0.143</strong> (Poor match!) | |
| </div> | |
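| <p>A minimal IoU function in the corner format [x1, y1, x2, y2] that reproduces the numbers above:</p> | |
| <span class="code-title">📄 iou.py</span><div class="code-block">def iou(a, b): | |
|     <span class="comment"># Intersection rectangle corners</span> | |
|     x1, y1 = max(a[0], b[0]), max(a[1], b[1]) | |
|     x2, y2 = min(a[2], b[2]), min(a[3], b[3]) | |
|     inter = max(0, x2 - x1) * max(0, y2 - y1) | |
|     area_a = (a[2] - a[0]) * (a[3] - a[1]) | |
|     area_b = (b[2] - b[0]) * (b[3] - b[1]) | |
|     return inter / (area_a + area_b - inter) | |
| print(round(iou([0, 0, 10, 10], [5, 5, 15, 15]), 3))  <span class="comment"># 0.143</span></div> | |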
| <h3>YOLO Multi-Part Loss</h3> | |
| <p>YOLO uses a composite loss function combining localization, confidence, and classification errors.</p> | |
| <div class="formula"> | |
| L = λ_coord Σ(Localization Loss) + Σ(Confidence Loss) + Σ(Classification Loss) | |
| </div> | |
| ` | |
| }, | |
| "transformers": { | |
| overview: ` | |
| <p>The Transformer architecture, introduced in the 2017 paper <strong>"Attention Is All You Need"</strong>, is the foundation of modern AI. GPT, BERT, LLaMA, DALL-E, Stable Diffusion — they all use Transformers. The key innovation: <strong>self-attention</strong>, which lets every token in a sequence directly attend to every other token.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">🎭 The Conference Room Analogy</div> | |
| Imagine a meeting where everyone can hear everyone else simultaneously (unlike RNNs where information passes person-to-person in a chain). Each person (token) decides who to pay attention to based on relevance. The CEO might focus on the CFO for budget discussions but the CTO for technical decisions. This dynamic, context-dependent attention is what Transformers do. | |
| </div> | |
| <h3>Transformer Architecture</h3> | |
| <div class="list-item"><div class="list-num">1</div><div><strong>Input Embedding + Positional Encoding:</strong> Convert tokens to vectors and add position information (since attention has no inherent sense of order)</div></div> | |
| <div class="list-item"><div class="list-num">2</div><div><strong>Multi-Head Self-Attention:</strong> Each head learns a different type of relationship. Head 1 might learn syntactic dependencies, Head 2 might learn semantic similarity.</div></div> | |
| <div class="list-item"><div class="list-num">3</div><div><strong>Feed-Forward Network:</strong> A two-layer MLP applied independently to each position. This is where most of the model's "knowledge" is stored.</div></div> | |
| <div class="list-item"><div class="list-num">4</div><div><strong>Layer Normalization + Residual Connections:</strong> Applied around each sub-layer to stabilize training.</div></div> | |
| <h3>Self-Attention: The Heart of Transformers</h3> | |
| <div class="formula" style="text-align:center;"> | |
| $$\\text{Attention}(Q, K, V) = \\text{softmax}\\left(\\frac{QK^T}{\\sqrt{d_k}}\\right)V$$ | |
| </div> | |
| <p><strong>Q (Query):</strong> "What am I looking for?"<br> | |
| <strong>K (Key):</strong> "What do I contain?"<br> | |
| <strong>V (Value):</strong> "What information do I provide?"</p> | |
| <p>The $\\sqrt{d_k}$ scaling prevents the dot products from becoming too large (which would make softmax saturate and kill gradients).</p> | |
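| <p>A quick numerical illustration of why the scaling matters (a sketch, not from the paper): with random unit-variance vectors, raw dot products have variance around dₖ, so softmax over them is nearly one-hot.</p> | |
| <span class="code-title">📄 why_scale.py</span><div class="code-block">import torch | |
| d_k = 64 | |
| q = torch.randn(1, d_k) | |
| keys = torch.randn(10, d_k) | |
| raw = q @ keys.T  <span class="comment"># scores with variance ~ d_k (large spread)</span> | |
| scaled = raw / (d_k ** 0.5)  <span class="comment"># scores with variance ~ 1</span> | |
| print(torch.softmax(raw, dim=-1).max().item())  <span class="comment"># typically close to 1 (saturated)</span> | |
| print(torch.softmax(scaled, dim=-1).max().item())  <span class="comment"># much softer distribution</span></div> | |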
| <h3>Why Transformers Beat RNNs</h3> | |
| <div class="info-box"> | |
| <div class="box-title">Speed & Power Comparison</div> | |
| <div class="box-content"> | |
| <strong>Parallelization:</strong> RNNs process tokens sequentially (slow). Transformers process all tokens at once (fast); this parallelism is what makes GPT-3-scale training feasible at all (on a single GPU, GPT-3 would take an estimated 355 years to train).<br> | |
| <strong>Long-range dependencies:</strong> RNNs lose information over long sequences. Transformers can attend to any position directly.<br> | |
| <strong>Scalability:</strong> Transformers scale to billions of parameters with predictable improvements (scaling laws). | |
| </div> | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>🏗️ Complete Architecture Overview</h3> | |
| <p>The Transformer follows an <strong>Encoder-Decoder</strong> structure, but each component uses only attention mechanisms.</p> | |
| <div class="info-box"> | |
| <div class="box-title">📦 Model Hyperparameters (Base Model)</div> | |
| <div class="box-content"> | |
| • <strong>d_model = 512:</strong> Embedding dimension<br> | |
| • <strong>d_ff = 2048:</strong> Feed-forward hidden dimension<br> | |
| • <strong>h = 8:</strong> Number of attention heads<br> | |
| • <strong>d_k = d_v = 64:</strong> Key/Value dimensions (d_model / h)<br> | |
| • <strong>N = 6:</strong> Number of encoder AND decoder layers<br> | |
| • <strong>Total Parameters:</strong> ~65 million | |
| </div> | |
| </div> | |
| <h3>Core Components</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Input Embedding:</strong> Convert tokens to d_model dimensional vectors, scaled by √d_model</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Positional Encoding:</strong> Add sinusoidal position signals so attention knows token order</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Multi-Head Self-Attention:</strong> h parallel attention heads, each computing different relationships</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>Add & Norm:</strong> Residual connection followed by Layer Normalization</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">05</div> | |
| <div><strong>Feed-Forward Network:</strong> Two linear layers with ReLU: FFN(x) = max(0, xW₁ + b₁)W₂ + b₂</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">06</div> | |
| <div><strong>Masked Attention (Decoder):</strong> Prevent attending to future tokens during training</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">07</div> | |
| <div><strong>Encoder-Decoder Attention:</strong> Decoder attends to encoder output (cross-attention)</div> | |
| </div> | |
| <h3>Encoder Stack (N=6 layers)</h3> | |
| <div class="formula"> | |
| Each encoder layer:<br> | |
| sublayer_1 = LayerNorm(x + MultiHeadAttention(x, x, x))<br> | |
| sublayer_2 = LayerNorm(sublayer_1 + FFN(sublayer_1))<br> | |
| <br> | |
| The encoder processes the input sequence bidirectionally.<br> | |
| All positions can attend to all positions. | |
| </div> | |
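| <p>As a sketch, here is one post-norm encoder layer in PyTorch, using the built-in nn.MultiheadAttention rather than a from-scratch head:</p> | |
| <span class="code-title">📄 encoder_layer.py</span><div class="code-block">import torch | |
| import torch.nn as nn | |
| class EncoderLayer(nn.Module): | |
|     def __init__(self, d_model=512, n_heads=8, d_ff=2048): | |
|         super().__init__() | |
|         self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True) | |
|         self.ffn = nn.Sequential(nn.Linear(d_model, d_ff), nn.ReLU(), nn.Linear(d_ff, d_model)) | |
|         self.norm1, self.norm2 = nn.LayerNorm(d_model), nn.LayerNorm(d_model) | |
|     def forward(self, x): | |
|         attn_out, _ = self.attn(x, x, x)  <span class="comment"># self-attention: Q = K = V = x</span> | |
|         x = self.norm1(x + attn_out)  <span class="comment"># sublayer_1: Add & Norm</span> | |
|         x = self.norm2(x + self.ffn(x))  <span class="comment"># sublayer_2: Add & Norm</span> | |
|         return x | |
| x = torch.randn(2, 10, 512) | |
| print(EncoderLayer()(x).shape)  <span class="comment"># torch.Size([2, 10, 512])</span></div> | |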
| <h3>Decoder Stack (N=6 layers)</h3> | |
| <div class="formula"> | |
| Each decoder layer has THREE sub-layers:<br> | |
| 1. Masked Self-Attention (prevent looking ahead)<br> | |
| 2. Encoder-Decoder Attention (attend to encoder output)<br> | |
| 3. Feed-Forward Network<br> | |
| <br> | |
| Output: LayerNorm(x + sublayer(x)) for each | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">💡 Key Insight: Residual Connections</div> | |
| Every sub-layer has a residual connection: <strong>output = x + Sublayer(x)</strong><br> | |
| This is crucial for:<br> | |
| 1. Training very deep models (gradient highway)<br> | |
| 2. Preserving information across layers<br> | |
| 3. Enabling optional "skipping" of transformations | |
| </div> | |
| `, | |
| applications: ` | |
| <h3>🌍 Revolutionary Applications</h3> | |
| <div class="info-box"> | |
| <div class="box-title">💬 Large Language Models</div> | |
| <div class="box-content"> | |
| <strong>GPT-4, Claude, Gemini:</strong> The most capable AI systems ever built<br> | |
| <strong>ChatGPT:</strong> 100M+ users in 2 months (fastest product adoption ever)<br> | |
| <strong>BERT:</strong> Powers Google Search for 70+ languages | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🌐 Machine Translation (Original Use Case)</div> | |
| <div class="box-content"> | |
| <strong>Google Translate:</strong> Switched to Transformers in 2017<br> | |
| <strong>DeepL:</strong> Transformer-based, often beats Google<br> | |
| <strong>NLLB-200:</strong> Meta's model translates 200 languages | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🎨 Multi-Modal AI</div> | |
| <div class="box-content"> | |
| <strong>DALL-E 3, Midjourney, Stable Diffusion:</strong> Text-to-image generation<br> | |
| <strong>GPT-4V, Gemini:</strong> Vision + Language understanding<br> | |
| <strong>Whisper:</strong> State-of-the-art speech recognition<br> | |
| <strong>Sora:</strong> Text-to-video generation | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🧬 Scientific Breakthroughs</div> | |
| <div class="box-content"> | |
| <strong>AlphaFold 2:</strong> Solved 50-year protein folding problem (Nobel Prize 2024)<br> | |
| <strong>ESMFold:</strong> Meta's protein predictor<br> | |
| <strong>Drug Discovery:</strong> Accelerating molecule design | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">💻 Code & Development</div> | |
| <div class="box-content"> | |
| <strong>GitHub Copilot:</strong> AI pair programmer (used by millions of devs)<br> | |
| <strong>Claude Code, Cursor:</strong> AI coding assistants<br> | |
| <strong>AlphaCode:</strong> Competitive programming solver | |
| </div> | |
| </div> | |
| <div class="callout tip"> | |
| <div class="callout-title">📈 Impact Statistics</div> | |
| • Paper has 100,000+ citations (one of most cited CS papers ever)<br> | |
| • Spawned multi-trillion dollar industry<br> | |
| • Every major AI lab now uses Transformer variants<br> | |
| • Considered the "ImageNet moment" for NLP | |
| </div> | |
| `, | |
| math: ` | |
| <h3>📐 Paper & Pain: Complete Mathematical Derivation</h3> | |
| <p>Let's derive every formula from the paper with step-by-step calculations.</p> | |
| <h3>1. Scaled Dot-Product Attention</h3> | |
| <div class="formula" style="font-size: 1.2rem; text-align: center; margin: 20px 0; background: rgba(0, 212, 255, 0.08); padding: 25px; border-radius: 8px;"> | |
| <strong>Attention(Q, K, V) = softmax(QKᵀ / √dₖ) V</strong> | |
| </div> | |
| <h4>Step-by-Step Derivation:</h4> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div> | |
| <strong>Create Q, K, V matrices:</strong><br> | |
| Given input X ∈ ℝⁿˣᵈ (n tokens, d dimensions)<br> | |
| Q = XWᵠ, K = XWᴷ, V = XWⱽ<br> | |
| where Wᵠ, Wᴷ ∈ ℝᵈˣᵈᵏ and Wⱽ ∈ ℝᵈˣᵈᵛ | |
| </div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div> | |
| <strong>Compute Attention Scores:</strong><br> | |
| scores = QKᵀ ∈ ℝⁿˣⁿ<br> | |
| Each score[i,j] = dot product of query_i and key_j<br> | |
| Measures "how much should position i attend to position j" | |
| </div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div> | |
| <strong>Scale by √dₖ:</strong><br> | |
| scaled_scores = QKᵀ / √dₖ<br> | |
| <em>Why scale?</em> Dot products grow with dimension. Assuming the components of q and k are independent with mean 0 and variance 1, for dₖ = 64:<br> | |
| E[qᵀk] = 0, Var[qᵀk] = dₖ = 64<br> | |
| Without scaling, softmax saturates → vanishing gradients! | |
| </div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div> | |
| <strong>Apply Softmax:</strong><br> | |
| attention_weights = softmax(scaled_scores) ∈ ℝⁿˣⁿ<br> | |
| Each row sums to 1 (probability distribution over positions) | |
| </div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">05</div> | |
| <div> | |
| <strong>Weighted Sum of Values:</strong><br> | |
| output = attention_weights × V ∈ ℝⁿˣᵈᵛ<br> | |
| Each output[i] is a weighted combination of all values | |
| </div> | |
| </div> | |
| <h3>2. Multi-Head Attention</h3> | |
| <div class="formula" style="background: rgba(255, 107, 53, 0.08); padding: 20px; border-radius: 8px;"> | |
| MultiHead(Q, K, V) = Concat(head₁, ..., headₕ)Wᴼ<br> | |
| where headᵢ = Attention(QWᵢᵠ, KWᵢᴷ, VWᵢⱽ)<br><br> | |
| <strong>Paper's values:</strong><br> | |
| h = 8 heads, dₖ = dᵥ = d_model/h = 512/8 = 64<br> | |
| Wᴼ ∈ ℝ⁽ʰ·ᵈᵛ⁾ˣᵈᵐᵒᵈᵉˡ = ℝ⁵¹²ˣ⁵¹² | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">💡 Why Multiple Heads?</div> | |
| Each head can learn <strong>different relationships</strong>:<br> | |
| • Head 1: Syntactic dependencies (subject-verb)<br> | |
| • Head 2: Pronoun resolution (he → John)<br> | |
| • Head 3: Semantic similarity<br> | |
| • Head 4-8: Other patterns<br> | |
| <br> | |
| <strong>Computation:</strong> Same cost as single-head with full dₖ!<br> | |
| 8 heads × 64 dim = 1 head × 512 dim (same FLOPs) | |
| </div> | |
| <h3>3. Positional Encoding (Sinusoidal)</h3> | |
| <div class="formula" style="background: rgba(46, 204, 113, 0.08); padding: 20px; border-radius: 8px;"> | |
| PE(pos, 2i) = sin(pos / 10000^(2i/d_model))<br> | |
| PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model))<br><br> | |
| pos = position in sequence (0, 1, 2, ...)<br> | |
| i = dimension index (0, 1, ..., d_model/2 - 1) | |
| </div> | |
| <h4>Worked Example (d_model = 4):</h4> | |
| <div class="formula" style="background: var(--surface); padding: 15px;"> | |
| For position pos = 0:<br> | |
| PE(0, 0) = sin(0) = 0<br> | |
| PE(0, 1) = cos(0) = 1<br> | |
| PE(0, 2) = sin(0) = 0<br> | |
| PE(0, 3) = cos(0) = 1<br> | |
| → PE₀ = [0, 1, 0, 1]<br><br> | |
| For position pos = 1:<br> | |
| PE(1, 0) = sin(1/10000⁰) = sin(1) ≈ 0.84<br> | |
| PE(1, 1) = cos(1/10000⁰) = cos(1) ≈ 0.54<br> | |
| PE(1, 2) = sin(1/100) ≈ 0.01<br> | |
| PE(1, 3) = cos(1/100) ≈ 1.00<br> | |
| → PE₁ = [0.84, 0.54, 0.01, 1.00] | |
| </div> | |
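| <p>The same numbers fall out of a few lines of NumPy (d_model = 4, as above):</p> | |
| <span class="code-title">📄 positional_encoding.py</span><div class="code-block">import numpy as np | |
| d_model, n_pos = 4, 2 | |
| pe = np.zeros((n_pos, d_model)) | |
| for pos in range(n_pos): | |
|     for i in range(d_model // 2): | |
|         angle = pos / (10000 ** (2 * i / d_model)) | |
|         pe[pos, 2 * i] = np.sin(angle)  <span class="comment"># even dimensions: sine</span> | |
|         pe[pos, 2 * i + 1] = np.cos(angle)  <span class="comment"># odd dimensions: cosine</span> | |
| print(np.round(pe, 2)) | |
| <span class="comment"># [[0.   1.   0.   1.  ]  [0.84 0.54 0.01 1.  ]]</span></div> | |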
| <div class="callout warning"> | |
| <div class="callout-title">🔑 Key Property: Relative Positions</div> | |
| PE(pos + k) can be expressed as a <strong>linear function</strong> of PE(pos)!<br> | |
| This allows the model to easily learn relative position attention.<br> | |
| sin(a+b) = sin(a)cos(b) + cos(a)sin(b) ✓ | |
| </div> | |
| <h3>4. Feed-Forward Network</h3> | |
| <div class="formula"> | |
| FFN(x) = max(0, xW₁ + b₁)W₂ + b₂<br><br> | |
| W₁ ∈ ℝᵈᵐᵒᵈᵉˡ ˣ ᵈᶠᶠ = ℝ⁵¹² ˣ ²⁰⁴⁸ (expand 4×)<br> | |
| W₂ ∈ ℝᵈᶠᶠ ˣ ᵈᵐᵒᵈᵉˡ = ℝ²⁰⁴⁸ ˣ ⁵¹² (project back)<br><br> | |
| This is a 2-layer MLP with ReLU, applied to each position independently. | |
| </div> | |
| <h3>5. Layer Normalization</h3> | |
| <div class="formula"> | |
| LayerNorm(x) = γ ⊙ (x - μ) / √(σ² + ε) + β<br><br> | |
| μ = (1/d) Σᵢ xᵢ (mean across features)<br> | |
| σ² = (1/d) Σᵢ (xᵢ - μ)² (variance)<br> | |
| γ, β: learnable scale and shift (per feature) | |
| </div> | |
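| <p>A manual check of this formula against PyTorch (at initialization, γ = 1 and β = 0):</p> | |
| <span class="code-title">📄 layernorm_check.py</span><div class="code-block">import torch | |
| import torch.nn as nn | |
| x = torch.tensor([[1.0, 2.0, 3.0, 4.0]]) | |
| mu = x.mean(dim=-1, keepdim=True)  <span class="comment"># mean across features</span> | |
| var = x.var(dim=-1, unbiased=False, keepdim=True)  <span class="comment"># biased variance, as in the formula</span> | |
| manual = (x - mu) / torch.sqrt(var + 1e-5) | |
| ln = nn.LayerNorm(4)  <span class="comment"># gamma=1, beta=0 by default; eps=1e-5</span> | |
| print(torch.allclose(manual, ln(x), atol=1e-5))  <span class="comment"># True</span></div> | |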
| <h3>6. Training Details from Paper</h3> | |
| <div class="info-box"> | |
| <div class="box-title">⚙️ Optimization Settings</div> | |
| <div class="box-content"> | |
| <strong>Optimizer:</strong> Adam with β₁=0.9, β₂=0.98, ε=10⁻⁹<br> | |
| <strong>Learning Rate Schedule:</strong><br> | |
| lr = d_model⁻⁰·⁵ × min(step⁻⁰·⁵, step × warmup_steps⁻¹·⁵)<br> | |
| warmup_steps = 4000<br><br> | |
| <strong>Regularization:</strong><br> | |
| • Dropout = 0.1 (on sublayers and embeddings)<br> | |
| • Label Smoothing = 0.1 | |
| </div> | |
| </div> | |
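| <p>The warmup schedule is easy to misread, so here it is transcribed directly into a small function (printed values are approximate):</p> | |
| <span class="code-title">📄 noam_schedule.py</span><div class="code-block">def transformer_lr(step, d_model=512, warmup_steps=4000): | |
|     <span class="comment"># lr = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)</span> | |
|     return d_model ** -0.5 * min(step ** -0.5, step * warmup_steps ** -1.5) | |
| for step in [1, 1000, 4000, 40000]: | |
|     print(step, round(transformer_lr(step), 8)) | |
| <span class="comment"># lr rises linearly to ~0.0007 at step 4000, then decays as 1/sqrt(step)</span></div> | |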
| <h3>7. Complexity Analysis</h3> | |
| <table> | |
| <tr> | |
| <th>Layer Type</th> | |
| <th>Complexity per Layer</th> | |
| <th>Sequential Ops</th> | |
| <th>Max Path Length</th> | |
| </tr> | |
| <tr> | |
| <td>Self-Attention</td> | |
| <td>O(n² · d)</td> | |
| <td>O(1)</td> | |
| <td>O(1)</td> | |
| </tr> | |
| <tr> | |
| <td>Recurrent (RNN)</td> | |
| <td>O(n · d²)</td> | |
| <td>O(n)</td> | |
| <td>O(n)</td> | |
| </tr> | |
| <tr> | |
| <td>Convolutional</td> | |
| <td>O(k · n · d²)</td> | |
| <td>O(1)</td> | |
| <td>O(logₖ(n))</td> | |
| </tr> | |
| </table> | |
| <div class="callout tip"> | |
| <div class="callout-title">🎯 Key Insight from the Paper</div> | |
| Self-attention is the only architecture with <strong>O(1) path length</strong><br> | |
| between any two positions! This is why it handles long-range<br> | |
| dependencies so well. The tradeoff is O(n²) memory. | |
| </div> | |
| `, | |
| visualization: ` | |
| <h3>🎨 Interactive Transformer Visualization</h3> | |
| <p>Explore how attention mechanisms work in the Transformer architecture.</p> | |
| <div class="viz-container"> | |
| <canvas id="transformerViz" width="800" height="600"></canvas> | |
| </div> | |
| <div class="viz-controls"> | |
| <button class="btn-viz" onclick="visualizeAttention()">Show Attention Weights</button> | |
| <button class="btn-viz" onclick="visualizeMultiHead()">Multi-Head Attention</button> | |
| <button class="btn-viz" onclick="visualizePositional()">Positional Encoding</button> | |
| <button class="btn-viz" onclick="visualizeArchitecture()">Full Architecture</button> | |
| </div> | |
| <h3>Code: Self-Attention in PyTorch</h3> | |
| <div class="formula" style="font-family: 'Courier New', monospace; font-size: 0.9rem; background: #1e1e2e; padding: 20px; border-radius: 8px; overflow-x: auto;"> | |
| <pre style="margin: 0; color: #cdd6f4;"> | |
| <span style="color: #89b4fa;">import</span> torch | |
| <span style="color: #89b4fa;">import</span> torch.nn <span style="color: #89b4fa;">as</span> nn | |
| <span style="color: #89b4fa;">import</span> torch.nn.functional <span style="color: #89b4fa;">as</span> F | |
| <span style="color: #89b4fa;">import</span> math | |
| <span style="color: #89b4fa;">class</span> <span style="color: #f9e2af;">ScaledDotProductAttention</span>(nn.Module): | |
| <span style="color: #a6e3a1;">\"\"\"Attention(Q, K, V) = softmax(QK^T / sqrt(d_k)) V\"\"\"</span> | |
| <span style="color: #89b4fa;">def</span> <span style="color: #f9e2af;">forward</span>(self, Q, K, V, mask=<span style="color: #fab387;">None</span>): | |
| d_k = Q.size(-1) | |
| <span style="color: #6c7086;"># Step 1: Compute attention scores</span> | |
| scores = torch.matmul(Q, K.transpose(-2, -1)) | |
| <span style="color: #6c7086;"># Step 2: Scale by sqrt(d_k)</span> | |
| scores = scores / math.sqrt(d_k) | |
| <span style="color: #6c7086;"># Step 3: Apply mask (for decoder)</span> | |
| <span style="color: #89b4fa;">if</span> mask <span style="color: #89b4fa;">is not</span> <span style="color: #fab387;">None</span>: | |
| scores = scores.masked_fill(mask == <span style="color: #fab387;">0</span>, <span style="color: #fab387;">-1e9</span>) | |
| <span style="color: #6c7086;"># Step 4: Apply softmax</span> | |
| attention_weights = F.softmax(scores, dim=-1) | |
| <span style="color: #6c7086;"># Step 5: Weighted sum of values</span> | |
| output = torch.matmul(attention_weights, V) | |
| <span style="color: #89b4fa;">return</span> output, attention_weights | |
| <span style="color: #89b4fa;">class</span> <span style="color: #f9e2af;">MultiHeadAttention</span>(nn.Module): | |
| <span style="color: #a6e3a1;">\"\"\"Multi-Head Attention from 'Attention Is All You Need'\"\"\"</span> | |
| <span style="color: #89b4fa;">def</span> <span style="color: #f9e2af;">__init__</span>(self, d_model=<span style="color: #fab387;">512</span>, n_heads=<span style="color: #fab387;">8</span>): | |
| <span style="color: #89b4fa;">super</span>().__init__() | |
| self.d_model = d_model | |
| self.n_heads = n_heads | |
| self.d_k = d_model // n_heads <span style="color: #6c7086;"># 64</span> | |
| <span style="color: #6c7086;"># Linear projections for Q, K, V</span> | |
| self.W_Q = nn.Linear(d_model, d_model) | |
| self.W_K = nn.Linear(d_model, d_model) | |
| self.W_V = nn.Linear(d_model, d_model) | |
| self.W_O = nn.Linear(d_model, d_model) | |
| self.attention = ScaledDotProductAttention() | |
| <span style="color: #89b4fa;">def</span> <span style="color: #f9e2af;">forward</span>(self, Q, K, V, mask=<span style="color: #fab387;">None</span>): | |
| batch_size = Q.size(<span style="color: #fab387;">0</span>) | |
| <span style="color: #6c7086;"># 1. Linear projection and split into h heads</span> | |
| Q = self.W_Q(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2) | |
| K = self.W_K(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2) | |
| V = self.W_V(V).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2) | |
| <span style="color: #6c7086;"># 2. Apply attention to all heads in parallel</span> | |
| x, attn = self.attention(Q, K, V, mask) | |
| <span style="color: #6c7086;"># 3. Concat and final linear projection</span> | |
| x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.d_model) | |
| <span style="color: #89b4fa;">return</span> self.W_O(x), attn | |
| <span style="color: #89b4fa;">class</span> <span style="color: #f9e2af;">PositionalEncoding</span>(nn.Module): | |
| <span style="color: #a6e3a1;">\"\"\"Sinusoidal Positional Encoding from the paper\"\"\"</span> | |
| <span style="color: #89b4fa;">def</span> <span style="color: #f9e2af;">__init__</span>(self, d_model=<span style="color: #fab387;">512</span>, max_len=<span style="color: #fab387;">5000</span>): | |
| <span style="color: #89b4fa;">super</span>().__init__() | |
| pe = torch.zeros(max_len, d_model) | |
| position = torch.arange(<span style="color: #fab387;">0</span>, max_len, dtype=torch.float).unsqueeze(<span style="color: #fab387;">1</span>) | |
| div_term = torch.exp(torch.arange(<span style="color: #fab387;">0</span>, d_model, <span style="color: #fab387;">2</span>).float() * (-math.log(<span style="color: #fab387;">10000.0</span>) / d_model)) | |
| pe[:, <span style="color: #fab387;">0</span>::<span style="color: #fab387;">2</span>] = torch.sin(position * div_term) | |
| pe[:, <span style="color: #fab387;">1</span>::<span style="color: #fab387;">2</span>] = torch.cos(position * div_term) | |
| pe = pe.unsqueeze(<span style="color: #fab387;">0</span>) <span style="color: #6c7086;"># [1, max_len, d_model]</span> | |
| self.register_buffer(<span style="color: #a6e3a1;">'pe'</span>, pe) | |
| <span style="color: #89b4fa;">def</span> <span style="color: #f9e2af;">forward</span>(self, x): | |
| <span style="color: #89b4fa;">return</span> x + self.pe[:, :x.size(<span style="color: #fab387;">1</span>)] | |
| <span style="color: #6c7086;"># Example usage:</span> | |
| batch_size, seq_len, d_model = <span style="color: #fab387;">2</span>, <span style="color: #fab387;">10</span>, <span style="color: #fab387;">512</span> | |
| x = torch.randn(batch_size, seq_len, d_model) | |
| mha = MultiHeadAttention(d_model=<span style="color: #fab387;">512</span>, n_heads=<span style="color: #fab387;">8</span>) | |
| pe = PositionalEncoding(d_model=<span style="color: #fab387;">512</span>) | |
| x = pe(x) <span style="color: #6c7086;"># Add positional encoding</span> | |
| output, attn_weights = mha(x, x, x) <span style="color: #6c7086;"># Self-attention</span> | |
| <span style="color: #89b4fa;">print</span>(f<span style="color: #a6e3a1;">"Output shape: {output.shape}"</span>) <span style="color: #6c7086;"># [2, 10, 512]</span> | |
| <span style="color: #89b4fa;">print</span>(f<span style="color: #a6e3a1;">"Attention shape: {attn_weights.shape}"</span>) <span style="color: #6c7086;"># [2, 8, 10, 10]</span> | |
| </pre> | |
| </div> | |
| <div class="callout tip"> | |
| <div class="callout-title">🎓 Run This Code!</div> | |
| Copy the code above and run it in a Jupyter notebook or Google Colab.<br> | |
| Experiment with different d_model, n_heads, and seq_len values! | |
| </div> | |
| `, | |
| code: ` | |
| <h3>Self-Attention (Scaled Dot-Product)</h3> | |
| <div class="formula" style="text-align:center;"> | |
| $$\\text{Attention}(Q, K, V) = \\text{softmax}\\left(\\frac{QK^T}{\\sqrt{d_k}}\\right)V$$ | |
| Where $Q = XW^Q$, $K = XW^K$, $V = XW^V$ | |
| </div> | |
| <div class="formula" style="text-align:center;"> | |
| <strong>Multi-Head:</strong> $$\\text{MultiHead}(Q,K,V) = \\text{Concat}(head_1, ..., head_h)W^O$$ | |
| <strong>Positional Encoding:</strong> $$PE_{(pos, 2i)} = \\sin(pos / 10000^{2i/d}), \\quad PE_{(pos, 2i+1)} = \\cos(pos / 10000^{2i/d})$$ | |
| </div> | |
| <span class="code-title">📄 transformer_from_scratch.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| <span class="keyword">import</span> math | |
| <span class="keyword">class</span> <span class="class-name">SelfAttention</span>(nn.Module): | |
| <span class="keyword">def</span> <span class="function">__init__</span>(self, d_model, n_heads): | |
| <span class="builtin">super</span>().__init__() | |
| self.d_k = d_model // n_heads | |
| self.n_heads = n_heads | |
| self.W_q = nn.Linear(d_model, d_model) | |
| self.W_k = nn.Linear(d_model, d_model) | |
| self.W_v = nn.Linear(d_model, d_model) | |
| self.W_o = nn.Linear(d_model, d_model) | |
| <span class="keyword">def</span> <span class="function">forward</span>(self, x): | |
| B, T, C = x.shape | |
| Q = self.W_q(x).view(B, T, self.n_heads, self.d_k).transpose(<span class="number">1</span>,<span class="number">2</span>) | |
| K = self.W_k(x).view(B, T, self.n_heads, self.d_k).transpose(<span class="number">1</span>,<span class="number">2</span>) | |
| V = self.W_v(x).view(B, T, self.n_heads, self.d_k).transpose(<span class="number">1</span>,<span class="number">2</span>) | |
| <span class="comment"># Scaled Dot-Product Attention</span> | |
| scores = (Q @ K.transpose(-<span class="number">2</span>,-<span class="number">1</span>)) / math.sqrt(self.d_k) | |
| attn = torch.softmax(scores, dim=-<span class="number">1</span>) | |
| out = (attn @ V).transpose(<span class="number">1</span>,<span class="number">2</span>).reshape(B, T, C) | |
| <span class="keyword">return</span> self.W_o(out) | |
| <span class="comment"># Test</span> | |
| attn = SelfAttention(d_model=<span class="number">512</span>, n_heads=<span class="number">8</span>) | |
| x = torch.randn(<span class="number">2</span>, <span class="number">10</span>, <span class="number">512</span>) <span class="comment"># (batch, seq_len, d_model)</span> | |
| <span class="keyword">print</span>(attn(x).shape) <span class="comment"># (2, 10, 512)</span></div> | |
| ` | |
| }, | |
| "perceptron": { | |
| overview: ` | |
| <h3>What is a Perceptron?</h3> | |
| <p>The perceptron is the simplest neural network, invented by Frank Rosenblatt in 1958. It's a binary linear classifier that makes predictions based on weighted inputs.</p> | |
| <div class="callout tip"> | |
| <div class="callout-title">✅ Advantages</div> | |
| • Simple and fast<br> | |
| • Guaranteed convergence for linearly separable data<br> | |
| • Interpretable weights | |
| </div> | |
| <div class="callout warning"> | |
| <div class="callout-title">⚠️ Key Limitation</div> | |
| <strong>Cannot solve XOR:</strong> Limited to linear decision boundaries only | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>How Perceptron Works</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Weighted Sum:</strong> z = w₁x₁ + w₂x₂ + ... + b</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Step Function:</strong> Output = 1 if z ≥ 0, else 0</div> | |
| </div> | |
| <div class="formula"> | |
| Learning Rule: w_new = w_old + α(y_true - y_pred)x | |
| </div> | |
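| <p>A complete perceptron in a dozen lines of NumPy, using the learning rule above with 0/1 labels. The AND gate is linearly separable, so training converges:</p> | |
| <span class="code-title">📄 perceptron.py</span><div class="code-block">import numpy as np | |
| X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) | |
| y = np.array([0, 0, 0, 1])  <span class="comment"># AND gate: linearly separable</span> | |
| w, b, alpha = np.zeros(2), 0.0, 1.0 | |
| for epoch in range(10): | |
|     for xi, yi in zip(X, y): | |
|         y_pred = 1 if xi @ w + b >= 0 else 0  <span class="comment"># step function</span> | |
|         w += alpha * (yi - y_pred) * xi  <span class="comment"># w_new = w_old + α(y_true - y_pred)x</span> | |
|         b += alpha * (yi - y_pred) | |
| print([1 if xi @ w + b >= 0 else 0 for xi in X])  <span class="comment"># [0, 0, 0, 1]</span></div> | |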
| `, | |
| math: ` | |
| <h3>Perceptron Learning Algorithm</h3> | |
| <p>The perceptron update rule is the simplest form of gradient descent.</p> | |
| <div class="formula"> | |
| For each misclassified sample (x, y), with labels y ∈ {-1, +1}:<br> | |
| w ← w + α × y × x<br> | |
| b ← b + α × y | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Manual Training</div> | |
| <strong>Data:</strong> x₁ = [1, 1], y₁ = 1 | x₂ = [0, 0], y₂ = 0<br> | |
| <strong>Initial:</strong> w = [0, 0], b = 0, α = 1<br> | |
| <br> | |
| <strong>Iteration 1 (x₁):</strong><br> | |
| z = 0×1 + 0×1 + 0 = 0 → ŷ = 1 ✓ (correct!)<br> | |
| <br> | |
| <strong>Iteration 2 (x₂):</strong><br> | |
| z = 0×0 + 0×0 + 0 = 0 → ŷ = 1 ✗ (wrong! y=0)<br> | |
| Update: w = [0,0] + 1×(0-1)×[0,0] = [0,0], b = 0 + 1×(0-1) = -1<br> | |
| <br> | |
| Now z(x₂) = 0 + 0 - 1 = -1 → ŷ = 0 ✓ | |
| </div> | |
| <h3>Convergence Theorem</h3> | |
| <div class="formula"> | |
| If data is linearly separable with margin γ and ||x|| ≤ R,<br> | |
| perceptron converges in at most (R/γ)² updates. | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">📚 Educational</div> | |
| <div class="box-content"> | |
| Historical importance - first trainable neural model. Perfect for teaching ML fundamentals | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🔬 Simple Classification</div> | |
| <div class="box-content"> | |
| Linearly separable problems: basic pattern recognition, simple binary decisions | |
| </div> | |
| </div> | |
| ` | |
| }, | |
| "mlp": { | |
| overview: ` | |
| <h3>Multi-Layer Perceptron (MLP)</h3> | |
| <p>MLP adds hidden layers between input and output, enabling non-linear decision boundaries and solving the XOR problem that single perceptrons cannot.</p> | |
| <h3>Why MLPs?</h3> | |
| <ul> | |
| <li><strong>Universal Approximation:</strong> Can approximate any continuous function</li> | |
| <li><strong>Non-Linear Learning:</strong> Solves complex problems</li> | |
| <li><strong>Feature Extraction:</strong> Hidden layers learn hierarchical features</li> | |
| </ul> | |
| <div class="callout insight"> | |
| <div class="callout-title">💡 The XOR Breakthrough</div> | |
| Single perceptron: Cannot solve XOR<br> | |
| MLP with 1 hidden layer (2 neurons): Solves XOR!<br> | |
| This proves the power of depth. | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Architecture Components</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Input Layer:</strong> Raw features (no computation)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Hidden Layers:</strong> Extract progressively abstract features</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Output Layer:</strong> Final predictions</div> | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">📊 Tabular Data</div> | |
| <div class="box-content">Credit scoring, fraud detection, customer churn, sales forecasting</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🏭 Manufacturing</div> | |
| <div class="box-content">Quality control, predictive maintenance, demand forecasting</div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Neural Network Forward Pass (Matrix Form)</h3> | |
| <p>Vectorization is key to modern deep learning. We process entire layers as matrix multiplications.</p> | |
| <div class="formula"> | |
| Layer 1: z⁽¹⁾ = W⁽¹⁾x + b⁽¹⁾ | a⁽¹⁾ = σ(z⁽¹⁾)<br> | |
| Layer 2: z⁽²⁾ = W⁽²⁾a⁽¹⁾ + b⁽²⁾ | a⁽²⁾ = σ(z⁽²⁾)<br> | |
| ...<br> | |
| Layer L: ŷ = Softmax(W⁽ᴸ⁾a⁽ᴸ⁻¹⁾ + b⁽ᴸ⁾) | |
| </div> | |
| <h3>Paper & Pain: Dimensionality Audit</h3> | |
| <p>Understanding tensor shapes is the #1 skill for debugging neural networks.</p> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Input x:</strong> [n_features, 1]</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Weights W⁽¹⁾:</strong> [n_hidden, n_features]</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Bias b⁽¹⁾:</strong> [n_hidden, 1]</div> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Solving XOR</div> | |
| Input: [0,1], Target: 1<br> | |
| Layer 1 (2 neurons):<br> | |
| z₁ = 10x₁ + 10x₂ - 5 | a₁ = σ(z₁)<br> | |
| z₂ = 10x₁ + 10x₂ - 15 | a₂ = σ(z₂)<br> | |
| Layer 2 (1 neuron):<br> | |
| y = σ(20a₁ - 20a₂ - 10)<br> | |
| <strong>Try it on paper!</strong> This specific configuration correctly outputs XOR values. | |
| </div> | |
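| <p>Checking the paper-and-pencil network in code, over all four XOR inputs:</p> | |
| <span class="code-title">📄 xor_check.py</span><div class="code-block">import numpy as np | |
| def sigmoid(z): | |
|     return 1 / (1 + np.exp(-z)) | |
| for x1, x2 in [(0, 0), (0, 1), (1, 0), (1, 1)]: | |
|     a1 = sigmoid(10 * x1 + 10 * x2 - 5)  <span class="comment"># roughly OR(x1, x2)</span> | |
|     a2 = sigmoid(10 * x1 + 10 * x2 - 15)  <span class="comment"># roughly AND(x1, x2)</span> | |
|     y = sigmoid(20 * a1 - 20 * a2 - 10)  <span class="comment"># OR minus AND = XOR</span> | |
|     print((x1, x2), round(float(y), 3)) | |
| <span class="comment"># (0,0)→0.0, (0,1)→1.0, (1,0)→1.0, (1,1)→0.0</span></div> | |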
| ` | |
| }, | |
| "weight-init": { | |
| overview: ` | |
| <h3>Weight Initialization Strategies</h3> | |
| <table> | |
| <tr> | |
| <th>Method</th> | |
| <th>Best For</th> | |
| <th>Formula</th> | |
| </tr> | |
| <tr> | |
| <td>Xavier/Glorot</td> | |
| <td>Sigmoid, Tanh</td> | |
| <td>N(0, σ²) with σ = √(2/(n_in+n_out))</td> | |
| </tr> | |
| <tr> | |
| <td>He/Kaiming</td> | |
| <td>ReLU</td> | |
| <td>N(0, σ²) with σ = √(2/n_in)</td> | |
| </tr> | |
| </table> | |
| <div class="callout warning"> | |
| <div class="callout-title">⚠️ Never Initialize to Zero!</div> | |
| All neurons learn identical features (symmetry problem) | |
| </div> | |
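| <p>In PyTorch these strategies are one-liners via torch.nn.init; a minimal sketch:</p> | |
| <span class="code-title">📄 init_demo.py</span><div class="code-block">import torch.nn as nn | |
| layer = nn.Linear(256, 128) | |
| nn.init.xavier_normal_(layer.weight)  <span class="comment"># Xavier/Glorot: sigmoid, tanh</span> | |
| nn.init.kaiming_normal_(layer.weight, nonlinearity="relu")  <span class="comment"># He: ReLU</span> | |
| nn.init.zeros_(layer.bias)  <span class="comment"># zero biases are fine; the weights break symmetry</span> | |
| print(layer.weight.std().item())  <span class="comment"># ≈ √(2/256) ≈ 0.088 after He init</span></div> | |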
| `, | |
| concepts: ` | |
| <h3>Key Principles</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Variance Preservation:</strong> Keep activation variance similar across layers</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Symmetry Breaking:</strong> Different weights force different features</div> | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🎯 Critical for Deep Networks</div> | |
| <div class="box-content"> | |
| Proper initialization is essential for training networks >10 layers. Wrong init = training failure | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">⚡ Faster Convergence</div> | |
| <div class="box-content"> | |
| Good initialization reduces training time by 2-10×, especially with modern optimizers | |
| </div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>The Variance Preservation Principle</h3> | |
| <p>To prevent gradients from vanishing or exploding, we want the variance of the activations to remain constant across layers.</p> | |
| <div class="formula"> | |
| For a linear layer: y = Σ wᵢxᵢ<br> | |
| Var(y) = Var(Σ wᵢxᵢ) = Σ Var(wᵢxᵢ)<br> | |
| Assuming w and x are independent with mean 0:<br> | |
| Var(wᵢxᵢ) = E[wᵢ²]E[xᵢ²] - E[wᵢ]²E[xᵢ]² = Var(wᵢ)Var(xᵢ)<br> | |
| So, Var(y) = n_in × Var(w) × Var(x) | |
| </div> | |
| <h3>1. Xavier (Glorot) Initialization</h3> | |
| <p>Goal: Var(y) = Var(x) and Var(grad_out) = Var(grad_in)</p> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Forward Pass:</strong> n_in × Var(w) = 1 ⇒ Var(w) = 1/n_in</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Backward Pass:</strong> n_out × Var(w) = 1 ⇒ Var(w) = 1/n_out</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Compromise:</strong> Var(w) = 2 / (n_in + n_out)</div> | |
| </div> | |
| <h3>2. He (Kaiming) Initialization</h3> | |
| <p>For ReLU activation, half the neurons are inactive (output 0), which halves the variance. We must compensate.</p> | |
| <div class="formula"> | |
| Var(ReLU(y)) = 1/2 × Var(y)<br> | |
| To keep Var(ReLU(y)) = Var(x):<br> | |
| 1/2 × n_in × Var(w) = 1<br> | |
| <strong>Var(w) = 2 / n_in</strong> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain Calculation</div> | |
| If n_in = 256 and you use ReLU:<br> | |
| Weight Std Dev = √(2/256) = √(1/128) ≈ <strong>0.088</strong><br> | |
| Initializing with std=1.0 or std=0.01 would cause immediate failure in a deep net! | |
| </div> | |
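| <p>You can watch this failure happen empirically: push random data through 50 ReLU layers and compare He initialization against a naive small std. A sketch:</p> | |
| <span class="code-title">📄 variance_experiment.py</span><div class="code-block">import torch | |
| def deep_relu_pass(std, n_layers=50, width=256): | |
|     x = torch.randn(1024, width) | |
|     for _ in range(n_layers): | |
|         W = torch.randn(width, width) * std | |
|         x = torch.relu(x @ W)  <span class="comment"># ReLU halves the signal's second moment</span> | |
|     return x.std().item() | |
| print(deep_relu_pass((2.0 / 256) ** 0.5))  <span class="comment"># He init: activations keep a healthy scale</span> | |
| print(deep_relu_pass(0.01))  <span class="comment"># too small: signal collapses toward zero</span></div> | |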
| ` | |
| }, | |
| "loss": { | |
| overview: ` | |
| <p>The loss function is the <strong>judge</strong> of your neural network. It measures how wrong the network's predictions are compared to the true answers. The entire goal of training is to <strong>minimize this number</strong>.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">🎯 Why Loss Functions Matter</div> | |
| The choice of loss function determines <em>what</em> the network optimizes for. Use the wrong loss and you'll get a network that's great at the wrong thing. For example, pairing MSE with a sigmoid output makes the gradient vanish precisely when the network is confidently wrong, which is exactly when it most needs to learn (see the Math tab). | |
| </div> | |
| <h3>Matching Loss to Task</h3> | |
| <table style="width:100%; border-collapse: collapse; margin: 15px 0;"> | |
| <tr style="border-bottom: 2px solid var(--cyan);"> | |
| <th style="padding: 10px; text-align: left; color: var(--cyan);">Task</th> | |
| <th style="padding: 10px; text-align: left; color: var(--cyan);">Loss Function</th> | |
| <th style="padding: 10px; text-align: left; color: var(--cyan);">Output Activation</th> | |
| <th style="padding: 10px; text-align: left; color: var(--cyan);">PyTorch</th> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Regression</td><td>MSE / MAE / Huber</td><td>None (Linear)</td><td>nn.MSELoss()</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Binary Classification</td><td>Binary Cross-Entropy</td><td>Sigmoid</td><td>nn.BCEWithLogitsLoss()</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Multi-class</td><td>Categorical Cross-Entropy</td><td>Softmax (built-in)</td><td>nn.CrossEntropyLoss()</td></tr> | |
| <tr><td style="padding: 8px;">Object Detection</td><td>Focal Loss + IoU Loss</td><td>Varies</td><td>Custom</td></tr> | |
| </table> | |
| `, | |
| concepts: ` | |
| <h3>Common Loss Functions</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>MSE:</strong> (1/n)Σ(y - ŷ)² - Penalizes large errors, sensitive to outliers</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>MAE:</strong> (1/n)Σ|y - ŷ| - Robust to outliers, constant gradient, slower convergence</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Huber Loss:</strong> MSE when |error| ≤ δ, MAE otherwise. Best of both — smooth + robust to outliers</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>BCE (Binary Cross-Entropy):</strong> -[y·log(ŷ) + (1-y)·log(1-ŷ)] - Used with Sigmoid</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">05</div> | |
| <div><strong>CCE (Categorical Cross-Entropy):</strong> -Σ y·log(ŷ) - Used with Softmax for multi-class</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">06</div> | |
| <div><strong>Hinge Loss:</strong> max(0, 1 - y·ŷ) where y ∈ {-1, +1} - Margin-based, SVM-style</div> | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🎯 Task-Dependent Selection</div> | |
| <div class="box-content"> | |
| Every ML task needs appropriate loss: classification (cross-entropy), regression (MSE/MAE), ranking (triplet loss) | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">📊 Custom Losses</div> | |
| <div class="box-content"> | |
| Business-specific objectives: Focal Loss (imbalanced data), Dice Loss (segmentation), Contrastive Loss (similarity learning) | |
| </div> | |
| </div> | |
| <h3>Loss Function Comparison</h3> | |
| <table> | |
| <tr><th>Loss</th><th>Type</th><th>Outlier Sensitivity</th><th>Key Property</th></tr> | |
| <tr><td>MSE</td><td>Regression</td><td>High</td><td>Penalizes large errors heavily</td></tr> | |
| <tr><td>MAE</td><td>Regression</td><td>Low</td><td>Robust, constant gradient</td></tr> | |
| <tr><td>Huber</td><td>Regression</td><td>Medium</td><td>Smooth + robust (MSE+MAE combo)</td></tr> | |
| <tr><td>BCE</td><td>Binary Class.</td><td>High</td><td>Strong gradients for wrong predictions</td></tr> | |
| <tr><td>CCE</td><td>Multi-class</td><td>High</td><td>Outputs probabilities via Softmax</td></tr> | |
| <tr><td>Hinge</td><td>Binary Class.</td><td>Medium</td><td>Margin-based, less probabilistic</td></tr> | |
| </table> | |
| <div class="callout tip"> | |
| <div class="callout-title">🎤 Probable Interview Questions</div> | |
| 1. Difference between MSE and MAE?<br> | |
| 2. Why Huber loss is preferred sometimes?<br> | |
| 3. Why BCE with sigmoid?<br> | |
| 4. Why softmax with CCE?<br> | |
| 5. Why can't we use MSE for classification?<br> | |
| 6. What is Hinge loss and where is it used?<br> | |
| 7. Difference between loss function and evaluation metric?<br> | |
| 8. How does loss choice affect gradients?<br> | |
| 9. What is Focal Loss and when to use it?<br> | |
| 10. Can we design custom loss functions? | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Binary Cross-Entropy (BCE) Derivation</h3> | |
| <p>Why do we use logs? BCE is derived from Maximum Likelihood Estimation (MLE) assuming a Bernoulli distribution.</p> | |
| <div class="formula"> | |
| L(ŷ, y) = -(y log(ŷ) + (1-y) log(1-ŷ)) | |
| </div> | |
| <h3>Huber Loss (Smooth MAE)</h3> | |
| <p>Combines MSE for small errors and MAE for large errors using threshold δ:</p> | |
| <div class="formula"> | |
| L = ½(y - ŷ)² when |y - ŷ| ≤ δ<br> | |
| L = δ|y - ŷ| - ½δ² otherwise | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Huber Intuition</div> | |
| <strong>Small error (|error| ≤ δ):</strong> Behaves like MSE — smooth, differentiable<br> | |
| <strong>Large error (|error| > δ):</strong> Behaves like MAE — doesn't blow up for outliers<br><br> | |
| Best of both worlds! Used when data contains mild outliers. | |
| </div> | |
| <h3>Hinge Loss (SVM-style)</h3> | |
| <div class="formula"> | |
| L = (1/n) Σ max(0, 1 - y·ŷ) where y ∈ {-1, +1} | |
| </div> | |
| <p>Margin-based loss: only penalizes predictions within the margin boundary. Used in SVMs and some neural network classifiers.</p> | |
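| <p>Both losses take a few lines of NumPy; a minimal sketch matching the piecewise definitions above (the sample values are ours):</p> | |
| <span class="code-title">📄 huber_hinge.py</span><div class="code-block">import numpy as np | |
| def huber(y, y_hat, delta=1.0): | |
|     err = np.abs(y - y_hat) | |
|     <span class="comment"># quadratic inside the delta band, linear outside it</span> | |
|     return np.where(err > delta, delta * err - 0.5 * delta**2, 0.5 * err**2).mean() | |
| def hinge(y, y_hat): | |
|     <span class="comment"># labels y in {-1, +1}; zero loss beyond the margin</span> | |
|     return np.maximum(0, 1 - y * y_hat).mean() | |
| print(round(float(huber(np.array([3.0, 0.0]), np.array([2.5, 4.0]))), 4))  <span class="comment"># (0.125 + 3.5)/2 = 1.8125</span> | |
| print(round(float(hinge(np.array([1.0, -1.0, 1.0]), np.array([0.8, 0.3, -0.5]))), 3))  <span class="comment"># (0.2 + 1.3 + 1.5)/3 = 1.0</span></div> | |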
| <h3>Paper & Pain: Why not MSE for Classification?</h3> | |
| <p>If we use MSE for sigmoid output, the gradient is:</p> | |
| <div class="formula"> | |
| ∂L/∂w = (ŷ - y) <strong>σ'(z)</strong> x | |
| </div> | |
| <div class="callout warning"> | |
| <div class="callout-title">⚠️ The Saturation Problem</div> | |
| If the model is very wrong (e.g., target 1, output 0.001), σ'(z) is near 0.<br> | |
| The gradient vanishes, and the model <strong>stops learning!</strong> | |
| </div> | |
| <h3>The BCE Advantage</h3> | |
| <p>When using BCE, the σ'(z) term cancels out! The gradient becomes:</p> | |
| <div class="formula" style="font-size: 1.2rem; color: #00d4ff;"> | |
| ∂L/∂w = (ŷ - y) x | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">💡</div> | |
| <div>This is beautiful: the gradient depends <strong>only on the error</strong> (ŷ-y), not on how saturated the neuron is. This enables much faster training.</div> | |
| </div> | |
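| <p>A numeric sketch of the cancellation, using a single weight: for a confidently wrong prediction, the MSE gradient nearly vanishes while the BCE gradient stays large.</p> | |
| <span class="code-title">📄 bce_vs_mse_gradient.py</span><div class="code-block">import numpy as np | |
| def sigmoid(z): | |
|     return 1 / (1 + np.exp(-z)) | |
| x, y = 1.0, 1.0  <span class="comment"># target is 1</span> | |
| z = -7.0  <span class="comment"># confidently wrong: sigmoid(-7) ≈ 0.001</span> | |
| y_hat = sigmoid(z) | |
| sigma_prime = y_hat * (1 - y_hat) | |
| grad_mse = (y_hat - y) * sigma_prime * x  <span class="comment"># ≈ -0.0009 (saturated, barely learns)</span> | |
| grad_bce = (y_hat - y) * x  <span class="comment"># ≈ -0.999 (strong learning signal)</span> | |
| print(grad_mse, grad_bce)</div> | |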
| `, | |
| code: ` | |
| <h3>Loss Functions (MathJax)</h3> | |
| <div class="formula" style="text-align:center;"> | |
| <strong>MSE (Regression):</strong> $$L = \\frac{1}{n}\\sum_{i=1}^{n}(y_i - \\hat{y}_i)^2$$ | |
| <strong>Binary Cross-Entropy:</strong> $$L = -\\frac{1}{n}\\sum[y\\log(\\hat{y}) + (1-y)\\log(1-\\hat{y})]$$ | |
| <strong>Categorical Cross-Entropy:</strong> $$L = -\\sum_{c=1}^{C} y_c \\log(\\hat{y}_c)$$ | |
| </div> | |
| <span class="code-title">📄 loss_functions.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| <span class="comment"># --- MSE Loss (Regression) ---</span> | |
| mse = nn.MSELoss() | |
| pred = torch.tensor([<span class="number">2.5</span>, <span class="number">0.0</span>, <span class="number">2.1</span>]) | |
| target = torch.tensor([<span class="number">3.0</span>, <span class="number">-0.5</span>, <span class="number">2.0</span>]) | |
| <span class="keyword">print</span>(f<span class="string">"MSE: {mse(pred, target).item():.4f}"</span>) | |
| <span class="comment"># --- Binary Cross-Entropy (Binary Classification) ---</span> | |
| bce = nn.BCEWithLogitsLoss() <span class="comment"># includes sigmoid</span> | |
| logits = torch.tensor([<span class="number">1.5</span>, <span class="number">-0.5</span>, <span class="number">2.0</span>]) | |
| labels = torch.tensor([<span class="number">1.0</span>, <span class="number">0.0</span>, <span class="number">1.0</span>]) | |
| <span class="keyword">print</span>(f<span class="string">"BCE: {bce(logits, labels).item():.4f}"</span>) | |
| <span class="comment"># --- Cross-Entropy (Multi-class) ---</span> | |
| ce = nn.CrossEntropyLoss() <span class="comment"># includes softmax internally</span> | |
| logits = torch.tensor([[<span class="number">2.0</span>, <span class="number">1.0</span>, <span class="number">0.1</span>]]) | |
| target = torch.tensor([<span class="number">0</span>]) <span class="comment"># class index</span> | |
| <span class="keyword">print</span>(f<span class="string">"CE: {ce(logits, target).item():.4f}"</span>)</div> | |
| ` | |
| }, | |
| "optimizers": { | |
| overview: ` | |
| <p>Optimizers are the <strong>algorithms that actually update the weights</strong> during training. Think of training as hiking down a foggy mountain — you can't see the bottom (global minimum), but you can feel the slope beneath your feet (gradient). The optimizer decides <strong>how big a step to take</strong> and <strong>in which direction</strong>.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">🏔️ The Mountain Analogy</div> | |
| <strong>SGD</strong> = Walking downhill with fixed-size steps. Simple but slow.<br> | |
| <strong>Momentum</strong> = Rolling a ball downhill — it builds up speed on consistent slopes.<br> | |
| <strong>Adam</strong> = A smart hiker with GPS who adapts step size per-dimension and remembers past terrain. | |
| </div> | |
| <h3>The Optimization Problem</h3> | |
| <p>Training finds weights that <strong>minimize the loss function</strong>. The loss surface is a high-dimensional landscape with valleys (good), plateaus (stuck), and saddle points (tricky). Different optimizers navigate this landscape differently.</p> | |
| <div class="formula" style="text-align:center;"> | |
| $$\\theta^* = \\arg\\min_{\\theta} L(\\theta; X, y)$$ | |
| </div> | |
| <h3>Learning Rate: The Most Important Hyperparameter</h3> | |
| <div class="info-box"> | |
| <div class="box-title">⚡ Learning Rate Effects</div> | |
| <div class="box-content"> | |
| <strong>Too high (0.1):</strong> Overshoots the minimum, loss oscillates wildly or diverges to infinity<br> | |
| <strong>Too low (0.00001):</strong> Converges extremely slowly, might get stuck in local minima<br> | |
| <strong>Just right (0.001):</strong> Smooth convergence to a good minimum<br><br> | |
| <strong>Rule of thumb:</strong> Start with lr=0.001 for Adam, lr=0.01 for SGD. Use learning rate schedulers (cosine annealing, step decay) to reduce lr during training. | |
| </div> | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>1. SGD (Stochastic Gradient Descent)</h3> | |
| <p>The simplest optimizer. Compute the gradient on a mini-batch and take a step proportional to the learning rate:</p> | |
| <div class="formula" style="text-align:center;">$$\\theta_{t+1} = \\theta_t - \\eta \\nabla L(\\theta_t)$$</div> | |
| <p><strong>Problem:</strong> SGD oscillates in ravines (narrow valleys) and is slow on plateaus. It treats all parameters equally regardless of how frequently they update.</p> | |
| <h3>2. SGD with Momentum</h3> | |
| <p>Adds a "velocity" term that remembers past gradients. Like a ball rolling downhill — it accelerates when the gradient consistently points the same direction:</p> | |
| <div class="formula" style="text-align:center;"> | |
| $$v_t = \\gamma v_{t-1} + \\eta \\nabla L(\\theta_t)$$ | |
| $$\\theta_{t+1} = \\theta_t - v_t$$ | |
| </div> | |
| <p>Where $\\gamma$ is the momentum coefficient (typically 0.9). This dampens oscillations and speeds convergence 2-10x.</p> | |
| <h3>3. RMSprop</h3> | |
| <p>Adapts the learning rate <strong>per parameter</strong>. Parameters with large gradients get smaller updates, and vice versa:</p> | |
| <div class="formula" style="text-align:center;"> | |
| $$s_t = \\beta s_{t-1} + (1-\\beta)g_t^2$$ | |
| $$\\theta_{t+1} = \\theta_t - \\frac{\\eta}{\\sqrt{s_t + \\epsilon}} g_t$$ | |
| </div> | |
| <p>Proposed by Geoffrey Hinton in a Coursera lecture slide (never formally published!).</p> | |
| <h3>4. Adam (Adaptive Moment Estimation) ⭐</h3> | |
| <p><strong>The default optimizer for most deep learning.</strong> It combines the best of Momentum (first moment) and RMSprop (second moment):</p> | |
| <div class="formula" style="text-align:center;"> | |
| $$m_t = \\beta_1 m_{t-1} + (1-\\beta_1)g_t \\quad \\text{(1st moment: mean of gradients)}$$ | |
| $$v_t = \\beta_2 v_{t-1} + (1-\\beta_2)g_t^2 \\quad \\text{(2nd moment: variance of gradients)}$$ | |
| $$\\hat{m}_t = \\frac{m_t}{1-\\beta_1^t}, \\quad \\hat{v}_t = \\frac{v_t}{1-\\beta_2^t} \\quad \\text{(bias correction)}$$ | |
| $$\\theta_{t+1} = \\theta_t - \\frac{\\eta \\hat{m}_t}{\\sqrt{\\hat{v}_t} + \\epsilon}$$ | |
| </div> | |
| <p>Default hyperparameters: $\\beta_1=0.9$, $\\beta_2=0.999$, $\\epsilon=10^{-8}$, $\\eta=0.001$.</p> | |
| <h3>5. AdamW (Adam with Decoupled Weight Decay)</h3> | |
| <p>Standard Adam applies weight decay <em>inside</em> the adaptive learning rate, which doesn't regularize properly. AdamW <strong>decouples</strong> weight decay from the gradient update:</p> | |
| <div class="formula" style="text-align:center;">$$\\theta_{t+1} = (1 - \\lambda)\\theta_t - \\frac{\\eta \\hat{m}_t}{\\sqrt{\\hat{v}_t} + \\epsilon}$$</div> | |
| <p><strong>Used by:</strong> GPT, BERT, LLaMA, and virtually all modern large models.</p> | |
| <div class="callout tip"> | |
| <div class="callout-title">🎯 Which Optimizer Should You Use?</div> | |
| <strong>Default:</strong> Adam or AdamW with lr=0.001<br> | |
| <strong>Computer Vision:</strong> SGD with Momentum (lr=0.01, momentum=0.9) often generalizes better<br> | |
| <strong>Large Language Models:</strong> AdamW with cosine lr schedule and warmup<br> | |
| <strong>Fine-tuning pretrained:</strong> AdamW with very low lr (2e-5 to 5e-5) | |
| </div> | |
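| <p>Setting these up in PyTorch takes a few lines; a minimal sketch following the recommendations above (the Linear model is just a placeholder):</p> | |
| <span class="code-title">📄 optimizer_setup.py</span><div class="code-block">import torch | |
| import torch.nn as nn | |
| model = nn.Linear(10, 2)  <span class="comment"># placeholder model</span> | |
| sgd = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9) | |
| adam = torch.optim.Adam(model.parameters(), lr=0.001) | |
| adamw = torch.optim.AdamW(model.parameters(), lr=0.001, weight_decay=0.01) | |
| <span class="comment"># Cosine annealing schedule, commonly paired with AdamW</span> | |
| scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(adamw, T_max=100)</div> | |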
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🚀 Training Acceleration</div> | |
| <div class="box-content"> | |
| Modern optimizers (Adam) reduce training time by 5-10× compared to basic SGD | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🎯 Architecture-Specific</div> | |
| <div class="box-content"> | |
| CNNs: SGD+Momentum | Transformers: AdamW | RNNs: RMSprop | Default: Adam | |
| </div> | |
| </div> | |
| <h3>Optimizer Comparison</h3> | |
| <table> | |
| <tr><th>Optimizer</th><th>Key Idea</th><th>Problem</th></tr> | |
| <tr><td>SGD</td><td>Simple, fast</td><td>Noisy convergence</td></tr> | |
| <tr><td>Momentum</td><td>Smooths updates</td><td>Needs tuning</td></tr> | |
| <tr><td>AdaGrad</td><td>Adaptive LR</td><td>LR shrinks too much</td></tr> | |
| <tr><td>RMSProp</td><td>Fixes AdaGrad</td><td>No momentum</td></tr> | |
| <tr><td><strong>Adam</strong></td><td><strong>Momentum + adaptive LR</strong></td><td>More state/compute; can generalize worse than tuned SGD</td></tr> | |
| </table> | |
| <div class="callout tip"> | |
| <div class="callout-title">🎤 Probable Interview Questions</div> | |
| 1. Difference between optimizer and gradient descent?<br> | |
| 2. Why does SGD oscillate?<br> | |
| 3. Why does AdaGrad fail in deep networks?<br> | |
| 4. How does RMSProp fix AdaGrad?<br> | |
| 5. Why is bias correction needed in Adam?<br> | |
| 6. What happens if learning rate is too high?<br> | |
| 7. When would you prefer SGD over Adam?<br> | |
| 8. What is momentum intuitively?<br> | |
| 9. Why is Adam the default choice?<br> | |
| 10. Can Adam overfit? | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Gradient Descent: The Foundation</h3> | |
| <p>All optimizers are variations of gradient descent. The goal: minimize the loss function L(w).</p> | |
| <div class="formula" style="font-size: 1.2rem; text-align: center; margin: 20px 0; background: rgba(0, 212, 255, 0.08); padding: 25px; border-radius: 8px;"> | |
| <strong>w = w - α × ∇L(w)</strong> | |
| </div> | |
| <h3>1. Stochastic Gradient Descent (SGD)</h3> | |
| <div class="formula"> | |
| w_{t+1} = w_t - α × ∇L(w_t)<br><br> | |
| Where α = learning rate (typically 0.01 - 0.1) | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: SGD Step</div> | |
| <strong>Current w:</strong> 2.0, <strong>Gradient:</strong> ∇L = 0.5, <strong>α:</strong> 0.1<br><br> | |
| w_new = 2.0 - 0.1 × 0.5 = 2.0 - 0.05 = <strong>1.95</strong><br> | |
| The weight moved slightly toward lower loss! | |
| </div> | |
| <h3>2. SGD with Momentum</h3> | |
| <p>Adds a "velocity" term to accelerate through flat regions and dampen oscillations.</p> | |
| <div class="formula"> | |
| v_{t+1} = β × v_t + ∇L(w_t)<br> | |
| w_{t+1} = w_t - α × v_{t+1}<br><br> | |
| Where β = momentum coefficient (typically 0.9) | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Momentum Step</div> | |
| <strong>v_t:</strong> 0.3, <strong>∇L:</strong> 0.5, <strong>β:</strong> 0.9, <strong>α:</strong> 0.1<br><br> | |
| v_new = 0.9 × 0.3 + 0.5 = 0.27 + 0.5 = <strong>0.77</strong><br> | |
| w_new = w - 0.1 × 0.77 = <strong>larger step!</strong><br> | |
| Momentum accumulates past gradients for faster convergence. | |
| </div> | |
| <h3>3. AdaGrad (Adaptive Gradient)</h3> | |
| <p>Adapts learning rate per-parameter based on how frequently each parameter is updated.</p> | |
| <div class="formula"> | |
| <strong>Accumulated Gradient:</strong><br> | |
| G_t = G_{t-1} + (∇L)²<br><br> | |
| <strong>Update Rule:</strong><br> | |
| w_{t+1} = w_t - α / √(G_t + ε) × ∇L<br><br> | |
| Where α = learning rate, ε = 1e-8 (numerical stability) | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: AdaGrad Intuition</div> | |
| <strong>Frequent parameters</strong> → G_t grows fast → learning rate shrinks<br> | |
| <strong>Rare parameters</strong> → G_t stays small → learning rate stays large<br><br> | |
| <strong>Problem:</strong> G_t only accumulates (never forgets), so learning rate keeps shrinking and training may stop early!<br> | |
| <strong>This is exactly why RMSprop was invented →</strong> | |
| </div> | |
| <h3>4. RMSprop (Root Mean Square Propagation)</h3> | |
| <p>Fixes AdaGrad's shrinking problem by using a <strong>decaying average</strong> of recent squared gradients instead of summing all.</p> | |
| <div class="formula"> | |
| v_t = β × v_{t-1} + (1-β) × (∇L)²<br> | |
| w_{t+1} = w_t - α × ∇L / √(v_t + ε)<br><br> | |
| β = 0.9, ε = 1e-8 (numerical stability) | |
| </div> | |
| <h3>5. Adam (Adaptive Moment Estimation)</h3> | |
| <p>Combines momentum (from SGD) AND adaptive learning rates (from RMSprop). The most popular optimizer.</p> | |
| <div class="formula" style="background: rgba(255, 107, 53, 0.08); padding: 20px; border-radius: 8px;"> | |
| <strong>Step 1 - First Moment (Momentum):</strong><br> | |
| m_t = β₁ × m_{t-1} + (1-β₁) × ∇L<br><br> | |
| <strong>Step 2 - Second Moment (RMSprop):</strong><br> | |
| v_t = β₂ × v_{t-1} + (1-β₂) × (∇L)²<br><br> | |
| <strong>Step 3 - Bias Correction:</strong><br> | |
| m̂_t = m_t / (1 - β₁ᵗ)<br> | |
| v̂_t = v_t / (1 - β₂ᵗ)<br><br> | |
| <strong>Step 4 - Update:</strong><br> | |
| w_{t+1} = w_t - α × m̂_t / (√v̂_t + ε) | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Adam Step-by-Step</div> | |
| <strong>Hyperparameters:</strong> α=0.001, β₁=0.9, β₂=0.999, ε=1e-8<br> | |
| <strong>t=2:</strong> ∇L = 0.5, m₁ = 0.05, v₁ = 0.00025<br><br> | |
| m₂ = 0.9 × 0.05 + 0.1 × 0.5 = 0.045 + 0.05 = 0.095<br> | |
| v₂ = 0.999 × 0.00025 + 0.001 × 0.25 = 0.000499<br><br> | |
| m̂₂ = 0.095 / (1 - 0.81) = 0.095 / 0.19 = 0.50<br> | |
| v̂₂ = 0.000499 / (1 - 0.999²) = 0.000499 / 0.002 ≈ 0.2495<br><br> | |
| Δw = 0.001 × 0.50 / (√0.2495 + 1e-8) ≈ <strong>0.001</strong> | |
| </div> | |
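| <p>You can double-check this arithmetic with a few lines of plain Python (a quick verification script mirroring the t=2 hand calculation above):</p> | |
| <span class="code-title">📄 adam_check.py</span><div class="code-block"><span class="keyword">import</span> math | |
| b1, b2, lr, eps = <span class="number">0.9</span>, <span class="number">0.999</span>, <span class="number">0.001</span>, <span class="number">1e-8</span> | |
| g, m1, v1, t = <span class="number">0.5</span>, <span class="number">0.05</span>, <span class="number">0.00025</span>, <span class="number">2</span> | |
| m2 = b1 * m1 + (<span class="number">1</span> - b1) * g <span class="comment"># 0.095</span> | |
| v2 = b2 * v1 + (<span class="number">1</span> - b2) * g**<span class="number">2</span> <span class="comment"># 0.00049975</span> | |
| m_hat = m2 / (<span class="number">1</span> - b1**t) <span class="comment"># 0.5</span> | |
| v_hat = v2 / (<span class="number">1</span> - b2**t) <span class="comment"># 0.25</span> | |
| dw = lr * m_hat / (math.sqrt(v_hat) + eps) | |
| <span class="keyword">print</span>(round(m2, <span class="number">4</span>), round(v_hat, <span class="number">4</span>), round(dw, <span class="number">6</span>)) <span class="comment"># 0.095 0.25 0.001</span> | |
| </div> | |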
| <div class="callout warning"> | |
| <div class="callout-title">⚠️ Why Bias Correction?</div> | |
| m₀ = 0, v₀ = 0 initialization biases early estimates toward zero.<br> | |
| Dividing by (1 - βᵗ) compensates for this, especially in early training steps. | |
| </div> | |
| `, | |
| code: ` | |
| <h3>Implementing Optimizers in PyTorch</h3> | |
| <p>PyTorch provides all common optimizers in <code>torch.optim</code>. Here's a complete example showing how to use them and how to implement SGD from scratch.</p> | |
| <span class="code-title">📄 optimizers_comparison.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| <span class="keyword">import</span> torch.optim <span class="keyword">as</span> optim | |
| <span class="comment"># --- Simple model for demonstration ---</span> | |
| model = nn.Sequential( | |
| nn.Linear(<span class="number">784</span>, <span class="number">256</span>), | |
| nn.ReLU(), | |
| nn.Linear(<span class="number">256</span>, <span class="number">10</span>), | |
| ) | |
| <span class="comment"># ====== 1. SGD ======</span> | |
| optimizer = optim.SGD(model.parameters(), lr=<span class="number">0.01</span>) | |
| <span class="comment"># ====== 2. SGD with Momentum ======</span> | |
| optimizer = optim.SGD(model.parameters(), lr=<span class="number">0.01</span>, momentum=<span class="number">0.9</span>) | |
| <span class="comment"># ====== 3. Adam (Most Common) ======</span> | |
| optimizer = optim.Adam(model.parameters(), lr=<span class="number">0.001</span>) | |
| <span class="comment"># ====== 4. AdamW (For Transformers/LLMs) ======</span> | |
| optimizer = optim.AdamW(model.parameters(), lr=<span class="number">0.001</span>, weight_decay=<span class="number">0.01</span>) | |
| <span class="comment"># ====== Training Loop ======</span> | |
| criterion = nn.CrossEntropyLoss() | |
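| <span class="comment"># train_loader (assumed defined elsewhere): a DataLoader yielding (X_batch, y_batch)</span> | |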
| <span class="keyword">for</span> epoch <span class="keyword">in</span> range(<span class="number">10</span>): | |
| <span class="keyword">for</span> X_batch, y_batch <span class="keyword">in</span> train_loader: | |
| <span class="comment"># 1. Forward pass</span> | |
| predictions = model(X_batch) | |
| loss = criterion(predictions, y_batch) | |
| <span class="comment"># 2. Backward pass</span> | |
| optimizer.zero_grad() <span class="comment"># Clear old gradients</span> | |
| loss.backward() <span class="comment"># Compute gradients</span> | |
| <span class="comment"># 3. Update weights</span> | |
| optimizer.step() <span class="comment"># Apply optimizer rule</span> | |
| <span class="keyword">print</span>(f<span class="string">"Epoch {epoch+1}, Loss: {loss.item():.4f}"</span>) | |
| </div> | |
| <h3>SGD from Scratch</h3> | |
| <p>Understanding what happens inside <code>optimizer.step()</code>:</p> | |
| <span class="code-title">📄 sgd_from_scratch.py</span><div class="code-block"><span class="keyword">import</span> numpy <span class="keyword">as</span> np | |
| <span class="keyword">class</span> <span class="function">SGD</span>: | |
| <span class="string">"""Stochastic Gradient Descent with Momentum."""</span> | |
| <span class="keyword">def</span> <span class="function">__init__</span>(self, params, lr=<span class="number">0.01</span>, momentum=<span class="number">0.9</span>): | |
| self.params = params | |
| self.lr = lr | |
| self.momentum = momentum | |
| self.velocities = [np.zeros_like(p) <span class="keyword">for</span> p <span class="keyword">in</span> params] | |
| <span class="keyword">def</span> <span class="function">step</span>(self, gradients): | |
| <span class="keyword">for</span> i, (param, grad) <span class="keyword">in</span> enumerate(zip(self.params, gradients)): | |
| <span class="comment"># v = momentum * v_prev + lr * gradient</span> | |
| self.velocities[i] = self.momentum * self.velocities[i] + self.lr * grad | |
| <span class="comment"># param = param - velocity</span> | |
| param -= self.velocities[i] | |
| <span class="comment"># Usage</span> | |
| W = np.random.randn(<span class="number">3</span>, <span class="number">3</span>) * <span class="number">0.01</span> | |
| b = np.zeros(<span class="number">3</span>) | |
| optimizer = SGD([W, b], lr=<span class="number">0.01</span>, momentum=<span class="number">0.9</span>) | |
| <span class="comment"># After computing gradients dW, db:</span> | |
| <span class="comment"># optimizer.step([dW, db])</span> | |
| </div> | |
| <h3>Learning Rate Schedulers</h3> | |
| <span class="code-title">📄 lr_schedulers.py</span><div class="code-block"><span class="keyword">from</span> torch.optim.lr_scheduler <span class="keyword">import</span> StepLR, CosineAnnealingLR | |
| optimizer = optim.Adam(model.parameters(), lr=<span class="number">0.001</span>) | |
| <span class="comment"># Reduce LR by 0.1 every 30 epochs</span> | |
| scheduler = StepLR(optimizer, step_size=<span class="number">30</span>, gamma=<span class="number">0.1</span>) | |
| <span class="comment"># Cosine annealing (smooth decay, used in modern training)</span> | |
| scheduler = CosineAnnealingLR(optimizer, T_max=<span class="number">100</span>, eta_min=<span class="number">1e-6</span>) | |
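| <span class="comment"># train_one_epoch() below is a placeholder for your usual training loop</span> | |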
| <span class="keyword">for</span> epoch <span class="keyword">in</span> range(<span class="number">100</span>): | |
| train_one_epoch() | |
| scheduler.step() <span class="comment"># Update learning rate</span> | |
| <span class="keyword">print</span>(f<span class="string">"LR: {scheduler.get_last_lr()[0]:.6f}"</span>) | |
| </div> | |
| ` | |
| }, | |
| "backprop": { | |
| overview: ` | |
| <p>Backpropagation is the algorithm that makes neural networks <strong>learn</strong>. It computes how much each weight in the network contributed to the error, then adjusts each weight accordingly. It's conceptually simple: <strong>the chain rule of calculus applied systematically from output to input</strong>.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">💡 The Factory Analogy</div> | |
| Imagine a factory with 5 stations producing widgets. A quality inspector at the end finds defects. Backpropagation is like tracing the defect backward through each station to find which worker caused the problem and by how much. Each worker (weight) then adjusts their technique proportionally to their contribution to the defect. | |
| </div> | |
| <h3>The Algorithm in 4 Steps</h3> | |
| <div class="list-item"><div class="list-num">1</div><div><strong>Forward Pass:</strong> Compute outputs layer by layer: $z^l = W^l a^{l-1} + b^l$, then $a^l = \\sigma(z^l)$</div></div> | |
| <div class="list-item"><div class="list-num">2</div><div><strong>Compute Loss:</strong> Compare prediction to truth: $L = \\text{CrossEntropy}(\\hat{y}, y)$</div></div> | |
| <div class="list-item"><div class="list-num">3</div><div><strong>Backward Pass:</strong> Compute $\\frac{\\partial L}{\\partial W^l}$ for every layer using the chain rule</div></div> | |
| <div class="list-item"><div class="list-num">4</div><div><strong>Update:</strong> $W^l \\leftarrow W^l - \\eta \\frac{\\partial L}{\\partial W^l}$</div></div> | |
| <h3>Computational Graphs</h3> | |
| <p>Modern frameworks (PyTorch, TensorFlow) build a <strong>computational graph</strong> during the forward pass. Each operation becomes a node. During backprop, they traverse this graph in reverse, applying the chain rule at each node. This is called <strong>automatic differentiation (autograd)</strong>.</p> | |
| <div class="info-box"> | |
| <div class="box-title">🔑 Key Insight</div> | |
| <div class="box-content"> | |
| Backprop's time complexity is <strong>O(W)</strong> — proportional to the number of weights. This is the same as a forward pass! Computing all gradients is no more expensive than computing the prediction itself. | |
| </div> | |
| </div> | |
| `, | |
| concepts: ` | |
| <div class="formula"> | |
| Chain Rule:<br> | |
| ∂L/∂w = ∂L/∂y × ∂y/∂z × ∂z/∂w<br> | |
| <br> | |
| For layer l:<br> | |
| δˡ = (Wˡ⁺¹)ᵀ δˡ⁺¹ ⊙ σ'(zˡ)<br> | |
| ∂L/∂Wˡ = δˡ (aˡ⁻¹)ᵀ | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🧠 Universal Training Method</div> | |
| <div class="box-content"> | |
| Every modern neural network uses backprop - from CNNs to Transformers to GANs | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🔧 Automatic Differentiation</div> | |
| <div class="box-content"> | |
| PyTorch, TensorFlow implement automatic backprop - you define forward pass, framework does backward | |
| </div> | |
| </div> | |
| <div class="callout tip"> | |
| <div class="callout-title">🎤 Probable Interview Questions</div> | |
| 1. What is the role of bias in a perceptron?<br> | |
| 2. Why can't we use MSE for classification?<br> | |
| 3. Difference between loss function and evaluation metric?<br> | |
| 4. Why is mini-batch GD preferred?<br> | |
| 5. Does backpropagation update weights?<br> | |
| 6. Can gradient descent work without backpropagation?<br> | |
| 7. What happens if learning rate is too high?<br> | |
| 8. How many times does forward propagation occur per epoch?<br> | |
| 9. What happens if we remove bias?<br> | |
| 10. What is the chain rule and why is it essential for backprop? | |
| </div> | |
| `, | |
| math: ` | |
| <h3>The 4 Fundamental Equations of Backprop</h3> | |
| <p>Backpropagation is essentially the chain rule applied iteratively. We define the error signal δ = ∂L/∂z.</p> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Error at Output Layer (L):</strong><br> | |
| δᴸ = ∇ₐL ⊙ σ'(zᴸ)<br> | |
| <span class="formula-caption">Example for MSE: (aᴸ - y) ⊙ σ'(zᴸ)</span></div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Error at Layer l (Backwards):</strong><br> | |
| δˡ = ((Wˡ⁺¹)ᵀ δˡ⁺¹) ⊙ σ'(zˡ)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Gradient w.r.t Bias:</strong><br> | |
| ∂L / ∂bˡ = δˡ</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>Gradient w.r.t Weights:</strong><br> | |
| ∂L / ∂Wˡ = δˡ (aˡ⁻¹)ᵀ</div> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain Walkthrough</div> | |
| Suppose single neuron: z = wx + b, Loss L = (σ(z) - y)²/2<br> | |
| 1. <strong>Forward:</strong> z=2, a=σ(2)≈0.88, y=1, L=0.007<br> | |
| 2. <strong>Backward:</strong><br> | |
| ∂L/∂a = (a-y) = -0.12<br> | |
| ∂a/∂z = σ(z)(1-σ(z)) = 0.88 * 0.12 = 0.1056<br> | |
| δ = ∂L/∂z = -0.12 * 0.1056 = -0.01267<br> | |
| <strong>∂L/∂w = δ * x</strong> | <strong>∂L/∂b = δ</strong> | |
| </div> | |
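| <p>It is worth verifying this walkthrough numerically — a tiny standalone script reproducing the single-neuron example (the text rounds intermediate values, so the last digits differ slightly):</p> | |
| <span class="code-title">📄 backprop_check.py</span><div class="code-block"><span class="keyword">import</span> math | |
| w, x, b, y = <span class="number">1.0</span>, <span class="number">2.0</span>, <span class="number">0.0</span>, <span class="number">1.0</span> <span class="comment"># chosen so z = wx + b = 2</span> | |
| z = w * x + b | |
| a = <span class="number">1</span> / (<span class="number">1</span> + math.exp(-z)) <span class="comment"># sigmoid(2) ≈ 0.8808</span> | |
| L = (a - y)**<span class="number">2</span> / <span class="number">2</span> <span class="comment"># ≈ 0.0071</span> | |
| dL_da = a - y <span class="comment"># ≈ -0.1192</span> | |
| da_dz = a * (<span class="number">1</span> - a) <span class="comment"># ≈ 0.1050</span> | |
| delta = dL_da * da_dz <span class="comment"># ≈ -0.0125</span> | |
| <span class="keyword">print</span>(delta * x, delta) <span class="comment"># dL/dw, dL/db</span> | |
| </div> | |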
| `, | |
| code: ` | |
| <h3>Backpropagation from Scratch</h3> | |
| <p>A complete 2-layer neural network trained with backpropagation using only NumPy. This is the best way to truly understand how gradients flow.</p> | |
| <span class="code-title">📄 backprop_numpy.py</span><div class="code-block"><span class="keyword">import</span> numpy <span class="keyword">as</span> np | |
| np.random.seed(<span class="number">42</span>) | |
| <span class="comment"># --- Data: XOR problem ---</span> | |
| X = np.array([[<span class="number">0</span>,<span class="number">0</span>], [<span class="number">0</span>,<span class="number">1</span>], [<span class="number">1</span>,<span class="number">0</span>], [<span class="number">1</span>,<span class="number">1</span>]]) | |
| y = np.array([[<span class="number">0</span>], [<span class="number">1</span>], [<span class="number">1</span>], [<span class="number">0</span>]]) | |
| <span class="comment"># --- Initialize weights ---</span> | |
| W1 = np.random.randn(<span class="number">2</span>, <span class="number">4</span>) * <span class="number">0.5</span> <span class="comment"># Input(2) -> Hidden(4)</span> | |
| b1 = np.zeros((<span class="number">1</span>, <span class="number">4</span>)) | |
| W2 = np.random.randn(<span class="number">4</span>, <span class="number">1</span>) * <span class="number">0.5</span> <span class="comment"># Hidden(4) -> Output(1)</span> | |
| b2 = np.zeros((<span class="number">1</span>, <span class="number">1</span>)) | |
| lr = <span class="number">0.5</span> | |
| <span class="keyword">def</span> <span class="function">sigmoid</span>(z): | |
| <span class="keyword">return</span> <span class="number">1</span> / (<span class="number">1</span> + np.exp(-z)) | |
| <span class="keyword">for</span> epoch <span class="keyword">in</span> range(<span class="number">10000</span>): | |
| <span class="comment"># ====== FORWARD PASS ======</span> | |
| z1 = X @ W1 + b1 <span class="comment"># (4, 4)</span> | |
| a1 = sigmoid(z1) <span class="comment"># Hidden activation</span> | |
| z2 = a1 @ W2 + b2 <span class="comment"># (4, 1)</span> | |
| a2 = sigmoid(z2) <span class="comment"># Output prediction</span> | |
| <span class="comment"># ====== COMPUTE LOSS (MSE) ======</span> | |
| loss = np.mean((y - a2) ** <span class="number">2</span>) | |
| <span class="comment"># ====== BACKWARD PASS (Chain Rule) ======</span> | |
| <span class="comment"># dL/da2 = -2(y - a2) / n</span> | |
| dL_da2 = -<span class="number">2</span> * (y - a2) / len(y) | |
| <span class="comment"># da2/dz2 = sigmoid'(z2) = a2(1 - a2)</span> | |
| dz2 = dL_da2 * a2 * (<span class="number">1</span> - a2) | |
| <span class="comment"># Gradients for W2, b2</span> | |
| dW2 = a1.T @ dz2 <span class="comment"># (4, 1)</span> | |
| db2 = dz2.sum(axis=<span class="number">0</span>, keepdims=<span class="keyword">True</span>) | |
| <span class="comment"># Propagate to hidden layer</span> | |
| dz1 = (dz2 @ W2.T) * a1 * (<span class="number">1</span> - a1) | |
| dW1 = X.T @ dz1 <span class="comment"># (2, 4)</span> | |
| db1 = dz1.sum(axis=<span class="number">0</span>, keepdims=<span class="keyword">True</span>) | |
| <span class="comment"># ====== UPDATE WEIGHTS ======</span> | |
| W2 -= lr * dW2 | |
| b2 -= lr * db2 | |
| W1 -= lr * dW1 | |
| b1 -= lr * db1 | |
| <span class="keyword">if</span> epoch % <span class="number">2000</span> == <span class="number">0</span>: | |
| <span class="keyword">print</span>(f<span class="string">"Epoch {epoch}: Loss = {loss:.6f}"</span>) | |
| <span class="comment"># Final predictions</span> | |
| <span class="keyword">print</span>(<span class="string">"Predictions:"</span>, a2.round(<span class="number">2</span>).flatten()) | |
| <span class="comment"># Expected: [0, 1, 1, 0] for XOR</span> | |
| </div> | |
| <h3>PyTorch Autograd (Automatic Backprop)</h3> | |
| <span class="code-title">📄 backprop_pytorch.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| <span class="comment"># PyTorch handles backprop automatically via autograd!</span> | |
| x = torch.tensor([<span class="number">2.0</span>], requires_grad=<span class="keyword">True</span>) | |
| y = x**<span class="number">2</span> + <span class="number">3</span>*x + <span class="number">1</span> <span class="comment"># f(x) = x^2 + 3x + 1</span> | |
| y.backward() <span class="comment"># Compute df/dx automatically</span> | |
| <span class="keyword">print</span>(x.grad) <span class="comment"># 7.0 (derivative: 2x + 3, at x=2: 7)</span> | |
| </div> | |
| ` | |
| }, | |
| "regularization": { | |
| overview: ` | |
| <p>Regularization prevents a neural network from <strong>memorizing the training data</strong> instead of learning general patterns. Without it, the network performs brilliantly on training data but fails on new data — this is <strong>overfitting</strong>.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">📚 The Exam Analogy</div> | |
| Imagine a student who memorizes all answers in a textbook word-for-word but doesn't understand the concepts. They'll ace the exact same questions but fail on any new ones. Regularization forces the network to "understand" rather than "memorize." | |
| </div> | |
| <h3>Signs of Overfitting</h3> | |
| <div class="info-box"> | |
| <div class="box-title">🚨 Red Flags</div> | |
| <div class="box-content"> | |
| ✗ Training accuracy: 99%, Validation accuracy: 65% (huge gap)<br> | |
| ✗ Training loss keeps decreasing but validation loss starts increasing<br> | |
| ✗ Model works perfectly on seen data but fails on new examples<br> | |
| ✗ Small dataset with a very large model (too many parameters for too little data) | |
| </div> | |
| </div> | |
| <h3>Regularization Techniques</h3> | |
| <div class="list-item"><div class="list-num">1</div><div><strong>L2 Regularization (Weight Decay):</strong> Adds $\\lambda \\sum w^2$ to the loss. Penalizes large weights, encouraging smaller, more distributed weights.</div></div> | |
| <div class="list-item"><div class="list-num">2</div><div><strong>L1 Regularization:</strong> Adds $\\lambda \\sum |w|$ to the loss. Encourages sparsity — some weights become exactly zero.</div></div> | |
| <div class="list-item"><div class="list-num">3</div><div><strong>Dropout:</strong> During training, randomly sets neurons to 0 with probability $p$. Forces the network to be redundant — no single neuron can be relied upon.</div></div> | |
| <div class="list-item"><div class="list-num">4</div><div><strong>Data Augmentation:</strong> Artificially increase training data by applying random transformations (flips, rotations, crops). The best regularizer is <em>more data</em>.</div></div> | |
| <div class="list-item"><div class="list-num">5</div><div><strong>Early Stopping:</strong> Monitor validation loss during training. Stop when it starts increasing even though training loss continues to decrease.</div></div> | |
| <div class="list-item"><div class="list-num">6</div><div><strong>Batch Normalization:</strong> While not designed as regularization, BN has a regularizing effect because mini-batch statistics add noise.</div></div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🎯 Best Practices</div> | |
| <div class="box-content"> | |
| • Start with Dropout (0.5) for hidden layers<br> | |
| • Add L2 if still overfitting (λ=0.01, 0.001)<br> | |
| • Always use Early Stopping<br> | |
| • Data Augmentation for images | |
| </div> | |
| </div> | |
| <h3>Dropout vs Batch Normalization</h3> | |
| <table> | |
| <tr><th>Feature</th><th>Dropout</th><th>Batch Normalization</th></tr> | |
| <tr><td>Purpose</td><td>Regularization</td><td>Faster training + mild regularization</td></tr> | |
| <tr><td>Mechanism</td><td>Randomly drops neurons</td><td>Normalizes layer inputs</td></tr> | |
| <tr><td>Training vs Test</td><td>Different behavior</td><td>Different behavior</td></tr> | |
| <tr><td>Combined?</td><td colspan="2">Yes, use BatchNorm <em>before</em> Dropout</td></tr> | |
| </table> | |
| <div class="callout tip"> | |
| <div class="callout-title">🎤 Probable Interview Questions</div> | |
| 1. Why can't we initialize all weights to zero?<br> | |
| 2. Difference between Xavier and He initialization?<br> | |
| 3. What is the vanishing gradient problem?<br> | |
| 4. How does Dropout prevent overfitting?<br> | |
| 5. Can we use Dropout at test time?<br> | |
| 6. Why is He initialization used with ReLU?<br> | |
| 7. What happens if weights are too large initially?<br> | |
| 8. Does Batch Normalization eliminate the need for Dropout?<br> | |
| 9. L1 vs L2 regularization — when to use each?<br> | |
| 10. What is the exploding gradient problem and how to fix it? | |
| </div> | |
| `, | |
| math: ` | |
| <h3>L2 Regularization (Weight Decay)</h3> | |
| <p>Add a penalty proportional to the squared magnitude of weights.</p> | |
| <div class="formula" style="font-size: 1.2rem; text-align: center; margin: 20px 0; background: rgba(0, 212, 255, 0.08); padding: 25px; border-radius: 8px;"> | |
| <strong>L_total = L_data + λ × Σ w²</strong> | |
| </div> | |
| <h4>Gradient with L2:</h4> | |
| <div class="formula"> | |
| ∂L_total/∂w = ∂L_data/∂w + 2λw<br><br> | |
| Update rule becomes:<br> | |
| w = w - α(∇L + 2λw) = w(1 - 2αλ) - α∇L | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: L2 Effect</div> | |
| <strong>Without L2:</strong> w = 5.0, ∇L = 0.1, α = 0.1<br> | |
| w_new = 5.0 - 0.1 × 0.1 = 4.99<br><br> | |
| <strong>With L2 (λ=0.01):</strong><br> | |
| w_new = 5.0 × (1 - 2×0.1×0.01) - 0.1 × 0.1<br> | |
| w_new = 5.0 × 0.998 - 0.01 = 4.99 - 0.01 = <strong>4.98</strong><br><br> | |
| The weight shrinks faster! Large weights shrink most. | |
| </div> | |
| <h3>L1 Regularization (Lasso)</h3> | |
| <p>Adds penalty proportional to absolute value of weights - encourages sparsity.</p> | |
| <div class="formula"> | |
| L_total = L_data + λ × Σ |w|<br><br> | |
| Gradient: ∂L/∂w = ∇L_data + λ × sign(w) | |
| </div> | |
| <div class="callout tip"> | |
| <div class="callout-title">💡 L1 vs L2</div> | |
| • <strong>L1:</strong> Creates sparse weights (many zeros) → Feature selection<br> | |
| • <strong>L2:</strong> Small but non-zero weights → More stable<br> | |
| • <strong>Elastic Net:</strong> λ₁|w| + λ₂w² (both!) | |
| </div> | |
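| <p>PyTorch optimizers expose L2 through <code>weight_decay</code>, but there is no built-in L1 switch — the usual pattern is to add the penalty to the loss yourself. A minimal self-contained sketch (the λ value is illustrative; tune it per task):</p> | |
| <span class="code-title">📄 l1_penalty.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| model = nn.Linear(<span class="number">10</span>, <span class="number">1</span>) | |
| x, y = torch.randn(<span class="number">8</span>, <span class="number">10</span>), torch.randn(<span class="number">8</span>, <span class="number">1</span>) | |
| l1_lambda = <span class="number">1e-4</span> <span class="comment"># penalty strength (illustrative)</span> | |
| l1_penalty = sum(p.abs().sum() <span class="keyword">for</span> p <span class="keyword">in</span> model.parameters()) | |
| loss = nn.functional.mse_loss(model(x), y) + l1_lambda * l1_penalty | |
| loss.backward() <span class="comment"># gradients now include λ × sign(w)</span> | |
| </div> | |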
| <h3>Dropout Mathematics</h3> | |
| <p>Randomly set neurons to zero with probability p during training.</p> | |
| <div class="formula"> | |
| <strong>Training:</strong><br> | |
| r ~ Bernoulli(1-p) [mask of 0s and 1s]<br> | |
| ã = a ⊙ r [element-wise multiply]<br><br> | |
| <strong>Inference (scaling):</strong><br> | |
| ã = a × (1-p) [scale by keep probability] | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Dropout Example</div> | |
| <strong>Layer output (4 neurons):</strong> a = [1.0, 2.0, 0.5, 1.5]<br> | |
| <strong>Dropout p = 0.5:</strong> r = [1, 0, 1, 0] (random mask)<br><br> | |
| <strong>Training output:</strong> ã = [1.0, 0, 0.5, 0]<br> | |
| <strong>Inference output:</strong> ã = [0.5, 1.0, 0.25, 0.75]<br><br> | |
| During inference, we scale by (1-p)=0.5 to maintain expected value! | |
| </div> | |
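| <p>Note that modern frameworks, PyTorch included, actually implement <strong>inverted dropout</strong>: they scale the kept activations by 1/(1-p) during <em>training</em> and leave inference untouched — same expected value, no test-time work. A minimal NumPy sketch:</p> | |
| <span class="code-title">📄 inverted_dropout.py</span><div class="code-block"><span class="keyword">import</span> numpy <span class="keyword">as</span> np | |
| <span class="keyword">def</span> <span class="function">dropout</span>(a, p=<span class="number">0.5</span>, training=<span class="keyword">True</span>): | |
|     <span class="string">"""Inverted dropout: scale kept activations by 1/(1-p) at train time."""</span> | |
|     <span class="keyword">if</span> <span class="keyword">not</span> training: | |
|         <span class="keyword">return</span> a <span class="comment"># inference: identity</span> | |
|     mask = np.random.rand(*a.shape) > p <span class="comment"># keep with probability 1-p</span> | |
|     <span class="keyword">return</span> a * mask / (<span class="number">1</span> - p) | |
| a = np.array([<span class="number">1.0</span>, <span class="number">2.0</span>, <span class="number">0.5</span>, <span class="number">1.5</span>]) | |
| <span class="keyword">print</span>(dropout(a)) <span class="comment"># e.g. [2. 0. 1. 0.] — kept units doubled</span> | |
| </div> | |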
| <h3>Why Dropout Works</h3> | |
| <div class="formula"> | |
| Dropout ≈ Training an ensemble of 2ⁿ sub-networks<br> | |
| (where n = number of neurons that can be dropped)<br><br> | |
| Each forward pass is a different architecture! | |
| </div> | |
| <h3>Weight Initialization Mathematics</h3> | |
| <h4>Xavier Initialization (for Sigmoid/Tanh)</h4> | |
| <div class="formula"> | |
| w ~ N(0, σ²) where σ² = 2 / (n_in + n_out)<br><br> | |
| Goal: Keep Var(output) ≈ Var(input) across layers | |
| </div> | |
| <h4>He Initialization (for ReLU)</h4> | |
| <div class="formula"> | |
| w ~ N(0, σ²) where σ² = 2 / n_in<br><br> | |
| ReLU zeros out ~50% of activations, so variance is halved → multiply by 2 to compensate! | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Why Zero Init Fails</div> | |
| If all weights = 0, every neuron computes the <strong>same output</strong>.<br> | |
| All gradients are <strong>identical</strong> → All weights update the same way.<br> | |
| Result: All neurons stay identical forever! The network is as good as <strong>1 neuron</strong>.<br><br> | |
| <strong>Random Init:</strong> w ~ N(0, 0.01) works for shallow networks but gradients shrink exponentially in deep ones.<br> | |
| <strong>Xavier:</strong> Calibrates variance based on layer width → stable gradients for Sigmoid/Tanh.<br> | |
| <strong>He:</strong> Accounts for ReLU zeroing out negative half → default for modern networks. | |
| </div> | |
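| <p>In PyTorch these schemes live in <code>torch.nn.init</code> — a short sketch applying He init to a ReLU layer and Xavier to a tanh layer:</p> | |
| <span class="code-title">📄 weight_init.py</span><div class="code-block"><span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| relu_layer = nn.Linear(<span class="number">512</span>, <span class="number">256</span>) | |
| nn.init.kaiming_normal_(relu_layer.weight, nonlinearity=<span class="string">'relu'</span>) <span class="comment"># He: std = √(2/n_in)</span> | |
| nn.init.zeros_(relu_layer.bias) | |
| tanh_layer = nn.Linear(<span class="number">512</span>, <span class="number">256</span>) | |
| nn.init.xavier_normal_(tanh_layer.weight) <span class="comment"># Xavier: var = 2/(n_in + n_out)</span> | |
| <span class="keyword">print</span>(relu_layer.weight.std().item()) <span class="comment"># ≈ √(2/512) ≈ 0.0625</span> | |
| </div> | |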
| `, | |
| code: ` | |
| <h3>Regularization Techniques in PyTorch</h3> | |
| <p>Complete examples of the most important regularization methods.</p> | |
| <span class="code-title">📄 regularization_pytorch.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| <span class="keyword">import</span> torchvision.transforms <span class="keyword">as</span> T | |
| <span class="comment"># ====== 1. Dropout ======</span> | |
| <span class="keyword">class</span> <span class="function">ModelWithDropout</span>(nn.Module): | |
| <span class="keyword">def</span> <span class="function">__init__</span>(self): | |
| super().__init__() | |
| self.net = nn.Sequential( | |
| nn.Linear(<span class="number">784</span>, <span class="number">256</span>), | |
| nn.ReLU(), | |
| nn.Dropout(p=<span class="number">0.5</span>), <span class="comment"># 50% of neurons randomly zeroed</span> | |
| nn.Linear(<span class="number">256</span>, <span class="number">128</span>), | |
| nn.ReLU(), | |
| nn.Dropout(p=<span class="number">0.3</span>), <span class="comment"># 30% dropout</span> | |
| nn.Linear(<span class="number">128</span>, <span class="number">10</span>), | |
| ) | |
| <span class="keyword">def</span> <span class="function">forward</span>(self, x): | |
| <span class="keyword">return</span> self.net(x) | |
| model = ModelWithDropout() | |
| model.train() <span class="comment"># Dropout active</span> | |
| model.eval() <span class="comment"># Dropout disabled (IMPORTANT for inference!)</span> | |
| <span class="comment"># ====== 2. L2 Regularization (Weight Decay) ======</span> | |
| optimizer = torch.optim.Adam( | |
| model.parameters(), | |
| lr=<span class="number">0.001</span>, | |
| weight_decay=<span class="number">0.01</span>, <span class="comment"># L2 penalty lambda</span> | |
| ) | |
| <span class="comment"># ====== 3. Data Augmentation ======</span> | |
| train_transforms = T.Compose([ | |
| T.RandomHorizontalFlip(p=<span class="number">0.5</span>), | |
| T.RandomRotation(degrees=<span class="number">15</span>), | |
| T.RandomResizedCrop(size=<span class="number">224</span>, scale=(<span class="number">0.8</span>, <span class="number">1.0</span>)), | |
| T.ColorJitter(brightness=<span class="number">0.2</span>, contrast=<span class="number">0.2</span>), | |
| T.ToTensor(), | |
| T.Normalize(mean=[<span class="number">0.485</span>, <span class="number">0.456</span>, <span class="number">0.406</span>], | |
| std=[<span class="number">0.229</span>, <span class="number">0.224</span>, <span class="number">0.225</span>]), | |
| ]) | |
| <span class="comment"># ====== 4. Early Stopping ======</span> | |
| best_val_loss = float(<span class="string">'inf'</span>) | |
| patience = <span class="number">5</span> | |
| counter = <span class="number">0</span> | |
| <span class="keyword">for</span> epoch <span class="keyword">in</span> range(<span class="number">100</span>): | |
| train_loss = train_one_epoch() | |
| val_loss = validate() | |
| <span class="keyword">if</span> val_loss < best_val_loss: | |
| best_val_loss = val_loss | |
| counter = <span class="number">0</span> | |
| torch.save(model.state_dict(), <span class="string">'best_model.pt'</span>) | |
| <span class="keyword">else</span>: | |
| counter += <span class="number">1</span> | |
| <span class="keyword">if</span> counter >= patience: | |
| <span class="keyword">print</span>(f<span class="string">"Early stopping at epoch {epoch}"</span>) | |
| <span class="keyword">break</span> | |
| </div> | |
| ` | |
| }, | |
| "batch-norm": { | |
| overview: ` | |
| <p>Batch Normalization (BatchNorm) was one of the most impactful innovations in deep learning. It <strong>normalizes the inputs to each layer</strong>, which stabilizes and dramatically accelerates training. Before BatchNorm, training deep networks was extremely difficult.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">🎯 The Core Problem BatchNorm Solves</div> | |
| As data flows through layers, the distribution of activations keeps shifting — this is called <strong>Internal Covariate Shift</strong>. Each layer has to constantly adapt to new input distributions, slowing down training. BatchNorm fixes this by normalizing each layer's input to have mean=0 and variance=1. | |
| </div> | |
| <h3>How It Works (Step by Step)</h3> | |
| <div class="formula" style="text-align:center;"> | |
| Given a mini-batch $B = \\{x_1, ..., x_m\\}$:<br><br> | |
| <strong>Step 1 — Mean:</strong> $$\\mu_B = \\frac{1}{m}\\sum_{i=1}^{m} x_i$$ | |
| <strong>Step 2 — Variance:</strong> $$\\sigma_B^2 = \\frac{1}{m}\\sum_{i=1}^{m} (x_i - \\mu_B)^2$$ | |
| <strong>Step 3 — Normalize:</strong> $$\\hat{x}_i = \\frac{x_i - \\mu_B}{\\sqrt{\\sigma_B^2 + \\epsilon}}$$ | |
| <strong>Step 4 — Scale and Shift:</strong> $$y_i = \\gamma \\hat{x}_i + \\beta$$ | |
| </div> | |
| <p>$\\gamma$ and $\\beta$ are <strong>learnable parameters</strong>. They allow the network to undo the normalization if that's optimal — giving the network the flexibility to decide the best distribution for each layer.</p> | |
| <div class="info-box"> | |
| <div class="box-title">Benefits of BatchNorm</div> | |
| <div class="box-content"> | |
| ✓ <strong>Train 5-10x faster</strong> — allows much higher learning rates<br> | |
| ✓ <strong>Reduces sensitivity</strong> to weight initialization<br> | |
| ✓ <strong>Acts as regularization</strong> — mini-batch noise has a dropout-like effect<br> | |
| ✓ <strong>Enables deeper networks</strong> — reduces vanishing/exploding gradients | |
| </div> | |
| </div> | |
| <div class="callout warning"> | |
| <div class="callout-title">⚠️ BatchNorm Gotchas</div> | |
| <strong>1. Train vs. Eval:</strong> During training, BN uses mini-batch statistics. During inference, it uses running averages computed during training. Always call <code>model.eval()</code> before inference!<br> | |
| <strong>2. Small batch sizes:</strong> BN performs poorly with batch_size < 16 because statistics are noisy. Use LayerNorm instead (as in Transformers).<br> | |
| <strong>3. Placement:</strong> Usually placed <em>after</em> the linear/conv layer and <em>before</em> the activation: Linear → BN → ReLU. | |
| </div> | |
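| <p>A quick PyTorch sketch of the canonical placement (Linear → BN → ReLU) and the train/eval switch from the gotchas above:</p> | |
| <span class="code-title">📄 batchnorm_usage.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| block = nn.Sequential( | |
|     nn.Linear(<span class="number">128</span>, <span class="number">64</span>), | |
|     nn.BatchNorm1d(<span class="number">64</span>), <span class="comment"># normalizes each feature over the batch</span> | |
|     nn.ReLU(), | |
| ) | |
| x = torch.randn(<span class="number">32</span>, <span class="number">128</span>) | |
| block.train() <span class="comment"># uses mini-batch statistics</span> | |
| y_train = block(x) | |
| block.eval() <span class="comment"># uses running mean/var collected during training</span> | |
| y_eval = block(x) | |
| </div> | |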
| `, | |
| math: ` | |
| <h3>The 4 Steps of Batch Normalization</h3> | |
| <p>Calculated per mini-batch B = {x₁, ..., xₘ}:</p> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Mini-Batch Mean:</strong> μ_B = (1/m) Σ xᵢ</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Mini-Batch Variance:</strong> σ²_B = (1/m) Σ (xᵢ - μ_B)²</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Normalize:</strong> x̂ᵢ = (xᵢ - μ_B) / √(σ²_B + ε)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>Scale and Shift:</strong> yᵢ = γ x̂ᵢ + β</div> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Why γ and β?</div> | |
| If we always forced activations to mean 0 and variance 1, we might restrict the representational power of the network. <br> | |
| γ and β allow the network to <strong>undo</strong> the normalization if that's optimal: <br> | |
| If γ = √(σ²) and β = μ, we get the original data back! | |
| </div> | |
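| <p>The four steps translate directly into NumPy — a training-mode forward pass only (running statistics and the backward pass are omitted for brevity):</p> | |
| <span class="code-title">📄 batchnorm_forward.py</span><div class="code-block"><span class="keyword">import</span> numpy <span class="keyword">as</span> np | |
| <span class="keyword">def</span> <span class="function">batchnorm_forward</span>(x, gamma, beta, eps=<span class="number">1e-5</span>): | |
|     <span class="string">"""x: (batch, features). Returns the normalized, scaled output."""</span> | |
|     mu = x.mean(axis=<span class="number">0</span>) <span class="comment"># Step 1: mini-batch mean</span> | |
|     var = x.var(axis=<span class="number">0</span>) <span class="comment"># Step 2: mini-batch variance</span> | |
|     x_hat = (x - mu) / np.sqrt(var + eps) <span class="comment"># Step 3: normalize</span> | |
|     <span class="keyword">return</span> gamma * x_hat + beta <span class="comment"># Step 4: scale and shift</span> | |
| x = np.random.randn(<span class="number">32</span>, <span class="number">4</span>) * <span class="number">5</span> + <span class="number">3</span> | |
| y = batchnorm_forward(x, gamma=np.ones(<span class="number">4</span>), beta=np.zeros(<span class="number">4</span>)) | |
| <span class="keyword">print</span>(y.mean(axis=<span class="number">0</span>).round(<span class="number">3</span>), y.std(axis=<span class="number">0</span>).round(<span class="number">3</span>)) <span class="comment"># ≈ 0 and ≈ 1</span> | |
| </div> | |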
| ` | |
| }, | |
| "cv-intro": { | |
| overview: ` | |
| <h3>Why Computer Vision Needs Special Architectures</h3> | |
| <p><strong>Problem:</strong> Images have huge dimensionality</p> | |
| <ul> | |
| <li>224×224 RGB image = 150,528 input features</li> | |
| <li>Fully connected layer with 1000 neurons = 150M parameters!</li> | |
| <li>Result: Overfitting, slow training, memory issues</li> | |
| </ul> | |
| <h3>Solution: Convolutional Neural Networks</h3> | |
| <ul> | |
| <li><strong>Weight Sharing:</strong> Same filter applied everywhere (1000x fewer parameters)</li> | |
| <li><strong>Local Connectivity:</strong> Neurons see small patches</li> | |
| <li><strong>Translation Invariance:</strong> Detect cat anywhere in image</li> | |
| </ul> | |
| `, | |
| concepts: ` | |
| <h3>Why CNNs Beat Fully Connected</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Parameter Efficiency:</strong> 1000× fewer parameters through weight sharing</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Translation Equivariance:</strong> Same object → same activation regardless of position</div> | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">📸 Real-World CV</div> | |
| <div class="box-content"> | |
| Face ID, medical imaging (MRI/CT), autonomous drone navigation, manufacturing defect detection, and satellite imagery analysis | |
| </div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>The Parameter Explosion Problem</h3> | |
| <p>Why do standard Neural Networks fail on images? Let's calculate the parameters for a small image.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: MLP vs Images</div> | |
| 1. <strong>Input:</strong> 224 × 224 pixels with 3 color channels (RGB)<br> | |
| 2. <strong>Input Size:</strong> 224 × 224 × 3 = <strong>150,528 features</strong><br> | |
| 3. <strong>Hidden Layer:</strong> Suppose we want just 1000 neurons.<br> | |
| 4. <strong>Weight Matrix:</strong> [1000, 150528]<br> | |
| 5. <strong>Total Weights:</strong> 1000 × 150,528 ≈ <strong>150 million parameters</strong> for just ONE layer! | |
| </div> | |
| <h3>The CNN Solution: Weight Sharing</h3> | |
| <p>Instead of every neuron looking at every pixel, we share one filter across all positions (<strong>translation equivariance</strong>). If an edge detector works in the top-left, it will work in the bottom-right.</p> | |
| <div class="formula"> | |
| Total Params = (Kernel_H × Kernel_W × Input_Channels) × Num_Filters<br> | |
| <br> | |
| For a 3x3 filter: (3 × 3 × 3) × 64 = <strong>1,728 parameters</strong><br> | |
| Reduction: 150M / 1.7k ≈ <strong>86,000× more efficient!</strong> | |
| </div> | |
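| <p>The comparison is easy to reproduce in PyTorch — counting parameters of a dense layer on raw pixels vs. a single conv layer:</p> | |
| <span class="code-title">📄 param_comparison.py</span><div class="code-block"><span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| fc = nn.Linear(<span class="number">224</span> * <span class="number">224</span> * <span class="number">3</span>, <span class="number">1000</span>) <span class="comment"># dense layer on raw pixels</span> | |
| conv = nn.Conv2d(<span class="number">3</span>, <span class="number">64</span>, kernel_size=<span class="number">3</span>) <span class="comment"># 64 filters of 3×3×3</span> | |
| count = <span class="keyword">lambda</span> m: sum(p.numel() <span class="keyword">for</span> p <span class="keyword">in</span> m.parameters()) | |
| <span class="keyword">print</span>(f<span class="string">"FC:   {count(fc):,}"</span>) <span class="comment"># 150,529,000 (incl. biases)</span> | |
| <span class="keyword">print</span>(f<span class="string">"Conv: {count(conv):,}"</span>) <span class="comment"># 1,792</span> | |
| </div> | |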
| ` | |
| }, | |
| "pooling": { | |
| overview: ` | |
| <h3>Pooling Layers</h3> | |
| <p>Pooling reduces spatial dimensions while retaining important information.</p> | |
| <table> | |
| <tr> | |
| <th>Type</th> | |
| <th>Operation</th> | |
| <th>Use Case</th> | |
| </tr> | |
| <tr> | |
| <td>Max Pooling</td> | |
| <td>Take maximum value</td> | |
| <td><strong>Most common</strong> - preserves strong activations</td> | |
| </tr> | |
| <tr> | |
| <td>Average Pooling</td> | |
| <td>Take average</td> | |
| <td>Smoother, less common (used in final layers)</td> | |
| </tr> | |
| <tr> | |
| <td>Global Pooling</td> | |
| <td>Pool entire feature map</td> | |
| <td>Replace FC layers (reduces parameters)</td> | |
| </tr> | |
| </table> | |
| <div class="callout tip"> | |
| <div class="callout-title">✅ Benefits</div> | |
| • Reduces spatial size (faster computation)<br> | |
| • Adds translation invariance<br> | |
| • Prevents overfitting<br> | |
| • Typical: 2×2 window, stride 2 (halves dimensions) | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Pooling Mechanics</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Downsampling:</strong> Reduces H×W by pooling factor (typically 2×)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>No Learnable Parameters:</strong> Fixed operation (max/average)</div> | |
| </div> | |
| <div class="formula"> | |
| Example: 4×4 input → 2×2 max pooling → 2×2 output | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🎯 Standard CNN Component</div> | |
| <div class="box-content"> | |
| Used after conv layers in AlexNet, VGG, and most classic CNNs to progressively reduce spatial dimensions | |
| </div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Max Pooling: Winning Signal Selection</h3> | |
| <p>Pooling operations are non-parametric (no weights). They simply select or average values within a local window.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: 2x2 Max Pooling</div> | |
| <strong>Input (4×4):</strong><br> | |
| [1 3 | 2 1]<br> | |
| [5 1 | 0 2]<br> | |
| -----------<br> | |
| [1 1 | 8 2]<br> | |
| [0 2 | 4 1]<br> | |
| <br> | |
| <strong>Output (2×2):</strong><br> | |
| Step 1: max(1, 3, 5, 1) = <strong>5</strong><br> | |
| Step 2: max(2, 1, 0, 2) = <strong>2</strong><br> | |
| Step 3: max(1, 1, 0, 2) = <strong>2</strong><br> | |
| Step 4: max(8, 2, 4, 1) = <strong>8</strong><br> | |
| <strong>Final:</strong> [5 2] / [2 8] | |
| </div> | |
| <h3>Backprop through Pooling</h3> | |
| <div class="list-item"> | |
| <div class="list-num">💡</div> | |
| <div><strong>Max Pooling:</strong> Gradient is routed ONLY to the neuron that had the maximum value. All others get 0.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">💡</div> | |
| <div><strong>Average Pooling:</strong> Gradient is distributed evenly among all neurons in the window.</div> | |
| </div> | |
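| <p>A minimal NumPy sketch of 2×2 max pooling with stride 2, reproducing the worked example above:</p> | |
| <span class="code-title">📄 max_pool.py</span><div class="code-block"><span class="keyword">import</span> numpy <span class="keyword">as</span> np | |
| <span class="keyword">def</span> <span class="function">max_pool_2x2</span>(x): | |
|     <span class="string">"""x: (H, W) with even H, W. Returns (H/2, W/2) of block maxima."""</span> | |
|     h, w = x.shape | |
|     <span class="keyword">return</span> x.reshape(h // <span class="number">2</span>, <span class="number">2</span>, w // <span class="number">2</span>, <span class="number">2</span>).max(axis=(<span class="number">1</span>, <span class="number">3</span>)) | |
| x = np.array([[<span class="number">1</span>, <span class="number">3</span>, <span class="number">2</span>, <span class="number">1</span>], | |
|               [<span class="number">5</span>, <span class="number">1</span>, <span class="number">0</span>, <span class="number">2</span>], | |
|               [<span class="number">1</span>, <span class="number">1</span>, <span class="number">8</span>, <span class="number">2</span>], | |
|               [<span class="number">0</span>, <span class="number">2</span>, <span class="number">4</span>, <span class="number">1</span>]]) | |
| <span class="keyword">print</span>(max_pool_2x2(x)) <span class="comment"># [[5 2], [2 8]]</span> | |
| </div> | |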
| ` | |
| }, | |
| "cnn-basics": { | |
| overview: ` | |
| <h3>CNN Architecture Pattern</h3> | |
| <div class="formula"> | |
| Input → [Conv → ReLU → Pool] × N → Flatten → FC → Softmax | |
| </div> | |
| <h3>Typical Layering Strategy</h3> | |
| <ul> | |
| <li><strong>Early Layers:</strong> Detect low-level features (edges, textures) - small filters (3×3)</li> | |
| <li><strong>Middle Layers:</strong> Combine into patterns, parts - more filters, same size</li> | |
| <li><strong>Deep Layers:</strong> High-level concepts (faces, objects) - many filters</li> | |
| <li><strong>Final FC Layers:</strong> Classification based on learned features</li> | |
| </ul> | |
| <div class="callout insight"> | |
| <div class="callout-title">💡 Filter Progression</div> | |
| Layer 1: 32 filters (edges)<br> | |
| Layer 2: 64 filters (textures)<br> | |
| Layer 3: 128 filters (patterns)<br> | |
| Layer 4: 256 filters (parts)<br> | |
| Common pattern: double filters after each pooling | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Module Design Principles</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Spatial Reduction:</strong> Progressively downsample (224→112→56→28...)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Channel Expansion:</strong> Increase filters as spatial dims decrease</div> | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🎯 All Modern Vision Models</div> | |
| <div class="box-content"> | |
| This pattern forms the backbone of ResNet, MobileNet, EfficientNet - fundamental CNN design | |
| </div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>1. The Golden Formula for Output Size</h3> | |
| <p>Given Input (W), Filter Size (F), Padding (P), and Stride (S):</p> | |
| <div class="formula" style="font-size: 1.2rem; text-align: center; margin: 20px 0;"> | |
| Output Size = ⌊(W - F + 2P) / S⌋ + 1 | |
| </div> | |
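| <p>A two-line helper makes the formula easy to experiment with:</p> | |
| <span class="code-title">📄 conv_out.py</span><div class="code-block"><span class="keyword">def</span> <span class="function">conv_out</span>(w, f, p=<span class="number">0</span>, s=<span class="number">1</span>): | |
|     <span class="string">"""Output width for input w, filter f, padding p, stride s."""</span> | |
|     <span class="keyword">return</span> (w - f + <span class="number">2</span> * p) // s + <span class="number">1</span> | |
| <span class="keyword">print</span>(conv_out(<span class="number">224</span>, <span class="number">3</span>, p=<span class="number">1</span>)) <span class="comment"># 224 ("same" padding)</span> | |
| <span class="keyword">print</span>(conv_out(<span class="number">224</span>, <span class="number">7</span>, p=<span class="number">3</span>, s=<span class="number">2</span>)) <span class="comment"># 112 (ResNet-style stem)</span> | |
| </div> | |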
| <h3>2. Parameter Count Calculation</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Parameters PER Filter:</strong> (F × F × C_in) + 1 (bias)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Total Parameters:</strong> N_filters × ((F × F × C_in) + 1)</div> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain Calculation</div> | |
| <strong>Input:</strong> 224x224x3 | <strong>Layer:</strong> 64 filters of 3x3 | <strong>Stride:</strong> 1 | <strong>Padding:</strong> 1<br> | |
| 1. <strong>Output Size:</strong> (224 - 3 + 2(1))/1 + 1 = 224 (Same Padding)<br> | |
| 2. <strong>Params:</strong> 64 * (3 * 3 * 3 + 1) = 64 * 28 = <strong>1,792 parameters</strong><br> | |
| 3. <strong>FLOPs:</strong> 224 * 224 * 1792 ≈ <strong>90 Million operations</strong> per image! | |
| </div> | |
| `, | |
| code: ` | |
| <h3>Building a CNN from Scratch in PyTorch</h3> | |
| <p>A complete CNN for classifying CIFAR-10 images (32×32 RGB) into 10 categories.</p> | |
| <span class="code-title">📄 cnn_cifar10.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| <span class="keyword">import</span> torch.nn.functional <span class="keyword">as</span> F | |
| <span class="keyword">class</span> <span class="function">SimpleCNN</span>(nn.Module): | |
| <span class="string">""" | |
| Architecture: | |
| Input (3, 32, 32) | |
| -> Conv(3->32, 3x3) -> BN -> ReLU -> MaxPool(2x2) -> (32, 16, 16) | |
| -> Conv(32->64, 3x3) -> BN -> ReLU -> MaxPool(2x2) -> (64, 8, 8) | |
| -> Conv(64->128, 3x3) -> BN -> ReLU -> MaxPool(2x2) -> (128, 4, 4) | |
| -> Flatten -> FC(2048, 256) -> Dropout -> FC(256, 10) | |
| """</span> | |
| <span class="keyword">def</span> <span class="function">__init__</span>(self): | |
| super().__init__() | |
| <span class="comment"># Convolutional layers</span> | |
| self.conv1 = nn.Conv2d(<span class="number">3</span>, <span class="number">32</span>, kernel_size=<span class="number">3</span>, padding=<span class="number">1</span>) | |
| self.bn1 = nn.BatchNorm2d(<span class="number">32</span>) | |
| self.conv2 = nn.Conv2d(<span class="number">32</span>, <span class="number">64</span>, kernel_size=<span class="number">3</span>, padding=<span class="number">1</span>) | |
| self.bn2 = nn.BatchNorm2d(<span class="number">64</span>) | |
| self.conv3 = nn.Conv2d(<span class="number">64</span>, <span class="number">128</span>, kernel_size=<span class="number">3</span>, padding=<span class="number">1</span>) | |
| self.bn3 = nn.BatchNorm2d(<span class="number">128</span>) | |
| self.pool = nn.MaxPool2d(<span class="number">2</span>, <span class="number">2</span>) <span class="comment"># Halves spatial dimensions</span> | |
| <span class="comment"># Fully connected layers</span> | |
| self.fc1 = nn.Linear(<span class="number">128</span> * <span class="number">4</span> * <span class="number">4</span>, <span class="number">256</span>) | |
| self.dropout = nn.Dropout(<span class="number">0.5</span>) | |
| self.fc2 = nn.Linear(<span class="number">256</span>, <span class="number">10</span>) | |
| <span class="keyword">def</span> <span class="function">forward</span>(self, x): | |
| x = self.pool(F.relu(self.bn1(self.conv1(x)))) <span class="comment"># (B, 32, 16, 16)</span> | |
| x = self.pool(F.relu(self.bn2(self.conv2(x)))) <span class="comment"># (B, 64, 8, 8)</span> | |
| x = self.pool(F.relu(self.bn3(self.conv3(x)))) <span class="comment"># (B, 128, 4, 4)</span> | |
| x = x.view(x.size(<span class="number">0</span>), -<span class="number">1</span>) <span class="comment"># Flatten</span> | |
| x = self.dropout(F.relu(self.fc1(x))) | |
| x = self.fc2(x) | |
| <span class="keyword">return</span> x | |
| <span class="comment"># --- Training Setup ---</span> | |
| model = SimpleCNN() | |
| <span class="keyword">print</span>(f<span class="string">"Total parameters: {sum(p.numel() for p in model.parameters()):,}"</span>) | |
| <span class="comment"># Total parameters: ~600K (tiny by today's standards)</span> | |
| criterion = nn.CrossEntropyLoss() | |
| optimizer = torch.optim.Adam(model.parameters(), lr=<span class="number">0.001</span>) | |
| </div> | |
| ` | |
| }, | |
| "viz-filters": { | |
| overview: ` | |
| <h3>What CNNs Learn</h3> | |
| <p>CNN filters automatically learn hierarchical visual features:</p> | |
| <h3>Layer-by-Layer Visualization</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Layer 1:</strong> Edges and colors (horizontal, vertical, diagonal lines)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Layer 2:</strong> Textures and patterns (corners, curves, simple shapes)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Layer 3:</strong> Object parts (eyes, wheels, windows)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>Layer 4-5:</strong> Whole objects (faces, cars, animals)</div> | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Visualization Techniques</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Activation Maximization:</strong> Find input that maximizes filter response</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Grad-CAM:</strong> Highlight important regions for predictions</div> | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🔍 Model Interpretability</div> | |
| <div class="box-content"> | |
| Understanding what CNNs learn helps debug failures, build trust, and improve architecture design | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🎨 Art & Style Transfer</div> | |
| <div class="box-content"> | |
| Filter visualizations inspired neural style transfer (VGG features) | |
| </div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Activation Maximization</h3> | |
| <p>Find the input x* that maximizes a specific neuron's activation.</p> | |
| <div class="formula" style="font-size: 1.2rem; text-align: center; margin: 20px 0; background: rgba(0, 212, 255, 0.08); padding: 25px; border-radius: 8px;"> | |
| <strong>x* = argmax_x [a_ij(x) - λ||x||²]</strong> | |
| </div> | |
| <h4>Gradient Ascent on Input:</h4> | |
| <div class="formula"> | |
| x_{t+1} = x_t + α × ∂a_ij/∂x<br><br> | |
| Where:<br> | |
| • a_ij = activation of neuron (i,j) in layer l<br> | |
| • α = step size<br> | |
| • λ||x||² = regularization to keep input natural-looking | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Generate What a Filter "Sees"</div> | |
| <strong>Goal:</strong> Find image that maximally activates Conv1 filter #5<br><br> | |
| 1. Start with x = random noise (224×224×3)<br> | |
| 2. Forward pass → get activation a₅ at filter 5<br> | |
| 3. Backward pass: ∂a₅/∂x (gradient of activation w.r.t. input)<br> | |
| 4. Update: x = x + 0.01 × ∂a₅/∂x<br> | |
| 5. Repeat 100-500 times<br><br> | |
| <strong>Result:</strong> Image showing what pattern the filter detects! | |
| </div> | |
| <h3>Grad-CAM (Gradient-weighted Class Activation Mapping)</h3> | |
| <p>Highlight which regions of the image were important for a specific class prediction.</p> | |
| <div class="formula" style="background: rgba(255, 107, 53, 0.08); padding: 20px; border-radius: 8px;"> | |
| <strong>Step 1 - Global Average Pool the Gradients:</strong><br> | |
| αₖ = (1/Z) × Σᵢ Σⱼ (∂yᶜ/∂Aₖⁱʲ)<br><br> | |
| <strong>Step 2 - Weighted Sum of Feature Maps:</strong><br> | |
| L_Grad-CAM = ReLU(Σₖ αₖ × Aₖ)<br><br> | |
| Where:<br> | |
| • yᶜ = score for class c (before softmax)<br> | |
| • Aₖ = k-th feature map of last conv layer<br> | |
| • αₖ = importance weight of feature map k | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Grad-CAM Calculation</div> | |
| <strong>Last conv layer:</strong> 14×14×512 feature maps<br> | |
| <strong>Predicted class:</strong> "Dog" (class 5)<br><br> | |
| 1. Get gradient ∂y₅/∂A for all 512 feature maps<br> | |
| 2. Average each gradient map: α₁ = 0.8, α₂ = 0.1, α₃ = 0.5...<br> | |
| 3. Weighted sum: L = 0.8×A₁ + 0.1×A₂ + 0.5×A₃ + ...<br> | |
| 4. Apply ReLU (keep positive contributions only)<br> | |
| 5. Upsample to input size (14×14 → 224×224)<br><br> | |
| <strong>Result:</strong> Heatmap showing dog's face/body highlighted! | |
| </div> | |
| <h3>Saliency Maps (Vanilla Gradient)</h3> | |
| <div class="formula"> | |
| Saliency(x) = |∂yᶜ/∂x|<br><br> | |
| Take absolute value of gradient of class score w.r.t. input pixels. | |
| </div> | |
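| <p>A saliency map takes only a few lines in PyTorch — a sketch using torchvision's ResNet-18 as a stand-in classifier (untrained here, so load real weights before interpreting the map):</p> | |
| <span class="code-title">📄 saliency_map.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torchvision.models <span class="keyword">as</span> models | |
| model = models.resnet18().eval() <span class="comment"># any trained classifier works</span> | |
| x = torch.randn(<span class="number">1</span>, <span class="number">3</span>, <span class="number">224</span>, <span class="number">224</span>, requires_grad=<span class="keyword">True</span>) | |
| score = model(x)[<span class="number">0</span>].max() <span class="comment"># top class score (pre-softmax)</span> | |
| score.backward() <span class="comment"># ∂y_c/∂x for every input pixel</span> | |
| saliency = x.grad.abs().max(dim=<span class="number">1</span>)[<span class="number">0</span>] <span class="comment"># max over RGB → (1, 224, 224) heatmap</span> | |
| <span class="keyword">print</span>(saliency.shape) | |
| </div> | |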
| ` | |
| }, | |
| "lenet": { | |
| overview: ` | |
| <h3>LeNet-5 (1998) - The Pioneer</h3> | |
| <p>First successful CNN for digit recognition (MNIST). Introduced the Conv → Pool → Conv → Pool pattern still used today.</p> | |
| <h3>Architecture</h3> | |
| <div class="formula"> | |
| Input 32×32 → Conv(6 filters, 5×5) → AvgPool → Conv(16 filters, 5×5) → AvgPool → FC(120) → FC(84) → FC(10) | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">🏆 Historical Impact</div> | |
| • Used by US Postal Service for zip code recognition<br> | |
| • Proved CNNs work for real-world tasks<br> | |
| • Template for modern architectures | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Key Innovations</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Layered Architecture:</strong> Hierarchical feature extraction</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Shared Weights:</strong> Convolutional parameter sharing</div> | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">✉️ Handwriting Recognition</div> | |
| <div class="box-content"> | |
| USPS mail sorting, check processing, form digitization | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">📚 Educational Foundation</div> | |
| <div class="box-content"> | |
| Perfect starting point for learning CNNs - simple enough to understand, complex enough to be useful | |
| </div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>LeNet-5 Complete Dimension Walkthrough</h3> | |
| <p>Follow the tensor shapes through each layer of LeNet-5.</p> | |
| <div class="formula" style="font-size: 1.0rem; margin: 20px 0; background: rgba(0, 212, 255, 0.08); padding: 20px; border-radius: 8px;"> | |
| <strong>Output Size Formula:</strong><br> | |
| W_out = (W_in - K + 2P) / S + 1<br> | |
| Where K=kernel, P=padding, S=stride | |
| </div> | |
| <h4>Layer-by-Layer Calculation:</h4> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div> | |
| <strong>Input:</strong> 32 × 32 × 1 (grayscale MNIST)<br> | |
| <span class="formula-caption">Original: 28×28, padded to 32×32</span> | |
| </div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div> | |
| <strong>C1 - Conv:</strong> 6 filters, 5×5, stride 1, no padding<br> | |
| Output: (32 - 5 + 0)/1 + 1 = <strong>28 × 28 × 6</strong><br> | |
| Params: (5×5×1 + 1) × 6 = <strong>156</strong> | |
| </div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div> | |
| <strong>S2 - AvgPool:</strong> 2×2, stride 2<br> | |
| Output: 28/2 = <strong>14 × 14 × 6</strong><br> | |
| Params: 0 (or 12 with learnable coefficients) | |
| </div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div> | |
| <strong>C3 - Conv:</strong> 16 filters, 5×5<br> | |
| Output: (14 - 5)/1 + 1 = <strong>10 × 10 × 16</strong><br> | |
| Params: (5×5×6 + 1) × 16 = <strong>2,416</strong> | |
| </div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">05</div> | |
| <div> | |
| <strong>S4 - AvgPool:</strong> 2×2, stride 2<br> | |
| Output: 10/2 = <strong>5 × 5 × 16</strong><br> | |
| Params: 0 | |
| </div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">06</div> | |
| <div> | |
| <strong>C5 - Conv:</strong> 120 filters, 5×5<br> | |
| Output: (5 - 5)/1 + 1 = <strong>1 × 1 × 120</strong><br> | |
| Params: (5×5×16 + 1) × 120 = <strong>48,120</strong> | |
| </div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">07</div> | |
| <div> | |
| <strong>F6 - FC:</strong> 120 → 84<br> | |
| Params: 120 × 84 + 84 = <strong>10,164</strong> | |
| </div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">08</div> | |
| <div> | |
| <strong>Output - FC:</strong> 84 → 10<br> | |
| Params: 84 × 10 + 10 = <strong>850</strong> | |
| </div> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Total Parameter Count</div> | |
| <strong>Convolutional Layers:</strong><br> | |
| C1: 156 + C3: 2,416 + C5: 48,120 = 50,692<br><br> | |
| <strong>Fully Connected Layers:</strong><br> | |
| F6: 10,164 + Output: 850 = 11,014<br><br> | |
| <strong>Total:</strong> <strong>61,706 parameters</strong><br><br> | |
| Compare to AlexNet's 60 million - LeNet is 1000× smaller! | |
| </div> | |
| <h3>Receptive Field Calculation</h3> | |
| <div class="formula"> | |
| After C1 (5×5): Each neuron sees 5×5 pixels<br> | |
| After S2: Sees 6×6 pixels (pooling expands RF)<br> | |
| After C3: Sees 14×14 pixels<br> | |
| After S4: Sees 16×16 pixels<br> | |
| After C5: <strong>Sees entire 32×32 input!</strong> | |
| </div> | |
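| <p>You can sanity-check the 61,706 total with a few lines of PyTorch. This is a minimal sketch of the layer stack above (activations simplified to Tanh, which does not affect the parameter count):</p> | |
| <span class="code-title">📄 lenet_params.py</span><div class="code-block"><span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| lenet = nn.Sequential( | |
|     nn.Conv2d(<span class="number">1</span>, <span class="number">6</span>, <span class="number">5</span>), nn.Tanh(), nn.AvgPool2d(<span class="number">2</span>),    <span class="comment"># C1, S2</span> | |
|     nn.Conv2d(<span class="number">6</span>, <span class="number">16</span>, <span class="number">5</span>), nn.Tanh(), nn.AvgPool2d(<span class="number">2</span>),   <span class="comment"># C3, S4</span> | |
|     nn.Conv2d(<span class="number">16</span>, <span class="number">120</span>, <span class="number">5</span>), nn.Tanh(), nn.Flatten(),      <span class="comment"># C5</span> | |
|     nn.Linear(<span class="number">120</span>, <span class="number">84</span>), nn.Tanh(), nn.Linear(<span class="number">84</span>, <span class="number">10</span>),    <span class="comment"># F6, Output</span> | |
| ) | |
| <span class="keyword">print</span>(<span class="builtin">sum</span>(p.numel() <span class="keyword">for</span> p <span class="keyword">in</span> lenet.parameters()))  <span class="comment"># 61706</span></div> | |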
| ` | |
| }, | |
| "alexnet": { | |
| overview: ` | |
| <h3>AlexNet (2012) - The Deep Learning Revolution</h3> | |
| <p>Won ImageNet 2012 by huge margin (15.3% vs 26.2% error), igniting the deep learning revolution.</p> | |
| <h3>Key Innovations</h3> | |
| <ul> | |
| <li><strong>ReLU Activation:</strong> Faster training than sigmoid/tanh</li> | |
| <li><strong>Dropout:</strong> Prevents overfitting (p=0.5)</li> | |
| <li><strong>Data Augmentation:</strong> Random crops/flips</li> | |
| <li><strong>GPU Training:</strong> Used 2 GTX580 GPUs</li> | |
| <li><strong>Deep:</strong> 8 layers (5 conv + 3 FC), 60M parameters</li> | |
| </ul> | |
| <div class="callout tip"> | |
| <div class="callout-title">💡 Why So Important?</div> | |
| First to show that deeper networks + more data + GPU compute = breakthrough performance | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Technical Contributions</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>ReLU:</strong> Solved vanishing gradients, enabled deeper networks</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Dropout:</strong> First major regularization for deep nets</div> | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🎯 ImageNet Challenge</div> | |
| <div class="box-content"> | |
| Shattered records on 1000-class classification, proving deep learning superiority | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🚀 Industry Catalyst</div> | |
| <div class="box-content"> | |
| Sparked AI renaissance - Google, Facebook, Microsoft pivoted to deep learning after AlexNet | |
| </div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Paper & Pain: Parameter Counting</h3> | |
| <p>Understanding AlexNet's 60M parameters:</p> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Conv Layers:</strong> Only ~2.3 million parameters, yet they do most of the feature-extraction work with a small memory footprint!</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>FC Layers:</strong> Over <strong>58 million parameters</strong>. The first FC layer (FC6) alone takes 4096 × (6×6×256) ≈ 37M params!</div> | |
| </div> | |
| <div class="callout warning"> | |
| <div class="callout-title">⚠️ The Design Flaw</div> | |
| FC layers are the memory bottleneck. Modern models (ResNet, Inception) replace them with Global Average Pooling, cutting parameters by roughly 90%. | |
| </div> | |
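| <p>You can see the conv/FC imbalance yourself by counting parameters in torchvision's AlexNet variant (a sketch; note torchvision drops the original's grouped convolutions, so its conv count is slightly above the paper's ~2.3M):</p> | |
| <span class="code-title">📄 alexnet_params.py</span><div class="code-block"><span class="keyword">import</span> torchvision.models <span class="keyword">as</span> models | |
| m = models.alexnet(weights=<span class="keyword">None</span>) | |
| conv = <span class="builtin">sum</span>(p.numel() <span class="keyword">for</span> p <span class="keyword">in</span> m.features.parameters())    <span class="comment"># conv stack</span> | |
| fc = <span class="builtin">sum</span>(p.numel() <span class="keyword">for</span> p <span class="keyword">in</span> m.classifier.parameters())   <span class="comment"># FC stack</span> | |
| <span class="keyword">print</span>(f<span class="string">"conv: {conv:,}  fc: {fc:,}"</span>)  <span class="comment"># ≈2.5M conv vs ≈58.6M fc</span></div> | |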
| ` | |
| }, | |
| "vgg": { | |
| overview: ` | |
| <h3>VGGNet (2014) - The Power of Depth</h3> | |
| <p>VGG showed that depth matters: 16-19 layer networks built entirely from small 3×3 filters.</p> | |
| `, | |
| concepts: ` | |
| <h3>Small Filters, Receptive Field</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Uniformity:</strong> Uses 3×3 filters everywhere with stride 1, padding 1.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Pooling Pattern:</strong> 2×2 max pooling after every 2-3 conv layers.</div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>The 5×5 vs 3×3+3×3 Equivalence</h3> | |
| <p>Why stack 3×3 filters instead of one large filter?</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Paramount Efficiency</div> | |
| 1. **Receptive Field:** Two 3x3 layers cover 5x5 area. Three 3x3 layers cover 7x7 area.<br> | |
| 2. **Param Count (C filters):**<br> | |
| • One 7x7 layer: 7² × C² = 49C² parameters.<br> | |
| • Three 3x3 layers: 3 × (3² × C²) = 27C² parameters.<br> | |
| **Result:** 45% reduction in weights for the SAME "view" of the image! | |
| </div> | |
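| <p>The 49C² vs 27C² comparison is easy to verify numerically; a tiny sketch with C = 64 channels:</p> | |
| <span class="code-title">📄 vgg_param_check.py</span><div class="code-block">C = <span class="number">64</span> | |
| <span class="keyword">print</span>(<span class="number">7</span> * <span class="number">7</span> * C * C)          <span class="comment"># 200704 = 49·C² (one 7×7 layer)</span> | |
| <span class="keyword">print</span>(<span class="number">3</span> * (<span class="number">3</span> * <span class="number">3</span> * C * C))    <span class="comment"># 110592 = 27·C² (three 3×3 layers) → ~45% fewer</span></div> | |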
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🖼️ Feature Backbone</div> | |
| VGG is the preferred architectural backbone for Neural Style Transfer and early GANs due to its simple, clean feature extraction properties. | |
| </div> | |
| ` | |
| }, | |
| "resnet": { | |
| overview: ` | |
| <p>ResNet (Residual Networks) solved the <strong>degradation problem</strong> — deeper networks were performing WORSE than shallower ones, not because of overfitting, but because gradients couldn't flow through many layers. The solution? <strong>Skip connections</strong> that let gradients bypass layers entirely.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">🏗️ The Highway Analogy</div> | |
| Imagine a highway with an exit ramp. If the road ahead is blocked (gradient vanishes), you can take the exit ramp and bypass the blockage. Skip connections are these exit ramps — they guarantee that information (and gradients) can always flow through the network, even if some layers become useless. | |
| </div> | |
| <h3>The Residual Block</h3> | |
| <p>Instead of learning the full mapping $H(x)$, the block learns the <strong>residual</strong> $F(x) = H(x) - x$. The output is:</p> | |
| <div class="formula" style="text-align:center;"> | |
| $$y = F(x, \\{W_i\\}) + x$$ | |
| </div> | |
| <p><strong>Why this works:</strong> If the optimal mapping is close to identity (common in deep networks), it's much easier for the network to learn $F(x) \\approx 0$ than to learn $H(x) \\approx x$. Pushing weights toward zero is easy; learning a complex identity mapping is hard.</p> | |
| <h3>ResNet Variants</h3> | |
| <table style="width:100%; border-collapse: collapse; margin: 15px 0;"> | |
| <tr style="border-bottom: 2px solid var(--cyan);"> | |
| <th style="padding: 10px; color: var(--cyan);">Model</th> | |
| <th style="padding: 10px; color: var(--cyan);">Layers</th> | |
| <th style="padding: 10px; color: var(--cyan);">Parameters</th> | |
| <th style="padding: 10px; color: var(--cyan);">Top-1 Accuracy</th> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">ResNet-18</td><td>18</td><td>11.7M</td><td>69.8%</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">ResNet-34</td><td>34</td><td>21.8M</td><td>73.3%</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">ResNet-50</td><td>50</td><td>25.6M</td><td>76.1%</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">ResNet-101</td><td>101</td><td>44.5M</td><td>77.4%</td></tr> | |
| <tr><td style="padding: 8px;">ResNet-152</td><td>152</td><td>60.2M</td><td>78.3%</td></tr> | |
| </table> | |
| <div class="callout tip"> | |
| <div class="callout-title">💡 Impact</div> | |
| ResNet won the 2015 ImageNet competition with 152 layers and 3.6% top-5 error — the first time a neural network surpassed human performance (5.1%). ResNet concepts are now used in virtually every modern architecture. | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Identity & Projection Shortcuts</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Identity Shortcut:</strong> Used when dimensions match. y = F(x, {W}) + x</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Projection Shortcut (1×1 Conv):</strong> Used when dimensions change. y = F(x, {W}) + W_s x</div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>The Vanishing Gradient Solution</h3> | |
| <p>Why do skip connections help? Let's differentiate the output y = F(x) + x:</p> | |
| <div class="formula"> | |
| ∂y/∂x = ∂F/∂x + 1 | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Gradient Flow</div> | |
| The "+1" term acts as a **gradient highway**. Even if the weights in F(x) are small (causing ∂F/∂x → 0), the gradient can still flow through the +1 term. <br> | |
| This prevents the gradient from vanishing even in networks with 1000+ layers! | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🏗️ Modern Vision Backbones</div> | |
| <div class="box-content">ResNet is the default starting point for nearly all computer vision tasks today (Mask R-CNN, YOLO, etc.).</div> | |
| </div> | |
| `, | |
| code: ` | |
| <h3>Skip Connection Math</h3> | |
| <div class="formula" style="text-align:center;"> | |
| $$\\mathcal{H}(x) = \\mathcal{F}(x) + x$$ | |
| The network learns the <strong>residual</strong> $\\mathcal{F}(x) = \\mathcal{H}(x) - x$ instead of the full mapping. | |
| </div> | |
| <span class="code-title">📄 resnet_pytorch.py</span><div class="code-block"><span class="keyword">import</span> torchvision.models <span class="keyword">as</span> models | |
| <span class="comment"># Load pretrained ResNet-50</span> | |
| model = models.resnet50(weights=<span class="string">"IMAGENET1K_V2"</span>) | |
| <span class="keyword">print</span>(f<span class="string">"Parameters: {sum(p.numel() for p in model.parameters()):,}"</span>) | |
| <span class="comment"># Output: 25,557,032 parameters</span></div> | |
| ` | |
| }, | |
| "inception": { | |
| overview: ` | |
| <h3>Inception/GoogLeNet (2014) - Going Wider</h3> | |
| <p>Instead of going deeper, Inception modules go wider - using multiple filter sizes in parallel.</p> | |
| <h3>Inception Module</h3> | |
| <div class="formula"> | |
| Input → [1×1 conv] ⊕ [3×3 conv] ⊕ [5×5 conv] ⊕ [3×3 pool] → Concatenate | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Core Innovations</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>1×1 Bottlenecks:</strong> Dimensionality reduction before expensive convolutions.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Auxiliary Classifiers:</strong> Used during training to combat gradient vanishing in middle layers.</div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>1×1 Convolution Math (Network-in-Network)</h3> | |
| <p>A 1×1 convolution acts like a channel-wise MLP. It maps input channels C to output channels C' using 1×1×C parameters per filter.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Compression</div> | |
| Input: 28×28×256 | Target: 28×28×512 with 3×3 filters.<br> | |
| <strong>Direct:</strong> 512 × (3×3×256) ≈ 1.18 million params.<br> | |
| <strong>Inception (1×1 bottleneck to 64):</strong><br> | |
| Step 1 (1×1): 64 × (1×1×256) ≈ 16k params.<br> | |
| Step 2 (3×3): 512 × (3×3×64) ≈ 295k params.<br> | |
| <strong>Total:</strong> ≈ 311k params. <strong>~3.8× reduction in parameters!</strong> | |
| </div> | |
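| <p>A minimal two-branch sketch of the bottleneck idea (the real GoogLeNet module has four parallel branches; the channel sizes here are illustrative):</p> | |
| <span class="code-title">📄 mini_inception.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| <span class="keyword">class</span> <span class="class-name">MiniInception</span>(nn.Module): | |
|     <span class="keyword">def</span> <span class="function">__init__</span>(self, c_in): | |
|         <span class="builtin">super</span>().__init__() | |
|         self.b1 = nn.Conv2d(c_in, <span class="number">64</span>, <span class="number">1</span>)  <span class="comment"># plain 1×1 branch</span> | |
|         self.b2 = nn.Sequential( | |
|             nn.Conv2d(c_in, <span class="number">64</span>, <span class="number">1</span>),             <span class="comment"># 1×1 bottleneck first</span> | |
|             nn.Conv2d(<span class="number">64</span>, <span class="number">128</span>, <span class="number">3</span>, padding=<span class="number">1</span>))  <span class="comment"># then the cheap 3×3</span> | |
|     <span class="keyword">def</span> <span class="function">forward</span>(self, x): | |
|         <span class="keyword">return</span> torch.cat([self.b1(x), self.b2(x)], dim=<span class="number">1</span>)  <span class="comment"># concatenate on channels</span></div> | |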
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🏎️ Computational Efficiency</div> | |
| Inception designs are optimized for running deep networks on limited compute budgets. | |
| </div> | |
| ` | |
| }, | |
| "mobilenet": { | |
| overview: ` | |
| <h3>MobileNet - CNNs for Mobile Devices</h3> | |
| <p>Designed for mobile/embedded vision using depthwise separable convolutions.</p> | |
| <h3>Depthwise Separable Convolution</h3> | |
| <div class="formula"> | |
| Standard Conv = Depthwise Conv + Pointwise (1×1) Conv | |
| </div> | |
| <h3>Computation Reduction</h3> | |
| <table> | |
| <tr> | |
| <th>Method</th> | |
| <th>Parameters</th> | |
| <th>FLOPs</th> | |
| </tr> | |
| <tr> | |
| <td>Standard 3×3 Conv</td> | |
| <td>3×3×C_in×C_out</td> | |
| <td>High</td> | |
| </tr> | |
| <tr> | |
| <td>Depthwise Separable</td> | |
| <td>3×3×C_in + C_in×C_out</td> | |
| <td><strong>8-9× less!</strong></td> | |
| </tr> | |
| </table> | |
| <div class="callout tip"> | |
| <div class="callout-title">✅ Applications</div> | |
| • Real-time mobile apps (camera filters, AR)<br> | |
| • Edge devices (drones, IoT)<br> | |
| • Latency-critical systems<br> | |
| • Good accuracy with 10-20× speedup | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Efficiency Factors</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Width Multiplier (α):</strong> Thins the network by reducing channels.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Resolution Multiplier (ρ):</strong> Reduces input image size.</div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Depthwise Separable Math</h3> | |
| <p>Standard convolution complexity: F² × C_in × C_out × H × W</p> | |
| <p>Separable complexity: (F² × C_in + C_in × C_out) × H × W</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: The 9× Speedup</div> | |
| Reduction ratio is roughly: 1/C_out + 1/F². <br> | |
| For 3×3 filters (F=3): roughly <strong>1/9th</strong> the computation of a standard conv! | |
| </div> | |
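| <p>The factorization is two convolutions in PyTorch: a grouped 3×3 (one filter per channel) followed by a 1×1. A sketch of the MobileNet building block:</p> | |
| <span class="code-title">📄 depthwise_separable.py</span><div class="code-block"><span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| <span class="keyword">def</span> <span class="function">depthwise_separable</span>(c_in, c_out): | |
|     <span class="keyword">return</span> nn.Sequential( | |
|         nn.Conv2d(c_in, c_in, <span class="number">3</span>, padding=<span class="number">1</span>, groups=c_in, bias=<span class="keyword">False</span>),  <span class="comment"># depthwise: one 3×3 filter per channel</span> | |
|         nn.BatchNorm2d(c_in), nn.ReLU(), | |
|         nn.Conv2d(c_in, c_out, <span class="number">1</span>, bias=<span class="keyword">False</span>),  <span class="comment"># pointwise: 1×1 mixes channels</span> | |
|         nn.BatchNorm2d(c_out), nn.ReLU())</div> | |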
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">📱 Edge Devices</div> | |
| <div class="box-content">Real-time object detection on smartphones, web browsers (TensorFlow.js), and IoT devices.</div> | |
| </div> | |
| ` | |
| }, | |
| "transfer-learning": { | |
| overview: ` | |
| <p>Transfer learning is the most <strong>practical technique</strong> in modern deep learning. Instead of training from scratch (which requires millions of images and weeks of GPU time), you <strong>reuse a model already trained on a large dataset</strong> (like ImageNet's 14M images) and fine-tune it for your specific task.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">🎓 The Education Analogy</div> | |
| Transfer learning is like a medical student who already learned anatomy, chemistry, and biology (pretrained knowledge). Instead of re-learning everything from scratch for cardiology (your task), they just need specialized training on heart-related topics. The foundational knowledge <strong>transfers</strong>. | |
| </div> | |
| <h3>Two Strategies</h3> | |
| <div class="list-item"><div class="list-num">1</div><div><strong>Feature Extraction (Freeze backbone)</strong><br>Freeze all pretrained layers. Only train a new classification head. Best when you have <strong>little data</strong> (<1000 images) or your task is similar to ImageNet.</div></div> | |
| <div class="list-item"><div class="list-num">2</div><div><strong>Fine-tuning (Unfreeze some layers)</strong><br>Unfreeze the last few layers and train with a very small learning rate (1e-4 to 1e-5). Best when you have <strong>moderate data</strong> (1000-50000 images) or your task is somewhat different.</div></div> | |
| <h3>When to Use Which Strategy</h3> | |
| <table style="width:100%; border-collapse: collapse; margin: 15px 0;"> | |
| <tr style="border-bottom: 2px solid var(--cyan);"> | |
| <th style="padding: 10px; color: var(--cyan);">Data Size</th> | |
| <th style="padding: 10px; color: var(--cyan);">Similar to ImageNet?</th> | |
| <th style="padding: 10px; color: var(--cyan);">Strategy</th> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Small (<1K)</td><td>Yes</td><td>Feature extraction</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Small (<1K)</td><td>No</td><td>Feature extraction + heavy augmentation</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Medium (1K-50K)</td><td>Yes</td><td>Fine-tune last few layers</td></tr> | |
| <tr><td style="padding: 8px;">Large (>50K)</td><td>No</td><td>Fine-tune entire network (low lr)</td></tr> | |
| </table> | |
| `, | |
| concepts: ` | |
| <h3>Why Transfer Learning Works</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Feature Hierarchy:</strong> Early layers learn universal features (edges, textures) that transfer across domains</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Domain Similarity:</strong> The more similar source and target domains, the better transfer</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Regularization Effect:</strong> Pre-trained weights act as strong priors, preventing overfitting</div> | |
| </div> | |
| <h3>Transfer Learning Quadrant</h3> | |
| <table> | |
| <tr> | |
| <th></th> | |
| <th>Similar Domain</th> | |
| <th>Different Domain</th> | |
| </tr> | |
| <tr> | |
| <td><strong>Large Data</strong></td> | |
| <td>Fine-tune all layers</td> | |
| <td>Fine-tune top layers</td> | |
| </tr> | |
| <tr> | |
| <td><strong>Small Data</strong></td> | |
| <td>Feature extraction</td> | |
| <td>Feature extraction (risky)</td> | |
| </tr> | |
| </table> | |
| `, | |
| math: ` | |
| <h3>Learning Rate Strategies</h3> | |
| <p>Different layers need different learning rates during fine-tuning.</p> | |
| <div class="formula"> | |
| Discriminative Fine-tuning:<br> | |
| lr_layer_n = lr_base × decay^(L-n)<br> | |
| <br> | |
| Where L = total layers, n = layer index<br> | |
| Example (with L = 10 layers): lr_base=1e-3, decay=0.9<br> | |
| Layer 1: 1e-3 × 0.9^9 ≈ 3.9e-4<br> | |
| Layer 10: 1e-3 × 0.9^0 = 1e-3 | |
| </div> | |
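| <p>In PyTorch, per-layer learning rates are expressed with optimizer parameter groups; a minimal sketch (the exact rates are illustrative):</p> | |
| <span class="code-title">📄 discriminative_lr.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torchvision.models <span class="keyword">as</span> models | |
| model = models.resnet18(weights=<span class="string">"IMAGENET1K_V1"</span>) | |
| optimizer = torch.optim.Adam([ | |
|     {<span class="string">"params"</span>: model.layer1.parameters(), <span class="string">"lr"</span>: <span class="number">1e-5</span>},  <span class="comment"># early layers: tiny lr</span> | |
|     {<span class="string">"params"</span>: model.layer4.parameters(), <span class="string">"lr"</span>: <span class="number">1e-4</span>}, | |
|     {<span class="string">"params"</span>: model.fc.parameters(),     <span class="string">"lr"</span>: <span class="number">1e-3</span>},  <span class="comment"># new head learns fastest</span> | |
| ])</div> | |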
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Domain Shift</div> | |
| When source and target distributions differ:<br> | |
| • <strong>Covariate Shift:</strong> P(X) changes, P(Y|X) same<br> | |
| • <strong>Label Shift:</strong> P(Y) changes, P(X|Y) same<br> | |
| • <strong>Concept Shift:</strong> P(Y|X) changes<br> | |
| Transfer learning handles covariate shift well but struggles with concept shift. | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🏥 Medical Imaging</div> | |
| <div class="box-content"> | |
| Train on ImageNet, fine-tune for X-ray diagnosis with only 1000 labeled images. Achieves 90%+ accuracy vs 60% from scratch. | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🛒 Retail & E-commerce</div> | |
| <div class="box-content"> | |
| Product classification, visual search, inventory management using pre-trained ResNet/EfficientNet models. | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🌍 Satellite Imagery</div> | |
| <div class="box-content"> | |
| Land use classification, deforestation detection, urban planning using models pre-trained on aerial imagery. | |
| </div> | |
| </div> | |
| `, | |
| code: `<span class="code-title">📄 transfer_learning.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torchvision.models <span class="keyword">as</span> models | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| <span class="comment"># --- Load pretrained ResNet-18 ---</span> | |
| model = models.resnet18(weights=<span class="string">"IMAGENET1K_V1"</span>) | |
| <span class="comment"># --- Strategy 1: Feature Extraction (freeze all) ---</span> | |
| <span class="keyword">for</span> param <span class="keyword">in</span> model.parameters(): | |
| param.requires_grad = <span class="keyword">False</span> | |
| <span class="comment"># Replace final layer for 5 classes</span> | |
| model.fc = nn.Linear(model.fc.in_features, <span class="number">5</span>) | |
| <span class="comment"># Only train the new layer</span> | |
| optimizer = torch.optim.Adam(model.fc.parameters(), lr=<span class="number">0.001</span>) | |
| <span class="comment"># --- Strategy 2: Fine-tuning (unfreeze some layers) ---</span> | |
| <span class="keyword">for</span> param <span class="keyword">in</span> model.layer4.parameters(): | |
| param.requires_grad = <span class="keyword">True</span> <span class="comment"># unfreeze last residual block</span> | |
| <span class="keyword">print</span>(f<span class="string">"Trainable: {sum(p.numel() for p in model.parameters() if p.requires_grad):,}"</span>)</div> | |
| ` | |
| }, | |
| "localization": { | |
| overview: ` | |
| <h3>Object Localization</h3> | |
| <p>Predict both class and bounding box for a single object in image.</p> | |
| <h3>Multi-Task Loss</h3> | |
| <div class="formula"> | |
| Total Loss = L_classification + λ × L_bbox<br> | |
| <br> | |
| Where:<br> | |
| L_classification = Cross-Entropy<br> | |
| L_bbox = Smooth L1 or IoU loss<br> | |
| λ = balance term (typically 1-10) | |
| </div> | |
| <h3>Bounding Box Representation</h3> | |
| <ul> | |
| <li><strong>Option 1:</strong> (x_min, y_min, x_max, y_max)</li> | |
| <li><strong>Option 2:</strong> (x_center, y_center, width, height) ← Most common</li> | |
| </ul> | |
| `, | |
| concepts: ` | |
| <h3>Localization vs Detection</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Classification:</strong> What is in the image? → "Cat"</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Localization:</strong> Where is the single object? → "Cat at [100, 50, 200, 150]"</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Detection:</strong> Where are ALL objects? → Multiple bounding boxes</div> | |
| </div> | |
| <h3>Network Architecture</h3> | |
| <p>Modify a classification network (ResNet, VGG) by adding a regression head:</p> | |
| <div class="formula"> | |
| CNN Backbone → Feature Map → [Classification Head (1000 classes)]<br> | |
| → [Regression Head (4 coordinates)] | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Smooth L1 Loss (Huber Loss)</h3> | |
| <p>Combines L1 and L2 loss for robust bounding box regression.</p> | |
| <div class="formula"> | |
| SmoothL1(x) = { 0.5x² if |x| < 1<br> | |
| { |x| - 0.5 otherwise | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Why Smooth L1?</div> | |
| • <strong>L2 Loss:</strong> Penalizes large errors too much (squared), sensitive to outliers<br> | |
| • <strong>L1 Loss:</strong> Robust to outliers but has discontinuous gradient at 0<br> | |
| • <strong>Smooth L1:</strong> Best of both worlds - quadratic near 0, linear for large errors | |
| </div> | |
| <h3>IoU Loss</h3> | |
| <div class="formula"> | |
| L_IoU = 1 - IoU(pred, target)<br> | |
| Where IoU = Intersection / Union | |
| </div> | |
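| <p>Both pieces of the multi-task loss are PyTorch built-ins; a minimal sketch with dummy tensors and λ = 1:</p> | |
| <span class="code-title">📄 localization_loss.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| logits = torch.randn(<span class="number">4</span>, <span class="number">10</span>)             <span class="comment"># class scores for 4 images</span> | |
| labels = torch.randint(<span class="number">0</span>, <span class="number">10</span>, (<span class="number">4</span>,)) | |
| pred_box, true_box = torch.rand(<span class="number">4</span>, <span class="number">4</span>), torch.rand(<span class="number">4</span>, <span class="number">4</span>)  <span class="comment"># (cx, cy, w, h)</span> | |
| loss = nn.CrossEntropyLoss()(logits, labels) + nn.SmoothL1Loss()(pred_box, true_box) | |
| <span class="keyword">print</span>(loss.item())</div> | |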
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🚗 Self-Driving Cars</div> | |
| <div class="box-content">Localize the primary vehicle ahead for adaptive cruise control</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">📸 Photo Auto-Crop</div> | |
| <div class="box-content">Detect main subject and automatically crop to optimal composition</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🏥 Medical Imaging</div> | |
| <div class="box-content">Localize tumors, organs, or anomalies in X-rays and CT scans</div> | |
| </div> | |
| ` | |
| }, | |
| "rcnn": { | |
| overview: ` | |
| <h3>R-CNN Family Evolution</h3> | |
| <table> | |
| <tr> | |
| <th>Model</th> | |
| <th>Year</th> | |
| <th>Speed (FPS)</th> | |
| <th>Key Innovation</th> | |
| </tr> | |
| <tr> | |
| <td>R-CNN</td> | |
| <td>2014</td> | |
| <td>0.05</td> | |
| <td>Selective Search + CNN features</td> | |
| </tr> | |
| <tr> | |
| <td>Fast R-CNN</td> | |
| <td>2015</td> | |
| <td>0.5</td> | |
| <td>RoI Pooling (share conv features)</td> | |
| </tr> | |
| <tr> | |
| <td>Faster R-CNN</td> | |
| <td>2015</td> | |
| <td>7</td> | |
| <td>Region Proposal Network (RPN)</td> | |
| </tr> | |
| <tr> | |
| <td>Mask R-CNN</td> | |
| <td>2017</td> | |
| <td>5</td> | |
| <td>+ Instance Segmentation masks</td> | |
| </tr> | |
| </table> | |
| <div class="callout tip"> | |
| <div class="callout-title">💡 When to Use</div> | |
| Faster R-CNN: Best accuracy for detection (not real-time)<br> | |
| Mask R-CNN: Detection + instance segmentation | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Two-Stage Detection Pipeline</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Stage 1 - Region Proposal:</strong> Find ~2000 candidate regions that might contain objects</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Stage 2 - Classification:</strong> Classify each region and refine bounding box</div> | |
| </div> | |
| <h3>Region Proposal Network (RPN)</h3> | |
| <p>The key innovation of Faster R-CNN - learns to propose regions instead of using hand-crafted algorithms.</p> | |
| <div class="formula"> | |
| RPN Output per location:<br> | |
| • k anchor boxes × 4 coordinates = 4k regression outputs<br> | |
| • k anchor boxes × 2 objectness scores = 2k classification outputs<br> | |
| Typical k = 9 (3 scales × 3 aspect ratios) | |
| </div> | |
| `, | |
| math: ` | |
| <h3>RoI Pooling: Fixed-Size Feature Maps</h3> | |
| <p>Convert variable-size regions into fixed 7×7 feature maps for FC layers.</p> | |
| <div class="formula"> | |
| For each RoI of size H×W:<br> | |
| 1. Divide into 7×7 grid (cells of size H/7 × W/7)<br> | |
| 2. Max-pool each cell → single value<br> | |
| 3. Output: Fixed 7×7 feature map regardless of input size | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: RoI Align vs RoI Pool</div> | |
| <strong>Problem:</strong> RoI Pooling quantizes coordinates, causing misalignment.<br> | |
| <strong>Solution:</strong> RoI Align uses bilinear interpolation instead of rounding.<br> | |
| This is critical for Mask R-CNN where pixel-level accuracy matters! | |
| </div> | |
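| <p>torchvision ships RoI Align as a ready-made op; a sketch pooling one region from a feature map into the fixed 7×7 grid:</p> | |
| <span class="code-title">📄 roi_align_demo.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">from</span> torchvision.ops <span class="keyword">import</span> roi_align | |
| feat = torch.randn(<span class="number">1</span>, <span class="number">256</span>, <span class="number">50</span>, <span class="number">50</span>)                 <span class="comment"># backbone feature map</span> | |
| boxes = torch.tensor([[<span class="number">0.</span>, <span class="number">10.</span>, <span class="number">10.</span>, <span class="number">30.</span>, <span class="number">40.</span>]])  <span class="comment"># (batch_idx, x1, y1, x2, y2)</span> | |
| pooled = roi_align(feat, boxes, output_size=(<span class="number">7</span>, <span class="number">7</span>), sampling_ratio=<span class="number">2</span>) | |
| <span class="keyword">print</span>(pooled.shape)  <span class="comment"># torch.Size([1, 256, 7, 7])</span></div> | |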
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🏥 Medical Imaging</div> | |
| <div class="box-content">High-accuracy tumor detection where speed is less critical than precision</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">📷 Photo Analysis</div> | |
| <div class="box-content">Face detection, scene understanding, object counting in static images</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🔬 Scientific Research</div> | |
| <div class="box-content">Cell detection, particle tracking, microscopy image analysis</div> | |
| </div> | |
| ` | |
| }, | |
| "ssd": { | |
| overview: ` | |
| <h3>SSD (Single Shot MultiBox Detector)</h3> | |
| <p>Balances speed and accuracy by predicting boxes at multiple scales.</p> | |
| <h3>Key Ideas</h3> | |
| <ul> | |
| <li><strong>Multi-Scale:</strong> Predictions from different layers (early = small objects, deep = large)</li> | |
| <li><strong>Default Boxes (Anchors):</strong> Pre-defined boxes of various aspect ratios</li> | |
| <li><strong>Single Pass:</strong> No separate region proposal step</li> | |
| </ul> | |
| <div class="callout insight"> | |
| <div class="callout-title">📊 Performance</div> | |
| SSD300: 59 FPS, 74.3% mAP<br> | |
| SSD512: 22 FPS, 76.8% mAP<br> | |
| <br> | |
| Sweet spot between YOLO (faster) and Faster R-CNN (more accurate) | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Multi-Scale Feature Maps</h3> | |
| <p>SSD makes predictions at multiple layers, each detecting objects at different scales.</p> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Early Layers (38×38):</strong> Detect small objects (high resolution)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Middle Layers (19×19, 10×10):</strong> Detect medium objects</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Deep Layers (5×5, 3×3, 1×1):</strong> Detect large objects</div> | |
| </div> | |
| <h3>Default Boxes (Anchors)</h3> | |
| <p>At each feature map cell, SSD predicts offsets for k default boxes with different aspect ratios (1:1, 2:1, 1:2, 3:1, 1:3).</p> | |
| `, | |
| math: ` | |
| <h3>SSD Loss Function</h3> | |
| <p>Weighted sum of localization and confidence losses.</p> | |
| <div class="formula"> | |
| L(x, c, l, g) = (1/N) × [L_conf(x, c) + α × L_loc(x, l, g)]<br> | |
| <br> | |
| Where:<br> | |
| • L_conf = Softmax loss over class confidences<br> | |
| • L_loc = Smooth L1 loss over box coordinates<br> | |
| • α = Weight factor (typically 1)<br> | |
| • N = Number of matched default boxes | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Hard Negative Mining</div> | |
| Problem: Most default boxes are background (class imbalance).<br> | |
| Solution: Sort negative boxes by confidence loss, pick top ones so pos:neg = 1:3.<br> | |
| This focuses training on hard negatives, not easy ones. | |
| </div> | |
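| <p>Hard negative mining is usually implemented with a double argsort that ranks each prior's loss; a self-contained sketch (tensor names are illustrative):</p> | |
| <span class="code-title">📄 hard_negative_mining.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">def</span> <span class="function">hard_negative_mining</span>(conf_loss, pos_mask, neg_pos_ratio=<span class="number">3</span>): | |
|     <span class="comment"># conf_loss: (B, num_priors) per-box loss; pos_mask: (B, num_priors) bool</span> | |
|     num_neg = neg_pos_ratio * pos_mask.sum(dim=<span class="number">1</span>, keepdim=<span class="keyword">True</span>) | |
|     loss = conf_loss.clone() | |
|     loss[pos_mask] = -<span class="builtin">float</span>(<span class="string">"inf"</span>)           <span class="comment"># exclude positives from the ranking</span> | |
|     _, idx = loss.sort(dim=<span class="number">1</span>, descending=<span class="keyword">True</span>) | |
|     _, rank = idx.sort(dim=<span class="number">1</span>)                 <span class="comment"># rank of each prior by loss</span> | |
|     <span class="keyword">return</span> pos_mask | (rank &lt; num_neg)      <span class="comment"># positives + top-k hardest negatives</span></div> | |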
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">📹 Video Analytics</div> | |
| <div class="box-content">Real-time object detection in security cameras, sports broadcasting</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🤖 Robotics</div> | |
| <div class="box-content">Object detection for manipulation tasks, obstacle avoidance</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">📱 Mobile Apps</div> | |
| <div class="box-content">Lightweight models for on-device detection (MobileNet-SSD)</div> | |
| </div> | |
| ` | |
| }, | |
| "semantic-seg": { | |
| overview: ` | |
| <h3>Semantic Segmentation</h3> | |
| <p>Classify every pixel in the image (pixel-wise classification).</p> | |
| <h3>Popular Architectures</h3> | |
| <table> | |
| <tr> | |
| <th>Model</th> | |
| <th>Key Feature</th> | |
| </tr> | |
| <tr> | |
| <td>FCN</td> | |
| <td>Fully Convolutional (no FC layers)</td> | |
| </tr> | |
| <tr> | |
| <td>U-Net</td> | |
| <td>Skip connections from encoder to decoder</td> | |
| </tr> | |
| <tr> | |
| <td>DeepLab</td> | |
| <td>Atrous (dilated) convolutions + ASPP</td> | |
| </tr> | |
| </table> | |
| <div class="formula"> | |
| U-Net Pattern:<br> | |
| Input → Encoder (downsample) → Bottleneck → Decoder (upsample) → Pixel-wise Output<br> | |
| With skip connections from encoder to decoder at each level | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Key Concepts</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Encoder-Decoder:</strong> Downsample to capture context, upsample to recover spatial detail</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Skip Connections:</strong> Pass high-resolution features from encoder to decoder (U-Net)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Atrous Convolution:</strong> Expand receptive field without losing resolution (DeepLab)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>ASPP:</strong> Atrous Spatial Pyramid Pooling - capture multi-scale context</div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Dice Loss for Segmentation</h3> | |
| <p>Better than cross-entropy for imbalanced classes (small objects).</p> | |
| <div class="formula"> | |
| Dice = 2 × |A ∩ B| / (|A| + |B|)<br> | |
| Dice Loss = 1 - Dice<br> | |
| <br> | |
| Where A = predicted mask, B = ground truth mask | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Why Dice > Cross-Entropy?</div> | |
| If only 1% of pixels are foreground:<br> | |
| • Cross-Entropy: Model can get 99% accuracy by predicting all background!<br> | |
| • Dice: Penalizes missed foreground pixels heavily<br> | |
| • Often use combination: L = BCE + Dice | |
| </div> | |
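| <p>Dice loss is a few lines in PyTorch; a minimal sketch for binary masks (probabilities in; add BCE on top for the combined loss mentioned above):</p> | |
| <span class="code-title">📄 dice_loss.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">def</span> <span class="function">dice_loss</span>(pred, target, eps=<span class="number">1e-6</span>): | |
|     <span class="comment"># pred, target: (N, H, W); pred holds probabilities, target a 0/1 mask</span> | |
|     pred, target = pred.flatten(<span class="number">1</span>), target.flatten(<span class="number">1</span>) | |
|     inter = (pred * target).sum(dim=<span class="number">1</span>) | |
|     dice = (<span class="number">2</span> * inter + eps) / (pred.sum(dim=<span class="number">1</span>) + target.sum(dim=<span class="number">1</span>) + eps) | |
|     <span class="keyword">return</span> <span class="number">1</span> - dice.mean()  <span class="comment"># eps avoids 0/0 on empty masks</span></div> | |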
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🏥 Medical Imaging</div> | |
| <div class="box-content">Tumor segmentation, organ delineation, cell analysis</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🚗 Autonomous Driving</div> | |
| <div class="box-content">Road segmentation, free space detection, drivable area</div> | |
| </div> | |
| ` | |
| }, | |
| "instance-seg": { | |
| overview: ` | |
| <h3>Instance Segmentation</h3> | |
| <p>Detect AND segment each individual object (combines object detection + semantic segmentation).</p> | |
| <h3>Difference from Semantic Segmentation</h3> | |
| <ul> | |
| <li><strong>Semantic:</strong> All "person" pixels get same label</li> | |
| <li><strong>Instance:</strong> Person #1, Person #2, Person #3 (separate instances)</li> | |
| </ul> | |
| <h3>Main Approach: Mask R-CNN</h3> | |
| <div class="formula"> | |
| Faster R-CNN + Segmentation Branch<br> | |
| <br> | |
| For each RoI:<br> | |
| 1. Bounding box regression<br> | |
| 2. Class prediction<br> | |
| 3. <strong>Binary mask for the object</strong> | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Mask R-CNN Architecture</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Backbone:</strong> ResNet-50/101 with Feature Pyramid Network (FPN)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>RPN:</strong> Region Proposal Network (same as Faster R-CNN)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>RoI Align:</strong> Better than RoI Pooling (no quantization)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>Mask Head:</strong> Small FCN that outputs 28×28 binary mask per class</div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Multi-Task Loss</h3> | |
| <p>Mask R-CNN optimizes three losses simultaneously:</p> | |
| <div class="formula"> | |
| L = L_cls + L_box + L_mask<br> | |
| <br> | |
| Where:<br> | |
| • L_cls = Classification loss (cross-entropy)<br> | |
| • L_box = Bounding box regression (smooth L1)<br> | |
| • L_mask = Binary cross-entropy per-pixel mask loss | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Key Insight: Decoupled Masks</div> | |
| Mask R-CNN predicts a binary mask for EACH class independently.<br> | |
| This avoids competition between classes and improves accuracy. | |
| </div> | |
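| <p>A pretrained Mask R-CNN is one call in torchvision; a sketch of inference on a dummy image:</p> | |
| <span class="code-title">📄 mask_rcnn_demo.py</span><div class="code-block"><span class="keyword">import</span> torch, torchvision | |
| model = torchvision.models.detection.maskrcnn_resnet50_fpn(weights=<span class="string">"DEFAULT"</span>) | |
| model.eval() | |
| img = torch.rand(<span class="number">3</span>, <span class="number">480</span>, <span class="number">640</span>)  <span class="comment"># RGB image scaled to [0, 1]</span> | |
| <span class="keyword">with</span> torch.no_grad(): | |
|     out = model([img])[<span class="number">0</span>]  <span class="comment"># dict with boxes, labels, scores, masks</span> | |
| <span class="keyword">print</span>(out[<span class="string">"masks"</span>].shape)  <span class="comment"># (N, 1, 480, 640): one soft mask per instance</span></div> | |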
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">📸 Photo Editing</div> | |
| <div class="box-content">Auto-select objects for editing, background removal, composition</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🤖 Robotics</div> | |
| <div class="box-content">Object manipulation - need exact shape, not just bounding box</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🎬 Video Production</div> | |
| <div class="box-content">Rotoscoping, VFX, green screen replacement</div> | |
| </div> | |
| ` | |
| }, | |
| "face-recog": { | |
| overview: ` | |
| <h3>Face Recognition with Siamese Networks</h3> | |
| <p>Learn similarity between faces using metric learning instead of classification.</p> | |
| <h3>Triplet Loss Training</h3> | |
| <div class="formula"> | |
| Loss = max(||f(A) - f(P)||² - ||f(A) - f(N)||² + margin, 0)<br> | |
| <br> | |
| Where:<br> | |
| A = Anchor (reference face)<br> | |
| P = Positive (same person)<br> | |
| N = Negative (different person)<br> | |
| margin = minimum separation (e.g., 0.2) | |
| </div> | |
| <div class="callout tip"> | |
| <div class="callout-title">💡 One-Shot Learning</div> | |
| After training, recognize new people with just 1-2 photos!<br> | |
| No retraining needed - just compare embeddings. | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Face Recognition Pipeline</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Face Detection:</strong> Find faces in image (MTCNN, RetinaFace)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Alignment:</strong> Normalize face orientation and scale</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Embedding:</strong> Extract 128/512-dim feature vector (FaceNet, ArcFace)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>Matching:</strong> Compare embeddings with cosine similarity or L2 distance</div> | |
| </div> | |
| <h3>Key Models</h3> | |
| <table> | |
| <tr><th>Model</th><th>Key Innovation</th></tr> | |
| <tr><td>FaceNet</td><td>Triplet loss, 128-dim embedding</td></tr> | |
| <tr><td>ArcFace</td><td>Additive angular margin loss, SOTA accuracy</td></tr> | |
| <tr><td>DeepFace</td><td>Facebook's early success</td></tr> | |
| </table> | |
| `, | |
| math: ` | |
| <h3>Triplet Loss Intuition</h3> | |
| <p>Push same-person faces closer, different-person faces apart.</p> | |
| <div class="formula"> | |
| ||f(A) - f(P)||² + margin < ||f(A) - f(N)||² | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Hard Triplet Mining</div> | |
| Easy triplets: Random selection - margin already satisfied, loss=0<br> | |
| Hard triplets: Find P closest to anchor, N closest to anchor from different class<br> | |
| <strong>Training on hard triplets is critical for convergence!</strong> | |
| </div> | |
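| <p>The squared-distance triplet loss above translates directly into a few lines; a sketch on random embeddings (in practice these come from the embedding network, L2-normalized):</p> | |
| <span class="code-title">📄 triplet_loss.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn.functional <span class="keyword">as</span> F | |
| <span class="keyword">def</span> <span class="function">triplet_loss</span>(a, p, n, margin=<span class="number">0.2</span>): | |
|     d_ap = (a - p).pow(<span class="number">2</span>).sum(dim=<span class="number">1</span>)  <span class="comment"># ||f(A)-f(P)||²</span> | |
|     d_an = (a - n).pow(<span class="number">2</span>).sum(dim=<span class="number">1</span>)  <span class="comment"># ||f(A)-f(N)||²</span> | |
|     <span class="keyword">return</span> F.relu(d_ap - d_an + margin).mean()  <span class="comment"># max(·, 0)</span> | |
| a, p, n = (F.normalize(torch.randn(<span class="number">32</span>, <span class="number">128</span>), dim=<span class="number">1</span>) <span class="keyword">for</span> _ <span class="keyword">in</span> <span class="builtin">range</span>(<span class="number">3</span>)) | |
| <span class="keyword">print</span>(triplet_loss(a, p, n))</div> | |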
| <h3>ArcFace Angular Margin</h3> | |
| <div class="formula"> | |
| L = -log(e^(s·cos(θ_y + m)) / (e^(s·cos(θ_y + m)) + Σ_{j≠y} e^(s·cos(θ_j))))<br> | |
| Where θ_y = angle to the true-class weight vector, m = angular margin, s = scale factor | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">📱 Phone Unlock</div> | |
| <div class="box-content">Face ID, biometric authentication</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🔒 Security</div> | |
| <div class="box-content">Access control, surveillance, identity verification</div> | |
| </div> | |
| ` | |
| }, | |
| "autoencoders": { | |
| overview: ` | |
| <h3>Autoencoders</h3> | |
| <p>Unsupervised learning to compress data into latent representation and reconstruct it.</p> | |
| <h3>Architecture</h3> | |
| <div class="formula"> | |
| Input → Encoder → Latent Code (bottleneck) → Decoder → Reconstruction<br> | |
| <br> | |
| Loss = ||Input - Reconstruction||² (MSE) | |
| </div> | |
| <h3>Variants</h3> | |
| <ul> | |
| <li><strong>Vanilla:</strong> Basic autoencoder</li> | |
| <li><strong>Denoising:</strong> Input corrupted, output clean (learns robust features)</li> | |
| <li><strong>Variational (VAE):</strong> Probabilistic latent space (for generation)</li> | |
| <li><strong>Sparse:</strong> Encourage sparse activations</li> | |
| </ul> | |
| `, | |
| concepts: ` | |
| <h3>Key Concepts</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Bottleneck:</strong> Force information compression by using fewer dimensions than input</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Reconstruction:</strong> Learn to recreate input - captures essential features</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Latent Space:</strong> Compressed representation captures data structure</div> | |
| </div> | |
| <h3>Variational Autoencoder (VAE)</h3> | |
| <p>Instead of encoding to a point, encode to a probability distribution (mean + variance).</p> | |
| <div class="formula"> | |
| Encoder outputs: μ (mean) and σ (standard deviation)<br> | |
| Sample: z = μ + σ × ε (where ε ~ N(0,1))<br> | |
| This is the "reparameterization trick" for backprop! | |
| </div> | |
| `, | |
| math: ` | |
| <h3>VAE Loss Function (ELBO)</h3> | |
| <p>VAE maximizes the Evidence Lower Bound:</p> | |
| <div class="formula"> | |
| L = E[log p(x|z)] - KL(q(z|x) || p(z))<br> | |
| <br> | |
| Where:<br> | |
| • First term: Reconstruction quality<br> | |
| • Second term: KL divergence regularization (push q toward N(0,1)) | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: KL Divergence</div> | |
| For Gaussians:<br> | |
| KL = -0.5 × Σ(1 + log(σ²) - μ² - σ²)<br> | |
| This has a closed-form solution - no sampling needed! | |
| </div> | |
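| <p>The ELBO and the reparameterization trick are short in code; a sketch assuming the encoder outputs μ and log σ²:</p> | |
| <span class="code-title">📄 vae_loss.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn.functional <span class="keyword">as</span> F | |
| <span class="keyword">def</span> <span class="function">reparameterize</span>(mu, logvar): | |
|     std = torch.exp(<span class="number">0.5</span> * logvar) | |
|     <span class="keyword">return</span> mu + std * torch.randn_like(std)  <span class="comment"># z = μ + σ·ε stays differentiable</span> | |
| <span class="keyword">def</span> <span class="function">vae_loss</span>(recon_x, x, mu, logvar): | |
|     recon = F.mse_loss(recon_x, x, reduction=<span class="string">"sum"</span>)  <span class="comment"># reconstruction term</span> | |
|     kl = -<span class="number">0.5</span> * torch.sum(<span class="number">1</span> + logvar - mu.pow(<span class="number">2</span>) - logvar.exp())  <span class="comment"># closed-form KL</span> | |
|     <span class="keyword">return</span> recon + kl</div> | |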
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🗜️ Compression</div> | |
| <div class="box-content">Dimensionality reduction, data compression, feature extraction</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🔍 Anomaly Detection</div> | |
| <div class="box-content">High reconstruction error = anomaly (fraud detection, defect detection)</div> | |
| </div> | |
| ` | |
| }, | |
| "gans": { | |
| overview: ` | |
| <p>Generative Adversarial Networks (GANs) are a framework for training generative models using an <strong>adversarial game</strong> between two neural networks: a <strong>Generator</strong> (forger) and a <strong>Discriminator</strong> (detective).</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">🎨 The Art Forger Analogy</div> | |
| The Generator is an art forger trying to create fake paintings that look real. The Discriminator is an art detective trying to spot fakes. As the detective gets better at spotting fakes, the forger improves to fool the detective. Eventually, the forger creates paintings so good that even the detective can't tell them apart from real ones. | |
| </div> | |
| <h3>The Min-Max Game</h3> | |
| <div class="formula" style="text-align:center;"> | |
| $$\\min_G \\max_D \\; V(D,G) = \\mathbb{E}_{x \\sim p_{data}}[\\log D(x)] + \\mathbb{E}_{z \\sim p_z}[\\log(1 - D(G(z)))]$$ | |
| </div> | |
| <p><strong>D(x)</strong> = probability that $x$ is real. <strong>G(z)</strong> = fake image generated from noise $z$.</p> | |
| <h3>GAN Variants</h3> | |
| <table style="width:100%; border-collapse: collapse; margin: 15px 0;"> | |
| <tr style="border-bottom: 2px solid var(--cyan);"> | |
| <th style="padding: 10px; color: var(--cyan);">Variant</th> | |
| <th style="padding: 10px; color: var(--cyan);">Innovation</th> | |
| <th style="padding: 10px; color: var(--cyan);">Use Case</th> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">DCGAN</td><td>Convolutional architecture</td><td>Image generation</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">WGAN</td><td>Wasserstein distance (stable training)</td><td>Any generation task</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Pix2Pix</td><td>Paired image-to-image translation</td><td>Sketch → Photo</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">CycleGAN</td><td>Unpaired image translation</td><td>Horse → Zebra</td></tr> | |
| <tr><td style="padding: 8px;">StyleGAN</td><td>Style-based generation (faces)</td><td>High-res face synthesis</td></tr> | |
| </table> | |
| <div class="callout warning"> | |
| <div class="callout-title">⚠️ GAN Training Challenges</div> | |
| <strong>Mode Collapse:</strong> Generator produces only one type of output. Solution: use WGAN or add diversity penalties.<br> | |
| <strong>Training Instability:</strong> G and D can oscillate without converging. Solution: use spectral normalization, gradient penalty.<br> | |
| <strong>Evaluation:</strong> Hard to objectively measure quality. Metrics like FID (Fréchet Inception Distance) help but aren't perfect. | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🎨 Image Generation</div> | |
| <div class="box-content"> | |
| <strong>StyleGAN:</strong> Photorealistic faces, art generation<br> | |
| <strong>DCGAN:</strong> Bedroom images, object generation | |
| </div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>The Minimax Game Objective</h3> | |
| <p>The original GAN objective from Ian Goodfellow (2014) is a zero-sum game between Discriminator (D) and Generator (G).</p> | |
| <div class="formula" style="font-size: 1.1rem; padding: 20px;"> | |
| min_G max_D V(D, G) = E_x∼p_data[log D(x)] + E_z∼p_z[log(1 - D(G(z)))] | |
| </div> | |
| <h3>Paper & Pain: Finding the Optimal Discriminator</h3> | |
| <p>For a fixed Generator, the optimal Discriminator D* is:</p> | |
| <div class="formula"> | |
| D*(x) = p_data(x) / (p_data(x) + p_g(x)) | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Theoretical Insight</div> | |
| When the Discriminator is optimal, the Generator's task is essentially to minimize the <strong>Jensen-Shannon Divergence (JSD)</strong> between the data distribution and the model distribution. <br> | |
| <strong>Problem:</strong> JSD is "flat" when distributions don't overlap, leading to vanishing gradients. This is why <strong>Wasserstein GAN (WGAN)</strong> was invented—using Earth Mover's distance instead! | |
| </div> | |
| <h3>Generator Gradient Problem</h3> | |
| <p>Early in training, D(G(z)) is near 0. The term log(1-D(G(z))) has a very small gradient. </p> | |
| <div class="list-item"> | |
| <div class="list-num">💡</div> | |
| <div><strong>Heuristic Fix:</strong> Instead of minimizing log(1-D(G(z))), we maximize <strong>log D(G(z))</strong>. This provides much stronger gradients early on!</div> | |
| </div> | |
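| <p>In code, the heuristic fix is just binary cross-entropy against "real" labels; a sketch of the non-saturating generator loss:</p> | |
| <span class="code-title">📄 nonsaturating_loss.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn.functional <span class="keyword">as</span> F | |
| <span class="keyword">def</span> <span class="function">generator_loss</span>(d_fake): | |
|     <span class="comment"># d_fake = D(G(z)) in (0, 1); minimizing this maximizes log D(G(z))</span> | |
|     <span class="keyword">return</span> F.binary_cross_entropy(d_fake, torch.ones_like(d_fake))</div> | |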
| `, | |
| code: ` | |
| <h3>GAN Objective (Min-Max Game)</h3> | |
| <div class="formula" style="text-align:center;"> | |
| $$\\min_G \\max_D \\; V(D,G) = \\mathbb{E}_{x \\sim p_{data}}[\\log D(x)] + \\mathbb{E}_{z \\sim p_z}[\\log(1 - D(G(z)))]$$ | |
| </div> | |
| <p><strong>Generator</strong> tries to minimize — create fakes so good that $D(G(z)) \\to 1$. <strong>Discriminator</strong> tries to maximize — correctly classify real vs fake.</p> | |
| <span class="code-title">📄 dcgan.py</span><div class="code-block"><span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| <span class="keyword">class</span> <span class="class-name">Generator</span>(nn.Module): | |
| <span class="keyword">def</span> <span class="function">__init__</span>(self, latent_dim=<span class="number">100</span>): | |
| <span class="builtin">super</span>().__init__() | |
| self.net = nn.Sequential( | |
| nn.ConvTranspose2d(latent_dim, <span class="number">512</span>, <span class="number">4</span>, <span class="number">1</span>, <span class="number">0</span>), | |
| nn.BatchNorm2d(<span class="number">512</span>), nn.ReLU(), | |
| nn.ConvTranspose2d(<span class="number">512</span>, <span class="number">256</span>, <span class="number">4</span>, <span class="number">2</span>, <span class="number">1</span>), | |
| nn.BatchNorm2d(<span class="number">256</span>), nn.ReLU(), | |
| nn.ConvTranspose2d(<span class="number">256</span>, <span class="number">128</span>, <span class="number">4</span>, <span class="number">2</span>, <span class="number">1</span>), | |
| nn.BatchNorm2d(<span class="number">128</span>), nn.ReLU(), | |
| nn.ConvTranspose2d(<span class="number">128</span>, <span class="number">3</span>, <span class="number">4</span>, <span class="number">2</span>, <span class="number">1</span>), | |
| nn.Tanh(), <span class="comment"># Output: [-1, 1]</span> | |
| ) | |
| <span class="keyword">def</span> <span class="function">forward</span>(self, z): | |
| <span class="keyword">return</span> self.net(z.view(-<span class="number">1</span>, <span class="number">100</span>, <span class="number">1</span>, <span class="number">1</span>)) | |
| <span class="keyword">class</span> <span class="class-name">Discriminator</span>(nn.Module): | |
| <span class="keyword">def</span> <span class="function">__init__</span>(self): | |
| <span class="builtin">super</span>().__init__() | |
| self.net = nn.Sequential( | |
| nn.Conv2d(<span class="number">3</span>, <span class="number">64</span>, <span class="number">4</span>, <span class="number">2</span>, <span class="number">1</span>), | |
| nn.LeakyReLU(<span class="number">0.2</span>), | |
| nn.Conv2d(<span class="number">64</span>, <span class="number">128</span>, <span class="number">4</span>, <span class="number">2</span>, <span class="number">1</span>), | |
| nn.BatchNorm2d(<span class="number">128</span>), nn.LeakyReLU(<span class="number">0.2</span>), | |
| nn.Conv2d(<span class="number">128</span>, <span class="number">1</span>, <span class="number">4</span>, <span class="number">1</span>, <span class="number">0</span>), | |
| nn.Sigmoid(), | |
| ) | |
| <span class="keyword">def</span> <span class="function">forward</span>(self, x): | |
| <span class="keyword">return</span> self.net(x).view(-<span class="number">1</span>)</div> | |
| ` | |
| }, | |
| "diffusion": { | |
| overview: ` | |
| <h3>Diffusion Models</h3> | |
| <p>Learn to reverse a gradual noising process, generating high-quality images.</p> | |
| <h3>How Diffusion Works</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Forward Process:</strong> Gradually add Gaussian noise over T steps (x₀ → x₁ → ... → x_T = pure noise)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Reverse Process:</strong> Train neural network to denoise (x_T → x_{T-1} → ... → x₀ = clean image)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Generation:</strong> Start from random noise, iteratively denoise T steps</div> | |
| </div> | |
| <div class="callout tip"> | |
| <div class="callout-title">✅ Advantages over GANs</div> | |
| • More stable training (no adversarial dynamics)<br> | |
| • Better sample quality and diversity<br> | |
| • Mode coverage (no mode collapse)<br> | |
| • Controllable generation (text-to-image) | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Key Components</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>U-Net Backbone:</strong> Encoder-decoder with skip connections predicts noise at each step</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Time Embedding:</strong> Tell the model which timestep it's at (sinusoidal encoding)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>CLIP Conditioning:</strong> Guide generation with text embeddings (Stable Diffusion)</div> | |
| </div> | |
| <h3>Latent Diffusion</h3> | |
| <p>Instead of diffusing in pixel space (expensive), work in a VAE latent space (8× smaller per spatial dimension).</p> | |
| <div class="formula"> | |
| Image (512×512×3) → VAE Encoder → Latent (64×64×4) → Diffuse → Decode | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Forward Process (Noising)</h3> | |
| <p>Add Gaussian noise according to a schedule β_t:</p> | |
| <div class="formula"> | |
| q(x_t | x_{t-1}) = N(x_t; √(1-β_t) × x_{t-1}, β_t × I)<br> | |
| <br> | |
| Or in closed form for any t:<br> | |
| x_t = √(ᾱ_t) × x_0 + √(1-ᾱ_t) × ε<br> | |
| Where ᾱ_t = Π_{s=1}^t (1-β_s) | |
| </div> | |
| <h3>Training Objective</h3> | |
| <p>Simple noise prediction loss:</p> | |
| <div class="formula"> | |
| L = E[||ε - ε_θ(x_t, t)||²] | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Simplified Loss</div> | |
| The full variational bound is complex, but Ho et al. (2020) showed this simple MSE loss on noise prediction works just as well and is much easier to implement! | |
| </div> | |
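| <p>The closed-form forward process makes training simple: sample t, noise the image in one shot, and regress the noise. A sketch using the linear β schedule from Ho et al. (2020):</p> | |
| <span class="code-title">📄 forward_diffusion.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| T = <span class="number">1000</span> | |
| betas = torch.linspace(<span class="number">1e-4</span>, <span class="number">0.02</span>, T)           <span class="comment"># linear β schedule</span> | |
| alphas_bar = torch.cumprod(<span class="number">1</span> - betas, dim=<span class="number">0</span>)    <span class="comment"># ᾱ_t = Π (1-β_s)</span> | |
| <span class="keyword">def</span> <span class="function">q_sample</span>(x0, t, noise): | |
|     ab = alphas_bar[t].view(-<span class="number">1</span>, <span class="number">1</span>, <span class="number">1</span>, <span class="number">1</span>) | |
|     <span class="keyword">return</span> ab.sqrt() * x0 + (<span class="number">1</span> - ab).sqrt() * noise  <span class="comment"># x_t in a single step</span> | |
| <span class="comment"># training loss: ||noise - eps_theta(q_sample(x0, t, noise), t)||²</span></div> | |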
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🖼️ Text-to-Image</div> | |
| <div class="box-content"> | |
| <strong>Stable Diffusion:</strong> Open-source, runs on consumer GPUs<br> | |
| <strong>DALL-E 2:</strong> OpenAI's photorealistic generator<br> | |
| <strong>Midjourney:</strong> Artistic image generation | |
| </div> | |
| </div> | |
| ` | |
| }, | |
| "rnn": { | |
| overview: ` | |
| <p>Recurrent Neural Networks (RNNs) are designed for <strong>sequential data</strong> — text, time series, audio, video frames. Unlike feedforward networks that process inputs independently, RNNs have a <strong>memory</strong> (hidden state) that carries information from previous time steps.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">📖 The Reading Analogy</div> | |
| When you read a sentence, you don't forget the beginning when you reach the end. Your brain maintains context. RNNs do the same — they process one word (token) at a time while maintaining a hidden state that summarizes everything seen so far. | |
| </div> | |
| <h3>Vanilla RNN vs. LSTM vs. GRU</h3> | |
| <table style="width:100%; border-collapse: collapse; margin: 15px 0;"> | |
| <tr style="border-bottom: 2px solid var(--cyan);"> | |
| <th style="padding: 10px; color: var(--cyan);">Feature</th> | |
| <th style="padding: 10px; color: var(--cyan);">Vanilla RNN</th> | |
| <th style="padding: 10px; color: var(--cyan);">LSTM</th> | |
| <th style="padding: 10px; color: var(--cyan);">GRU</th> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Memory</td><td>Short (5-10 steps)</td><td>Long (100+ steps)</td><td>Long</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Gates</td><td>None</td><td>3 (forget, input, output)</td><td>2 (reset, update)</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Parameters</td><td>Fewest</td><td>Most (4x hidden_dim²)</td><td>Middle (3x hidden_dim²)</td></tr> | |
| <tr><td style="padding: 8px;">Use Today?</td><td>❌ Rarely</td><td>✅ Still useful</td><td>✅ Often preferred</td></tr> | |
| </table> | |
| <h3>The Vanishing Gradient Problem in RNNs</h3> | |
| <p>Vanilla RNNs multiply the gradient by the same weight matrix at each time step. After many steps, the gradient either <strong>vanishes</strong> (matrix eigenvalues < 1) or <strong>explodes</strong> (eigenvalues > 1). LSTMs solve this with a special <strong>cell state</strong> highway that allows gradients to flow unchanged over many steps.</p> | |
| <div class="callout tip"> | |
| <div class="callout-title">🔮 Historical Note</div> | |
| RNNs dominated NLP from 2013-2017. The Transformer architecture (2017) has now largely replaced them because Transformers can process all tokens in parallel, while RNNs must process sequentially. However, RNNs are still used for real-time streaming data and edge devices. | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Step-by-Step LSTM Walkthrough</h3> | |
| <p>LSTMs control the cell state via "Gates".</p> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Forget Gate layer:</strong> "What do we throw away?"<br> | |
| <span class="formula-caption">σ(W_f · [h_{t-1}, x_t] + b_f)</span><br> | |
| Output 0 = complete forget, 1 = keep everything.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Input Gate layer:</strong> "What new info do we add?"<br> | |
| 1. Sigmoid layer decides <strong>which values</strong> to update.<br> | |
| 2. Tanh layer creates <strong>new candidate values</strong> (~C_t).</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Update Cell State:</strong> The Critical Step<br> | |
| <span class="formula" style="font-size:1.1rem">C_t = f_t * C_{t-1} + i_t * ~C_t</span><br> | |
| We multiply old state by f_t (forgetting things) and add i_t * ~C_t (adding new things).</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>Output Gate:</strong> "What do we reveal?"<br> | |
| We filter the cell state: h_t = o_t * tanh(C_t).</div> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">💡 Intuition: Sigmoid vs Tanh</div> | |
| • <strong>Sigmoid (0 to 1):</strong> Acts like a valve or gate. Open/Close.<br> | |
| • <strong>Tanh (-1 to 1):</strong> Creates content/information. Normalized centered data. | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Paper & Pain: Why LSTMs Don't Vanish</h3> | |
| <p>Let's look at the gradient flow in the Cell State equation:</p> | |
| <div class="formula"> | |
| C_t = f_t \\cdot C_{t-1} + i_t \\cdot \\tilde{C}_t | |
| </div> | |
| <p>During backpropagation (BPTT), the derivative of C_t with respect to C_{t-1} is:</p> | |
| <div class="formula" style="color: #00ff88;"> | |
| \\frac{\\partial C_t}{\\partial C_{t-1}} = f_t + \\dots | |
| </div> | |
| <div class="callout tip"> | |
| <div class="callout-title">✅ The Additive Gradient Highway</div> | |
| In standard RNNs, we had multiplicative gradients (W^t). <br> | |
| In LSTMs, the gradient is <strong>additive</strong>. If the Forget Gate f_t ≈ 1, the gradient passes through UNCHANGED (1.0).<br> | |
| The error signal can travel back 1000 steps without vanishing! | |
| </div> | |
| <h3>Paper & Pain: Manual "Echo" Task</h3> | |
| <p>Task: the input is a stream of 0s. When a 1 appears, remember it and output a 1 exactly 3 steps later.</p> | |
| <div class="list-item"> | |
| <div class="list-num">📝</div> | |
| <div><strong>Strategy:</strong> Open the Input Gate (i_t = 1) only on the trigger, then hold the Forget Gate at f_t = 1 so the stored value is retained.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">🧮</div> | |
| <div><strong>Weights:</strong> We can manually solve for weights that force i_t high only when x_t=1.</div> | |
| </div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🗣️ Sequence-to-Sequence</div> | |
| <div class="box-content"> | |
| <strong>Translation:</strong> Google Translate (pre-Transformer) used massive LSTM stacks (GNMT). | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">✍️ Handwriting Generation</div> | |
| <div class="box-content"> | |
| <strong>Alex Graves (2013):</strong> LSTMs can generate realistic cursive handwriting by predicting pen coordinates. | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🎵 Music Composition</div> | |
| <div class="box-content"> | |
| Generating melody and chords where context (key signature, tempo) must be maintained for minutes. | |
| </div> | |
| </div> | |
| `, | |
| code: ` | |
| <h3>LSTM Cell Equations</h3> | |
| <div class="formula" style="text-align:center;"> | |
| <strong>Forget Gate:</strong> $$f_t = \\sigma(W_f \\cdot [h_{t-1}, x_t] + b_f)$$ | |
| <strong>Input Gate:</strong> $$i_t = \\sigma(W_i \\cdot [h_{t-1}, x_t] + b_i)$$ | |
| <strong>Cell Update:</strong> $$\\tilde{C}_t = \\tanh(W_C \\cdot [h_{t-1}, x_t] + b_C)$$ | |
| $$C_t = f_t \\odot C_{t-1} + i_t \\odot \\tilde{C}_t$$ | |
| <strong>Output Gate:</strong> $$o_t = \\sigma(W_o \\cdot [h_{t-1}, x_t] + b_o)$$ | |
| $$h_t = o_t \\odot \\tanh(C_t)$$ | |
| </div> | |
| <span class="code-title">📄 lstm_sentiment.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| <span class="keyword">class</span> <span class="class-name">SentimentLSTM</span>(nn.Module): | |
| <span class="keyword">def</span> <span class="function">__init__</span>(self, vocab_size, embed_dim, hidden_dim): | |
| <span class="builtin">super</span>().__init__() | |
| self.embedding = nn.Embedding(vocab_size, embed_dim) | |
| self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=<span class="keyword">True</span>, bidirectional=<span class="keyword">True</span>) | |
| self.fc = nn.Linear(hidden_dim * <span class="number">2</span>, <span class="number">1</span>) <span class="comment"># *2 for bidirectional</span> | |
| <span class="keyword">def</span> <span class="function">forward</span>(self, x): | |
| embeds = self.embedding(x) <span class="comment"># (batch, seq_len, embed_dim)</span> | |
| lstm_out, (h_n, c_n) = self.lstm(embeds) <span class="comment"># h_n: final hidden states</span> | |
| hidden = torch.cat((h_n[-<span class="number">2</span>], h_n[-<span class="number">1</span>]), dim=<span class="number">1</span>) | |
| <span class="keyword">return</span> torch.sigmoid(self.fc(hidden)) | |
| model = SentimentLSTM(vocab_size=<span class="number">10000</span>, embed_dim=<span class="number">128</span>, hidden_dim=<span class="number">256</span>) | |
| x = torch.randint(<span class="number">0</span>, <span class="number">10000</span>, (<span class="number">4</span>, <span class="number">50</span>)) <span class="comment"># batch=4, seq_len=50</span> | |
| <span class="keyword">print</span>(model(x).shape) <span class="comment"># (4, 1)</span></div> | |
| ` | |
| }, | |
| "bert": { | |
| overview: ` | |
| <p>BERT (Bidirectional Encoder Representations from Transformers) revolutionized NLP in 2018. Unlike GPT which reads left-to-right, BERT reads <strong>both directions simultaneously</strong>, giving it a deeper understanding of context.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 The Fill-in-the-Blank Analogy</div> | |
| BERT was trained by randomly masking 15% of words and predicting them using the surrounding context (both left and right). It's like a student learning vocabulary by doing fill-in-the-blank exercises: "The cat sat on the [MASK]" → "mat". This bidirectional context is what makes BERT so powerful for understanding language. | |
| </div> | |
| <h3>Two Training Objectives</h3> | |
| <div class="list-item"><div class="list-num">1</div><div><strong>Masked Language Modeling (MLM):</strong> Randomly mask 15% of tokens and predict them. This forces the model to understand context from both sides.</div></div> | |
| <div class="list-item"><div class="list-num">2</div><div><strong>Next Sentence Prediction (NSP):</strong> Given two sentences, predict if sentence B follows sentence A. This teaches the model inter-sentence relationships. (Note: Later papers showed NSP isn't crucial.)</div></div> | |
| <h3>BERT vs GPT</h3> | |
| <table style="width:100%; border-collapse: collapse; margin: 15px 0;"> | |
| <tr style="border-bottom: 2px solid var(--cyan);"> | |
| <th style="padding: 10px; color: var(--cyan);">Feature</th> | |
| <th style="padding: 10px; color: var(--cyan);">BERT</th> | |
| <th style="padding: 10px; color: var(--cyan);">GPT</th> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Direction</td><td>Bidirectional</td><td>Left-to-right (autoregressive)</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Best for</td><td>Understanding (classification, QA, NER)</td><td>Generation (text, code, chat)</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Architecture</td><td>Transformer Encoder</td><td>Transformer Decoder</td></tr> | |
| <tr><td style="padding: 8px;">Parameters</td><td>110M (base) / 340M (large)</td><td>175B (GPT-3) / 1.7T (GPT-4)</td></tr> | |
| </table> | |
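| <p>The quickest way to see MLM in action is Hugging Face's fill-mask pipeline. A short sketch (exact predictions and scores vary by model version):</p> | |
| <span class="code-title">📄 bert_fill_mask.py</span><div class="code-block"><span class="keyword">from</span> transformers <span class="keyword">import</span> pipeline | |
| unmasker = pipeline(<span class="string">"fill-mask"</span>, model=<span class="string">"bert-base-uncased"</span>) | |
| <span class="keyword">for</span> pred <span class="keyword">in</span> unmasker(<span class="string">"The cat sat on the [MASK]."</span>)[:<span class="number">3</span>]: | |
|     <span class="keyword">print</span>(pred[<span class="string">"token_str"</span>], <span class="builtin">round</span>(pred[<span class="string">"score"</span>], <span class="number">3</span>)) | |
| <span class="comment"># e.g. mat / floor / couch, predicted from BOTH left and right context</span></div> | |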
| `, | |
| concepts: ` | |
| <h3>BERT Architecture</h3> | |
| <p>BERT uses a multi-layer bidirectional Transformer encoder based on Vaswani et al. (2017).</p> | |
| <h3>Model Variants</h3> | |
| <table> | |
| <tr> | |
| <th>Model</th> | |
| <th>Layers (L)</th> | |
| <th>Hidden Size (H)</th> | |
| <th>Attention Heads (A)</th> | |
| <th>Parameters</th> | |
| </tr> | |
| <tr> | |
| <td>BERT<sub>BASE</sub></td> | |
| <td>12</td> | |
| <td>768</td> | |
| <td>12</td> | |
| <td>110M</td> | |
| </tr> | |
| <tr> | |
| <td>BERT<sub>LARGE</sub></td> | |
| <td>24</td> | |
| <td>1024</td> | |
| <td>16</td> | |
| <td>340M</td> | |
| </tr> | |
| </table> | |
| <p><em>Note: BERT<sub>BASE</sub> was designed to match GPT's size for comparison.</em></p> | |
| <h3>Input Representation</h3> | |
| <p>BERT's input embedding is the sum of three components:</p> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Token Embeddings:</strong> WordPiece tokenization with 30,000 token vocabulary. Handles unknown words by splitting into subwords (e.g., "playing" → "play" + "##ing")</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Segment Embeddings:</strong> Learned embedding to distinguish sentence A from sentence B (E<sub>A</sub> or E<sub>B</sub>)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Position Embeddings:</strong> Learned positional encodings (unlike Transformers' sinusoidal), supports sequences up to 512 tokens</div> | |
| </div> | |
| <div class="formula"> | |
| Input = Token_Embedding + Segment_Embedding + Position_Embedding | |
| </div> | |
| <h3>Special Tokens</h3> | |
| <div class="info-box"> | |
| <div class="box-title">🏷️ Special Token Usage</div> | |
| <div class="box-content"> | |
| <strong>[CLS]:</strong> Prepended to every input. Final hidden state used for classification tasks<br> | |
| <strong>[SEP]:</strong> Separates sentence pairs and marks sequence end<br> | |
| <strong>[MASK]:</strong> Replaces masked tokens during pre-training (not used during fine-tuning) | |
| </div> | |
| </div> | |
| <h4>Example Input Format</h4> | |
| <div class="formula"> | |
| [CLS] My dog is cute [SEP] He likes playing [SEP]<br> | |
| <br> | |
| Tokens: [CLS] My dog is cute [SEP] He likes play ##ing [SEP]<br> | |
| Segments: E_A E_A E_A E_A E_A E_A E_B E_B E_B E_B E_B<br> | |
| Positions: 0 1 2 3 4 5 6 7 8 9 10 | |
| </div> | |
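| <p>The same format can be reproduced with the reference tokenizer. A short sketch (whether a given word is split depends on the 30K WordPiece vocabulary):</p> | |
| <span class="code-title">📄 bert_tokenize.py</span><div class="code-block"><span class="keyword">from</span> transformers <span class="keyword">import</span> BertTokenizer | |
| tok = BertTokenizer.from_pretrained(<span class="string">"bert-base-uncased"</span>) | |
| enc = tok(<span class="string">"My dog is cute"</span>, <span class="string">"He likes playing"</span>) | |
| <span class="keyword">print</span>(tok.convert_ids_to_tokens(enc[<span class="string">"input_ids"</span>])) | |
| <span class="comment"># lowercased WordPiece tokens; out-of-vocab words split into '##' pieces</span> | |
| <span class="keyword">print</span>(enc[<span class="string">"token_type_ids"</span>]) <span class="comment"># 0 = segment A, 1 = segment B</span></div> | |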
| <h3>Fine-tuning for Different Tasks</h3> | |
| <table> | |
| <tr><th>Task Type</th><th>Input Format</th><th>Output</th></tr> | |
| <tr> | |
| <td>Classification</td> | |
| <td>[CLS] text [SEP]</td> | |
| <td>[CLS] representation → classifier</td> | |
| </tr> | |
| <tr> | |
| <td>Sentence Pair</td> | |
| <td>[CLS] sent A [SEP] sent B [SEP]</td> | |
| <td>[CLS] representation → classifier</td> | |
| </tr> | |
| <tr> | |
| <td>Question Answering</td> | |
| <td>[CLS] question [SEP] passage [SEP]</td> | |
| <td>Start/End span vectors over passage tokens</td> | |
| </tr> | |
| <tr> | |
| <td>Token Classification</td> | |
| <td>[CLS] text [SEP]</td> | |
| <td>Each token representation → label</td> | |
| </tr> | |
| </table> | |
| `, | |
| math: ` | |
| <h3>Pre-training Objective</h3> | |
| <p>BERT simultaneously optimizes two unsupervised tasks:</p> | |
| <div class="formula"> | |
| L = L<sub>MLM</sub> + L<sub>NSP</sub> | |
| </div> | |
| <h3>Masked Language Modeling (MLM)</h3> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: The Masking Strategy</div> | |
| <strong>Problem:</strong> Standard left-to-right language modeling can't capture bidirectional context.<br> | |
| <strong>Solution:</strong> Randomly mask 15% of tokens and predict them using full context.<br><br> | |
| <strong>However:</strong> [MASK] token doesn't appear during fine-tuning!<br> | |
| <strong>Clever Fix:</strong> Of the 15% selected tokens:<br> | |
| • 80% → Replace with [MASK]<br> | |
| • 10% → Replace with random token<br> | |
| • 10% → Keep unchanged<br><br> | |
| This forces the model to maintain context representations for ALL tokens! | |
| </div> | |
| <h4>MLM Loss Derivation</h4> | |
| <p>Let's work through the MLM objective step by step:</p> | |
| <div class="formula"> | |
| Given input sequence: x = [x₁, x₂, ..., x_n]<br> | |
| Masked sequence: x̃ = [x̃₁, x̃₂, ..., x̃_n]<br> | |
| <br> | |
| Let M = {i₁, i₂, ..., i_m} be indices of masked tokens<br> | |
| <br> | |
| For each masked position i ∈ M:<br> | |
| h_i = BERT(x̃)_i (hidden state at position i)<br> | |
| logits_i = W · h_i + b (W ∈ ℝ^(V×H), vocab size V)<br> | |
| P(x_i | x̃) = softmax(logits_i)<br> | |
| <br> | |
| Cross-entropy loss per token:<br> | |
| L_i = -log P(x_i | x̃)<br> | |
| <br> | |
| Total MLM loss:<br> | |
| L_MLM = (1/|M|) Σ_{i∈M} L_i<br> | |
| L_MLM = -(1/|M|) Σ_{i∈M} log P(x_i | x̃) | |
| </div> | |
| <div class="callout warning"> | |
| <div class="callout-title">📊 Worked Example: MLM Calculation</div> | |
| <strong>Input:</strong> "The cat sat on the mat"<br> | |
| <strong>After masking (15%):</strong> "The [MASK] sat on the mat"<br> | |
| <strong>Target:</strong> Predict "cat" at position 2<br><br> | |
| <strong>Step 1:</strong> Forward pass through BERT<br> | |
| h₂ = BERT(x̃)₂ ∈ ℝ^768 (for BERT_BASE)<br><br> | |
| <strong>Step 2:</strong> Project to vocabulary space<br> | |
| logits₂ = W · h₂ + b ∈ ℝ^30000<br><br> | |
| <strong>Step 3:</strong> Compute probabilities<br> | |
| P(w | x̃) = exp(logits₂[w]) / Σ_v exp(logits₂[v])<br><br> | |
| <strong>Step 4:</strong> Compute loss (assume P("cat"|x̃) = 0.73)<br> | |
| L = -log(0.73) = 0.315 | |
| </div> | |
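| <p>The same computation in a few lines of PyTorch (random tensors stand in for BERT's hidden state and projection; the token id for "cat" is illustrative):</p> | |
| <span class="code-title">📄 mlm_loss_sketch.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn.functional <span class="keyword">as</span> F | |
| V, H = <span class="number">30000</span>, <span class="number">768</span> <span class="comment"># vocab size, hidden size (BERT_BASE)</span> | |
| h = torch.randn(<span class="number">1</span>, H) <span class="comment"># hidden state at the masked position</span> | |
| W = torch.randn(V, H); b = torch.zeros(V) | |
| logits = h @ W.T + b <span class="comment"># project to vocabulary space: (1, 30000)</span> | |
| target = torch.tensor([<span class="number">4937</span>]) <span class="comment"># hypothetical id for "cat"</span> | |
| loss = F.cross_entropy(logits, target) <span class="comment"># = -log P("cat" | context)</span> | |
| <span class="keyword">print</span>(loss.item())</div> | |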
| <h3>Next Sentence Prediction (NSP)</h3> | |
| <p>Binary classification task to understand sentence relationships.</p> | |
| <div class="formula"> | |
| Input: [CLS] sentence_A [SEP] sentence_B [SEP]<br> | |
| <br> | |
| Let C = final hidden state of [CLS] token ∈ ℝ^H<br> | |
| <br> | |
| P(IsNext = True) = σ(W_NSP · C)<br> | |
| where σ = sigmoid function, W_NSP ∈ ℝ^(1×H)<br> | |
| <br> | |
| Binary cross-entropy loss:<br> | |
| L_NSP = -[y·log(ŷ) + (1-y)·log(1-ŷ)]<br> | |
| where y = 1 if B follows A, else 0 | |
| </div> | |
| <h4>NSP Training Data Generation</h4> | |
| <ul> | |
| <li><strong>50% IsNext:</strong> B actually follows A in corpus</li> | |
| <li><strong>50% NotNext:</strong> B sampled randomly from another document</li> | |
| </ul> | |
| <h3>Fine-tuning Math: Question Answering (SQuAD)</h3> | |
| <div class="formula"> | |
| Input: [CLS] question [SEP] paragraph [SEP]<br> | |
| <br> | |
| Let T_i = final hidden state for token i in paragraph<br> | |
| <br> | |
| Start position logits: S_i = W_start · T_i<br> | |
| End position logits: E_i = W_end · T_i<br> | |
| <br> | |
| P(start = i) = softmax(S)_i<br> | |
| P(end = j) = softmax(E)_j<br> | |
| <br> | |
| Answer span = tokens from position i to j<br> | |
| <br> | |
| Training loss:<br> | |
| L = -log P(start = i*) - log P(end = j*)<br> | |
| where i*, j* are ground truth positions | |
| </div> | |
| `, | |
| applications: ` | |
| <h3>SQuAD Benchmark Performance</h3> | |
| <div class="info-box"> | |
| <div class="box-title">🏆 Stanford Question Answering Dataset (SQuAD)</div> | |
| <div class="box-content"> | |
| <strong>SQuAD 1.1:</strong> 100,000+ question-answer pairs on 500+ Wikipedia articles. Every question has an answer span in the passage.<br><br> | |
| <strong>SQuAD 2.0:</strong> Adds 50,000+ unanswerable questions. Models must determine when no answer exists.<br><br> | |
| <strong>Evaluation Metrics:</strong><br> | |
| • <strong>EM (Exact Match):</strong> % of predictions matching ground truth exactly<br> | |
| • <strong>F1:</strong> Token-level overlap between prediction and ground truth | |
| </div> | |
| </div> | |
| <h3>SQuAD 1.1 Results</h3> | |
| <table> | |
| <tr><th>Model</th><th>EM</th><th>F1</th></tr> | |
| <tr><td>Human Performance</td><td>82.3</td><td>91.2</td></tr> | |
| <tr><td>BERT<sub>BASE</sub></td><td>80.8</td><td>88.5</td></tr> | |
| <tr><td>BERT<sub>LARGE</sub></td><td><strong>84.1</strong></td><td><strong>90.9</strong></td></tr> | |
| </table> | |
| <p><em>BERT<sub>LARGE</sub> surpassed human performance on EM!</em></p> | |
| <h3>SQuAD 2.0 Results</h3> | |
| <table> | |
| <tr><th>Model</th><th>EM</th><th>F1</th></tr> | |
| <tr><td>Human Performance</td><td>86.9</td><td>89.5</td></tr> | |
| <tr><td>BERT<sub>BASE</sub></td><td>73.7</td><td>76.3</td></tr> | |
| <tr><td>BERT<sub>LARGE</sub></td><td><strong>78.7</strong></td><td><strong>81.9</strong></td></tr> | |
| </table> | |
| <div class="callout tip"> | |
| <div class="callout-title">💡 Example SQuAD Question</div> | |
| <strong>Passage:</strong> "The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France."<br><br> | |
| <strong>Question:</strong> "In what country is Normandy located?"<br><br> | |
| <strong>BERT Answer:</strong> "France" ✓<br> | |
| <strong>Start Token:</strong> position 32<br> | |
| <strong>End Token:</strong> position 32 | |
| </div> | |
| <h3>GLUE Benchmark (General Language Understanding Evaluation)</h3> | |
| <p>BERT set new state-of-the-art on all 9 GLUE tasks:</p> | |
| <table> | |
| <tr><th>Task</th><th>Metric</th><th>Previous SOTA</th><th>BERT<sub>LARGE</sub></th></tr> | |
| <tr><td>MNLI (NLI)</td><td>Acc</td><td>86.6</td><td><strong>86.7</strong></td></tr> | |
| <tr><td>QQP (Paraphrase)</td><td>F1</td><td>66.1</td><td><strong>72.1</strong></td></tr> | |
| <tr><td>QNLI (QA/NLI)</td><td>Acc</td><td>87.4</td><td><strong>92.7</strong></td></tr> | |
| <tr><td>SST-2 (Sentiment)</td><td>Acc</td><td>93.2</td><td><strong>94.9</strong></td></tr> | |
| <tr><td>CoLA (Acceptability)</td><td>Matthews corr.</td><td>35.0</td><td><strong>60.5</strong></td></tr> | |
| </table> | |
| <h3>Additional Applications</h3> | |
| <div class="info-box"> | |
| <div class="box-title">🔍 Google Search</div> | |
| <div class="box-content"> | |
| In October 2019, Google began using BERT for 1 in 10 English search queries, calling it the biggest leap in 5 years. BERT helps understand search intent and context. | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🏷️ Named Entity Recognition (NER)</div> | |
| <div class="box-content"> | |
| BERT excels at identifying entities (person, location, organization) in text by treating it as token classification. Each token gets a label (B-PER, I-PER, B-LOC, etc.). | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">📊 Text Classification</div> | |
| <div class="box-content"> | |
| Sentiment analysis, topic classification, spam detection - all benefit from BERT's contextual understanding. Simply use [CLS] representation with a classifier. | |
| </div> | |
| </div> | |
| <h3>Using BERT: Quick Code Example</h3> | |
| <div class="formula"> | |
| # Using Hugging Face Transformers<br> | |
| from transformers import BertTokenizer, BertForQuestionAnswering<br> | |
| import torch<br> | |
| <br> | |
| # Load pre-trained model and tokenizer<br> | |
| tokenizer = BertTokenizer.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')<br> | |
| model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')<br> | |
| <br> | |
| # Example<br> | |
| question = "What is BERT?"<br> | |
| context = "BERT is a bidirectional Transformer for NLP."<br> | |
| <br> | |
| # Tokenize and get answer<br> | |
| inputs = tokenizer(question, context, return_tensors='pt')<br> | |
| outputs = model(**inputs)<br> | |
| <br> | |
| start_idx = torch.argmax(outputs.start_logits)<br> | |
| end_idx = torch.argmax(outputs.end_logits)<br> | |
| answer = tokenizer.convert_tokens_to_string(<br> | |
| tokenizer.convert_ids_to_tokens(inputs['input_ids'][0][start_idx:end_idx+1])<br> | |
| )<br> | |
| print(answer) # "a bidirectional Transformer for NLP" | |
| </div> | |
| `, | |
| code: ` | |
| <h3>BERT Training Objectives</h3> | |
| <div class="formula" style="text-align:center;"> | |
| <strong>MLM:</strong> $$L_{MLM} = -\\sum_{i \\in M} \\log P(x_i | x_{\\backslash M})$$ | |
| <strong>NSP:</strong> $$L_{NSP} = -[y\\log(p) + (1-y)\\log(1-p)]$$ | |
| </div> | |
| <span class="code-title">📄 bert_huggingface.py</span><div class="code-block"><span class="keyword">from</span> transformers <span class="keyword">import</span> BertTokenizer, BertModel, pipeline | |
| <span class="comment"># --- Quick sentiment analysis ---</span> | |
| classifier = pipeline(<span class="string">"sentiment-analysis"</span>) | |
| result = classifier(<span class="string">"Deep learning is fascinating!"</span>) | |
| <span class="keyword">print</span>(result) <span class="comment"># [{'label': 'POSITIVE', 'score': 0.9998}]</span> | |
| <span class="comment"># --- BERT Embeddings for custom tasks ---</span> | |
| tokenizer = BertTokenizer.from_pretrained(<span class="string">"bert-base-uncased"</span>) | |
| model = BertModel.from_pretrained(<span class="string">"bert-base-uncased"</span>) | |
| text = <span class="string">"Neural networks learn features automatically"</span> | |
| inputs = tokenizer(text, return_tensors=<span class="string">"pt"</span>) | |
| outputs = model(**inputs) | |
| <span class="comment"># CLS token embedding (sentence representation)</span> | |
| cls_embedding = outputs.last_hidden_state[:, <span class="number">0</span>, :] | |
| <span class="keyword">print</span>(f<span class="string">"Shape: {cls_embedding.shape}"</span>) <span class="comment"># (1, 768)</span></div> | |
| ` | |
| }, | |
| "gpt": { | |
| overview: ` | |
| <p>GPT (Generative Pre-trained Transformer) is the architecture behind ChatGPT, GitHub Copilot, and the AI revolution. It's a <strong>decoder-only Transformer</strong> trained to predict the next token in a sequence — a simple objective that produces remarkably intelligent behavior.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">🎯 The Core Idea</div> | |
| GPT's entire training objective is <strong>next-token prediction</strong>: given "The cat sat on the", predict "mat". By doing this on trillions of tokens from the internet, the model learns grammar, facts, reasoning, coding, math — all as a byproduct of predicting what comes next. It's arguably the most powerful simple idea in AI. | |
| </div> | |
| <h3>The GPT Family</h3> | |
| <table style="width:100%; border-collapse: collapse; margin: 15px 0;"> | |
| <tr style="border-bottom: 2px solid var(--cyan);"> | |
| <th style="padding: 10px; color: var(--cyan);">Model</th> | |
| <th style="padding: 10px; color: var(--cyan);">Year</th> | |
| <th style="padding: 10px; color: var(--cyan);">Parameters</th> | |
| <th style="padding: 10px; color: var(--cyan);">Key Innovation</th> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">GPT-1</td><td>2018</td><td>117M</td><td>Pretrain on unlabeled text, fine-tune</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">GPT-2</td><td>2019</td><td>1.5B</td><td>Zero-shot task transfer</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">GPT-3</td><td>2020</td><td>175B</td><td>In-context learning, few-shot prompting</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">GPT-4</td><td>2023</td><td>~1.7T (MoE)</td><td>Multimodal (text + images), RLHF</td></tr> | |
| <tr><td style="padding: 8px;">GPT-4o</td><td>2024</td><td>Unknown</td><td>Native multimodal (audio, video, text)</td></tr> | |
| </table> | |
| <h3>How GPT Generates Text</h3> | |
| <p>GPT uses <strong>autoregressive generation</strong>: it generates one token at a time, each conditioned on all previous tokens. The process:</p> | |
| <div class="list-item"><div class="list-num">1</div><div>Feed prompt tokens through the Transformer decoder</div></div> | |
| <div class="list-item"><div class="list-num">2</div><div>Get probability distribution over vocabulary (~50K tokens)</div></div> | |
| <div class="list-item"><div class="list-num">3</div><div>Sample from the distribution (temperature, top-k, top-p control randomness)</div></div> | |
| <div class="list-item"><div class="list-num">4</div><div>Append the new token and repeat from step 1</div></div> | |
| `, | |
| concepts: ` | |
| <h3>GPT Architecture</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Decoder Only:</strong> Uses causal (masked) attention - can only see past tokens</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Autoregressive:</strong> Generate one token at a time, feed back as input</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Pre-training:</strong> Next token prediction on massive text corpus</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>RLHF:</strong> Reinforcement Learning from Human Feedback (ChatGPT)</div> | |
| </div> | |
| <h3>In-Context Learning</h3> | |
| <p>GPT-3+ can learn from examples in the prompt without updating weights!</p> | |
| <div class="formula"> | |
| Zero-shot: "Translate to French: Hello" → "Bonjour"<br> | |
| Few-shot: "cat→chat, dog→chien, house→?" → "maison" | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Causal Language Modeling</h3> | |
| <p>GPT is trained to maximize the likelihood of the next token:</p> | |
| <div class="formula"> | |
| L = -Σ log P(x_t | x_{<t})<br> | |
| <br> | |
| Where P(x_t | x_{<t}) = softmax(h_t × W_vocab) | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Scaling Laws</div> | |
| Performance scales predictably with compute, data, and parameters:<br> | |
| L ∝ N^(-0.076) for model size N<br> | |
| This is why OpenAI trained GPT-3 (175B) and GPT-4 (reportedly ~1.7T)! | |
| </div> | |
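| <p>In code, this loss is one call away with Hugging Face (pass labels=input_ids; the library shifts the labels by one position internally):</p> | |
| <span class="code-title">📄 causal_lm_loss.py</span><div class="code-block"><span class="keyword">from</span> transformers <span class="keyword">import</span> GPT2LMHeadModel, GPT2Tokenizer | |
| tok = GPT2Tokenizer.from_pretrained(<span class="string">"gpt2"</span>) | |
| model = GPT2LMHeadModel.from_pretrained(<span class="string">"gpt2"</span>) | |
| ids = tok(<span class="string">"The cat sat on the mat"</span>, return_tensors=<span class="string">"pt"</span>).input_ids | |
| out = model(ids, labels=ids) <span class="comment"># next-token targets are the inputs shifted by one</span> | |
| <span class="keyword">print</span>(out.loss.item()) <span class="comment"># mean negative log-likelihood per token</span></div> | |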
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">💬 ChatGPT & Assistants</div> | |
| <div class="box-content"> | |
| Conversational AI, customer support, tutoring, brainstorming | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">💻 Code Generation</div> | |
| <div class="box-content"> | |
| GitHub Copilot, code completion, bug fixing, documentation | |
| </div> | |
| </div> | |
| `, | |
| code: `<span class="code-title">📄 gpt_generation.py</span><div class="code-block"><span class="keyword">from</span> transformers <span class="keyword">import</span> GPT2LMHeadModel, GPT2Tokenizer | |
| tokenizer = GPT2Tokenizer.from_pretrained(<span class="string">"gpt2"</span>) | |
| model = GPT2LMHeadModel.from_pretrained(<span class="string">"gpt2"</span>) | |
| <span class="comment"># Generate text</span> | |
| prompt = <span class="string">"Deep learning has revolutionized"</span> | |
| inputs = tokenizer.encode(prompt, return_tensors=<span class="string">"pt"</span>) | |
| outputs = model.generate( | |
|     inputs, max_length=<span class="number">100</span>, | |
|     temperature=<span class="number">0.7</span>, <span class="comment"># creativity control</span> | |
|     top_k=<span class="number">50</span>, <span class="comment"># top-k sampling</span> | |
|     top_p=<span class="number">0.9</span>, <span class="comment"># nucleus sampling</span> | |
|     do_sample=<span class="keyword">True</span>, | |
|     no_repeat_ngram_size=<span class="number">2</span>, | |
| ) | |
| text = tokenizer.decode(outputs[<span class="number">0</span>]) | |
| <span class="keyword">print</span>(text)</div> | |
| ` | |
| }, | |
| "vit": { | |
| overview: ` | |
| <h3>Vision Transformer (ViT)</h3> | |
| <p>Apply Transformer architecture directly to images by treating them as sequences of patches.</p> | |
| <h3>How ViT Works</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Patchify:</strong> Split 224×224 image into 16×16 patches (14×14 = 196 patches)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Linear Projection:</strong> Flatten each patch → linear embedding (like word embeddings)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Positional Encoding:</strong> Add position information</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>Transformer Encoder:</strong> Standard Transformer (self-attention, FFN)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">05</div> | |
| <div><strong>Classification:</strong> Use [CLS] token for final prediction</div> | |
| </div> | |
| <div class="callout tip"> | |
| <div class="callout-title">💡 When ViT Shines</div> | |
| • <strong>Large Datasets:</strong> Needs 10M+ images (or pre-training on ImageNet-21K)<br> | |
| • <strong>Transfer Learning:</strong> Pre-trained ViT beats CNNs on many tasks<br> | |
| • <strong>Long-Range Dependencies:</strong> Global attention vs CNN's local receptive field | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>ViT vs CNN Comparison</h3> | |
| <table> | |
| <tr><th>Aspect</th><th>CNN</th><th>ViT</th></tr> | |
| <tr><td>Inductive Bias</td><td>Locality, translation invariance</td><td>Minimal - learns from data</td></tr> | |
| <tr><td>Data Efficiency</td><td>Better with small datasets</td><td>Needs large datasets</td></tr> | |
| <tr><td>Receptive Field</td><td>Local (grows with depth)</td><td>Global from layer 1</td></tr> | |
| <tr><td>Scalability</td><td>Diminishing returns</td><td>Scales well with compute</td></tr> | |
| </table> | |
| <h3>Key Innovations</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>No Convolutions:</strong> Pure attention - "An Image is Worth 16x16 Words"</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Learnable Position:</strong> Position embeddings are learned, not sinusoidal</div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Patch Embedding</h3> | |
| <p>Convert image patches to token embeddings:</p> | |
| <div class="formula"> | |
| z_0 = [x_cls; x_p^1 E; x_p^2 E; ...; x_p^N E] + E_pos<br> | |
| <br> | |
| Where:<br> | |
| • x_p^i = flattened patch (16×16×3 = 768 dimensions)<br> | |
| • E = learnable linear projection<br> | |
| • E_pos = position embedding | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: Computation</div> | |
| ViT-Base: 12 layers, 768 hidden, 12 heads ~ 86M params<br> | |
| Self-attention cost: O(n²·d) where n=196 patches<br> | |
| With only 196 patch tokens per image, attention stays cheap; text sequences of 1000+ tokens pay a far larger quadratic cost | |
| </div> | |
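| <p>A minimal sketch of the patchify + projection pipeline (ViT-Base shapes; the [CLS] token and position embeddings are zeros here, but learned parameters in the real model):</p> | |
| <span class="code-title">📄 vit_patchify.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn | |
| B, C, S, P, D = <span class="number">1</span>, <span class="number">3</span>, <span class="number">224</span>, <span class="number">16</span>, <span class="number">768</span> | |
| img = torch.randn(B, C, S, S) | |
| <span class="comment"># Patchify: (224/16)² = 196 patches, each 16·16·3 = 768 raw values</span> | |
| patches = img.unfold(<span class="number">2</span>, P, P).unfold(<span class="number">3</span>, P, P) <span class="comment"># (B, C, 14, 14, P, P)</span> | |
| patches = patches.permute(<span class="number">0</span>, <span class="number">2</span>, <span class="number">3</span>, <span class="number">1</span>, <span class="number">4</span>, <span class="number">5</span>).reshape(B, -<span class="number">1</span>, C * P * P) | |
| proj = nn.Linear(C * P * P, D) <span class="comment"># learnable projection E</span> | |
| cls = torch.zeros(B, <span class="number">1</span>, D) <span class="comment"># [CLS] token (learned in practice)</span> | |
| pos = torch.zeros(B, <span class="number">197</span>, D) <span class="comment"># position embeddings E_pos (learned in practice)</span> | |
| z0 = torch.cat([cls, proj(patches)], dim=<span class="number">1</span>) + pos | |
| <span class="keyword">print</span>(z0.shape) <span class="comment"># (1, 197, 768)</span></div> | |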
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🖼️ Image Classification</div> | |
| <div class="box-content">SOTA on ImageNet with pre-training. Google/DeepMind use for internal systems.</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🔍 Object Detection</div> | |
| <div class="box-content">DETR, DINO - Transformer-based detection replacing Faster R-CNN</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🎬 Video Understanding</div> | |
| <div class="box-content">VideoViT, TimeSformer - extend patches to 3D (space + time)</div> | |
| </div> | |
| ` | |
| }, | |
| "seq2seq": { | |
| overview: ` | |
| <h3>Seq2Seq with Attention</h3> | |
| <p>The architecture that revolutionized Machine Translation (before Transformers).</p> | |
| <h3>The Bottleneck Problem</h3> | |
| <p>Standard Encoder-Decoder models try to compress the entire sentence "I love deep learning" into a single vector (Context Vector).</p> | |
| <div class="callout warning"> | |
| <div class="callout-title">⚠️ Information Bottleneck</div> | |
| For long sentences (e.g., 50 words), the fixed-size vector forgets early details. Performance degrades rapidly with length. | |
| </div> | |
| <h3>The Attention Solution</h3> | |
| <p><strong>Idea:</strong> Don't just look at the last state. Let the Decoder "look back" at ALL Encoder states at every step.</p> | |
| `, | |
| concepts: ` | |
| <h3>Visualizing Attention (Alammar Style)</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Encoder States:</strong> We keep all hidden states $h_1, h_2, h_3, h_4$.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Alignment Scores:</strong> Decoder asks: "How relevant is $h_i$ to what I'm translating now?"</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Context Vector:</strong> Weighted sum of relevant states. If attention is strong on $h_2$, the context vector looks a lot like $h_2$.</div> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">💡 The "Searchlight" Analogy</div> | |
| Attention is like a searchlight. When generating "étudiant" (student), the light shines brightly on "student" in the input sentence. | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Attention Math (Bahdanau / Luong)</h3> | |
| <p>Let Decoder state be $s_{t-1}$ and Encoder states be $h_j$.</p> | |
| <div class="formula"> | |
| 1. Score: $e_{tj} = score(s_{t-1}, h_j)$ (e.g., Dot Product)<br> | |
| 2. Weights: $\\alpha_{tj} = softmax(e_{tj})$<br> | |
| 3. Context: $c_t = \\sum \\alpha_{tj} h_j$<br> | |
| 4. Output: $s_t = RNN(s_{t-1}, [y_{t-1}, c_t])$ | |
| </div> | |
| <div class="callout tip"> | |
| <div class="callout-title">📝 Paper & Pain: Dot Product vs Additive</div> | |
| • <strong>Dot Product (Luong):</strong> $s^T h$ (Fast, matrix mult)<br> | |
| • <strong>Additive (Bahdanau):</strong> $v^T tanh(W_1 s + W_2 h)$ (More parameters, originally better for large dim) | |
| </div> | |
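| <p>A minimal sketch of Luong-style dot-product attention over toy encoder states:</p> | |
| <span class="code-title">📄 attention_sketch.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| T, d = <span class="number">4</span>, <span class="number">8</span> <span class="comment"># source length, hidden size</span> | |
| h = torch.randn(T, d) <span class="comment"># encoder states h_1..h_4</span> | |
| s = torch.randn(d) <span class="comment"># current decoder state s_{t-1}</span> | |
| e = h @ s <span class="comment"># 1. scores: dot product per source position, shape (T,)</span> | |
| alpha = torch.softmax(e, dim=<span class="number">0</span>) <span class="comment"># 2. attention weights sum to 1</span> | |
| c = alpha @ h <span class="comment"># 3. context vector: weighted sum, shape (d,)</span> | |
| <span class="keyword">print</span>(alpha, c.shape)</div> | |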
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🗣️ Neural Machine Translation</div> | |
| <div class="box-content">Google Translate (2016) switched to GNMT (Attention-based), reducing errors by 60%.</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">📄 Text Summarization</div> | |
| <div class="box-content">Focusing on key sentences in a long document to generate a headline.</div> | |
| </div> | |
| ` | |
| }, | |
| "research-papers": { | |
| overview: ` | |
| <h3>Seminal Papers Library</h3> | |
| <p>A curated collection of the most impactful papers in Deep Learning history, with "Paper & Pain" insights.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">🎓 How to Read Papers</div> | |
| Don't just read the abstract. Look for the <strong>Objective Function</strong> and the <strong>Architecture Diagram</strong>. That's where the truth lies. | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Computer Vision Hall of Fame</h3> | |
| <div class="list-item"> | |
| <div class="list-num">2012</div> | |
| <div><strong>AlexNet</strong> (Krizhevsky et al.)<br> | |
| <em>"ImageNet Classification with Deep Convolutional Neural Networks"</em><br> | |
| <span class="formula-caption">Insight: Relied on TWO GPUs because 3GB RAM wasn't enough. The split architecture was distinct.</span><br> | |
| <a href="https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf" target="_blank" style="color: #ff6b35;">📄 Read PDF</a></div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">2014</div> | |
| <div><strong>VGGNet</strong> (Simonyan & Zisserman)<br> | |
| <em>"Very Deep Convolutional Networks for Large-Scale Image Recognition"</em><br> | |
| <span class="formula-caption">Insight: 3x3 filters are all you need. Two 3x3 layers have the same receptive field as one 5x5 but fewer parameters.</span><br> | |
| <a href="https://arxiv.org/pdf/1409.1556.pdf" target="_blank" style="color: #ff6b35;">📄 Read PDF</a></div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">2015</div> | |
| <div><strong>U-Net</strong> (Ronneberger et al.)<br> | |
| <em>"Convolutional Networks for Biomedical Image Segmentation"</em><br> | |
| <span class="formula-caption">Insight: Skip connections concatenating features from encoder to decoder allow precise localization.</span><br> | |
| <a href="https://arxiv.org/pdf/1505.04597.pdf" target="_blank" style="color: #ff6b35;">📄 Read PDF</a></div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">2015</div> | |
| <div><strong>ResNet</strong> (He et al.)<br> | |
| <em>"Deep Residual Learning for Image Recognition"</em><br> | |
| <span class="formula-caption">Insight: It's easier to learn 0 than Identity. $f(x) = H(x) - x$.</span><br> | |
| <a href="https://arxiv.org/pdf/1512.03385.pdf" target="_blank" style="color: #ff6b35;">📄 Read PDF</a></div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">2016</div> | |
| <div><strong>YOLO</strong> (Redmon et al.)<br> | |
| <em>"You Only Look Once: Unified, Real-Time Object Detection"</em><br> | |
| <span class="formula-caption">Insight: Treated detection as a <strong>regression</strong> problem, not classification. Single forward pass.</span><br> | |
| <a href="https://arxiv.org/pdf/1506.02640.pdf" target="_blank" style="color: #ff6b35;">📄 Read PDF</a></div> | |
| </div> | |
| <hr style="border-color: #333; margin: 20px 0;"> | |
| <h3>NLP & GenAI Hall of Fame</h3> | |
| <div class="list-item"> | |
| <div class="list-num">2014</div> | |
| <div><strong>GANs</strong> (Goodfellow et al.)<br> | |
| <em>"Generative Adversarial Networks"</em><br> | |
| <span class="formula-caption">Insight: Training a generator by fighting a discriminator. The minimax game: $\\min_G \\max_D V(D, G)$.</span><br> | |
| <a href="https://arxiv.org/pdf/1406.2661.pdf" target="_blank" style="color: #a371f7;">📄 Read PDF</a></div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">2017</div> | |
| <div><strong>Attention Is All You Need</strong> (Vaswani et al.)<br> | |
| <em>"Transformers"</em><br> | |
| <span class="formula-caption">Insight: Sinusoidal Positional Embeddings allow the model to generalize to lengths unseen during training.</span><br> | |
| <a href="https://arxiv.org/pdf/1706.03762.pdf" target="_blank" style="color: #00d4ff;">📄 Read PDF</a></div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">2018</div> | |
| <div><strong>BERT</strong> (Devlin et al.)<br> | |
| <em>"Pre-training of Deep Bidirectional Transformers"</em><br> | |
| <span class="formula-caption">Insight: Masked LM (Cloze task) is inefficient (only 15% signal) but crucial for bidirectionality.</span><br> | |
| <a href="https://arxiv.org/pdf/1810.04805.pdf" target="_blank" style="color: #00d4ff;">📄 Read PDF</a></div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">2020</div> | |
| <div><strong>GPT-3</strong> (Brown et al.)<br> | |
| <em>"Language Models are Few-Shot Learners"</em><br> | |
| <span class="formula-caption">Insight: Scale is all you need. 175B parameters enable emergent behavior like in-context learning.</span><br> | |
| <a href="https://arxiv.org/pdf/2005.14165.pdf" target="_blank" style="color: #00d4ff;">📄 Read PDF</a></div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">2020</div> | |
| <div><strong>DDPM</strong> (Ho et al.)<br> | |
| <em>"Denoising Diffusion Probabilistic Models"</em><br> | |
| <span class="formula-caption">Insight: Predicting the noise $\\epsilon$ is mathematically equivalent to predicting the score function (gradient of data density).</span><br> | |
| <a href="https://arxiv.org/pdf/2006.11239.pdf" target="_blank" style="color: #a371f7;">📄 Read PDF</a></div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>The Formulas That Changed AI</h3> | |
| <p><strong>ResNet Residual:</strong></p> | |
| <div class="formula">y = F(x, \\{W_i\}) + x</div> | |
| <p><strong>Scaled Dot-Product Attention:</strong></p> | |
| <div class="formula">Attention(Q, K, V) = softmax(\\frac{QK^T}{\\sqrt{d_k}})V</div> | |
| <p><strong>Diffusion Reverse Process:</strong></p> | |
| <div class="formula">p_\\theta(x_{t-1}|x_t) = \\mathcal{N}(x_{t-1}; \\mu_\\theta(x_t, t), \\Sigma_\\theta(x_t, t))</div> | |
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">🚀 Impact</div> | |
| <div class="box-content">These papers form the foundation of ChatGPT, Midjourney, Self-Driving Cars, and Facial Recognition.</div> | |
| </div> | |
| ` | |
| }, | |
| "gnn": { | |
| overview: ` | |
| <h3>Graph Neural Networks (GNNs)</h3> | |
| <p>Deep learning on non-Euclidean data structures like social networks, molecules, and knowledge graphs.</p> | |
| <h3>Key Concepts</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Graph Structure:</strong> Nodes (entities) and Edges (relationships).</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Message Passing:</strong> Nodes exchange information with neighbors.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Aggregation:</strong> Combine incoming messages (Sum, Mean, Max).</div> | |
| </div> | |
| <div class="callout tip"> | |
| <div class="callout-title">💡 Why GNNs?</div> | |
| Standard CNNs expect a fixed Euclidean grid. Graphs have arbitrary size and topology, and GNNs are permutation invariant! | |
| </div> | |
| `, | |
| concepts: ` | |
| <h3>Message Passing Neural Networks (MPNN)</h3> | |
| <p>The core framework for most GNNs.</p> | |
| <div class="list-item"> | |
| <div class="list-num">1</div> | |
| <div><strong>Message Function:</strong> Compute message from neighbor to node.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">2</div> | |
| <div><strong>Aggregation Function:</strong> Sum all messages from neighbors.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">3</div> | |
| <div><strong>Update Function:</strong> Update node state based on aggregated messages.</div> | |
| </div> | |
| `, | |
| math: ` | |
| <h3>Graph Convolution Network (GCN)</h3> | |
| <p>The "Hello World" of GNNs (Kipf & Welling, 2017).</p> | |
| <div class="formula"> | |
| H^{(l+1)} = σ(D̃^{-1/2} Ã D̃^{-1/2} H^{(l)} W^{(l)}) | |
| </div> | |
| <p>Where:</p> | |
| <ul> | |
| <li><strong>Ã = A + I:</strong> Adjacency Matrix with self-loops added (so each node also keeps its own features)</li> | |
| <li><strong>D̃:</strong> Degree Matrix of Ã (number of connections per node)</li> | |
| <li><strong>H:</strong> Node Features</li> | |
| <li><strong>W:</strong> Learnable Weights</li> | |
| </ul> | |
| <div class="callout warning"> | |
| <div class="callout-title">⚠️ Over-smoothing</div> | |
| If GNN is too deep, all node representations become indistinguishable. Usually 2-4 layers are enough. | |
| </div> | |
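| <p>A minimal dense-matrix GCN layer in PyTorch (toy 3-node graph; real libraries such as PyTorch Geometric use sparse operations):</p> | |
| <span class="code-title">📄 gcn_layer.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="comment"># Toy graph: 3 nodes, edges 0-1 and 1-2</span> | |
| A = torch.tensor([[<span class="number">0.</span>, <span class="number">1.</span>, <span class="number">0.</span>], [<span class="number">1.</span>, <span class="number">0.</span>, <span class="number">1.</span>], [<span class="number">0.</span>, <span class="number">1.</span>, <span class="number">0.</span>]]) | |
| A_hat = A + torch.eye(<span class="number">3</span>) <span class="comment"># Ã = A + I (self-loops)</span> | |
| D_inv_sqrt = torch.diag(A_hat.sum(<span class="number">1</span>).pow(-<span class="number">0.5</span>)) | |
| A_norm = D_inv_sqrt @ A_hat @ D_inv_sqrt <span class="comment"># D̃^{-1/2} Ã D̃^{-1/2}</span> | |
| H = torch.randn(<span class="number">3</span>, <span class="number">4</span>) <span class="comment"># node features</span> | |
| W = torch.randn(<span class="number">4</span>, <span class="number">2</span>) <span class="comment"># learnable weights</span> | |
| H_next = torch.relu(A_norm @ H @ W) <span class="comment"># one message-passing layer</span> | |
| <span class="keyword">print</span>(H_next.shape) <span class="comment"># (3, 2)</span></div> | |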
| `, | |
| applications: ` | |
| <div class="info-box"> | |
| <div class="box-title">💊 Drug Discovery</div> | |
| <div class="box-content">Predicting molecular properties, protein folding (AlphaFold)</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🚗 Traffic Prediction</div> | |
| <div class="box-content">Road networks, estimating travel times (Google Maps)</div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🛒 Recommender Systems</div> | |
| <div class="box-content">Pinterest (PinSage), User-Item graphs</div> | |
| </div> | |
| ` | |
| }, | |
| "vector-db": { | |
| overview: ` | |
| <p>Vector databases store and search <strong>high-dimensional embeddings</strong> — the numerical representations that AI models use to understand text, images, and audio. They enable <strong>semantic search</strong>: finding items by meaning, not just keywords.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">📚 Traditional DB vs Vector DB</div> | |
| <strong>Traditional (SQL):</strong> "Find documents with the word 'dog'" → Exact keyword match<br> | |
| <strong>Vector DB:</strong> "Find documents about pets" → Returns articles about dogs, cats, hamsters — because their <em>meaning</em> is similar even if they don't share exact words. | |
| </div> | |
| <h3>How It Works</h3> | |
| <div class="list-item"><div class="list-num">1</div><div><strong>Embed:</strong> Convert text/images into vectors using an embedding model (e.g., OpenAI ada-002, sentence-transformers)</div></div> | |
| <div class="list-item"><div class="list-num">2</div><div><strong>Index:</strong> Store vectors in a specialized data structure (HNSW, IVF, product quantization) for fast search</div></div> | |
| <div class="list-item"><div class="list-num">3</div><div><strong>Query:</strong> Convert query to a vector, find the K nearest neighbors using cosine similarity or L2 distance</div></div> | |
| <h3>Popular Vector Databases</h3> | |
| <table style="width:100%; border-collapse: collapse; margin: 15px 0;"> | |
| <tr style="border-bottom: 2px solid var(--cyan);"> | |
| <th style="padding: 10px; color: var(--cyan);">Database</th> | |
| <th style="padding: 10px; color: var(--cyan);">Type</th> | |
| <th style="padding: 10px; color: var(--cyan);">Best For</th> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">ChromaDB</td><td>In-memory, local</td><td>Prototyping, small datasets</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">FAISS (Meta)</td><td>Library</td><td>Billion-scale search, GPU-accelerated</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Pinecone</td><td>Cloud service</td><td>Production SaaS, managed infrastructure</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Weaviate</td><td>Open-source</td><td>Multi-modal search, GraphQL API</td></tr> | |
| <tr><td style="padding: 8px;">Milvus</td><td>Open-source</td><td>Enterprise, distributed scaling</td></tr> | |
| </table> | |
| `, | |
| concepts: ` | |
| <h3>Core Concepts</h3> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Embeddings:</strong> Dense numerical representations of data (text, images, etc.) produced by neural networks. Example: OpenAI's text-embedding-3-small produces 1536-dim vectors.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Similarity Metrics:</strong> Cosine similarity (angle), Euclidean distance (L2), Dot product (magnitude-aware). Cosine is most common for text embeddings.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>ANN Algorithms:</strong> Approximate Nearest Neighbor — trade small accuracy loss for massive speed gains (exact search is O(n), ANN is O(log n)).</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>HNSW (Hierarchical Navigable Small World):</strong> Graph-based index. Builds a multi-layer graph where higher layers have fewer, long-range connections. Most popular algorithm.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">05</div> | |
| <div><strong>IVF (Inverted File Index):</strong> Clusters vectors first (like k-means), then only searches relevant clusters. Fast but requires training step.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">06</div> | |
| <div><strong>Product Quantization (PQ):</strong> Compresses vectors by splitting them into sub-vectors and quantizing each. Reduces memory by 10-100x.</div> | |
| </div> | |
| <h3>Embedding Pipeline</h3> | |
| <div class="formul a"> | |
| 1. Raw Data → Embedding Model → Vector (e.g., [0.12, -0.34, 0.56, ...])<br> | |
| 2. Store vector + metadata in Vector DB<br> | |
| 3. Query: Convert query → vector → Find nearest neighbors<br> | |
| 4. Return top-k most similar results with scores | |
| </div> | |
| <div class="callout warning"> | |
| <div class="callout-title">⚠️ Common Pitfalls</div> | |
| • Using wrong distance metric (cosine for normalized, L2 for raw)<br> | |
| • Not chunking documents properly (too large = diluted embedding)<br> | |
| • Mixing embedding models (query and docs must use same model)<br> | |
| • Ignoring metadata filtering (combine vector search + filters) | |
| </div> | |
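| <p>To make the index types concrete, here is a small FAISS sketch comparing exact search with an HNSW index (random vectors for illustration):</p> | |
| <span class="code-title">📄 faiss_sketch.py</span><div class="code-block"><span class="keyword">import</span> faiss | |
| <span class="keyword">import</span> numpy <span class="keyword">as</span> np | |
| d, N = <span class="number">128</span>, <span class="number">10000</span> | |
| xb = np.random.rand(N, d).astype(<span class="string">"float32"</span>) <span class="comment"># database vectors</span> | |
| xq = np.random.rand(<span class="number">5</span>, d).astype(<span class="string">"float32"</span>) <span class="comment"># query vectors</span> | |
| flat = faiss.IndexFlatL2(d) <span class="comment"># exact search: O(N) per query</span> | |
| flat.add(xb) | |
| hnsw = faiss.IndexHNSWFlat(d, <span class="number">32</span>) <span class="comment"># graph index, M = 32 links per node</span> | |
| hnsw.add(xb) | |
| D1, I1 = flat.search(xq, <span class="number">5</span>) <span class="comment"># distances and ids of the 5 nearest neighbors</span> | |
| D2, I2 = hnsw.search(xq, <span class="number">5</span>) | |
| <span class="keyword">print</span>(I1[<span class="number">0</span>], I2[<span class="number">0</span>]) <span class="comment"># usually identical; ANN may differ slightly</span></div> | |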
| `, | |
| math: ` | |
| <h3>📐 Paper & Pain: Vector Similarity Mathematics</h3> | |
| <h4>Cosine Similarity</h4> | |
| <div class="formula" style="font-size: 1.2rem; text-align: center; margin: 20px 0; background: rgba(0, 212, 255, 0.08); padding: 25px; border-radius: 8px;"> | |
| <strong>cos(A, B) = (A · B) / (||A|| × ||B||)</strong> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Manual Calculation</div> | |
| <strong>A = [1, 2, 3], B = [4, 5, 6]</strong><br><br> | |
| <strong>Step 1 — Dot Product:</strong><br> | |
| A · B = (1×4) + (2×5) + (3×6) = 4 + 10 + 18 = <strong>32</strong><br><br> | |
| <strong>Step 2 — Magnitudes:</strong><br> | |
| ||A|| = √(1² + 2² + 3²) = √14 ≈ 3.742<br> | |
| ||B|| = √(4² + 5² + 6²) = √77 ≈ 8.775<br><br> | |
| <strong>Step 3 — Cosine Similarity:</strong><br> | |
| cos(A, B) = 32 / (3.742 × 8.775) = 32 / 32.833 ≈ <strong>0.9746</strong><br><br> | |
| <strong>Interpretation:</strong> Very high similarity (close to 1.0 = identical direction) | |
| </div> | |
| <h4>Euclidean Distance (L2)</h4> | |
| <div class="formula"> | |
| d(A, B) = √(Σ(aᵢ - bᵢ)²)<br><br> | |
| For A = [1, 2, 3], B = [4, 5, 6]:<br> | |
| d = √((4-1)² + (5-2)² + (6-3)²) = √(9 + 9 + 9) = √27 ≈ <strong>5.196</strong> | |
| </div> | |
| <h4>HNSW Complexity</h4> | |
| <div class="formula"> | |
| Build time: O(N × log(N))<br> | |
| Query time: O(log(N)) — logarithmic!<br> | |
| Memory: O(N × M) where M = max connections per node<br><br> | |
| Compare to brute force: O(N) per query<br> | |
| For N = 1 billion vectors: ~30 hops (log₂(10⁹) ≈ 30) instead of ~10⁹ comparisons, a speedup of roughly seven orders of magnitude | |
| </div> | |
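| <p>The worked numbers above can be verified in a couple of lines:</p> | |
| <span class="code-title">📄 similarity_check.py</span><div class="code-block"><span class="keyword">import</span> numpy <span class="keyword">as</span> np | |
| A, B = np.array([<span class="number">1</span>, <span class="number">2</span>, <span class="number">3</span>]), np.array([<span class="number">4</span>, <span class="number">5</span>, <span class="number">6</span>]) | |
| cos = A @ B / (np.linalg.norm(A) * np.linalg.norm(B)) | |
| l2 = np.linalg.norm(A - B) | |
| <span class="keyword">print</span>(<span class="builtin">round</span>(<span class="builtin">float</span>(cos), <span class="number">4</span>), <span class="builtin">round</span>(<span class="builtin">float</span>(l2), <span class="number">3</span>)) <span class="comment"># 0.9746 5.196</span></div> | |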
| `, | |
| applications: ` | |
| <h3>Real-World Applications</h3> | |
| <div class="info-box"> | |
| <div class="box-title">🔍 Semantic Search</div> | |
| <div class="box-content"> | |
| <strong>Google, Bing, Notion AI:</strong> Search by meaning, not keywords<br> | |
| <strong>Example:</strong> "How to fix my car" matches "automobile repair guide" even with zero keyword overlap | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🤖 RAG (Retrieval-Augmented Generation)</div> | |
| <div class="box-content"> | |
| Store knowledge base as vectors → retrieve relevant context → feed to LLM for grounded answers. Used by ChatGPT with browsing, Perplexity AI, and enterprise AI assistants. | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🛒 Recommendation Systems</div> | |
| <div class="box-content"> | |
| <strong>Spotify (music):</strong> Embed songs as vectors, recommend nearest neighbors<br> | |
| <strong>Netflix (movies):</strong> User + content embeddings for personalization | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🖼️ Image & Multi-Modal Search</div> | |
| <div class="box-content"> | |
| <strong>CLIP embeddings:</strong> Search images with text queries and vice versa<br> | |
| <strong>Google Lens:</strong> Find products by uploading a photo | |
| </div> | |
| </div> | |
| `, | |
| code: ` | |
| <h3>Similarity Metrics</h3> | |
| <div class="formula" style="text-align:center;"> | |
| <strong>Cosine Similarity:</strong> $$\\cos(\\theta) = \\frac{A \\cdot B}{\\|A\\| \\|B\\|}$$ | |
| <strong>L2 (Euclidean):</strong> $$d = \\sqrt{\\sum(a_i - b_i)^2}$$ | |
| </div> | |
| <span class="code-title">📄 chromadb_example.py</span><div class="code-block"><span class="keyword">import</span> chromadb | |
| client = chromadb.Client() | |
| collection = client.create_collection(<span class="string">"dl_docs"</span>) | |
| <span class="comment"># Add documents</span> | |
| collection.add( | |
|     documents=[<span class="string">"Neural networks use backpropagation"</span>, <span class="string">"CNNs detect visual patterns"</span>], | |
|     ids=[<span class="string">"doc1"</span>, <span class="string">"doc2"</span>], | |
| ) | |
| <span class="comment"># Query</span> | |
| results = collection.query(query_texts=[<span class="string">"How do neural nets learn?"</span>], n_results=<span class="number">1</span>) | |
| <span class="keyword">print</span>(results[<span class="string">"documents"</span>]) <span class="comment"># [["Neural networks use backpropagation"]]</span></div> | |
| ` | |
| }, | |
| "rag": { | |
| overview: ` | |
| <p>RAG (Retrieval-Augmented Generation) is the <strong>most practical pattern for building AI applications</strong> today. Instead of relying solely on an LLM's training data (which is frozen at training time), RAG retrieves relevant documents from your own knowledge base and includes them in the prompt.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">💡 Why RAG Matters</div> | |
| LLMs have two fundamental problems: (1) they hallucinate — confidently generating false information, and (2) their knowledge has a cutoff date. RAG solves both by grounding the LLM's responses in <strong>actual, verifiable documents</strong> from your data. | |
| </div> | |
| <h3>The RAG Pipeline</h3> | |
| <div class="list-item"><div class="list-num">1</div><div><strong>Ingestion (Offline):</strong> Load documents → Split into chunks (500-1000 tokens) → Embed each chunk → Store in vector DB</div></div> | |
| <div class="list-item"><div class="list-num">2</div><div><strong>Retrieval (Online):</strong> User asks a question → Embed the question → Find K most similar chunks in vector DB</div></div> | |
| <div class="list-item"><div class="list-num">3</div><div><strong>Generation:</strong> Stuff retrieved chunks into the prompt as context → LLM generates an answer grounded in the context</div></div> | |
| <h3>RAG vs. Fine-Tuning</h3> | |
| <table style="width:100%; border-collapse: collapse; margin: 15px 0;"> | |
| <tr style="border-bottom: 2px solid var(--cyan);"> | |
| <th style="padding: 10px; color: var(--cyan);">Aspect</th> | |
| <th style="padding: 10px; color: var(--cyan);">RAG</th> | |
| <th style="padding: 10px; color: var(--cyan);">Fine-Tuning</th> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Knowledge updates</td><td>Instant (add new docs)</td><td>Requires retraining</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Hallucination</td><td>Reduced (grounded in docs)</td><td>Can still hallucinate</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Cost</td><td>Low (no training needed)</td><td>High (GPU compute)</td></tr> | |
| <tr><td style="padding: 8px;">Best for</td><td>QA over docs, support bots</td><td>Teaching new skills/style</td></tr> | |
| </table> | |
| `, | |
| concepts: ` | |
| <h3>RAG Architecture Deep Dive</h3> | |
| <h4>1. Document Processing</h4> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Loading:</strong> Parse PDFs, HTML, Markdown, Databases, APIs using document loaders (LangChain, LlamaIndex)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Chunking Strategies:</strong><br> | |
| • <strong>Fixed-size:</strong> Split every N tokens (simplest, often good enough)<br> | |
| • <strong>Recursive:</strong> Split by paragraphs → sentences → words (preserves structure)<br> | |
| • <strong>Semantic:</strong> Use embeddings to detect topic boundaries<br> | |
| • <strong>Overlap:</strong> Add 10-20% overlap between chunks to preserve context | |
| </div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Embedding:</strong> Convert chunks to vectors using models like OpenAI text-embedding-3-small (1536-dim), Cohere embed-v3, or open-source models like BGE-large</div> | |
| </div> | |
| <h4>2. Retrieval Strategies</h4> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>Dense Retrieval:</strong> Cosine similarity on embeddings (semantic search)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">05</div> | |
| <div><strong>Sparse Retrieval:</strong> BM25/TF-IDF keyword matching (exact term matching)</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">06</div> | |
| <div><strong>Hybrid Search:</strong> Combine dense + sparse with Reciprocal Rank Fusion (RRF) — usually best results</div> | |
| </div> | |
| <h4>3. Advanced RAG Patterns</h4> | |
| <div class="list-item"> | |
| <div class="list-num">07</div> | |
| <div><strong>Multi-Query RAG:</strong> LLM generates multiple query variations → retrieve for each → merge results</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">08</div> | |
| <div><strong>Re-ranking:</strong> Use a cross-encoder (e.g., Cohere Rerank) to re-score retrieved chunks for higher relevance</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">09</div> | |
| <div><strong>Query Expansion:</strong> HyDE — generate a hypothetical answer, embed that, then retrieve</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">10</div> | |
| <div><strong>Agentic RAG:</strong> LLM decides when and what to retrieve; can self-reflect and re-retrieve if initial results are poor</div> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">🔑 Production Checklist</div> | |
| ✅ Chunk size 256-512 tokens with 10-20% overlap<br> | |
| ✅ Use hybrid search (dense + BM25)<br> | |
| ✅ Add re-ranker for precision<br> | |
| ✅ Include metadata filtering (date, source, category)<br> | |
| ✅ Monitor retrieval quality with evaluation metrics | |
| </div> | |
| `, | |
| math: ` | |
| <h3>📐 RAG Evaluation Metrics</h3> | |
| <h4>Retrieval Quality</h4> | |
| <div class="formula" style="font-size: 1.1rem; text-align: center; margin: 20px 0; background: rgba(0, 212, 255, 0.08); padding: 25px; border-radius: 8px;"> | |
| <strong>Recall@k = |Relevant ∩ Retrieved@k| / |Relevant|</strong><br><br> | |
| <strong>Precision@k = |Relevant ∩ Retrieved@k| / k</strong><br><br> | |
| <strong>MRR = 1/|Q| × Σ(1 / rank_i)</strong> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Paper & Pain: MRR Calculation</div> | |
| <strong>3 queries, first relevant result at ranks 1, 3, 2:</strong><br><br> | |
| MRR = (1/3) × (1/1 + 1/3 + 1/2)<br> | |
| MRR = (1/3) × (1.0 + 0.333 + 0.5)<br> | |
| MRR = (1/3) × 1.833 = <strong>0.611</strong><br><br> | |
| <strong>Interpretation:</strong> On average, the first relevant result appears around position 1.6 | |
| </div> | |
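| <p>These metrics take only a few lines of plain Python — a minimal sketch that reproduces the worked MRR example above, with no library assumptions:</p> | |
| <span class="code-title">📄 rag_metrics_sketch.py</span><div class="code-block"><span class="keyword">def</span> recall_at_k(relevant, retrieved, k): | |
|     <span class="keyword">return</span> len(set(relevant) & set(retrieved[:k])) / len(relevant) | |
| <span class="keyword">def</span> precision_at_k(relevant, retrieved, k): | |
|     <span class="keyword">return</span> len(set(relevant) & set(retrieved[:k])) / k | |
| <span class="keyword">def</span> mrr(ranked_lists, relevant_sets): | |
|     total = <span class="number">0.0</span> | |
|     <span class="keyword">for</span> retrieved, relevant <span class="keyword">in</span> zip(ranked_lists, relevant_sets): | |
|         <span class="keyword">for</span> rank, doc <span class="keyword">in</span> enumerate(retrieved, start=<span class="number">1</span>): | |
|             <span class="keyword">if</span> doc <span class="keyword">in</span> relevant: | |
|                 total += <span class="number">1.0</span> / rank | |
|                 <span class="keyword">break</span> | |
|     <span class="keyword">return</span> total / len(ranked_lists) | |
| <span class="comment"># The worked example: first relevant result at ranks 1, 3, 2</span> | |
| runs = [[<span class="string">"a"</span>], [<span class="string">"x"</span>, <span class="string">"y"</span>, <span class="string">"a"</span>], [<span class="string">"x"</span>, <span class="string">"a"</span>]] | |
| rels = [{<span class="string">"a"</span>}, {<span class="string">"a"</span>}, {<span class="string">"a"</span>}] | |
| <span class="keyword">print</span>(round(mrr(runs, rels), <span class="number">3</span>))  <span class="comment"># 0.611</span></div> | |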
| <h4>Reciprocal Rank Fusion (Hybrid Search)</h4> | |
| <div class="formula"> | |
| <strong>RRF(d) = Σ 1 / (k + rank_i(d))</strong><br><br> | |
| Where k = 60 (constant), rank_i = rank from retriever i<br><br> | |
| Example: Document D appears at rank 2 in dense, rank 5 in sparse:<br> | |
| RRF(D) = 1/(60+2) + 1/(60+5) = 0.0161 + 0.0154 = <strong>0.0315</strong> | |
| </div> | |
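| <p>RRF itself is tiny — a minimal sketch using the document ranks from the example above:</p> | |
| <span class="code-title">📄 rrf_sketch.py</span><div class="code-block"><span class="keyword">def</span> rrf(rankings, k=<span class="number">60</span>): | |
|     <span class="comment"># rankings: one ordered list of doc ids per retriever</span> | |
|     scores = {} | |
|     <span class="keyword">for</span> ranked <span class="keyword">in</span> rankings: | |
|         <span class="keyword">for</span> rank, doc <span class="keyword">in</span> enumerate(ranked, start=<span class="number">1</span>): | |
|             scores[doc] = scores.get(doc, <span class="number">0.0</span>) + <span class="number">1.0</span> / (k + rank) | |
|     <span class="keyword">return</span> sorted(scores.items(), key=<span class="keyword">lambda</span> kv: -kv[<span class="number">1</span>]) | |
| dense = [<span class="string">"B"</span>, <span class="string">"D"</span>, <span class="string">"A"</span>]  <span class="comment"># D at rank 2 in the dense list</span> | |
| sparse = [<span class="string">"A"</span>, <span class="string">"C"</span>, <span class="string">"E"</span>, <span class="string">"F"</span>, <span class="string">"D"</span>]  <span class="comment"># D at rank 5 in the sparse list</span> | |
| <span class="keyword">print</span>(rrf([dense, sparse]))  <span class="comment"># D scores 1/62 + 1/65 ≈ 0.0315, as above</span></div> | |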
| <h4>Chunking Size Impact</h4> | |
| <div class="formula"> | |
| Optimal chunk size depends on:<br> | |
| • Embedding model context window (often 512 tokens for BERT-based embedders)<br> | |
| • Query specificity (specific → smaller chunks)<br> | |
| • Document structure (code → function-level, prose → paragraph-level)<br><br> | |
| Rule of thumb: <strong>chunk_size ≈ 2-3× expected query length</strong> | |
| </div> | |
| `, | |
| applications: ` | |
| <h3>RAG in Production</h3> | |
| <div class="info-box"> | |
| <div class="box-title">🔍 Perplexity AI</div> | |
| <div class="box-content"> | |
| RAG-powered search engine: retrieves web pages → feeds to LLM → generates cited answers. Processes 100M+ queries/month. | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">💼 Enterprise Knowledge Bases</div> | |
| <div class="box-content"> | |
| <strong>Notion AI, Glean, Guru:</strong> Index company docs → answer employee questions with source citations<br> | |
| <strong>Legal AI:</strong> Retrieve relevant case law for legal research (Harvey AI) | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">💻 Code Assistants</div> | |
| <div class="box-content"> | |
| <strong>GitHub Copilot, Cursor:</strong> Retrieve relevant code from the codebase to inform suggestions<br> | |
| <strong>Documentation Q&A:</strong> Answer questions about SDKs and libraries | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🏥 Healthcare</div> | |
| <div class="box-content"> | |
| Retrieve medical literature → generate evidence-based clinical summaries (Google Med-PaLM 2) | |
| </div> | |
| </div> | |
| <div class="callout tip"> | |
| <div class="callout-title">🛠️ Popular Frameworks</div> | |
| • <strong>LangChain:</strong> Most popular, modular pipeline builder<br> | |
| • <strong>LlamaIndex:</strong> Data-focused, great for document processing<br> | |
| • <strong>Haystack:</strong> Production-ready, NLP-first<br> | |
| • <strong>Semantic Kernel:</strong> Microsoft's enterprise RAG framework | |
| </div> | |
| `, | |
| code: `<span class="code-title">📄 rag_langchain.py</span><div class="code-block"><span class="keyword">from</span> langchain.document_loaders <span class="keyword">import</span> PyPDFLoader | |
| <span class="keyword">from</span> langchain.text_splitter <span class="keyword">import</span> RecursiveCharacterTextSplitter | |
| <span class="keyword">from</span> langchain.embeddings <span class="keyword">import</span> OpenAIEmbeddings | |
| <span class="keyword">from</span> langchain.vectorstores <span class="keyword">import</span> Chroma | |
| <span class="keyword">from</span> langchain.chains <span class="keyword">import</span> RetrievalQA | |
| <span class="keyword">from</span> langchain.llms <span class="keyword">import</span> OpenAI | |
| <span class="comment"># 1. Load & chunk documents</span> | |
| loader = PyPDFLoader(<span class="string">"deep_learning_book.pdf"</span>) | |
| docs = loader.load() | |
| splitter = RecursiveCharacterTextSplitter(chunk_size=<span class="number">1000</span>, chunk_overlap=<span class="number">200</span>) | |
| chunks = splitter.split_documents(docs) | |
| <span class="comment"># 2. Create vector store</span> | |
| embeddings = OpenAIEmbeddings() | |
| vectordb = Chroma.from_documents(chunks, embeddings, persist_directory=<span class="string">"./chroma_db"</span>) | |
| <span class="comment"># 3. Query with RAG</span> | |
| qa_chain = RetrievalQA.from_chain_type( | |
| llm=OpenAI(temperature=<span class="number">0</span>), | |
| retriever=vectordb.as_retriever(search_kwargs={<span class="string">"k"</span>: <span class="number">3</span>}), | |
| ) | |
| answer = qa_chain.run(<span class="string">"What is backpropagation?"</span>) | |
| <span class="keyword">print</span>(answer)</div> | |
| ` | |
| }, | |
| "advanced-llm": { | |
| overview: ` | |
| <p>Fine-tuning a 7B+ parameter model requires enormous GPU memory (28GB+ just to hold the fp32 weights, before gradients and optimizer state). <strong>LoRA (Low-Rank Adaptation)</strong> makes this practical by training only a tiny fraction of parameters — typically <strong>around 0.1%</strong> — while achieving similar performance to full fine-tuning.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">🎯 The Key Insight</div> | |
| Research showed that pretrained weight matrices have <strong>low intrinsic dimensionality</strong> — meaning changes during fine-tuning can be represented by low-rank matrices. Instead of updating the full d×d weight matrix W, we add a small update BA where B is d×r and A is r×d, with rank r << d. | |
| </div> | |
| <h3>LoRA Math</h3> | |
| <div class="formula" style="text-align:center;"> | |
| $$W' = W + \\Delta W = W + BA$$ | |
| $$B \\in \\mathbb{R}^{d \\times r}, \\; A \\in \\mathbb{R}^{r \\times d}, \\; r \\ll d$$ | |
| </div> | |
| <p><strong>Example:</strong> For a 4096×4096 weight matrix (16.7M params), LoRA with r=16 adds only 2×4096×16 = 131K params — a <strong>128x reduction</strong>.</p> | |
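| <p>The arithmetic is easy to verify — a minimal NumPy sketch of the shapes involved (illustrative; real LoRA layers live inside the transformer):</p> | |
| <span class="code-title">📄 lora_shapes_sketch.py</span><div class="code-block"><span class="keyword">import</span> numpy <span class="keyword">as</span> np | |
| d, r = <span class="number">4096</span>, <span class="number">16</span> | |
| W = np.zeros((d, d))  <span class="comment"># frozen pretrained weight: 16,777,216 params</span> | |
| B = np.zeros((d, r))  <span class="comment"># trainable, d x r</span> | |
| A = np.zeros((r, d))  <span class="comment"># trainable, r x d</span> | |
| <span class="keyword">print</span>(W.size, B.size + A.size, W.size // (B.size + A.size))  <span class="comment"># 16777216 131072 128</span> | |
| x = np.random.randn(d) | |
| y = W @ x + B @ (A @ x)  <span class="comment"># adapted forward pass: (W + BA) x</span></div> | |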
| <h3>Quantization + LoRA = QLoRA</h3> | |
| <p>QLoRA takes this further by keeping the base model in <strong>4-bit quantized</strong> format and training LoRA adapters in fp16. This means fine-tuning a 65B model on a <strong>single 48GB GPU</strong>.</p> | |
| <table style="width:100%; border-collapse: collapse; margin: 15px 0;"> | |
| <tr style="border-bottom: 2px solid var(--cyan);"> | |
| <th style="padding: 10px; color: var(--cyan);">Method</th> | |
| <th style="padding: 10px; color: var(--cyan);">7B Model Memory</th> | |
| <th style="padding: 10px; color: var(--cyan);">Trainable Params</th> | |
| </tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Full Fine-tune (fp32)</td><td>28 GB</td><td>7B (100%)</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">Full Fine-tune (fp16)</td><td>14 GB</td><td>7B (100%)</td></tr> | |
| <tr style="border-bottom: 1px solid rgba(255,255,255,0.1);"><td style="padding: 8px;">LoRA (fp16)</td><td>14 GB</td><td>~4M (0.06%)</td></tr> | |
| <tr><td style="padding: 8px;">QLoRA (4-bit)</td><td>~4 GB</td><td>~4M (0.06%)</td></tr> | |
| </table> | |
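| <p>Loading a base model the QLoRA way takes one config object — a minimal sketch with Hugging Face transformers + bitsandbytes (argument names as in recent library versions; check your installed release):</p> | |
| <span class="code-title">📄 qlora_load_sketch.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">from</span> transformers <span class="keyword">import</span> AutoModelForCausalLM, BitsAndBytesConfig | |
| <span class="comment"># NF4 base weights + double quantization + bf16 compute = the QLoRA recipe</span> | |
| bnb_config = BitsAndBytesConfig( | |
|     load_in_4bit=<span class="keyword">True</span>, | |
|     bnb_4bit_quant_type=<span class="string">"nf4"</span>, | |
|     bnb_4bit_use_double_quant=<span class="keyword">True</span>, | |
|     bnb_4bit_compute_dtype=torch.bfloat16, | |
| ) | |
| model = AutoModelForCausalLM.from_pretrained( | |
|     <span class="string">"meta-llama/Llama-2-7b-hf"</span>, quantization_config=bnb_config | |
| ) | |
| <span class="comment"># LoRA adapters are then attached exactly as in the Python Code tab</span></div> | |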
| `, | |
| concepts: ` | |
| <h3>LoRA (Low-Rank Adaptation)</h3> | |
| <p>Instead of updating all parameters, LoRA freezes the original weights and injects small trainable matrices into each layer.</p> | |
| <div class="list-item"> | |
| <div class="list-num">01</div> | |
| <div><strong>Core Idea:</strong> Weight update ΔW can be decomposed as a low-rank matrix: ΔW = B × A, where B ∈ ℝ^(d×r) and A ∈ ℝ^(r×d), with r ≪ d</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">02</div> | |
| <div><strong>Memory Savings:</strong> Frozen weights need no gradients or optimizer state. Full fine-tuning of a 7B model with Adam takes 60GB+ of VRAM; LoRA (r=16) trains the same model in roughly 16GB.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">03</div> | |
| <div><strong>Rank (r):</strong> Typical values: r=8, 16, 32, 64. Higher rank = more expressiveness but more parameters.</div> | |
| </div> | |
| <h3>QLoRA (Quantized LoRA)</h3> | |
| <div class="list-item"> | |
| <div class="list-num">04</div> | |
| <div><strong>4-bit NormalFloat (NF4):</strong> Quantize frozen weights to 4-bit. Train LoRA adapters in fp16/bf16. Enables fine-tuning 65B models on a single 48GB GPU.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">05</div> | |
| <div><strong>Double Quantization:</strong> Quantize the quantization constants themselves — saves additional memory.</div> | |
| </div> | |
| <h3>Quantization for Deployment</h3> | |
| <div class="list-item"> | |
| <div class="list-num">06</div> | |
| <div><strong>GGUF (llama.cpp):</strong> CPU-optimized format. Run Llama-2 70B on a MacBook with Q4_K_M quantization.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">07</div> | |
| <div><strong>GPTQ:</strong> GPU-optimized post-training quantization. 3-4 bit with minimal quality loss.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">08</div> | |
| <div><strong>AWQ (Activation-aware):</strong> Preserves important weight channels. State-of-the-art quality at 4-bit.</div> | |
| </div> | |
| <h3>RLHF & DPO</h3> | |
| <div class="list-item"> | |
| <div class="list-num">09</div> | |
| <div><strong>RLHF:</strong> Train a reward model from human preferences, then use PPO to optimize the LLM's policy. Used by ChatGPT, Claude.</div> | |
| </div> | |
| <div class="list-item"> | |
| <div class="list-num">10</div> | |
| <div><strong>DPO (Direct Preference Optimization):</strong> Skip the reward model entirely — optimize directly from preference pairs. Simpler, lower compute, often comparable results.</div> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">🔁 PEFT (Parameter-Efficient Fine-Tuning)</div> | |
| PEFT is the umbrella term for methods that train &lt;1% of parameters:<br> | |
| • <strong>LoRA / QLoRA:</strong> Low-rank weight decomposition<br> | |
| • <strong>Prefix Tuning:</strong> Trainable prefix tokens<br> | |
| • <strong>Adapters:</strong> Small bottleneck layers inserted into frozen model<br> | |
| • <strong>IA3:</strong> Learned rescaling vectors (even fewer params than LoRA) | |
| </div> | |
| `, | |
| math: ` | |
| <h3>📐 Paper & Pain: LoRA Mathematics</h3> | |
| <h4>Low-Rank Decomposition</h4> | |
| <div class="formula" style="font-size: 1.2rem; text-align: center; margin: 20px 0; background: rgba(0, 212, 255, 0.08); padding: 25px; border-radius: 8px;"> | |
| <strong>W' = W + ΔW = W + B × A</strong><br> | |
| <small>where W ∈ ℝ^(d×d), B ∈ ℝ^(d×r), A ∈ ℝ^(r×d), r ≪ d</small> | |
| </div> | |
| <div class="callout insight"> | |
| <div class="callout-title">📝 Parameter Count Comparison</div> | |
| <strong>Full fine-tuning of one attention layer (d=4096):</strong><br> | |
| Parameters = d × d = 4096 × 4096 = <strong>16,777,216</strong><br><br> | |
| <strong>LoRA (r=16):</strong><br> | |
| Parameters = d × r + r × d = 4096 × 16 + 16 × 4096 = <strong>131,072</strong><br><br> | |
| <strong>Reduction: 128x fewer parameters!</strong> (0.78% of original)<br><br> | |
| For a full 7B model (LoRA on the q,v projections of all 32 layers):<br> | |
| Full: ~7B params to train (28GB just for fp32 weights, before optimizer state)<br> | |
| LoRA (r=16): ~8.4M params to train (adapters + their optimizer state fit in tens of MB) | |
| </div> | |
| <h4>Quantization Math</h4> | |
| <div class="formula"> | |
| <strong>Linear Quantization (INT8):</strong><br> | |
| q = round(w / scale + zero_point)<br> | |
| w_approx = (q - zero_point) × scale<br><br> | |
| <strong>Example:</strong><br> | |
| Weight w = 0.73, scale = 0.01, zero_point = 128<br> | |
| q = round(0.73 / 0.01 + 128) = round(201) = 201<br> | |
| w_approx = (201 - 128) × 0.01 = 0.73 ✓ | |
| </div> | |
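| <p>The round trip above in plain Python — a sketch of the formula itself, not any particular library's kernel:</p> | |
| <span class="code-title">📄 int8_quant_sketch.py</span><div class="code-block"><span class="keyword">def</span> quantize(w, scale, zero_point): | |
|     <span class="keyword">return</span> round(w / scale + zero_point) | |
| <span class="keyword">def</span> dequantize(q, scale, zero_point): | |
|     <span class="keyword">return</span> (q - zero_point) * scale | |
| scale, zero_point = <span class="number">0.01</span>, <span class="number">128</span> | |
| q = quantize(<span class="number">0.73</span>, scale, zero_point)  <span class="comment"># 201</span> | |
| <span class="keyword">print</span>(q, round(dequantize(q, scale, zero_point), <span class="number">4</span>))  <span class="comment"># 201 0.73</span></div> | |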
| <h4>Memory Savings</h4> | |
| <div class="formula"> | |
| <strong>Model Size = Parameters × Bytes per Parameter</strong><br><br> | |
| 7B model at fp32: 7B × 4 bytes = 28 GB<br> | |
| 7B model at fp16: 7B × 2 bytes = 14 GB<br> | |
| 7B model at INT8: 7B × 1 byte = 7 GB<br> | |
| 7B model at INT4: 7B × 0.5 bytes = 3.5 GB<br><br> | |
| <strong>4-bit quantization = 8x memory reduction!</strong> | |
| </div> | |
| <h4>DPO Loss Function</h4> | |
| <div class="formula"> | |
| L_DPO = -E[log σ(β(log π_θ(y_w|x) - log π_ref(y_w|x) - log π_θ(y_l|x) + log π_ref(y_l|x)))]<br><br> | |
| Where:<br> | |
| • y_w = preferred response, y_l = rejected response<br> | |
| • π_θ = policy model, π_ref = reference model<br> | |
| • β = temperature controlling deviation from reference | |
| </div> | |
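| <p>Once per-response log-probs are in hand, the DPO loss is a one-liner — a minimal PyTorch sketch (the four tensors are assumed to be summed log-probs computed elsewhere):</p> | |
| <span class="code-title">📄 dpo_loss_sketch.py</span><div class="code-block"><span class="keyword">import</span> torch | |
| <span class="keyword">import</span> torch.nn.functional <span class="keyword">as</span> F | |
| <span class="keyword">def</span> dpo_loss(pi_w, pi_l, ref_w, ref_l, beta=<span class="number">0.1</span>): | |
|     <span class="comment"># pi_* / ref_*: log-probs of chosen (w) and rejected (l) responses</span> | |
|     <span class="comment"># under the policy and the frozen reference model</span> | |
|     logits = beta * ((pi_w - ref_w) - (pi_l - ref_l)) | |
|     <span class="keyword">return</span> -F.logsigmoid(logits).mean() | |
| <span class="comment"># Toy numbers: the policy already prefers the chosen response</span> | |
| pi_w, pi_l = torch.tensor([-<span class="number">12.0</span>]), torch.tensor([-<span class="number">15.0</span>]) | |
| ref_w, ref_l = torch.tensor([-<span class="number">13.0</span>]), torch.tensor([-<span class="number">14.0</span>]) | |
| <span class="keyword">print</span>(dpo_loss(pi_w, pi_l, ref_w, ref_l))  <span class="comment"># tensor(0.5981)</span></div> | |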
| `, | |
| applications: ` | |
| <h3>Production Deployment</h3> | |
| <div class="info-box"> | |
| <div class="box-title">🏠 Local LLM Deployment</div> | |
| <div class="box-content"> | |
| <strong>Ollama:</strong> One-command local LLM deployment (ollama run llama3)<br> | |
| <strong>llama.cpp:</strong> CPU inference with GGUF quantized models<br> | |
| <strong>vLLM:</strong> High-throughput GPU serving with PagedAttention | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🎯 Domain-Specific Fine-Tuning</div> | |
| <div class="box-content"> | |
| <strong>Medical:</strong> BioMistral, Med-PaLM (clinical notes, diagnoses)<br> | |
| <strong>Legal:</strong> SaulLM (contract analysis, case law)<br> | |
| <strong>Code:</strong> CodeLlama, StarCoder (code generation, completion) | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">💰 Cost Optimization</div> | |
| <div class="box-content"> | |
| <strong>Distillation:</strong> Train a small model to mimic a large one (GPT-4 → Phi-3)<br> | |
| <strong>Speculative Decoding:</strong> Small model drafts, large model verifies (2-3x speedup)<br> | |
| <strong>Mixture of Experts:</strong> Only activate 2 of 8 expert networks per token (Mixtral) | |
| </div> | |
| </div> | |
| <div class="info-box"> | |
| <div class="box-title">🛠️ Key Tools</div> | |
| <div class="box-content"> | |
| <strong>Hugging Face PEFT:</strong> Official LoRA/QLoRA library<br> | |
| <strong>Unsloth:</strong> 2x faster fine-tuning with 60% less memory<br> | |
| <strong>Axolotl:</strong> Config-driven fine-tuning (no code needed)<br> | |
| <strong>TRL:</strong> Transformer Reinforcement Learning (RLHF/DPO) | |
| </div> | |
| </div> | |
| `, | |
| code: ` | |
| <h3>LoRA Math</h3> | |
| <div class="formula" style="text-align:center;"> | |
| $$W' = W + BA, \\quad B \\in \\mathbb{R}^{d \\times r}, \\; A \\in \\mathbb{R}^{r \\times d}, \\; r \\ll d$$ | |
| Trainable params: $2 \\times d \\times r$ instead of $d^2$ (e.g., <strong>128x reduction</strong>) | |
| </div> | |
| <span class="code-title">📄 lora_finetuning.py</span><div class="code-block"><span class="keyword">from</span> peft <span class="keyword">import</span> LoraConfig, get_peft_model | |
| <span class="keyword">from</span> transformers <span class="keyword">import</span> AutoModelForCausalLM, AutoTokenizer | |
| <span class="comment"># Load base model</span> | |
| model = AutoModelForCausalLM.from_pretrained(<span class="string">"meta-llama/Llama-2-7b-hf"</span>) | |
| <span class="comment"># Apply LoRA</span> | |
| lora_config = LoraConfig( | |
| r=<span class="number">16</span>, <span class="comment"># rank (lower = fewer params)</span> | |
| lora_alpha=<span class="number">32</span>, <span class="comment"># scaling factor</span> | |
| target_modules=[<span class="string">"q_proj"</span>, <span class="string">"v_proj"</span>], <span class="comment"># which layers to adapt</span> | |
| lora_dropout=<span class="number">0.05</span>, | |
| task_type=<span class="string">"CAUSAL_LM"</span>, | |
| ) | |
| model = get_peft_model(model, lora_config) | |
| model.print_trainable_parameters() | |
| <span class="comment"># Output: trainable: 4,194,304 || all: 6,742,609,920 || 0.06%</span></div> | |
| ` | |
| } | |
| }; | |
| function createModuleHTML(module) { | |
| const content = MODULE_CONTENT[module.id] || {}; | |
| return ` | |
| <div class="module" id="${module.id}-module"> | |
| <button class="btn-back" onclick="switchTo('dashboard')">← Back to Dashboard</button> | |
| <header> | |
| <h1>${module.icon} ${module.title}</h1> | |
| <p class="subtitle">${module.description}</p> | |
| </header> | |
| <div class="tabs"> | |
| <button class="tab-btn active" onclick="switchTab(event, '${module.id}-overview')">Overview</button> | |
| <button class="tab-btn" onclick="switchTab(event, '${module.id}-concepts')">Key Concepts</button> | |
| <button class="tab-btn" onclick="switchTab(event, '${module.id}-visualization')">📊 Visualization</button> | |
| <button class="tab-btn" onclick="switchTab(event, '${module.id}-math')">Math</button> | |
| <button class="tab-btn" onclick="switchTab(event, '${module.id}-applications')">Applications</button> | |
| <button class="tab-btn" onclick="switchTab(event, '${module.id}-code')">🐍 Python Code</button> | |
| <button class="tab-btn" onclick="switchTab(event, '${module.id}-summary')">Summary</button> | |
| </div> | |
| <div id="${module.id}-overview" class="tab active"> | |
| <div class="section"> | |
| <h2>📖 Overview</h2> | |
| ${content.overview || ` | |
| <p>Complete coverage of ${module.title.toLowerCase()}. Learn the fundamentals, mathematics, real-world applications, and implementation details.</p> | |
| <div class="info-box"> | |
| <div class="box-title">Learning Objectives</div> | |
| <div class="box-content"> | |
| ✓ Understand core concepts and theory<br> | |
| ✓ Master mathematical foundations<br> | |
| ✓ Learn practical applications<br> | |
| ✓ Implement and experiment | |
| </div> | |
| </div> | |
| `} | |
| </div> | |
| </div> | |
| <div id="${module.id}-concepts" class="tab"> | |
| <div class="section"> | |
| <h2>🎯 Key Concepts</h2> | |
| ${content.concepts || ` | |
| <p>Fundamental concepts and building blocks for ${module.title.toLowerCase()}.</p> | |
| <div class="callout insight"> | |
| <div class="callout-title">💡 Main Ideas</div> | |
| This section covers the core ideas you need to understand before diving into mathematics. | |
| </div> | |
| `} | |
| </div> | |
| </div> | |
| <div id="${module.id}-visualization" class="tab"> | |
| <div class="section"> | |
| <h2>📊 Interactive Visualization</h2> | |
| <p>Visual representation to help understand ${module.title.toLowerCase()} concepts intuitively.</p> | |
| <div id="${module.id}-viz" class="viz-container"> | |
| <canvas id="${module.id}-canvas" width="800" height="400" style="border: 1px solid rgba(0, 212, 255, 0.3); border-radius: 8px; background: rgba(0, 212, 255, 0.02);"></canvas> | |
| </div> | |
| <div class="viz-controls"> | |
| <button onclick="drawVisualization('${module.id}')" class="btn-viz">🔄 Refresh Visualization</button> | |
| <button onclick="toggleVizAnimation('${module.id}')" class="btn-viz">▶️ Animate</button> | |
| <button onclick="downloadViz('${module.id}')" class="btn-viz">⬇️ Save Image</button> | |
| </div> | |
| </div> | |
| </div> | |
| <div id="${module.id}-math" class="tab"> | |
| <div class="section"> | |
| <h2>📐 Mathematical Foundation</h2> | |
| ${content.math || ` | |
| <p>Rigorous mathematical treatment of ${module.title.toLowerCase()}.</p> | |
| <div class="formula"> | |
| Mathematical formulas and derivations go here | |
| </div> | |
| `} | |
| </div> | |
| </div> | |
| <div id="${module.id}-applications" class="tab"> | |
| <div class="section"> | |
| <h2>🌍 Real-World Applications</h2> | |
| ${content.applications || ` | |
| <p>How ${module.title.toLowerCase()} is used in practice across different industries.</p> | |
| <div class="info-box"> | |
| <div class="box-title">Use Cases</div> | |
| <div class="box-content"> | |
| Common applications and practical examples | |
| </div> | |
| </div> | |
| `} | |
| </div> | |
| </div> | |
| <div id="${module.id}-code" class="tab"> | |
| <div class="section"> | |
| <h2>🐍 Python Implementation</h2> | |
| ${content.code || ` | |
| <p>Production-ready Python code for ${module.title.toLowerCase()} using PyTorch and TensorFlow.</p> | |
| <div class="callout tip"> | |
| <div class="callout-title">💡 Coming Soon</div> | |
| Python code examples for this module are being prepared. | |
| </div> | |
| `} | |
| </div> | |
| </div> | |
| <div id="${module.id}-summary" class="tab"> | |
| <div class="section"> | |
| <h2>✅ Summary</h2> | |
| <div class="info-box"> | |
| <div class="box-title">Key Takeaways</div> | |
| <div class="box-content"> | |
| ✓ Essential concepts covered<br> | |
| ✓ Mathematical foundations understood<br> | |
| ✓ Real-world applications identified<br> | |
| ✓ Ready for implementation | |
| </div> | |
| </div> | |
| </div> | |
| </div> | |
| </div> | |
| `; | |
| } | |
| function initDashboard() { | |
| const grid = document.getElementById("modulesGrid"); | |
| const container = document.getElementById("modulesContainer"); | |
| modules.forEach((module, index) => { | |
| const card = document.createElement("div"); | |
| // Add staggered animation class | |
| const staggerClass = `stagger stagger-${(index % 8) + 1}`; | |
| card.className = `card hover-glow ${staggerClass}`; | |
| card.style.borderColor = module.color; | |
| card.onclick = () => switchTo(module.id + "-module"); | |
| card.innerHTML = ` | |
| <div class="card-icon">${module.icon}</div> | |
| <h3>${module.title}</h3> | |
| <p>${module.description}</p> | |
| <span class="category-label">${module.category}</span> | |
| `; | |
| grid.appendChild(card); | |
| const moduleHTML = createModuleHTML(module); | |
| container.insertAdjacentHTML("beforeend", moduleHTML); // avoids re-parsing every existing module on each iteration | |
| }); | |
| } | |
| function switchTo(target) { | |
| document.querySelectorAll('.dashboard, .module').forEach(el => { | |
| el.classList.remove('active'); | |
| }); | |
| const elem = document.getElementById(target); | |
| if (elem) elem.classList.add('active'); | |
| } | |
| function switchTab(e, tabId) { | |
| const module = e.target.closest('.module'); | |
| if (!module) return; | |
| module.querySelectorAll('.tab').forEach(t => t.classList.remove('active')); | |
| module.querySelectorAll('.tab-btn').forEach(b => b.classList.remove('active')); | |
| const tab = document.getElementById(tabId); | |
| if (tab) tab.classList.add('active'); | |
| e.target.classList.add('active'); | |
| // Re-render MathJax in the newly visible tab | |
| if (tab && window.MathJax && window.MathJax.typesetPromise) { | |
| window.MathJax.typesetPromise([tab]).catch(function (err) { console.log('MathJax typeset:', err); }); | |
| } | |
| // Trigger visualization when tabs are clicked | |
| setTimeout(() => { | |
| // Module ids can contain hyphens (e.g. "nn-basics"), so strip the tab suffix rather than split on "-" | |
| const moduleId = tabId.replace(/-(overview|concepts|visualization|math|applications|code|summary)$/, ''); | |
| if (tabId.includes('-concepts')) { | |
| drawConceptsVisualization(moduleId); | |
| } else if (tabId.includes('-visualization')) { | |
| drawConceptsVisualization(moduleId); | |
| } else if (tabId.includes('-math')) { | |
| drawMathVisualization(moduleId); | |
| } else if (tabId.includes('-applications')) { | |
| drawApplicationVisualization(moduleId); | |
| } | |
| }, 150); | |
| } | |
| // Visualization Functions - Concepts Tab | |
| function drawConceptsVisualization(moduleId) { | |
| const canvas = document.getElementById(moduleId + '-canvas'); | |
| if (!canvas) return; | |
| const ctx = canvas.getContext('2d'); | |
| ctx.clearRect(0, 0, canvas.width, canvas.height); | |
| ctx.fillStyle = '#0f1419'; | |
| ctx.fillRect(0, 0, canvas.width, canvas.height); | |
| const vizMap = { | |
| 'nn-basics': drawNeuronAnimation, | |
| 'perceptron': drawDecisionBoundary, | |
| 'mlp': drawNetworkGraph, | |
| 'activation': drawActivationFunctions, | |
| 'weight-init': drawWeightDistribution, | |
| 'loss': drawLossLandscape, | |
| 'optimizers': drawConvergencePaths, | |
| 'backprop': drawGradientFlow, | |
| 'regularization': drawOverfitComparison, | |
| 'batch-norm': drawBatchNormalization, | |
| 'cv-intro': drawImageMatrix, | |
| 'conv-layer': drawConvolutionAnimation, | |
| 'pooling': drawPoolingDemo, | |
| 'cnn-basics': drawCNNArchitecture, | |
| 'viz-filters': drawLearnedFilters, | |
| 'lenet': drawLeNetArchitecture, | |
| 'alexnet': drawAlexNetArchitecture, | |
| 'vgg': drawVGGArchitecture, | |
| 'resnet': drawResNetArchitecture, | |
| 'inception': drawInceptionModule, | |
| 'mobilenet': drawMobileNetArchitecture, | |
| 'transfer-learning': drawTransferLearning, | |
| 'localization': drawBoundingBoxes, | |
| 'rcnn': drawRCNNPipeline, | |
| 'yolo': drawYOLOGrid, | |
| 'ssd': drawSSDDetector, | |
| 'semantic-seg': drawSemanticSegmentation, | |
| 'instance-seg': drawInstanceSegmentation, | |
| 'face-recog': drawFaceEmbeddings, | |
| 'autoencoders': drawAutoencoderArchitecture, | |
| 'gans': drawGANsGame, | |
| 'diffusion': drawDiffusionProcess, | |
| 'rnn': drawRNNUnrolled, | |
| 'transformers': drawAttentionMatrix, | |
| 'bert': drawBERTProcess, | |
| 'gpt': drawGPTGeneration, | |
| 'vit': drawVisionTransformer, | |
| 'gnn': drawGraphNetwork, | |
| 'seq2seq': drawSeq2SeqAttention, | |
| 'research-papers': drawDefaultVisualization, | |
| 'vector-db': drawVectorSpace, | |
| 'rag': drawRAGPipeline, | |
| 'advanced-llm': drawLoRADiagram | |
| }; | |
| if (vizMap[moduleId]) { | |
| vizMap[moduleId](ctx, canvas); | |
| } else { | |
| drawDefaultVisualization(ctx, canvas); | |
| } | |
| } | |
| // Default Visualization | |
| function drawDefaultVisualization(ctx, canvas) { | |
| const centerX = canvas.width / 2; | |
| const centerY = canvas.height / 2; | |
| ctx.fillStyle = 'rgba(0, 212, 255, 0.2)'; | |
| ctx.fillRect(centerX - 120, centerY - 60, 240, 120); | |
| ctx.strokeStyle = '#00d4ff'; | |
| ctx.lineWidth = 2; | |
| ctx.strokeRect(centerX - 120, centerY - 60, 240, 120); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 18px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('📊 Interactive Visualization', centerX, centerY - 20); | |
| ctx.font = '13px Arial'; | |
| ctx.fillText('Custom visualization for this topic', centerX, centerY + 20); | |
| ctx.font = '11px Arial'; | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('Click Refresh to render', centerX, centerY + 45); | |
| } | |
| // Default Math Visualization | |
| function drawDefaultMathVisualization(ctx, canvas) { | |
| const centerX = canvas.width / 2; | |
| const centerY = canvas.height / 2; | |
| ctx.fillStyle = 'rgba(255, 107, 53, 0.2)'; | |
| ctx.fillRect(centerX - 120, centerY - 60, 240, 120); | |
| ctx.strokeStyle = '#ff6b35'; | |
| ctx.lineWidth = 2; | |
| ctx.strokeRect(centerX - 120, centerY - 60, 240, 120); | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = 'bold 18px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('📐 Mathematical Formulas', centerX, centerY - 20); | |
| ctx.font = '13px Arial'; | |
| ctx.fillText('Visual equation derivations', centerX, centerY + 20); | |
| ctx.font = '11px Arial'; | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('Click Visualize to render', centerX, centerY + 45); | |
| } | |
| // Default Application Visualization | |
| function drawDefaultApplicationVisualization(ctx, canvas) { | |
| const centerX = canvas.width / 2; | |
| const centerY = canvas.height / 2; | |
| ctx.fillStyle = 'rgba(0, 255, 136, 0.2)'; | |
| ctx.fillRect(centerX - 120, centerY - 60, 240, 120); | |
| ctx.strokeStyle = '#00ff88'; | |
| ctx.lineWidth = 2; | |
| ctx.strokeRect(centerX - 120, centerY - 60, 240, 120); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.font = 'bold 18px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('🌍 Real-World Applications', centerX, centerY - 20); | |
| ctx.font = '13px Arial'; | |
| ctx.fillText('Practical use cases and examples', centerX, centerY + 20); | |
| ctx.font = '11px Arial'; | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.fillText('Click Show Applications to render', centerX, centerY + 45); | |
| } | |
| // Activation Functions Visualization | |
| function drawActivationFunctions(ctx, canvas) { | |
| const width = canvas.width; | |
| const height = canvas.height; | |
| const centerX = width / 2; | |
| const centerY = height / 2; | |
| const scale = 40; | |
| // Draw grid | |
| ctx.strokeStyle = 'rgba(0, 212, 255, 0.1)'; | |
| ctx.lineWidth = 1; | |
| for (let i = -5; i <= 5; i += 1) { | |
| const x = centerX + i * scale; | |
| ctx.beginPath(); | |
| ctx.moveTo(x, centerY - 5 * scale); | |
| ctx.lineTo(x, centerY + 5 * scale); | |
| ctx.stroke(); | |
| } | |
| // Draw axes | |
| ctx.strokeStyle = '#00d4ff'; | |
| ctx.lineWidth = 2; | |
| ctx.beginPath(); | |
| ctx.moveTo(centerX - 6 * scale, centerY); | |
| ctx.lineTo(centerX + 6 * scale, centerY); | |
| ctx.stroke(); | |
| ctx.beginPath(); | |
| ctx.moveTo(centerX, centerY - 6 * scale); | |
| ctx.lineTo(centerX, centerY + 6 * scale); | |
| ctx.stroke(); | |
| // Draw activation functions | |
| const functions = [ | |
| { name: 'ReLU', color: '#ff6b35', fn: x => Math.max(0, x) }, | |
| { name: 'Sigmoid', color: '#00ff88', fn: x => 1 / (1 + Math.exp(-x)) }, | |
| { name: 'Tanh', color: '#ffa500', fn: x => Math.tanh(x) } | |
| ]; | |
| functions.forEach(func => { | |
| ctx.strokeStyle = func.color; | |
| ctx.lineWidth = 2; | |
| ctx.beginPath(); | |
| for (let x = -5; x <= 5; x += 0.1) { | |
| const y = func.fn(x); | |
| const canvasX = centerX + x * scale; | |
| const canvasY = centerY - y * scale; | |
| if (x === -5) ctx.moveTo(canvasX, canvasY); | |
| else ctx.lineTo(canvasX, canvasY); | |
| } | |
| ctx.stroke(); | |
| }); | |
| // Legend | |
| ctx.font = 'bold 12px Arial'; | |
| functions.forEach((func, i) => { | |
| ctx.fillStyle = func.color; | |
| ctx.fillRect(10, 10 + i * 20, 10, 10); | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.fillText(func.name, 25, 19 + i * 20); | |
| }); | |
| } | |
| // Neural Network Graph | |
| function drawNetworkGraph(ctx, canvas) { | |
| const layers = [2, 3, 3, 1]; | |
| const width = canvas.width; | |
| const height = canvas.height; | |
| const layerWidth = width / (layers.length + 1); | |
| ctx.fillStyle = 'rgba(0, 212, 255, 0.05)'; | |
| ctx.fillRect(0, 0, width, height); | |
| // Draw neurons and connections | |
| const neuronPositions = []; | |
| layers.forEach((numNeurons, layerIdx) => { | |
| const x = (layerIdx + 1) * layerWidth; | |
| const positions = []; | |
| for (let i = 0; i < numNeurons; i++) { | |
| const y = height / (numNeurons + 1) * (i + 1); | |
| positions.push({ x, y }); | |
| // Draw connections to next layer | |
| if (layerIdx < layers.length - 1) { | |
| const nextLayerPositions = []; | |
| const nextX = (layerIdx + 2) * layerWidth; | |
| for (let j = 0; j < layers[layerIdx + 1]; j++) { | |
| const nextY = height / (layers[layerIdx + 1] + 1) * (j + 1); | |
| nextLayerPositions.push({ x: nextX, y: nextY }); | |
| } | |
| nextLayerPositions.forEach(next => { | |
| ctx.strokeStyle = 'rgba(0, 212, 255, 0.2)'; | |
| ctx.lineWidth = 1; | |
| ctx.beginPath(); | |
| ctx.moveTo(x, y); | |
| ctx.lineTo(next.x, next.y); | |
| ctx.stroke(); | |
| }); | |
| } | |
| } | |
| // Draw neurons | |
| positions.forEach(pos => { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.beginPath(); | |
| ctx.arc(pos.x, pos.y, 8, 0, Math.PI * 2); | |
| ctx.fill(); | |
| }); | |
| neuronPositions.push(positions); | |
| }); | |
| // Labels | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = 'bold 12px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Input', layerWidth, height - 10); | |
| ctx.fillText('Hidden 1', layerWidth * 2, height - 10); | |
| ctx.fillText('Hidden 2', layerWidth * 3, height - 10); | |
| ctx.fillText('Output', layerWidth * 4, height - 10); | |
| } | |
| // Convolution Animation | |
| function drawConvolutionAnimation(ctx, canvas) { | |
| const width = canvas.width; | |
| const height = canvas.height; | |
| // Draw input image | |
| ctx.fillStyle = 'rgba(0, 212, 255, 0.1)'; | |
| ctx.fillRect(20, 20, 150, 150); | |
| ctx.strokeStyle = '#00d4ff'; | |
| ctx.lineWidth = 2; | |
| ctx.strokeRect(20, 20, 150, 150); | |
| // Draw filter | |
| ctx.fillStyle = 'rgba(255, 107, 53, 0.1)'; | |
| const filterPos = 60 + Math.sin(Date.now() / 1000) * 40; | |
| ctx.fillRect(filterPos, 60, 60, 60); | |
| ctx.strokeStyle = '#ff6b35'; | |
| ctx.lineWidth = 3; | |
| ctx.strokeRect(filterPos, 60, 60, 60); | |
| // Draw output | |
| ctx.fillStyle = 'rgba(0, 255, 136, 0.1)'; | |
| ctx.fillRect(width - 170, 20, 150, 150); | |
| ctx.strokeStyle = '#00ff88'; | |
| ctx.lineWidth = 2; | |
| ctx.strokeRect(width - 170, 20, 150, 150); | |
| // Draw feature map | |
| for (let i = 0; i < 5; i++) { | |
| for (let j = 0; j < 5; j++) { | |
| const intensity = Math.random() * 100; | |
| ctx.fillStyle = `rgba(0, 212, 255, ${intensity / 100})`; | |
| ctx.fillRect(width - 160 + i * 25, 30 + j * 25, 20, 20); | |
| } | |
| } | |
| // Labels | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = 'bold 12px Arial'; | |
| ctx.textAlign = 'left'; | |
| ctx.fillText('Input Image', 20, 190); | |
| ctx.fillText('Filter', filterPos, 140); | |
| ctx.fillText('Feature Map', width - 170, 190); | |
| } | |
| // Loss Landscape | |
| function drawLossLandscape(ctx, canvas) { | |
| const width = canvas.width; | |
| const height = canvas.height; | |
| for (let x = 0; x < width; x += 20) { | |
| for (let y = 0; y < height; y += 20) { | |
| const nx = (x - width / 2) / (width / 4); | |
| const ny = (y - height / 2) / (height / 4); | |
| const loss = nx * nx + ny * ny; | |
| const intensity = Math.min(255, loss * 50); | |
| ctx.fillStyle = `rgb(${intensity}, ${100}, ${255 - intensity})`; | |
| ctx.fillRect(x, y, 20, 20); | |
| } | |
| } | |
| // Draw descent path | |
| ctx.strokeStyle = '#00ff88'; | |
| ctx.lineWidth = 2; | |
| ctx.beginPath(); | |
| let pathX = width / 2 + 80; | |
| let pathY = height / 2 + 80; | |
| ctx.moveTo(pathX, pathY); | |
| for (let i = 0; i < 20; i++) { | |
| // Update the position each step so the path actually descends toward the minimum | |
| const angle = Math.atan2(pathY - height / 2, pathX - width / 2); | |
| pathX -= Math.cos(angle) * 15; | |
| pathY -= Math.sin(angle) * 15; | |
| ctx.lineTo(pathX, pathY); | |
| } | |
| ctx.stroke(); | |
| // Minimum point | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.beginPath(); | |
| ctx.arc(width / 2, height / 2, 8, 0, Math.PI * 2); | |
| ctx.fill(); | |
| } | |
| // YOLO Grid | |
| function drawYOLOGrid(ctx, canvas) { | |
| const width = canvas.width; | |
| const height = canvas.height; | |
| const gridSize = 7; | |
| const cellWidth = width / gridSize; | |
| const cellHeight = height / gridSize; | |
| // Draw grid | |
| ctx.strokeStyle = 'rgba(0, 212, 255, 0.3)'; | |
| ctx.lineWidth = 1; | |
| for (let i = 0; i <= gridSize; i++) { | |
| ctx.beginPath(); | |
| ctx.moveTo(i * cellWidth, 0); | |
| ctx.lineTo(i * cellWidth, height); | |
| ctx.stroke(); | |
| ctx.beginPath(); | |
| ctx.moveTo(0, i * cellHeight); | |
| ctx.lineTo(width, i * cellHeight); | |
| ctx.stroke(); | |
| } | |
| // Draw detected objects | |
| const detections = [ | |
| { x: 2, y: 2, w: 2, h: 2, conf: 0.95 }, | |
| { x: 4, y: 5, w: 1.5, h: 1.5, conf: 0.87 } | |
| ]; | |
| detections.forEach(det => { | |
| ctx.fillStyle = `rgba(255, 107, 53, ${det.conf * 0.5})`; | |
| ctx.fillRect(det.x * cellWidth, det.y * cellHeight, det.w * cellWidth, det.h * cellHeight); | |
| ctx.strokeStyle = '#ff6b35'; | |
| ctx.lineWidth = 2; | |
| ctx.strokeRect(det.x * cellWidth, det.y * cellHeight, det.w * cellWidth, det.h * cellHeight); | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = 'bold 12px Arial'; | |
| ctx.fillText((det.conf * 100).toFixed(0) + '%', det.x * cellWidth + 5, det.y * cellHeight + 15); | |
| }); | |
| } | |
| // Attention Matrix | |
| function drawAttentionMatrix(ctx, canvas) { | |
| const size = 8; | |
| const cellSize = Math.min(canvas.width, canvas.height) / size; | |
| for (let i = 0; i < size; i++) { | |
| for (let j = 0; j < size; j++) { | |
| const distance = Math.abs(i - j); | |
| const attention = Math.exp(-distance / 2); | |
| const intensity = Math.floor(attention * 255); | |
| ctx.fillStyle = `rgb(${intensity}, 100, ${200 - intensity})`; | |
| ctx.fillRect(i * cellSize, j * cellSize, cellSize, cellSize); | |
| } | |
| } | |
| // Add labels | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = '10px Arial'; | |
| ctx.textAlign = 'center'; | |
| for (let i = 0; i < size; i++) { | |
| ctx.fillText('w' + i, i * cellSize + cellSize / 2, canvas.height - 5); | |
| } | |
| } | |
| // Math Visualization | |
| function drawMathVisualization(moduleId) { | |
| const canvas = document.getElementById(moduleId + '-math-canvas'); | |
| if (!canvas) return; | |
| const ctx = canvas.getContext('2d'); | |
| ctx.clearRect(0, 0, canvas.width, canvas.height); | |
| ctx.fillStyle = '#0f1419'; | |
| ctx.fillRect(0, 0, canvas.width, canvas.height); | |
| const mathVizMap = { | |
| 'nn-basics': () => drawNNMath(ctx, canvas), | |
| 'activation': () => drawActivationDerivatives(ctx, canvas), | |
| 'loss': () => drawLossComparison(ctx, canvas), | |
| 'optimizers': () => drawOptimizerSteps(ctx, canvas), | |
| 'backprop': () => drawChainRule(ctx, canvas), | |
| 'conv-layer': () => drawConvolutionMath(ctx, canvas), | |
| 'pooling': () => drawPoolingMath(ctx, canvas), | |
| 'regularization': () => drawRegularizationMath(ctx, canvas), | |
| 'transformers': () => drawAttentionMath(ctx, canvas), | |
| 'rnn': () => drawRNNMath(ctx, canvas), | |
| 'gnn': () => drawGNNMath(ctx, canvas) | |
| }; | |
| if (mathVizMap[moduleId]) { | |
| mathVizMap[moduleId](); | |
| } else { | |
| drawDefaultMathVisualization(ctx, canvas); | |
| } | |
| } | |
| // Application Visualization | |
| function drawApplicationVisualization(moduleId) { | |
| const canvas = document.getElementById(moduleId + '-app-canvas'); | |
| if (!canvas) return; | |
| const ctx = canvas.getContext('2d'); | |
| ctx.clearRect(0, 0, canvas.width, canvas.height); | |
| ctx.fillStyle = '#0f1419'; | |
| ctx.fillRect(0, 0, canvas.width, canvas.height); | |
| const appVizMap = { | |
| 'nn-basics': () => drawNNApplications(ctx, canvas), | |
| 'cnn-basics': () => drawCNNApplications(ctx, canvas), | |
| 'conv-layer': () => drawConvolutionApplications(ctx, canvas), | |
| 'yolo': () => drawYOLOApplications(ctx, canvas), | |
| 'semantic-seg': () => drawSegmentationApplications(ctx, canvas), | |
| 'instance-seg': () => drawInstanceSegmentationApps(ctx, canvas), | |
| 'face-recog': () => drawFaceRecognitionApps(ctx, canvas), | |
| 'transformers': () => drawTransformerApps(ctx, canvas), | |
| 'bert': () => drawBERTApplications(ctx, canvas), | |
| 'gpt': () => drawGPTApplications(ctx, canvas), | |
| 'gans': () => drawGANApplications(ctx, canvas), | |
| 'diffusion': () => drawDiffusionApplications(ctx, canvas), | |
| 'gnn': () => drawGNNApplications(ctx, canvas) | |
| }; | |
| if (appVizMap[moduleId]) { | |
| appVizMap[moduleId](); | |
| } else { | |
| drawDefaultApplicationVisualization(ctx, canvas); | |
| } | |
| } | |
| // Math visualization helper functions | |
| function drawNNMath(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 18px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Forward Pass: y = σ(Wx + b)', canvas.width / 2, 50); | |
| ctx.font = '14px Arial'; | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('Linear combination + Non-linearity', canvas.width / 2, 100); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.fillText('W: weights, b: bias, σ: activation', canvas.width / 2, 150); | |
| } | |
| function drawActivationDerivatives(ctx, canvas) { | |
| const width = canvas.width; | |
| const height = canvas.height; | |
| const centerX = width / 2; | |
| const centerY = height / 2; | |
| const scale = 40; | |
| ctx.strokeStyle = 'rgba(0, 212, 255, 0.2)'; | |
| ctx.lineWidth = 1; | |
| for (let i = -5; i <= 5; i += 1) { | |
| ctx.beginPath(); | |
| ctx.moveTo(centerX + i * scale, centerY - 5 * scale); | |
| ctx.lineTo(centerX + i * scale, centerY + 5 * scale); | |
| ctx.stroke(); | |
| } | |
| ctx.strokeStyle = '#00ff88'; | |
| ctx.lineWidth = 3; | |
| ctx.beginPath(); | |
| for (let x = -5; x <= 5; x += 0.1) { | |
| const y = 1 / (1 + Math.exp(-x)) * (1 - 1 / (1 + Math.exp(-x))); | |
| const canvasX = centerX + x * scale; | |
| const canvasY = centerY - y * scale * 10; | |
| if (x === -5) ctx.moveTo(canvasX, canvasY); | |
| else ctx.lineTo(canvasX, canvasY); | |
| } | |
| ctx.stroke(); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText("Sigmoid Derivative: σ'(x) = σ(x)(1-σ(x))", canvas.width / 2, 30); | |
| } | |
| function drawLossComparison(ctx, canvas) { | |
| const width = canvas.width; | |
| const height = canvas.height; | |
| // MSE | |
| ctx.fillStyle = 'rgba(0, 212, 255, 0.2)'; | |
| ctx.fillRect(20, 60, width / 2 - 30, height - 100); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.fillText('MSE Loss', width / 4, 45); | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('L = (1/n)Σ(y-ŷ)²', width / 4, 90); | |
| ctx.fillText('Regression', width / 4, 115); | |
| // Cross-Entropy | |
| ctx.fillStyle = 'rgba(255, 107, 53, 0.2)'; | |
| ctx.fillRect(width / 2 + 10, 60, width / 2 - 30, height - 100); | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.fillText('Cross-Entropy Loss', width * 3 / 4, 45); | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('L = -Σ(y·log(ŷ))', width * 3 / 4, 90); | |
| ctx.fillText('Classification', width * 3 / 4, 115); | |
| } | |
| function drawOptimizerSteps(ctx, canvas) { | |
| const width = canvas.width; | |
| const height = canvas.height; | |
| const centerY = height / 2; | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('SGD', width / 4, 50); | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('w = w - α·∇L', width / 4, 100); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.fillText('Momentum', width / 2, 50); | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('v = β·v + (1-β)·∇L', width / 2, 100); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.fillText('Adam', width * 3 / 4, 50); | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Adaptive learning rate', width * 3 / 4, 100); | |
| } | |
| function drawChainRule(ctx, canvas) { | |
| const width = canvas.width; | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Backpropagation Chain Rule', width / 2, 50); | |
| ctx.font = '12px Arial'; | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.fillText('dL/dW = dL/dŷ · dŷ/da · da/dz · dz/dW', width / 2, 100); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.fillText('Compute gradient by multiplying partial derivatives', width / 2, 150); | |
| } | |
| function drawConvolutionMath(ctx, canvas) { | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Convolution Operation', canvas.width / 2, 50); | |
| ctx.font = '12px Arial'; | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.fillText('y[i,j] = Σ Σ w[m,n] * x[i+m,j+n] + b', canvas.width / 2, 100); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('Sliding window element-wise multiplication and summation', canvas.width / 2, 150); | |
| } | |
| function drawPoolingMath(ctx, canvas) { | |
| const width = canvas.width; | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Max Pooling', width / 3, 50); | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('y = max(neighborhood)', width / 3, 100); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.fillText('Average Pooling', width * 2 / 3, 50); | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('y = avg(neighborhood)', width * 2 / 3, 100); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.font = '11px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Reduces spatial dimensions', width / 2, 150); | |
| } | |
| function drawRegularizationMath(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('L1 Regularization: L = Loss + λΣ|w|', canvas.width / 2, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('L2 Regularization: L = Loss + λΣw²', canvas.width / 2, 110); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.fillText('Prevents overfitting by penalizing large weights', canvas.width / 2, 160); | |
| } | |
| function drawAttentionMath(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Attention Mechanism', canvas.width / 2, 50); | |
| ctx.font = '12px Arial'; | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('Attention(Q,K,V) = softmax(QK^T/√d_k) · V', canvas.width / 2, 100); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.fillText('Query-Key matching determines how much to focus on each value', canvas.width / 2, 150); | |
| } | |
| function drawRNNMath(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('RNN Hidden State Update', canvas.width / 2, 50); | |
| ctx.font = '12px Arial'; | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('h_t = σ(W_h·h_(t-1) + W_x·x_t + b)', canvas.width / 2, 100); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.fillText('Processes sequences step-by-step with recurrent connections', canvas.width / 2, 150); | |
| } | |
| // Application visualization helper functions | |
| function drawNNApplications(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('📱 Stock Price Prediction', canvas.width / 4, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('🏥 Medical Diagnosis', canvas.width / 2, 60); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.fillText('🎮 Game AI', canvas.width * 3 / 4, 60); | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Fraud Detection', canvas.width / 4, 120); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.fillText('Recommendation Systems', canvas.width / 2, 120); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('Credit Scoring', canvas.width * 3 / 4, 120); | |
| } | |
| function drawCNNApplications(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Image Classification', canvas.width / 3, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('Object Detection', canvas.width * 2 / 3, 60); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Deep Learning Backbone', canvas.width / 2, 150); | |
| } | |
| function drawConvolutionApplications(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('📷 Image Feature Extraction', canvas.width / 3, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('🔍 Edge Detection', canvas.width * 2 / 3, 60); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Foundation of Computer Vision', canvas.width / 2, 150); | |
| } | |
| function drawYOLOApplications(ctx, canvas) { | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('🚗 Autonomous Driving', canvas.width / 3, 60); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.fillText('📹 Real-time Video Detection', canvas.width * 2 / 3, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Ultra-fast inference for live applications', canvas.width / 2, 150); | |
| } | |
| function drawSegmentationApplications(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('🏥 Medical Imaging', canvas.width / 3, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('🚗 Autonomous Vehicles', canvas.width * 2 / 3, 60); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Pixel-level understanding of scenes', canvas.width / 2, 150); | |
| } | |
| function drawInstanceSegmentationApps(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('👥 Person Detection & Tracking', canvas.width / 3, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('🍎 Object Instance Counting', canvas.width * 2 / 3, 60); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Separates overlapping objects', canvas.width / 2, 150); | |
| } | |
| function drawFaceRecognitionApps(ctx, canvas) { | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('📱 Phone Unlock', canvas.width / 3, 60); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.fillText('🔒 Security Systems', canvas.width * 2 / 3, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Identity verification and access control', canvas.width / 2, 150); | |
| } | |
| function drawTransformerApps(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('💬 ChatGPT / LLMs', canvas.width / 3, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('🌐 Machine Translation', canvas.width * 2 / 3, 60); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Foundation of modern NLP and beyond', canvas.width / 2, 150); | |
| } | |
| function drawBERTApplications(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('🔍 Semantic Search', canvas.width / 3, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('❓ Question Answering', canvas.width * 2 / 3, 60); | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Deep language understanding', canvas.width / 2, 150); | |
| } | |
| function drawGPTApplications(ctx, canvas) { | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('✍️ Text Generation', canvas.width / 3, 60); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.fillText('💡 Idea Assistance', canvas.width * 2 / 3, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Powerful autoregressive language models', canvas.width / 2, 150); | |
| } | |
| function drawGANApplications(ctx, canvas) { | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('🎨 Image Generation', canvas.width / 3, 60); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.fillText('🎭 Style Transfer', canvas.width * 2 / 3, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Creative content generation and enhancement', canvas.width / 2, 150); | |
| } | |
| function drawDiffusionApplications(ctx, canvas) { | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('🖼️ Image Synthesis', canvas.width / 3, 60); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.fillText('🎬 Stable Diffusion', canvas.width * 2 / 3, 60); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('State-of-the-art generative AI', canvas.width / 2, 150); | |
| } | |
| // Missing visualization stub functions | |
| function drawNeuronAnimation(ctx, canvas) { | |
| drawNetworkGraph(ctx, canvas); | |
| } | |
| function drawDecisionBoundary(ctx, canvas) { | |
| const centerX = canvas.width / 2; | |
| const centerY = canvas.height / 2; | |
| // Draw decision boundary line | |
| ctx.strokeStyle = '#ff6b35'; | |
| ctx.lineWidth = 3; | |
| ctx.beginPath(); | |
| ctx.moveTo(0, centerY); | |
| ctx.lineTo(canvas.width, centerY); | |
| ctx.stroke(); | |
| // Draw sample points | |
| for (let i = 0; i < 20; i++) { | |
| const x = Math.random() * canvas.width; | |
| const y = Math.random() * canvas.height; | |
| ctx.fillStyle = y < centerY ? '#00d4ff' : '#00ff88'; | |
| ctx.beginPath(); | |
| ctx.arc(x, y, 5, 0, Math.PI * 2); | |
| ctx.fill(); | |
| } | |
| } | |
| function drawWeightDistribution(ctx, canvas) { | |
| const centerX = canvas.width / 2; | |
| const centerY = canvas.height / 2; | |
| // Draw Gaussian distribution | |
| ctx.strokeStyle = '#00d4ff'; | |
| ctx.lineWidth = 2; | |
| ctx.beginPath(); | |
| for (let x = -100; x <= 100; x += 2) { | |
| const y = Math.exp(-(x * x) / 500) * 80; | |
| const canvasX = centerX + x; | |
| const canvasY = centerY - y; | |
| if (x === -100) ctx.moveTo(canvasX, canvasY); | |
| else ctx.lineTo(canvasX, canvasY); | |
| } | |
| ctx.stroke(); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Weight Distribution (Xavier/He Init)', centerX, 50); | |
| } | |
| function drawConvergencePaths(ctx, canvas) { | |
| drawLossLandscape(ctx, canvas); | |
| } | |
| function drawGradientFlow(ctx, canvas) { | |
| drawChainRule(ctx, canvas); | |
| } | |
| function drawOverfitComparison(ctx, canvas) { | |
| const width = canvas.width; | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Without Regularization', width / 4, 40); | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.fillText('With Regularization', width * 3 / 4, 40); | |
| // Draw wavy overfit line | |
| ctx.strokeStyle = '#00d4ff'; | |
| ctx.lineWidth = 2; | |
| ctx.beginPath(); | |
| for (let x = 0; x < width / 2 - 20; x += 5) { | |
| const y = 100 + Math.sin(x / 10) * 30 + Math.random() * 20; | |
| if (x === 0) ctx.moveTo(x + 20, y); | |
| else ctx.lineTo(x + 20, y); | |
| } | |
| ctx.stroke(); | |
| // Draw smooth regularized line | |
| ctx.strokeStyle = '#ff6b35'; | |
| ctx.beginPath(); | |
| for (let x = 0; x < width / 2 - 20; x += 5) { | |
| const y = 100 + Math.sin(x / 20) * 15; | |
| if (x === 0) ctx.moveTo(x + width / 2 + 20, y); | |
| else ctx.lineTo(x + width / 2 + 20, y); | |
| } | |
| ctx.stroke(); | |
| } | |
| function drawBatchNormalization(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Batch Normalization: μ=0, σ²=1', canvas.width / 2, 50); | |
| // Draw before/after distributions | |
| ctx.fillStyle = '#ffa500'; | |
| ctx.fillText('Input Distribution', canvas.width / 4, 100); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('Normalized Distribution', canvas.width * 3 / 4, 100); | |
| } | |
| function drawImageMatrix(ctx, canvas) { | |
| const cellSize = 20; | |
| for (let i = 0; i < 10; i++) { | |
| for (let j = 0; j < 10; j++) { | |
| const intensity = Math.random(); | |
| ctx.fillStyle = `rgba(0, 212, 255, ${intensity})`; | |
| ctx.fillRect(i * cellSize + 100, j * cellSize + 100, cellSize, cellSize); | |
| } | |
| } | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Image as Matrix (Pixel Values)', canvas.width / 2, 50); | |
| } | |
| function drawPoolingDemo(ctx, canvas) { | |
| const cellSize = 30; | |
| const input = [[20, 30, 0, 5], [8, 12, 2, 0], [34, 70, 37, 4], [112, 100, 25, 12]]; | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Max Pooling Demo (2x2)', canvas.width / 2, 30); | |
| // Draw input matrix | |
| for (let i = 0; i < 4; i++) { | |
| for (let j = 0; j < 4; j++) { | |
| ctx.strokeStyle = '#00d4ff'; | |
| ctx.strokeRect(50 + j * cellSize, 50 + i * cellSize, cellSize, cellSize); | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = '10px Arial'; | |
| ctx.fillText(input[i][j], 50 + j * cellSize + cellSize / 2, 50 + i * cellSize + cellSize / 2 + 4); | |
| } | |
| } | |
| // Draw output: max of each 2x2 block — max(20,30,8,12)=30, max(0,5,2,0)=5, etc. | |
| const pooled = [[30, 5], [112, 37]]; | |
| for (let i = 0; i < 2; i++) { | |
| for (let j = 0; j < 2; j++) { | |
| ctx.strokeStyle = '#00ff88'; | |
| ctx.strokeRect(250 + j * cellSize * 1.5, 70 + i * cellSize * 1.5, cellSize * 1.5, cellSize * 1.5); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.font = 'bold 12px Arial'; | |
| ctx.fillText(pooled[i][j], 250 + j * cellSize * 1.5 + cellSize * 0.75, 70 + i * cellSize * 1.5 + cellSize * 0.75 + 5); | |
| } | |
| } | |
| } | |
| function drawCNNArchitecture(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 12px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Input', 60, 200); | |
| ctx.fillText('Conv', 160, 200); | |
| ctx.fillText('Pool', 260, 200); | |
| ctx.fillText('Conv', 360, 200); | |
| ctx.fillText('Pool', 460, 200); | |
| ctx.fillText('FC', 560, 200); | |
| ctx.fillText('Output', 660, 200); | |
| // Draw blocks | |
| const blocks = [60, 160, 260, 360, 460, 560, 660]; | |
| blocks.forEach((x, i) => { | |
| const height = i === 0 ? 100 : (i < blocks.length - 2 ? 80 - i * 10 : 60); | |
| ctx.strokeStyle = '#00d4ff'; | |
| ctx.strokeRect(x - 30, 100, 60, height); | |
| }); | |
| } | |
| function drawLearnedFilters(ctx, canvas) { | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('CNN Learned Filters', canvas.width / 2, 30); | |
| const labels = ['Edges', 'Textures', 'Patterns', 'Objects']; | |
| labels.forEach((label, i) => { | |
| const x = (i + 1) * canvas.width / 5; | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = 'bold 12px Arial'; | |
| ctx.fillText(label, x, 80); | |
| // Draw filter representation | |
| for (let j = 0; j < 3; j++) { | |
| for (let k = 0; k < 3; k++) { | |
| const intensity = Math.random(); | |
| ctx.fillStyle = `rgba(0, 212, 255, ${intensity})`; | |
| ctx.fillRect(x - 20 + k * 12, 100 + j * 12, 10, 10); | |
| } | |
| } | |
| }); | |
| } | |
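| // The named-architecture views below are placeholders: each delegates to the generic | |
| // drawCNNArchitecture diagram until a bespoke visualization is added. | |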
| function drawLeNetArchitecture(ctx, canvas) { drawCNNArchitecture(ctx, canvas); } | |
| function drawAlexNetArchitecture(ctx, canvas) { drawCNNArchitecture(ctx, canvas); } | |
| function drawVGGArchitecture(ctx, canvas) { drawCNNArchitecture(ctx, canvas); } | |
| function drawResNetArchitecture(ctx, canvas) { drawCNNArchitecture(ctx, canvas); } | |
| function drawInceptionModule(ctx, canvas) { drawCNNArchitecture(ctx, canvas); } | |
| function drawMobileNetArchitecture(ctx, canvas) { drawCNNArchitecture(ctx, canvas); } | |
| function drawTransferLearning(ctx, canvas) { drawCNNArchitecture(ctx, canvas); } | |
| function drawBoundingBoxes(ctx, canvas) { | |
| // Draw sample image | |
| ctx.fillStyle = 'rgba(0, 212, 255, 0.1)'; | |
| ctx.fillRect(50, 50, 300, 300); | |
| // Draw bounding boxes | |
| ctx.strokeStyle = '#ff6b35'; | |
| ctx.lineWidth = 3; | |
| ctx.strokeRect(100, 100, 150, 150); | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = 'bold 12px Arial'; | |
| ctx.fillText('Dog 95%', 105, 95); | |
| ctx.strokeStyle = '#00ff88'; | |
| ctx.strokeRect(180, 200, 100, 80); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('Cat 87%', 185, 195); | |
| } | |
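| // Detection-pipeline placeholders: R-CNN and SSD reuse the bounding-box demo for now. | |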
| function drawRCNNPipeline(ctx, canvas) { drawBoundingBoxes(ctx, canvas); } | |
| function drawSSDDetector(ctx, canvas) { drawBoundingBoxes(ctx, canvas); } | |
| function drawSemanticSegmentation(ctx, canvas) { | |
| const cellSize = 15; | |
| const colors = ['rgba(0, 212, 255, 0.5)', 'rgba(255, 107, 53, 0.5)', 'rgba(0, 255, 136, 0.5)']; | |
| for (let i = 0; i < 20; i++) { | |
| for (let j = 0; j < 20; j++) { | |
| ctx.fillStyle = colors[Math.floor(Math.random() * colors.length)]; | |
| ctx.fillRect(i * cellSize + 100, j * cellSize + 50, cellSize, cellSize); | |
| } | |
| } | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Pixel-wise Classification', canvas.width / 2, 30); | |
| } | |
| function drawInstanceSegmentation(ctx, canvas) { drawSemanticSegmentation(ctx, canvas); } | |
| function drawFaceEmbeddings(ctx, canvas) { | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Face Embedding Space', canvas.width / 2, 30); | |
| // Draw embedding vectors | |
| const faces = 5; | |
| for (let i = 0; i < faces; i++) { | |
| const x = 100 + Math.random() * (canvas.width - 200); | |
| const y = 100 + Math.random() * 200; | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.beginPath(); | |
| ctx.arc(x, y, 10, 0, Math.PI * 2); | |
| ctx.fill(); | |
| } | |
| } | |
| function drawAutoencoderArchitecture(ctx, canvas) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 12px Arial'; | |
| ctx.textAlign = 'center'; | |
| const stages = ['Input', 'Encoder', 'Latent', 'Decoder', 'Output']; | |
| stages.forEach((label, i) => { | |
| const x = (i + 1) * canvas.width / 6; | |
| ctx.fillText(label, x, 50); | |
| const height = i === 2 ? 40 : (i === 0 || i === 4 ? 100 : 70); | |
| ctx.strokeStyle = '#00d4ff'; | |
| ctx.strokeRect(x - 30, 100, 60, height); | |
| }); | |
| } | |
| function drawGANsGame(ctx, canvas) { | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Generator', canvas.width / 3, 50); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.fillText('Discriminator', canvas.width * 2 / 3, 50); | |
| // Draw Generator | |
| ctx.strokeStyle = '#ff6b35'; | |
| ctx.strokeRect(canvas.width / 3 - 50, 100, 100, 100); | |
| // Draw Discriminator | |
| ctx.strokeStyle = '#00d4ff'; | |
| ctx.strokeRect(canvas.width * 2 / 3 - 50, 100, 100, 100); | |
| // Draw arrow | |
| ctx.strokeStyle = '#00ff88'; | |
| ctx.lineWidth = 2; | |
| ctx.beginPath(); | |
| ctx.moveTo(canvas.width / 3 + 50, 150); | |
| ctx.lineTo(canvas.width * 2 / 3 - 50, 150); | |
| ctx.stroke(); | |
| } | |
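| // The two-player game sketched above optimizes min_G max_D E_x[log D(x)] + E_z[log(1 - D(G(z)))] | |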
| function drawDiffusionProcess(ctx, canvas) { | |
| const steps = 5; | |
| const stepWidth = canvas.width / (steps + 1); | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Diffusion Process: From Noise to Image', canvas.width / 2, 30); | |
| for (let i = 0; i < steps; i++) { | |
| const x = (i + 1) * stepWidth; | |
| const clarity = i / (steps - 1); // 0 = pure noise (left) → 1 = fully denoised image (right) | |
| ctx.fillStyle = `rgba(0, 212, 255, ${clarity})`; | |
| ctx.fillRect(x - 40, 100, 80, 80); | |
| ctx.strokeStyle = '#00d4ff'; | |
| ctx.strokeRect(x - 40, 100, 80, 80); | |
| } | |
| } | |
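| // Forward diffusion (DDPM) adds Gaussian noise step by step: q(x_t | x_{t-1}) = N(sqrt(1 - β_t)·x_{t-1}, β_t·I); | |
| // the reverse (generative) process learns to undo one noising step at a time. | |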
| function drawRNNUnrolled(ctx, canvas) { | |
| const cells = 5; | |
| const cellWidth = canvas.width / (cells + 1); | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Unrolled RNN', canvas.width / 2, 30); | |
| for (let i = 0; i < cells; i++) { | |
| const x = (i + 1) * cellWidth; | |
| ctx.strokeStyle = '#00d4ff'; | |
| ctx.strokeRect(x - 30, 100, 60, 60); | |
| if (i < cells - 1) { | |
| ctx.strokeStyle = '#ff6b35'; | |
| ctx.lineWidth = 2; | |
| ctx.beginPath(); | |
| ctx.moveTo(x + 30, 130); | |
| ctx.lineTo(x + cellWidth - 30, 130); | |
| ctx.stroke(); | |
| } | |
| } | |
| } | |
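| // Transformer-family placeholders: BERT, GPT, and ViT views reuse the attention-matrix demo. | |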
| function drawBERTProcess(ctx, canvas) { drawAttentionMatrix(ctx, canvas); } | |
| function drawGPTGeneration(ctx, canvas) { drawAttentionMatrix(ctx, canvas); } | |
| function drawVisionTransformer(ctx, canvas) { drawAttentionMatrix(ctx, canvas); } | |
| function drawVisualization(moduleId) { | |
| drawConceptsVisualization(moduleId); | |
| } | |
| // Animation and download utilities | |
| let animationFrameId = null; | |
| function toggleVizAnimation(moduleId) { | |
| const btn = event.target; // relies on the global `event` set by inline onclick handlers | |
| window.vizAnimating = !window.vizAnimating; | |
| if (window.vizAnimating) { | |
| btn.textContent = '⏹️ Stop'; | |
| btn.style.background = 'linear-gradient(135deg, #ff4444, #cc0000)'; | |
| animateVisualization(moduleId); | |
| } else { | |
| btn.textContent = '▶️ Animate'; | |
| btn.style.background = ''; | |
| if (animationFrameId) { | |
| cancelAnimationFrame(animationFrameId); | |
| animationFrameId = null; | |
| } | |
| } | |
| } | |
| function animateVisualization(moduleId) { | |
| if (!window.vizAnimating) return; | |
| const canvas = document.getElementById(moduleId + '-canvas'); | |
| if (!canvas) return; | |
| const ctx = canvas.getContext('2d'); | |
| ctx.clearRect(0, 0, canvas.width, canvas.height); | |
| ctx.fillStyle = '#0f1419'; | |
| ctx.fillRect(0, 0, canvas.width, canvas.height); | |
| // Call the appropriate animated drawing function | |
| const animatedVizMap = { | |
| 'nn-basics': drawAnimatedNetwork, | |
| 'perceptron': drawAnimatedDecisionBoundary, | |
| 'mlp': drawAnimatedMLP, | |
| 'activation': drawAnimatedActivations, | |
| 'conv-layer': drawAnimatedConvolution, | |
| 'gnn': drawAnimatedGNN, | |
| 'transformers': drawAnimatedAttention, | |
| 'backprop': drawAnimatedGradientFlow, | |
| 'gans': drawAnimatedGAN, | |
| 'diffusion': drawAnimatedDiffusion, | |
| 'rnn': drawAnimatedRNN | |
| }; | |
| if (animatedVizMap[moduleId]) { | |
| animatedVizMap[moduleId](ctx, canvas, Date.now()); | |
| } else { | |
| // Default animation - pulsing visualization | |
| drawDefaultAnimation(ctx, canvas, Date.now()); | |
| } | |
| animationFrameId = requestAnimationFrame(() => animateVisualization(moduleId)); | |
| } | |
| // Default animation for modules without specific animations | |
| function drawDefaultAnimation(ctx, canvas, time) { | |
| const centerX = canvas.width / 2; | |
| const centerY = canvas.height / 2; | |
| const pulse = Math.sin(time / 300) * 0.3 + 0.7; | |
| // Animated neural network | |
| const layers = [3, 4, 4, 2]; | |
| const layerWidth = canvas.width / (layers.length + 1); | |
| layers.forEach((neurons, layerIdx) => { | |
| const x = (layerIdx + 1) * layerWidth; | |
| const layerHeight = canvas.height / (neurons + 1); | |
| for (let i = 0; i < neurons; i++) { | |
| const y = (i + 1) * layerHeight; | |
| const radius = 12 + Math.sin(time / 200 + layerIdx + i) * 3; | |
| // Draw neuron | |
| ctx.fillStyle = `rgba(0, 212, 255, ${pulse})`; | |
| ctx.beginPath(); | |
| ctx.arc(x, y, radius, 0, Math.PI * 2); | |
| ctx.fill(); | |
| // Draw connections to next layer | |
| if (layerIdx < layers.length - 1) { | |
| const nextLayerHeight = canvas.height / (layers[layerIdx + 1] + 1); | |
| const nextX = (layerIdx + 2) * layerWidth; | |
| for (let j = 0; j < layers[layerIdx + 1]; j++) { | |
| const nextY = (j + 1) * nextLayerHeight; | |
| const signalProgress = ((time / 500) + layerIdx * 0.5) % 1; | |
| ctx.strokeStyle = `rgba(0, 212, 255, ${0.3 + signalProgress * 0.3})`; | |
| ctx.lineWidth = 1; | |
| ctx.beginPath(); | |
| ctx.moveTo(x + radius, y); | |
| ctx.lineTo(nextX - 12, nextY); | |
| ctx.stroke(); | |
| // Animated signal dot | |
| const dotX = x + radius + (nextX - 12 - x - radius) * signalProgress; | |
| const dotY = y + (nextY - y) * signalProgress; | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.beginPath(); | |
| ctx.arc(dotX, dotY, 3, 0, Math.PI * 2); | |
| ctx.fill(); | |
| } | |
| } | |
| } | |
| }); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('🔄 Neural Network Animation', centerX, 25); | |
| } | |
| // Animated GNN with message passing | |
| function drawAnimatedGNN(ctx, canvas, time) { | |
| ctx.fillStyle = '#9900ff'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Graph Neural Network - Message Passing', canvas.width / 2, 30); | |
| const nodes = [ | |
| { x: 100, y: 100 }, { x: 200, y: 60 }, { x: 320, y: 120 }, | |
| { x: 150, y: 200 }, { x: 400, y: 80 }, { x: 450, y: 180 } | |
| ]; | |
| const edges = [[0, 1], [0, 3], [1, 2], [1, 4], [2, 3], [2, 4], [4, 5]]; | |
| // Draw edges | |
| ctx.strokeStyle = 'rgba(153, 0, 255, 0.4)'; | |
| ctx.lineWidth = 2; | |
| edges.forEach(e => { | |
| ctx.beginPath(); | |
| ctx.moveTo(nodes[e[0]].x, nodes[e[0]].y); | |
| ctx.lineTo(nodes[e[1]].x, nodes[e[1]].y); | |
| ctx.stroke(); | |
| }); | |
| // Draw animated message passing | |
| const messageProgress = (time / 1000) % 1; | |
| ctx.fillStyle = '#00ff88'; | |
| edges.forEach((e, idx) => { | |
| const progress = (messageProgress + idx * 0.15) % 1; | |
| const x = nodes[e[0]].x + (nodes[e[1]].x - nodes[e[0]].x) * progress; | |
| const y = nodes[e[0]].y + (nodes[e[1]].y - nodes[e[0]].y) * progress; | |
| ctx.beginPath(); | |
| ctx.arc(x, y, 5, 0, Math.PI * 2); | |
| ctx.fill(); | |
| }); | |
| // Draw nodes with pulse | |
| const pulse = Math.sin(time / 300) * 5 + 15; | |
| nodes.forEach((n, i) => { | |
| ctx.fillStyle = '#9900ff'; | |
| ctx.beginPath(); | |
| ctx.arc(n.x, n.y, pulse, 0, Math.PI * 2); | |
| ctx.fill(); | |
| ctx.fillStyle = 'white'; | |
| ctx.font = '12px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText(i, n.x, n.y + 4); | |
| }); | |
| } | |
| // Animated attention matrix | |
| function drawAnimatedAttention(ctx, canvas, time) { | |
| const words = ['The', 'cat', 'sat', 'on', 'mat']; | |
| const cellSize = 50; | |
| const startX = (canvas.width - words.length * cellSize) / 2; | |
| const startY = 80; | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Self-Attention Animation', canvas.width / 2, 30); | |
| // Draw words | |
| ctx.font = '12px Arial'; | |
| words.forEach((word, i) => { | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.fillText(word, startX + i * cellSize + cellSize / 2, startY - 10); | |
| ctx.save(); | |
| ctx.translate(startX - 20, startY + i * cellSize + cellSize / 2); | |
| ctx.fillText(word, 0, 0); | |
| ctx.restore(); | |
| }); | |
| // Animated attention weights | |
| for (let i = 0; i < words.length; i++) { | |
| for (let j = 0; j < words.length; j++) { | |
| const baseWeight = i === j ? 0.8 : Math.max(0.1, 0.5 - Math.abs(i - j) * 0.1); // strongest on the diagonal, decaying with distance | |
| const animatedWeight = baseWeight + Math.sin(time / 500 + i + j) * 0.2; | |
| const alpha = Math.max(0.1, Math.min(1, animatedWeight)); | |
| ctx.fillStyle = `rgba(0, 212, 255, ${alpha})`; | |
| ctx.fillRect(startX + j * cellSize + 2, startY + i * cellSize + 2, cellSize - 4, cellSize - 4); | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = '10px Arial'; | |
| ctx.fillText(animatedWeight.toFixed(2), startX + j * cellSize + cellSize / 2, startY + i * cellSize + cellSize / 2 + 4); | |
| } | |
| } | |
| } | |
| // Animated gradient flow for backprop | |
| function drawAnimatedGradientFlow(ctx, canvas, time) { | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Backpropagation - Gradient Flow', canvas.width / 2, 30); | |
| const layers = [2, 4, 4, 1]; | |
| const layerWidth = canvas.width / (layers.length + 1); | |
| // Forward pass (left to right) - blue | |
| const forwardProgress = (time / 2000) % 1; | |
| layers.forEach((neurons, layerIdx) => { | |
| const x = (layerIdx + 1) * layerWidth; | |
| const layerHeight = canvas.height / (neurons + 1); | |
| for (let i = 0; i < neurons; i++) { | |
| const y = (i + 1) * layerHeight; | |
| // Pulse effect based on forward pass | |
| const isActive = forwardProgress > layerIdx / layers.length; | |
| const radius = isActive ? 15 + Math.sin(time / 200) * 3 : 12; | |
| ctx.fillStyle = isActive ? '#00d4ff' : 'rgba(0, 212, 255, 0.3)'; | |
| ctx.beginPath(); | |
| ctx.arc(x, y, radius, 0, Math.PI * 2); | |
| ctx.fill(); | |
| } | |
| }); | |
| // Backward pass (right to left) - orange/red gradients | |
| const backwardProgress = ((time / 2000) + 0.5) % 1; | |
| for (let layerIdx = layers.length - 2; layerIdx >= 0; layerIdx--) { | |
| const x1 = (layerIdx + 1) * layerWidth; | |
| const x2 = (layerIdx + 2) * layerWidth; | |
| const gradientActive = backwardProgress > (layers.length - 2 - layerIdx) / (layers.length - 1); | |
| if (gradientActive) { | |
| const gradX = x2 - (x2 - x1) * ((backwardProgress * (layers.length - 1)) % 1); | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.beginPath(); | |
| ctx.arc(gradX, canvas.height / 2, 8, 0, Math.PI * 2); | |
| ctx.fill(); | |
| } | |
| } | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Forward: Blue → | Backward: Orange ←', canvas.width / 2, canvas.height - 20); | |
| } | |
| // Animated network for nn-basics | |
| function drawAnimatedNetwork(ctx, canvas, time) { | |
| drawDefaultAnimation(ctx, canvas, time); | |
| } | |
| // Animated decision boundary for perceptron | |
| function drawAnimatedDecisionBoundary(ctx, canvas, time) { | |
| const centerX = canvas.width / 2; | |
| const centerY = canvas.height / 2; | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Perceptron Decision Boundary', canvas.width / 2, 30); | |
| // Animated rotating decision boundary | |
| const angle = time / 2000; | |
| const length = 200; | |
| ctx.strokeStyle = '#ff6b35'; | |
| ctx.lineWidth = 3; | |
| ctx.beginPath(); | |
| ctx.moveTo(centerX - Math.cos(angle) * length, centerY - Math.sin(angle) * length); | |
| ctx.lineTo(centerX + Math.cos(angle) * length, centerY + Math.sin(angle) * length); | |
| ctx.stroke(); | |
| // Fixed sample points | |
| const points = [ | |
| { x: 100, y: 80, c: 1 }, { x: 150, y: 100, c: 1 }, { x: 120, y: 150, c: 1 }, | |
| { x: 400, y: 200, c: 0 }, { x: 450, y: 180, c: 0 }, { x: 380, y: 250, c: 0 } | |
| ]; | |
| points.forEach(p => { | |
| ctx.fillStyle = p.c === 1 ? '#00d4ff' : '#00ff88'; | |
| ctx.beginPath(); | |
| ctx.arc(p.x, p.y, 8, 0, Math.PI * 2); | |
| ctx.fill(); | |
| }); | |
| } | |
| function drawAnimatedMLP(ctx, canvas, time) { | |
| drawDefaultAnimation(ctx, canvas, time); | |
| } | |
| function drawAnimatedActivations(ctx, canvas, time) { | |
| drawActivationFunctions(ctx, canvas); | |
| // Add animated input marker | |
| const x = Math.sin(time / 500) * 4; | |
| const centerX = canvas.width / 2; | |
| const centerY = canvas.height / 2; | |
| const scale = 40; | |
| ctx.fillStyle = '#ffffff'; | |
| ctx.beginPath(); | |
| ctx.arc(centerX + x * scale, centerY, 6, 0, Math.PI * 2); | |
| ctx.fill(); | |
| ctx.strokeStyle = '#ffffff'; | |
| ctx.setLineDash([5, 5]); | |
| ctx.beginPath(); | |
| ctx.moveTo(centerX + x * scale, 0); | |
| ctx.lineTo(centerX + x * scale, canvas.height); | |
| ctx.stroke(); | |
| ctx.setLineDash([]); | |
| } | |
| function drawAnimatedConvolution(ctx, canvas, time) { | |
| drawConvolutionAnimation(ctx, canvas); | |
| } | |
| function drawAnimatedGAN(ctx, canvas, time) { | |
| ctx.fillStyle = '#ffaa00'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('GAN Training Animation', canvas.width / 2, 30); | |
| const phase = Math.floor(time / 1000) % 4; | |
| // Generator | |
| ctx.fillStyle = phase <= 1 ? '#00ff88' : 'rgba(0, 255, 136, 0.3)'; | |
| ctx.fillRect(50, 100, 100, 80); | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Generator', 100, 145); | |
| // Fake image | |
| const noiseToFake = Math.sin(time / 300) * 0.5 + 0.5; | |
| ctx.fillStyle = `rgba(255, 170, 0, ${noiseToFake})`; | |
| ctx.fillRect(200, 110, 60, 60); | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.fillText('Fake', 230, 200); | |
| // Discriminator | |
| ctx.fillStyle = phase >= 2 ? '#ff6b35' : 'rgba(255, 107, 53, 0.3)'; | |
| ctx.fillRect(320, 100, 100, 80); | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.fillText('Discriminator', 370, 145); | |
| // Output | |
| const output = phase === 3 ? 'Real?' : 'Fake?'; | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 14px Arial'; | |
| ctx.fillText(output, 370, 220); | |
| // Arrows | |
| ctx.strokeStyle = '#e4e6eb'; | |
| ctx.lineWidth = 2; | |
| ctx.beginPath(); | |
| ctx.moveTo(150, 140); | |
| ctx.lineTo(200, 140); | |
| ctx.stroke(); | |
| ctx.beginPath(); | |
| ctx.moveTo(260, 140); | |
| ctx.lineTo(320, 140); | |
| ctx.stroke(); | |
| } | |
| function drawAnimatedDiffusion(ctx, canvas, time) { | |
| ctx.fillStyle = '#9900ff'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Diffusion Process Animation', canvas.width / 2, 30); | |
| const steps = 5; | |
| const stepWidth = canvas.width / (steps + 1); | |
| const progress = (time / 3000) % 1; | |
| const currentStep = Math.floor(progress * steps); | |
| for (let i = 0; i < steps; i++) { | |
| const x = (i + 1) * stepWidth; | |
| const y = 150; | |
| const noiseLevel = i / (steps - 1); | |
| const isActive = i <= currentStep; | |
| // Draw square with noise | |
| ctx.fillStyle = isActive ? '#9900ff' : 'rgba(153, 0, 255, 0.3)'; | |
| ctx.fillRect(x - 30, y - 30, 60, 60); | |
| // Add noise dots | |
| if (noiseLevel > 0) { | |
| for (let j = 0; j < noiseLevel * 20; j++) { | |
| const nx = x - 25 + Math.random() * 50; | |
| const ny = y - 25 + Math.random() * 50; | |
| ctx.fillStyle = 'rgba(255, 255, 255, 0.5)'; | |
| ctx.fillRect(nx, ny, 2, 2); | |
| } | |
| } | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = '10px Arial'; | |
| ctx.fillText(`t=${i}`, x, y + 50); | |
| } | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText('Clean → Noisy (Forward) | Noisy → Clean (Reverse)', canvas.width / 2, canvas.height - 20); | |
| } | |
| function drawAnimatedRNN(ctx, canvas, time) { | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('RNN Unrolled Through Time', canvas.width / 2, 30); | |
| const steps = 5; | |
| const stepWidth = canvas.width / (steps + 1); | |
| const progress = (time / 500) % steps; | |
| const activeStep = Math.floor(progress); | |
| for (let i = 0; i < steps; i++) { | |
| const x = (i + 1) * stepWidth; | |
| const y = 150; | |
| const isActive = i === activeStep; | |
| // Hidden state | |
| ctx.fillStyle = isActive ? '#00d4ff' : 'rgba(0, 212, 255, 0.3)'; | |
| ctx.beginPath(); | |
| ctx.arc(x, y, 25, 0, Math.PI * 2); | |
| ctx.fill(); | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = '10px Arial'; | |
| ctx.fillText(`h${i}`, x, y + 4); | |
| // Input arrow | |
| ctx.strokeStyle = isActive ? '#00ff88' : 'rgba(0, 255, 136, 0.3)'; | |
| ctx.lineWidth = 2; | |
| ctx.beginPath(); | |
| ctx.moveTo(x, y + 60); | |
| ctx.lineTo(x, y + 25); | |
| ctx.stroke(); | |
| ctx.fillText(`x${i}`, x, y + 75); | |
| // Recurrent connection | |
| if (i < steps - 1) { | |
| ctx.strokeStyle = isActive ? '#ff6b35' : 'rgba(255, 107, 53, 0.3)'; | |
| ctx.beginPath(); | |
| ctx.moveTo(x + 25, y); | |
| ctx.lineTo(x + stepWidth - 25, y); | |
| ctx.stroke(); | |
| // Animated signal | |
| if (isActive) { | |
| const signalX = x + 25 + (stepWidth - 50) * (progress % 1); | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.beginPath(); | |
| ctx.arc(signalX, y, 5, 0, Math.PI * 2); | |
| ctx.fill(); | |
| } | |
| } | |
| } | |
| } | |
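| // Exports the current visualization canvas as a PNG by clicking a temporary <a download> link. | |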
| function downloadViz(moduleId) { | |
| const canvas = document.getElementById(moduleId + '-canvas'); | |
| if (!canvas) return; | |
| const link = document.createElement('a'); | |
| link.href = canvas.toDataURL('image/png'); | |
| link.download = moduleId + '-visualization.png'; | |
| link.click(); | |
| } | |
| function drawGraphNetwork(ctx, canvas) { | |
| ctx.fillStyle = '#9900ff'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Graph Structure & Message Passing', canvas.width / 2, 30); | |
| const nodes = [ | |
| { x: 100, y: 100 }, { x: 200, y: 50 }, { x: 300, y: 150 }, | |
| { x: 150, y: 250 }, { x: 400, y: 100 }, { x: 500, y: 200 } | |
| ]; | |
| const edges = [ | |
| [0, 1], [0, 3], [1, 2], [1, 4], [2, 3], [2, 4], [4, 5] | |
| ]; | |
| // Draw edges | |
| ctx.strokeStyle = 'rgba(153, 0, 255, 0.4)'; | |
| ctx.lineWidth = 2; | |
| edges.forEach(e => { | |
| ctx.beginPath(); | |
| ctx.moveTo(nodes[e[0]].x, nodes[e[0]].y); | |
| ctx.lineTo(nodes[e[1]].x, nodes[e[1]].y); | |
| ctx.stroke(); | |
| }); | |
| // Draw nodes | |
| nodes.forEach((n, i) => { | |
| ctx.fillStyle = '#9900ff'; | |
| ctx.beginPath(); | |
| ctx.arc(n.x, n.y, 15, 0, Math.PI * 2); | |
| ctx.fill(); | |
| ctx.fillStyle = 'white'; | |
| ctx.font = '12px Arial'; | |
| ctx.fillText(i, n.x, n.y + 4); | |
| }); | |
| // Static snapshot of message passing: alternate edges light up during half of a 2 s cycle | |
| const t = (Date.now() / 1000) % 2; | |
| if (t > 1) { | |
| ctx.strokeStyle = '#00ff88'; | |
| ctx.lineWidth = 4; | |
| edges.forEach((e, idx) => { | |
| if (idx % 2 === 0) { | |
| ctx.beginPath(); | |
| ctx.moveTo(nodes[e[0]].x, nodes[e[0]].y); | |
| ctx.lineTo(nodes[e[1]].x, nodes[e[1]].y); | |
| ctx.stroke(); | |
| } | |
| }); | |
| } | |
| } | |
| function drawGNNMath(ctx, canvas) { | |
| ctx.fillStyle = '#9900ff'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Graph Convolution Math', canvas.width / 2, 50); | |
| ctx.fillStyle = '#e4e6eb'; | |
| ctx.font = '14px Courier New'; | |
| ctx.fillText('H(l+1) = σ(D^-½ A D^-½ H(l) W(l))', canvas.width / 2, 100); | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.fillText('A = Adjacency matrix (neighborhood connections)', canvas.width / 2, 150); | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.fillText('D = Degree matrix (normalization)', canvas.width / 2, 180); | |
| } | |
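| // Hedged sketch (illustrative only, not wired to the UI): the symmetric normalization | |
| // D^{-1/2}(A + I)D^{-1/2} from the formula above, for a small dense adjacency matrix. | |
| function gcnNormalize(A) { | |
| const Ahat = A.map((row, i) => row.map((v, j) => v + (i === j ? 1 : 0))); // add self-loops | |
| const dInvSqrt = Ahat.map(row => 1 / Math.sqrt(row.reduce((s, v) => s + v, 0))); | |
| return Ahat.map((row, i) => row.map((v, j) => dInvSqrt[i] * v * dInvSqrt[j])); | |
| } | |
| // e.g. gcnNormalize([[0, 1], [1, 0]]) → [[0.5, 0.5], [0.5, 0.5]] | |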
| function drawGNNApplications(ctx, canvas) { | |
| ctx.fillStyle = '#9900ff'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('💊 Drug Discovery (Molecular Graphs)', canvas.width / 2, 60); | |
| ctx.fillStyle = '#00d4ff'; | |
| ctx.fillText('🚗 Traffic Flow Prediction', canvas.width / 2, 120); | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.fillText('🛒 Pinterest/Amazon Recommendations', canvas.width / 2, 180); | |
| } | |
| // ============ GenAI Visualizations ============ | |
| function drawVectorSpace(ctx, canvas) { | |
| const w = canvas.width, h = canvas.height; | |
| ctx.fillStyle = '#00c9a7'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('Vector Space — Similarity Search', w / 2, 30); | |
| // Draw axes | |
| ctx.strokeStyle = 'rgba(0, 201, 167, 0.3)'; | |
| ctx.lineWidth = 1; | |
| ctx.beginPath(); ctx.moveTo(60, h - 40); ctx.lineTo(w - 20, h - 40); ctx.stroke(); | |
| ctx.beginPath(); ctx.moveTo(60, h - 40); ctx.lineTo(60, 50); ctx.stroke(); | |
| ctx.fillStyle = '#b0b7c3'; ctx.font = '11px Arial'; | |
| ctx.fillText('Dimension 1', w / 2, h - 15); | |
| ctx.save(); ctx.translate(15, h / 2); ctx.rotate(-Math.PI / 2); | |
| ctx.fillText('Dimension 2', 0, 0); ctx.restore(); | |
| // Cluster A (documents) | |
| const clusterA = [ | |
| { x: 200, y: 120, label: 'doc1' }, { x: 230, y: 100, label: 'doc2' }, | |
| { x: 180, y: 140, label: 'doc3' }, { x: 220, y: 150, label: 'doc4' } | |
| ]; | |
| // Cluster B | |
| const clusterB = [ | |
| { x: 420, y: 200, label: 'doc5' }, { x: 450, y: 220, label: 'doc6' }, | |
| { x: 400, y: 240, label: 'doc7' } | |
| ]; | |
| // Query | |
| const query = { x: 190, y: 130, label: '🔍 Query' }; | |
| // Draw cluster backgrounds | |
| ctx.fillStyle = 'rgba(0, 136, 255, 0.08)'; | |
| ctx.beginPath(); ctx.arc(210, 130, 60, 0, Math.PI * 2); ctx.fill(); | |
| ctx.fillStyle = 'rgba(255, 107, 53, 0.08)'; | |
| ctx.beginPath(); ctx.arc(423, 220, 55, 0, Math.PI * 2); ctx.fill(); | |
| // Draw similarity lines from query to nearest | |
| ctx.strokeStyle = 'rgba(0, 255, 136, 0.5)'; | |
| ctx.lineWidth = 2; | |
| ctx.setLineDash([5, 3]); | |
| clusterA.forEach(d => { | |
| ctx.beginPath(); ctx.moveTo(query.x, query.y); | |
| ctx.lineTo(d.x, d.y); ctx.stroke(); | |
| }); | |
| ctx.setLineDash([]); | |
| // Draw document points | |
| clusterA.forEach(d => { | |
| ctx.fillStyle = '#0088ff'; | |
| ctx.beginPath(); ctx.arc(d.x, d.y, 8, 0, Math.PI * 2); ctx.fill(); | |
| ctx.fillStyle = '#e4e6eb'; ctx.font = '10px Arial'; | |
| ctx.fillText(d.label, d.x + 12, d.y + 4); | |
| }); | |
| clusterB.forEach(d => { | |
| ctx.fillStyle = '#ff6b35'; | |
| ctx.beginPath(); ctx.arc(d.x, d.y, 8, 0, Math.PI * 2); ctx.fill(); | |
| ctx.fillStyle = '#e4e6eb'; ctx.font = '10px Arial'; | |
| ctx.fillText(d.label, d.x + 12, d.y + 4); | |
| }); | |
| // Draw query | |
| ctx.fillStyle = '#00ff88'; | |
| ctx.beginPath(); ctx.arc(query.x, query.y, 10, 0, Math.PI * 2); ctx.fill(); | |
| ctx.fillStyle = '#00ff88'; ctx.font = 'bold 12px Arial'; | |
| ctx.fillText(query.label, query.x - 15, query.y - 18); | |
| // Legend | |
| ctx.font = '11px Arial'; let ly = h - 80; | |
| [['#0088ff', 'Cluster A (nearest)'], ['#ff6b35', 'Cluster B'], ['#00ff88', 'Query Vector']].forEach(([c, t]) => { | |
| ctx.fillStyle = c; ctx.fillRect(w - 160, ly, 10, 10); | |
| ctx.fillStyle = '#e4e6eb'; ctx.fillText(t, w - 145, ly + 9); ly += 18; | |
| }); | |
| // Cosine score | |
| ctx.fillStyle = '#00c9a7'; ctx.font = 'bold 13px Arial'; | |
| ctx.fillText('cos(query, doc1) = 0.97', w / 2, h - 55); | |
| } | |
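| // Sketch of the score annotated in the plot above; `cosineSimilarity` is an | |
| // illustrative helper of ours, not something the drawing code calls. | |
| function cosineSimilarity(a, b) { | |
| let dot = 0, na = 0, nb = 0; | |
| for (let i = 0; i < a.length; i++) { dot += a[i] * b[i]; na += a[i] * a[i]; nb += b[i] * b[i]; } | |
| return dot / (Math.sqrt(na) * Math.sqrt(nb)); | |
| } | |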
| function drawRAGPipeline(ctx, canvas) { | |
| const w = canvas.width, h = canvas.height; | |
| ctx.fillStyle = '#00c9a7'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('RAG Pipeline — Retrieval-Augmented Generation', w / 2, 30); | |
| const boxes = [ | |
| { x: 30, y: 70, w: 90, h: 50, label: '📄 Docs', color: '#0088ff' }, | |
| { x: 145, y: 70, w: 90, h: 50, label: '✂️ Chunk', color: '#0088ff' }, | |
| { x: 260, y: 70, w: 90, h: 50, label: '🧮 Embed', color: '#0088ff' }, | |
| { x: 375, y: 70, w: 100, h: 50, label: '🧲 Vector DB', color: '#0088ff' }, | |
| { x: 30, y: 180, w: 90, h: 50, label: '❓ Query', color: '#00ff88' }, | |
| { x: 145, y: 180, w: 90, h: 50, label: '🧮 Embed', color: '#00ff88' }, | |
| { x: 260, y: 180, w: 90, h: 50, label: '🔍 Retrieve', color: '#ff6b35' }, | |
| { x: 375, y: 180, w: 100, h: 50, label: '🤖 LLM', color: '#ffaa00' }, | |
| { x: 500, y: 180, w: 90, h: 50, label: '✅ Answer', color: '#00c9a7' } | |
| ]; | |
| // Draw arrows (indexing pipeline) | |
| ctx.strokeStyle = 'rgba(0, 136, 255, 0.6)'; ctx.lineWidth = 2; | |
| for (let i = 0; i < 3; i++) { | |
| ctx.beginPath(); | |
| ctx.moveTo(boxes[i].x + boxes[i].w, boxes[i].y + 25); | |
| ctx.lineTo(boxes[i + 1].x, boxes[i + 1].y + 25); | |
| ctx.stroke(); | |
| // Arrowhead | |
| ctx.fillStyle = 'rgba(0, 136, 255, 0.6)'; | |
| ctx.beginPath(); | |
| ctx.moveTo(boxes[i + 1].x, boxes[i + 1].y + 25); | |
| ctx.lineTo(boxes[i + 1].x - 8, boxes[i + 1].y + 20); | |
| ctx.lineTo(boxes[i + 1].x - 8, boxes[i + 1].y + 30); | |
| ctx.fill(); | |
| } | |
| // Draw arrows (query pipeline) | |
| ctx.strokeStyle = 'rgba(0, 255, 136, 0.6)'; | |
| for (let i = 4; i < 8; i++) { | |
| ctx.beginPath(); | |
| ctx.moveTo(boxes[i].x + boxes[i].w, boxes[i].y + 25); | |
| ctx.lineTo(boxes[i + 1].x, boxes[i + 1].y + 25); | |
| ctx.stroke(); | |
| ctx.fillStyle = 'rgba(0, 255, 136, 0.6)'; | |
| ctx.beginPath(); | |
| ctx.moveTo(boxes[i + 1].x, boxes[i + 1].y + 25); | |
| ctx.lineTo(boxes[i + 1].x - 8, boxes[i + 1].y + 20); | |
| ctx.lineTo(boxes[i + 1].x - 8, boxes[i + 1].y + 30); | |
| ctx.fill(); | |
| } | |
| // Vector DB connection (vertical) | |
| ctx.strokeStyle = 'rgba(255, 107, 53, 0.6)'; ctx.setLineDash([4, 3]); | |
| ctx.beginPath(); | |
| ctx.moveTo(boxes[3].x + 50, boxes[3].y + boxes[3].h); | |
| ctx.lineTo(boxes[6].x + 45, boxes[6].y); | |
| ctx.stroke(); ctx.setLineDash([]); | |
| // Draw boxes | |
| boxes.forEach(b => { | |
| ctx.fillStyle = 'rgba(0, 0, 0, 0.5)'; | |
| ctx.fillRect(b.x, b.y, b.w, b.h); | |
| ctx.strokeStyle = b.color; ctx.lineWidth = 2; | |
| ctx.strokeRect(b.x, b.y, b.w, b.h); | |
| ctx.fillStyle = '#e4e6eb'; ctx.font = '11px Arial'; ctx.textAlign = 'center'; | |
| ctx.fillText(b.label, b.x + b.w / 2, b.y + 30); | |
| }); | |
| // Labels | |
| ctx.font = 'bold 12px Arial'; ctx.textAlign = 'left'; | |
| ctx.fillStyle = '#0088ff'; ctx.fillText('Indexing (Offline)', 30, 60); | |
| ctx.fillStyle = '#00ff88'; ctx.fillText('Query (Online)', 30, 170); | |
| } | |
| function drawLoRADiagram(ctx, canvas) { | |
| const w = canvas.width, h = canvas.height; | |
| ctx.fillStyle = '#00c9a7'; | |
| ctx.font = 'bold 16px Arial'; | |
| ctx.textAlign = 'center'; | |
| ctx.fillText('LoRA — Low-Rank Adaptation', w / 2, 30); | |
| // Original Weight Matrix W (large) | |
| const wX = 50, wY = 60, wW = 120, wH = 120; | |
| ctx.fillStyle = 'rgba(0, 136, 255, 0.15)'; | |
| ctx.fillRect(wX, wY, wW, wH); | |
| ctx.strokeStyle = '#0088ff'; ctx.lineWidth = 2; | |
| ctx.strokeRect(wX, wY, wW, wH); | |
| ctx.fillStyle = '#0088ff'; ctx.font = 'bold 14px Arial'; | |
| ctx.fillText('W', wX + wW / 2, wY + wH / 2 + 5); | |
| ctx.font = '10px Arial'; ctx.fillStyle = '#b0b7c3'; | |
| ctx.fillText('d × d', wX + wW / 2, wY + wH / 2 + 22); | |
| ctx.fillText('(Frozen)', wX + wW / 2, wY - 8); | |
| // Plus sign | |
| ctx.fillStyle = '#e4e6eb'; ctx.font = 'bold 24px Arial'; | |
| ctx.fillText('+', wX + wW + 30, wY + wH / 2 + 8); | |
| // LoRA matrices B and A | |
| const bX = 230, bY = 60, bW = 30, bH = 120; | |
| ctx.fillStyle = 'rgba(0, 255, 136, 0.2)'; | |
| ctx.fillRect(bX, bY, bW, bH); | |
| ctx.strokeStyle = '#00ff88'; ctx.lineWidth = 2; | |
| ctx.strokeRect(bX, bY, bW, bH); | |
| ctx.fillStyle = '#00ff88'; ctx.font = 'bold 14px Arial'; | |
| ctx.fillText('B', bX + bW / 2, bY + bH / 2 + 5); | |
| ctx.font = '10px Arial'; ctx.fillStyle = '#b0b7c3'; | |
| ctx.fillText('d×r', bX + bW / 2, bY - 8); | |
| // × symbol | |
| ctx.fillStyle = '#e4e6eb'; ctx.font = 'bold 18px Arial'; | |
| ctx.fillText('×', bX + bW + 18, bY + bH / 2 + 5); | |
| const aX = 290, aY = 100, aW = 120, aH = 30; | |
| ctx.fillStyle = 'rgba(255, 107, 53, 0.2)'; | |
| ctx.fillRect(aX, aY, aW, aH); | |
| ctx.strokeStyle = '#ff6b35'; ctx.lineWidth = 2; | |
| ctx.strokeRect(aX, aY, aW, aH); | |
| ctx.fillStyle = '#ff6b35'; ctx.font = 'bold 14px Arial'; | |
| ctx.fillText('A', aX + aW / 2, aY + aH / 2 + 5); | |
| ctx.font = '10px Arial'; ctx.fillStyle = '#b0b7c3'; | |
| ctx.fillText('r×d', aX + aW / 2, aY - 8); | |
| // = sign | |
| ctx.fillStyle = '#e4e6eb'; ctx.font = 'bold 24px Arial'; | |
| ctx.fillText('=', aX + aW + 25, wY + wH / 2 + 8); | |
| // Result W' | |
| const rX = 460, rY = 60, rW = 120, rH = 120; | |
| ctx.fillStyle = 'rgba(0, 201, 167, 0.15)'; | |
| ctx.fillRect(rX, rY, rW, rH); | |
| ctx.strokeStyle = '#00c9a7'; ctx.lineWidth = 2; | |
| ctx.strokeRect(rX, rY, rW, rH); | |
| ctx.fillStyle = '#00c9a7'; ctx.font = 'bold 14px Arial'; | |
| ctx.fillText("W'", rX + rW / 2, rY + rH / 2 + 5); | |
| ctx.font = '10px Arial'; ctx.fillStyle = '#b0b7c3'; | |
| ctx.fillText('d × d', rX + rW / 2, rY + rH / 2 + 22); | |
| ctx.fillText('(Adapted)', rX + rW / 2, rY - 8); | |
| // Stats | |
| ctx.font = '12px Arial'; ctx.textAlign = 'left'; | |
| ctx.fillStyle = '#0088ff'; ctx.fillText('Full: 16,777,216 params', 50, 220); | |
| ctx.fillStyle = '#00ff88'; ctx.fillText('LoRA (r=16): 131,072 params', 50, 240); | |
| ctx.fillStyle = '#00c9a7'; ctx.fillText('Savings: 128x reduction (0.78%)', 50, 260); | |
| // Quantization bar chart | |
| ctx.fillStyle = '#e4e6eb'; ctx.font = 'bold 12px Arial'; ctx.textAlign = 'center'; | |
| ctx.fillText('Memory: 7B Model', 480, 210); | |
| const bars = [ | |
| { label: 'fp32', val: 28, color: '#ff6b35' }, | |
| { label: 'fp16', val: 14, color: '#ffaa00' }, | |
| { label: 'INT8', val: 7, color: '#0088ff' }, | |
| { label: 'INT4', val: 3.5, color: '#00ff88' } | |
| ]; | |
| bars.forEach((b, i) => { | |
| const bw = b.val * 4.5; | |
| ctx.fillStyle = b.color; | |
| ctx.fillRect(430, 220 + i * 22, bw, 16); | |
| ctx.fillStyle = '#e4e6eb'; ctx.font = '10px Arial'; ctx.textAlign = 'left'; | |
| ctx.fillText(`${b.label}: ${b.val}GB`, 430 + bw + 5, 233 + i * 22); | |
| }); | |
| } | |
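| // Illustrative helper reproducing the LoRA numbers above (assuming d = 4096, r = 16): | |
| // a full d×d update costs d·d params, while LoRA's B (d×r) and A (r×d) cost 2·d·r. | |
| function loraParamStats(d, r) { | |
| const full = d * d; // 4096² = 16,777,216 | |
| const lora = 2 * d * r; // 2·4096·16 = 131,072 | |
| return { full, lora, reduction: full / lora, pct: (100 * lora / full).toFixed(2) + '%' }; | |
| } | |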
| initDashboard(); | |
| </script> | |
| </body> | |
| </html> |