/**
 * Interactive ML History Timeline
 * Handles popup functionality for milestone cards
 */
document.addEventListener('DOMContentLoaded', function() {
    const timelineData = {
        perceptron: {
            year: "1958",
            title: "The Perceptron",
            researcher: "Frank Rosenblatt",
            subtitle: "The first trainable neural network proves machines can learn from data",
            achievement: "Binary classification with gradient descent",
            architecture: "Input → Linear → Sigmoid → Output",
            whatYouBuild: [
                "Binary classification with gradient descent",
                "Simple but revolutionary architecture",
                "YOUR Linear layer recreates history"
            ],
            systemsInsights: [
                "Memory: O(n) parameters",
                "Compute: O(n) operations",
                "Limitation: Only linearly separable problems"
            ],
            modules: "After Modules 02-04",
            expectedResults: "~50% (untrained) → 95%+ (trained) accuracy",
            commands: ["tito milestone run perceptron"]
        },
        xor: {
            year: "1969",
            title: "The XOR Crisis",
            researcher: "Minsky & Papert",
            subtitle: "Hidden layers solve non-linear problems that nearly ended AI research",
            achievement: "Non-linear learning through hidden representations",
            architecture: "Input → Linear → ReLU → Linear → Output",
            whatYouBuild: [
                "Hidden layers enable non-linear solutions",
                "Multi-layer networks break through limitations",
                "YOUR autograd makes it possible"
            ],
            systemsInsights: [
                "Memory: O(n²) with hidden layers",
                "Compute: O(n²) operations",
                "Breakthrough: Hidden representations"
            ],
            modules: "After Modules 02-06",
            expectedResults: "50% (single layer) → 100% (multi-layer) on XOR",
            commands: ["tito milestone run xor"]
        },
        mlp: {
            year: "1986",
            title: "MLP Revival",
            researcher: "Backpropagation Era",
            subtitle: "Backpropagation enables training deep networks on real datasets",
            achievement: "Multi-class digit recognition",
            architecture: "Images → Flatten → Linear → ReLU → Linear → ReLU → Linear → Classes",
            whatYouBuild: [
                "Multi-class digit recognition",
                "Complete training pipelines",
                "YOUR optimizers achieve 95%+ accuracy"
            ],
            systemsInsights: [
                "Memory: ~100K parameters for MNIST",
                "Compute: Dense matrix operations",
                "Architecture: Multi-layer feature learning"
            ],
            modules: "After Modules 02-08",
            expectedResults: "95%+ accuracy on MNIST",
            commands: ["tito milestone run mlp"]
        },
        cnn: {
            year: "1998",
            title: "CNN Revolution",
            researcher: "Yann LeCun",
            subtitle: "CNNs exploit spatial structure for computer vision—enabling modern AI",
            achievement: "Spatial intelligence for computer vision",
            architecture: "Images → Conv → ReLU → Pool → Conv → ReLU → Pool → Flatten → Linear → Classes",
            whatYouBuild: [
                "Convolutional feature extraction",
                "Natural image classification (CIFAR-10)",
                "YOUR Conv2d + MaxPool2d unlock spatial intelligence"
            ],
            systemsInsights: [
                "Memory: ~1M parameters (weight sharing reduces vs dense)",
                "Compute: Convolution is intensive but parallelizable",
                "Architecture: Local connectivity + translation invariance"
            ],
            modules: "After Modules 02-09",
            expectedResults: "75%+ accuracy on CIFAR-10 ✨",
            commands: ["tito milestone run cnn"],
            northStar: true
        },
        transformer: {
            year: "2017",
            title: "Transformer Era",
            researcher: "Vaswani et al.",
            subtitle: "Attention mechanism launches the LLM revolution (GPT, BERT, ChatGPT)",
            achievement: "Self-attention for language understanding",
            architecture: "Tokens → Embeddings → Attention → FFN → ... → Attention → Output",
            whatYouBuild: [
                "Self-attention mechanisms",
                "Autoregressive text generation",
                "YOUR attention implementation generates language"
            ],
            systemsInsights: [
                "Memory: O(n²) attention requires careful management",
                "Compute: Highly parallelizable",
                "Architecture: Long-range dependencies"
            ],
            modules: "After Modules 02-13",
            expectedResults: "Loss < 1.5, coherent responses to questions",
            commands: ["tito milestone run transformer"]
        },
        olympics: {
            year: "2018",
            title: "MLPerf Torch Olympics",
            researcher: "MLCommons (founded 2018)",
            subtitle: "Systematic optimization becomes essential as models grow larger",
            achievement: "Production-ready optimization",
            architecture: "Profile → Compress → Accelerate",
            whatYouBuild: [
                "Performance profiling and bottleneck analysis",
                "Model compression (quantization + pruning)",
                "Inference acceleration (KV-cache + batching)"
            ],
            systemsInsights: [
                "Memory: 4-16× compression through quantization/pruning",
                "Speed: 12-40× faster generation with KV-cache + batching",
                "Workflow: Systematic 'measure → optimize → validate' methodology"
            ],
            modules: "After Modules 14-18",
            expectedResults: "8-16× smaller models, 12-40× faster inference",
            commands: ["tito milestone run mlperf"]
        }
    };

    // Create the popup element once, if it doesn't exist yet
    let popup = document.getElementById('ml-timeline-popup');
    if (!popup) {
        popup = document.createElement('div');
        popup.id = 'ml-timeline-popup';
        popup.className = 'ml-timeline-popup';
        // Minimal shell: the click handler below only needs a
        // .ml-timeline-popup-content container to fill
        popup.innerHTML = '<div class="ml-timeline-popup-content"></div>';
        document.body.appendChild(popup);
    }
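    // The click wiring below assumes timeline cards shaped roughly like this
    // (inferred from the selectors in use; the actual page markup may differ):
    //
    //   <div class="ml-timeline-item perceptron">
    //     <div class="ml-timeline-content">...card contents...</div>
    //   </div>
    //
    // The second class on .ml-timeline-item must be a timelineData key
    // (perceptron, xor, mlp, cnn, transformer, olympics).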
    // Handle clicks on timeline items
    document.querySelectorAll('.ml-timeline-content').forEach(card => {
        card.addEventListener('click', function(e) {
            const item = this.closest('.ml-timeline-item');
            const milestoneType = item.classList[1]; // Get the milestone class (perceptron, xor, etc.)
            const data = timelineData[milestoneType];
            if (!data) return;

            const popupContent = popup.querySelector('.ml-timeline-popup-content');
            // Popup body built from the milestone data. The ml-timeline-popup-*
            // class names are illustrative placeholders that follow the existing
            // naming convention; align them with the stylesheet in use.
            popupContent.innerHTML = `
                <span class="ml-timeline-popup-year">${data.year}</span>
                <h2>${data.title}</h2>
                <p class="ml-timeline-popup-researcher">${data.researcher}</p>
                <p class="ml-timeline-popup-subtitle">${data.subtitle}</p>
                ${data.northStar ? '<p class="ml-timeline-popup-badge">✨ North Star milestone</p>' : ''}
                <p><strong>Achievement:</strong> ${data.achievement}</p>
                <p><strong>Architecture:</strong> <code>${data.architecture}</code></p>
                <h3>What You Build</h3>
                <ul>${data.whatYouBuild.map(point => `<li>${point}</li>`).join('')}</ul>
                <h3>Systems Insights</h3>
                <ul>${data.systemsInsights.map(point => `<li>${point}</li>`).join('')}</ul>
                <p><strong>When:</strong> ${data.modules}</p>
                <p><strong>Expected results:</strong> ${data.expectedResults}</p>
                <pre><code>${data.commands.join('\n')}</code></pre>
            `;
            popup.classList.add('is-visible'); // placeholder visibility class; adjust to the site's CSS
        });
    });
});
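
// Dismissal wiring: a minimal sketch, assuming the popup should close on a
// backdrop click or the Escape key, and that visibility is toggled via the
// same placeholder 'is-visible' class used above. Adjust to the site's CSS.
document.addEventListener('DOMContentLoaded', function() {
    const popup = document.getElementById('ml-timeline-popup');
    if (!popup) return;
    popup.addEventListener('click', function(e) {
        // Close only when the backdrop itself (not the content) is clicked
        if (e.target === popup) popup.classList.remove('is-visible');
    });
    document.addEventListener('keydown', function(e) {
        if (e.key === 'Escape') popup.classList.remove('is-visible');
    });
});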