diff --git a/milestones/05_2017_transformer/00_vaswani_attention_proof.py b/milestones/05_2017_transformer/00_vaswani_attention_proof.py index 908c5b66..f0a6d4ff 100755 --- a/milestones/05_2017_transformer/00_vaswani_attention_proof.py +++ b/milestones/05_2017_transformer/00_vaswani_attention_proof.py @@ -129,7 +129,8 @@ from pathlib import Path sys.path.insert(0, os.getcwd()) # Import TinyTorch components YOU BUILT! -from tinytorch import Tensor, Linear, ReLU, CrossEntropyLoss, Adam +from tinytorch import Tensor, Linear, ReLU, CrossEntropyLoss +from tinytorch.core.optimizers import Adam from tinytorch.text.embeddings import Embedding, PositionalEncoding from tinytorch.core.attention import MultiHeadAttention from tinytorch.models.transformer import LayerNorm diff --git a/milestones/05_2017_transformer/DEBUG_REVERSAL.md b/milestones/05_2017_transformer/DEBUG_REVERSAL.md new file mode 100644 index 00000000..215865b5 --- /dev/null +++ b/milestones/05_2017_transformer/DEBUG_REVERSAL.md @@ -0,0 +1,103 @@ +# Debugging Sequence Reversal: The Attention Test + +## Current Status + +❌ **Model is NOT learning** (0% accuracy after 30 epochs) +- Loss barely moving: 1.5342 → 1.3062 +- Predictions are mostly random or mode-collapsed (lots of 2's) +- This should reach 95%+ if attention works correctly + +## Why This Is Perfect for Debugging + +This task is **binary**: either attention works (95%+) or it doesn't (0-5%). +No gray area, no "partial success" - it's a perfect diagnostic! + +## Comparison: What Works vs What Doesn't + +### ✅ Working Implementation +- `tests/milestones/test_transformer_capabilities.py` +- Uses functional approach: `build_simple_transformer()` +- Achieves 95%+ accuracy reliably + +### ❌ Failing Implementation +- `milestones/05_2017_transformer/00_vaswani_attention_proof.py` +- Uses class-based approach: `ReversalTransformer` class +- Gets 0% accuracy + +## Debugging Strategy + +### Phase 1: Component-Level Tests +1. 
**Embedding Layer** + - [ ] Verify embedding lookup works + - [ ] Check positional encoding is added correctly + - [ ] Ensure gradients flow through embeddings + +2. **Attention Mechanism** + - [ ] Verify Q, K, V projections + - [ ] Check attention score computation + - [ ] Verify softmax and weighted sum + - [ ] Test multi-head split and concatenation + - [ ] Ensure attention gradients flow + +3. **Feed-Forward Network** + - [ ] Check Linear → ReLU → Linear path + - [ ] Verify FFN gradients + +4. **Residual Connections** + - [ ] Verify `x + attn_out` preserves computation graph + - [ ] Check `x + ffn_out` preserves computation graph + +5. **LayerNorm** + - [ ] Verify normalization computation + - [ ] Check gradients through LayerNorm + +6. **Output Projection** + - [ ] Verify reshape logic: (batch, seq, embed) → (batch*seq, embed) → (batch, seq, vocab) + - [ ] Check output projection gradients + +### Phase 2: Integration Tests +- [ ] Full forward pass produces correct shapes +- [ ] Loss computation is correct +- [ ] Backward pass flows to all parameters +- [ ] Optimizer updates all parameters +- [ ] Parameters actually change after training step + +### Phase 3: Architectural Comparison +- [ ] Compare class-based vs functional implementations +- [ ] Identify structural differences +- [ ] Port fixes from working to failing version + +### Phase 4: Hyperparameter Sweep +- [ ] Learning rate (try 0.001, 0.003, 0.005, 0.01) +- [ ] Epochs (try 50, 100) +- [ ] Embed dimension (try 16, 32, 64) +- [ ] Number of heads (try 2, 4, 8) + +## Key Questions to Answer + +1. **Are gradients flowing?** + - Check `param.grad` is not None for all parameters + - Check `param.grad` is not zero + +2. **Are weights updating?** + - Save initial weights + - Train for 1 epoch + - Verify weights changed + +3. **Is the architecture correct?** + - Does forward pass match our working implementation? + - Are residual connections preserved? + +4. 
**Is the data correct?** + - Are input sequences correctly formatted? + - Are targets correctly formatted? + - Is vocab size consistent? + +## Next Steps + +1. Create minimal reproduction test +2. Test each component in isolation +3. Compare with working implementation line-by-line +4. Fix identified issues +5. Verify with full training run + diff --git a/milestones/05_2017_transformer/STATUS.md b/milestones/05_2017_transformer/STATUS.md new file mode 100644 index 00000000..3e0e9e75 --- /dev/null +++ b/milestones/05_2017_transformer/STATUS.md @@ -0,0 +1,99 @@ +# Sequence Reversal Milestone - Current Status + +## 🔧 Fixes Applied + +### 1. Embedding Gradient Flow ✅ +- **Fixed:** `Embedding.weight` now gets gradients +- **Issue:** Missing `_grad_fn` attachment in compiled `tinytorch/text/embeddings.py` +- **Solution:** Exported Module 11 to sync the fix +- **Result:** 19/19 parameters now have gradients (was 18/19) + +### 2. Tensor `.data` Access Cleanup 🔄 +- **Addressed:** Multiple `.data` accesses that could break computation graphs +- **Changes:** + - `token_embeds = token_embeds * scale_factor` (was creating new Tensor from `.data`) + - Documented limitation: `PositionalEncoding` uses `.data` for slicing (Tensor doesn't have `__getitem__`) + +### 3. Component Tests ✅ +- **All 6 tests PASS:** + - ✅ Embedding Layer + - ✅ Attention Layer + - ✅ FFN Layer + - ✅ Residual Connections + - ✅ Full Forward Pass (19/19 params have gradients) + - ✅ Training Step (all 19/19 weights update) + +## ❌ Still Not Learning + +### Current Performance +- **Test Accuracy:** 0.0% (target: 95%+) +- **Training Accuracy:** 2.7% after 30 epochs +- **Loss:** 1.62 → 1.24 (minimal decrease) + +### What This Means +- ✅ Architecture is correctly wired (all tests pass) +- ✅ Gradients flow to all parameters +- ✅ All weights update during training +- ❌ Model is NOT learning the reversal task + +## 🔍 Possible Causes + +### 1. 
Hyperparameter Issues +- Learning rate might be too high/low (currently 0.005) +- Not enough epochs (currently 30) +- Architecture might be too small (embed_dim=32, 4 heads) + +### 2. Positional Encoding Limitation +- Position embeddings don't get gradients (due to Tensor slicing limitation) +- This might be critical for reversal task since positions are key +- **Impact:** Model can't learn position-dependent transformations + +### 3. Architectural Differences +- Our implementation (class-based) vs working test (functional) +- Subtle differences in how operations are composed + +### 4. Task Setup +- Data generation might have issues +- Loss computation might be incorrect +- Vocab size (10 vs 11 in working test) + +## 📋 Next Steps (Prioritized) + +### High Priority: Fix Positional Encoding Gradients +**Problem:** Positional embeddings are learnable but don't get gradients because we can't slice Tensors + +**Solution Options:** +1. **Implement `Tensor.__getitem__`** (proper fix, enables gradient-preserving slicing) +2. **Use full position embeddings** (no slicing, pad inputs to max_seq_len) +3. **Make position embeddings fixed** (requires_grad=False, like sinusoidal) + +**Recommended:** Option 1 - Implement `Tensor.__getitem__` with proper backward function + +### Medium Priority: Hyperparameter Sweep +Try different combinations: +- Learning rates: [0.001, 0.003, 0.005, 0.01] +- Epochs: [50, 100] +- Embed dims: [64, 128] +- Attention heads: [2, 4, 8] + +### Low Priority: Architecture Comparison +- Line-by-line comparison with working functional implementation +- Check if there are subtle differences in forward pass + +## 💡 Key Insight + +**The model has all the right pieces, they're all connected correctly, but it's not learning.** + +This suggests the issue is either: +1. A critical component (positional encoding) isn't learning properly +2. Hyperparameters are preventing convergence +3. 
There's a subtle bug we haven't found yet + +The fact that positional encodings (which are CRITICAL for reversal) don't get gradients is the most suspicious issue. + +## 🎯 Recommended Action + +**Implement `Tensor.__getitem__` to enable gradient-preserving slicing**, then re-test. + +If that doesn't work, try the hyperparameter sweep. + diff --git a/milestones/05_2017_transformer/TENSOR_SLICING_IMPLEMENTATION.md b/milestones/05_2017_transformer/TENSOR_SLICING_IMPLEMENTATION.md new file mode 100644 index 00000000..99ad2fbf --- /dev/null +++ b/milestones/05_2017_transformer/TENSOR_SLICING_IMPLEMENTATION.md @@ -0,0 +1,106 @@ +# Tensor Slicing Implementation - Progressive Disclosure + +## What We Implemented + +### Module 01 (Tensor): Basic Slicing +**File:** `tinytorch/core/tensor.py` + +```python +def __getitem__(self, key): + """Enable indexing and slicing operations on Tensors.""" + result_data = self.data[key] + if not isinstance(result_data, np.ndarray): + result_data = np.array(result_data) + result = Tensor(result_data, requires_grad=self.requires_grad) + return result +``` + +**Progressive Disclosure:** NO mention of gradients, `_grad_fn`, or `SliceBackward` at this stage! + +### Module 05 (Autograd): Gradient Tracking +**File:** `tinytorch/core/autograd.py` + +```python +def enable_autograd(): + # Store original __getitem__ + _original_getitem = Tensor.__getitem__ + + # Create tracked version + def tracked_getitem(self, key): + result = _original_getitem(self, key) + if self.requires_grad: + result.requires_grad = True + result._grad_fn = SliceBackward(self, key) + return result + + # Monkey-patch it + Tensor.__getitem__ = tracked_getitem +``` + +**Progressive Disclosure:** Gradient tracking added ONLY when autograd is enabled! 
+ +### Module 05 (Autograd): SliceBackward Function +**File:** `tinytorch/core/autograd.py` + +```python +class SliceBackward(Function): + """Gradient computation for tensor slicing.""" + + def __init__(self, tensor, key): + super().__init__(tensor) + self.key = key + self.original_shape = tensor.shape + + def apply(self, grad_output): + grad_input = np.zeros(self.original_shape, dtype=np.float32) + grad_input[self.key] = grad_output + return (grad_input,) +``` + +## Test Results + +### ✅ Component Tests: ALL PASS +``` +✓ PASS - Embedding Layer (gradients flow) +✓ PASS - Attention Layer (8/8 params) +✓ PASS - FFN Layer (4/4 params) +✓ PASS - Residual Connections (preserves gradients) +✓ PASS - Full Forward Pass (19/19 params with gradients) +✓ PASS - Training Step (19/19 weights update) +``` + +### ⚠️ End-to-End Training: Still Not Learning +``` +Test Accuracy: 0.0% (target: 95%+) +Loss: 1.54 → 1.08 (improved from 1.62 → 1.24 before) +``` + +**Progress:** Loss is dropping BETTER than before, showing gradients ARE flowing! + +## Why It's Still Not Learning + +### Current Theory: +The monkey-patching happens AFTER `enable_autograd()` has already been called during import. So the gradient-tracked version of `__getitem__` isn't being used in the current session. + +### To Test: +Need a FRESH Python session where: +1. `__getitem__` is defined in Tensor +2. `SliceBackward` is defined in Autograd +3. `enable_autograd()` is called +4. THEN the model is trained + +## Next Steps + +1. **Verify in fresh session:** Restart Python and test +2. **Check position embedding gradients:** Are they actually getting updated? +3. **Hyperparameter sweep:** Try different learning rates if gradients work +4. 
**Comparison test:** Run the functional implementation side-by-side + +## Architecture Principle Learned + +**Progressive Disclosure is CRITICAL:** +- Module 01: Simple operations, no gradient mentions +- Module 05: Monkey-patch to add gradients +- Students see features WHEN they're ready + +This is how ALL TinyTorch operations work (add, mul, matmul, etc.), and now slicing follows the same pattern! diff --git a/milestones/05_2017_transformer/test_reversal_debug.py b/milestones/05_2017_transformer/test_reversal_debug.py new file mode 100644 index 00000000..63e1c0c1 --- /dev/null +++ b/milestones/05_2017_transformer/test_reversal_debug.py @@ -0,0 +1,347 @@ +#!/usr/bin/env python3 +""" +Debug script for sequence reversal milestone. + +This script systematically tests each component to find what's broken. +""" + +import sys +import os +import numpy as np + +sys.path.insert(0, os.getcwd()) + +from tinytorch import Tensor, Linear, ReLU, CrossEntropyLoss +from tinytorch.core.optimizers import Adam +from tinytorch.text.embeddings import Embedding, PositionalEncoding +from tinytorch.core.attention import MultiHeadAttention +from tinytorch.models.transformer import LayerNorm + +from rich.console import Console +from rich.panel import Panel + +console = Console() + +def test_embedding_layer(): + """Test that embedding layer works correctly.""" + console.print("\n[bold cyan]Test 1: Embedding Layer[/bold cyan]") + + vocab_size = 10 + embed_dim = 32 + seq_len = 6 + + # Create embedding + embedding = Embedding(vocab_size, embed_dim) + pos_encoding = PositionalEncoding(seq_len, embed_dim) + + # Create input + x = Tensor(np.array([[1, 2, 3, 4, 5, 6]])) # (1, 6) + + # Embed + embedded = embedding(x) # Should be (1, 6, 32) + console.print(f" Input shape: {x.shape}") + console.print(f" Embedded shape: {embedded.shape}") + console.print(f" Expected: (1, 6, 32)") + + # Add positional encoding + pos_embedded = pos_encoding(embedded) + console.print(f" After pos encoding: 
{pos_embedded.shape}") + + # Check gradient flow + loss = pos_embedded.sum() + loss.backward() + + has_grad = embedding.weight.grad is not None + grad_nonzero = np.any(embedding.weight.grad.data) if has_grad else False + + console.print(f" Embedding has gradient: {has_grad}") + console.print(f" Gradient is non-zero: {grad_nonzero}") + + if pos_embedded.shape == (1, 6, 32) and has_grad and grad_nonzero: + console.print(" [green]✓ Embedding layer works![/green]") + return True + else: + console.print(" [red]✗ Embedding layer has issues[/red]") + return False + + +def test_attention_layer(): + """Test that attention mechanism works.""" + console.print("\n[bold cyan]Test 2: Attention Layer[/bold cyan]") + + embed_dim = 32 + num_heads = 4 + seq_len = 6 + + # Create attention + attention = MultiHeadAttention(embed_dim, num_heads) + + # Create input (batch=1, seq=6, embed=32) + x = Tensor(np.random.randn(1, seq_len, embed_dim)) + + console.print(f" Input shape: {x.shape}") + + # Forward + attn_out = attention.forward(x, mask=None) + console.print(f" Attention output shape: {attn_out.shape}") + console.print(f" Expected: (1, 6, 32)") + + # Check gradient flow + loss = attn_out.sum() + loss.backward() + + params = attention.parameters() + has_grads = all(p.grad is not None for p in params) + grads_nonzero = all(np.any(p.grad.data) for p in params) if has_grads else False + + console.print(f" All params have gradients: {has_grads}") + console.print(f" All gradients non-zero: {grads_nonzero}") + console.print(f" Number of parameters: {len(params)}") + + if attn_out.shape == (1, 6, 32) and has_grads: + console.print(" [green]✓ Attention layer works![/green]") + return True + else: + console.print(" [red]✗ Attention layer has issues[/red]") + return False + + +def test_ffn_layer(): + """Test feed-forward network.""" + console.print("\n[bold cyan]Test 3: Feed-Forward Network[/bold cyan]") + + embed_dim = 32 + + fc1 = Linear(embed_dim, embed_dim * 2) + relu = ReLU() + fc2 = 
Linear(embed_dim * 2, embed_dim) + + # Input + x = Tensor(np.random.randn(1, 6, embed_dim)) + + # Forward + h = fc1(x) + h = relu(h) + out = fc2(h) + + console.print(f" Input shape: {x.shape}") + console.print(f" Output shape: {out.shape}") + console.print(f" Expected: (1, 6, 32)") + + # Gradient flow + loss = out.sum() + loss.backward() + + params = [fc1.weight, fc1.bias, fc2.weight, fc2.bias] + has_grads = all(p.grad is not None for p in params) + + console.print(f" All params have gradients: {has_grads}") + + if out.shape == (1, 6, 32) and has_grads: + console.print(" [green]✓ FFN works![/green]") + return True + else: + console.print(" [red]✗ FFN has issues[/red]") + return False + + +def test_residual_connection(): + """Test that residual connections preserve computation graph.""" + console.print("\n[bold cyan]Test 4: Residual Connections[/bold cyan]") + + embed_dim = 32 + + # Create layers + attention = MultiHeadAttention(embed_dim, 4) + ln = LayerNorm(embed_dim) + + # Input + x = Tensor(np.random.randn(1, 6, embed_dim)) + x.requires_grad = True + + # Residual connection + attn_out = attention.forward(x, mask=None) + residual = x + attn_out # This should preserve graph + out = ln(residual) + + console.print(f" Output shape: {out.shape}") + + # Gradient flow + loss = out.sum() + loss.backward() + + has_x_grad = x.grad is not None + has_attn_grads = all(p.grad is not None for p in attention.parameters()) + has_ln_grads = all(p.grad is not None for p in ln.parameters()) + + console.print(f" Input has gradient: {has_x_grad}") + console.print(f" Attention has gradients: {has_attn_grads}") + console.print(f" LayerNorm has gradients: {has_ln_grads}") + + if has_x_grad and has_attn_grads and has_ln_grads: + console.print(" [green]✓ Residual connection preserves gradients![/green]") + return True + else: + console.print(" [red]✗ Residual connection breaks gradients[/red]") + return False + + +def test_full_forward_pass(): + """Test full forward pass through 
transformer.""" + console.print("\n[bold cyan]Test 5: Full Forward Pass[/bold cyan]") + + # Import by loading the file directly (can't import modules starting with numbers) + import importlib.util + spec = importlib.util.spec_from_file_location( + "attention_proof", + "milestones/05_2017_transformer/00_vaswani_attention_proof.py" + ) + attention_proof = importlib.util.module_from_spec(spec) + spec.loader.exec_module(attention_proof) + ReversalTransformer = attention_proof.ReversalTransformer + + # Create model + model = ReversalTransformer(vocab_size=10, embed_dim=32, num_heads=4, seq_len=6) + + # Set requires_grad + for param in model.parameters(): + param.requires_grad = True + + # Input + x = Tensor(np.array([[1, 2, 3, 4, 5, 6]])) + + console.print(f" Input shape: {x.shape}") + + # Forward + logits = model(x) + + console.print(f" Output shape: {logits.shape}") + console.print(f" Expected: (1, 6, 10)") + + # Loss + target = Tensor(np.array([[6, 5, 4, 3, 2, 1]])) + loss_fn = CrossEntropyLoss() + + logits_2d = logits.reshape(-1, 10) + target_1d = target.reshape(-1) + loss = loss_fn(logits_2d, target_1d) + + console.print(f" Loss value: {loss.data:.4f}") + console.print(f" Loss has grad_fn: {loss._grad_fn is not None}") + + # Backward + loss.backward() + + # Check gradients + params_with_grad = sum(1 for p in model.parameters() if p.grad is not None) + total_params = len(model.parameters()) + + console.print(f" Parameters with gradients: {params_with_grad}/{total_params}") + + if logits.shape == (1, 6, 10) and params_with_grad == total_params: + console.print(" [green]✓ Full forward/backward pass works![/green]") + return True + else: + console.print(" [red]✗ Full pass has issues[/red]") + return False + + +def test_training_step(): + """Test that one training step actually updates weights.""" + console.print("\n[bold cyan]Test 6: Training Step Updates Weights[/bold cyan]") + + # Import by loading the file directly (can't import modules starting with numbers) + 
import importlib.util + spec = importlib.util.spec_from_file_location( + "attention_proof", + "milestones/05_2017_transformer/00_vaswani_attention_proof.py" + ) + attention_proof = importlib.util.module_from_spec(spec) + spec.loader.exec_module(attention_proof) + ReversalTransformer = attention_proof.ReversalTransformer + + # Create model + model = ReversalTransformer(vocab_size=10, embed_dim=32, num_heads=4, seq_len=6) + + # Set requires_grad + for param in model.parameters(): + param.requires_grad = True + + # Optimizer + optimizer = Adam(model.parameters(), lr=0.005) + loss_fn = CrossEntropyLoss() + + # Save initial weights + initial_weights = {} + for i, param in enumerate(model.parameters()): + initial_weights[i] = param.data.copy() + + # Training step + x = Tensor(np.array([[1, 2, 3, 4, 5, 6]])) + target = Tensor(np.array([[6, 5, 4, 3, 2, 1]])) + + logits = model(x) + logits_2d = logits.reshape(-1, 10) + target_1d = target.reshape(-1) + loss = loss_fn(logits_2d, target_1d) + + console.print(f" Initial loss: {loss.data:.4f}") + + loss.backward() + optimizer.step() + optimizer.zero_grad() + + # Check if weights changed + weights_changed = 0 + for i, param in enumerate(model.parameters()): + if not np.allclose(param.data, initial_weights[i], atol=1e-6): + weights_changed += 1 + + console.print(f" Weights changed: {weights_changed}/{len(model.parameters())}") + + if weights_changed == len(model.parameters()): + console.print(" [green]✓ All weights updated![/green]") + return True + else: + console.print(f" [yellow]⚠ Only {weights_changed} weights updated[/yellow]") + return False + + +def main(): + console.print(Panel.fit( + "[bold]Sequence Reversal Debug Suite[/bold]\n" + "Testing each component systematically", + border_style="cyan" + )) + + results = { + "Embedding Layer": test_embedding_layer(), + "Attention Layer": test_attention_layer(), + "FFN Layer": test_ffn_layer(), + "Residual Connections": test_residual_connection(), + "Full Forward Pass": 
test_full_forward_pass(), + "Training Step": test_training_step() + } + + console.print("\n" + "="*70) + console.print(Panel.fit( + "[bold]Summary[/bold]", + border_style="green" + )) + + for test_name, passed in results.items(): + status = "[green]✓ PASS[/green]" if passed else "[red]✗ FAIL[/red]" + console.print(f" {status} - {test_name}") + + all_passed = all(results.values()) + if all_passed: + console.print("\n[bold green]All tests passed! The issue might be hyperparameters.[/bold green]") + else: + console.print("\n[bold red]Some tests failed! Fix these components first.[/bold red]") + + console.print("="*70) + + +if __name__ == "__main__": + main() + diff --git a/modules/01_tensor/tensor.ipynb b/modules/01_tensor/tensor.ipynb new file mode 100644 index 00000000..ed016eb2 --- /dev/null +++ b/modules/01_tensor/tensor.ipynb @@ -0,0 +1,2241 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "1ff9f3d2", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "# Module 01: Tensor Foundation - Building Blocks of ML\n", + "\n", + "Welcome to Module 01! You're about to build the foundational Tensor class that powers all machine learning operations.\n", + "\n", + "## 🔗 Prerequisites & Progress\n", + "**You've Built**: Nothing - this is our foundation!\n", + "**You'll Build**: A complete Tensor class with arithmetic, matrix operations, and shape manipulation\n", + "**You'll Enable**: Foundation for activations, layers, and all future neural network components\n", + "\n", + "**Connection Map**:\n", + "```\n", + "NumPy Arrays → Tensor → Activations (Module 02)\n", + "(raw data) (ML ops) (intelligence)\n", + "```\n", + "\n", + "## Learning Objectives\n", + "By the end of this module, you will:\n", + "1. Implement a complete Tensor class with fundamental operations\n", + "2. Understand tensors as the universal data structure in ML\n", + "3. Test tensor operations with immediate validation\n", + "4. 
Prepare for gradient computation in Module 05\n", + "\n", + "Let's get started!\n", + "\n", + "## 📦 Where This Code Lives in the Final Package\n", + "\n", + "**Learning Side:** You work in modules/01_tensor/tensor_dev.py\n", + "**Building Side:** Code exports to tinytorch.core.tensor\n", + "\n", + "```python\n", + "# Final package structure:\n", + "# Future modules will import and extend this Tensor\n", + "```\n", + "\n", + "**Why this matters:**\n", + "- **Learning:** Complete tensor system in one focused module for deep understanding\n", + "- **Production:** Proper organization like PyTorch's torch.Tensor with all core operations together\n", + "- **Consistency:** All tensor operations and data manipulation in core.tensor\n", + "- **Integration:** Foundation that every other module will build upon" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f11c9ef5", + "metadata": { + "nbgrader": { + "grade": false, + "grade_id": "imports", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| default_exp core.tensor\n", + "#| export\n", + "\n", + "import numpy as np\n", + "\n", + "# Constants for memory calculations\n", + "BYTES_PER_FLOAT32 = 4 # Standard float32 size in bytes\n", + "KB_TO_BYTES = 1024 # Kilobytes to bytes conversion\n", + "MB_TO_BYTES = 1024 * 1024 # Megabytes to bytes conversion" + ] + }, + { + "cell_type": "markdown", + "id": "0939afba", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 📋 Module Dependencies\n", + "\n", + "**Prerequisites**: NONE - This is the foundation module\n", + "\n", + "**External Dependencies**:\n", + "- `numpy` (for array operations and numerical computing)\n", + "\n", + "**TinyTorch Dependencies**: NONE\n", + "\n", + "**Important**: This module has NO TinyTorch dependencies.\n", + "All future modules will import FROM this module.\n", + "\n", + "**Dependency Flow**:\n", + "```\n", + "Module 01 (Tensor) → All Future Modules\n", + " ↓\n", + " Foundation for entire TinyTorch 
system\n", + "```\n", + "\n", + "Students completing this module will have built the foundation\n", + "that every other TinyTorch component depends on." + ] + }, + { + "cell_type": "markdown", + "id": "d8af6619", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 1. Introduction: What is a Tensor?\n", + "\n", + "A tensor is a multi-dimensional array that serves as the fundamental data structure in machine learning. Think of it as a universal container that can hold data in different dimensions:\n", + "\n", + "```\n", + "Tensor Dimensions:\n", + "┌─────────────┐\n", + "│ 0D: Scalar │ 5.0 (just a number)\n", + "│ 1D: Vector │ [1, 2, 3] (list of numbers)\n", + "│ 2D: Matrix │ [[1, 2] (grid of numbers)\n", + "│ │ [3, 4]]\n", + "│ 3D: Cube │ [[[... (stack of matrices)\n", + "└─────────────┘\n", + "```\n", + "\n", + "In machine learning, tensors flow through operations like water through pipes:\n", + "\n", + "```\n", + "Neural Network Data Flow:\n", + "Input Tensor → Layer 1 → Activation → Layer 2 → ... → Output Tensor\n", + " [batch, [batch, [batch, [batch, [batch,\n", + " features] hidden] hidden] hidden2] classes]\n", + "```\n", + "\n", + "Every neural network, from simple linear regression to modern transformers, processes tensors. Understanding tensors means understanding the foundation of all ML computations.\n", + "\n", + "### Why Tensors Matter in ML Systems\n", + "\n", + "In production ML systems, tensors carry more than just data - they carry the computational graph, memory layout information, and execution context:\n", + "\n", + "```\n", + "Real ML Pipeline:\n", + "Raw Data → Preprocessing → Tensor Creation → Model Forward Pass → Loss Computation\n", + " ↓ ↓ ↓ ↓ ↓\n", + " Files NumPy Arrays Tensors GPU Tensors Scalar Loss\n", + "```\n", + "\n", + "**Key Insight**: Tensors bridge the gap between mathematical concepts and efficient computation on modern hardware." 
+ ] + }, + { + "cell_type": "markdown", + "id": "13208411", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 2. Foundations: Mathematical Background\n", + "\n", + "### Core Operations We'll Implement\n", + "\n", + "Our Tensor class will support all fundamental operations that neural networks need:\n", + "\n", + "```\n", + "Operation Types:\n", + "┌─────────────────┬─────────────────┬─────────────────┐\n", + "│ Element-wise │ Matrix Ops │ Shape Ops │\n", + "├─────────────────┼─────────────────┼─────────────────┤\n", + "│ + Addition │ @ Matrix Mult │ .reshape() │\n", + "│ - Subtraction │ .transpose() │ .sum() │\n", + "│ * Multiplication│ │ .mean() │\n", + "│ / Division │ │ .max() │\n", + "└─────────────────┴─────────────────┴─────────────────┘\n", + "```\n", + "\n", + "### Broadcasting: Making Tensors Work Together\n", + "\n", + "Broadcasting automatically aligns tensors of different shapes for operations:\n", + "\n", + "```\n", + "Broadcasting Examples:\n", + "┌─────────────────────────────────────────────────────────┐\n", + "│ Scalar + Vector: │\n", + "│ 5 + [1, 2, 3] → [5, 5, 5] + [1, 2, 3] = [6, 7, 8]│\n", + "│ │\n", + "│ Matrix + Vector (row-wise): │\n", + "│ [[1, 2]] [10] [[1, 2]] [[10, 10]] [[11, 12]] │\n", + "│ [[3, 4]] + [10] = [[3, 4]] + [[10, 10]] = [[13, 14]] │\n", + "└─────────────────────────────────────────────────────────┘\n", + "```\n", + "\n", + "**Memory Layout**: NumPy uses row-major (C-style) storage where elements are stored row by row in memory for cache efficiency:\n", + "\n", + "```\n", + "Memory Layout (2×3 matrix):\n", + "Matrix: Memory:\n", + "[[1, 2, 3] [1][2][3][4][5][6]\n", + " [4, 5, 6]] ↑ Row 1 ↑ Row 2\n", + "\n", + "Cache Behavior:\n", + "Sequential Access: Fast (uses cache lines efficiently)\n", + " Row access: [1][2][3] → cache hit, hit, hit\n", + "Random Access: Slow (cache misses)\n", + " Column access: [1][4] → cache hit, miss\n", + "```\n", + "\n", + "This memory layout affects performance in real ML workloads 
- algorithms that access data sequentially run faster than those that access randomly." + ] + }, + { + "cell_type": "markdown", + "id": "af97aeae", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 3. Implementation: Building Tensor Foundation\n", + "\n", + "Let's build our Tensor class step by step, testing each component as we go.\n", + "\n", + "**Key Design Decision**: We'll include gradient-related attributes from the start, but they'll remain dormant until Module 05. This ensures a consistent interface throughout the course while keeping the cognitive load manageable.\n", + "\n", + "### Tensor Class Architecture\n", + "\n", + "```\n", + "Tensor Class Structure:\n", + "┌─────────────────────────────────┐\n", + "│ Core Attributes: │\n", + "│ • data: np.array (the numbers) │\n", + "│ • shape: tuple (dimensions) │\n", + "│ • size: int (total elements) │\n", + "│ • dtype: type (float32, int64) │\n", + "├─────────────────────────────────┤\n", + "│ Gradient Attributes (dormant): │\n", + "│ • requires_grad: bool │\n", + "│ • grad: None (until Module 05) │\n", + "├─────────────────────────────────┤\n", + "│ Operations: │\n", + "│ • __add__, __sub__, __mul__ │\n", + "│ • matmul(), reshape() │\n", + "│ • sum(), mean(), max() │\n", + "│ • __repr__(), __str__() │\n", + "└─────────────────────────────────┘\n", + "```\n", + "\n", + "The beauty of this design: **all methods are defined inside the class from day one**. No monkey-patching, no dynamic attribute addition. Clean, consistent, debugger-friendly." + ] + }, + { + "cell_type": "markdown", + "id": "7c2a0180", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### Tensor Creation and Initialization\n", + "\n", + "Before we implement operations, let's understand how tensors store data and manage their attributes. 
This initialization is the foundation that everything else builds upon.\n", + "\n", + "```\n", + "Tensor Initialization Process:\n", + "Input Data → Validation → NumPy Array → Tensor Wrapper → Ready for Operations\n", + " [1,2,3] → types → np.array → shape=(3,) → + - * / @ ...\n", + " ↓ ↓ ↓ ↓\n", + " List/Array Type Check Memory Attributes Set\n", + " (optional) Allocation\n", + "\n", + "Memory Allocation Example:\n", + "Input: [[1, 2, 3], [4, 5, 6]]\n", + " ↓\n", + "NumPy allocates: [1][2][3][4][5][6] in contiguous memory\n", + " ↓\n", + "Tensor wraps with: shape=(2,3), size=6, dtype=int64\n", + "```\n", + "\n", + "**Key Design Principle**: Our Tensor is a wrapper around NumPy arrays that adds ML-specific functionality. We leverage NumPy's battle-tested memory management and computation kernels while adding the gradient tracking and operation chaining needed for deep learning.\n", + "\n", + "**Why This Approach?**\n", + "- **Performance**: NumPy's C implementations are highly optimized\n", + "- **Compatibility**: Easy integration with scientific Python ecosystem\n", + "- **Memory Efficiency**: No unnecessary data copying\n", + "- **Future-Proof**: Easy transition to GPU tensors in advanced modules" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b8476c7c", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "tensor-class", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| export\n", + "class Tensor:\n", + " \"\"\"Educational tensor that grows with student knowledge.\n", + "\n", + " This class starts simple but includes dormant features for future modules:\n", + " - requires_grad: Will be used for automatic differentiation (Module 05)\n", + " - grad: Will store computed gradients (Module 05)\n", + " - backward(): Will compute gradients (Module 05)\n", + "\n", + " For now, focus on: data, shape, and basic operations.\n", + " \"\"\"\n", + "\n", + " def __init__(self, data, 
requires_grad=False):\n", + " \"\"\"\n", + " Create a new tensor from data.\n", + "\n", + " TODO: Initialize tensor attributes\n", + "\n", + " APPROACH:\n", + " 1. Convert data to NumPy array - handles lists, scalars, etc.\n", + " 2. Store shape and size for quick access\n", + " 3. Set up gradient tracking (dormant until Module 05)\n", + "\n", + " EXAMPLE:\n", + " >>> tensor = Tensor([1, 2, 3])\n", + " >>> print(tensor.data)\n", + " [1 2 3]\n", + " >>> print(tensor.shape)\n", + " (3,)\n", + "\n", + " HINT: np.array() handles type conversion automatically\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " # Core tensor data - always present\n", + " self.data = np.array(data, dtype=np.float32) # Consistent float32 for ML\n", + " self.shape = self.data.shape\n", + " self.size = self.data.size\n", + " self.dtype = self.data.dtype\n", + "\n", + " # Gradient features (dormant until Module 05)\n", + " self.requires_grad = requires_grad\n", + " self.grad = None\n", + " ### END SOLUTION\n", + "\n", + " def __repr__(self):\n", + " \"\"\"String representation of tensor for debugging.\"\"\"\n", + " grad_info = f\", requires_grad={self.requires_grad}\" if self.requires_grad else \"\"\n", + " return f\"Tensor(data={self.data}, shape={self.shape}{grad_info})\"\n", + "\n", + " def __str__(self):\n", + " \"\"\"Human-readable string representation.\"\"\"\n", + " return f\"Tensor({self.data})\"\n", + "\n", + " def numpy(self):\n", + " \"\"\"Return the underlying NumPy array.\"\"\"\n", + " return self.data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ddb7f4ab", + "metadata": { + "nbgrader": { + "grade": false, + "grade_id": "addition-impl", + "solution": true + } + }, + "outputs": [], + "source": [ + " def __add__(self, other):\n", + " \"\"\"\n", + " Add two tensors element-wise with broadcasting support.\n", + "\n", + " TODO: Implement tensor addition with automatic broadcasting\n", + "\n", + " APPROACH:\n", + " 1. 
Handle both Tensor and scalar inputs\n", + " 2. Use NumPy's broadcasting for automatic shape alignment\n", + " 3. Return new Tensor with result (don't modify self)\n", + "\n", + " EXAMPLE:\n", + " >>> a = Tensor([1, 2, 3])\n", + " >>> b = Tensor([4, 5, 6])\n", + " >>> result = a + b\n", + " >>> print(result.data)\n", + " [5. 7. 9.]\n", + "\n", + " BROADCASTING EXAMPLE:\n", + " >>> matrix = Tensor([[1, 2], [3, 4]]) # Shape: (2, 2)\n", + " >>> vector = Tensor([10, 20]) # Shape: (2,)\n", + " >>> result = matrix + vector # Broadcasting: (2,2) + (2,) → (2,2)\n", + " >>> print(result.data)\n", + " [[11. 22.]\n", + " [13. 24.]]\n", + "\n", + " HINTS:\n", + " - Use isinstance() to check if other is a Tensor\n", + " - NumPy handles broadcasting automatically with +\n", + " - Always return a new Tensor, don't modify self\n", + " - Preserve gradient tracking for future modules\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " if isinstance(other, Tensor):\n", + " # Tensor + Tensor: let NumPy handle broadcasting\n", + " return Tensor(self.data + other.data)\n", + " else:\n", + " # Tensor + scalar: NumPy broadcasts automatically\n", + " return Tensor(self.data + other)\n", + " ### END SOLUTION" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fde58c98", + "metadata": { + "nbgrader": { + "grade": false, + "grade_id": "subtraction-impl", + "solution": true + } + }, + "outputs": [], + "source": [ + " def __sub__(self, other):\n", + " \"\"\"\n", + " Subtract two tensors element-wise.\n", + "\n", + " Common use: Centering data (x - mean), computing differences for loss functions.\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " if isinstance(other, Tensor):\n", + " return Tensor(self.data - other.data)\n", + " else:\n", + " return Tensor(self.data - other)\n", + " ### END SOLUTION" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75eec50f", + "metadata": { + "nbgrader": { + "grade": false, + "grade_id": "multiplication-impl", + 
"solution": true + } + }, + "outputs": [], + "source": [ + " def __mul__(self, other):\n", + " \"\"\"\n", + " Multiply two tensors element-wise (NOT matrix multiplication).\n", + "\n", + " Common use: Scaling features, applying masks, gating mechanisms in neural networks.\n", + " Note: This is * operator, not @ (which will be matrix multiplication).\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " if isinstance(other, Tensor):\n", + " return Tensor(self.data * other.data)\n", + " else:\n", + " return Tensor(self.data * other)\n", + " ### END SOLUTION" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2f717578", + "metadata": { + "lines_to_next_cell": 0, + "nbgrader": { + "grade": false, + "grade_id": "division-impl", + "solution": true + } + }, + "outputs": [], + "source": [ + " def __truediv__(self, other):\n", + " \"\"\"\n", + " Divide two tensors element-wise.\n", + "\n", + " Common use: Normalization (x / std), converting counts to probabilities.\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " if isinstance(other, Tensor):\n", + " return Tensor(self.data / other.data)\n", + " else:\n", + " return Tensor(self.data / other)\n", + " ### END SOLUTION\n", + "\n", + " # nbgrader={\"grade\": false, \"grade_id\": \"matmul-impl\", \"solution\": true}\n", + " def matmul(self, other):\n", + " \"\"\"\n", + " Matrix multiplication of two tensors.\n", + "\n", + " TODO: Implement matrix multiplication using np.dot with proper validation\n", + "\n", + " APPROACH:\n", + " 1. Validate inputs are Tensors\n", + " 2. Check dimension compatibility (inner dimensions must match)\n", + " 3. Use np.dot for optimized computation\n", + " 4. 
Return new Tensor with result\n", + "\n", + " EXAMPLE:\n", + " >>> a = Tensor([[1, 2], [3, 4]]) # 2×2\n", + " >>> b = Tensor([[5, 6], [7, 8]]) # 2×2\n", + " >>> result = a.matmul(b) # 2×2 result\n", + " >>> # Result: [[1×5+2×7, 1×6+2×8], [3×5+4×7, 3×6+4×8]] = [[19, 22], [43, 50]]\n", + "\n", + " SHAPE RULES:\n", + " - (M, K) @ (K, N) → (M, N) ✓ Valid\n", + " - (M, K) @ (J, N) → Error ✗ K ≠ J\n", + "\n", + " COMPLEXITY: O(M×N×K) for (M×K) @ (K×N) matrices\n", + "\n", + " HINTS:\n", + " - np.dot handles the optimization for us\n", + " - Check self.shape[-1] == other.shape[-2] for compatibility\n", + " - Provide clear error messages for debugging\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " if not isinstance(other, Tensor):\n", + " raise TypeError(f\"Expected Tensor for matrix multiplication, got {type(other)}\")\n", + "\n", + " # Handle edge cases\n", + " if self.shape == () or other.shape == ():\n", + " # Scalar multiplication\n", + " return Tensor(self.data * other.data)\n", + "\n", + " # For matrix multiplication, we need at least 1D tensors\n", + " if len(self.shape) == 0 or len(other.shape) == 0:\n", + " return Tensor(self.data * other.data)\n", + "\n", + " # Check dimension compatibility for matrix multiplication\n", + " if len(self.shape) >= 2 and len(other.shape) >= 2:\n", + " if self.shape[-1] != other.shape[-2]:\n", + " raise ValueError(\n", + " f\"Cannot perform matrix multiplication: {self.shape} @ {other.shape}. \"\n", + " f\"Inner dimensions must match: {self.shape[-1]} ≠ {other.shape[-2]}. \"\n", + " f\"💡 HINT: For (M,K) @ (K,N) → (M,N), the K dimensions must be equal.\"\n", + " )\n", + " elif len(self.shape) == 1 and len(other.shape) == 2:\n", + " # Vector @ Matrix\n", + " if self.shape[0] != other.shape[0]:\n", + " raise ValueError(\n", + " f\"Cannot multiply vector {self.shape} with matrix {other.shape}. 
\"\n", + " f\"Vector length {self.shape[0]} must match matrix rows {other.shape[0]}.\"\n", + " )\n", + " elif len(self.shape) == 2 and len(other.shape) == 1:\n", + " # Matrix @ Vector\n", + " if self.shape[1] != other.shape[0]:\n", + " raise ValueError(\n", + " f\"Cannot multiply matrix {self.shape} with vector {other.shape}. \"\n", + " f\"Matrix columns {self.shape[1]} must match vector length {other.shape[0]}.\"\n", + " )\n", + "\n", + " # Perform optimized matrix multiplication\n", + " # Use np.matmul (not np.dot) for proper batched matrix multiplication with 3D+ tensors\n", + " result_data = np.matmul(self.data, other.data)\n", + " return Tensor(result_data)\n", + " ### END SOLUTION\n", + "\n", + " # nbgrader={\"grade\": false, \"grade_id\": \"shape-ops\", \"solution\": true}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1a41b233", + "metadata": { + "nbgrader": { + "grade": false, + "grade_id": "getitem-impl", + "solution": true + } + }, + "outputs": [], + "source": [ + " def __getitem__(self, key):\n", + " \"\"\"\n", + " Enable indexing and slicing operations on Tensors.\n", + " \n", + " This allows Tensors to be indexed like NumPy arrays while preserving\n", + " gradient computation capabilities (when autograd is enabled in Module 05).\n", + " \n", + " TODO: Implement tensor indexing/slicing with gradient support\n", + " \n", + " APPROACH:\n", + " 1. Use NumPy's indexing to slice the underlying data\n", + " 2. Create new Tensor with sliced data\n", + " 3. Preserve requires_grad flag\n", + " 4. 
Store backward function (if autograd enabled - Module 05)\n", + " \n", + " EXAMPLES:\n", + " >>> x = Tensor([1, 2, 3, 4, 5])\n", + " >>> x[0] # Single element: Tensor(1)\n", + " >>> x[:3] # Slice: Tensor([1, 2, 3])\n", + " >>> x[1:4] # Range: Tensor([2, 3, 4])\n", + " >>> \n", + " >>> y = Tensor([[1, 2, 3], [4, 5, 6]])\n", + " >>> y[0] # Row: Tensor([1, 2, 3])\n", + " >>> y[:, 1] # Column: Tensor([2, 5])\n", + " >>> y[0, 1:3] # Mixed: Tensor([2, 3])\n", + " \n", + " GRADIENT BEHAVIOR (Module 05):\n", + " - Slicing preserves gradient flow\n", + " - Gradients flow back to original positions\n", + " - Example: x[:3].backward() updates x.grad[:3]\n", + " \n", + " HINTS:\n", + " - NumPy handles the indexing: self.data[key]\n", + " - Result is always a Tensor (even single elements)\n", + " - Preserve requires_grad for gradient tracking\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " # Perform the indexing on underlying NumPy array\n", + " result_data = self.data[key]\n", + " \n", + " # Ensure result is always an array (even for scalar indexing)\n", + " if not isinstance(result_data, np.ndarray):\n", + " result_data = np.array(result_data)\n", + " \n", + " # Create new Tensor with sliced data\n", + " result = Tensor(result_data, requires_grad=self.requires_grad)\n", + " \n", + " # If gradients are tracked and autograd is available, attach backward function\n", + " # Note: This will be used by Module 05 (Autograd)\n", + " if self.requires_grad:\n", + " # Check if SliceBackward exists (added in Module 05)\n", + " try:\n", + " from tinytorch.core.autograd import SliceBackward\n", + " result._grad_fn = SliceBackward(self, key)\n", + " except (ImportError, AttributeError):\n", + " # Autograd not yet available - gradient tracking will be added in Module 05\n", + " pass\n", + " \n", + " return result\n", + " ### END SOLUTION\n", + "\n", + " def reshape(self, *shape):\n", + " \"\"\"\n", + " Reshape tensor to new dimensions.\n", + "\n", + " TODO: Implement tensor reshaping with 
validation\n", + "\n", + " APPROACH:\n", + " 1. Handle different calling conventions: reshape(2, 3) vs reshape((2, 3))\n", + " 2. Validate total elements remain the same\n", + " 3. Use NumPy's reshape for the actual operation\n", + " 4. Return new Tensor (keep immutability)\n", + "\n", + " EXAMPLE:\n", + " >>> tensor = Tensor([1, 2, 3, 4, 5, 6]) # Shape: (6,)\n", + " >>> reshaped = tensor.reshape(2, 3) # Shape: (2, 3)\n", + " >>> print(reshaped.data)\n", + " [[1. 2. 3.]\n", + " [4. 5. 6.]]\n", + "\n", + " COMMON USAGE:\n", + " >>> # Flatten for MLP input\n", + " >>> image = Tensor(np.random.rand(3, 32, 32)) # (channels, height, width)\n", + " >>> flattened = image.reshape(-1) # (3072,) - all pixels in vector\n", + " >>>\n", + " >>> # Prepare batch for convolution\n", + " >>> batch = Tensor(np.random.rand(32, 784)) # (batch, features)\n", + " >>> images = batch.reshape(32, 1, 28, 28) # (batch, channels, height, width)\n", + "\n", + " HINTS:\n", + " - Handle both reshape(2, 3) and reshape((2, 3)) calling styles\n", + " - Check np.prod(new_shape) == self.size for validation\n", + " - Use descriptive error messages for debugging\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " # Handle both reshape(2, 3) and reshape((2, 3)) calling conventions\n", + " if len(shape) == 1 and isinstance(shape[0], (tuple, list)):\n", + " new_shape = tuple(shape[0])\n", + " else:\n", + " new_shape = shape\n", + "\n", + " # Handle -1 for automatic dimension inference (like NumPy)\n", + " if -1 in new_shape:\n", + " if new_shape.count(-1) > 1:\n", + " raise ValueError(\n", + " \"Can only specify one unknown dimension with -1.\\n\"\n", + " \" Issue: Reshape allows one -1 to auto-calculate that dimension.\\n\"\n", + " \" Fix: Specify only one -1 in the new_shape tuple.\"\n", + " )\n", + "\n", + " # Calculate the unknown dimension\n", + " known_size = 1\n", + " unknown_idx = new_shape.index(-1)\n", + " for i, dim in enumerate(new_shape):\n", + " if i != unknown_idx:\n", + " known_size *= 
dim\n", + "\n", + " unknown_dim = self.size // known_size\n", + " new_shape = list(new_shape)\n", + " new_shape[unknown_idx] = unknown_dim\n", + " new_shape = tuple(new_shape)\n", + "\n", + " # Validate total elements remain the same\n", + " if np.prod(new_shape) != self.size:\n", + " raise ValueError(\n", + " f\"Cannot reshape tensor of size {self.size} to shape {new_shape}. \"\n", + " f\"Total elements must match: {self.size} ≠ {np.prod(new_shape)}. \"\n", + " f\"💡 HINT: Make sure new_shape dimensions multiply to {self.size}\"\n", + " )\n", + "\n", + " # Reshape the data (NumPy handles the memory layout efficiently)\n", + " reshaped_data = np.reshape(self.data, new_shape)\n", + " # Preserve gradient tracking from the original tensor (important for autograd!)\n", + " result = Tensor(reshaped_data, requires_grad=self.requires_grad)\n", + " return result\n", + " ### END SOLUTION\n", + "\n", + " def transpose(self, dim0=None, dim1=None):\n", + " \"\"\"\n", + " Transpose tensor dimensions.\n", + "\n", + " TODO: Implement tensor transposition\n", + "\n", + " APPROACH:\n", + " 1. Handle default case (transpose last two dimensions)\n", + " 2. Handle specific dimension swapping\n", + " 3. Use NumPy's transpose with proper axis specification\n", + " 4. Return new Tensor\n", + "\n", + " EXAMPLE:\n", + " >>> matrix = Tensor([[1, 2, 3], [4, 5, 6]]) # (2, 3)\n", + " >>> transposed = matrix.transpose() # (3, 2)\n", + " >>> print(transposed.data)\n", + " [[1. 4.]\n", + " [2. 5.]\n", + " [3. 
6.]]\n", + "\n", + " NEURAL NETWORK USAGE:\n", + " >>> # Weight matrix transpose for backward pass\n", + " >>> W = Tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]) # (3, 2)\n", + " >>> W_T = W.transpose() # (2, 3) - for gradient computation\n", + " >>>\n", + " >>> # Attention mechanism\n", + " >>> Q = Tensor([[1, 2], [3, 4]]) # queries (2, 2)\n", + " >>> K = Tensor([[5, 6], [7, 8]]) # keys (2, 2)\n", + " >>> attention_scores = Q.matmul(K.transpose()) # Q @ K^T\n", + "\n", + " HINTS:\n", + " - Default: transpose last two dimensions (most common case)\n", + " - Use np.transpose() with axes parameter\n", + " - Handle 1D tensors gracefully (transpose is identity)\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " if dim0 is None and dim1 is None:\n", + " # Default: transpose last two dimensions\n", + " if len(self.shape) < 2:\n", + " # For 1D tensors, transpose is identity operation\n", + " return Tensor(self.data.copy())\n", + " else:\n", + " # Transpose last two dimensions (most common in ML)\n", + " axes = list(range(len(self.shape)))\n", + " axes[-2], axes[-1] = axes[-1], axes[-2]\n", + " transposed_data = np.transpose(self.data, axes)\n", + " else:\n", + " # Specific dimensions to transpose\n", + " if dim0 is None or dim1 is None:\n", + " raise ValueError(\n", + " \"Both dim0 and dim1 must be specified for specific dimension transpose.\\n\"\n", + " \" Issue: transpose(dim0, dim1) requires both dimension indices.\\n\"\n", + " \" Fix: Provide both dim0 and dim1, e.g., tensor.transpose(0, 1).\"\n", + " )\n", + "\n", + " # Validate dimensions exist\n", + " if dim0 >= len(self.shape) or dim1 >= len(self.shape) or dim0 < 0 or dim1 < 0:\n", + " raise ValueError(\n", + " f\"Dimension out of range for tensor with shape {self.shape}. 
\"\n", + " f\"Got dim0={dim0}, dim1={dim1}, but tensor has {len(self.shape)} dimensions.\"\n", + " )\n", + "\n", + " # Create axes list and swap the specified dimensions\n", + " axes = list(range(len(self.shape)))\n", + " axes[dim0], axes[dim1] = axes[dim1], axes[dim0]\n", + " transposed_data = np.transpose(self.data, axes)\n", + "\n", + " # Preserve requires_grad for gradient tracking (Module 05 will add _grad_fn)\n", + " result = Tensor(transposed_data, requires_grad=self.requires_grad)\n", + " return result\n", + " ### END SOLUTION\n", + "\n", + " # nbgrader={\"grade\": false, \"grade_id\": \"reduction-ops\", \"solution\": true}\n", + " def sum(self, axis=None, keepdims=False):\n", + " \"\"\"\n", + " Sum tensor along specified axis.\n", + "\n", + " TODO: Implement tensor sum with axis control\n", + "\n", + " APPROACH:\n", + " 1. Use NumPy's sum with axis parameter\n", + " 2. Handle axis=None (sum all elements) vs specific axis\n", + " 3. Support keepdims to maintain shape for broadcasting\n", + " 4. 
Return new Tensor with result\n", + "\n", + " EXAMPLE:\n", + " >>> tensor = Tensor([[1, 2], [3, 4]])\n", + " >>> total = tensor.sum() # Sum all elements: 10\n", + " >>> col_sum = tensor.sum(axis=0) # Sum columns: [4, 6]\n", + " >>> row_sum = tensor.sum(axis=1) # Sum rows: [3, 7]\n", + "\n", + " NEURAL NETWORK USAGE:\n", + " >>> # Batch loss computation\n", + " >>> batch_losses = Tensor([0.1, 0.3, 0.2, 0.4]) # Individual losses\n", + " >>> total_loss = batch_losses.sum() # Total: 1.0\n", + " >>> avg_loss = batch_losses.mean() # Average: 0.25\n", + " >>>\n", + " >>> # Global average pooling\n", + " >>> feature_maps = Tensor(np.random.rand(32, 256, 7, 7)) # (batch, channels, h, w)\n", + " >>> global_features = feature_maps.sum(axis=(2, 3)) # (batch, channels)\n", + "\n", + " HINTS:\n", + " - np.sum handles all the complexity for us\n", + " - axis=None sums all elements (returns scalar)\n", + " - axis=0 sums along first dimension, axis=1 along second, etc.\n", + " - keepdims=True preserves dimensions for broadcasting\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " result = np.sum(self.data, axis=axis, keepdims=keepdims)\n", + " return Tensor(result)\n", + " ### END SOLUTION" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "616cd6f6", + "metadata": { + "nbgrader": { + "grade": false, + "grade_id": "mean-impl", + "solution": true + } + }, + "outputs": [], + "source": [ + " def mean(self, axis=None, keepdims=False):\n", + " \"\"\"\n", + " Compute mean of tensor along specified axis.\n", + "\n", + " Common usage: Batch normalization, loss averaging, global pooling.\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " result = np.mean(self.data, axis=axis, keepdims=keepdims)\n", + " return Tensor(result)\n", + " ### END SOLUTION" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a0b461cb", + "metadata": { + "nbgrader": { + "grade": false, + "grade_id": "max-impl", + "solution": true + } + }, + "outputs": [], + "source": [ + " def 
max(self, axis=None, keepdims=False):\n", + " \"\"\"\n", + " Find maximum values along specified axis.\n", + "\n", + " Common usage: Max pooling, finding best predictions, activation clipping.\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " result = np.max(self.data, axis=axis, keepdims=keepdims)\n", + " return Tensor(result)\n", + " ### END SOLUTION\n", + "\n", + " # nbgrader={\"grade\": false, \"grade_id\": \"gradient-placeholder\", \"solution\": true}\n", + " def backward(self):\n", + " \"\"\"\n", + " Compute gradients (implemented in Module 05: Autograd).\n", + "\n", + " TODO: Placeholder implementation for gradient computation\n", + "\n", + " STUDENT NOTE:\n", + " This method exists but does nothing until Module 05: Autograd.\n", + " Don't worry about it for now - focus on the basic tensor operations.\n", + "\n", + " In Module 05, we'll implement:\n", + " - Gradient computation via chain rule\n", + " - Automatic differentiation\n", + " - Backpropagation through operations\n", + " - Computation graph construction\n", + "\n", + " FUTURE IMPLEMENTATION PREVIEW:\n", + " ```python\n", + " def backward(self, gradient=None):\n", + " # Module 05 will implement:\n", + " # 1. Set gradient for this tensor\n", + " # 2. Propagate to parent operations\n", + " # 3. Apply chain rule recursively\n", + " # 4. 
Accumulate gradients properly\n", + " pass\n", + " ```\n", + "\n", + " CURRENT BEHAVIOR:\n", + " >>> x = Tensor([1, 2, 3], requires_grad=True)\n", + " >>> y = x * 2\n", + " >>> y.sum().backward() # Calls this method - does nothing\n", + " >>> print(x.grad) # Still None\n", + " None\n", + " \"\"\"\n", + " ### BEGIN SOLUTION\n", + " # Placeholder - will be implemented in Module 05\n", + " # For now, just ensure it doesn't crash when called\n", + " # This allows students to experiment with gradient syntax\n", + " # without getting confusing errors about missing methods\n", + " pass\n", + " ### END SOLUTION" + ] + }, + { + "cell_type": "markdown", + "id": "df42c2fa", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### 🧪 Unit Test: Tensor Creation\n", + "\n", + "This test validates our Tensor constructor works correctly with various data types and properly initializes all attributes.\n", + "\n", + "**What we're testing**: Basic tensor creation and attribute setting\n", + "**Why it matters**: Foundation for all other operations - if creation fails, nothing works\n", + "**Expected**: Tensor wraps data correctly with proper attributes and consistent dtype" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "333452fe", + "metadata": { + "nbgrader": { + "grade": true, + "grade_id": "test-tensor-creation", + "locked": true, + "points": 10 + } + }, + "outputs": [], + "source": [ + "def test_unit_tensor_creation():\n", + " \"\"\"🧪 Test Tensor creation with various data types.\"\"\"\n", + " print(\"🧪 Unit Test: Tensor Creation...\")\n", + "\n", + " # Test scalar creation\n", + " scalar = Tensor(5.0)\n", + " assert scalar.data == 5.0\n", + " assert scalar.shape == ()\n", + " assert scalar.size == 1\n", + " assert scalar.requires_grad == False\n", + " assert scalar.grad is None\n", + " assert scalar.dtype == np.float32\n", + "\n", + " # Test vector creation\n", + " vector = Tensor([1, 2, 3])\n", + " assert 
np.array_equal(vector.data, np.array([1, 2, 3], dtype=np.float32))\n", + " assert vector.shape == (3,)\n", + " assert vector.size == 3\n", + "\n", + " # Test matrix creation\n", + " matrix = Tensor([[1, 2], [3, 4]])\n", + " assert np.array_equal(matrix.data, np.array([[1, 2], [3, 4]], dtype=np.float32))\n", + " assert matrix.shape == (2, 2)\n", + " assert matrix.size == 4\n", + "\n", + " # Test gradient flag (dormant feature)\n", + " grad_tensor = Tensor([1, 2], requires_grad=True)\n", + " assert grad_tensor.requires_grad == True\n", + " assert grad_tensor.grad is None # Still None until Module 05\n", + "\n", + " print(\"✅ Tensor creation works correctly!\")\n", + "\n", + "if __name__ == \"__main__\":\n", + " test_unit_tensor_creation()" + ] + }, + { + "cell_type": "markdown", + "id": "40f9ba8f", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## Element-wise Arithmetic Operations\n", + "\n", + "Element-wise operations are the workhorses of neural network computation. They apply the same operation to corresponding elements in tensors, often with broadcasting to handle different shapes elegantly.\n", + "\n", + "### Why Element-wise Operations Matter\n", + "\n", + "In neural networks, element-wise operations appear everywhere:\n", + "- **Activation functions**: Apply ReLU, sigmoid to every element\n", + "- **Batch normalization**: Subtract mean, divide by std per element\n", + "- **Loss computation**: Compare predictions vs. targets element-wise\n", + "- **Gradient updates**: Add scaled gradients to parameters element-wise\n", + "\n", + "### Element-wise Addition: The Foundation\n", + "\n", + "Addition is the simplest and most fundamental operation. 
Understanding it deeply helps with all others.\n", + "\n", + "```\n", + "Element-wise Addition Visual:\n", + "[1, 2, 3] + [4, 5, 6] = [1+4, 2+5, 3+6] = [5, 7, 9]\n", + "\n", + "Matrix Addition:\n", + "[[1, 2]] [[5, 6]] [[1+5, 2+6]] [[6, 8]]\n", + "[[3, 4]] + [[7, 8]] = [[3+7, 4+8]] = [[10, 12]]\n", + "\n", + "Broadcasting Addition (Matrix + Vector):\n", + "[[1, 2]] [10] [[1, 2]] [[10, 10]] [[11, 12]]\n", + "[[3, 4]] + [20] = [[3, 4]] + [[20, 20]] = [[23, 24]]\n", + " ↑ ↑ ↑ ↑ ↑\n", + " (2,2) (2,1) (2,2) broadcast result\n", + "\n", + "Broadcasting Rules:\n", + "1. Start from rightmost dimension\n", + "2. Dimensions must be equal OR one must be 1 OR one must be missing\n", + "3. Missing dimensions are assumed to be 1\n", + "```\n", + "\n", + "**Key Insight**: Broadcasting makes tensors of different shapes compatible by automatically expanding dimensions. This is crucial for batch processing where you often add a single bias vector to an entire batch of data.\n", + "\n", + "**Memory Efficiency**: Broadcasting doesn't actually create expanded copies in memory - NumPy computes results on-the-fly, saving memory." + ] + }, + { + "cell_type": "markdown", + "id": "5492e66f", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 2 + }, + "source": [ + "### Subtraction, Multiplication, and Division\n", + "\n", + "These operations follow the same pattern as addition, working element-wise with broadcasting support. 
Each serves specific purposes in neural networks:\n", + "\n", + "```\n", + "Element-wise Operations in Neural Networks:\n", + "\n", + "┌─────────────────┬─────────────────┬─────────────────┬─────────────────┐\n", + "│ Subtraction │ Multiplication │ Division │ Use Cases │\n", + "├─────────────────┼─────────────────┼─────────────────┼─────────────────┤\n", + "│ [6,8] - [1,2] │ [2,3] * [4,5] │ [8,9] / [2,3] │ • Gradient │\n", + "│ = [5,6] │ = [8,15] │ = [4.0, 3.0] │ computation │\n", + "│ │ │ │ • Normalization │\n", + "│ Center data: │ Gate values: │ Scale features: │ • Loss functions│\n", + "│ x - mean │ x * mask │ x / std │ • Attention │\n", + "└─────────────────┴─────────────────┴─────────────────┴─────────────────┘\n", + "\n", + "Broadcasting with Scalars (very common in ML):\n", + "[1, 2, 3] * 2 = [2, 4, 6] (scale all values)\n", + "[1, 2, 3] - 1 = [0, 1, 2] (shift all values)\n", + "[2, 4, 6] / 2 = [1, 2, 3] (normalize all values)\n", + "\n", + "Real ML Example - Batch Normalization:\n", + "batch_data = [[1, 2], [3, 4], [5, 6]] # Shape: (3, 2)\n", + "mean = [3, 4] # Shape: (2,)\n", + "std = [2, 2] # Shape: (2,)\n", + "\n", + "# Normalize: (x - mean) / std\n", + "normalized = (batch_data - mean) / std\n", + "# Broadcasting: (3,2) - (2,) = (3,2), then (3,2) / (2,) = (3,2)\n", + "```\n", + "\n", + "**Performance Note**: Element-wise operations are highly optimized in NumPy and run efficiently on modern CPUs with vectorization (SIMD instructions)." 
+ ] + }, + { + "cell_type": "markdown", + "id": "178ea8e9", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### 🧪 Unit Test: Arithmetic Operations\n", + "\n", + "This test validates our arithmetic operations work correctly with both tensor-tensor and tensor-scalar operations, including broadcasting behavior.\n", + "\n", + "**What we're testing**: Addition, subtraction, multiplication, division with broadcasting\n", + "**Why it matters**: Foundation for neural network forward passes, batch processing, normalization\n", + "**Expected**: Operations work with both tensors and scalars, proper broadcasting alignment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45d35e25", + "metadata": { + "nbgrader": { + "grade": true, + "grade_id": "test-arithmetic", + "locked": true, + "points": 15 + } + }, + "outputs": [], + "source": [ + "def test_unit_arithmetic_operations():\n", + " \"\"\"🧪 Test arithmetic operations with broadcasting.\"\"\"\n", + " print(\"🧪 Unit Test: Arithmetic Operations...\")\n", + "\n", + " # Test tensor + tensor\n", + " a = Tensor([1, 2, 3])\n", + " b = Tensor([4, 5, 6])\n", + " result = a + b\n", + " assert np.array_equal(result.data, np.array([5, 7, 9], dtype=np.float32))\n", + "\n", + " # Test tensor + scalar (very common in ML)\n", + " result = a + 10\n", + " assert np.array_equal(result.data, np.array([11, 12, 13], dtype=np.float32))\n", + "\n", + " # Test broadcasting with different shapes (matrix + vector)\n", + " matrix = Tensor([[1, 2], [3, 4]])\n", + " vector = Tensor([10, 20])\n", + " result = matrix + vector\n", + " expected = np.array([[11, 22], [13, 24]], dtype=np.float32)\n", + " assert np.array_equal(result.data, expected)\n", + "\n", + " # Test subtraction (data centering)\n", + " result = b - a\n", + " assert np.array_equal(result.data, np.array([3, 3, 3], dtype=np.float32))\n", + "\n", + " # Test multiplication (scaling)\n", + " result = a * 2\n", + " assert 
np.array_equal(result.data, np.array([2, 4, 6], dtype=np.float32))\n", + "\n", + " # Test division (normalization)\n", + " result = b / 2\n", + " assert np.array_equal(result.data, np.array([2.0, 2.5, 3.0], dtype=np.float32))\n", + "\n", + " # Test chaining operations (common in ML pipelines)\n", + " normalized = (a - 2) / 2 # Center and scale\n", + " expected = np.array([-0.5, 0.0, 0.5], dtype=np.float32)\n", + " assert np.allclose(normalized.data, expected)\n", + "\n", + " print(\"✅ Arithmetic operations work correctly!\")\n", + "\n", + "if __name__ == \"__main__\":\n", + " test_unit_arithmetic_operations()" + ] + }, + { + "cell_type": "markdown", + "id": "79d4de15", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 2 + }, + "source": [ + "## Matrix Multiplication: The Heart of Neural Networks\n", + "\n", + "Matrix multiplication is fundamentally different from element-wise multiplication. It's the operation that gives neural networks their power to transform and combine information across features.\n", + "\n", + "### Why Matrix Multiplication is Central to ML\n", + "\n", + "Every neural network layer essentially performs matrix multiplication:\n", + "\n", + "```\n", + "Linear Layer (the building block of neural networks):\n", + "Input Features × Weight Matrix = Output Features\n", + " (N, D_in) × (D_in, D_out) = (N, D_out)\n", + "\n", + "Real Example - Image Classification:\n", + "Flattened Image × Hidden Weights = Hidden Features\n", + " (32, 784) × (784, 256) = (32, 256)\n", + " ↑ ↑ ↑\n", + " 32 images 784→256 transform 32 feature vectors\n", + "```\n", + "\n", + "### Matrix Multiplication Visualization\n", + "\n", + "```\n", + "Matrix Multiplication Process:\n", + " A (2×3) B (3×2) C (2×2)\n", + " ┌ ┐ ┌ ┐ ┌ ┐\n", + " │ 1 2 3 │ │ 7 8 │ │ 1×7+2×9+3×1 │ ┌ ┐\n", + " │ │ × │ 9 1 │ = │ │ = │ 28 16│\n", + " │ 4 5 6 │ │ 1 2 │ │ 4×7+5×9+6×1 │ │ 79 49│\n", + " └ ┘ └ ┘ └ ┘ └ ┘\n", + "\n", + "Computation Breakdown:\n", + "C[0,0] = A[0,:] · B[:,0] = 
[1,2,3] · [7,9,1] = 1×7 + 2×9 + 3×1 = 28\n", + "C[0,1] = A[0,:] · B[:,1] = [1,2,3] · [8,1,2] = 1×8 + 2×1 + 3×2 = 16\n", + "C[1,0] = A[1,:] · B[:,0] = [4,5,6] · [7,9,1] = 4×7 + 5×9 + 6×1 = 79\n", + "C[1,1] = A[1,:] · B[:,1] = [4,5,6] · [8,1,2] = 4×8 + 5×1 + 6×2 = 49\n", + "\n", + "Key Rule: Inner dimensions must match!\n", + "A(m,n) @ B(n,p) = C(m,p)\n", + " ↑ ↑\n", + " these must be equal\n", + "```\n", + "\n", + "### Computational Complexity and Performance\n", + "\n", + "```\n", + "Computational Cost:\n", + "For C = A @ B where A is (M×K), B is (K×N):\n", + "- Multiplications: M × N × K\n", + "- Additions: M × N × (K-1) ≈ M × N × K\n", + "- Total FLOPs: ≈ 2 × M × N × K\n", + "\n", + "Example: (1000×1000) @ (1000×1000)\n", + "- FLOPs: 2 × 1000³ = 2 billion operations\n", + "- On 1 GHz CPU: ~2 seconds if no optimization\n", + "- With optimized BLAS: ~0.1 seconds (20× speedup!)\n", + "\n", + "Memory Access Pattern:\n", + "A: M×K (row-wise access) ✓ Good cache locality\n", + "B: K×N (column-wise) ✗ Poor cache locality\n", + "C: M×N (row-wise write) ✓ Good cache locality\n", + "\n", + "This is why optimized libraries like OpenBLAS, Intel MKL use:\n", + "- Blocking algorithms (process in cache-sized chunks)\n", + "- Vectorization (SIMD instructions)\n", + "- Parallelization (multiple cores)\n", + "```\n", + "\n", + "### Neural Network Context\n", + "\n", + "```\n", + "Multi-layer Neural Network:\n", + "Input (batch=32, features=784)\n", + " ↓ W1: (784, 256)\n", + "Hidden1 (batch=32, features=256)\n", + " ↓ W2: (256, 128)\n", + "Hidden2 (batch=32, features=128)\n", + " ↓ W3: (128, 10)\n", + "Output (batch=32, classes=10)\n", + "\n", + "Each arrow represents a matrix multiplication:\n", + "- Forward pass: 3 matrix multiplications\n", + "- Backward pass: 3 more matrix multiplications (with transposes)\n", + "- Total: 6 matrix mults per forward+backward pass\n", + "\n", + "For training batch: 32 × (784×256 + 256×128 + 128×10) FLOPs\n", + "= 32 × (200,704 + 32,768 + 1,280) 
= 32 × 234,752 = 7.5M FLOPs per batch\n", + "```\n", + "\n", + "This is why GPU acceleration matters - modern GPUs can perform thousands of these operations in parallel!" + ] + }, + { + "cell_type": "markdown", + "id": "31d52df2", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### 🧪 Unit Test: Matrix Multiplication\n", + "\n", + "This test validates matrix multiplication works correctly with proper shape checking and error handling.\n", + "\n", + "**What we're testing**: Matrix multiplication with shape validation and edge cases\n", + "**Why it matters**: Core operation in neural networks (linear layers, attention mechanisms)\n", + "**Expected**: Correct results for valid shapes, clear error messages for invalid shapes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "58c5b9c9", + "metadata": { + "nbgrader": { + "grade": true, + "grade_id": "test-matmul", + "locked": true, + "points": 15 + } + }, + "outputs": [], + "source": [ + "def test_unit_matrix_multiplication():\n", + " \"\"\"🧪 Test matrix multiplication operations.\"\"\"\n", + " print(\"🧪 Unit Test: Matrix Multiplication...\")\n", + "\n", + " # Test 2×2 matrix multiplication (basic case)\n", + " a = Tensor([[1, 2], [3, 4]]) # 2×2\n", + " b = Tensor([[5, 6], [7, 8]]) # 2×2\n", + " result = a.matmul(b)\n", + " # Expected: [[1×5+2×7, 1×6+2×8], [3×5+4×7, 3×6+4×8]] = [[19, 22], [43, 50]]\n", + " expected = np.array([[19, 22], [43, 50]], dtype=np.float32)\n", + " assert np.array_equal(result.data, expected)\n", + "\n", + " # Test rectangular matrices (common in neural networks)\n", + " c = Tensor([[1, 2, 3], [4, 5, 6]]) # 2×3 (like batch_size=2, features=3)\n", + " d = Tensor([[7, 8], [9, 10], [11, 12]]) # 3×2 (like features=3, outputs=2)\n", + " result = c.matmul(d)\n", + " # Expected: [[1×7+2×9+3×11, 1×8+2×10+3×12], [4×7+5×9+6×11, 4×8+5×10+6×12]]\n", + " expected = np.array([[58, 64], [139, 154]], dtype=np.float32)\n", + " assert 
np.array_equal(result.data, expected)\n", + "\n", + " # Test matrix-vector multiplication (common in forward pass)\n", + " matrix = Tensor([[1, 2, 3], [4, 5, 6]]) # 2×3\n", + " vector = Tensor([1, 2, 3]) # 3×1 (conceptually)\n", + " result = matrix.matmul(vector)\n", + " # Expected: [1×1+2×2+3×3, 4×1+5×2+6×3] = [14, 32]\n", + " expected = np.array([14, 32], dtype=np.float32)\n", + " assert np.array_equal(result.data, expected)\n", + "\n", + " # Test shape validation - should raise clear error\n", + " try:\n", + " incompatible_a = Tensor([[1, 2]]) # 1×2\n", + " incompatible_b = Tensor([[1], [2], [3]]) # 3×1\n", + " incompatible_a.matmul(incompatible_b) # 1×2 @ 3×1 should fail (2 ≠ 3)\n", + " assert False, \"Should have raised ValueError for incompatible shapes\"\n", + " except ValueError as e:\n", + " assert \"Inner dimensions must match\" in str(e)\n", + " assert \"2 ≠ 3\" in str(e) # Should show specific dimensions\n", + "\n", + " print(\"✅ Matrix multiplication works correctly!\")\n", + "\n", + "if __name__ == \"__main__\":\n", + " test_unit_matrix_multiplication()" + ] + }, + { + "cell_type": "markdown", + "id": "74bd602f", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 2 + }, + "source": [ + "## Shape Manipulation: Reshape and Transpose\n", + "\n", + "Neural networks constantly change tensor shapes to match layer requirements. 
Understanding these operations is crucial for data flow through networks.\n", + "\n", + "### Why Shape Manipulation Matters\n", + "\n", + "Real neural networks require constant shape changes:\n", + "\n", + "```\n", + "CNN Data Flow Example:\n", + "Input Image: (32, 3, 224, 224) # batch, channels, height, width\n", + " ↓ Convolutional layers\n", + "Feature Maps: (32, 512, 7, 7) # batch, features, spatial\n", + " ↓ Global Average Pool\n", + "Pooled: (32, 512, 1, 1) # batch, features, 1, 1\n", + " ↓ Flatten for classifier\n", + "Flattened: (32, 512) # batch, features\n", + " ↓ Linear classifier\n", + "Output: (32, 1000) # batch, classes\n", + "\n", + "Each ↓ involves reshape or view operations!\n", + "```\n", + "\n", + "### Reshape: Changing Interpretation of the Same Data\n", + "\n", + "```\n", + "Reshaping (changing dimensions without changing data):\n", + "Original: [1, 2, 3, 4, 5, 6] (shape: (6,))\n", + " ↓ reshape(2, 3)\n", + "Result: [[1, 2, 3], (shape: (2, 3))\n", + " [4, 5, 6]]\n", + "\n", + "Memory Layout (unchanged):\n", + "Before: [1][2][3][4][5][6]\n", + "After: [1][2][3][4][5][6] ← Same memory, different interpretation\n", + "\n", + "Key Insight: Reshape is O(1) operation - no data copying!\n", + "Just changes how we interpret the memory layout.\n", + "\n", + "Common ML Reshapes:\n", + "┌─────────────────────┬─────────────────────┬─────────────────────┐\n", + "│ Flatten for MLP │ Unflatten for CNN │ Batch Dimension │\n", + "├─────────────────────┼─────────────────────┼─────────────────────┤\n", + "│ (N,H,W,C) → (N,H×W×C) │ (N,D) → (N,H,W,C) │ (H,W) → (1,H,W) │\n", + "│ Images to vectors │ Vectors to images │ Add batch dimension │\n", + "└─────────────────────┴─────────────────────┴─────────────────────┘\n", + "```\n", + "\n", + "### Transpose: Swapping Dimensions\n", + "\n", + "```\n", + "Transposing (swapping dimensions - data rearrangement):\n", + "Original: [[1, 2, 3], (shape: (2, 3))\n", + " [4, 5, 6]]\n", + " ↓ transpose()\n", + "Result: [[1, 4], 
(shape: (3, 2))\n", + " [2, 5],\n", + " [3, 6]]\n", + "\n", + "Memory Layout (rearranged):\n", + "Before: [1][2][3][4][5][6]\n", + "After: [1][4][2][5][3][6] ← Data actually moves in memory\n", + "\n", + "Key Insight: Transpose involves data movement - more expensive than reshape.\n", + "\n", + "Neural Network Usage:\n", + "┌─────────────────────┬─────────────────────┬─────────────────────┐\n", + "│ Weight Matrices │ Attention Mechanism │ Gradient Computation│\n", + "├─────────────────────┼─────────────────────┼─────────────────────┤\n", + "│ Forward: X @ W │ Q @ K^T attention │ ∂L/∂W = X^T @ ∂L/∂Y│\n", + "│ Backward: X @ W^T │ scores │ │\n", + "└─────────────────────┴─────────────────────┴─────────────────────┘\n", + "```\n", + "\n", + "### Performance Implications\n", + "\n", + "```\n", + "Operation Performance (for 1000×1000 matrix):\n", + "┌─────────────────┬──────────────┬─────────────────┬─────────────────┐\n", + "│ Operation │ Time │ Memory Access │ Cache Behavior │\n", + "├─────────────────┼──────────────┼─────────────────┼─────────────────┤\n", + "│ reshape() │ ~0.001 ms │ No data copy │ No cache impact │\n", + "│ transpose() │ ~10 ms │ Full data copy │ Poor locality │\n", + "│ view() (future) │ ~0.001 ms │ No data copy │ No cache impact │\n", + "└─────────────────┴──────────────┴─────────────────┴─────────────────┘\n", + "\n", + "Why transpose() is slower:\n", + "- Must rearrange data in memory\n", + "- Poor cache locality (accessing columns)\n", + "- Can't be parallelized easily\n", + "```\n", + "\n", + "This is why frameworks like PyTorch often use \"lazy\" transpose operations that defer the actual data movement until necessary." 
+ ] + }, + { + "cell_type": "markdown", + "id": "25a8e453", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### 🧪 Unit Test: Shape Manipulation\n", + "\n", + "This test validates reshape and transpose operations work correctly with validation and edge cases.\n", + "\n", + "**What we're testing**: Reshape and transpose operations with proper error handling\n", + "**Why it matters**: Essential for data flow in neural networks, CNN/RNN architectures\n", + "**Expected**: Correct shape changes, proper error handling for invalid operations" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eda5f8f3", + "metadata": { + "nbgrader": { + "grade": true, + "grade_id": "test-shape-ops", + "locked": true, + "points": 15 + } + }, + "outputs": [], + "source": [ + "def test_unit_shape_manipulation():\n", + " \"\"\"🧪 Test reshape and transpose operations.\"\"\"\n", + " print(\"🧪 Unit Test: Shape Manipulation...\")\n", + "\n", + " # Test basic reshape (flatten → matrix)\n", + " tensor = Tensor([1, 2, 3, 4, 5, 6]) # Shape: (6,)\n", + " reshaped = tensor.reshape(2, 3) # Shape: (2, 3)\n", + " assert reshaped.shape == (2, 3)\n", + " expected = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)\n", + " assert np.array_equal(reshaped.data, expected)\n", + "\n", + " # Test reshape with tuple (alternative calling style)\n", + " reshaped2 = tensor.reshape((3, 2)) # Shape: (3, 2)\n", + " assert reshaped2.shape == (3, 2)\n", + " expected2 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)\n", + " assert np.array_equal(reshaped2.data, expected2)\n", + "\n", + " # Test reshape with -1 (automatic dimension inference)\n", + " auto_reshaped = tensor.reshape(2, -1) # Should infer -1 as 3\n", + " assert auto_reshaped.shape == (2, 3)\n", + "\n", + " # Test reshape validation - should raise error for incompatible sizes\n", + " try:\n", + " tensor.reshape(2, 2) # 6 elements can't fit in 2×2=4\n", + " assert False, \"Should have raised 
ValueError\"\n", + " except ValueError as e:\n", + " assert \"Total elements must match\" in str(e)\n", + " assert \"6 ≠ 4\" in str(e)\n", + "\n", + " # Test matrix transpose (most common case)\n", + " matrix = Tensor([[1, 2, 3], [4, 5, 6]]) # (2, 3)\n", + " transposed = matrix.transpose() # (3, 2)\n", + " assert transposed.shape == (3, 2)\n", + " expected = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.float32)\n", + " assert np.array_equal(transposed.data, expected)\n", + "\n", + " # Test 1D transpose (should be identity)\n", + " vector = Tensor([1, 2, 3])\n", + " vector_t = vector.transpose()\n", + " assert np.array_equal(vector.data, vector_t.data)\n", + "\n", + " # Test specific dimension transpose\n", + " tensor_3d = Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # (2, 2, 2)\n", + " swapped = tensor_3d.transpose(0, 2) # Swap first and last dimensions\n", + " assert swapped.shape == (2, 2, 2) # Same shape but data rearranged\n", + "\n", + " # Test neural network reshape pattern (flatten for MLP)\n", + " batch_images = Tensor(np.random.rand(2, 3, 4)) # (batch=2, height=3, width=4)\n", + " flattened = batch_images.reshape(2, -1) # (batch=2, features=12)\n", + " assert flattened.shape == (2, 12)\n", + "\n", + " print(\"✅ Shape manipulation works correctly!\")\n", + "\n", + "if __name__ == \"__main__\":\n", + " test_unit_shape_manipulation()" + ] + }, + { + "cell_type": "markdown", + "id": "b037ba5a", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 2 + }, + "source": [ + "## Reduction Operations: Aggregating Information\n", + "\n", + "Reduction operations collapse dimensions by aggregating data, which is essential for computing statistics, losses, and preparing data for different layers.\n", + "\n", + "### Why Reductions are Crucial in ML\n", + "\n", + "Reduction operations appear throughout neural networks:\n", + "\n", + "```\n", + "Common ML Reduction Patterns:\n", + "\n", + "┌─────────────────────┬─────────────────────┬─────────────────────┐\n", 
+ "│ Loss Computation │ Batch Normalization │ Global Pooling │\n", + "├─────────────────────┼─────────────────────┼─────────────────────┤\n", + "│ Per-sample losses → │ Batch statistics → │ Feature maps → │\n", + "│ Single batch loss │ Normalization │ Single features │\n", + "│ │ │ │\n", + "│ losses.mean() │ batch.mean(axis=0) │ fmaps.mean(axis=(2,3))│\n", + "│ (N,) → scalar │ (N,D) → (D,) │ (N,C,H,W) → (N,C) │\n", + "└─────────────────────┴─────────────────────┴─────────────────────┘\n", + "\n", + "Real Examples:\n", + "• Cross-entropy loss: -log(predictions).mean() [average over batch]\n", + "• Batch norm: (x - x.mean()) / x.std() [normalize each feature]\n", + "• Global avg pool: features.mean(dim=(2,3)) [spatial → scalar per channel]\n", + "```\n", + "\n", + "### Understanding Axis Operations\n", + "\n", + "```\n", + "Visual Axis Understanding:\n", + "Matrix: [[1, 2, 3], All reductions operate on this data\n", + " [4, 5, 6]] Shape: (2, 3)\n", + "\n", + " axis=0 (↓)\n", + " ┌─────────┐\n", + "axis=1 │ 1 2 3 │ → axis=1 reduces across columns (→)\n", + " (→) │ 4 5 6 │ → Result shape: (2,) [one value per row]\n", + " └─────────┘\n", + " ↓ ↓ ↓\n", + " axis=0 reduces down rows (↓)\n", + " Result shape: (3,) [one value per column]\n", + "\n", + "Reduction Results:\n", + "├─ .sum() → 21 (sum all: 1+2+3+4+5+6)\n", + "├─ .sum(axis=0) → [5, 7, 9] (sum columns: [1+4, 2+5, 3+6])\n", + "├─ .sum(axis=1) → [6, 15] (sum rows: [1+2+3, 4+5+6])\n", + "├─ .mean() → 3.5 (average all: 21/6)\n", + "├─ .mean(axis=0) → [2.5, 3.5, 4.5] (average columns)\n", + "└─ .max() → 6 (maximum element)\n", + "\n", + "3D Tensor Example (batch, height, width):\n", + "data.shape = (2, 3, 4) # 2 samples, 3×4 images\n", + "│\n", + "├─ .sum(axis=0) → (3, 4) # Sum across batch dimension\n", + "├─ .sum(axis=1) → (2, 4) # Sum across height dimension\n", + "├─ .sum(axis=2) → (2, 3) # Sum across width dimension\n", + "└─ .sum(axis=(1,2)) → (2,) # Sum across both spatial dims (global pool)\n", + "```\n", + 
"\n", + "### Memory and Performance Considerations\n", + "\n", + "```\n", + "Reduction Performance:\n", + "┌─────────────────┬──────────────┬─────────────────┬─────────────────┐\n", + "│ Operation │ Time Complex │ Memory Access │ Cache Behavior │\n", + "├─────────────────┼──────────────┼─────────────────┼─────────────────┤\n", + "│ .sum() │ O(N) │ Sequential read │ Excellent │\n", + "│ .sum(axis=0) │ O(N) │ Column access │ Poor (strided) │\n", + "│ .sum(axis=1) │ O(N) │ Row access │ Excellent │\n", + "│ .mean() │ O(N) │ Sequential read │ Excellent │\n", + "│ .max() │ O(N) │ Sequential read │ Excellent │\n", + "└─────────────────┴──────────────┴─────────────────┴─────────────────┘\n", + "\n", + "Why axis=0 is slower:\n", + "- Accesses elements with large strides\n", + "- Poor cache locality (jumping rows)\n", + "- Less vectorization-friendly\n", + "\n", + "Optimization strategies:\n", + "- Prefer axis=-1 operations when possible\n", + "- Use keepdims=True to maintain shape for broadcasting\n", + "- Consider reshaping before reduction for better cache behavior\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "3cf13e53", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### 🧪 Unit Test: Reduction Operations\n", + "\n", + "This test validates reduction operations work correctly with axis control and maintain proper shapes.\n", + "\n", + "**What we're testing**: Sum, mean, max operations with axis parameter and keepdims\n", + "**Why it matters**: Essential for loss computation, batch processing, and pooling operations\n", + "**Expected**: Correct reduction along specified axes with proper shape handling" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bbb98661", + "metadata": { + "nbgrader": { + "grade": true, + "grade_id": "test-reductions", + "locked": true, + "points": 10 + } + }, + "outputs": [], + "source": [ + "def test_unit_reduction_operations():\n", + " \"\"\"🧪 Test reduction 
operations.\"\"\"\n", + " print(\"🧪 Unit Test: Reduction Operations...\")\n", + "\n", + " matrix = Tensor([[1, 2, 3], [4, 5, 6]]) # Shape: (2, 3)\n", + "\n", + " # Test sum all elements (common for loss computation)\n", + " total = matrix.sum()\n", + " assert total.data == 21.0 # 1+2+3+4+5+6\n", + " assert total.shape == () # Scalar result\n", + "\n", + " # Test sum along axis 0 (columns) - batch dimension reduction\n", + " col_sum = matrix.sum(axis=0)\n", + " expected_col = np.array([5, 7, 9], dtype=np.float32) # [1+4, 2+5, 3+6]\n", + " assert np.array_equal(col_sum.data, expected_col)\n", + " assert col_sum.shape == (3,)\n", + "\n", + " # Test sum along axis 1 (rows) - feature dimension reduction\n", + " row_sum = matrix.sum(axis=1)\n", + " expected_row = np.array([6, 15], dtype=np.float32) # [1+2+3, 4+5+6]\n", + " assert np.array_equal(row_sum.data, expected_row)\n", + " assert row_sum.shape == (2,)\n", + "\n", + " # Test mean (average loss computation)\n", + " avg = matrix.mean()\n", + " assert np.isclose(avg.data, 3.5) # 21/6\n", + " assert avg.shape == ()\n", + "\n", + " # Test mean along axis (batch normalization pattern)\n", + " col_mean = matrix.mean(axis=0)\n", + " expected_mean = np.array([2.5, 3.5, 4.5], dtype=np.float32) # [5/2, 7/2, 9/2]\n", + " assert np.allclose(col_mean.data, expected_mean)\n", + "\n", + " # Test max (finding best predictions)\n", + " maximum = matrix.max()\n", + " assert maximum.data == 6.0\n", + " assert maximum.shape == ()\n", + "\n", + " # Test max along axis (argmax-like operation)\n", + " row_max = matrix.max(axis=1)\n", + " expected_max = np.array([3, 6], dtype=np.float32) # [max(1,2,3), max(4,5,6)]\n", + " assert np.array_equal(row_max.data, expected_max)\n", + "\n", + " # Test keepdims (important for broadcasting)\n", + " sum_keepdims = matrix.sum(axis=1, keepdims=True)\n", + " assert sum_keepdims.shape == (2, 1) # Maintains 2D shape\n", + " expected_keepdims = np.array([[6], [15]], dtype=np.float32)\n", + " assert 
np.array_equal(sum_keepdims.data, expected_keepdims)\n", + "\n", + " # Test 3D reduction (simulating global average pooling)\n", + " tensor_3d = Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # (2, 2, 2)\n", + " spatial_mean = tensor_3d.mean(axis=(1, 2)) # Average across spatial dimensions\n", + " assert spatial_mean.shape == (2,) # One value per batch item\n", + "\n", + " print(\"✅ Reduction operations work correctly!\")\n", + "\n", + "if __name__ == \"__main__\":\n", + " test_unit_reduction_operations()" + ] + }, + { + "cell_type": "markdown", + "id": "a37d2b20", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 2 + }, + "source": [ + "## Gradient Features: Preparing for Module 05\n", + "\n", + "Our Tensor includes dormant gradient features that will spring to life in Module 05. For now, they exist but do nothing - this design choice ensures a consistent interface throughout the course.\n", + "\n", + "### Why Include Gradient Features Now?\n", + "\n", + "```\n", + "Gradient System Evolution:\n", + "Module 01: Tensor with dormant gradients\n", + " ┌─────────────────────────────────┐\n", + " │ Tensor │\n", + " │ • data: actual values │\n", + " │ • requires_grad: False │ ← Present but unused\n", + " │ • grad: None │ ← Present but stays None\n", + " │ • backward(): pass │ ← Present but does nothing\n", + " └─────────────────────────────────┘\n", + " ↓ Module 05 activates these\n", + "Module 05: Tensor with active gradients\n", + " ┌─────────────────────────────────┐\n", + " │ Tensor │\n", + " │ • data: actual values │\n", + " │ • requires_grad: True │ ← Now controls gradient tracking\n", + " │ • grad: computed gradients │ ← Now accumulates gradients\n", + " │ • backward(): computes grads │ ← Now implements chain rule\n", + " └─────────────────────────────────┘\n", + "```\n", + "\n", + "### Design Benefits\n", + "\n", + "**Consistency**: Same Tensor class interface throughout all modules\n", + "- No confusing Variable vs. 
Tensor distinction (unlike early PyTorch)\n", + "- Students never need to learn a \"new\" Tensor class\n", + "- IDE autocomplete works from day one\n", + "\n", + "**Gradual Complexity**: Features activate when students are ready\n", + "- Module 01-04: Ignore gradient features, focus on operations\n", + "- Module 05: Gradient features \"turn on\" magically\n", + "- No cognitive overload in early modules\n", + "\n", + "**Future-Proof**: Easy to extend without breaking changes\n", + "- Additional features can be added as dormant initially\n", + "- No monkey-patching or dynamic class modification\n", + "- Clean evolution path\n", + "\n", + "### Current State (Module 01)\n", + "\n", + "```\n", + "Gradient Features - Current Behavior:\n", + "┌─────────────────────────────────────────────────────────┐\n", + "│ Feature │ Current State │ Module 05 State │\n", + "├─────────────────────────────────────────────────────────┤\n", + "│ requires_grad │ False │ True (when needed) │\n", + "│ grad │ None │ np.array(...) │\n", + "│ backward() │ pass (no-op) │ Chain rule impl │\n", + "│ Operation chaining│ Not tracked │ Computation graph │\n", + "└─────────────────────────────────────────────────────────┘\n", + "\n", + "Student Experience:\n", + "• Can call .backward() without errors (just does nothing)\n", + "• Can set requires_grad=True (just gets stored)\n", + "• Focus on understanding tensor operations first\n", + "• Gradients remain \"mysterious\" until Module 05 reveals them\n", + "```\n", + "\n", + "This approach matches the pedagogical principle of \"progressive disclosure\" - reveal complexity only when students are ready to handle it." 
+ ] + }, + { + "cell_type": "markdown", + "id": "4b01be76", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "## Systems Analysis: Memory Layout and Performance\n", + "\n", + "Even as a foundation module, let's understand ONE key systems concept that will inform every design decision in future modules: **memory layout and cache behavior**.\n", + "\n", + "This single analysis reveals why certain operations are fast while others are slow, and why framework designers make specific architectural choices." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e6c19d39", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], + "source": [ + "def analyze_memory_layout():\n", + " \"\"\"📊 Demonstrate cache effects with row vs column access patterns.\"\"\"\n", + " print(\"📊 Analyzing Memory Access Patterns...\")\n", + " print(\"=\" * 60)\n", + "\n", + " # Create a moderately-sized matrix (large enough to show cache effects)\n", + " size = 2000\n", + " matrix = Tensor(np.random.rand(size, size))\n", + "\n", + " import time\n", + "\n", + " print(f\"\\nTesting with {size}×{size} matrix ({matrix.size * BYTES_PER_FLOAT32 / MB_TO_BYTES:.1f} MB)\")\n", + " print(\"-\" * 60)\n", + "\n", + " # Test 1: Row-wise access (cache-friendly)\n", + " # Memory layout: [row0][row1][row2]... 
stored contiguously\n", + " print(\"\\n🔬 Test 1: Row-wise Access (Cache-Friendly)\")\n", + " start = time.time()\n", + " row_sums = []\n", + " for i in range(size):\n", + " row_sum = matrix.data[i, :].sum() # Access entire row sequentially\n", + " row_sums.append(row_sum)\n", + " row_time = time.time() - start\n", + " print(f\" Time: {row_time*1000:.1f}ms\")\n", + " print(f\" Access pattern: Sequential (follows memory layout)\")\n", + "\n", + " # Test 2: Column-wise access (cache-unfriendly)\n", + " # Must jump between rows, poor spatial locality\n", + " print(\"\\n🔬 Test 2: Column-wise Access (Cache-Unfriendly)\")\n", + " start = time.time()\n", + " col_sums = []\n", + " for j in range(size):\n", + " col_sum = matrix.data[:, j].sum() # Access entire column with large strides\n", + " col_sums.append(col_sum)\n", + " col_time = time.time() - start\n", + " print(f\" Time: {col_time*1000:.1f}ms\")\n", + " print(f\" Access pattern: Strided (jumps {size * BYTES_PER_FLOAT32} bytes per element)\")\n", + "\n", + " # Calculate slowdown\n", + " slowdown = col_time / row_time\n", + " print(\"\\n\" + \"=\" * 60)\n", + " print(f\"📊 PERFORMANCE IMPACT:\")\n", + " print(f\" Slowdown factor: {slowdown:.2f}× ({col_time/row_time:.1f}× slower)\")\n", + " print(f\" Cache misses cause {(slowdown-1)*100:.0f}% performance loss\")\n", + "\n", + " # Educational insights\n", + " print(\"\\n💡 KEY INSIGHTS:\")\n", + " print(f\" 1. Memory layout matters: Row-major (C-style) storage is sequential\")\n", + " print(f\" 2. Cache lines are ~64 bytes: Row access loads nearby elements \\\"for free\\\"\")\n", + " print(f\" 3. Column access misses cache: Must reload from DRAM every time\")\n", + " print(f\" 4. 
This is O(n) algorithm but {slowdown:.1f}× different wall-clock time!\")\n", + "\n", + " print(\"\\n🚀 REAL-WORLD IMPLICATIONS:\")\n", + " print(f\" • CNNs use NCHW format (channels sequential) for cache efficiency\")\n", + " print(f\" • Matrix multiplication optimized with blocking (tile into cache-sized chunks)\")\n", + " print(f\" • Transpose is expensive ({slowdown:.1f}×) because it changes memory layout\")\n", + " print(f\" • This is why GPU frameworks obsess over memory coalescing\")\n", + "\n", + " print(\"\\n\" + \"=\" * 60)\n", + "\n", + "# Run the systems analysis\n", + "if __name__ == \"__main__\":\n", + " analyze_memory_layout()" + ] + }, + { + "cell_type": "markdown", + "id": "37411779", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 2 + }, + "source": [ + "## 4. Integration: Bringing It Together\n", + "\n", + "Let's test how our Tensor operations work together in realistic scenarios that mirror neural network computations. This integration demonstrates that our individual operations combine correctly for complex ML workflows.\n", + "\n", + "### Neural Network Layer Simulation\n", + "\n", + "The fundamental building block of neural networks is the linear transformation: **y = xW + b**\n", + "\n", + "```\n", + "Linear Layer Forward Pass: y = xW + b\n", + "\n", + "Input Features → Weight Matrix → Matrix Multiply → Add Bias → Output Features\n", + " (batch, in) (in, out) (batch, out) (batch, out) (batch, out)\n", + "\n", + "Step-by-Step Breakdown:\n", + "1. Input: X shape (batch_size, input_features)\n", + "2. Weight: W shape (input_features, output_features)\n", + "3. Matmul: XW shape (batch_size, output_features)\n", + "4. Bias: b shape (output_features,)\n", + "5. 
Result: XW + b shape (batch_size, output_features)\n", + "\n", + "Example Flow:\n", + "Input: [[1, 2, 3], Weight: [[0.1, 0.2], Bias: [0.1, 0.2]\n", + " [4, 5, 6]] [0.3, 0.4],\n", + " (2, 3) [0.5, 0.6]]\n", + " (3, 2)\n", + "\n", + "Step 1: Matrix Multiply\n", + "[[1, 2, 3]] @ [[0.1, 0.2]] = [[1×0.1+2×0.3+3×0.5, 1×0.2+2×0.4+3×0.6]]\n", + "[[4, 5, 6]] [[0.3, 0.4]] [[4×0.1+5×0.3+6×0.5, 4×0.2+5×0.4+6×0.6]]\n", + " [[0.5, 0.6]]\n", + " = [[2.2, 2.8],\n", + " [4.9, 6.4]]\n", + "\n", + "Step 2: Add Bias (Broadcasting)\n", + "[[2.2, 2.8]] + [0.1, 0.2] = [[2.3, 3.0],\n", + " [4.9, 6.4]] [5.0, 6.6]]\n", + "\n", + "This is the foundation of every neural network layer!\n", + "```\n", + "\n", + "### Why This Integration Matters\n", + "\n", + "This simulation shows how our basic operations combine to create the computational building blocks of neural networks:\n", + "\n", + "- **Matrix Multiplication**: Transforms input features into new feature space\n", + "- **Broadcasting Addition**: Applies learned biases efficiently across batches\n", + "- **Shape Handling**: Ensures data flows correctly through layers\n", + "- **Memory Management**: Creates new tensors without corrupting inputs\n", + "\n", + "Every layer in a neural network - from simple MLPs to complex transformers - uses this same pattern." + ] + }, + { + "cell_type": "markdown", + "id": "999d8586", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "## 🧪 Module Integration Test\n", + "\n", + "Final validation that everything works together correctly before module completion." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "65e534dd", + "metadata": { + "lines_to_next_cell": 2, + "nbgrader": { + "grade": true, + "grade_id": "module-integration", + "locked": true, + "points": 20 + } + }, + "outputs": [], + "source": [ + "def test_module():\n", + " \"\"\"\n", + " Comprehensive test of entire module functionality.\n", + "\n", + " This final test runs before module summary to ensure:\n", + " - All unit tests pass\n", + " - Functions work together correctly\n", + " - Module is ready for integration with TinyTorch\n", + " \"\"\"\n", + " print(\"🧪 RUNNING MODULE INTEGRATION TEST\")\n", + " print(\"=\" * 50)\n", + "\n", + " # Run all unit tests\n", + " print(\"Running unit tests...\")\n", + " test_unit_tensor_creation()\n", + " test_unit_arithmetic_operations()\n", + " test_unit_matrix_multiplication()\n", + " test_unit_shape_manipulation()\n", + " test_unit_reduction_operations()\n", + "\n", + " print(\"\\nRunning integration scenarios...\")\n", + "\n", + " # Test realistic neural network computation\n", + " print(\"🧪 Integration Test: Two-Layer Neural Network...\")\n", + "\n", + " # Create input data (2 samples, 3 features)\n", + " x = Tensor([[1, 2, 3], [4, 5, 6]])\n", + "\n", + " # First layer: 3 inputs → 4 hidden units\n", + " W1 = Tensor([[0.1, 0.2, 0.3, 0.4],\n", + " [0.5, 0.6, 0.7, 0.8],\n", + " [0.9, 1.0, 1.1, 1.2]])\n", + " b1 = Tensor([0.1, 0.2, 0.3, 0.4])\n", + "\n", + " # Forward pass: hidden = xW1 + b1\n", + " hidden = x.matmul(W1) + b1\n", + " assert hidden.shape == (2, 4), f\"Expected (2, 4), got {hidden.shape}\"\n", + "\n", + " # Second layer: 4 hidden → 2 outputs\n", + " W2 = Tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8]])\n", + " b2 = Tensor([0.1, 0.2])\n", + "\n", + " # Output layer: output = hiddenW2 + b2\n", + " output = hidden.matmul(W2) + b2\n", + " assert output.shape == (2, 2), f\"Expected (2, 2), got {output.shape}\"\n", + "\n", + " # Verify data flows correctly (no NaN, reasonable 
values)\n", + " assert not np.isnan(output.data).any(), \"Output contains NaN values\"\n", + " assert np.isfinite(output.data).all(), \"Output contains infinite values\"\n", + "\n", + " print(\"✅ Two-layer neural network computation works!\")\n", + "\n", + " # Test gradient attributes are preserved and functional\n", + " print(\"🧪 Integration Test: Gradient System Readiness...\")\n", + " grad_tensor = Tensor([1, 2, 3], requires_grad=True)\n", + " result = grad_tensor + 5\n", + " assert grad_tensor.requires_grad == True, \"requires_grad not preserved\"\n", + " assert grad_tensor.grad is None, \"grad should still be None\"\n", + "\n", + " # Test backward() doesn't crash (even though it does nothing)\n", + " grad_tensor.backward() # Should not raise any exception\n", + "\n", + " print(\"✅ Gradient system ready for Module 05!\")\n", + "\n", + " # Test complex shape manipulations\n", + " print(\"🧪 Integration Test: Complex Shape Operations...\")\n", + " data = Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])\n", + "\n", + " # Reshape to 3D tensor (simulating batch processing)\n", + " tensor_3d = data.reshape(2, 2, 3) # (batch=2, height=2, width=3)\n", + " assert tensor_3d.shape == (2, 2, 3)\n", + "\n", + " # Global average pooling simulation\n", + " pooled = tensor_3d.mean(axis=(1, 2)) # Average across spatial dimensions\n", + " assert pooled.shape == (2,), f\"Expected (2,), got {pooled.shape}\"\n", + "\n", + " # Flatten for MLP\n", + " flattened = tensor_3d.reshape(2, -1) # (batch, features)\n", + " assert flattened.shape == (2, 6)\n", + "\n", + " # Transpose for different operations\n", + " transposed = tensor_3d.transpose() # Should transpose last two dims\n", + " assert transposed.shape == (2, 3, 2)\n", + "\n", + " print(\"✅ Complex shape operations work!\")\n", + "\n", + " # Test broadcasting edge cases\n", + " print(\"🧪 Integration Test: Broadcasting Edge Cases...\")\n", + "\n", + " # Scalar broadcasting\n", + " scalar = Tensor(5.0)\n", + " vector = Tensor([1, 2, 
3])\n", + " result = scalar + vector # Should broadcast scalar to vector shape\n", + " expected = np.array([6, 7, 8], dtype=np.float32)\n", + " assert np.array_equal(result.data, expected)\n", + "\n", + " # Matrix + vector broadcasting\n", + " matrix = Tensor([[1, 2], [3, 4]])\n", + " vec = Tensor([10, 20])\n", + " result = matrix + vec\n", + " expected = np.array([[11, 22], [13, 24]], dtype=np.float32)\n", + " assert np.array_equal(result.data, expected)\n", + "\n", + " print(\"✅ Broadcasting edge cases work!\")\n", + "\n", + " print(\"\\n\" + \"=\" * 50)\n", + " print(\"🎉 ALL TESTS PASSED! Module ready for export.\")\n", + " print(\"Run: tito module complete 01_tensor\")\n", + "\n", + "# Run comprehensive module test\n", + "if __name__ == \"__main__\":\n", + " test_module()" + ] + }, + { + "cell_type": "markdown", + "id": "e3b468dc", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 🤔 ML Systems Reflection Questions\n", + "\n", + "Answer these to deepen your understanding of tensor operations and their systems implications:\n", + "\n", + "### 1. Memory Layout and Cache Performance\n", + "**Question**: How does row-major vs column-major storage affect cache performance in tensor operations?\n", + "\n", + "**Consider**:\n", + "- What happens when you access matrix elements sequentially vs. with large strides?\n", + "- Why did our analysis show column-wise access being ~2-3× slower than row-wise?\n", + "- How would this affect the design of a convolutional neural network's memory layout?\n", + "\n", + "**Real-world context**: PyTorch uses NCHW (batch, channels, height, width) format specifically because accessing channels sequentially has better cache locality than NHWC format.\n", + "\n", + "---\n", + "\n", + "### 2. Batch Processing and Scaling\n", + "**Question**: If you double the batch size in a neural network, what happens to memory usage? 
What about computation time?\n", + "\n", + "**Consider**:\n", + "- A linear layer with input (batch, features): y = xW + b\n", + "- Memory for: input tensor, weight matrix, output tensor, intermediate results\n", + "- How does matrix multiplication time scale with batch size?\n", + "\n", + "**Think about**:\n", + "- If (32, 784) @ (784, 256) takes 10ms, how long does (64, 784) @ (784, 256) take?\n", + "- Does doubling batch size double memory usage? Why or why not?\n", + "- What are the trade-offs between large and small batch sizes?\n", + "\n", + "---\n", + "\n", + "### 3. Data Type Precision and Memory\n", + "**Question**: What's the memory difference between float64 and float32 for a (1000, 1000) tensor? When would you choose each?\n", + "\n", + "**Calculate**:\n", + "- float64: 8 bytes per element\n", + "- float32: 4 bytes per element\n", + "- Total elements in (1000, 1000): ___________\n", + "- Memory difference: ___________\n", + "\n", + "**Trade-offs to consider**:\n", + "- Training accuracy vs. memory consumption\n", + "- GPU memory limits (often 8-16GB for consumer GPUs)\n", + "- Numerical stability in gradient computation\n", + "- Inference speed on mobile devices\n", + "\n", + "---\n", + "\n", + "### 4. Production Scale: Memory Requirements\n", + "**Question**: A GPT-3-scale model has 175 billion parameters. How much RAM is needed just to store the weights in float32? 
What about with an optimizer like Adam?\n", + "\n", + "**Calculate**:\n", + "- Parameters: 175 × 10^9\n", + "- Bytes per float32: 4\n", + "- Weight memory: ___________GB\n", + "\n", + "**Additional memory for Adam optimizer**:\n", + "- Adam stores: parameters, gradients, first moment (m), second moment (v)\n", + "- Total multiplier: 4× the parameter count\n", + "- Total with Adam: ___________GB\n", + "\n", + "**Real-world implications**:\n", + "- Why do we need 8× A100 GPUs (40GB each) for training?\n", + "- What is mixed-precision training (float16/bfloat16)?\n", + "- How does gradient checkpointing help?\n", + "\n", + "---\n", + "\n", + "### 5. Hardware Awareness: GPU Efficiency\n", + "**Question**: Why do GPUs strongly prefer operations on large tensors over many small ones?\n", + "\n", + "**Consider these scenarios**:\n", + "- **Scenario A**: 1000 separate (10, 10) matrix multiplications\n", + "- **Scenario B**: 1 batched (1000, 10, 10) matrix multiplication\n", + "\n", + "**Think about**:\n", + "- GPU kernel launch overhead (~5-10 microseconds per launch)\n", + "- Thread parallelism utilization (GPUs have 1000s of cores)\n", + "- Memory transfer costs (CPU→GPU has ~10GB/s bandwidth, GPU memory has ~900GB/s)\n", + "- When is the GPU actually doing computation vs. waiting?\n", + "\n", + "**Design principle**: Batch operations together to amortize overhead and maximize parallelism.\n", + "\n", + "---\n", + "\n", + "### Bonus Challenge: Optimization Analysis\n", + "\n", + "**Scenario**: You're implementing a custom activation function that will be applied to every element in a tensor. 
You have two implementation choices:\n", + "\n", + "**Option A**: Python loop over each element\n", + "```python\n", + "def custom_activation(tensor):\n", + " result = np.empty_like(tensor.data)\n", + " for i in range(tensor.data.size):\n", + " result.flat[i] = complex_math_function(tensor.data.flat[i])\n", + " return Tensor(result)\n", + "```\n", + "\n", + "**Option B**: NumPy vectorized operation\n", + "```python\n", + "def custom_activation(tensor):\n", + " return Tensor(complex_math_function(tensor.data))\n", + "```\n", + "\n", + "**Questions**:\n", + "1. For a (1000, 1000) tensor, estimate the speedup of Option B vs Option A\n", + "2. Why is vectorization faster even though both are O(n) operations?\n", + "3. What if the tensor is tiny (10, 10) - does the answer change?\n", + "4. How would this change if we move to GPU computation?\n", + "\n", + "**Key insight**: Algorithmic complexity (Big-O) doesn't tell the whole performance story. Constant factors from vectorization, cache behavior, and parallelism dominate in practice." + ] + }, + { + "cell_type": "markdown", + "id": "c3499857", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 🎯 MODULE SUMMARY: Tensor Foundation\n", + "\n", + "Congratulations! 
You've built the foundational Tensor class that powers all machine learning operations!\n", + "\n", + "### Key Accomplishments\n", + "- **Built a complete Tensor class** with arithmetic operations, matrix multiplication, and shape manipulation\n", + "- **Implemented broadcasting semantics** that match NumPy for automatic shape alignment\n", + "- **Created dormant gradient features** that will activate in Module 05 (autograd)\n", + "- **Added comprehensive ASCII diagrams** showing tensor operations visually\n", + "- **All methods defined INSIDE the class** (no monkey-patching) for clean, maintainable code\n", + "- **All tests pass ✅** (validated by `test_module()`)\n", + "\n", + "### Systems Insights Discovered\n", + "- **Memory scaling**: Matrix operations create new tensors (3× memory during computation)\n", + "- **Broadcasting efficiency**: NumPy's automatic shape alignment vs. explicit operations\n", + "- **Shape validation trade-offs**: Clear errors vs. performance in tight loops\n", + "- **Architecture decisions**: Dormant features vs. inheritance for clean evolution\n", + "\n", + "### Ready for Next Steps\n", + "Your Tensor implementation enables all future modules! The dormant gradient features will spring to life in Module 05, and every neural network component will build on this foundation.\n", + "\n", + "Export with: `tito module complete 01_tensor`\n", + "\n", + "**Next**: Module 02 will add activation functions (ReLU, Sigmoid, GELU) that bring intelligence to neural networks by introducing nonlinearity!" 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/modules/01_tensor/tensor.py b/modules/01_tensor/tensor.py index 9f757fff..ec130d1c 100644 --- a/modules/01_tensor/tensor.py +++ b/modules/01_tensor/tensor.py @@ -468,6 +468,68 @@ class Tensor: ### END SOLUTION # nbgrader={"grade": false, "grade_id": "shape-ops", "solution": true} + # %% nbgrader={"grade": false, "grade_id": "getitem-impl", "solution": true} + def __getitem__(self, key): + """ + Enable indexing and slicing operations on Tensors. + + This allows Tensors to be indexed like NumPy arrays while preserving + gradient computation capabilities (when autograd is enabled in Module 05). + + TODO: Implement tensor indexing/slicing with gradient support + + APPROACH: + 1. Use NumPy's indexing to slice the underlying data + 2. Create new Tensor with sliced data + 3. Preserve requires_grad flag + 4. 
Store backward function (if autograd enabled - Module 05) + + EXAMPLES: + >>> x = Tensor([1, 2, 3, 4, 5]) + >>> x[0] # Single element: Tensor(1) + >>> x[:3] # Slice: Tensor([1, 2, 3]) + >>> x[1:4] # Range: Tensor([2, 3, 4]) + >>> + >>> y = Tensor([[1, 2, 3], [4, 5, 6]]) + >>> y[0] # Row: Tensor([1, 2, 3]) + >>> y[:, 1] # Column: Tensor([2, 5]) + >>> y[0, 1:3] # Mixed: Tensor([2, 3]) + + GRADIENT BEHAVIOR (Module 05): + - Slicing preserves gradient flow + - Gradients flow back to original positions + - Example: x[:3].backward() updates x.grad[:3] + + HINTS: + - NumPy handles the indexing: self.data[key] + - Result is always a Tensor (even single elements) + - Preserve requires_grad for gradient tracking + """ + ### BEGIN SOLUTION + # Perform the indexing on underlying NumPy array + result_data = self.data[key] + + # Ensure result is always an array (even for scalar indexing) + if not isinstance(result_data, np.ndarray): + result_data = np.array(result_data) + + # Create new Tensor with sliced data + result = Tensor(result_data, requires_grad=self.requires_grad) + + # If gradients are tracked and autograd is available, attach backward function + # Note: This will be used by Module 05 (Autograd) + if self.requires_grad: + # Check if SliceBackward exists (added in Module 05) + try: + from tinytorch.core.autograd import SliceBackward + result._grad_fn = SliceBackward(self, key) + except (ImportError, AttributeError): + # Autograd not yet available - gradient tracking will be added in Module 05 + pass + + return result + ### END SOLUTION + def reshape(self, *shape): """ Reshape tensor to new dimensions. 
diff --git a/modules/05_autograd/autograd.ipynb b/modules/05_autograd/autograd.ipynb new file mode 100644 index 00000000..904d6d4a --- /dev/null +++ b/modules/05_autograd/autograd.ipynb @@ -0,0 +1,2489 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "7aa794a8", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "# Module 05: Autograd ⚡ - The Gradient Engine\n", + "\n", + "Welcome to Module 05! Today you'll awaken the gradient engine and unlock automatic differentiation.\n", + "\n", + "## 🔗 Prerequisites & Progress\n", + "**You've Built**: Tensor operations, activations, layers, and loss functions \n", + "**You'll Build**: The autograd system that computes gradients automatically \n", + "**You'll Enable**: Learning! Training! The ability to optimize neural networks!\n", + "\n", + "**Connection Map**:\n", + "```\n", + "Modules 01-04 → Autograd → Training (Module 06-07)\n", + "(forward pass) (backward pass) (learning loops)\n", + "```\n", + "\n", + "## Learning Objectives ⭐⭐\n", + "By the end of this module, you will:\n", + "1. **Enhance Tensor** with automatic differentiation capabilities\n", + "2. **Build computation graphs** that track operations for gradient flow\n", + "3. **Implement backward()** method for reverse-mode differentiation\n", + "4. **Create Function classes** for operation-specific gradient rules\n", + "5. 
**Test gradient correctness** with mathematical validation\n", + "\n", + "**CRITICAL**: This module enhances the existing Tensor class - no new wrapper classes needed!\n", + "\n", + "## 📦 Where This Code Lives in the Final Package\n", + "\n", + "**Learning Side:** You work in `modules/05_autograd/autograd_dev.py` \n", + "**Building Side:** Code exports to `tinytorch.core.autograd`\n", + "\n", + "```python\n", + "# How to use this module:\n", + "from tinytorch.core.autograd import Function, enable_autograd\n", + "```\n", + "\n", + "**Why this matters:**\n", + "- **Learning:** Complete autograd system enabling automatic differentiation\n", + "- **Production:** PyTorch-style computational graph and backward pass\n", + "- **Consistency:** All gradient operations in core.autograd\n", + "- **Integration:** Enhances existing Tensor without breaking anything\n", + "\n", + "Let's build the gradient engine that makes neural networks learn! 🚀" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a90d894d", + "metadata": { + "nbgrader": { + "grade": false, + "grade_id": "imports", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| default_exp core.autograd\n", + "#| export\n", + "\n", + "import numpy as np\n", + "from typing import Optional, List, Tuple\n", + "import sys\n", + "import os\n", + "\n", + "from tinytorch.core.tensor import Tensor\n", + "\n", + "# Constants for numerical differentiation\n", + "EPSILON = 1e-7 # Small perturbation for numerical gradient computation" + ] + }, + { + "cell_type": "markdown", + "id": "b939c017", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 1. Introduction: What is Automatic Differentiation?\n", + "\n", + "Automatic differentiation (autograd) is the magic that makes neural networks learn. 
Instead of manually computing gradients for every parameter, autograd tracks operations and automatically computes gradients via the chain rule.\n", + "\n", + "### The Challenge\n", + "In previous modules, you implemented layers and loss functions. To train a model, you need:\n", + "```\n", + "Loss = f(W₃, f(W₂, f(W₁, x)))\n", + "∂Loss/∂W₁ = ? ∂Loss/∂W₂ = ? ∂Loss/∂W₃ = ?\n", + "```\n", + "\n", + "Manual gradient computation becomes impossible for complex models with millions of parameters.\n", + "\n", + "### The Solution: Computational Graphs\n", + "```\n", + "Forward Pass: x → Linear₁ → ReLU → Linear₂ → Loss\n", + "Backward Pass: ∇x ← ∇Linear₁ ← ∇ReLU ← ∇Linear₂ ← ∇Loss\n", + "```\n", + "\n", + "**Complete Autograd Process Visualization:**\n", + "```\n", + "┌─ FORWARD PASS ──────────────────────────────────────────────┐\n", + "│ │\n", + "│ x ──┬── W₁ ──┐ │\n", + "│ │ ├──[Linear₁]──→ z₁ ──[ReLU]──→ a₁ ──┬── W₂ ──┐ │\n", + "│ └── b₁ ──┘ │ ├─→ Loss\n", + "│ └── b₂ ──┘ │\n", + "│ │\n", + "└─ COMPUTATION GRAPH BUILT ──────────────────────────────────┘\n", + " │\n", + " ▼\n", + "┌─ BACKWARD PASS ─────────────────────────────────────────────┐\n", + "│ │\n", + "│∇x ←┬← ∇W₁ ←┐ │\n", + "│ │ ├←[Linear₁]←─ ∇z₁ ←[ReLU]← ∇a₁ ←┬← ∇W₂ ←┐ │\n", + "│ └← ∇b₁ ←┘ │ ├← ∇Loss │\n", + "│ └← ∇b₂ ←┘ │\n", + "│ │\n", + "└─ GRADIENTS COMPUTED ───────────────────────────────────────┘\n", + "\n", + "Key Insight: Each [operation] stores how to compute its backward pass.\n", + "The chain rule automatically flows gradients through the entire graph.\n", + "```\n", + "\n", + "Each operation records how to compute its backward pass. The chain rule connects them all." + ] + }, + { + "cell_type": "markdown", + "id": "6decec1e", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 2. 
Foundations: The Chain Rule in Action\n", + "\n", + "### Mathematical Foundation\n", + "For composite functions: f(g(x)), the derivative is:\n", + "```\n", + "df/dx = (df/dg) × (dg/dx)\n", + "```\n", + "\n", + "### Computational Graph Example\n", + "```\n", + "Simple computation: L = (x * y + 5)²\n", + "\n", + "Forward Pass:\n", + " x=2 ──┐\n", + " ├──[×]──→ z=6 ──[+5]──→ w=11 ──[²]──→ L=121\n", + " y=3 ──┘\n", + "\n", + "Backward Pass (Chain Rule in Action):\n", + " ∂L/∂x = ∂L/∂w × ∂w/∂z × ∂z/∂x\n", + " = 2w × 1 × y\n", + " = 2(11) × 1 × 3 = 66\n", + "\n", + " ∂L/∂y = ∂L/∂w × ∂w/∂z × ∂z/∂y\n", + " = 2w × 1 × x\n", + " = 2(11) × 1 × 2 = 44\n", + "\n", + "Gradient Flow Visualization:\n", + " ∇x=66 ←──┐\n", + " ├──[×]←── ∇z=22 ←──[+]←── ∇w=22 ←──[²]←── ∇L=1\n", + " ∇y=44 ←──┘\n", + "```\n", + "\n", + "### Memory Layout During Backpropagation\n", + "```\n", + "Computation Graph Memory Structure:\n", + "┌─────────────────────────────────────────────────────────┐\n", + "│ Forward Pass (stored for backward) │\n", + "├─────────────────────────────────────────────────────────┤\n", + "│ Node 1: x=2 (leaf, requires_grad=True) │ grad: None→66 │\n", + "│ Node 2: y=3 (leaf, requires_grad=True) │ grad: None→44 │\n", + "│ Node 3: z=x*y (MulFunction) │ grad: None→22 │\n", + "│ saved: (x=2, y=3) │ inputs: [x,y] │\n", + "│ Node 4: w=z+5 (AddFunction) │ grad: None→22 │\n", + "│ saved: (z=6, 5) │ inputs: [z] │\n", + "│ Node 5: L=w² (PowFunction) │ grad: 1 │\n", + "│ saved: (w=11) │ inputs: [w] │\n", + "└─────────────────────────────────────────────────────────┘\n", + "\n", + "Memory Cost: 2× parameters (data + gradients) + graph overhead\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "e971c4ac", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 3. Implementation: Building the Autograd Engine\n", + "\n", + "Let's implement the autograd system step by step. 
We'll enhance the existing Tensor class and create supporting infrastructure.\n", + "\n", + "### The Function Architecture\n", + "\n", + "Every differentiable operation needs two things:\n", + "1. **Forward pass**: Compute the result\n", + "2. **Backward pass**: Compute gradients for inputs\n", + "\n", + "```\n", + "Function Class Design:\n", + "┌─────────────────────────────────────┐\n", + "│ Function (Base Class) │\n", + "├─────────────────────────────────────┤\n", + "│ • saved_tensors ← Store data │\n", + "│ • apply() ← Compute grads │\n", + "└─────────────────────────────────────┘\n", + " ↑\n", + " ┌─────┴─────┬─────────┬──────────┐\n", + " │ │ │ │\n", + "┌───▼────┐ ┌────▼───┐ ┌───▼────┐ ┌───▼────┐\n", + "│ Add │ │ Mul │ │ Matmul │ │ Sum │\n", + "│Backward│ │Backward│ │Backward│ │Backward│\n", + "└────────┘ └────────┘ └────────┘ └────────┘\n", + "```\n", + "\n", + "Each operation inherits from Function and implements specific gradient rules." + ] + }, + { + "cell_type": "markdown", + "id": "b379f8b5", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### Function Base Class - The Foundation of Autograd\n", + "\n", + "The Function class is the foundation that makes autograd possible. Every differentiable operation (addition, multiplication, etc.) inherits from this class.\n", + "\n", + "**Why Functions Matter:**\n", + "- They remember inputs needed for backward pass\n", + "- They implement gradient computation via apply()\n", + "- They connect to form computation graphs\n", + "- They enable the chain rule to flow gradients\n", + "\n", + "**The Pattern:**\n", + "```\n", + "Forward: inputs → Function.forward() → output\n", + "Backward: grad_output → Function.apply() → grad_inputs\n", + "```\n", + "\n", + "This pattern enables the chain rule to flow gradients through complex computations." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "149c1aaa", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "function-base", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| export\n", + "class Function:\n", + " \"\"\"\n", + " Base class for differentiable operations.\n", + "\n", + " Every operation that needs gradients (add, multiply, matmul, etc.)\n", + " will inherit from this class and implement the apply() method.\n", + " \n", + " **Key Concepts:**\n", + " - **saved_tensors**: Store inputs needed for backward pass\n", + " - **apply()**: Compute gradients using chain rule\n", + " - **next_functions**: Track computation graph connections\n", + " \n", + " **Example Usage:**\n", + " ```python\n", + " class AddBackward(Function):\n", + " def apply(self, grad_output):\n", + " # Addition distributes gradients equally\n", + " return grad_output, grad_output\n", + " ```\n", + " \"\"\"\n", + "\n", + " def __init__(self, *tensors):\n", + " \"\"\"\n", + " Initialize function with input tensors.\n", + " \n", + " Args:\n", + " *tensors: Input tensors that will be saved for backward pass\n", + " \"\"\"\n", + " self.saved_tensors = tensors\n", + " self.next_functions = []\n", + "\n", + " # Build computation graph connections\n", + " for t in tensors:\n", + " if isinstance(t, Tensor) and t.requires_grad:\n", + " # Check if this tensor was created by another operation\n", + " # _grad_fn is only present if autograd is enabled and tensor came from an operation\n", + " if getattr(t, '_grad_fn', None) is not None:\n", + " self.next_functions.append(t._grad_fn)\n", + "\n", + " def apply(self, grad_output):\n", + " \"\"\"\n", + " Compute gradients for inputs.\n", + " \n", + " Args:\n", + " grad_output: Gradient flowing backward from the output\n", + " \n", + " Returns:\n", + " Tuple of gradients for each input tensor\n", + " \n", + " **Must be implemented by subclasses**\n", + " \"\"\"\n", + " raise 
NotImplementedError(\"Each Function must implement apply() method\")" + ] + }, + { + "cell_type": "markdown", + "id": "f5613ef1", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "### Operation Functions - Implementing Gradient Rules\n", + "\n", + "Now we'll implement specific operations that compute gradients correctly. Each operation has mathematical rules for how gradients flow backward.\n", + "\n", + "**Gradient Flow Visualization:**\n", + "```\n", + "Addition (z = a + b):\n", + " ∂z/∂a = 1 ∂z/∂b = 1\n", + "\n", + " a ──┐ grad_a ←──┐\n", + " ├─[+]─→ z ├─[+]←── grad_z\n", + " b ──┘ grad_b ←──┘\n", + "\n", + "Multiplication (z = a * b):\n", + " ∂z/∂a = b ∂z/∂b = a\n", + "\n", + " a ──┐ grad_a = grad_z * b\n", + " ├─[×]─→ z\n", + " b ──┘ grad_b = grad_z * a\n", + "\n", + "Matrix Multiplication (Z = A @ B):\n", + " ∂Z/∂A = grad_Z @ B.T\n", + " ∂Z/∂B = A.T @ grad_Z\n", + "\n", + " A ──┐ grad_A = grad_Z @ B.T\n", + " ├─[@]─→ Z\n", + " B ──┘ grad_B = A.T @ grad_Z\n", + "```\n", + "\n", + "Each operation stores the inputs it needs for computing gradients." + ] + }, + { + "cell_type": "markdown", + "id": "ab349525", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### AddBackward - Gradient Rules for Addition\n", + "\n", + "Addition is the simplest gradient operation: gradients flow unchanged to both inputs.\n", + "\n", + "**Mathematical Principle:**\n", + "```\n", + "If z = a + b, then:\n", + "∂z/∂a = 1 (gradient of z w.r.t. a)\n", + "∂z/∂b = 1 (gradient of z w.r.t. b)\n", + "\n", + "By chain rule:\n", + "∂Loss/∂a = ∂Loss/∂z × ∂z/∂a = grad_output × 1 = grad_output\n", + "∂Loss/∂b = ∂Loss/∂z × ∂z/∂b = grad_output × 1 = grad_output\n", + "```\n", + "\n", + "**Broadcasting Challenge:**\n", + "When tensors have different shapes, NumPy broadcasts automatically in forward pass,\n", + "but we must \"unbroadcast\" gradients in backward pass to match original shapes." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4be50082", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "add-backward", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| export\n", + "class AddBackward(Function):\n", + " \"\"\"\n", + " Gradient computation for tensor addition.\n", + " \n", + " **Mathematical Rule:** If z = a + b, then ∂z/∂a = 1 and ∂z/∂b = 1\n", + " \n", + " **Key Insight:** Addition distributes gradients equally to both inputs.\n", + " The gradient flowing backward is passed unchanged to each input.\n", + " \n", + " **Broadcasting Handling:** When input shapes differ due to broadcasting,\n", + " we sum gradients appropriately to match original tensor shapes.\n", + " \"\"\"\n", + "\n", + " def apply(self, grad_output):\n", + " \"\"\"\n", + " Compute gradients for addition.\n", + " \n", + " Args:\n", + " grad_output: Gradient flowing backward from output\n", + " \n", + " Returns:\n", + " Tuple of (grad_a, grad_b) for the two inputs\n", + " \n", + " **Mathematical Foundation:**\n", + " - ∂(a+b)/∂a = 1 → grad_a = grad_output\n", + " - ∂(a+b)/∂b = 1 → grad_b = grad_output\n", + " \"\"\"\n", + " a, b = self.saved_tensors\n", + " grad_a = grad_b = None\n", + "\n", + " # Gradient for first input\n", + " if isinstance(a, Tensor) and a.requires_grad:\n", + " grad_a = grad_output\n", + "\n", + " # Gradient for second input \n", + " if isinstance(b, Tensor) and b.requires_grad:\n", + " grad_b = grad_output\n", + "\n", + " return grad_a, grad_b" + ] + }, + { + "cell_type": "markdown", + "id": "6e614b97", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### MulBackward - Gradient Rules for Element-wise Multiplication\n", + "\n", + "Element-wise multiplication follows the product rule of calculus.\n", + "\n", + "**Mathematical Principle:**\n", + "```\n", + "If z = a * b (element-wise), then:\n", + "∂z/∂a = b (gradient w.r.t. 
a equals the other input)\n", + "∂z/∂b = a (gradient w.r.t. b equals the other input)\n", + "\n", + "By chain rule:\n", + "∂Loss/∂a = grad_output * b\n", + "∂Loss/∂b = grad_output * a\n", + "```\n", + "\n", + "**Visual Example:**\n", + "```\n", + "Forward: a=[2,3] * b=[4,5] = z=[8,15]\n", + "Backward: grad_z=[1,1]\n", + " grad_a = grad_z * b = [1,1] * [4,5] = [4,5]\n", + " grad_b = grad_z * a = [1,1] * [2,3] = [2,3]\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "479135ed", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "mul-backward", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| export\n", + "class MulBackward(Function):\n", + " \"\"\"\n", + " Gradient computation for tensor multiplication.\n", + " \n", + " **Mathematical Rule:** If z = a * b, then ∂z/∂a = b and ∂z/∂b = a\n", + " \n", + " **Key Insight:** Each input's gradient equals the gradient output \n", + " multiplied by the OTHER input's value (product rule).\n", + " \n", + " **Applications:** Used in weight scaling, attention mechanisms,\n", + " and anywhere element-wise multiplication occurs.\n", + " \"\"\"\n", + "\n", + " def apply(self, grad_output):\n", + " \"\"\"\n", + " Compute gradients for multiplication.\n", + " \n", + " Args:\n", + " grad_output: Gradient flowing backward from output\n", + " \n", + " Returns:\n", + " Tuple of (grad_a, grad_b) for the two inputs\n", + " \n", + " **Mathematical Foundation:**\n", + " - ∂(a*b)/∂a = b → grad_a = grad_output * b\n", + " - ∂(a*b)/∂b = a → grad_b = grad_output * a\n", + " \"\"\"\n", + " a, b = self.saved_tensors\n", + " grad_a = grad_b = None\n", + "\n", + " # Gradient for first input: grad_output * b\n", + " if isinstance(a, Tensor) and a.requires_grad:\n", + " if isinstance(b, Tensor):\n", + " grad_a = grad_output * b.data\n", + " else:\n", + " grad_a = grad_output * b\n", + "\n", + " # Gradient for second input: grad_output * a\n", + " if isinstance(b, 
Tensor) and b.requires_grad:\n", + " grad_b = grad_output * a.data\n", + "\n", + " return grad_a, grad_b" + ] + }, + { + "cell_type": "markdown", + "id": "cebc7031", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### SubBackward - Gradient Rules for Subtraction\n", + "\n", + "Subtraction is mathematically simple but important for operations like normalization.\n", + "\n", + "**Mathematical Principle:**\n", + "```\n", + "If z = a - b, then:\n", + "∂z/∂a = 1\n", + "∂z/∂b = -1\n", + "```\n", + "\n", + "**Key Insight:** Gradient flows forward to the first operand, but **negated** to the second.\n", + "This is crucial for operations like `x - mean` in LayerNorm." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "95d1ecad", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "sub-backward", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| export\n", + "class SubBackward(Function):\n", + " \"\"\"\n", + " Gradient computation for tensor subtraction.\n", + " \n", + " **Mathematical Rule:** If z = a - b, then ∂z/∂a = 1 and ∂z/∂b = -1\n", + " \"\"\"\n", + "\n", + " def apply(self, grad_output):\n", + " \"\"\"\n", + " Compute gradients for subtraction.\n", + " \n", + " Returns:\n", + " Tuple of (grad_a, grad_b) where grad_b is negated\n", + " \"\"\"\n", + " a, b = self.saved_tensors\n", + " grad_a = grad_b = None\n", + "\n", + " if isinstance(a, Tensor) and a.requires_grad:\n", + " grad_a = grad_output # ∂(a-b)/∂a = 1\n", + "\n", + " if isinstance(b, Tensor) and b.requires_grad:\n", + " grad_b = -grad_output # ∂(a-b)/∂b = -1 (note the negative!)\n", + "\n", + " return grad_a, grad_b" + ] + }, + { + "cell_type": "markdown", + "id": "8852ff28", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### DivBackward - Gradient Rules for Division\n", + "\n", + "Division requires the quotient rule from calculus.\n", + "\n", + 
"**Mathematical Principle:**\n", + "```\n", + "If z = a / b, then:\n", + "∂z/∂a = 1/b\n", + "∂z/∂b = -a/b²\n", + "```\n", + "\n", + "**Quotient Rule:** For z = f/g, dz = (g·df - f·dg)/g²" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1ddfaede", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "div-backward", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| export\n", + "class DivBackward(Function):\n", + " \"\"\"\n", + " Gradient computation for tensor division.\n", + " \n", + " **Mathematical Rule:** If z = a / b, then:\n", + " - ∂z/∂a = 1/b\n", + " - ∂z/∂b = -a/b²\n", + " \"\"\"\n", + "\n", + " def apply(self, grad_output):\n", + " \"\"\"\n", + " Compute gradients for division using quotient rule.\n", + " \n", + " Returns:\n", + " Tuple of (grad_a, grad_b)\n", + " \"\"\"\n", + " a, b = self.saved_tensors\n", + " grad_a = grad_b = None\n", + "\n", + " if isinstance(a, Tensor) and a.requires_grad:\n", + " # ∂(a/b)/∂a = 1/b\n", + " if isinstance(b, Tensor):\n", + " grad_a = grad_output / b.data\n", + " else:\n", + " grad_a = grad_output / b\n", + "\n", + " if isinstance(b, Tensor) and b.requires_grad:\n", + " # ∂(a/b)/∂b = -a/b²\n", + " grad_b = -grad_output * a.data / (b.data ** 2)\n", + "\n", + " return grad_a, grad_b" + ] + }, + { + "cell_type": "markdown", + "id": "d650bdb5", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### MatmulBackward - Gradient Rules for Matrix Multiplication\n", + "\n", + "Matrix multiplication has more complex gradient rules based on matrix calculus.\n", + "\n", + "**Mathematical Principle:**\n", + "```\n", + "If Z = A @ B (matrix multiplication), then:\n", + "∂Z/∂A = grad_Z @ B.T\n", + "∂Z/∂B = A.T @ grad_Z\n", + "```\n", + "\n", + "**Why These Rules Work:**\n", + "```\n", + "For element Z[i,j] = Σ_k A[i,k] * B[k,j]\n", + "∂Z[i,j]/∂A[i,k] = B[k,j] ← This gives us grad_Z @ B.T\n", + "∂Z[i,j]/∂B[k,j] = A[i,k] ← 
#| export
class MatmulBackward(Function):
    """Gradient computation for matrix multiplication.

    If Z = A @ B, then:
      - dZ/dA = grad_Z @ B.T
      - dZ/dB = A.T @ grad_Z

    For batched (3-D+) operands only the last two axes are transposed
    (via np.swapaxes), preserving batch dimensions.
    """

    def apply(self, grad_output):
        """Return (grad_a, grad_b) for the two matrix inputs.

        Args:
            grad_output: Gradient flowing backward from the output.
        """
        a, b = self.saved_tensors
        grad_a = grad_b = None

        if isinstance(a, Tensor) and a.requires_grad:
            # grad_a = grad_output @ b.T, transposing only the last two dims
            b_T = np.swapaxes(b.data, -2, -1) if b.data.ndim >= 2 else b.data.T
            grad_a = np.matmul(grad_output, b_T)

        if isinstance(b, Tensor) and b.requires_grad:
            # grad_b = a.T @ grad_output, transposing only the last two dims
            a_T = np.swapaxes(a.data, -2, -1) if a.data.ndim >= 2 else a.data.T
            grad_b = np.matmul(a_T, grad_output)

        return grad_a, grad_b


#| export
class TransposeBackward(Function):
    """Gradient computation for transpose.

    If Y = X.T, then dL/dX = grad_Y.T: transpose is a linear
    rearrangement, so the gradient is just transposed back using the
    same dimension pair.
    """

    def __init__(self, tensor, dim0, dim1):
        """
        Args:
            tensor: Input tensor.
            dim0: First dimension swapped (None for the default last-two swap).
            dim1: Second dimension swapped (None for the default last-two swap).
        """
        super().__init__(tensor)
        self.dim0 = dim0
        self.dim1 = dim1

    def apply(self, grad_output):
        """Return a 1-tuple with the gradient transposed back."""
        x, = self.saved_tensors
        grad_x = None

        if isinstance(x, Tensor) and x.requires_grad:
            if self.dim0 is None and self.dim1 is None:
                # Default transpose swapped the last two dimensions.
                if grad_output.ndim < 2:
                    # Nothing was swapped for 0-D/1-D; pass the gradient through.
                    grad_x = grad_output.copy()
                else:
                    axes = list(range(grad_output.ndim))
                    axes[-2], axes[-1] = axes[-1], axes[-2]
                    grad_x = np.transpose(grad_output, axes)
            else:
                # Specific dimensions: swap them back (swap is self-inverse).
                axes = list(range(grad_output.ndim))
                axes[self.dim0], axes[self.dim1] = axes[self.dim1], axes[self.dim0]
                grad_x = np.transpose(grad_output, axes)

        return (grad_x,)


#| export
class PermuteBackward(Function):
    """Gradient computation for arbitrary axis permutation.

    If Y = X.permute(axes), then dL/dX = grad_Y.permute(inverse_axes),
    where the inverse permutation sends position axes[i] back to i.
    Multi-head attention uses (0, 2, 1, 3) to rearrange heads.
    """

    def __init__(self, tensor, axes):
        """
        Args:
            tensor: Input tensor.
            axes: Tuple of axis indices defining the forward permutation.
        """
        super().__init__(tensor)
        self.axes = axes
        # argsort(axes) yields the inverse permutation: if axes[i] = j,
        # then inverse_axes[j] = i.
        self.inverse_axes = tuple(np.argsort(axes))

    def apply(self, grad_output):
        """Return a 1-tuple with the gradient permuted back to input order."""
        x, = self.saved_tensors
        grad_x = None

        if isinstance(x, Tensor) and x.requires_grad:
            grad_x = np.transpose(grad_output, self.inverse_axes)

        return (grad_x,)


#| export
class EmbeddingBackward(Function):
    """Gradient computation for embedding lookup.

    If Y = Embedding[indices], then dL/dEmbedding[i] is the sum of all
    upstream gradients where index == i. Lookup is a gather; backward is
    a scatter-add into the embedding weight matrix.
    """

    def __init__(self, weight, indices):
        """
        Args:
            weight: Embedding weight matrix (Tensor).
            indices: Indices used for the forward lookup.
        """
        super().__init__(weight)
        self.indices = indices

    def apply(self, grad_output):
        """Return a 1-tuple with the scattered gradient for the weights."""
        weight, = self.saved_tensors
        grad_weight = None

        if isinstance(weight, Tensor) and weight.requires_grad:
            grad_weight = np.zeros_like(weight.data)

            # np.add.at performs unbuffered accumulation, so repeated
            # indices correctly sum their gradients.
            indices_flat = self.indices.data.astype(int).flatten()
            grad_output_reshaped = grad_output.reshape(-1, grad_output.shape[-1])
            np.add.at(grad_weight, indices_flat, grad_output_reshaped)

        return (grad_weight,)


class SliceBackward(Function):
    """Gradient computation for tensor slicing/indexing.

    If Y = X[key], then dL/dX[key] = grad_output and every other
    position receives zero gradient.

    Examples:
        >>> x = Tensor([1, 2, 3, 4, 5], requires_grad=True)
        >>> y = x[:3]
        >>> loss = y.sum()
        >>> loss.backward()
        >>> # x.grad = [1, 1, 1, 0, 0]
    """

    def __init__(self, tensor, key):
        """
        Args:
            tensor: Original tensor being sliced.
            key: Slicing key (index, slice, tuple of slices, etc.).
        """
        super().__init__(tensor)
        self.key = key
        self.original_shape = tensor.shape

    def apply(self, grad_output):
        """Return a 1-tuple scattering grad_output back into a zero tensor.

        Fix: the zero gradient now follows the upstream gradient's dtype
        instead of hard-coding float32, so float64 gradients are not
        silently downcast.
        """
        tensor, = self.saved_tensors
        grad_input = None

        if isinstance(tensor, Tensor) and tensor.requires_grad:
            grad_arr = np.asarray(grad_output)
            grad_input = np.zeros(self.original_shape, dtype=grad_arr.dtype)
            # Inverse of the forward slice: place gradients at the
            # selected positions, zeros everywhere else.
            grad_input[self.key] = grad_arr

        return (grad_input,)
"execution_count": null, + "id": "850e960e", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "reshape-backward", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| export\n", + "class ReshapeBackward(Function):\n", + " \"\"\"\n", + " Gradient computation for reshape operation.\n", + " \n", + " **Mathematical Rule:** If Y = X.reshape(new_shape), then:\n", + " - ∂Y/∂X = grad_Y.reshape(X.shape)\n", + " \n", + " **Key Insight:** Reshape just rearranges the same elements.\n", + " The gradient is simply reshaped back to the original shape!\n", + " \n", + " **Applications:** Flattening tensors for linear layers, reshaping\n", + " between convolutional and dense layers.\n", + " \"\"\"\n", + "\n", + " def __init__(self, tensor, original_shape):\n", + " \"\"\"\n", + " Args:\n", + " tensor: Input tensor\n", + " original_shape: Shape before reshape\n", + " \"\"\"\n", + " super().__init__(tensor)\n", + " self.original_shape = original_shape\n", + "\n", + " def apply(self, grad_output):\n", + " \"\"\"\n", + " Compute gradient for reshape.\n", + " \n", + " Args:\n", + " grad_output: Gradient flowing backward from output\n", + " \n", + " Returns:\n", + " Tuple with single gradient for input tensor\n", + " \n", + " **Mathematical Foundation:**\n", + " - ∂(X.reshape(...))/∂X = grad_output.reshape(X.shape)\n", + " - Just reshape the gradient back!\n", + " \"\"\"\n", + " x, = self.saved_tensors\n", + " grad_x = None\n", + "\n", + " if isinstance(x, Tensor) and x.requires_grad:\n", + " # Reshape gradient back to original shape\n", + " grad_x = grad_output.reshape(self.original_shape)\n", + "\n", + " return (grad_x,)" + ] + }, + { + "cell_type": "markdown", + "id": "42ebf05e", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### SumBackward - Gradient Rules for Reduction Operations\n", + "\n", + "Sum operations reduce tensor dimensions, so gradients must be broadcast back.\n", + "\n", + 
"**Mathematical Principle:**\n", + "```\n", + "If z = sum(a), then ∂z/∂a[i] = 1 for all i\n", + "Gradient is broadcasted from scalar result back to input shape.\n", + "```\n", + "\n", + "**Gradient Broadcasting Examples:**\n", + "```\n", + "Case 1: Full sum\n", + " Forward: a=[1,2,3] → sum() → z=6 (scalar)\n", + " Backward: grad_z=1 → broadcast → grad_a=[1,1,1]\n", + "\n", + "Case 2: Axis sum\n", + " Forward: a=[[1,2],[3,4]] → sum(axis=0) → z=[4,6]\n", + " Backward: grad_z=[1,1] → broadcast → grad_a=[[1,1],[1,1]]\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "92785a04", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "sum-backward", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| export\n", + "class SumBackward(Function):\n", + " \"\"\"\n", + " Gradient computation for tensor sum.\n", + " \n", + " **Mathematical Rule:** If z = sum(a), then ∂z/∂a[i] = 1 for all i\n", + " \n", + " **Key Insight:** Sum distributes the gradient equally to all input elements.\n", + " The gradient is broadcast from the reduced output back to input shape.\n", + " \n", + " **Applications:** Used in loss functions, mean operations, and\n", + " anywhere tensor reduction occurs.\n", + " \"\"\"\n", + "\n", + " def apply(self, grad_output):\n", + " \"\"\"\n", + " Compute gradients for sum operation.\n", + " \n", + " Args:\n", + " grad_output: Gradient flowing backward from output\n", + " \n", + " Returns:\n", + " Tuple containing gradient for the input tensor\n", + " \n", + " **Mathematical Foundation:**\n", + " - ∂sum(a)/∂a[i] = 1 → grad_a = ones_like(a) * grad_output\n", + " \"\"\"\n", + " tensor, = self.saved_tensors\n", + "\n", + " if isinstance(tensor, Tensor) and tensor.requires_grad:\n", + " # Gradient is 1 for all elements, scaled by grad_output\n", + " return np.ones_like(tensor.data) * grad_output,\n", + " return None," + ] + }, + { + "cell_type": "markdown", + "id": "0f3cf319", + 
def test_unit_function_classes():
    """🔬 Test Function classes."""
    print("🔬 Unit Test: Function Classes...")

    x = Tensor([1, 2, 3], requires_grad=True)
    y = Tensor([4, 5, 6], requires_grad=True)
    upstream = np.array([1, 1, 1])

    # AddBackward: both operands receive the upstream gradient unchanged.
    grad_a, grad_b = AddBackward(x, y).apply(upstream)
    assert np.allclose(grad_a, upstream), f"AddBackward grad_a failed: {grad_a}"
    assert np.allclose(grad_b, upstream), f"AddBackward grad_b failed: {grad_b}"

    # MulBackward: each operand's gradient is the other operand's data.
    grad_a, grad_b = MulBackward(x, y).apply(upstream)
    assert np.allclose(grad_a, y.data), f"MulBackward grad_a failed: {grad_a}"
    assert np.allclose(grad_b, x.data), f"MulBackward grad_b failed: {grad_b}"

    # MatmulBackward: gradient shapes must match the matrix inputs.
    m1 = Tensor([[1, 2], [3, 4]], requires_grad=True)
    m2 = Tensor([[5, 6], [7, 8]], requires_grad=True)
    grad_a, grad_b = MatmulBackward(m1, m2).apply(np.ones((2, 2)))
    assert grad_a.shape == m1.shape, f"MatmulBackward grad_a shape: {grad_a.shape}"
    assert grad_b.shape == m2.shape, f"MatmulBackward grad_b shape: {grad_b.shape}"

    print("✅ Function classes work correctly!")

if __name__ == "__main__":
    test_unit_function_classes()
#| export
class ReLUBackward(Function):
    """Gradient computation for ReLU activation.

    ReLU: f(x) = max(0, x); derivative: f'(x) = 1 if x > 0, else 0.
    """

    def __init__(self, input_tensor):
        """Save the input tensor for the backward pass."""
        super().__init__(input_tensor)

    def apply(self, grad_output):
        """Mask the upstream gradient where the input was non-positive."""
        source, = self.saved_tensors
        if not (isinstance(source, Tensor) and source.requires_grad):
            return None,
        # Pass gradient through only where the forward input was positive.
        mask = (source.data > 0).astype(np.float32)
        return grad_output * mask,


#| export
class SigmoidBackward(Function):
    """Gradient computation for sigmoid activation.

    Sigmoid: σ(x) = 1/(1 + exp(-x)); derivative: σ'(x) = σ(x) * (1 - σ(x)).
    """

    def __init__(self, input_tensor, output_tensor):
        """Save input plus the forward output (avoids recomputing σ(x)).

        Args:
            input_tensor: Original input to sigmoid.
            output_tensor: Output of sigmoid (saves recomputation).
        """
        super().__init__(input_tensor)
        self.output_data = output_tensor.data

    def apply(self, grad_output):
        """Scale the upstream gradient by σ(x) * (1 - σ(x))."""
        source, = self.saved_tensors
        if not (isinstance(source, Tensor) and source.requires_grad):
            return None,
        local_grad = self.output_data * (1 - self.output_data)
        return grad_output * local_grad,


#| export
class SoftmaxBackward(Function):
    """Gradient computation for softmax activation.

    softmax(x)[i] = exp(x[i]) / sum(exp(x)). Because of the shared
    normalizer, each input's gradient depends on ALL softmax outputs:

        grad_x = softmax * (grad_y - sum(grad_y * softmax, keepdims=True))
    """

    def __init__(self, input_tensor, output_tensor, dim=-1):
        """Save input, forward output, and the softmax dimension.

        Args:
            input_tensor: Original input to softmax.
            output_tensor: Output of softmax (needed for the gradient).
            dim: Dimension along which softmax was applied.
        """
        super().__init__(input_tensor)
        self.output_data = output_tensor.data
        self.dim = dim

    def apply(self, grad_output):
        """Apply the vectorized softmax Jacobian-vector product."""
        source, = self.saved_tensors
        if not (isinstance(source, Tensor) and source.requires_grad):
            return (None,)
        # Inner product of upstream gradient and softmax along self.dim.
        inner = np.sum(grad_output * self.output_data, axis=self.dim, keepdims=True)
        return (self.output_data * (grad_output - inner),)


#| export
class GELUBackward(Function):
    """Gradient computation for GELU activation.

    GELU: f(x) = x * Φ(x), approximated via
        gelu(x) ≈ 0.5 * x * (1 + tanh(√(2/π) * (x + 0.044715 * x³)))
    Unlike ReLU, GELU gives non-zero gradients for negative inputs,
    which helps train deep networks.
    """

    def __init__(self, input_tensor):
        """Save the input tensor for the backward pass."""
        super().__init__(input_tensor)

    def apply(self, grad_output):
        """Differentiate the tanh approximation of GELU.

        d/dx ≈ 0.5*(1 + tanh(u)) + 0.5*x*sech²(u)*du/dx
        where u = √(2/π)*(x + 0.044715*x³) and du/dx = √(2/π)*(1 + 0.134145*x²).
        """
        source, = self.saved_tensors
        if not (isinstance(source, Tensor) and source.requires_grad):
            return (None,)
        x = source.data
        c = np.sqrt(2.0 / np.pi)
        u = c * (x + 0.044715 * x ** 3)
        t = np.tanh(u)
        sech_sq = 1 - t ** 2            # sech²(u) = 1 - tanh²(u)
        du_dx = c * (1 + 0.134145 * x ** 2)  # 0.134145 = 3 * 0.044715
        local_grad = 0.5 * (1 + t) + 0.5 * x * sech_sq * du_dx
        return (grad_output * local_grad,)


#| export
class MSEBackward(Function):
    """Gradient computation for Mean Squared Error loss.

    MSE: L = mean((predictions - targets)²)
    Derivative: dL/dpredictions = 2 * (predictions - targets) / N
    """

    def __init__(self, predictions, targets):
        """Save predictions; cache target values and sample count."""
        super().__init__(predictions)
        self.targets_data = targets.data
        self.num_samples = np.size(targets.data)

    def apply(self, grad_output):
        """Return the MSE gradient scaled by the upstream gradient."""
        preds, = self.saved_tensors
        if not (isinstance(preds, Tensor) and preds.requires_grad):
            return None,
        local = 2.0 * (preds.data - self.targets_data) / self.num_samples
        return local * grad_output,


#| export
class BCEBackward(Function):
    """Gradient computation for Binary Cross-Entropy loss.

    BCE: L = -[y*log(p) + (1-y)*log(1-p)]
    Derivative: dL/dp = (p - y) / (p*(1-p)*N)
    """

    def __init__(self, predictions, targets):
        """Save predictions; cache target values and sample count."""
        super().__init__(predictions)
        self.targets_data = targets.data
        self.num_samples = np.size(targets.data)

    def apply(self, grad_output):
        """Return the BCE gradient, clipping p away from {0, 1} for stability."""
        preds, = self.saved_tensors
        if not (isinstance(preds, Tensor) and preds.requires_grad):
            return None,
        # Clip to avoid division by zero at p ∈ {0, 1}.
        p = np.clip(preds.data, EPSILON, 1 - EPSILON)
        y = self.targets_data
        local = (p - y) / (p * (1 - p) * self.num_samples)
        return local * grad_output,
predictions and targets\n", + " - It naturally scales with how wrong we are\n", + " - It's numerically stable when computed via softmax\n", + " \"\"\"\n", + " \n", + " def __init__(self, logits, targets):\n", + " \"\"\"Initialize with logits and target class indices.\"\"\"\n", + " super().__init__(logits)\n", + " self.targets_data = targets.data.astype(int)\n", + " self.batch_size = logits.data.shape[0]\n", + " self.num_classes = logits.data.shape[1]\n", + " \n", + " def apply(self, grad_output):\n", + " \"\"\"Compute gradient for cross-entropy loss.\"\"\"\n", + " logits, = self.saved_tensors\n", + " \n", + " if isinstance(logits, Tensor) and logits.requires_grad:\n", + " # Compute softmax probabilities\n", + " # Using stable softmax: subtract max for numerical stability\n", + " logits_data = logits.data\n", + " max_logits = np.max(logits_data, axis=1, keepdims=True)\n", + " exp_logits = np.exp(logits_data - max_logits)\n", + " softmax = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)\n", + " \n", + " # Create one-hot encoding of targets\n", + " one_hot = np.zeros((self.batch_size, self.num_classes), dtype=np.float32)\n", + " one_hot[np.arange(self.batch_size), self.targets_data] = 1.0\n", + " \n", + " # Gradient: (softmax - one_hot) / batch_size\n", + " grad = (softmax - one_hot) / self.batch_size\n", + " \n", + " return grad * grad_output,\n", + " return None," + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "62bff14f", + "metadata": { + "nbgrader": { + "grade": false, + "grade_id": "enable-autograd", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| export\n", + "def enable_autograd():\n", + " \"\"\"\n", + " Enable gradient tracking for all Tensor operations.\n", + "\n", + " This function enhances the existing Tensor class with autograd capabilities.\n", + " Call this once to activate gradients globally.\n", + "\n", + " **What it does:**\n", + " - Replaces Tensor operations with gradient-tracking versions\n", + " - 
Adds backward() method for reverse-mode differentiation\n", + " - Enables computation graph building\n", + " - Maintains full backward compatibility\n", + "\n", + " **After calling this:**\n", + " - Tensor operations will track computation graphs\n", + " - backward() method becomes available\n", + " - Gradients will flow through operations\n", + " - requires_grad=True enables tracking per tensor\n", + "\n", + " **Example:**\n", + " ```python\n", + " enable_autograd() # Call once\n", + " x = Tensor([2.0], requires_grad=True)\n", + " y = x * 3\n", + " y.backward()\n", + " print(x.grad) # [3.0]\n", + " ```\n", + " \"\"\"\n", + "\n", + " # Educational Note: hasattr() is LEGITIMATE here because:\n", + " # 1. This is a runtime monkey-patch system (meta-programming)\n", + " # 2. We're checking if a class has been dynamically modified\n", + " # 3. _autograd_enabled is a marker attribute we add at runtime\n", + " # This is the CORRECT use of hasattr() for dynamic class modification\n", + " if hasattr(Tensor, '_autograd_enabled'):\n", + " print(\"⚠️ Autograd already enabled\")\n", + " return\n", + "\n", + " # Store original operations\n", + " # These are guaranteed to exist from Module 01 (Tensor class)\n", + " _original_add = Tensor.__add__\n", + " _original_sub = Tensor.__sub__\n", + " _original_mul = Tensor.__mul__\n", + " _original_div = Tensor.__truediv__\n", + "\n", + " # These methods are also guaranteed from Module 01 - trust Single Tensor Class\n", + " _original_matmul = Tensor.matmul\n", + " _original_transpose = Tensor.transpose\n", + " _original_reshape = Tensor.reshape\n", + "\n", + " # Enhanced operations that track gradients\n", + " def tracked_add(self, other):\n", + " \"\"\"\n", + " Addition with gradient tracking.\n", + " \n", + " Enhances the original __add__ method to build computation graphs\n", + " when requires_grad=True for any input.\n", + " \"\"\"\n", + " # Convert scalar to Tensor if needed\n", + " if not isinstance(other, Tensor):\n", + " other = 
Tensor(other)\n", + "\n", + " # Call original operation\n", + " result = _original_add(self, other)\n", + "\n", + " # Track gradient if needed\n", + " if self.requires_grad or other.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = AddBackward(self, other)\n", + "\n", + " return result\n", + "\n", + " def tracked_mul(self, other):\n", + " \"\"\"\n", + " Multiplication with gradient tracking.\n", + " \n", + " Enhances the original __mul__ method to build computation graphs\n", + " when requires_grad=True for any input.\n", + " \"\"\"\n", + " # Convert scalar to Tensor if needed for consistency\n", + " if not isinstance(other, Tensor):\n", + " other_tensor = Tensor(other)\n", + " else:\n", + " other_tensor = other\n", + "\n", + " # Call original operation\n", + " result = _original_mul(self, other)\n", + "\n", + " # Track gradient if needed\n", + " if self.requires_grad or (isinstance(other, Tensor) and other.requires_grad):\n", + " result.requires_grad = True\n", + " result._grad_fn = MulBackward(self, other)\n", + "\n", + " return result\n", + "\n", + " def tracked_matmul(self, other):\n", + " \"\"\"\n", + " Matrix multiplication with gradient tracking.\n", + "\n", + " Enhances the original matmul method to build computation graphs\n", + " when requires_grad=True for any input.\n", + " \"\"\"\n", + " # Call original matmul from Module 01\n", + " result = _original_matmul(self, other)\n", + "\n", + " # Track gradient if needed\n", + " if self.requires_grad or other.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = MatmulBackward(self, other)\n", + "\n", + " return result\n", + "\n", + " def tracked_transpose(self, dim0=None, dim1=None):\n", + " \"\"\"\n", + " Transpose with gradient tracking.\n", + "\n", + " Enhances the original transpose method to build computation graphs\n", + " when requires_grad=True for the input.\n", + " \"\"\"\n", + " # Call original transpose from Module 01\n", + " result = 
_original_transpose(self, dim0, dim1)\n", + "\n", + " # Track gradient if needed\n", + " if self.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = TransposeBackward(self, dim0, dim1)\n", + "\n", + " return result\n", + "\n", + " def tracked_reshape(self, *shape):\n", + " \"\"\"\n", + " Reshape with gradient tracking.\n", + "\n", + " Enhances the original reshape method to build computation graphs\n", + " when requires_grad=True for the input.\n", + " \"\"\"\n", + " original_shape = self.shape\n", + "\n", + " # Call original reshape from Module 01\n", + " result = _original_reshape(self, *shape)\n", + "\n", + " # Track gradient if needed\n", + " if self.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = ReshapeBackward(self, original_shape)\n", + "\n", + " return result\n", + "\n", + " def tracked_sub(self, other):\n", + " \"\"\"\n", + " Subtraction with gradient tracking.\n", + " \n", + " Enhances the original __sub__ method to build computation graphs\n", + " when requires_grad=True for any input.\n", + " \"\"\"\n", + " # Convert scalar to Tensor if needed\n", + " if not isinstance(other, Tensor):\n", + " other = Tensor(other)\n", + "\n", + " # Call original operation\n", + " result = _original_sub(self, other)\n", + "\n", + " # Track gradient if needed\n", + " if self.requires_grad or other.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = SubBackward(self, other)\n", + "\n", + " return result\n", + "\n", + " def tracked_div(self, other):\n", + " \"\"\"\n", + " Division with gradient tracking.\n", + " \n", + " Enhances the original __truediv__ method to build computation graphs\n", + " when requires_grad=True for any input.\n", + " \"\"\"\n", + " # Convert scalar to Tensor if needed\n", + " if not isinstance(other, Tensor):\n", + " other = Tensor(other)\n", + "\n", + " # Call original operation\n", + " result = _original_div(self, other)\n", + "\n", + " # Track gradient if needed\n", 
+ " if self.requires_grad or other.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = DivBackward(self, other)\n", + "\n", + " return result\n", + "\n", + " def sum_op(self, axis=None, keepdims=False):\n", + " \"\"\"\n", + " Sum operation with gradient tracking.\n", + " \n", + " Creates a new sum method that builds computation graphs\n", + " when requires_grad=True.\n", + " \"\"\"\n", + " result_data = np.sum(self.data, axis=axis, keepdims=keepdims)\n", + " result = Tensor(result_data)\n", + "\n", + " if self.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = SumBackward(self)\n", + "\n", + " return result\n", + "\n", + " def backward(self, gradient=None):\n", + " \"\"\"\n", + " Compute gradients via backpropagation.\n", + "\n", + " This is the key method that makes training possible!\n", + " It implements reverse-mode automatic differentiation.\n", + " \n", + " **Algorithm:**\n", + " 1. Initialize gradient if not provided (for scalar outputs)\n", + " 2. Accumulate gradient in self.grad\n", + " 3. If this tensor has a _grad_fn, call it to propagate gradients\n", + " 4. 
Recursively call backward() on parent tensors\n", + " \n", + " **Example:**\n", + " ```python\n", + " x = Tensor([2.0], requires_grad=True)\n", + " y = x * 3\n", + " y.backward() # Computes gradients for x\n", + " print(x.grad) # [3.0]\n", + " ```\n", + " \"\"\"\n", + " # Only compute gradients if required\n", + " if not self.requires_grad:\n", + " return\n", + "\n", + " # Initialize gradient if not provided (for scalar outputs)\n", + " if gradient is None:\n", + " if self.data.size == 1:\n", + " gradient = np.ones_like(self.data)\n", + " else:\n", + " raise ValueError(\n", + " f\"backward() called on non-scalar tensor without gradient argument.\\n\"\n", + " f\" Tensor shape: {self.shape}\\n\"\n", + " f\" Issue: For non-scalar outputs, you must provide the gradient from the next layer.\\n\"\n", + " f\" Fix: Call backward(gradient) with the gradient tensor from the loss function.\"\n", + " )\n", + "\n", + " # Initialize or accumulate gradient\n", + " if self.grad is None:\n", + " self.grad = np.zeros_like(self.data)\n", + " \n", + " # Handle broadcasting: sum gradient to match self.data shape\n", + " # This happens when operations broadcast tensors (e.g., adding bias to batch)\n", + " if gradient.shape != self.grad.shape:\n", + " # Step 1: Remove extra leading dimensions added during forward pass\n", + " # Example: gradient (batch_size, features) → self.grad (features,)\n", + " while gradient.ndim > self.grad.ndim:\n", + " gradient = gradient.sum(axis=0)\n", + " \n", + " # Step 2: Sum over dimensions that were size-1 in original tensor\n", + " # Example: bias with shape (1,) broadcast to (batch_size,) during forward\n", + " for i in range(gradient.ndim):\n", + " if self.grad.shape[i] == 1 and gradient.shape[i] != 1:\n", + " gradient = gradient.sum(axis=i, keepdims=True)\n", + " \n", + " self.grad += gradient\n", + "\n", + " # Propagate gradients through computation graph\n", + " # _grad_fn is set by autograd enhancement when tensor is created from an operation\n", + 
" grad_fn = getattr(self, '_grad_fn', None)\n", + " if grad_fn is not None:\n", + " grads = grad_fn.apply(gradient)\n", + "\n", + " # Recursively call backward on parent tensors\n", + " for tensor, grad in zip(grad_fn.saved_tensors, grads):\n", + " if isinstance(tensor, Tensor) and tensor.requires_grad and grad is not None:\n", + " tensor.backward(grad)\n", + "\n", + " def zero_grad(self):\n", + " \"\"\"\n", + " Reset gradients to zero.\n", + " \n", + " Call this before each backward pass to prevent gradient accumulation\n", + " from previous iterations.\n", + " \"\"\"\n", + " self.grad = None\n", + "\n", + " # Install enhanced operations\n", + " Tensor.__add__ = tracked_add\n", + " Tensor.__sub__ = tracked_sub\n", + " Tensor.__mul__ = tracked_mul\n", + " Tensor.__truediv__ = tracked_div\n", + " Tensor.matmul = tracked_matmul\n", + " Tensor.transpose = tracked_transpose\n", + " Tensor.reshape = tracked_reshape\n", + " Tensor.sum = sum_op\n", + " Tensor.backward = backward\n", + " Tensor.zero_grad = zero_grad\n", + "\n", + " # Patch activations and losses to track gradients\n", + " try:\n", + " from tinytorch.core.activations import Sigmoid, ReLU, Softmax, GELU\n", + " from tinytorch.core.losses import BinaryCrossEntropyLoss, MSELoss, CrossEntropyLoss\n", + " \n", + " # Store original methods\n", + " _original_sigmoid_forward = Sigmoid.forward\n", + " _original_relu_forward = ReLU.forward\n", + " _original_softmax_forward = Softmax.forward\n", + " _original_gelu_forward = GELU.forward\n", + " _original_bce_forward = BinaryCrossEntropyLoss.forward\n", + " _original_mse_forward = MSELoss.forward\n", + " _original_ce_forward = CrossEntropyLoss.forward\n", + " \n", + " def tracked_sigmoid_forward(self, x):\n", + " \"\"\"Sigmoid with gradient tracking.\"\"\"\n", + " result_data = 1.0 / (1.0 + np.exp(-x.data))\n", + " result = Tensor(result_data)\n", + " \n", + " if x.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = SigmoidBackward(x, 
result)\n", + " \n", + " return result\n", + " \n", + " def tracked_relu_forward(self, x):\n", + " \"\"\"ReLU with gradient tracking.\"\"\"\n", + " result_data = np.maximum(0, x.data)\n", + " result = Tensor(result_data)\n", + " \n", + " if x.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = ReLUBackward(x)\n", + " \n", + " return result\n", + " \n", + " def tracked_softmax_forward(self, x, dim=-1):\n", + " \"\"\"Softmax with gradient tracking.\"\"\"\n", + " # Call original forward to get result using Tensor operations\n", + " result = _original_softmax_forward(self, x, dim=dim)\n", + " \n", + " # Attach the correct gradient function\n", + " if x.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = SoftmaxBackward(x, result, dim)\n", + " \n", + " return result\n", + " \n", + " def tracked_gelu_forward(self, x):\n", + " \"\"\"GELU with gradient tracking.\"\"\"\n", + " # Call original forward to get result\n", + " result = _original_gelu_forward(self, x)\n", + " \n", + " # Attach the correct gradient function\n", + " if x.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = GELUBackward(x)\n", + " \n", + " return result\n", + " \n", + " def tracked_bce_forward(self, predictions, targets):\n", + " \"\"\"Binary cross-entropy with gradient tracking.\"\"\"\n", + " # Compute BCE loss\n", + " eps = EPSILON\n", + " clamped_preds = np.clip(predictions.data, eps, 1 - eps)\n", + " log_preds = np.log(clamped_preds)\n", + " log_one_minus_preds = np.log(1 - clamped_preds)\n", + " bce_per_sample = -(targets.data * log_preds + (1 - targets.data) * log_one_minus_preds)\n", + " bce_loss = np.mean(bce_per_sample)\n", + " \n", + " result = Tensor(bce_loss)\n", + " \n", + " if predictions.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = BCEBackward(predictions, targets)\n", + " \n", + " return result\n", + " \n", + " def tracked_mse_forward(self, predictions, targets):\n", + " 
\"\"\"MSE loss with gradient tracking.\"\"\"\n", + " # Compute MSE loss\n", + " diff = predictions.data - targets.data\n", + " squared_diff = diff ** 2\n", + " mse = np.mean(squared_diff)\n", + " \n", + " result = Tensor(mse)\n", + " \n", + " if predictions.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = MSEBackward(predictions, targets)\n", + " \n", + " return result\n", + " \n", + " def tracked_ce_forward(self, logits, targets):\n", + " \"\"\"Cross-entropy loss with gradient tracking.\"\"\"\n", + " from tinytorch.core.losses import log_softmax\n", + " \n", + " # Compute log-softmax for numerical stability\n", + " log_probs = log_softmax(logits, dim=-1)\n", + " \n", + " # Select log-probabilities for correct classes\n", + " batch_size = logits.shape[0]\n", + " target_indices = targets.data.astype(int)\n", + " selected_log_probs = log_probs.data[np.arange(batch_size), target_indices]\n", + " \n", + " # Return negative mean\n", + " ce_loss = -np.mean(selected_log_probs)\n", + " \n", + " result = Tensor(ce_loss)\n", + " \n", + " if logits.requires_grad:\n", + " result.requires_grad = True\n", + " result._grad_fn = CrossEntropyBackward(logits, targets)\n", + " \n", + " return result\n", + " \n", + " # Install patched methods\n", + " Sigmoid.forward = tracked_sigmoid_forward\n", + " ReLU.forward = tracked_relu_forward\n", + " Softmax.forward = tracked_softmax_forward\n", + " GELU.forward = tracked_gelu_forward\n", + " BinaryCrossEntropyLoss.forward = tracked_bce_forward\n", + " MSELoss.forward = tracked_mse_forward\n", + " CrossEntropyLoss.forward = tracked_ce_forward\n", + " \n", + " except ImportError:\n", + " # Activations/losses not yet available (happens during module development)\n", + " pass\n", + "\n", + " # Mark as enabled\n", + " Tensor._autograd_enabled = True\n", + "\n", + " print(\"✅ Autograd enabled! 
Tensors now track gradients.\")\n", + " print(\" - Operations build computation graphs\")\n", + " print(\" - backward() computes gradients\")\n", + " print(\" - requires_grad=True enables tracking\")\n", + "\n", + "# Auto-enable when module is imported\n", + "enable_autograd()" + ] + }, + { + "cell_type": "markdown", + "id": "ef614cde", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### 🔬 Unit Test: Tensor Autograd Enhancement\n", + "This test validates our enhanced Tensor class computes gradients correctly.\n", + "**What we're testing**: Gradient computation and chain rule implementation\n", + "**Why it matters**: This is the core of automatic differentiation\n", + "**Expected**: Correct gradients for various operations and computation graphs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99d6c0e7", + "metadata": { + "nbgrader": { + "grade": true, + "grade_id": "test-tensor-autograd", + "locked": true, + "points": 20 + } + }, + "outputs": [], + "source": [ + "def test_unit_tensor_autograd():\n", + " \"\"\"🔬 Test Tensor autograd enhancement.\"\"\"\n", + " print(\"🔬 Unit Test: Tensor Autograd Enhancement...\")\n", + "\n", + " # Test simple gradient computation\n", + " x = Tensor([2.0], requires_grad=True)\n", + " y = x * 3\n", + " z = y + 1 # z = 3x + 1, so dz/dx = 3\n", + "\n", + " z.backward()\n", + " assert np.allclose(x.grad, [3.0]), f\"Expected [3.0], got {x.grad}\"\n", + "\n", + " # Test matrix multiplication gradients\n", + " a = Tensor([[1.0, 2.0]], requires_grad=True) # 1x2\n", + " b = Tensor([[3.0], [4.0]], requires_grad=True) # 2x1\n", + " c = a.matmul(b) # 1x1, result = [[11.0]]\n", + "\n", + " c.backward()\n", + " assert np.allclose(a.grad, [[3.0, 4.0]]), f\"Expected [[3.0, 4.0]], got {a.grad}\"\n", + " assert np.allclose(b.grad, [[1.0], [2.0]]), f\"Expected [[1.0], [2.0]], got {b.grad}\"\n", + "\n", + " # Test computation graph with multiple operations\n", + " x = Tensor([1.0, 2.0], 
requires_grad=True)\n", + " y = x * 2 # y = [2, 4]\n", + " z = y.sum() # z = 6\n", + "\n", + " z.backward()\n", + " assert np.allclose(x.grad, [2.0, 2.0]), f\"Expected [2.0, 2.0], got {x.grad}\"\n", + "\n", + " print(\"✅ Tensor autograd enhancement works correctly!\")\n", + "\n", + "if __name__ == \"__main__\":\n", + " test_unit_tensor_autograd()" + ] + }, + { + "cell_type": "markdown", + "id": "7bc6df99", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "## 🧪 Module Integration Test\n", + "\n", + "Final validation that everything works together correctly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "98054339", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": true, + "grade_id": "module-integration", + "locked": true, + "points": 25 + } + }, + "outputs": [], + "source": [ + "def test_module():\n", + " \"\"\"\n", + " Comprehensive test of entire module functionality.\n", + "\n", + " This final test runs before module summary to ensure:\n", + " - All unit tests pass\n", + " - Autograd works for complex computation graphs\n", + " - Module is ready for integration with TinyTorch\n", + " \"\"\"\n", + " print(\"🧪 RUNNING MODULE INTEGRATION TEST\")\n", + " print(\"=\" * 50)\n", + "\n", + " # Run all unit tests\n", + " print(\"Running unit tests...\")\n", + " test_unit_function_classes()\n", + " test_unit_tensor_autograd()\n", + "\n", + " print(\"\\nRunning integration scenarios...\")\n", + "\n", + " # Test 1: Multi-layer computation graph\n", + " print(\"🔬 Integration Test: Multi-layer Neural Network...\")\n", + "\n", + " # Create a 3-layer computation: x -> Linear -> Linear -> Linear -> loss\n", + " x = Tensor([[1.0, 2.0]], requires_grad=True)\n", + " W1 = Tensor([[0.5, 0.3, 0.1], [0.2, 0.4, 0.6]], requires_grad=True)\n", + " b1 = Tensor([[0.1, 0.2, 0.3]], requires_grad=True)\n", + "\n", + " # First layer\n", + " h1 = x.matmul(W1) + b1\n", + " assert h1.shape == (1, 3)\n", + " 
assert h1.requires_grad == True\n", + "\n", + " # Second layer\n", + " W2 = Tensor([[0.1], [0.2], [0.3]], requires_grad=True)\n", + " h2 = h1.matmul(W2)\n", + " assert h2.shape == (1, 1)\n", + "\n", + " # Compute simple loss (just square the output for testing)\n", + " loss = h2 * h2\n", + "\n", + " # Backward pass\n", + " loss.backward()\n", + "\n", + " # Verify all parameters have gradients\n", + " assert x.grad is not None\n", + " assert W1.grad is not None\n", + " assert b1.grad is not None\n", + " assert W2.grad is not None\n", + " assert x.grad.shape == x.shape\n", + " assert W1.grad.shape == W1.shape\n", + "\n", + " print(\"✅ Multi-layer neural network gradients work!\")\n", + "\n", + " # Test 2: Gradient accumulation\n", + " print(\"🔬 Integration Test: Gradient Accumulation...\")\n", + "\n", + " x = Tensor([2.0], requires_grad=True)\n", + "\n", + " # First computation\n", + " y1 = x * 3\n", + " y1.backward()\n", + " first_grad = x.grad.copy()\n", + "\n", + " # Second computation (should accumulate)\n", + " y2 = x * 5\n", + " y2.backward()\n", + "\n", + " assert np.allclose(x.grad, first_grad + 5.0), \"Gradients should accumulate\"\n", + " print(\"✅ Gradient accumulation works!\")\n", + "\n", + " # Test 3: Complex mathematical operations\n", + " print(\"🔬 Integration Test: Complex Operations...\")\n", + "\n", + " a = Tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)\n", + " b = Tensor([[2.0, 1.0], [1.0, 2.0]], requires_grad=True)\n", + "\n", + " # Complex computation: ((a @ b) + a) * b\n", + " temp1 = a.matmul(b) # Matrix multiplication\n", + " temp2 = temp1 + a # Addition\n", + " result = temp2 * b # Element-wise multiplication\n", + " final = result.sum() # Sum reduction\n", + "\n", + " final.backward()\n", + "\n", + " assert a.grad is not None\n", + " assert b.grad is not None\n", + " assert a.grad.shape == a.shape\n", + " assert b.grad.shape == b.shape\n", + "\n", + " print(\"✅ Complex mathematical operations work!\")\n", + "\n", + " print(\"\\n\" + 
\"=\" * 50)\n", + " print(\"🎉 ALL TESTS PASSED! Module ready for export.\")\n", + " print(\"Run: tito module complete 05_autograd\")\n", + "\n", + "# Test function defined above, will be called in main block" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bfd8ab62", + "metadata": {}, + "outputs": [], + "source": [ + "# Run comprehensive module test\n", + "if __name__ == \"__main__\":\n", + " test_module()" + ] + }, + { + "cell_type": "markdown", + "id": "8e5021db", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 🤔 ML Systems Reflection Questions\n", + "\n", + "Before we wrap up, reflect on these systems-level questions. Use only knowledge from Modules 01-05 (no forward references to concepts you haven't learned yet).\n", + "\n", + "### Question 1: Computational Graph Memory\n", + "**Scenario**: A 10-layer neural network processes a single sample. Each layer performs matrix multiplication (matmul) and addition (bias).\n", + "\n", + "**Question**: How much memory does the computation graph use compared to just storing the weights?\n", + "\n", + "**Consider**:\n", + "- What tensors must be saved during forward pass for backward pass?\n", + "- If weights take 10MB total, estimate graph memory overhead\n", + "- When is the graph freed?\n", + "\n", + "---\n", + "\n", + "### Question 2: Gradient Accumulation\n", + "**Scenario**: An embedding layer is shared between two paths in a network (like encoder-decoder attention).\n", + "\n", + "**Question**: Why does gradient accumulation (`grad = grad + new_grad`) save memory during training? What's the trade-off?\n", + "\n", + "**Consider**:\n", + "- What happens if you process a large batch all at once vs. multiple smaller batches?\n", + "- Memory usage: storing intermediate activations vs. 
recomputing forward passes\n", + "- Training behavior: does gradient accumulation change what the model learns?\n", + "\n", + "---\n", + "\n", + "### Question 3: Backward Pass Cost\n", + "**Scenario**: A forward pass through a 3-layer MLP takes 10ms.\n", + "\n", + "**Question**: Is the backward pass faster, slower, or the same speed as the forward pass? Why?\n", + "\n", + "**Consider**:\n", + "- Operations in forward pass: matmul, activation, addition\n", + "- Operations in backward pass: matmul (for gradients), element-wise multiplication (chain rule)\n", + "- Number of matmul operations: forward vs. backward\n", + "- Memory access patterns: reading vs. writing gradients\n", + "\n", + "**Hint**: Think about matrix multiplication gradients:\n", + "```\n", + "Forward: y = x @ W (one matmul)\n", + "Backward: grad_x = grad_y @ W.T (one matmul)\n", + " grad_W = x.T @ grad_y (another matmul)\n", + "```\n", + "\n", + "---\n", + "\n", + "### Question 4: Graph Retention\n", + "**Scenario**: You're training a language model that processes sequences of varying lengths.\n", + "\n", + "**Question**: When should you call `.zero_grad()`? What happens if you forget?\n", + "\n", + "**Consider**:\n", + "- Gradient accumulation behavior (Question 2)\n", + "- Memory growth over multiple iterations\n", + "- Training correctness: what values do parameters see?\n", + "\n", + "**Example**:\n", + "```python\n", + "for batch in dataloader:\n", + " # Should zero_grad() go here?\n", + " loss = model(batch)\n", + " loss.backward()\n", + " optimizer.step()\n", + " # Or should zero_grad() go here?\n", + "```\n", + "\n", + "---\n", + "\n", + "### Question 5: Production Pattern\n", + "**Scenario**: PyTorch and TensorFlow use `requires_grad` flags instead of always tracking gradients for every tensor.\n", + "\n", + "**Question**: Why? What's the performance benefit of making gradient tracking opt-in?\n", + "\n", + "**Consider**:\n", + "- Memory: What gets stored when requires_grad=True vs. 
False?\n", + "- Compute: What operations are skipped when requires_grad=False?\n", + "- Typical model: What percentage of tensors need gradients?\n", + " - Inputs (data): requires_grad = ?\n", + " - Weights: requires_grad = ?\n", + " - Intermediate activations: requires_grad = ?\n", + " - Targets (labels): requires_grad = ?\n", + "\n", + "**Hint**: In a typical training loop, think about:\n", + "- How many tensors are created per forward pass?\n", + "- How many of those tensors are actually parameters that need updates?\n", + "- What's the memory multiplier for gradient tracking?\n", + "\n", + "---\n", + "\n", + "### Reflection Prompts\n", + "\n", + "After answering these questions, consider:\n", + "1. **Which surprised you most?** What behavior was counterintuitive?\n", + "2. **What trade-offs exist?** Memory vs. compute? Simplicity vs. efficiency?\n", + "3. **How does this connect to Module 01?** Why did we include requires_grad, grad, and backward() from the start?\n", + "4. **What production patterns emerged?** What choices would you make differently for a research prototype vs. production system?\n", + "\n", + "These questions prepare you for Module 06 (Optimizers), where you'll use these gradients to actually update parameters and train models!" + ] + }, + { + "cell_type": "markdown", + "id": "7fbbb919", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 🎯 MODULE SUMMARY: Autograd Engine\n", + "\n", + "Congratulations! 
You've built the gradient engine that makes neural networks learn!\n", + "\n", + "### Key Accomplishments ⭐⭐\n", + "- **Enhanced Tensor class** with backward() method (no new wrapper classes!)\n", + "- **Built computation graph tracking** for automatic differentiation\n", + "- **Implemented Function classes** (Add, Mul, Matmul, Sum) with correct gradients\n", + "- **Created enable_autograd()** function that activates gradients globally\n", + "- **Tested complex multi-layer** computation graphs with gradient propagation\n", + "- **All tests pass** ✅ (validated by `test_module()`)\n", + "\n", + "### Ready for Next Steps 🚀\n", + "Your autograd implementation enables optimization! The dormant gradient features from Module 01 are now fully active. Every tensor can track gradients, every operation builds computation graphs, and backward() computes gradients automatically.\n", + "\n", + "**What you can do now:**\n", + "```python\n", + "# Create tensors with gradient tracking\n", + "x = Tensor([2.0], requires_grad=True)\n", + "W = Tensor([[0.5, 0.3]], requires_grad=True)\n", + "\n", + "# Build computation graphs automatically\n", + "y = x.matmul(W.T) # Forward pass\n", + "loss = (y - 1.0) ** 2 # Simple loss\n", + "\n", + "# Compute gradients automatically\n", + "loss.backward() # Magic happens here!\n", + "\n", + "# Access gradients\n", + "print(f\"x.grad: {x.grad}\") # Gradient w.r.t. x\n", + "print(f\"W.grad: {W.grad}\") # Gradient w.r.t. W\n", + "```\n", + "\n", + "Export with: `tito module complete 05_autograd`\n", + "\n", + "**Next**: Module 06 will add optimizers (SGD, Adam) that use these gradients to actually train neural networks! 
🎯\n", + "\n", + "### 📈 Progress: Autograd ✓\n", + "```\n", + "✅ Module 01: Tensor (Foundation)\n", + "✅ Module 02: Activations (Non-linearities)\n", + "✅ Module 03: Layers (Building blocks)\n", + "✅ Module 04: Losses (Training objectives)\n", + "✅ Module 05: Autograd (Gradient engine) ← YOU ARE HERE\n", + "🔄 Module 06: Optimizers (Learning algorithms)\n", + "🔄 Module 07: Training (Complete training loops)\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/modules/05_autograd/autograd.py b/modules/05_autograd/autograd.py index d8187227..2ca88a84 100644 --- a/modules/05_autograd/autograd.py +++ b/modules/05_autograd/autograd.py @@ -795,6 +795,72 @@ class EmbeddingBackward(Function): return (grad_weight,) + +class SliceBackward(Function): + """ + Gradient computation for tensor slicing/indexing operations. + + **Mathematical Rule:** If Y = X[key], then: + - ∂Loss/∂X[key] = grad_output + - ∂Loss/∂X[other positions] = 0 + + **Key Insight:** Slicing is a masking operation. The backward + places gradients back into the original tensor positions, with + zeros everywhere else. + + **Applications:** Positional encodings, sequence slicing, batch selection, + attention masking in transformers. + + **Examples:** + >>> x = Tensor([1, 2, 3, 4, 5], requires_grad=True) + >>> y = x[:3] # Slice first 3 elements + >>> loss = y.sum() + >>> loss.backward() + >>> # x.grad = [1, 1, 1, 0, 0] - gradients only for sliced positions + """ + + def __init__(self, tensor, key): + """ + Args: + tensor: Original tensor being sliced + key: Slicing key (index, slice, tuple of slices, etc.) + """ + super().__init__(tensor) + self.key = key + self.original_shape = tensor.shape + + def apply(self, grad_output): + """ + Compute gradient for slicing operation. 
+ + Args: + grad_output: Gradient flowing backward from sliced output + + Returns: + Tuple with single gradient for input tensor + + **Mathematical Foundation:** + - Slicing extracts a subset of elements + - Backward scatters gradients back to original positions + - Unsliced positions receive zero gradient + + **Example:** + If X = [a, b, c, d, e] and Y = X[1:4] = [b, c, d] + Then dL/dX = [0, dL/db, dL/dc, dL/dd, 0] + """ + tensor, = self.saved_tensors + grad_input = None + + if isinstance(tensor, Tensor) and tensor.requires_grad: + # Create gradient array with same shape as original tensor + grad_input = np.zeros(self.original_shape, dtype=np.float32) + + # Place gradients back into the sliced positions + # This is the inverse of the forward slicing operation + grad_input[self.key] = grad_output + + return (grad_input,) + # %% nbgrader={"grade": false, "grade_id": "reshape-backward", "solution": true} #| export class ReshapeBackward(Function): diff --git a/modules/11_embeddings/embeddings.ipynb b/modules/11_embeddings/embeddings.ipynb new file mode 100644 index 00000000..e8e020bd --- /dev/null +++ b/modules/11_embeddings/embeddings.ipynb @@ -0,0 +1,1698 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "039d54c0", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "# Module 11: Embeddings - Converting Tokens to Learnable Representations\n", + "\n", + "Welcome to Module 11! 
You're about to build embedding layers that convert discrete tokens into dense, learnable vectors - the foundation of all modern NLP models.\n", + "\n", + "## 🔗 Prerequisites & Progress\n", + "**You've Built**: Tensors, layers, tokenization (discrete text processing)\n", + "**You'll Build**: Embedding lookups and positional encodings for sequence modeling\n", + "**You'll Enable**: Foundation for attention mechanisms and transformer architectures\n", + "\n", + "**Connection Map**:\n", + "```\n", + "Tokenization → Embeddings → Positional Encoding → Attention (Module 12)\n", + "(discrete) (dense) (position-aware) (context-aware)\n", + "```\n", + "\n", + "## Learning Objectives\n", + "By the end of this module, you will:\n", + "1. Implement embedding layers for token-to-vector conversion\n", + "2. Understand learnable vs fixed positional encodings\n", + "3. Build both sinusoidal and learned position encodings\n", + "4. Analyze embedding memory requirements and lookup performance\n", + "\n", + "Let's transform tokens into intelligence!\n", + "\n", + "## 📦 Where This Code Lives in the Final Package\n", + "\n", + "**Learning Side:** You work in `modules/11_embeddings/embeddings_dev.py` \n", + "**Building Side:** Code exports to `tinytorch.text.embeddings`\n", + "\n", + "```python\n", + "# How to use this module:\n", + "from tinytorch.text.embeddings import Embedding, PositionalEncoding, create_sinusoidal_embeddings\n", + "```\n", + "\n", + "**Why this matters:**\n", + "- **Learning:** Complete embedding system for converting discrete tokens to continuous representations\n", + "- **Production:** Essential component matching PyTorch's torch.nn.Embedding with positional encoding patterns\n", + "- **Consistency:** All embedding operations and positional encodings in text.embeddings\n", + "- **Integration:** Works seamlessly with tokenizers for complete text processing pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10c925e7", + "metadata": 
{}, + "outputs": [], + "source": [ + "#| default_exp text.embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "685f96e8", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "import numpy as np\n", + "import math\n", + "from typing import List, Optional, Tuple\n", + "\n", + "# Import from previous modules - following dependency chain\n", + "from tinytorch.core.tensor import Tensor\n", + "from tinytorch.core.autograd import EmbeddingBackward\n", + "\n", + "# Constants for memory calculations\n", + "BYTES_PER_FLOAT32 = 4 # Standard float32 size in bytes\n", + "MB_TO_BYTES = 1024 * 1024 # Megabytes to bytes conversion" + ] + }, + { + "cell_type": "markdown", + "id": "99758b7b", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 1. Introduction - Why Embeddings?\n", + "\n", + "Neural networks operate on dense vectors, but language consists of discrete tokens. Embeddings are the crucial bridge that converts discrete tokens into continuous, learnable vector representations that capture semantic meaning.\n", + "\n", + "### The Token-to-Vector Challenge\n", + "\n", + "Consider the tokens from our tokenizer: [1, 42, 7] - how do we turn these discrete indices into meaningful vectors that capture semantic relationships?\n", + "\n", + "```\n", + "┌─────────────────────────────────────────────────────────────────┐\n", + "│ EMBEDDING PIPELINE: Discrete Tokens → Dense Vectors │\n", + "├─────────────────────────────────────────────────────────────────┤\n", + "│ │\n", + "│ Input (Token IDs): [1, 42, 7] │\n", + "│ │ │\n", + "│ ├─ Step 1: Lookup in embedding table │\n", + "│ │ Each ID → vector of learned features │\n", + "│ │ │\n", + "│ ├─ Step 2: Add positional information │\n", + "│ │ Same word at different positions → different│\n", + "│ │ │\n", + "│ ├─ Step 3: Create position-aware representations │\n", + "│ │ Ready for attention mechanisms │\n", + "│ │ │\n", + "│ └─ Step 4: Enable semantic understanding │\n", + "│ 
Similar words → similar vectors │\n", + "│ │\n", + "│ Output (Dense Vectors): [[0.1, 0.4, ...], [0.7, -0.2, ...]] │\n", + "│ │\n", + "└─────────────────────────────────────────────────────────────────┘\n", + "```\n", + "\n", + "### The Four-Layer Embedding System\n", + "\n", + "Modern embedding systems combine multiple components:\n", + "\n", + "**1. Token embeddings** - Learn semantic representations for each vocabulary token\n", + "**2. Positional encoding** - Add information about position in sequence\n", + "**3. Optional scaling** - Normalize embedding magnitudes (Transformer convention)\n", + "**4. Integration** - Combine everything into position-aware representations\n", + "\n", + "### Why This Matters\n", + "\n", + "The choice of embedding strategy dramatically affects:\n", + "- **Semantic understanding** - How well the model captures word meaning\n", + "- **Memory requirements** - Embedding tables can be gigabytes in size\n", + "- **Position awareness** - Whether the model understands word order\n", + "- **Extrapolation** - How well the model handles longer sequences than training" + ] + }, + { + "cell_type": "markdown", + "id": "0a7c2b1c", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 2. 
Foundations - Embedding Strategies\n", + "\n", + "Different embedding approaches make different trade-offs between memory, semantic understanding, and computational efficiency.\n", + "\n", + "### Token Embedding Lookup Process\n", + "\n", + "**Approach**: Each token ID maps to a learned dense vector\n", + "\n", + "```\n", + "┌──────────────────────────────────────────────────────────────┐\n", + "│ TOKEN EMBEDDING LOOKUP PROCESS │\n", + "├──────────────────────────────────────────────────────────────┤\n", + "│ │\n", + "│ Step 1: Build Embedding Table (vocab_size × embed_dim) │\n", + "│ ┌────────────────────────────────────────────────────────┐ │\n", + "│ │ Token ID │ Embedding Vector (learned features) │ │\n", + "│ ├────────────────────────────────────────────────────────┤ │\n", + "│ │ 0 │ [0.2, -0.1, 0.3, 0.8, ...] () │ │\n", + "│ │ 1 │ [0.1, 0.4, -0.2, 0.6, ...] (\"the\") │ │\n", + "│ │ 42 │ [0.7, -0.2, 0.1, 0.4, ...] (\"cat\") │ │\n", + "│ │ 7 │ [-0.3, 0.1, 0.5, 0.2, ...] (\"sat\") │ │\n", + "│ │ ... │ ... │ │\n", + "│ └────────────────────────────────────────────────────────┘ │\n", + "│ │\n", + "│ Step 2: Lookup Process (O(1) per token) │\n", + "│ ┌────────────────────────────────────────────────────────┐ │\n", + "│ │ Input: Token IDs [1, 42, 7] │ │\n", + "│ │ │ │\n", + "│ │ ID 1 → embedding[1] → [0.1, 0.4, -0.2, ...] │ │\n", + "│ │ ID 42 → embedding[42] → [0.7, -0.2, 0.1, ...] │ │\n", + "│ │ ID 7 → embedding[7] → [-0.3, 0.1, 0.5, ...] 
│ │\n", + "│ │ │ │\n", + "│ │ Output: Matrix (3 × embed_dim) │ │\n", + "│ │ [[0.1, 0.4, -0.2, ...], │ │\n", + "│ │ [0.7, -0.2, 0.1, ...], │ │\n", + "│ │ [-0.3, 0.1, 0.5, ...]] │ │\n", + "│ └────────────────────────────────────────────────────────┘ │\n", + "│ │\n", + "│ Step 3: Training Updates Embeddings │\n", + "│ ┌────────────────────────────────────────────────────────┐ │\n", + "│ │ Gradients flow back to embedding table │ │\n", + "│ │ │ │\n", + "│ │ Similar words learn similar vectors: │ │\n", + "│ │ \"cat\" and \"dog\" → closer in embedding space │ │\n", + "│ │ \"the\" and \"a\" → closer in embedding space │ │\n", + "│ │ \"sat\" and \"run\" → farther in embedding space │ │\n", + "│ └────────────────────────────────────────────────────────┘ │\n", + "│ │\n", + "└──────────────────────────────────────────────────────────────┘\n", + "```\n", + "\n", + "**Pros**:\n", + "- Dense representation (every dimension meaningful)\n", + "- Learnable (captures semantic relationships through training)\n", + "- Efficient lookup (O(1) time complexity)\n", + "- Scales to large vocabularies\n", + "\n", + "**Cons**:\n", + "- Memory intensive (vocab_size × embed_dim parameters)\n", + "- Requires training to develop semantic relationships\n", + "- Fixed vocabulary (new tokens need special handling)\n", + "\n", + "### Positional Encoding Strategies\n", + "\n", + "Since embeddings by themselves have no notion of order, we need positional information:\n", + "\n", + "```\n", + "Position-Aware Embeddings = Token Embeddings + Positional Encoding\n", + "\n", + "Learned Approach: Fixed Mathematical Approach:\n", + "Position 0 → [learned] Position 0 → [sin/cos pattern]\n", + "Position 1 → [learned] Position 1 → [sin/cos pattern]\n", + "Position 2 → [learned] Position 2 → [sin/cos pattern]\n", + "... 
...\n", + "```\n", + "\n", + "**Learned Positional Encoding**:\n", + "- Trainable position embeddings\n", + "- Can learn task-specific patterns\n", + "- Limited to maximum training sequence length\n", + "\n", + "**Sinusoidal Positional Encoding**:\n", + "- Mathematical sine/cosine patterns\n", + "- No additional parameters\n", + "- Can extrapolate to longer sequences\n", + "\n", + "### Strategy Comparison\n", + "\n", + "```\n", + "Text: \"cat sat on mat\" → Token IDs: [42, 7, 15, 99]\n", + "\n", + "Token Embeddings: [vec_42, vec_7, vec_15, vec_99] # Same vectors anywhere\n", + "Position-Aware: [vec_42+pos_0, vec_7+pos_1, vec_15+pos_2, vec_99+pos_3]\n", + " ↑ Now \"cat\" at position 0 ≠ \"cat\" at position 1\n", + "```\n", + "\n", + "The combination enables transformers to understand both meaning and order!" + ] + }, + { + "cell_type": "markdown", + "id": "7cc0e198", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "## 3. Implementation - Building Embedding Systems\n", + "\n", + "Let's implement embedding systems from basic token lookup to sophisticated position-aware representations. We'll start with the core embedding layer and work up to complete systems." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "303311fe", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "embedding-class", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| export\n", + "class Embedding:\n", + " \"\"\"\n", + " Learnable embedding layer that maps token indices to dense vectors.\n", + "\n", + " This is the fundamental building block for converting discrete tokens\n", + " into continuous representations that neural networks can process.\n", + "\n", + " TODO: Implement the Embedding class\n", + "\n", + " APPROACH:\n", + " 1. Initialize embedding matrix with random weights (vocab_size, embed_dim)\n", + " 2. 
Implement forward pass as matrix lookup using numpy indexing\n", + " 3. Handle batch dimensions correctly\n", + " 4. Return parameters for optimization\n", + "\n", + " EXAMPLE:\n", + " >>> embed = Embedding(vocab_size=100, embed_dim=64)\n", + " >>> tokens = Tensor([[1, 2, 3], [4, 5, 6]]) # batch_size=2, seq_len=3\n", + " >>> output = embed.forward(tokens)\n", + " >>> print(output.shape)\n", + " (2, 3, 64)\n", + "\n", + " HINTS:\n", + " - Use numpy advanced indexing for lookup: weight[indices]\n", + " - Embedding matrix shape: (vocab_size, embed_dim)\n", + " - Initialize with Xavier/Glorot uniform for stable gradients\n", + " - Handle multi-dimensional indices correctly\n", + " \"\"\"\n", + "\n", + " ### BEGIN SOLUTION\n", + " def __init__(self, vocab_size: int, embed_dim: int):\n", + " \"\"\"\n", + " Initialize embedding layer.\n", + "\n", + " Args:\n", + " vocab_size: Size of vocabulary (number of unique tokens)\n", + " embed_dim: Dimension of embedding vectors\n", + " \"\"\"\n", + " self.vocab_size = vocab_size\n", + " self.embed_dim = embed_dim\n", + "\n", + " # Xavier initialization for better gradient flow\n", + " limit = math.sqrt(6.0 / (vocab_size + embed_dim))\n", + " self.weight = Tensor(\n", + " np.random.uniform(-limit, limit, (vocab_size, embed_dim)),\n", + " requires_grad=True\n", + " )\n", + "\n", + " def forward(self, indices: Tensor) -> Tensor:\n", + " \"\"\"\n", + " Forward pass: lookup embeddings for given indices.\n", + "\n", + " Args:\n", + " indices: Token indices of shape (batch_size, seq_len) or (seq_len,)\n", + "\n", + " Returns:\n", + " Embedded vectors of shape (*indices.shape, embed_dim)\n", + " \"\"\"\n", + " # Handle input validation\n", + " if np.any(indices.data >= self.vocab_size) or np.any(indices.data < 0):\n", + " raise ValueError(\n", + " f\"Index out of range. 
Expected 0 <= indices < {self.vocab_size}, \"\n", + " f\"got min={np.min(indices.data)}, max={np.max(indices.data)}\"\n", + " )\n", + "\n", + " # Perform embedding lookup using advanced indexing\n", + " # This is equivalent to one-hot multiplication but much more efficient\n", + " embedded = self.weight.data[indices.data.astype(int)]\n", + "\n", + " # Create result tensor with gradient tracking\n", + " result = Tensor(embedded, requires_grad=self.weight.requires_grad)\n", + " \n", + " # Attach backward function for gradient computation (following TinyTorch protocol)\n", + " if result.requires_grad:\n", + " result._grad_fn = EmbeddingBackward(self.weight, indices)\n", + " \n", + " return result\n", + "\n", + " def __call__(self, indices: Tensor) -> Tensor:\n", + " \"\"\"Allows the embedding to be called like a function.\"\"\"\n", + " return self.forward(indices)\n", + "\n", + " def parameters(self) -> List[Tensor]:\n", + " \"\"\"Return trainable parameters.\"\"\"\n", + " return [self.weight]\n", + "\n", + " def __repr__(self):\n", + " return f\"Embedding(vocab_size={self.vocab_size}, embed_dim={self.embed_dim})\"\n", + " ### END SOLUTION" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9eb96fa0", + "metadata": { + "nbgrader": { + "grade": true, + "grade_id": "test-embedding", + "locked": true, + "points": 10 + } + }, + "outputs": [], + "source": [ + "def test_unit_embedding():\n", + " \"\"\"🔬 Unit Test: Embedding Layer Implementation\"\"\"\n", + " print(\"🔬 Unit Test: Embedding Layer...\")\n", + "\n", + " # Test 1: Basic embedding creation and forward pass\n", + " embed = Embedding(vocab_size=100, embed_dim=64)\n", + "\n", + " # Single sequence\n", + " tokens = Tensor([1, 2, 3])\n", + " output = embed.forward(tokens)\n", + "\n", + " assert output.shape == (3, 64), f\"Expected shape (3, 64), got {output.shape}\"\n", + " assert len(embed.parameters()) == 1, \"Should have 1 parameter (weight matrix)\"\n", + " assert embed.parameters()[0].shape 
== (100, 64), \"Weight matrix has wrong shape\"\n", + "\n", + " # Test 2: Batch processing\n", + " batch_tokens = Tensor([[1, 2, 3], [4, 5, 6]])\n", + " batch_output = embed.forward(batch_tokens)\n", + "\n", + " assert batch_output.shape == (2, 3, 64), f\"Expected batch shape (2, 3, 64), got {batch_output.shape}\"\n", + "\n", + " # Test 3: Embedding lookup consistency\n", + " single_lookup = embed.forward(Tensor([1]))\n", + " batch_lookup = embed.forward(Tensor([[1]]))\n", + "\n", + " # Should get same embedding for same token\n", + " assert np.allclose(single_lookup.data[0], batch_lookup.data[0, 0]), \"Inconsistent embedding lookup\"\n", + "\n", + " # Test 4: Parameter access\n", + " params = embed.parameters()\n", + " assert all(p.requires_grad for p in params), \"All parameters should require gradients\"\n", + "\n", + " print(\"✅ Embedding layer works correctly!\")\n", + "\n", + "# Run test immediately when developing this module\n", + "if __name__ == \"__main__\":\n", + " test_unit_embedding()" + ] + }, + { + "cell_type": "markdown", + "id": "9f76f8c9", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "### Learned Positional Encoding\n", + "\n", + "Trainable position embeddings that can learn position-specific patterns. This approach treats each position as a learnable parameter, similar to token embeddings.\n", + "\n", + "```\n", + "Learned Position Embedding Process:\n", + "\n", + "Step 1: Initialize Position Embedding Table\n", + "┌───────────────────────────────────────────────────────────────┐\n", + "│ Position │ Learnable Vector (trainable parameters) │\n", + "├───────────────────────────────────────────────────────────────┤\n", + "│ 0 │ [0.1, -0.2, 0.4, ...] ← learns \"start\" patterns │\n", + "│ 1 │ [0.3, 0.1, -0.1, ...] ← learns \"second\" patterns│\n", + "│ 2 │ [-0.1, 0.5, 0.2, ...] ← learns \"third\" patterns │\n", + "│ ... │ ... │\n", + "│ 511 │ [0.4, -0.3, 0.1, ...] 
← learns \"late\" patterns │\n", + "└───────────────────────────────────────────────────────────────┘\n", + "\n", + "Step 2: Add to Token Embeddings\n", + "Input: [\"The\", \"cat\", \"sat\"] → Token IDs: [1, 42, 7]\n", + "\n", + "Token embeddings: Position embeddings: Combined:\n", + "[1] → [0.1, 0.4, ...] + [0.1, -0.2, ...] = [0.2, 0.2, ...]\n", + "[42] → [0.7, -0.2, ...] + [0.3, 0.1, ...] = [1.0, -0.1, ...]\n", + "[7] → [-0.3, 0.1, ...] + [-0.1, 0.5, ...] = [-0.4, 0.6, ...]\n", + "\n", + "Result: Position-aware embeddings that can learn task-specific patterns!\n", + "```\n", + "\n", + "**Why learned positions work**: The model can discover that certain positions have special meaning (like sentence beginnings, question words, etc.) and learn specific representations for those patterns." + ] + }, + { + "cell_type": "markdown", + "id": "92495175", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### Implementing Learned Positional Encoding\n", + "\n", + "Let's build trainable positional embeddings that can learn position-specific patterns for our specific task." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2068806c", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "positional-encoding", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| export\n", + "class PositionalEncoding:\n", + " \"\"\"\n", + " Learnable positional encoding layer.\n", + "\n", + " Adds trainable position-specific vectors to token embeddings,\n", + " allowing the model to learn positional patterns specific to the task.\n", + "\n", + " TODO: Implement learnable positional encoding\n", + "\n", + " APPROACH:\n", + " 1. Create embedding matrix for positions: (max_seq_len, embed_dim)\n", + " 2. Forward pass: lookup position embeddings and add to input\n", + " 3. Handle different sequence lengths gracefully\n", + " 4. 
Return parameters for training\n", + "\n", + " EXAMPLE:\n", + " >>> pos_enc = PositionalEncoding(max_seq_len=512, embed_dim=64)\n", + " >>> embeddings = Tensor(np.random.randn(2, 10, 64)) # (batch, seq, embed)\n", + " >>> output = pos_enc.forward(embeddings)\n", + " >>> print(output.shape)\n", + " (2, 10, 64) # Same shape, but now position-aware\n", + "\n", + " HINTS:\n", + " - Position embeddings shape: (max_seq_len, embed_dim)\n", + " - Use slice [:seq_len] to handle variable lengths\n", + " - Add position encodings to input embeddings element-wise\n", + " - Initialize with smaller values than token embeddings (they're additive)\n", + " \"\"\"\n", + "\n", + " ### BEGIN SOLUTION\n", + " def __init__(self, max_seq_len: int, embed_dim: int):\n", + " \"\"\"\n", + " Initialize learnable positional encoding.\n", + "\n", + " Args:\n", + " max_seq_len: Maximum sequence length to support\n", + " embed_dim: Embedding dimension (must match token embeddings)\n", + " \"\"\"\n", + " self.max_seq_len = max_seq_len\n", + " self.embed_dim = embed_dim\n", + "\n", + " # Initialize position embedding matrix\n", + " # Smaller initialization than token embeddings since these are additive\n", + " limit = math.sqrt(2.0 / embed_dim)\n", + " self.position_embeddings = Tensor(\n", + " np.random.uniform(-limit, limit, (max_seq_len, embed_dim)),\n", + " requires_grad=True\n", + " )\n", + "\n", + " def forward(self, x: Tensor) -> Tensor:\n", + " \"\"\"\n", + " Add positional encodings to input embeddings.\n", + "\n", + " Args:\n", + " x: Input embeddings of shape (batch_size, seq_len, embed_dim)\n", + "\n", + " Returns:\n", + " Position-encoded embeddings of same shape\n", + " \"\"\"\n", + " if len(x.shape) != 3:\n", + " raise ValueError(f\"Expected 3D input (batch, seq, embed), got shape {x.shape}\")\n", + "\n", + " batch_size, seq_len, embed_dim = x.shape\n", + "\n", + " if seq_len > self.max_seq_len:\n", + " raise ValueError(\n", + " f\"Sequence length {seq_len} exceeds maximum 
{self.max_seq_len}\"\n", + " )\n", + "\n", + " if embed_dim != self.embed_dim:\n", + " raise ValueError(\n", + " f\"Embedding dimension mismatch: expected {self.embed_dim}, got {embed_dim}\"\n", + " )\n", + "\n", + " # Slice position embeddings for this sequence length using Tensor slicing\n", + " # This now preserves gradient flow (as of Module 01 update with __getitem__)\n", + " pos_embeddings = self.position_embeddings[:seq_len] # (seq_len, embed_dim) - gradients preserved!\n", + " \n", + " # Reshape to add batch dimension: (1, seq_len, embed_dim)\n", + " # Need to use .data for reshaping temporarily, then wrap in Tensor\n", + " pos_data = pos_embeddings.data[np.newaxis, :, :]\n", + " pos_embeddings_batched = Tensor(pos_data, requires_grad=pos_embeddings.requires_grad)\n", + " \n", + " # Copy gradient function if it exists (to preserve backward connection)\n", + " if hasattr(pos_embeddings, '_grad_fn') and pos_embeddings._grad_fn is not None:\n", + " pos_embeddings_batched._grad_fn = pos_embeddings._grad_fn\n", + "\n", + " # Add positional information - gradients flow through both x and pos_embeddings!\n", + " result = x + pos_embeddings_batched\n", + "\n", + " return result\n", + "\n", + " def __call__(self, x: Tensor) -> Tensor:\n", + " \"\"\"Allows the positional encoding to be called like a function.\"\"\"\n", + " return self.forward(x)\n", + "\n", + " def parameters(self) -> List[Tensor]:\n", + " \"\"\"Return trainable parameters.\"\"\"\n", + " return [self.position_embeddings]\n", + "\n", + " def __repr__(self):\n", + " return f\"PositionalEncoding(max_seq_len={self.max_seq_len}, embed_dim={self.embed_dim})\"\n", + " ### END SOLUTION" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b24cf145", + "metadata": { + "nbgrader": { + "grade": true, + "grade_id": "test-positional", + "locked": true, + "points": 10 + } + }, + "outputs": [], + "source": [ + "def test_unit_positional_encoding():\n", + " \"\"\"🔬 Unit Test: Positional Encoding 
Implementation\"\"\"\n", + " print(\"🔬 Unit Test: Positional Encoding...\")\n", + "\n", + " # Test 1: Basic functionality\n", + " pos_enc = PositionalEncoding(max_seq_len=512, embed_dim=64)\n", + "\n", + " # Create sample embeddings\n", + " embeddings = Tensor(np.random.randn(2, 10, 64))\n", + " output = pos_enc.forward(embeddings)\n", + "\n", + " assert output.shape == (2, 10, 64), f\"Expected shape (2, 10, 64), got {output.shape}\"\n", + "\n", + " # Test 2: Position consistency\n", + " # Same position should always get same encoding\n", + " emb1 = Tensor(np.zeros((1, 5, 64)))\n", + " emb2 = Tensor(np.zeros((1, 5, 64)))\n", + "\n", + " out1 = pos_enc.forward(emb1)\n", + " out2 = pos_enc.forward(emb2)\n", + "\n", + " assert np.allclose(out1.data, out2.data), \"Position encodings should be consistent\"\n", + "\n", + " # Test 3: Different positions get different encodings\n", + " short_emb = Tensor(np.zeros((1, 3, 64)))\n", + " long_emb = Tensor(np.zeros((1, 5, 64)))\n", + "\n", + " short_out = pos_enc.forward(short_emb)\n", + " long_out = pos_enc.forward(long_emb)\n", + "\n", + " # First 3 positions should match\n", + " assert np.allclose(short_out.data, long_out.data[:, :3, :]), \"Position encoding prefix should match\"\n", + "\n", + " # Test 4: Parameters\n", + " params = pos_enc.parameters()\n", + " assert len(params) == 1, \"Should have 1 parameter (position embeddings)\"\n", + " assert params[0].shape == (512, 64), \"Position embedding matrix has wrong shape\"\n", + "\n", + " print(\"✅ Positional encoding works correctly!\")\n", + "\n", + "# Run test immediately when developing this module\n", + "if __name__ == \"__main__\":\n", + " test_unit_positional_encoding()" + ] + }, + { + "cell_type": "markdown", + "id": "3f71484a", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "### Sinusoidal Positional Encoding\n", + "\n", + "Mathematical position encoding that creates unique signatures for each position using trigonometric functions. 
This approach requires no additional parameters and can extrapolate to sequences longer than seen during training.\n", + "\n", + "```\n", + "┌───────────────────────────────────────────────────────────────────────────┐\n", + "│ SINUSOIDAL POSITION ENCODING: Mathematical Position Signatures │\n", + "├───────────────────────────────────────────────────────────────────────────┤\n", + "│ │\n", + "│ MATHEMATICAL FORMULA: │\n", + "│ ┌──────────────────────────────────────────────────────────────┐ │\n", + "│ │ PE(pos, 2i) = sin(pos / 10000^(2i/embed_dim)) # Even dims │ │\n", + "│ │ PE(pos, 2i+1) = cos(pos / 10000^(2i/embed_dim)) # Odd dims │ │\n", + "│ │ │ │\n", + "│ │ Where: │ │\n", + "│ │ pos = position in sequence (0, 1, 2, ...) │ │\n", + "│ │ i = dimension pair index (0, 1, 2, ...) │ │\n", + "│ │ 10000 = base frequency (creates different wavelengths) │ │\n", + "│ └──────────────────────────────────────────────────────────────┘ │\n", + "│ │\n", + "│ FREQUENCY PATTERN ACROSS DIMENSIONS: │\n", + "│ ┌──────────────────────────────────────────────────────────────┐ │\n", + "│ │ Dimension: 0 1 2 3 4 5 6 7 │ │\n", + "│ │ Frequency: High High Med Med Low Low VLow VLow │ │\n", + "│ │ Function: sin cos sin cos sin cos sin cos │ │\n", + "│ │ │ │\n", + "│ │ pos=0: [0.00, 1.00, 0.00, 1.00, 0.00, 1.00, 0.00, 1.00] │ │\n", + "│ │ pos=1: [0.84, 0.54, 0.01, 1.00, 0.00, 1.00, 0.00, 1.00] │ │\n", + "│ │ pos=2: [0.91,-0.42, 0.02, 1.00, 0.00, 1.00, 0.00, 1.00] │ │\n", + "│ │ pos=3: [0.14,-0.99, 0.03, 1.00, 0.00, 1.00, 0.00, 1.00] │ │\n", + "│ │ │ │\n", + "│ │ Each position gets a unique mathematical \"fingerprint\"! 
│ │\n", + "│ └──────────────────────────────────────────────────────────────┘ │\n", + "│ │\n", + "│ WHY THIS WORKS: │\n", + "│ ┌──────────────────────────────────────────────────────────────┐ │\n", + "│ │ Wave Pattern Visualization: │ │\n", + "│ │ │ │\n", + "│ │ Dim 0: ∿∿∿∿∿∿∿∿∿∿∿∿∿∿∿∿∿∿∿∿ (rapid oscillation) │ │\n", + "│ │ Dim 2: ∿---∿---∿---∿---∿---∿ (medium frequency) │ │\n", + "│ │ Dim 4: ∿-----∿-----∿-----∿-- (low frequency) │ │\n", + "│ │ Dim 6: ∿----------∿---------- (very slow changes) │ │\n", + "│ │ │ │\n", + "│ │ • High frequency dims change rapidly between positions │ │\n", + "│ │ • Low frequency dims change slowly │ │\n", + "│ │ • Combination creates unique signature for each position │ │\n", + "│ │ • Similar positions have similar (but distinct) encodings │ │\n", + "│ └──────────────────────────────────────────────────────────────┘ │\n", + "│ │\n", + "│ KEY ADVANTAGES: │\n", + "│ • Zero parameters (no memory overhead) │\n", + "│ • Infinite sequence length (can extrapolate) │\n", + "│ • Smooth transitions (nearby positions are similar) │\n", + "│ • Mathematical elegance (interpretable patterns) │\n", + "│ │\n", + "└───────────────────────────────────────────────────────────────────────────┘\n", + "```\n", + "\n", + "**Why transformers use this**: The mathematical structure allows the model to learn relative positions (how far apart tokens are) through simple vector operations, which is crucial for attention mechanisms!" + ] + }, + { + "cell_type": "markdown", + "id": "c4396fdd", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### Implementing Sinusoidal Positional Encodings\n", + "\n", + "Let's implement the mathematical position encoding that creates unique signatures for each position using trigonometric functions." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b41a9be6", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "sinusoidal-function", + "solution": true + } + }, + "outputs": [], + "source": [ + "def create_sinusoidal_embeddings(max_seq_len: int, embed_dim: int) -> Tensor:\n", + " \"\"\"\n", + " Create sinusoidal positional encodings as used in \"Attention Is All You Need\".\n", + "\n", + " These fixed encodings use sine and cosine functions to create unique\n", + " positional patterns that don't require training and can extrapolate\n", + " to longer sequences than seen during training.\n", + "\n", + " TODO: Implement sinusoidal positional encoding generation\n", + "\n", + " APPROACH:\n", + " 1. Create position indices: [0, 1, 2, ..., max_seq_len-1]\n", + " 2. Create dimension indices for frequency calculation\n", + " 3. Apply sine to even dimensions, cosine to odd dimensions\n", + " 4. Use the transformer paper formula with 10000 base\n", + "\n", + " MATHEMATICAL FORMULA:\n", + " PE(pos, 2i) = sin(pos / 10000^(2i/embed_dim))\n", + " PE(pos, 2i+1) = cos(pos / 10000^(2i/embed_dim))\n", + "\n", + " EXAMPLE:\n", + " >>> pe = create_sinusoidal_embeddings(512, 64)\n", + " >>> print(pe.shape)\n", + " (512, 64)\n", + " >>> # Position 0: [0, 1, 0, 1, 0, 1, ...] 
(sin(0)=0, cos(0)=1)\n", + " >>> # Each position gets unique trigonometric signature\n", + "\n", + " HINTS:\n", + " - Use np.arange to create position and dimension arrays\n", + " - Calculate div_term using exponential for frequency scaling\n", + " - Apply different formulas to even/odd dimensions\n", + " - The 10000 base creates different frequencies for different dimensions\n", + " \"\"\"\n", + "\n", + " ### BEGIN SOLUTION\n", + " # Create position indices [0, 1, 2, ..., max_seq_len-1]\n", + " position = np.arange(max_seq_len, dtype=np.float32)[:, np.newaxis] # (max_seq_len, 1)\n", + "\n", + " # Create dimension indices for calculating frequencies\n", + " div_term = np.exp(\n", + " np.arange(0, embed_dim, 2, dtype=np.float32) *\n", + " -(math.log(10000.0) / embed_dim)\n", + " ) # (embed_dim//2,)\n", + "\n", + " # Initialize the positional encoding matrix\n", + " pe = np.zeros((max_seq_len, embed_dim), dtype=np.float32)\n", + "\n", + " # Apply sine to even indices (0, 2, 4, ...)\n", + " pe[:, 0::2] = np.sin(position * div_term)\n", + "\n", + " # Apply cosine to odd indices (1, 3, 5, ...)\n", + " if embed_dim % 2 == 1:\n", + " # Handle odd embed_dim by only filling available positions\n", + " pe[:, 1::2] = np.cos(position * div_term[:-1])\n", + " else:\n", + " pe[:, 1::2] = np.cos(position * div_term)\n", + "\n", + " return Tensor(pe)\n", + " ### END SOLUTION" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cee7ac8d", + "metadata": { + "nbgrader": { + "grade": true, + "grade_id": "test-sinusoidal", + "locked": true, + "points": 10 + } + }, + "outputs": [], + "source": [ + "def test_unit_sinusoidal_embeddings():\n", + " \"\"\"🔬 Unit Test: Sinusoidal Positional Embeddings\"\"\"\n", + " print(\"🔬 Unit Test: Sinusoidal Embeddings...\")\n", + "\n", + " # Test 1: Basic shape and properties\n", + " pe = create_sinusoidal_embeddings(512, 64)\n", + "\n", + " assert pe.shape == (512, 64), f\"Expected shape (512, 64), got {pe.shape}\"\n", + "\n", + " # 
Test 2: Position 0 should be mostly zeros and ones\n", + " pos_0 = pe.data[0]\n", + "\n", + " # Even indices should be sin(0) = 0\n", + " assert np.allclose(pos_0[0::2], 0, atol=1e-6), \"Even indices at position 0 should be ~0\"\n", + "\n", + " # Odd indices should be cos(0) = 1\n", + " assert np.allclose(pos_0[1::2], 1, atol=1e-6), \"Odd indices at position 0 should be ~1\"\n", + "\n", + " # Test 3: Different positions should have different encodings\n", + " pe_small = create_sinusoidal_embeddings(10, 8)\n", + "\n", + " # Check that consecutive positions are different\n", + " for i in range(9):\n", + " assert not np.allclose(pe_small.data[i], pe_small.data[i+1]), f\"Positions {i} and {i+1} are too similar\"\n", + "\n", + " # Test 4: Frequency properties\n", + " # Higher dimensions should have lower frequencies (change more slowly)\n", + " pe_test = create_sinusoidal_embeddings(100, 16)\n", + "\n", + " # First dimension should change faster than last dimension\n", + " first_dim_changes = np.sum(np.abs(np.diff(pe_test.data[:10, 0])))\n", + " last_dim_changes = np.sum(np.abs(np.diff(pe_test.data[:10, -1])))\n", + "\n", + " assert first_dim_changes > last_dim_changes, \"Lower dimensions should change faster than higher dimensions\"\n", + "\n", + " # Test 5: Odd embed_dim handling\n", + " pe_odd = create_sinusoidal_embeddings(10, 7)\n", + " assert pe_odd.shape == (10, 7), \"Should handle odd embedding dimensions\"\n", + "\n", + " print(\"✅ Sinusoidal embeddings work correctly!\")\n", + "\n", + "# Run test immediately when developing this module\n", + "if __name__ == \"__main__\":\n", + " test_unit_sinusoidal_embeddings()" + ] + }, + { + "cell_type": "markdown", + "id": "8ccd4ac2", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 4. 
Integration - Bringing It Together\n", + "\n", + "Now let's build the complete embedding system that combines token and positional embeddings into a production-ready component used in modern transformers and language models.\n", + "\n", + "```\n", + "Complete Embedding Pipeline:\n", + "\n", + "1. Token Lookup → 2. Position Encoding → 3. Combination → 4. Ready for Attention\n", + " ↓ ↓ ↓ ↓\n", + " sparse IDs position info dense vectors context-aware\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "4d0327cb", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "### Complete Embedding System Architecture\n", + "\n", + "The production embedding layer that powers modern transformers combines multiple components into an efficient, flexible pipeline.\n", + "\n", + "```\n", + "┌───────────────────────────────────────────────────────────────────────────┐\n", + "│ COMPLETE EMBEDDING SYSTEM: Token + Position → Attention-Ready │\n", + "├───────────────────────────────────────────────────────────────────────────┤\n", + "│ │\n", + "│ INPUT: Token IDs [1, 42, 7, 99] │\n", + "│ │ │\n", + "│ ├─ STEP 1: TOKEN EMBEDDING LOOKUP │\n", + "│ │ ┌─────────────────────────────────────────────────────────┐ │\n", + "│ │ │ Token Embedding Table (vocab_size × embed_dim) │ │\n", + "│ │ │ │ │\n", + "│ │ │ ID 1 → [0.1, 0.4, -0.2, ...] (semantic features) │ │\n", + "│ │ │ ID 42 → [0.7, -0.2, 0.1, ...] (learned meaning) │ │\n", + "│ │ │ ID 7 → [-0.3, 0.1, 0.5, ...] (dense vector) │ │\n", + "│ │ │ ID 99 → [0.9, -0.1, 0.3, ...] 
(context-free) │ │\n", + "│ │ └─────────────────────────────────────────────────────────┘ │\n", + "│ │ │\n", + "│ ├─ STEP 2: POSITIONAL ENCODING (Choose Strategy) │\n", + "│ │ ┌─────────────────────────────────────────────────────────┐ │\n", + "│ │ │ Strategy A: Learned PE │ │\n", + "│ │ │ pos 0 → [trainable vector] (learns patterns) │ │\n", + "│ │ │ pos 1 → [trainable vector] (task-specific) │ │\n", + "│ │ │ pos 2 → [trainable vector] (fixed max length) │ │\n", + "│ │ │ │ │\n", + "│ │ │ Strategy B: Sinusoidal PE │ │\n", + "│ │ │ pos 0 → [sin/cos pattern] (mathematical) │ │\n", + "│ │ │ pos 1 → [sin/cos pattern] (no parameters) │ │\n", + "│ │ │ pos 2 → [sin/cos pattern] (infinite length) │ │\n", + "│ │ │ │ │\n", + "│ │ │ Strategy C: No PE │ │\n", + "│ │ │ positions ignored (order-agnostic) │ │\n", + "│ │ └─────────────────────────────────────────────────────────┘ │\n", + "│ │ │\n", + "│ ├─ STEP 3: ELEMENT-WISE ADDITION │\n", + "│ │ ┌─────────────────────────────────────────────────────────┐ │\n", + "│ │ │ Token + Position = Position-Aware Representation │ │\n", + "│ │ │ │ │\n", + "│ │ │ [0.1, 0.4, -0.2] + [pos0] = [0.1+p0, 0.4+p0, ...] │ │\n", + "│ │ │ [0.7, -0.2, 0.1] + [pos1] = [0.7+p1, -0.2+p1, ...] │ │\n", + "│ │ │ [-0.3, 0.1, 0.5] + [pos2] = [-0.3+p2, 0.1+p2, ...] │ │\n", + "│ │ │ [0.9, -0.1, 0.3] + [pos3] = [0.9+p3, -0.1+p3, ...] │ │\n", + "│ │ └─────────────────────────────────────────────────────────┘ │\n", + "│ │ │\n", + "│ ├─ STEP 4: OPTIONAL SCALING (Transformer Convention) │\n", + "│ │ ┌─────────────────────────────────────────────────────────┐ │\n", + "│ │ │ Scale by √embed_dim for gradient stability │ │\n", + "│ │ │ Helps balance token and position magnitudes │ │\n", + "│ │ └─────────────────────────────────────────────────────────┘ │\n", + "│ │ │\n", + "│ └─ OUTPUT: Position-Aware Dense Vectors │\n", + "│ Ready for attention mechanisms and transformers! 
│\n", + "│ │\n", + "│ INTEGRATION FEATURES: │\n", + "│ • Flexible position encoding (learned/sinusoidal/none) │\n", + "│ • Efficient batch processing with variable sequence lengths │\n", + "│ • Memory optimization (shared position encodings) │\n", + "│ • Production patterns (matches PyTorch/HuggingFace) │\n", + "│ │\n", + "└───────────────────────────────────────────────────────────────────────────┘\n", + "```\n", + "\n", + "**Why this architecture works**: By separating token semantics from positional information, the model can learn meaning and order independently, then combine them optimally for the specific task." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11d12a14", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "complete-system", + "solution": true + } + }, + "outputs": [], + "source": [ + "#| export\n", + "class EmbeddingLayer:\n", + " \"\"\"\n", + " Complete embedding system combining token and positional embeddings.\n", + "\n", + " This is the production-ready component that handles the full embedding\n", + " pipeline used in transformers and other sequence models.\n", + "\n", + " TODO: Implement complete embedding system\n", + "\n", + " APPROACH:\n", + " 1. Combine token embedding + positional encoding\n", + " 2. Support both learned and sinusoidal position encodings\n", + " 3. Handle variable sequence lengths gracefully\n", + " 4. Add optional embedding scaling (Transformer convention)\n", + "\n", + " EXAMPLE:\n", + " >>> embed_layer = EmbeddingLayer(\n", + " ... vocab_size=50000,\n", + " ... embed_dim=512,\n", + " ... max_seq_len=2048,\n", + " ... pos_encoding='learned'\n", + " ... 
)\n", + " >>> tokens = Tensor([[1, 2, 3], [4, 5, 6]])\n", + " >>> output = embed_layer.forward(tokens)\n", + " >>> print(output.shape)\n", + " (2, 3, 512)\n", + "\n", + " HINTS:\n", + " - First apply token embedding, then add positional encoding\n", + " - Support 'learned', 'sinusoidal', or None for pos_encoding\n", + " - Handle both 2D (batch, seq) and 1D (seq) inputs gracefully\n", + " - Scale embeddings by sqrt(embed_dim) if requested (transformer convention)\n", + " \"\"\"\n", + "\n", + " ### BEGIN SOLUTION\n", + " def __init__(\n", + " self,\n", + " vocab_size: int,\n", + " embed_dim: int,\n", + " max_seq_len: int = 512,\n", + " pos_encoding: str = 'learned',\n", + " scale_embeddings: bool = False\n", + " ):\n", + " \"\"\"\n", + " Initialize complete embedding system.\n", + "\n", + " Args:\n", + " vocab_size: Size of vocabulary\n", + " embed_dim: Embedding dimension\n", + " max_seq_len: Maximum sequence length for positional encoding\n", + " pos_encoding: Type of positional encoding ('learned', 'sinusoidal', or None)\n", + " scale_embeddings: Whether to scale embeddings by sqrt(embed_dim)\n", + " \"\"\"\n", + " self.vocab_size = vocab_size\n", + " self.embed_dim = embed_dim\n", + " self.max_seq_len = max_seq_len\n", + " self.pos_encoding_type = pos_encoding\n", + " self.scale_embeddings = scale_embeddings\n", + "\n", + " # Token embedding layer\n", + " self.token_embedding = Embedding(vocab_size, embed_dim)\n", + "\n", + " # Positional encoding\n", + " if pos_encoding == 'learned':\n", + " self.pos_encoding = PositionalEncoding(max_seq_len, embed_dim)\n", + " elif pos_encoding == 'sinusoidal':\n", + " # Create fixed sinusoidal encodings (no parameters)\n", + " self.pos_encoding = create_sinusoidal_embeddings(max_seq_len, embed_dim)\n", + " elif pos_encoding is None:\n", + " self.pos_encoding = None\n", + " else:\n", + " raise ValueError(f\"Unknown pos_encoding: {pos_encoding}. 
Use 'learned', 'sinusoidal', or None\")\n", + "\n", + " def forward(self, tokens: Tensor) -> Tensor:\n", + " \"\"\"\n", + " Forward pass through complete embedding system.\n", + "\n", + " Args:\n", + " tokens: Token indices of shape (batch_size, seq_len) or (seq_len,)\n", + "\n", + " Returns:\n", + " Embedded tokens with positional information\n", + " \"\"\"\n", + " # Handle 1D input by adding batch dimension\n", + " if len(tokens.shape) == 1:\n", + " # NOTE: Tensor reshape preserves gradients\n", + " tokens = tokens.reshape(1, -1)\n", + " squeeze_batch = True\n", + " else:\n", + " squeeze_batch = False\n", + "\n", + " # Get token embeddings\n", + " token_embeds = self.token_embedding.forward(tokens) # (batch, seq, embed)\n", + "\n", + " # Scale embeddings if requested (transformer convention)\n", + " if self.scale_embeddings:\n", + " scale_factor = math.sqrt(self.embed_dim)\n", + " token_embeds = token_embeds * scale_factor # Use Tensor multiplication to preserve gradients\n", + "\n", + " # Add positional encoding\n", + " if self.pos_encoding_type == 'learned':\n", + " # Use learnable positional encoding\n", + " output = self.pos_encoding.forward(token_embeds)\n", + " elif self.pos_encoding_type == 'sinusoidal':\n", + " # Use fixed sinusoidal encoding (not learnable)\n", + " batch_size, seq_len, embed_dim = token_embeds.shape\n", + " pos_embeddings = self.pos_encoding[:seq_len] # Slice using Tensor slicing\n", + " \n", + " # Reshape to add batch dimension\n", + " pos_data = pos_embeddings.data[np.newaxis, :, :]\n", + " pos_embeddings_batched = Tensor(pos_data, requires_grad=False) # Sinusoidal are fixed\n", + " \n", + " output = token_embeds + pos_embeddings_batched\n", + " else:\n", + " # No positional encoding\n", + " output = token_embeds\n", + "\n", + " # Remove batch dimension if it was added\n", + " if squeeze_batch:\n", + " # Use Tensor slicing (now supported in Module 01)\n", + " output = output[0]\n", + "\n", + " return output\n", + "\n", + " def 
__call__(self, tokens: Tensor) -> Tensor:\n", + " \"\"\"Allows the embedding layer to be called like a function.\"\"\"\n", + " return self.forward(tokens)\n", + "\n", + " def parameters(self) -> List[Tensor]:\n", + " \"\"\"Return all trainable parameters.\"\"\"\n", + " params = self.token_embedding.parameters()\n", + "\n", + " if self.pos_encoding_type == 'learned':\n", + " params.extend(self.pos_encoding.parameters())\n", + "\n", + " return params\n", + "\n", + " def __repr__(self):\n", + " return (f\"EmbeddingLayer(vocab_size={self.vocab_size}, \"\n", + " f\"embed_dim={self.embed_dim}, \"\n", + " f\"pos_encoding='{self.pos_encoding_type}')\")\n", + " ### END SOLUTION" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ed8b7db", + "metadata": { + "nbgrader": { + "grade": true, + "grade_id": "test-complete-system", + "locked": true, + "points": 15 + } + }, + "outputs": [], + "source": [ + "def test_unit_complete_embedding_system():\n", + " \"\"\"🔬 Unit Test: Complete Embedding System\"\"\"\n", + " print(\"🔬 Unit Test: Complete Embedding System...\")\n", + "\n", + " # Test 1: Learned positional encoding\n", + " embed_learned = EmbeddingLayer(\n", + " vocab_size=100,\n", + " embed_dim=64,\n", + " max_seq_len=128,\n", + " pos_encoding='learned'\n", + " )\n", + "\n", + " tokens = Tensor([[1, 2, 3], [4, 5, 6]])\n", + " output_learned = embed_learned.forward(tokens)\n", + "\n", + " assert output_learned.shape == (2, 3, 64), f\"Expected shape (2, 3, 64), got {output_learned.shape}\"\n", + "\n", + " # Test 2: Sinusoidal positional encoding\n", + " embed_sin = EmbeddingLayer(\n", + " vocab_size=100,\n", + " embed_dim=64,\n", + " pos_encoding='sinusoidal'\n", + " )\n", + "\n", + " output_sin = embed_sin.forward(tokens)\n", + " assert output_sin.shape == (2, 3, 64), \"Sinusoidal embedding should have same shape\"\n", + "\n", + " # Test 3: No positional encoding\n", + " embed_none = EmbeddingLayer(\n", + " vocab_size=100,\n", + " embed_dim=64,\n", + " 
pos_encoding=None\n", + " )\n", + "\n", + " output_none = embed_none.forward(tokens)\n", + " assert output_none.shape == (2, 3, 64), \"No pos encoding should have same shape\"\n", + "\n", + " # Test 4: 1D input handling\n", + " tokens_1d = Tensor([1, 2, 3])\n", + " output_1d = embed_learned.forward(tokens_1d)\n", + "\n", + " assert output_1d.shape == (3, 64), f\"Expected shape (3, 64) for 1D input, got {output_1d.shape}\"\n", + "\n", + " # Test 5: Embedding scaling\n", + " embed_scaled = EmbeddingLayer(\n", + " vocab_size=100,\n", + " embed_dim=64,\n", + " pos_encoding=None,\n", + " scale_embeddings=True\n", + " )\n", + "\n", + " # Use same weights to ensure fair comparison\n", + " embed_scaled.token_embedding.weight = embed_none.token_embedding.weight\n", + "\n", + " output_scaled = embed_scaled.forward(tokens)\n", + " output_unscaled = embed_none.forward(tokens)\n", + "\n", + " # Scaled version should be sqrt(64) times larger\n", + " scale_factor = math.sqrt(64)\n", + " expected_scaled = output_unscaled.data * scale_factor\n", + " assert np.allclose(output_scaled.data, expected_scaled, rtol=1e-5), \"Embedding scaling not working correctly\"\n", + "\n", + " # Test 6: Parameter counting\n", + " params_learned = embed_learned.parameters()\n", + " params_sin = embed_sin.parameters()\n", + " params_none = embed_none.parameters()\n", + "\n", + " assert len(params_learned) == 2, \"Learned encoding should have 2 parameter tensors\"\n", + " assert len(params_sin) == 1, \"Sinusoidal encoding should have 1 parameter tensor\"\n", + " assert len(params_none) == 1, \"No pos encoding should have 1 parameter tensor\"\n", + "\n", + " print(\"✅ Complete embedding system works correctly!\")\n", + "\n", + "# Run test immediately when developing this module\n", + "if __name__ == \"__main__\":\n", + " test_unit_complete_embedding_system()" + ] + }, + { + "cell_type": "markdown", + "id": "5cb55a85", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ 
+ "## 5. Systems Analysis - Embedding Trade-offs\n", + "\n", + "Understanding the performance implications of different embedding strategies is crucial for building efficient NLP systems that scale to production workloads." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e53208bc", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "memory-analysis", + "solution": true + } + }, + "outputs": [], + "source": [ + "def analyze_embedding_memory_scaling():\n", + " \"\"\"📊 Compare embedding memory requirements across different model scales.\"\"\"\n", + " print(\"📊 Analyzing Embedding Memory Requirements...\")\n", + "\n", + " # Vocabulary and embedding dimension scenarios\n", + " scenarios = [\n", + " (\"Small Model\", 10_000, 256),\n", + " (\"Medium Model\", 50_000, 512),\n", + " (\"Large Model\", 100_000, 1024),\n", + " (\"GPT-3 Scale\", 50_257, 12_288),\n", + " ]\n", + "\n", + " print(f\"{'Model':<15} {'Vocab Size':<12} {'Embed Dim':<12} {'Memory (MB)':<15} {'Parameters (M)':<15}\")\n", + " print(\"-\" * 80)\n", + "\n", + " for name, vocab_size, embed_dim in scenarios:\n", + " # Calculate memory for FP32 (4 bytes per parameter)\n", + " params = vocab_size * embed_dim\n", + " memory_mb = params * BYTES_PER_FLOAT32 / MB_TO_BYTES\n", + " params_m = params / 1_000_000\n", + "\n", + " print(f\"{name:<15} {vocab_size:<12,} {embed_dim:<12} {memory_mb:<15.1f} {params_m:<15.2f}\")\n", + "\n", + " print(\"\\n💡 Key Insights:\")\n", + " print(\"• Embedding tables often dominate model memory (especially for large vocabularies)\")\n", + " print(\"• Memory scales linearly with vocab_size × embed_dim\")\n", + " print(\"• Consider vocabulary pruning for memory-constrained environments\")\n", + "\n", + " # Positional encoding memory comparison\n", + " print(f\"\\n📊 Positional Encoding Memory Comparison (embed_dim=512, max_seq_len=2048):\")\n", + "\n", + " learned_params = 2048 * 512\n", + " learned_memory = learned_params * 
4 / (1024 * 1024)\n", + "\n", + " print(f\"Learned PE: {learned_memory:.1f} MB ({learned_params:,} parameters)\")\n", + " print(f\"Sinusoidal PE: 0.0 MB (0 parameters - computed on-the-fly)\")\n", + " print(f\"No PE: 0.0 MB (0 parameters)\")\n", + "\n", + " print(\"\\n🚀 Production Implications:\")\n", + " print(\"• GPT-3's embedding table: ~2.4GB (50K vocab × 12K dims)\")\n", + " print(\"• Learned PE adds memory but may improve task-specific performance\")\n", + " print(\"• Sinusoidal PE saves memory and allows longer sequences\")\n", + "\n", + "# Run analysis when developing/testing this module\n", + "if __name__ == \"__main__\":\n", + " analyze_embedding_memory_scaling()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5426e4cf", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": false, + "grade_id": "lookup-performance", + "solution": true + } + }, + "outputs": [], + "source": [ + "def analyze_embedding_performance():\n", + " \"\"\"📊 Compare embedding lookup performance across different configurations.\"\"\"\n", + " print(\"\\n📊 Analyzing Embedding Lookup Performance...\")\n", + "\n", + " import time\n", + "\n", + " # Test different vocabulary sizes and batch configurations\n", + " vocab_sizes = [1_000, 10_000, 100_000]\n", + " embed_dim = 512\n", + " seq_len = 128\n", + " batch_sizes = [1, 16, 64, 256]\n", + "\n", + " print(f\"{'Vocab Size':<12} {'Batch Size':<12} {'Lookup Time (ms)':<18} {'Throughput (tokens/s)':<20}\")\n", + " print(\"-\" * 70)\n", + "\n", + " for vocab_size in vocab_sizes:\n", + " # Create embedding layer\n", + " embed = Embedding(vocab_size, embed_dim)\n", + "\n", + " for batch_size in batch_sizes:\n", + " # Create random token batch\n", + " tokens = Tensor(np.random.randint(0, vocab_size, (batch_size, seq_len)))\n", + "\n", + " # Warmup\n", + " for _ in range(5):\n", + " _ = embed.forward(tokens)\n", + "\n", + " # Time the lookup\n", + " start_time = time.time()\n", + " iterations = 100\n", + 
"\n", + " for _ in range(iterations):\n", + " output = embed.forward(tokens)\n", + "\n", + " end_time = time.time()\n", + "\n", + " # Calculate metrics\n", + " total_time = end_time - start_time\n", + " avg_time_ms = (total_time / iterations) * 1000\n", + " total_tokens = batch_size * seq_len * iterations\n", + " throughput = total_tokens / total_time\n", + "\n", + " print(f\"{vocab_size:<12,} {batch_size:<12} {avg_time_ms:<18.2f} {throughput:<20,.0f}\")\n", + "\n", + " print(\"\\n💡 Performance Insights:\")\n", + " print(\"• Lookup time is O(1) per token - vocabulary size doesn't affect individual lookups\")\n", + " print(\"• Larger batches improve throughput due to vectorization\")\n", + " print(\"• Memory bandwidth becomes bottleneck for large embedding dimensions\")\n", + " print(\"• Cache locality important for repeated token patterns\")\n", + "\n", + "# Run analysis when developing/testing this module\n", + "if __name__ == \"__main__\":\n", + " analyze_embedding_performance()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7e0d1d67", + "metadata": { + "nbgrader": { + "grade": false, + "grade_id": "position-encoding-comparison", + "solution": true + } + }, + "outputs": [], + "source": [ + "def analyze_positional_encoding_strategies():\n", + " \"\"\"📊 Compare different positional encoding approaches and trade-offs.\"\"\"\n", + " print(\"\\n📊 Analyzing Positional Encoding Trade-offs...\")\n", + "\n", + " max_seq_len = 512\n", + " embed_dim = 256\n", + "\n", + " # Create both types of positional encodings\n", + " learned_pe = PositionalEncoding(max_seq_len, embed_dim)\n", + " sinusoidal_pe = create_sinusoidal_embeddings(max_seq_len, embed_dim)\n", + "\n", + " # Analyze memory footprint\n", + " learned_params = max_seq_len * embed_dim\n", + " learned_memory = learned_params * 4 / (1024 * 1024) # MB\n", + "\n", + " print(f\"📈 Memory Comparison:\")\n", + " print(f\"Learned PE: {learned_memory:.2f} MB ({learned_params:,} parameters)\")\n", + " 
print(f\"Sinusoidal PE: 0.00 MB (0 parameters)\")\n", + "\n", + " # Analyze encoding patterns\n", + " print(f\"\\n📈 Encoding Pattern Analysis:\")\n", + "\n", + " # Test sample sequences\n", + " test_input = Tensor(np.random.randn(1, 10, embed_dim))\n", + "\n", + " learned_output = learned_pe.forward(test_input)\n", + "\n", + " # For sinusoidal, manually add to match learned interface\n", + " sin_encodings = sinusoidal_pe.data[:10][np.newaxis, :, :] # (1, 10, embed_dim)\n", + " sinusoidal_output = Tensor(test_input.data + sin_encodings)\n", + "\n", + " # Analyze variance across positions\n", + " learned_var = np.var(learned_output.data, axis=1).mean() # Variance across positions\n", + " sin_var = np.var(sinusoidal_output.data, axis=1).mean()\n", + "\n", + " print(f\"Position variance (learned): {learned_var:.4f}\")\n", + " print(f\"Position variance (sinusoidal): {sin_var:.4f}\")\n", + "\n", + " # Check extrapolation capability\n", + " print(f\"\\n📈 Extrapolation Analysis:\")\n", + " extended_length = max_seq_len + 100\n", + "\n", + " try:\n", + " # Learned PE cannot handle longer sequences\n", + " extended_learned = PositionalEncoding(extended_length, embed_dim)\n", + " print(f\"Learned PE: Requires retraining for sequences > {max_seq_len}\")\n", + " except:\n", + " print(f\"Learned PE: Cannot handle sequences > {max_seq_len}\")\n", + "\n", + " # Sinusoidal can extrapolate\n", + " extended_sin = create_sinusoidal_embeddings(extended_length, embed_dim)\n", + " print(f\"Sinusoidal PE: Can extrapolate to length {extended_length} (smooth continuation)\")\n", + "\n", + " print(f\"\\n🚀 Production Trade-offs:\")\n", + " print(f\"Learned PE:\")\n", + " print(f\" + Can learn task-specific positional patterns\")\n", + " print(f\" + May perform better for tasks with specific position dependencies\")\n", + " print(f\" - Requires additional memory and parameters\")\n", + " print(f\" - Fixed maximum sequence length\")\n", + " print(f\" - Needs training data for longer 
sequences\")\n", + "\n", + " print(f\"\\nSinusoidal PE:\")\n", + " print(f\" + Zero additional parameters\")\n", + " print(f\" + Can extrapolate to any sequence length\")\n", + " print(f\" + Provides rich, mathematically grounded position signals\")\n", + " print(f\" - Cannot adapt to task-specific position patterns\")\n", + " print(f\" - May be suboptimal for highly position-dependent tasks\")\n", + "\n", + "# Run analysis when developing/testing this module\n", + "if __name__ == \"__main__\":\n", + " analyze_positional_encoding_strategies()" + ] + }, + { + "cell_type": "markdown", + "id": "d85e6256", + "metadata": { + "cell_marker": "\"\"\"", + "lines_to_next_cell": 1 + }, + "source": [ + "## 6. Module Integration Test\n", + "\n", + "Let's test our complete embedding system to ensure everything works together correctly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3c3ddc94", + "metadata": { + "lines_to_next_cell": 1, + "nbgrader": { + "grade": true, + "grade_id": "module-test", + "locked": true, + "points": 20 + } + }, + "outputs": [], + "source": [ + "def test_module():\n", + " \"\"\"\n", + " Comprehensive test of entire embeddings module functionality.\n", + "\n", + " This final test ensures all components work together and the module\n", + " is ready for integration with attention mechanisms and transformers.\n", + " \"\"\"\n", + " print(\"🧪 RUNNING MODULE INTEGRATION TEST\")\n", + " print(\"=\" * 50)\n", + "\n", + " # Run all unit tests\n", + " print(\"Running unit tests...\")\n", + " test_unit_embedding()\n", + " test_unit_positional_encoding()\n", + " test_unit_sinusoidal_embeddings()\n", + " test_unit_complete_embedding_system()\n", + "\n", + " print(\"\\nRunning integration scenarios...\")\n", + "\n", + " # Integration Test 1: Realistic NLP pipeline\n", + " print(\"🔬 Integration Test: NLP Pipeline Simulation...\")\n", + "\n", + " # Simulate a small transformer setup\n", + " vocab_size = 1000\n", + " embed_dim = 128\n", + " 
max_seq_len = 64\n", + "\n", + " # Create embedding layer\n", + " embed_layer = EmbeddingLayer(\n", + " vocab_size=vocab_size,\n", + " embed_dim=embed_dim,\n", + " max_seq_len=max_seq_len,\n", + " pos_encoding='learned',\n", + " scale_embeddings=True\n", + " )\n", + "\n", + " # Simulate tokenized sentences\n", + " sentences = [\n", + " [1, 15, 42, 7, 99], # \"the cat sat on mat\"\n", + " [23, 7, 15, 88], # \"dog chased the ball\"\n", + " [1, 67, 15, 42, 7, 99, 34] # \"the big cat sat on mat here\"\n", + " ]\n", + "\n", + " # Process each sentence\n", + " outputs = []\n", + " for sentence in sentences:\n", + " tokens = Tensor(sentence)\n", + " embedded = embed_layer.forward(tokens)\n", + " outputs.append(embedded)\n", + "\n", + " # Verify output shape\n", + " expected_shape = (len(sentence), embed_dim)\n", + " assert embedded.shape == expected_shape, f\"Wrong shape for sentence: {embedded.shape} != {expected_shape}\"\n", + "\n", + " print(\"✅ Variable length sentence processing works!\")\n", + "\n", + " # Integration Test 2: Batch processing with padding\n", + " print(\"🔬 Integration Test: Batched Processing...\")\n", + "\n", + " # Create padded batch (real-world scenario)\n", + " max_len = max(len(s) for s in sentences)\n", + " batch_tokens = []\n", + "\n", + " for sentence in sentences:\n", + " # Pad with zeros (assuming 0 is padding token)\n", + " padded = sentence + [0] * (max_len - len(sentence))\n", + " batch_tokens.append(padded)\n", + "\n", + " batch_tensor = Tensor(batch_tokens) # (3, 7)\n", + " batch_output = embed_layer.forward(batch_tensor)\n", + "\n", + " assert batch_output.shape == (3, max_len, embed_dim), f\"Batch output shape incorrect: {batch_output.shape}\"\n", + "\n", + " print(\"✅ Batch processing with padding works!\")\n", + "\n", + " # Integration Test 3: Different positional encoding types\n", + " print(\"🔬 Integration Test: Position Encoding Variants...\")\n", + "\n", + " test_tokens = Tensor([[1, 2, 3, 4, 5]])\n", + "\n", + " # Test all 
position encoding types\n", + " for pe_type in ['learned', 'sinusoidal', None]:\n", + " embed_test = EmbeddingLayer(\n", + " vocab_size=100,\n", + " embed_dim=64,\n", + " pos_encoding=pe_type\n", + " )\n", + "\n", + " output = embed_test.forward(test_tokens)\n", + " assert output.shape == (1, 5, 64), f\"PE type {pe_type} failed shape test\"\n", + "\n", + " # Check parameter counts\n", + " if pe_type == 'learned':\n", + " assert len(embed_test.parameters()) == 2, f\"Learned PE should have 2 param tensors\"\n", + " else:\n", + " assert len(embed_test.parameters()) == 1, f\"PE type {pe_type} should have 1 param tensor\"\n", + "\n", + " print(\"✅ All positional encoding variants work!\")\n", + "\n", + " # Integration Test 4: Memory efficiency check\n", + " print(\"🔬 Integration Test: Memory Efficiency...\")\n", + "\n", + " # Test that we're not creating unnecessary copies\n", + " large_embed = EmbeddingLayer(vocab_size=10000, embed_dim=512)\n", + " test_batch = Tensor(np.random.randint(0, 10000, (32, 128)))\n", + "\n", + " # Multiple forward passes should not accumulate memory (in production)\n", + " for _ in range(5):\n", + " output = large_embed.forward(test_batch)\n", + " assert output.shape == (32, 128, 512), \"Large batch processing failed\"\n", + "\n", + " print(\"✅ Memory efficiency check passed!\")\n", + "\n", + " print(\"\\n\" + \"=\" * 50)\n", + " print(\"🎉 ALL TESTS PASSED! 
Module ready for export.\")\n", + " print(\"📚 Summary of capabilities built:\")\n", + " print(\" • Token embedding with trainable lookup tables\")\n", + " print(\" • Learned positional encodings for position awareness\")\n", + " print(\" • Sinusoidal positional encodings for extrapolation\")\n", + " print(\" • Complete embedding system for NLP pipelines\")\n", + " print(\" • Efficient batch processing and memory management\")\n", + " print(\"\\n🚀 Ready for: Attention mechanisms, transformers, and language models!\")\n", + " print(\"Export with: tito module complete 11\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "399d1451", + "metadata": { + "nbgrader": { + "grade": false, + "grade_id": "main-execution", + "solution": true + } + }, + "outputs": [], + "source": [ + "if __name__ == \"__main__\":\n", + " \"\"\"Main execution block for module validation.\"\"\"\n", + " print(\"🚀 Running Embeddings module...\")\n", + " test_module()\n", + " print(\"✅ Module validation complete!\")" + ] + }, + { + "cell_type": "markdown", + "id": "4f2dce68", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 🤔 ML Systems Thinking: Embedding Foundations\n", + "\n", + "### Question 1: Memory Scaling\n", + "You implemented an embedding layer with vocab_size=50,000 and embed_dim=512.\n", + "- How many parameters does this embedding table contain? _____ million\n", + "- If using FP32 (4 bytes per parameter), how much memory does this use? _____ MB\n", + "- If you double the embedding dimension to 1024, what happens to memory usage? _____ MB\n", + "\n", + "### Question 2: Lookup Complexity\n", + "Your embedding layer performs table lookups for token indices.\n", + "- What is the time complexity of looking up a single token? O(_____)\n", + "- For a batch of 32 sequences, each of length 128, how many lookup operations? _____\n", + "- Why doesn't vocabulary size affect individual lookup performance? 
_____\n", + "\n", + "### Question 3: Positional Encoding Trade-offs\n", + "You implemented both learned and sinusoidal positional encodings.\n", + "- Learned PE for max_seq_len=2048, embed_dim=512 adds how many parameters? _____\n", + "- What happens if you try to process a sequence longer than max_seq_len with learned PE? _____\n", + "- Which type of PE can handle sequences longer than seen during training? _____\n", + "\n", + "### Question 4: Production Implications\n", + "Your complete EmbeddingLayer combines token and positional embeddings.\n", + "- In GPT-3 (vocab_size≈50K, embed_dim≈12K), approximately what percentage of total parameters are in the embedding table? _____%\n", + "- If you wanted to reduce memory usage by 50%, which would be more effective: halving vocab_size or halving embed_dim? _____\n", + "- Why might sinusoidal PE be preferred for models that need to handle variable sequence lengths? _____" + ] + }, + { + "cell_type": "markdown", + "id": "5c24618c", + "metadata": { + "cell_marker": "\"\"\"" + }, + "source": [ + "## 🎯 MODULE SUMMARY: Embeddings\n", + "\n", + "Congratulations! 
You've built a complete embedding system that transforms discrete tokens into learnable representations!\n", + "\n", + "### Key Accomplishments\n", + "- Built `Embedding` class with efficient token-to-vector lookup (10M+ token support)\n", + "- Implemented `PositionalEncoding` for learnable position awareness (up to max_seq_len positions)\n", + "- Created `create_sinusoidal_embeddings` with mathematical position encoding (extrapolates beyond training)\n", + "- Developed `EmbeddingLayer` integrating both token and positional embeddings (production-ready)\n", + "- Analyzed embedding memory scaling and lookup performance trade-offs\n", + "- All tests pass ✅ (validated by `test_module()`)\n", + "\n", + "### Technical Achievements\n", + "- **Memory Efficiency**: Optimized embedding table storage and lookup patterns\n", + "- **Flexible Architecture**: Support for learned, sinusoidal, and no positional encoding\n", + "- **Batch Processing**: Efficient handling of variable-length sequences with padding\n", + "- **Systems Analysis**: Deep understanding of memory vs performance trade-offs\n", + "\n", + "### Ready for Next Steps\n", + "Your embeddings implementation enables attention mechanisms and transformer architectures!\n", + "The combination of token and positional embeddings provides the foundation for sequence-to-sequence models.\n", + "\n", + "**Next**: Module 12 will add attention mechanisms for context-aware representations!\n", + "\n", + "### Production Context\n", + "You've built the exact embedding patterns used in:\n", + "- **GPT and BERT models**: Token embeddings + learned positional encoding\n", + "- **Original Transformer (Vaswani et al.)**: Token embeddings + sinusoidal positional encoding\n", + "- **T5 models**: Relative positional embeddings (variant of your implementations)\n", + "\n", + "Export with: `tito module complete 11`" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + } + }, + "nbformat": 4, + 
"nbformat_minor": 5 +} diff --git a/modules/11_embeddings/embeddings.py b/modules/11_embeddings/embeddings.py index 95236055..81102b7b 100644 --- a/modules/11_embeddings/embeddings.py +++ b/modules/11_embeddings/embeddings.py @@ -480,17 +480,21 @@ class PositionalEncoding: f"Embedding dimension mismatch: expected {self.embed_dim}, got {embed_dim}" ) - # Get position embeddings for this sequence length (slice using .data for efficiency) - pos_embeddings_data = self.position_embeddings.data[:seq_len] # (seq_len, embed_dim) - - # Broadcast to match batch dimension: (1, seq_len, embed_dim) - pos_embeddings_data = pos_embeddings_data[np.newaxis, :, :] + # Slice position embeddings for this sequence length using Tensor slicing + # This now preserves gradient flow (as of Module 01 update with __getitem__) + pos_embeddings = self.position_embeddings[:seq_len] # (seq_len, embed_dim) - gradients preserved! - # Wrap in Tensor to preserve requires_grad - pos_embeddings = Tensor(pos_embeddings_data, requires_grad=self.position_embeddings.requires_grad) + # Reshape to add batch dimension: (1, seq_len, embed_dim) + # Need to use .data for reshaping temporarily, then wrap in Tensor + pos_data = pos_embeddings.data[np.newaxis, :, :] + pos_embeddings_batched = Tensor(pos_data, requires_grad=pos_embeddings.requires_grad) + + # Copy gradient function if it exists (to preserve backward connection) + if hasattr(pos_embeddings, '_grad_fn') and pos_embeddings._grad_fn is not None: + pos_embeddings_batched._grad_fn = pos_embeddings._grad_fn - # Add positional information using Tensor operation to preserve gradients! - result = x + pos_embeddings + # Add positional information - gradients flow through both x and pos_embeddings! 
+ result = x + pos_embeddings_batched return result @@ -900,7 +904,8 @@ class EmbeddingLayer: """ # Handle 1D input by adding batch dimension if len(tokens.shape) == 1: - tokens = Tensor(tokens.data[np.newaxis, :]) # (1, seq_len) + # NOTE: Tensor reshape preserves gradients + tokens = tokens.reshape(1, -1) squeeze_batch = True else: squeeze_batch = False @@ -910,25 +915,31 @@ class EmbeddingLayer: # Scale embeddings if requested (transformer convention) if self.scale_embeddings: - token_embeds = Tensor(token_embeds.data * math.sqrt(self.embed_dim)) + scale_factor = math.sqrt(self.embed_dim) + token_embeds = token_embeds * scale_factor # Use Tensor multiplication to preserve gradients # Add positional encoding if self.pos_encoding_type == 'learned': # Use learnable positional encoding output = self.pos_encoding.forward(token_embeds) elif self.pos_encoding_type == 'sinusoidal': - # Use fixed sinusoidal encoding + # Use fixed sinusoidal encoding (not learnable) batch_size, seq_len, embed_dim = token_embeds.shape - pos_embeddings = self.pos_encoding.data[:seq_len] # (seq_len, embed_dim) - pos_embeddings = pos_embeddings[np.newaxis, :, :] # (1, seq_len, embed_dim) - output = Tensor(token_embeds.data + pos_embeddings) + pos_embeddings = self.pos_encoding[:seq_len] # Slice using Tensor slicing + + # Reshape to add batch dimension + pos_data = pos_embeddings.data[np.newaxis, :, :] + pos_embeddings_batched = Tensor(pos_data, requires_grad=False) # Sinusoidal are fixed + + output = token_embeds + pos_embeddings_batched else: # No positional encoding output = token_embeds # Remove batch dimension if it was added if squeeze_batch: - output = Tensor(output.data[0]) # (seq_len, embed_dim) + # Use Tensor slicing (now supported in Module 01) + output = output[0] return output diff --git a/tinytorch/_modidx.py b/tinytorch/_modidx.py index 3df88156..d3126e4a 100644 --- a/tinytorch/_modidx.py +++ b/tinytorch/_modidx.py @@ -1,3 +1,19 @@ +# 
╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/[unknown]/[unknown].py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # Autogenerated by nbdev d = { 'settings': { 'branch': 'main', @@ -6,515 +22,509 @@ d = { 'settings': { 'branch': 'main', 'git_url': 'https://github.com/tinytorch/TinyTorch/', 'lib_path': 'tinytorch'}, 'syms': { 'tinytorch.applications.tinygpt': {}, - 'tinytorch.benchmarking.benchmark': { 'tinytorch.benchmarking.benchmark.Benchmark': ( '19_benchmarking/benchmarking_dev.html#benchmark', + 'tinytorch.benchmarking.benchmark': { 'tinytorch.benchmarking.benchmark.Benchmark': ( 'source/19_benchmarking/benchmarking_dev.html#benchmark', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.Benchmark.__init__': ( '19_benchmarking/benchmarking_dev.html#benchmark.__init__', + 'tinytorch.benchmarking.benchmark.Benchmark.__init__': ( 'source/19_benchmarking/benchmarking_dev.html#benchmark.__init__', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.Benchmark.compare_models': ( '19_benchmarking/benchmarking_dev.html#benchmark.compare_models', + 'tinytorch.benchmarking.benchmark.Benchmark.compare_models': ( 'source/19_benchmarking/benchmarking_dev.html#benchmark.compare_models', 'tinytorch/benchmarking/benchmark.py'), - 
'tinytorch.benchmarking.benchmark.Benchmark.run_accuracy_benchmark': ( '19_benchmarking/benchmarking_dev.html#benchmark.run_accuracy_benchmark', + 'tinytorch.benchmarking.benchmark.Benchmark.run_accuracy_benchmark': ( 'source/19_benchmarking/benchmarking_dev.html#benchmark.run_accuracy_benchmark', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.Benchmark.run_latency_benchmark': ( '19_benchmarking/benchmarking_dev.html#benchmark.run_latency_benchmark', + 'tinytorch.benchmarking.benchmark.Benchmark.run_latency_benchmark': ( 'source/19_benchmarking/benchmarking_dev.html#benchmark.run_latency_benchmark', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.Benchmark.run_memory_benchmark': ( '19_benchmarking/benchmarking_dev.html#benchmark.run_memory_benchmark', + 'tinytorch.benchmarking.benchmark.Benchmark.run_memory_benchmark': ( 'source/19_benchmarking/benchmarking_dev.html#benchmark.run_memory_benchmark', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.BenchmarkSuite': ( '19_benchmarking/benchmarking_dev.html#benchmarksuite', + 'tinytorch.benchmarking.benchmark.BenchmarkSuite': ( 'source/19_benchmarking/benchmarking_dev.html#benchmarksuite', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.BenchmarkSuite.__init__': ( '19_benchmarking/benchmarking_dev.html#benchmarksuite.__init__', + 'tinytorch.benchmarking.benchmark.BenchmarkSuite.__init__': ( 'source/19_benchmarking/benchmarking_dev.html#benchmarksuite.__init__', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.BenchmarkSuite._estimate_energy_efficiency': ( '19_benchmarking/benchmarking_dev.html#benchmarksuite._estimate_energy_efficiency', + 'tinytorch.benchmarking.benchmark.BenchmarkSuite._estimate_energy_efficiency': ( 'source/19_benchmarking/benchmarking_dev.html#benchmarksuite._estimate_energy_efficiency', 'tinytorch/benchmarking/benchmark.py'), - 
'tinytorch.benchmarking.benchmark.BenchmarkSuite.generate_report': ( '19_benchmarking/benchmarking_dev.html#benchmarksuite.generate_report', + 'tinytorch.benchmarking.benchmark.BenchmarkSuite.generate_report': ( 'source/19_benchmarking/benchmarking_dev.html#benchmarksuite.generate_report', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.BenchmarkSuite.plot_pareto_frontier': ( '19_benchmarking/benchmarking_dev.html#benchmarksuite.plot_pareto_frontier', + 'tinytorch.benchmarking.benchmark.BenchmarkSuite.plot_pareto_frontier': ( 'source/19_benchmarking/benchmarking_dev.html#benchmarksuite.plot_pareto_frontier', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.BenchmarkSuite.plot_results': ( '19_benchmarking/benchmarking_dev.html#benchmarksuite.plot_results', + 'tinytorch.benchmarking.benchmark.BenchmarkSuite.plot_results': ( 'source/19_benchmarking/benchmarking_dev.html#benchmarksuite.plot_results', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.BenchmarkSuite.run_full_benchmark': ( '19_benchmarking/benchmarking_dev.html#benchmarksuite.run_full_benchmark', + 'tinytorch.benchmarking.benchmark.BenchmarkSuite.run_full_benchmark': ( 'source/19_benchmarking/benchmarking_dev.html#benchmarksuite.run_full_benchmark', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.OlympicEvent': ( '19_benchmarking/benchmarking_dev.html#olympicevent', + 'tinytorch.benchmarking.benchmark.OlympicEvent': ( 'source/19_benchmarking/benchmarking_dev.html#olympicevent', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.TinyMLPerf': ( '19_benchmarking/benchmarking_dev.html#tinymlperf', + 'tinytorch.benchmarking.benchmark.TinyMLPerf': ( 'source/19_benchmarking/benchmarking_dev.html#tinymlperf', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.TinyMLPerf.__init__': ( '19_benchmarking/benchmarking_dev.html#tinymlperf.__init__', + 
'tinytorch.benchmarking.benchmark.TinyMLPerf.__init__': ( 'source/19_benchmarking/benchmarking_dev.html#tinymlperf.__init__', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.TinyMLPerf.generate_compliance_report': ( '19_benchmarking/benchmarking_dev.html#tinymlperf.generate_compliance_report', + 'tinytorch.benchmarking.benchmark.TinyMLPerf.generate_compliance_report': ( 'source/19_benchmarking/benchmarking_dev.html#tinymlperf.generate_compliance_report', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.TinyMLPerf.run_all_benchmarks': ( '19_benchmarking/benchmarking_dev.html#tinymlperf.run_all_benchmarks', + 'tinytorch.benchmarking.benchmark.TinyMLPerf.run_all_benchmarks': ( 'source/19_benchmarking/benchmarking_dev.html#tinymlperf.run_all_benchmarks', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.TinyMLPerf.run_standard_benchmark': ( '19_benchmarking/benchmarking_dev.html#tinymlperf.run_standard_benchmark', + 'tinytorch.benchmarking.benchmark.TinyMLPerf.run_standard_benchmark': ( 'source/19_benchmarking/benchmarking_dev.html#tinymlperf.run_standard_benchmark', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.calculate_normalized_scores': ( '19_benchmarking/benchmarking_dev.html#calculate_normalized_scores', + 'tinytorch.benchmarking.benchmark.calculate_normalized_scores': ( 'source/19_benchmarking/benchmarking_dev.html#calculate_normalized_scores', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.test_unit_benchmark': ( '19_benchmarking/benchmarking_dev.html#test_unit_benchmark', + 'tinytorch.benchmarking.benchmark.test_unit_benchmark': ( 'source/19_benchmarking/benchmarking_dev.html#test_unit_benchmark', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.test_unit_benchmark_suite': ( '19_benchmarking/benchmarking_dev.html#test_unit_benchmark_suite', + 
'tinytorch.benchmarking.benchmark.test_unit_benchmark_suite': ( 'source/19_benchmarking/benchmarking_dev.html#test_unit_benchmark_suite', 'tinytorch/benchmarking/benchmark.py'), - 'tinytorch.benchmarking.benchmark.test_unit_tinymlperf': ( '19_benchmarking/benchmarking_dev.html#test_unit_tinymlperf', + 'tinytorch.benchmarking.benchmark.test_unit_tinymlperf': ( 'source/19_benchmarking/benchmarking_dev.html#test_unit_tinymlperf', 'tinytorch/benchmarking/benchmark.py')}, - 'tinytorch.competition.submit': { 'tinytorch.competition.submit.generate_baseline': ( '20_competition/competition_dev.html#generate_baseline', + 'tinytorch.competition.submit': { 'tinytorch.competition.submit.generate_baseline': ( 'source/20_competition/competition_dev.html#generate_baseline', 'tinytorch/competition/submit.py'), - 'tinytorch.competition.submit.generate_submission': ( '20_competition/competition_dev.html#generate_submission', + 'tinytorch.competition.submit.generate_submission': ( 'source/20_competition/competition_dev.html#generate_submission', 'tinytorch/competition/submit.py'), - 'tinytorch.competition.submit.load_baseline_model': ( '20_competition/competition_dev.html#load_baseline_model', + 'tinytorch.competition.submit.load_baseline_model': ( 'source/20_competition/competition_dev.html#load_baseline_model', 'tinytorch/competition/submit.py'), - 'tinytorch.competition.submit.optimize_for_competition': ( '20_competition/competition_dev.html#optimize_for_competition', + 'tinytorch.competition.submit.optimize_for_competition': ( 'source/20_competition/competition_dev.html#optimize_for_competition', 'tinytorch/competition/submit.py'), - 'tinytorch.competition.submit.validate_installation': ( '20_competition/competition_dev.html#validate_installation', + 'tinytorch.competition.submit.validate_installation': ( 'source/20_competition/competition_dev.html#validate_installation', 'tinytorch/competition/submit.py'), - 'tinytorch.competition.submit.validate_submission': ( 
'20_competition/competition_dev.html#validate_submission', + 'tinytorch.competition.submit.validate_submission': ( 'source/20_competition/competition_dev.html#validate_submission', 'tinytorch/competition/submit.py'), - 'tinytorch.competition.submit.worked_example_optimization': ( '20_competition/competition_dev.html#worked_example_optimization', + 'tinytorch.competition.submit.worked_example_optimization': ( 'source/20_competition/competition_dev.html#worked_example_optimization', 'tinytorch/competition/submit.py')}, - 'tinytorch.core.activations': { 'tinytorch.core.activations.GELU': ( '02_activations/activations_dev.html#gelu', + 'tinytorch.core.activations': { 'tinytorch.core.activations.GELU': ( 'source/02_activations/activations_dev.html#gelu', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.GELU.__call__': ( '02_activations/activations_dev.html#gelu.__call__', + 'tinytorch.core.activations.GELU.__call__': ( 'source/02_activations/activations_dev.html#gelu.__call__', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.GELU.backward': ( '02_activations/activations_dev.html#gelu.backward', + 'tinytorch.core.activations.GELU.backward': ( 'source/02_activations/activations_dev.html#gelu.backward', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.GELU.forward': ( '02_activations/activations_dev.html#gelu.forward', + 'tinytorch.core.activations.GELU.forward': ( 'source/02_activations/activations_dev.html#gelu.forward', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.ReLU': ( '02_activations/activations_dev.html#relu', + 'tinytorch.core.activations.ReLU': ( 'source/02_activations/activations_dev.html#relu', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.ReLU.__call__': ( '02_activations/activations_dev.html#relu.__call__', + 'tinytorch.core.activations.ReLU.__call__': ( 'source/02_activations/activations_dev.html#relu.__call__', 'tinytorch/core/activations.py'), - 
'tinytorch.core.activations.ReLU.backward': ( '02_activations/activations_dev.html#relu.backward', + 'tinytorch.core.activations.ReLU.backward': ( 'source/02_activations/activations_dev.html#relu.backward', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.ReLU.forward': ( '02_activations/activations_dev.html#relu.forward', + 'tinytorch.core.activations.ReLU.forward': ( 'source/02_activations/activations_dev.html#relu.forward', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.Sigmoid': ( '02_activations/activations_dev.html#sigmoid', + 'tinytorch.core.activations.Sigmoid': ( 'source/02_activations/activations_dev.html#sigmoid', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.Sigmoid.__call__': ( '02_activations/activations_dev.html#sigmoid.__call__', + 'tinytorch.core.activations.Sigmoid.__call__': ( 'source/02_activations/activations_dev.html#sigmoid.__call__', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.Sigmoid.backward': ( '02_activations/activations_dev.html#sigmoid.backward', + 'tinytorch.core.activations.Sigmoid.backward': ( 'source/02_activations/activations_dev.html#sigmoid.backward', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.Sigmoid.forward': ( '02_activations/activations_dev.html#sigmoid.forward', + 'tinytorch.core.activations.Sigmoid.forward': ( 'source/02_activations/activations_dev.html#sigmoid.forward', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.Softmax': ( '02_activations/activations_dev.html#softmax', + 'tinytorch.core.activations.Softmax': ( 'source/02_activations/activations_dev.html#softmax', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.Softmax.__call__': ( '02_activations/activations_dev.html#softmax.__call__', + 'tinytorch.core.activations.Softmax.__call__': ( 'source/02_activations/activations_dev.html#softmax.__call__', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.Softmax.backward': ( 
'02_activations/activations_dev.html#softmax.backward', + 'tinytorch.core.activations.Softmax.backward': ( 'source/02_activations/activations_dev.html#softmax.backward', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.Softmax.forward': ( '02_activations/activations_dev.html#softmax.forward', + 'tinytorch.core.activations.Softmax.forward': ( 'source/02_activations/activations_dev.html#softmax.forward', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.Tanh': ( '02_activations/activations_dev.html#tanh', + 'tinytorch.core.activations.Tanh': ( 'source/02_activations/activations_dev.html#tanh', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.Tanh.__call__': ( '02_activations/activations_dev.html#tanh.__call__', + 'tinytorch.core.activations.Tanh.__call__': ( 'source/02_activations/activations_dev.html#tanh.__call__', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.Tanh.backward': ( '02_activations/activations_dev.html#tanh.backward', + 'tinytorch.core.activations.Tanh.backward': ( 'source/02_activations/activations_dev.html#tanh.backward', 'tinytorch/core/activations.py'), - 'tinytorch.core.activations.Tanh.forward': ( '02_activations/activations_dev.html#tanh.forward', + 'tinytorch.core.activations.Tanh.forward': ( 'source/02_activations/activations_dev.html#tanh.forward', 'tinytorch/core/activations.py')}, - 'tinytorch.core.attention': { 'tinytorch.core.attention.MultiHeadAttention': ( '12_attention/attention_dev.html#multiheadattention', + 'tinytorch.core.attention': { 'tinytorch.core.attention.MultiHeadAttention': ( 'source/12_attention/attention_dev.html#multiheadattention', 'tinytorch/core/attention.py'), - 'tinytorch.core.attention.MultiHeadAttention.__init__': ( '12_attention/attention_dev.html#multiheadattention.__init__', + 'tinytorch.core.attention.MultiHeadAttention.__call__': ( 'source/12_attention/attention_dev.html#multiheadattention.__call__', 'tinytorch/core/attention.py'), - 
'tinytorch.core.attention.MultiHeadAttention.forward': ( '12_attention/attention_dev.html#multiheadattention.forward', + 'tinytorch.core.attention.MultiHeadAttention.__init__': ( 'source/12_attention/attention_dev.html#multiheadattention.__init__', + 'tinytorch/core/attention.py'), + 'tinytorch.core.attention.MultiHeadAttention.forward': ( 'source/12_attention/attention_dev.html#multiheadattention.forward', 'tinytorch/core/attention.py'), - 'tinytorch.core.attention.MultiHeadAttention.parameters': ( '12_attention/attention_dev.html#multiheadattention.parameters', + 'tinytorch.core.attention.MultiHeadAttention.parameters': ( 'source/12_attention/attention_dev.html#multiheadattention.parameters', 'tinytorch/core/attention.py'), - 'tinytorch.core.attention.scaled_dot_product_attention': ( '12_attention/attention_dev.html#scaled_dot_product_attention', + 'tinytorch.core.attention.scaled_dot_product_attention': ( 'source/12_attention/attention_dev.html#scaled_dot_product_attention', 'tinytorch/core/attention.py')}, 'tinytorch.core.autograd': {}, - 'tinytorch.core.layers': { 'tinytorch.core.layers.Dropout': ('03_layers/layers_dev.html#dropout', 'tinytorch/core/layers.py'), - 'tinytorch.core.layers.Dropout.__call__': ( '03_layers/layers_dev.html#dropout.__call__', + 'tinytorch.core.layers': { 'tinytorch.core.layers.Dropout': ( 'source/03_layers/layers_dev.html#dropout', + 'tinytorch/core/layers.py'), + 'tinytorch.core.layers.Dropout.__call__': ( 'source/03_layers/layers_dev.html#dropout.__call__', 'tinytorch/core/layers.py'), - 'tinytorch.core.layers.Dropout.__init__': ( '03_layers/layers_dev.html#dropout.__init__', + 'tinytorch.core.layers.Dropout.__init__': ( 'source/03_layers/layers_dev.html#dropout.__init__', 'tinytorch/core/layers.py'), - 'tinytorch.core.layers.Dropout.__repr__': ( '03_layers/layers_dev.html#dropout.__repr__', + 'tinytorch.core.layers.Dropout.__repr__': ( 'source/03_layers/layers_dev.html#dropout.__repr__', 'tinytorch/core/layers.py'), - 
'tinytorch.core.layers.Dropout.forward': ( '03_layers/layers_dev.html#dropout.forward', + 'tinytorch.core.layers.Dropout.forward': ( 'source/03_layers/layers_dev.html#dropout.forward', 'tinytorch/core/layers.py'), - 'tinytorch.core.layers.Dropout.parameters': ( '03_layers/layers_dev.html#dropout.parameters', + 'tinytorch.core.layers.Dropout.parameters': ( 'source/03_layers/layers_dev.html#dropout.parameters', 'tinytorch/core/layers.py'), - 'tinytorch.core.layers.Linear': ('03_layers/layers_dev.html#linear', 'tinytorch/core/layers.py'), - 'tinytorch.core.layers.Linear.__call__': ( '03_layers/layers_dev.html#linear.__call__', + 'tinytorch.core.layers.Linear': ( 'source/03_layers/layers_dev.html#linear', + 'tinytorch/core/layers.py'), + 'tinytorch.core.layers.Linear.__call__': ( 'source/03_layers/layers_dev.html#linear.__call__', 'tinytorch/core/layers.py'), - 'tinytorch.core.layers.Linear.__init__': ( '03_layers/layers_dev.html#linear.__init__', + 'tinytorch.core.layers.Linear.__init__': ( 'source/03_layers/layers_dev.html#linear.__init__', 'tinytorch/core/layers.py'), - 'tinytorch.core.layers.Linear.__repr__': ( '03_layers/layers_dev.html#linear.__repr__', + 'tinytorch.core.layers.Linear.__repr__': ( 'source/03_layers/layers_dev.html#linear.__repr__', 'tinytorch/core/layers.py'), - 'tinytorch.core.layers.Linear.forward': ( '03_layers/layers_dev.html#linear.forward', + 'tinytorch.core.layers.Linear.forward': ( 'source/03_layers/layers_dev.html#linear.forward', 'tinytorch/core/layers.py'), - 'tinytorch.core.layers.Linear.parameters': ( '03_layers/layers_dev.html#linear.parameters', + 'tinytorch.core.layers.Linear.parameters': ( 'source/03_layers/layers_dev.html#linear.parameters', 'tinytorch/core/layers.py')}, - 'tinytorch.core.losses': { 'tinytorch.core.losses.BinaryCrossEntropyLoss': ( '04_losses/losses_dev.html#binarycrossentropyloss', + 'tinytorch.core.losses': { 'tinytorch.core.losses.BinaryCrossEntropyLoss': ( 
'source/04_losses/losses_dev.html#binarycrossentropyloss', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.BinaryCrossEntropyLoss.__call__': ( '04_losses/losses_dev.html#binarycrossentropyloss.__call__', + 'tinytorch.core.losses.BinaryCrossEntropyLoss.__call__': ( 'source/04_losses/losses_dev.html#binarycrossentropyloss.__call__', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.BinaryCrossEntropyLoss.__init__': ( '04_losses/losses_dev.html#binarycrossentropyloss.__init__', + 'tinytorch.core.losses.BinaryCrossEntropyLoss.__init__': ( 'source/04_losses/losses_dev.html#binarycrossentropyloss.__init__', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.BinaryCrossEntropyLoss.backward': ( '04_losses/losses_dev.html#binarycrossentropyloss.backward', + 'tinytorch.core.losses.BinaryCrossEntropyLoss.backward': ( 'source/04_losses/losses_dev.html#binarycrossentropyloss.backward', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.BinaryCrossEntropyLoss.forward': ( '04_losses/losses_dev.html#binarycrossentropyloss.forward', + 'tinytorch.core.losses.BinaryCrossEntropyLoss.forward': ( 'source/04_losses/losses_dev.html#binarycrossentropyloss.forward', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.CrossEntropyLoss': ( '04_losses/losses_dev.html#crossentropyloss', + 'tinytorch.core.losses.CrossEntropyLoss': ( 'source/04_losses/losses_dev.html#crossentropyloss', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.CrossEntropyLoss.__call__': ( '04_losses/losses_dev.html#crossentropyloss.__call__', + 'tinytorch.core.losses.CrossEntropyLoss.__call__': ( 'source/04_losses/losses_dev.html#crossentropyloss.__call__', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.CrossEntropyLoss.__init__': ( '04_losses/losses_dev.html#crossentropyloss.__init__', + 'tinytorch.core.losses.CrossEntropyLoss.__init__': ( 'source/04_losses/losses_dev.html#crossentropyloss.__init__', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.CrossEntropyLoss.backward': ( 
'04_losses/losses_dev.html#crossentropyloss.backward', + 'tinytorch.core.losses.CrossEntropyLoss.backward': ( 'source/04_losses/losses_dev.html#crossentropyloss.backward', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.CrossEntropyLoss.forward': ( '04_losses/losses_dev.html#crossentropyloss.forward', + 'tinytorch.core.losses.CrossEntropyLoss.forward': ( 'source/04_losses/losses_dev.html#crossentropyloss.forward', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.MSELoss': ('04_losses/losses_dev.html#mseloss', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.MSELoss.__call__': ( '04_losses/losses_dev.html#mseloss.__call__', + 'tinytorch.core.losses.MSELoss': ( 'source/04_losses/losses_dev.html#mseloss', + 'tinytorch/core/losses.py'), + 'tinytorch.core.losses.MSELoss.__call__': ( 'source/04_losses/losses_dev.html#mseloss.__call__', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.MSELoss.__init__': ( '04_losses/losses_dev.html#mseloss.__init__', + 'tinytorch.core.losses.MSELoss.__init__': ( 'source/04_losses/losses_dev.html#mseloss.__init__', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.MSELoss.backward': ( '04_losses/losses_dev.html#mseloss.backward', + 'tinytorch.core.losses.MSELoss.backward': ( 'source/04_losses/losses_dev.html#mseloss.backward', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.MSELoss.forward': ( '04_losses/losses_dev.html#mseloss.forward', + 'tinytorch.core.losses.MSELoss.forward': ( 'source/04_losses/losses_dev.html#mseloss.forward', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.import_previous_module': ( '04_losses/losses_dev.html#import_previous_module', + 'tinytorch.core.losses.import_previous_module': ( 'source/04_losses/losses_dev.html#import_previous_module', 'tinytorch/core/losses.py'), - 'tinytorch.core.losses.log_softmax': ( '04_losses/losses_dev.html#log_softmax', + 'tinytorch.core.losses.log_softmax': ( 'source/04_losses/losses_dev.html#log_softmax', 'tinytorch/core/losses.py')}, - 
'tinytorch.core.optimizers': { 'tinytorch.core.optimizers.Adam': ( '06_optimizers/optimizers_dev.html#adam', + 'tinytorch.core.optimizers': { 'tinytorch.core.optimizers.Adam': ( 'source/06_optimizers/optimizers_dev.html#adam', 'tinytorch/core/optimizers.py'), - 'tinytorch.core.optimizers.Adam.__init__': ( '06_optimizers/optimizers_dev.html#adam.__init__', + 'tinytorch.core.optimizers.Adam.__init__': ( 'source/06_optimizers/optimizers_dev.html#adam.__init__', 'tinytorch/core/optimizers.py'), - 'tinytorch.core.optimizers.Adam.step': ( '06_optimizers/optimizers_dev.html#adam.step', + 'tinytorch.core.optimizers.Adam.step': ( 'source/06_optimizers/optimizers_dev.html#adam.step', 'tinytorch/core/optimizers.py'), - 'tinytorch.core.optimizers.AdamW': ( '06_optimizers/optimizers_dev.html#adamw', + 'tinytorch.core.optimizers.AdamW': ( 'source/06_optimizers/optimizers_dev.html#adamw', 'tinytorch/core/optimizers.py'), - 'tinytorch.core.optimizers.AdamW.__init__': ( '06_optimizers/optimizers_dev.html#adamw.__init__', + 'tinytorch.core.optimizers.AdamW.__init__': ( 'source/06_optimizers/optimizers_dev.html#adamw.__init__', 'tinytorch/core/optimizers.py'), - 'tinytorch.core.optimizers.AdamW.step': ( '06_optimizers/optimizers_dev.html#adamw.step', + 'tinytorch.core.optimizers.AdamW.step': ( 'source/06_optimizers/optimizers_dev.html#adamw.step', 'tinytorch/core/optimizers.py'), - 'tinytorch.core.optimizers.Optimizer': ( '06_optimizers/optimizers_dev.html#optimizer', + 'tinytorch.core.optimizers.Optimizer': ( 'source/06_optimizers/optimizers_dev.html#optimizer', 'tinytorch/core/optimizers.py'), - 'tinytorch.core.optimizers.Optimizer.__init__': ( '06_optimizers/optimizers_dev.html#optimizer.__init__', + 'tinytorch.core.optimizers.Optimizer.__init__': ( 'source/06_optimizers/optimizers_dev.html#optimizer.__init__', 'tinytorch/core/optimizers.py'), - 'tinytorch.core.optimizers.Optimizer.step': ( '06_optimizers/optimizers_dev.html#optimizer.step', + 
'tinytorch.core.optimizers.Optimizer.step': ( 'source/06_optimizers/optimizers_dev.html#optimizer.step', 'tinytorch/core/optimizers.py'), - 'tinytorch.core.optimizers.Optimizer.zero_grad': ( '06_optimizers/optimizers_dev.html#optimizer.zero_grad', + 'tinytorch.core.optimizers.Optimizer.zero_grad': ( 'source/06_optimizers/optimizers_dev.html#optimizer.zero_grad', 'tinytorch/core/optimizers.py'), - 'tinytorch.core.optimizers.SGD': ( '06_optimizers/optimizers_dev.html#sgd', + 'tinytorch.core.optimizers.SGD': ( 'source/06_optimizers/optimizers_dev.html#sgd', 'tinytorch/core/optimizers.py'), - 'tinytorch.core.optimizers.SGD.__init__': ( '06_optimizers/optimizers_dev.html#sgd.__init__', + 'tinytorch.core.optimizers.SGD.__init__': ( 'source/06_optimizers/optimizers_dev.html#sgd.__init__', 'tinytorch/core/optimizers.py'), - 'tinytorch.core.optimizers.SGD.step': ( '06_optimizers/optimizers_dev.html#sgd.step', + 'tinytorch.core.optimizers.SGD.step': ( 'source/06_optimizers/optimizers_dev.html#sgd.step', 'tinytorch/core/optimizers.py')}, - 'tinytorch.core.spatial': { 'tinytorch.core.spatial.AvgPool2d': ( '09_spatial/spatial_dev.html#avgpool2d', + 'tinytorch.core.spatial': { 'tinytorch.core.spatial.AvgPool2d': ( '09_spatial/spatial.html#avgpool2d', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.AvgPool2d.__call__': ( '09_spatial/spatial_dev.html#avgpool2d.__call__', + 'tinytorch.core.spatial.AvgPool2d.__call__': ( '09_spatial/spatial.html#avgpool2d.__call__', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.AvgPool2d.__init__': ( '09_spatial/spatial_dev.html#avgpool2d.__init__', + 'tinytorch.core.spatial.AvgPool2d.__init__': ( '09_spatial/spatial.html#avgpool2d.__init__', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.AvgPool2d.forward': ( '09_spatial/spatial_dev.html#avgpool2d.forward', + 'tinytorch.core.spatial.AvgPool2d.forward': ( '09_spatial/spatial.html#avgpool2d.forward', 'tinytorch/core/spatial.py'), - 
'tinytorch.core.spatial.AvgPool2d.parameters': ( '09_spatial/spatial_dev.html#avgpool2d.parameters', + 'tinytorch.core.spatial.AvgPool2d.parameters': ( '09_spatial/spatial.html#avgpool2d.parameters', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.Conv2d': ( '09_spatial/spatial_dev.html#conv2d', - 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.Conv2d.__call__': ( '09_spatial/spatial_dev.html#conv2d.__call__', + 'tinytorch.core.spatial.Conv2d': ('09_spatial/spatial.html#conv2d', 'tinytorch/core/spatial.py'), + 'tinytorch.core.spatial.Conv2d.__call__': ( '09_spatial/spatial.html#conv2d.__call__', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.Conv2d.__init__': ( '09_spatial/spatial_dev.html#conv2d.__init__', + 'tinytorch.core.spatial.Conv2d.__init__': ( '09_spatial/spatial.html#conv2d.__init__', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.Conv2d.forward': ( '09_spatial/spatial_dev.html#conv2d.forward', + 'tinytorch.core.spatial.Conv2d.forward': ( '09_spatial/spatial.html#conv2d.forward', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.Conv2d.parameters': ( '09_spatial/spatial_dev.html#conv2d.parameters', + 'tinytorch.core.spatial.Conv2d.parameters': ( '09_spatial/spatial.html#conv2d.parameters', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.MaxPool2d': ( '09_spatial/spatial_dev.html#maxpool2d', + 'tinytorch.core.spatial.MaxPool2d': ( '09_spatial/spatial.html#maxpool2d', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.MaxPool2d.__call__': ( '09_spatial/spatial_dev.html#maxpool2d.__call__', + 'tinytorch.core.spatial.MaxPool2d.__call__': ( '09_spatial/spatial.html#maxpool2d.__call__', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.MaxPool2d.__init__': ( '09_spatial/spatial_dev.html#maxpool2d.__init__', + 'tinytorch.core.spatial.MaxPool2d.__init__': ( '09_spatial/spatial.html#maxpool2d.__init__', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.MaxPool2d.forward': ( 
'09_spatial/spatial_dev.html#maxpool2d.forward', + 'tinytorch.core.spatial.MaxPool2d.forward': ( '09_spatial/spatial.html#maxpool2d.forward', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.MaxPool2d.parameters': ( '09_spatial/spatial_dev.html#maxpool2d.parameters', + 'tinytorch.core.spatial.MaxPool2d.parameters': ( '09_spatial/spatial.html#maxpool2d.parameters', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.SimpleCNN': ( '09_spatial/spatial_dev.html#simplecnn', + 'tinytorch.core.spatial.SimpleCNN': ( '09_spatial/spatial.html#simplecnn', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.SimpleCNN.__call__': ( '09_spatial/spatial_dev.html#simplecnn.__call__', + 'tinytorch.core.spatial.SimpleCNN.__call__': ( '09_spatial/spatial.html#simplecnn.__call__', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.SimpleCNN.__init__': ( '09_spatial/spatial_dev.html#simplecnn.__init__', + 'tinytorch.core.spatial.SimpleCNN.__init__': ( '09_spatial/spatial.html#simplecnn.__init__', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.SimpleCNN.forward': ( '09_spatial/spatial_dev.html#simplecnn.forward', + 'tinytorch.core.spatial.SimpleCNN.forward': ( '09_spatial/spatial.html#simplecnn.forward', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.SimpleCNN.parameters': ( '09_spatial/spatial_dev.html#simplecnn.parameters', + 'tinytorch.core.spatial.SimpleCNN.parameters': ( '09_spatial/spatial.html#simplecnn.parameters', 'tinytorch/core/spatial.py'), - 'tinytorch.core.spatial.SimpleCNN.relu': ( '09_spatial/spatial_dev.html#simplecnn.relu', + 'tinytorch.core.spatial.SimpleCNN.relu': ( '09_spatial/spatial.html#simplecnn.relu', 'tinytorch/core/spatial.py')}, - 'tinytorch.core.tensor': { 'tinytorch.core.tensor.Tensor': ('01_tensor/tensor_dev.html#tensor', 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.__add__': ( '01_tensor/tensor_dev.html#tensor.__add__', - 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.__init__': ( 
'01_tensor/tensor_dev.html#tensor.__init__', + 'tinytorch.core.tensor': { 'tinytorch.core.tensor.Tensor': ('01_tensor/tensor.html#tensor', 'tinytorch/core/tensor.py'), + 'tinytorch.core.tensor.Tensor.__init__': ( '01_tensor/tensor.html#tensor.__init__', 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.__mul__': ( '01_tensor/tensor_dev.html#tensor.__mul__', - 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.__repr__': ( '01_tensor/tensor_dev.html#tensor.__repr__', + 'tinytorch.core.tensor.Tensor.__repr__': ( '01_tensor/tensor.html#tensor.__repr__', 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.__str__': ( '01_tensor/tensor_dev.html#tensor.__str__', + 'tinytorch.core.tensor.Tensor.__str__': ( '01_tensor/tensor.html#tensor.__str__', 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.__sub__': ( '01_tensor/tensor_dev.html#tensor.__sub__', - 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.__truediv__': ( '01_tensor/tensor_dev.html#tensor.__truediv__', - 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.backward': ( '01_tensor/tensor_dev.html#tensor.backward', - 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.matmul': ( '01_tensor/tensor_dev.html#tensor.matmul', - 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.max': ( '01_tensor/tensor_dev.html#tensor.max', - 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.mean': ( '01_tensor/tensor_dev.html#tensor.mean', - 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.numpy': ( '01_tensor/tensor_dev.html#tensor.numpy', - 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.reshape': ( '01_tensor/tensor_dev.html#tensor.reshape', - 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.sum': ( '01_tensor/tensor_dev.html#tensor.sum', - 'tinytorch/core/tensor.py'), - 'tinytorch.core.tensor.Tensor.transpose': ( '01_tensor/tensor_dev.html#tensor.transpose', - 'tinytorch/core/tensor.py')}, - 
'tinytorch.core.training': { 'tinytorch.core.training.CosineSchedule': ( '07_training/training_dev.html#cosineschedule', + 'tinytorch.core.tensor.Tensor.numpy': ( '01_tensor/tensor.html#tensor.numpy', + 'tinytorch/core/tensor.py')}, + 'tinytorch.core.training': { 'tinytorch.core.training.CosineSchedule': ( 'source/07_training/training_dev.html#cosineschedule', 'tinytorch/core/training.py'), - 'tinytorch.core.training.CosineSchedule.__init__': ( '07_training/training_dev.html#cosineschedule.__init__', + 'tinytorch.core.training.CosineSchedule.__init__': ( 'source/07_training/training_dev.html#cosineschedule.__init__', 'tinytorch/core/training.py'), - 'tinytorch.core.training.CosineSchedule.get_lr': ( '07_training/training_dev.html#cosineschedule.get_lr', + 'tinytorch.core.training.CosineSchedule.get_lr': ( 'source/07_training/training_dev.html#cosineschedule.get_lr', 'tinytorch/core/training.py'), - 'tinytorch.core.training.Trainer': ( '07_training/training_dev.html#trainer', + 'tinytorch.core.training.Trainer': ( 'source/07_training/training_dev.html#trainer', 'tinytorch/core/training.py'), - 'tinytorch.core.training.Trainer.__init__': ( '07_training/training_dev.html#trainer.__init__', + 'tinytorch.core.training.Trainer.__init__': ( 'source/07_training/training_dev.html#trainer.__init__', 'tinytorch/core/training.py'), - 'tinytorch.core.training.Trainer._get_model_state': ( '07_training/training_dev.html#trainer._get_model_state', + 'tinytorch.core.training.Trainer._get_model_state': ( 'source/07_training/training_dev.html#trainer._get_model_state', 'tinytorch/core/training.py'), - 'tinytorch.core.training.Trainer._get_optimizer_state': ( '07_training/training_dev.html#trainer._get_optimizer_state', + 'tinytorch.core.training.Trainer._get_optimizer_state': ( 'source/07_training/training_dev.html#trainer._get_optimizer_state', 'tinytorch/core/training.py'), - 'tinytorch.core.training.Trainer._get_scheduler_state': ( 
'07_training/training_dev.html#trainer._get_scheduler_state', + 'tinytorch.core.training.Trainer._get_scheduler_state': ( 'source/07_training/training_dev.html#trainer._get_scheduler_state', 'tinytorch/core/training.py'), - 'tinytorch.core.training.Trainer._set_model_state': ( '07_training/training_dev.html#trainer._set_model_state', + 'tinytorch.core.training.Trainer._set_model_state': ( 'source/07_training/training_dev.html#trainer._set_model_state', 'tinytorch/core/training.py'), - 'tinytorch.core.training.Trainer._set_optimizer_state': ( '07_training/training_dev.html#trainer._set_optimizer_state', + 'tinytorch.core.training.Trainer._set_optimizer_state': ( 'source/07_training/training_dev.html#trainer._set_optimizer_state', 'tinytorch/core/training.py'), - 'tinytorch.core.training.Trainer._set_scheduler_state': ( '07_training/training_dev.html#trainer._set_scheduler_state', + 'tinytorch.core.training.Trainer._set_scheduler_state': ( 'source/07_training/training_dev.html#trainer._set_scheduler_state', 'tinytorch/core/training.py'), - 'tinytorch.core.training.Trainer.evaluate': ( '07_training/training_dev.html#trainer.evaluate', + 'tinytorch.core.training.Trainer.evaluate': ( 'source/07_training/training_dev.html#trainer.evaluate', 'tinytorch/core/training.py'), - 'tinytorch.core.training.Trainer.load_checkpoint': ( '07_training/training_dev.html#trainer.load_checkpoint', + 'tinytorch.core.training.Trainer.load_checkpoint': ( 'source/07_training/training_dev.html#trainer.load_checkpoint', 'tinytorch/core/training.py'), - 'tinytorch.core.training.Trainer.save_checkpoint': ( '07_training/training_dev.html#trainer.save_checkpoint', + 'tinytorch.core.training.Trainer.save_checkpoint': ( 'source/07_training/training_dev.html#trainer.save_checkpoint', 'tinytorch/core/training.py'), - 'tinytorch.core.training.Trainer.train_epoch': ( '07_training/training_dev.html#trainer.train_epoch', + 'tinytorch.core.training.Trainer.train_epoch': ( 
'source/07_training/training_dev.html#trainer.train_epoch', 'tinytorch/core/training.py'), - 'tinytorch.core.training.load_checkpoint': ( '07_training/training_dev.html#load_checkpoint', + 'tinytorch.core.training.load_checkpoint': ( 'source/07_training/training_dev.html#load_checkpoint', 'tinytorch/core/training.py'), - 'tinytorch.core.training.save_checkpoint': ( '07_training/training_dev.html#save_checkpoint', + 'tinytorch.core.training.save_checkpoint': ( 'source/07_training/training_dev.html#save_checkpoint', 'tinytorch/core/training.py')}, - 'tinytorch.data.loader': { 'tinytorch.data.loader.DataLoader': ( '08_dataloader/dataloader_dev.html#dataloader', + 'tinytorch.data.loader': { 'tinytorch.data.loader.DataLoader': ( 'source/08_dataloader/dataloader_dev.html#dataloader', 'tinytorch/data/loader.py'), - 'tinytorch.data.loader.DataLoader.__init__': ( '08_dataloader/dataloader_dev.html#dataloader.__init__', + 'tinytorch.data.loader.DataLoader.__init__': ( 'source/08_dataloader/dataloader_dev.html#dataloader.__init__', 'tinytorch/data/loader.py'), - 'tinytorch.data.loader.DataLoader.__iter__': ( '08_dataloader/dataloader_dev.html#dataloader.__iter__', + 'tinytorch.data.loader.DataLoader.__iter__': ( 'source/08_dataloader/dataloader_dev.html#dataloader.__iter__', 'tinytorch/data/loader.py'), - 'tinytorch.data.loader.DataLoader.__len__': ( '08_dataloader/dataloader_dev.html#dataloader.__len__', + 'tinytorch.data.loader.DataLoader.__len__': ( 'source/08_dataloader/dataloader_dev.html#dataloader.__len__', 'tinytorch/data/loader.py'), - 'tinytorch.data.loader.DataLoader._collate_batch': ( '08_dataloader/dataloader_dev.html#dataloader._collate_batch', + 'tinytorch.data.loader.DataLoader._collate_batch': ( 'source/08_dataloader/dataloader_dev.html#dataloader._collate_batch', 'tinytorch/data/loader.py'), - 'tinytorch.data.loader.Dataset': ( '08_dataloader/dataloader_dev.html#dataset', + 'tinytorch.data.loader.Dataset': ( 
'source/08_dataloader/dataloader_dev.html#dataset', 'tinytorch/data/loader.py'), - 'tinytorch.data.loader.Dataset.__getitem__': ( '08_dataloader/dataloader_dev.html#dataset.__getitem__', + 'tinytorch.data.loader.Dataset.__getitem__': ( 'source/08_dataloader/dataloader_dev.html#dataset.__getitem__', 'tinytorch/data/loader.py'), - 'tinytorch.data.loader.Dataset.__len__': ( '08_dataloader/dataloader_dev.html#dataset.__len__', + 'tinytorch.data.loader.Dataset.__len__': ( 'source/08_dataloader/dataloader_dev.html#dataset.__len__', 'tinytorch/data/loader.py'), - 'tinytorch.data.loader.TensorDataset': ( '08_dataloader/dataloader_dev.html#tensordataset', + 'tinytorch.data.loader.TensorDataset': ( 'source/08_dataloader/dataloader_dev.html#tensordataset', 'tinytorch/data/loader.py'), - 'tinytorch.data.loader.TensorDataset.__getitem__': ( '08_dataloader/dataloader_dev.html#tensordataset.__getitem__', + 'tinytorch.data.loader.TensorDataset.__getitem__': ( 'source/08_dataloader/dataloader_dev.html#tensordataset.__getitem__', 'tinytorch/data/loader.py'), - 'tinytorch.data.loader.TensorDataset.__init__': ( '08_dataloader/dataloader_dev.html#tensordataset.__init__', + 'tinytorch.data.loader.TensorDataset.__init__': ( 'source/08_dataloader/dataloader_dev.html#tensordataset.__init__', 'tinytorch/data/loader.py'), - 'tinytorch.data.loader.TensorDataset.__len__': ( '08_dataloader/dataloader_dev.html#tensordataset.__len__', + 'tinytorch.data.loader.TensorDataset.__len__': ( 'source/08_dataloader/dataloader_dev.html#tensordataset.__len__', 'tinytorch/data/loader.py')}, - 'tinytorch.generation.kv_cache': { 'tinytorch.generation.kv_cache.KVCache': ( '15_memoization/memoization_dev.html#kvcache', + 'tinytorch.generation.kv_cache': { 'tinytorch.generation.kv_cache.KVCache': ( 'source/15_memoization/memoization_dev.html#kvcache', 'tinytorch/generation/kv_cache.py'), - 'tinytorch.generation.kv_cache.KVCache.__init__': ( '15_memoization/memoization_dev.html#kvcache.__init__', + 
'tinytorch.generation.kv_cache.KVCache.__init__': ( 'source/15_memoization/memoization_dev.html#kvcache.__init__', 'tinytorch/generation/kv_cache.py'), - 'tinytorch.generation.kv_cache.KVCache.advance': ( '15_memoization/memoization_dev.html#kvcache.advance', + 'tinytorch.generation.kv_cache.KVCache.advance': ( 'source/15_memoization/memoization_dev.html#kvcache.advance', 'tinytorch/generation/kv_cache.py'), - 'tinytorch.generation.kv_cache.KVCache.get': ( '15_memoization/memoization_dev.html#kvcache.get', + 'tinytorch.generation.kv_cache.KVCache.get': ( 'source/15_memoization/memoization_dev.html#kvcache.get', 'tinytorch/generation/kv_cache.py'), - 'tinytorch.generation.kv_cache.KVCache.get_memory_usage': ( '15_memoization/memoization_dev.html#kvcache.get_memory_usage', + 'tinytorch.generation.kv_cache.KVCache.get_memory_usage': ( 'source/15_memoization/memoization_dev.html#kvcache.get_memory_usage', 'tinytorch/generation/kv_cache.py'), - 'tinytorch.generation.kv_cache.KVCache.reset': ( '15_memoization/memoization_dev.html#kvcache.reset', + 'tinytorch.generation.kv_cache.KVCache.reset': ( 'source/15_memoization/memoization_dev.html#kvcache.reset', 'tinytorch/generation/kv_cache.py'), - 'tinytorch.generation.kv_cache.KVCache.update': ( '15_memoization/memoization_dev.html#kvcache.update', + 'tinytorch.generation.kv_cache.KVCache.update': ( 'source/15_memoization/memoization_dev.html#kvcache.update', 'tinytorch/generation/kv_cache.py'), - 'tinytorch.generation.kv_cache.disable_kv_cache': ( '15_memoization/memoization_dev.html#disable_kv_cache', + 'tinytorch.generation.kv_cache.disable_kv_cache': ( 'source/15_memoization/memoization_dev.html#disable_kv_cache', 'tinytorch/generation/kv_cache.py'), - 'tinytorch.generation.kv_cache.enable_kv_cache': ( '15_memoization/memoization_dev.html#enable_kv_cache', + 'tinytorch.generation.kv_cache.enable_kv_cache': ( 'source/15_memoization/memoization_dev.html#enable_kv_cache', 'tinytorch/generation/kv_cache.py')}, - 
'tinytorch.models.transformer': { 'tinytorch.models.transformer.GPT': ( '13_transformers/transformers_dev.html#gpt', + 'tinytorch.models.transformer': { 'tinytorch.models.transformer.GPT': ( 'source/13_transformers/transformers_dev.html#gpt', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.GPT.__init__': ( '13_transformers/transformers_dev.html#gpt.__init__', + 'tinytorch.models.transformer.GPT.__init__': ( 'source/13_transformers/transformers_dev.html#gpt.__init__', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.GPT._create_causal_mask': ( '13_transformers/transformers_dev.html#gpt._create_causal_mask', + 'tinytorch.models.transformer.GPT._create_causal_mask': ( 'source/13_transformers/transformers_dev.html#gpt._create_causal_mask', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.GPT.forward': ( '13_transformers/transformers_dev.html#gpt.forward', + 'tinytorch.models.transformer.GPT.forward': ( 'source/13_transformers/transformers_dev.html#gpt.forward', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.GPT.generate': ( '13_transformers/transformers_dev.html#gpt.generate', + 'tinytorch.models.transformer.GPT.generate': ( 'source/13_transformers/transformers_dev.html#gpt.generate', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.GPT.parameters': ( '13_transformers/transformers_dev.html#gpt.parameters', + 'tinytorch.models.transformer.GPT.parameters': ( 'source/13_transformers/transformers_dev.html#gpt.parameters', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.LayerNorm': ( '13_transformers/transformers_dev.html#layernorm', + 'tinytorch.models.transformer.LayerNorm': ( 'source/13_transformers/transformers_dev.html#layernorm', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.LayerNorm.__init__': ( '13_transformers/transformers_dev.html#layernorm.__init__', + 'tinytorch.models.transformer.LayerNorm.__call__': ( 
'source/13_transformers/transformers_dev.html#layernorm.__call__', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.LayerNorm.forward': ( '13_transformers/transformers_dev.html#layernorm.forward', + 'tinytorch.models.transformer.LayerNorm.__init__': ( 'source/13_transformers/transformers_dev.html#layernorm.__init__', + 'tinytorch/models/transformer.py'), + 'tinytorch.models.transformer.LayerNorm.forward': ( 'source/13_transformers/transformers_dev.html#layernorm.forward', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.LayerNorm.parameters': ( '13_transformers/transformers_dev.html#layernorm.parameters', + 'tinytorch.models.transformer.LayerNorm.parameters': ( 'source/13_transformers/transformers_dev.html#layernorm.parameters', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.MLP': ( '13_transformers/transformers_dev.html#mlp', + 'tinytorch.models.transformer.MLP': ( 'source/13_transformers/transformers_dev.html#mlp', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.MLP.__init__': ( '13_transformers/transformers_dev.html#mlp.__init__', + 'tinytorch.models.transformer.MLP.__call__': ( 'source/13_transformers/transformers_dev.html#mlp.__call__', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.MLP.forward': ( '13_transformers/transformers_dev.html#mlp.forward', + 'tinytorch.models.transformer.MLP.__init__': ( 'source/13_transformers/transformers_dev.html#mlp.__init__', + 'tinytorch/models/transformer.py'), + 'tinytorch.models.transformer.MLP.forward': ( 'source/13_transformers/transformers_dev.html#mlp.forward', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.MLP.parameters': ( '13_transformers/transformers_dev.html#mlp.parameters', + 'tinytorch.models.transformer.MLP.parameters': ( 'source/13_transformers/transformers_dev.html#mlp.parameters', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.TransformerBlock': ( 
'13_transformers/transformers_dev.html#transformerblock', + 'tinytorch.models.transformer.TransformerBlock': ( 'source/13_transformers/transformers_dev.html#transformerblock', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.TransformerBlock.__init__': ( '13_transformers/transformers_dev.html#transformerblock.__init__', + 'tinytorch.models.transformer.TransformerBlock.__call__': ( 'source/13_transformers/transformers_dev.html#transformerblock.__call__', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.TransformerBlock.forward': ( '13_transformers/transformers_dev.html#transformerblock.forward', + 'tinytorch.models.transformer.TransformerBlock.__init__': ( 'source/13_transformers/transformers_dev.html#transformerblock.__init__', + 'tinytorch/models/transformer.py'), + 'tinytorch.models.transformer.TransformerBlock.forward': ( 'source/13_transformers/transformers_dev.html#transformerblock.forward', 'tinytorch/models/transformer.py'), - 'tinytorch.models.transformer.TransformerBlock.parameters': ( '13_transformers/transformers_dev.html#transformerblock.parameters', + 'tinytorch.models.transformer.TransformerBlock.parameters': ( 'source/13_transformers/transformers_dev.html#transformerblock.parameters', 'tinytorch/models/transformer.py')}, 'tinytorch.optimization.acceleration': {}, - 'tinytorch.optimization.compression': { 'tinytorch.optimization.compression.Linear': ( '17_compression/compression_dev.html#linear', + 'tinytorch.optimization.compression': { 'tinytorch.optimization.compression.Linear': ( 'source/17_compression/compression_dev.html#linear', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Linear.__init__': ( '17_compression/compression_dev.html#linear.__init__', + 'tinytorch.optimization.compression.Linear.__init__': ( 'source/17_compression/compression_dev.html#linear.__init__', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Linear.forward': ( 
'17_compression/compression_dev.html#linear.forward', + 'tinytorch.optimization.compression.Linear.forward': ( 'source/17_compression/compression_dev.html#linear.forward', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Linear.parameters': ( '17_compression/compression_dev.html#linear.parameters', + 'tinytorch.optimization.compression.Linear.parameters': ( 'source/17_compression/compression_dev.html#linear.parameters', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Sequential': ( '17_compression/compression_dev.html#sequential', + 'tinytorch.optimization.compression.Sequential': ( 'source/17_compression/compression_dev.html#sequential', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Sequential.__init__': ( '17_compression/compression_dev.html#sequential.__init__', + 'tinytorch.optimization.compression.Sequential.__init__': ( 'source/17_compression/compression_dev.html#sequential.__init__', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Sequential.forward': ( '17_compression/compression_dev.html#sequential.forward', + 'tinytorch.optimization.compression.Sequential.forward': ( 'source/17_compression/compression_dev.html#sequential.forward', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Sequential.parameters': ( '17_compression/compression_dev.html#sequential.parameters', + 'tinytorch.optimization.compression.Sequential.parameters': ( 'source/17_compression/compression_dev.html#sequential.parameters', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Tensor': ( '17_compression/compression_dev.html#tensor', + 'tinytorch.optimization.compression.Tensor': ( 'source/17_compression/compression_dev.html#tensor', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Tensor.__add__': ( '17_compression/compression_dev.html#tensor.__add__', + 
'tinytorch.optimization.compression.Tensor.__add__': ( 'source/17_compression/compression_dev.html#tensor.__add__', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Tensor.__init__': ( '17_compression/compression_dev.html#tensor.__init__', + 'tinytorch.optimization.compression.Tensor.__init__': ( 'source/17_compression/compression_dev.html#tensor.__init__', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Tensor.__mul__': ( '17_compression/compression_dev.html#tensor.__mul__', + 'tinytorch.optimization.compression.Tensor.__mul__': ( 'source/17_compression/compression_dev.html#tensor.__mul__', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Tensor.__repr__': ( '17_compression/compression_dev.html#tensor.__repr__', + 'tinytorch.optimization.compression.Tensor.__repr__': ( 'source/17_compression/compression_dev.html#tensor.__repr__', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Tensor.abs': ( '17_compression/compression_dev.html#tensor.abs', + 'tinytorch.optimization.compression.Tensor.abs': ( 'source/17_compression/compression_dev.html#tensor.abs', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Tensor.matmul': ( '17_compression/compression_dev.html#tensor.matmul', + 'tinytorch.optimization.compression.Tensor.matmul': ( 'source/17_compression/compression_dev.html#tensor.matmul', 'tinytorch/optimization/compression.py'), - 'tinytorch.optimization.compression.Tensor.sum': ( '17_compression/compression_dev.html#tensor.sum', + 'tinytorch.optimization.compression.Tensor.sum': ( 'source/17_compression/compression_dev.html#tensor.sum', 'tinytorch/optimization/compression.py')}, - 'tinytorch.optimization.quantization': { 'tinytorch.optimization.quantization.QuantizationComplete': ( '16_quantization/quantization_dev.html#quantizationcomplete', + 'tinytorch.optimization.quantization': { 
'tinytorch.optimization.quantization.QuantizationComplete': ( 'source/16_quantization/quantization_dev.html#quantizationcomplete', 'tinytorch/optimization/quantization.py'), - 'tinytorch.optimization.quantization.QuantizationComplete.compare_models': ( '16_quantization/quantization_dev.html#quantizationcomplete.compare_models', + 'tinytorch.optimization.quantization.QuantizationComplete.compare_models': ( 'source/16_quantization/quantization_dev.html#quantizationcomplete.compare_models', 'tinytorch/optimization/quantization.py'), - 'tinytorch.optimization.quantization.QuantizationComplete.dequantize_tensor': ( '16_quantization/quantization_dev.html#quantizationcomplete.dequantize_tensor', + 'tinytorch.optimization.quantization.QuantizationComplete.dequantize_tensor': ( 'source/16_quantization/quantization_dev.html#quantizationcomplete.dequantize_tensor', 'tinytorch/optimization/quantization.py'), - 'tinytorch.optimization.quantization.QuantizationComplete.quantize_model': ( '16_quantization/quantization_dev.html#quantizationcomplete.quantize_model', + 'tinytorch.optimization.quantization.QuantizationComplete.quantize_model': ( 'source/16_quantization/quantization_dev.html#quantizationcomplete.quantize_model', 'tinytorch/optimization/quantization.py'), - 'tinytorch.optimization.quantization.QuantizationComplete.quantize_tensor': ( '16_quantization/quantization_dev.html#quantizationcomplete.quantize_tensor', + 'tinytorch.optimization.quantization.QuantizationComplete.quantize_tensor': ( 'source/16_quantization/quantization_dev.html#quantizationcomplete.quantize_tensor', 'tinytorch/optimization/quantization.py'), - 'tinytorch.optimization.quantization.dequantize_int8': ( '16_quantization/quantization_dev.html#dequantize_int8', + 'tinytorch.optimization.quantization.dequantize_int8': ( 'source/16_quantization/quantization_dev.html#dequantize_int8', 'tinytorch/optimization/quantization.py'), - 'tinytorch.optimization.quantization.quantize_int8': ( 
'16_quantization/quantization_dev.html#quantize_int8', + 'tinytorch.optimization.quantization.quantize_int8': ( 'source/16_quantization/quantization_dev.html#quantize_int8', 'tinytorch/optimization/quantization.py'), - 'tinytorch.optimization.quantization.quantize_model': ( '16_quantization/quantization_dev.html#quantize_model', + 'tinytorch.optimization.quantization.quantize_model': ( 'source/16_quantization/quantization_dev.html#quantize_model', 'tinytorch/optimization/quantization.py')}, - 'tinytorch.profiling.profiler': { 'tinytorch.profiling.profiler.Profiler': ( '14_profiling/profiling_dev.html#profiler', + 'tinytorch.profiling.profiler': { 'tinytorch.profiling.profiler.Profiler': ( 'source/14_profiling/profiling_dev.html#profiler', 'tinytorch/profiling/profiler.py'), - 'tinytorch.profiling.profiler.Profiler.__init__': ( '14_profiling/profiling_dev.html#profiler.__init__', + 'tinytorch.profiling.profiler.Profiler.__init__': ( 'source/14_profiling/profiling_dev.html#profiler.__init__', 'tinytorch/profiling/profiler.py'), - 'tinytorch.profiling.profiler.Profiler.count_flops': ( '14_profiling/profiling_dev.html#profiler.count_flops', + 'tinytorch.profiling.profiler.Profiler.count_flops': ( 'source/14_profiling/profiling_dev.html#profiler.count_flops', 'tinytorch/profiling/profiler.py'), - 'tinytorch.profiling.profiler.Profiler.count_parameters': ( '14_profiling/profiling_dev.html#profiler.count_parameters', + 'tinytorch.profiling.profiler.Profiler.count_parameters': ( 'source/14_profiling/profiling_dev.html#profiler.count_parameters', 'tinytorch/profiling/profiler.py'), - 'tinytorch.profiling.profiler.Profiler.measure_latency': ( '14_profiling/profiling_dev.html#profiler.measure_latency', + 'tinytorch.profiling.profiler.Profiler.measure_latency': ( 'source/14_profiling/profiling_dev.html#profiler.measure_latency', 'tinytorch/profiling/profiler.py'), - 'tinytorch.profiling.profiler.Profiler.measure_memory': ( 
'14_profiling/profiling_dev.html#profiler.measure_memory', + 'tinytorch.profiling.profiler.Profiler.measure_memory': ( 'source/14_profiling/profiling_dev.html#profiler.measure_memory', 'tinytorch/profiling/profiler.py'), - 'tinytorch.profiling.profiler.Profiler.profile_backward_pass': ( '14_profiling/profiling_dev.html#profiler.profile_backward_pass', + 'tinytorch.profiling.profiler.Profiler.profile_backward_pass': ( 'source/14_profiling/profiling_dev.html#profiler.profile_backward_pass', 'tinytorch/profiling/profiler.py'), - 'tinytorch.profiling.profiler.Profiler.profile_forward_pass': ( '14_profiling/profiling_dev.html#profiler.profile_forward_pass', + 'tinytorch.profiling.profiler.Profiler.profile_forward_pass': ( 'source/14_profiling/profiling_dev.html#profiler.profile_forward_pass', 'tinytorch/profiling/profiler.py'), - 'tinytorch.profiling.profiler.Profiler.profile_layer': ( '14_profiling/profiling_dev.html#profiler.profile_layer', + 'tinytorch.profiling.profiler.Profiler.profile_layer': ( 'source/14_profiling/profiling_dev.html#profiler.profile_layer', 'tinytorch/profiling/profiler.py'), - 'tinytorch.profiling.profiler.analyze_weight_distribution': ( '14_profiling/profiling_dev.html#analyze_weight_distribution', + 'tinytorch.profiling.profiler.analyze_weight_distribution': ( 'source/14_profiling/profiling_dev.html#analyze_weight_distribution', 'tinytorch/profiling/profiler.py'), - 'tinytorch.profiling.profiler.quick_profile': ( '14_profiling/profiling_dev.html#quick_profile', + 'tinytorch.profiling.profiler.quick_profile': ( 'source/14_profiling/profiling_dev.html#quick_profile', 'tinytorch/profiling/profiler.py')}, - 'tinytorch.text.embeddings': { 'tinytorch.text.embeddings.Embedding': ( '11_embeddings/embeddings_dev.html#embedding', + 'tinytorch.text.embeddings': { 'tinytorch.text.embeddings.Embedding': ( '11_embeddings/embeddings.html#embedding', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.Embedding.__init__': ( 
'11_embeddings/embeddings_dev.html#embedding.__init__', + 'tinytorch.text.embeddings.Embedding.__call__': ( '11_embeddings/embeddings.html#embedding.__call__', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.Embedding.__repr__': ( '11_embeddings/embeddings_dev.html#embedding.__repr__', + 'tinytorch.text.embeddings.Embedding.__init__': ( '11_embeddings/embeddings.html#embedding.__init__', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.Embedding.forward': ( '11_embeddings/embeddings_dev.html#embedding.forward', + 'tinytorch.text.embeddings.Embedding.__repr__': ( '11_embeddings/embeddings.html#embedding.__repr__', + 'tinytorch/text/embeddings.py'), + 'tinytorch.text.embeddings.Embedding.forward': ( '11_embeddings/embeddings.html#embedding.forward', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.Embedding.parameters': ( '11_embeddings/embeddings_dev.html#embedding.parameters', + 'tinytorch.text.embeddings.Embedding.parameters': ( '11_embeddings/embeddings.html#embedding.parameters', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.EmbeddingLayer': ( '11_embeddings/embeddings_dev.html#embeddinglayer', + 'tinytorch.text.embeddings.EmbeddingLayer': ( '11_embeddings/embeddings.html#embeddinglayer', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.EmbeddingLayer.__init__': ( '11_embeddings/embeddings_dev.html#embeddinglayer.__init__', + 'tinytorch.text.embeddings.EmbeddingLayer.__call__': ( '11_embeddings/embeddings.html#embeddinglayer.__call__', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.EmbeddingLayer.__repr__': ( '11_embeddings/embeddings_dev.html#embeddinglayer.__repr__', + 'tinytorch.text.embeddings.EmbeddingLayer.__init__': ( '11_embeddings/embeddings.html#embeddinglayer.__init__', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.EmbeddingLayer.forward': ( '11_embeddings/embeddings_dev.html#embeddinglayer.forward', + 
'tinytorch.text.embeddings.EmbeddingLayer.__repr__': ( '11_embeddings/embeddings.html#embeddinglayer.__repr__', + 'tinytorch/text/embeddings.py'), + 'tinytorch.text.embeddings.EmbeddingLayer.forward': ( '11_embeddings/embeddings.html#embeddinglayer.forward', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.EmbeddingLayer.parameters': ( '11_embeddings/embeddings_dev.html#embeddinglayer.parameters', + 'tinytorch.text.embeddings.EmbeddingLayer.parameters': ( '11_embeddings/embeddings.html#embeddinglayer.parameters', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.PositionalEncoding': ( '11_embeddings/embeddings_dev.html#positionalencoding', + 'tinytorch.text.embeddings.PositionalEncoding': ( '11_embeddings/embeddings.html#positionalencoding', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.PositionalEncoding.__init__': ( '11_embeddings/embeddings_dev.html#positionalencoding.__init__', + 'tinytorch.text.embeddings.PositionalEncoding.__call__': ( '11_embeddings/embeddings.html#positionalencoding.__call__', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.PositionalEncoding.__repr__': ( '11_embeddings/embeddings_dev.html#positionalencoding.__repr__', + 'tinytorch.text.embeddings.PositionalEncoding.__init__': ( '11_embeddings/embeddings.html#positionalencoding.__init__', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.PositionalEncoding.forward': ( '11_embeddings/embeddings_dev.html#positionalencoding.forward', + 'tinytorch.text.embeddings.PositionalEncoding.__repr__': ( '11_embeddings/embeddings.html#positionalencoding.__repr__', + 'tinytorch/text/embeddings.py'), + 'tinytorch.text.embeddings.PositionalEncoding.forward': ( '11_embeddings/embeddings.html#positionalencoding.forward', 'tinytorch/text/embeddings.py'), - 'tinytorch.text.embeddings.PositionalEncoding.parameters': ( '11_embeddings/embeddings_dev.html#positionalencoding.parameters', + 'tinytorch.text.embeddings.PositionalEncoding.parameters': ( 
'11_embeddings/embeddings.html#positionalencoding.parameters', 'tinytorch/text/embeddings.py')}, - 'tinytorch.text.tokenization': { 'tinytorch.text.tokenization.BPETokenizer': ( '10_tokenization/tokenization_dev.html#bpetokenizer', + 'tinytorch.text.tokenization': { 'tinytorch.text.tokenization.BPETokenizer': ( 'source/10_tokenization/tokenization_dev.html#bpetokenizer', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.BPETokenizer.__init__': ( '10_tokenization/tokenization_dev.html#bpetokenizer.__init__', + 'tinytorch.text.tokenization.BPETokenizer.__init__': ( 'source/10_tokenization/tokenization_dev.html#bpetokenizer.__init__', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.BPETokenizer._apply_merges': ( '10_tokenization/tokenization_dev.html#bpetokenizer._apply_merges', + 'tinytorch.text.tokenization.BPETokenizer._apply_merges': ( 'source/10_tokenization/tokenization_dev.html#bpetokenizer._apply_merges', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.BPETokenizer._build_mappings': ( '10_tokenization/tokenization_dev.html#bpetokenizer._build_mappings', + 'tinytorch.text.tokenization.BPETokenizer._build_mappings': ( 'source/10_tokenization/tokenization_dev.html#bpetokenizer._build_mappings', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.BPETokenizer._get_pairs': ( '10_tokenization/tokenization_dev.html#bpetokenizer._get_pairs', + 'tinytorch.text.tokenization.BPETokenizer._get_pairs': ( 'source/10_tokenization/tokenization_dev.html#bpetokenizer._get_pairs', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.BPETokenizer._get_word_tokens': ( '10_tokenization/tokenization_dev.html#bpetokenizer._get_word_tokens', + 'tinytorch.text.tokenization.BPETokenizer._get_word_tokens': ( 'source/10_tokenization/tokenization_dev.html#bpetokenizer._get_word_tokens', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.BPETokenizer.decode': ( 
'10_tokenization/tokenization_dev.html#bpetokenizer.decode', + 'tinytorch.text.tokenization.BPETokenizer.decode': ( 'source/10_tokenization/tokenization_dev.html#bpetokenizer.decode', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.BPETokenizer.encode': ( '10_tokenization/tokenization_dev.html#bpetokenizer.encode', + 'tinytorch.text.tokenization.BPETokenizer.encode': ( 'source/10_tokenization/tokenization_dev.html#bpetokenizer.encode', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.BPETokenizer.train': ( '10_tokenization/tokenization_dev.html#bpetokenizer.train', + 'tinytorch.text.tokenization.BPETokenizer.train': ( 'source/10_tokenization/tokenization_dev.html#bpetokenizer.train', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.CharTokenizer': ( '10_tokenization/tokenization_dev.html#chartokenizer', + 'tinytorch.text.tokenization.CharTokenizer': ( 'source/10_tokenization/tokenization_dev.html#chartokenizer', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.CharTokenizer.__init__': ( '10_tokenization/tokenization_dev.html#chartokenizer.__init__', + 'tinytorch.text.tokenization.CharTokenizer.__init__': ( 'source/10_tokenization/tokenization_dev.html#chartokenizer.__init__', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.CharTokenizer.build_vocab': ( '10_tokenization/tokenization_dev.html#chartokenizer.build_vocab', + 'tinytorch.text.tokenization.CharTokenizer.build_vocab': ( 'source/10_tokenization/tokenization_dev.html#chartokenizer.build_vocab', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.CharTokenizer.decode': ( '10_tokenization/tokenization_dev.html#chartokenizer.decode', + 'tinytorch.text.tokenization.CharTokenizer.decode': ( 'source/10_tokenization/tokenization_dev.html#chartokenizer.decode', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.CharTokenizer.encode': ( '10_tokenization/tokenization_dev.html#chartokenizer.encode', + 
'tinytorch.text.tokenization.CharTokenizer.encode': ( 'source/10_tokenization/tokenization_dev.html#chartokenizer.encode', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.Tokenizer': ( '10_tokenization/tokenization_dev.html#tokenizer', + 'tinytorch.text.tokenization.Tokenizer': ( 'source/10_tokenization/tokenization_dev.html#tokenizer', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.Tokenizer.decode': ( '10_tokenization/tokenization_dev.html#tokenizer.decode', + 'tinytorch.text.tokenization.Tokenizer.decode': ( 'source/10_tokenization/tokenization_dev.html#tokenizer.decode', 'tinytorch/text/tokenization.py'), - 'tinytorch.text.tokenization.Tokenizer.encode': ( '10_tokenization/tokenization_dev.html#tokenizer.encode', + 'tinytorch.text.tokenization.Tokenizer.encode': ( 'source/10_tokenization/tokenization_dev.html#tokenizer.encode', 'tinytorch/text/tokenization.py')}}} diff --git a/tinytorch/applications/tinygpt.py b/tinytorch/applications/tinygpt.py index 80dabc9a..c588a3c3 100644 --- a/tinytorch/applications/tinygpt.py +++ b/tinytorch/applications/tinygpt.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/20_capstone/capstone_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_tinygpt/tinygpt.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. 
║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = [] diff --git a/tinytorch/benchmarking/benchmark.py b/tinytorch/benchmarking/benchmark.py index 83b81eac..13433d24 100644 --- a/tinytorch/benchmarking/benchmark.py +++ b/tinytorch/benchmarking/benchmark.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/19_benchmarking/benchmarking_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_benchmark/benchmark.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['OlympicEvent', 'Benchmark', 'test_unit_benchmark', 'BenchmarkSuite', 'test_unit_benchmark_suite', 'TinyMLPerf', 'test_unit_tinymlperf', 'calculate_normalized_scores'] diff --git a/tinytorch/competition/submit.py b/tinytorch/competition/submit.py index a1a9d6d7..ebf2a5bb 100644 --- a/tinytorch/competition/submit.py +++ b/tinytorch/competition/submit.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/20_competition/competition_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. 
║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_submit/submit.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['validate_installation', 'load_baseline_model', 'generate_baseline', 'worked_example_optimization', 'optimize_for_competition', 'validate_submission', 'generate_submission'] diff --git a/tinytorch/core/activations.py b/tinytorch/core/activations.py index c7fcb702..2169695d 100644 --- a/tinytorch/core/activations.py +++ b/tinytorch/core/activations.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/02_activations/activations_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/03_activations/activations.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. 
║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['Sigmoid', 'ReLU', 'Tanh', 'GELU', 'Softmax'] diff --git a/tinytorch/core/attention.py b/tinytorch/core/attention.py index fd17103a..dbf22c80 100644 --- a/tinytorch/core/attention.py +++ b/tinytorch/core/attention.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/12_attention/attention_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/07_attention/attention.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. 
║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['scaled_dot_product_attention', 'MultiHeadAttention'] diff --git a/tinytorch/core/autograd.py b/tinytorch/core/autograd.py index 97e48b2e..1d39607f 100644 --- a/tinytorch/core/autograd.py +++ b/tinytorch/core/autograd.py @@ -16,9 +16,9 @@ # ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['EPSILON', 'Function', 'AddBackward', 'MulBackward', 'SubBackward', 'DivBackward', 'MatmulBackward', - 'TransposeBackward', 'PermuteBackward', 'EmbeddingBackward', 'ReshapeBackward', 'SumBackward', - 'ReLUBackward', 'SigmoidBackward', 'SoftmaxBackward', 'GELUBackward', 'MSEBackward', 'BCEBackward', - 'CrossEntropyBackward', 'enable_autograd'] + 'TransposeBackward', 'PermuteBackward', 'EmbeddingBackward', 'SliceBackward', 'ReshapeBackward', + 'SumBackward', 'ReLUBackward', 'SigmoidBackward', 'SoftmaxBackward', 'GELUBackward', 'MSEBackward', + 'BCEBackward', 'CrossEntropyBackward', 'enable_autograd'] # %% ../../modules/05_autograd/autograd.ipynb 1 import numpy as np @@ -446,6 +446,72 @@ class EmbeddingBackward(Function): return (grad_weight,) + +class SliceBackward(Function): + """ + Gradient computation for tensor slicing/indexing operations. + + **Mathematical Rule:** If Y = X[key], then: + - ∂Loss/∂X[key] = grad_output + - ∂Loss/∂X[other positions] = 0 + + **Key Insight:** Slicing is a masking operation. The backward + places gradients back into the original tensor positions, with + zeros everywhere else. + + **Applications:** Positional encodings, sequence slicing, batch selection, + attention masking in transformers. 
+ + **Examples:** + >>> x = Tensor([1, 2, 3, 4, 5], requires_grad=True) + >>> y = x[:3] # Slice first 3 elements + >>> loss = y.sum() + >>> loss.backward() + >>> # x.grad = [1, 1, 1, 0, 0] - gradients only for sliced positions + """ + + def __init__(self, tensor, key): + """ + Args: + tensor: Original tensor being sliced + key: Slicing key (index, slice, tuple of slices, etc.) + """ + super().__init__(tensor) + self.key = key + self.original_shape = tensor.shape + + def apply(self, grad_output): + """ + Compute gradient for slicing operation. + + Args: + grad_output: Gradient flowing backward from sliced output + + Returns: + Tuple with single gradient for input tensor + + **Mathematical Foundation:** + - Slicing extracts a subset of elements + - Backward scatters gradients back to original positions + - Unsliced positions receive zero gradient + + **Example:** + If X = [a, b, c, d, e] and Y = X[1:4] = [b, c, d] + Then dL/dX = [0, dL/db, dL/dc, dL/dd, 0] + """ + tensor, = self.saved_tensors + grad_input = None + + if isinstance(tensor, Tensor) and tensor.requires_grad: + # Create gradient array with same shape as original tensor + grad_input = np.zeros(self.original_shape, dtype=np.float32) + + # Place gradients back into the sliced positions + # This is the inverse of the forward slicing operation + grad_input[self.key] = grad_output + + return (grad_input,) + # %% ../../modules/05_autograd/autograd.ipynb 21 class ReshapeBackward(Function): """ @@ -811,7 +877,7 @@ def enable_autograd(): # 3. 
_autograd_enabled is a marker attribute we add at runtime # This is the CORRECT use of hasattr() for dynamic class modification if hasattr(Tensor, '_autograd_enabled'): - # Silently return - no need to warn user about multiple calls + print("⚠️ Autograd already enabled") return # Store original operations @@ -1208,5 +1274,5 @@ def enable_autograd(): print(" - backward() computes gradients") print(" - requires_grad=True enables tracking") -# Note: Autograd is enabled automatically when tinytorch is imported -# See tinytorch/__init__.py - no need to enable here +# Auto-enable when module is imported +enable_autograd() diff --git a/tinytorch/core/layers.py b/tinytorch/core/layers.py index 1289ad68..0112399e 100644 --- a/tinytorch/core/layers.py +++ b/tinytorch/core/layers.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/03_layers/layers_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/04_layers/layers.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['Linear', 'Dropout'] diff --git a/tinytorch/core/losses.py b/tinytorch/core/losses.py index 8f4369ba..ae56c17d 100644 --- a/tinytorch/core/losses.py +++ b/tinytorch/core/losses.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/04_losses/losses_dev.ipynb. 
- +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_losses/losses.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['import_previous_module', 'log_softmax', 'MSELoss', 'CrossEntropyLoss', 'BinaryCrossEntropyLoss'] diff --git a/tinytorch/core/optimizers.py b/tinytorch/core/optimizers.py index 6a4a8ecd..5b2bcf5e 100644 --- a/tinytorch/core/optimizers.py +++ b/tinytorch/core/optimizers.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/06_optimizers/optimizers_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/10_optimizers/optimizers.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. 
║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['Optimizer', 'SGD', 'Adam', 'AdamW'] diff --git a/tinytorch/core/tensor.py b/tinytorch/core/tensor.py index dfd03466..daf3b738 100644 --- a/tinytorch/core/tensor.py +++ b/tinytorch/core/tensor.py @@ -291,6 +291,33 @@ class Tensor: return result ### END SOLUTION + + def __getitem__(self, key): + """ + Enable indexing and slicing operations on Tensors. + + Allows Tensors to be indexed like NumPy arrays. + + Examples: + >>> x = Tensor([1, 2, 3, 4, 5]) + >>> x[0] # Single element + >>> x[:3] # Slice: [1, 2, 3] + >>> x[1:4] # Range: [2, 3, 4] + """ + ### BEGIN SOLUTION + # Perform the indexing on underlying NumPy array + result_data = self.data[key] + + # Ensure result is always an array (even for scalar indexing) + if not isinstance(result_data, np.ndarray): + result_data = np.array(result_data) + + # Create new Tensor with sliced data + # Note: Gradient tracking will be added by Module 05 (Autograd) + result = Tensor(result_data, requires_grad=self.requires_grad) + return result + ### END SOLUTION + def transpose(self, dim0=None, dim1=None): """ Transpose tensor dimensions. diff --git a/tinytorch/core/training.py b/tinytorch/core/training.py index dd393f81..ba99799e 100644 --- a/tinytorch/core/training.py +++ b/tinytorch/core/training.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/07_training/training_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/11_training/training.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. 
║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['CosineSchedule', 'save_checkpoint', 'load_checkpoint', 'Trainer'] diff --git a/tinytorch/data/loader.py b/tinytorch/data/loader.py index 09ea90a2..ef02fd10 100644 --- a/tinytorch/data/loader.py +++ b/tinytorch/data/loader.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/08_dataloader/dataloader_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_loader/loader.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['Dataset', 'TensorDataset', 'DataLoader'] diff --git a/tinytorch/generation/kv_cache.py b/tinytorch/generation/kv_cache.py index f6f411a6..7efebb6c 100644 --- a/tinytorch/generation/kv_cache.py +++ b/tinytorch/generation/kv_cache.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/15_memoization/memoization_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! 
║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_kv_cache/kv_cache.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['KVCache', 'enable_kv_cache', 'disable_kv_cache'] diff --git a/tinytorch/models/transformer.py b/tinytorch/models/transformer.py index 0fdd20ea..b312ef37 100644 --- a/tinytorch/models/transformer.py +++ b/tinytorch/models/transformer.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/13_transformers/transformers_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_transformer/transformer.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. 
║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['LayerNorm', 'MLP', 'TransformerBlock', 'GPT'] diff --git a/tinytorch/optimization/acceleration.py b/tinytorch/optimization/acceleration.py index fd53282e..b258bd77 100644 --- a/tinytorch/optimization/acceleration.py +++ b/tinytorch/optimization/acceleration.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/18_acceleration/acceleration_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_acceleration/acceleration.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = [] diff --git a/tinytorch/optimization/compression.py b/tinytorch/optimization/compression.py index 7f43ee68..cc633518 100644 --- a/tinytorch/optimization/compression.py +++ b/tinytorch/optimization/compression.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/17_compression/compression_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! 
║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_compression/compression.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['Tensor', 'Linear', 'Sequential'] diff --git a/tinytorch/optimization/quantization.py b/tinytorch/optimization/quantization.py index 872b359f..9f13352f 100644 --- a/tinytorch/optimization/quantization.py +++ b/tinytorch/optimization/quantization.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/16_quantization/quantization_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_quantization/quantization.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. 
║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['QuantizationComplete', 'quantize_int8', 'dequantize_int8', 'quantize_model'] diff --git a/tinytorch/profiling/profiler.py b/tinytorch/profiling/profiler.py index 88aece66..777f1e06 100644 --- a/tinytorch/profiling/profiler.py +++ b/tinytorch/profiling/profiler.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/14_profiling/profiling_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_profiler/profiler.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['Profiler', 'quick_profile', 'analyze_weight_distribution'] diff --git a/tinytorch/text/embeddings.py b/tinytorch/text/embeddings.py index 3d8a6d03..e668aa86 100644 --- a/tinytorch/text/embeddings.py +++ b/tinytorch/text/embeddings.py @@ -1,17 +1,36 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/11_embeddings/embeddings_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! 
║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_embeddings/embeddings.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 -__all__ = ['Embedding', 'PositionalEncoding', 'EmbeddingLayer'] +__all__ = ['BYTES_PER_FLOAT32', 'MB_TO_BYTES', 'Embedding', 'PositionalEncoding', 'EmbeddingLayer'] -# %% ../../modules/source/11_embeddings/embeddings_dev.ipynb 2 +# %% ../../modules/11_embeddings/embeddings.ipynb 2 import numpy as np import math from typing import List, Optional, Tuple # Import from previous modules - following dependency chain from ..core.tensor import Tensor +from ..core.autograd import EmbeddingBackward -# %% ../../modules/source/11_embeddings/embeddings_dev.ipynb 6 +# Constants for memory calculations +BYTES_PER_FLOAT32 = 4 # Standard float32 size in bytes +MB_TO_BYTES = 1024 * 1024 # Megabytes to bytes conversion + +# %% ../../modules/11_embeddings/embeddings.ipynb 6 class Embedding: """ Learnable embedding layer that maps token indices to dense vectors. 
@@ -82,10 +101,12 @@ class Embedding: embedded = self.weight.data[indices.data.astype(int)] # Create result tensor with gradient tracking - # Note: Gradient computation handled by autograd system (Module 05) - # The embedding lookup is differentiable through the weight matrix result = Tensor(embedded, requires_grad=self.weight.requires_grad) - + + # Attach backward function for gradient computation (following TinyTorch protocol) + if result.requires_grad: + result._grad_fn = EmbeddingBackward(self.weight, indices) + return result def __call__(self, indices: Tensor) -> Tensor: @@ -100,7 +121,7 @@ class Embedding: return f"Embedding(vocab_size={self.vocab_size}, embed_dim={self.embed_dim})" ### END SOLUTION -# %% ../../modules/source/11_embeddings/embeddings_dev.ipynb 10 +# %% ../../modules/11_embeddings/embeddings.ipynb 10 class PositionalEncoding: """ Learnable positional encoding layer. @@ -175,17 +196,21 @@ class PositionalEncoding: f"Embedding dimension mismatch: expected {self.embed_dim}, got {embed_dim}" ) - # Get position embeddings for this sequence length (slice using .data for efficiency) - pos_embeddings_data = self.position_embeddings.data[:seq_len] # (seq_len, embed_dim) - - # Broadcast to match batch dimension: (1, seq_len, embed_dim) - pos_embeddings_data = pos_embeddings_data[np.newaxis, :, :] + # Slice position embeddings for this sequence length using Tensor slicing + # This now preserves gradient flow (as of Module 01 update with __getitem__) + pos_embeddings = self.position_embeddings[:seq_len] # (seq_len, embed_dim) - gradients preserved! 
- # Wrap in Tensor to preserve requires_grad - pos_embeddings = Tensor(pos_embeddings_data, requires_grad=self.position_embeddings.requires_grad) + # Reshape to add batch dimension: (1, seq_len, embed_dim) + # Need to use .data for reshaping temporarily, then wrap in Tensor + pos_data = pos_embeddings.data[np.newaxis, :, :] + pos_embeddings_batched = Tensor(pos_data, requires_grad=pos_embeddings.requires_grad) + + # Copy gradient function if it exists (to preserve backward connection) + if hasattr(pos_embeddings, '_grad_fn') and pos_embeddings._grad_fn is not None: + pos_embeddings_batched._grad_fn = pos_embeddings._grad_fn - # Add positional information using Tensor operation to preserve gradients! - result = x + pos_embeddings + # Add positional information - gradients flow through both x and pos_embeddings! + result = x + pos_embeddings_batched return result @@ -201,7 +226,7 @@ class PositionalEncoding: return f"PositionalEncoding(max_seq_len={self.max_seq_len}, embed_dim={self.embed_dim})" ### END SOLUTION -# %% ../../modules/source/11_embeddings/embeddings_dev.ipynb 18 +# %% ../../modules/11_embeddings/embeddings.ipynb 18 class EmbeddingLayer: """ Complete embedding system combining token and positional embeddings. 
@@ -287,7 +312,8 @@ class EmbeddingLayer: """ # Handle 1D input by adding batch dimension if len(tokens.shape) == 1: - tokens = Tensor(tokens.data[np.newaxis, :]) # (1, seq_len) + # NOTE: Tensor reshape preserves gradients + tokens = tokens.reshape(1, -1) squeeze_batch = True else: squeeze_batch = False @@ -297,28 +323,38 @@ class EmbeddingLayer: # Scale embeddings if requested (transformer convention) if self.scale_embeddings: - token_embeds = Tensor(token_embeds.data * math.sqrt(self.embed_dim)) + scale_factor = math.sqrt(self.embed_dim) + token_embeds = token_embeds * scale_factor # Use Tensor multiplication to preserve gradients # Add positional encoding if self.pos_encoding_type == 'learned': # Use learnable positional encoding output = self.pos_encoding.forward(token_embeds) elif self.pos_encoding_type == 'sinusoidal': - # Use fixed sinusoidal encoding + # Use fixed sinusoidal encoding (not learnable) batch_size, seq_len, embed_dim = token_embeds.shape - pos_embeddings = self.pos_encoding.data[:seq_len] # (seq_len, embed_dim) - pos_embeddings = pos_embeddings[np.newaxis, :, :] # (1, seq_len, embed_dim) - output = Tensor(token_embeds.data + pos_embeddings) + pos_embeddings = self.pos_encoding[:seq_len] # Slice using Tensor slicing + + # Reshape to add batch dimension + pos_data = pos_embeddings.data[np.newaxis, :, :] + pos_embeddings_batched = Tensor(pos_data, requires_grad=False) # Sinusoidal are fixed + + output = token_embeds + pos_embeddings_batched else: # No positional encoding output = token_embeds # Remove batch dimension if it was added if squeeze_batch: - output = Tensor(output.data[0]) # (seq_len, embed_dim) + # Use Tensor slicing (now supported in Module 01) + output = output[0] return output + def __call__(self, tokens: Tensor) -> Tensor: + """Allows the embedding layer to be called like a function.""" + return self.forward(tokens) + def parameters(self) -> List[Tensor]: """Return all trainable parameters.""" params = 
self.token_embedding.parameters() diff --git a/tinytorch/text/tokenization.py b/tinytorch/text/tokenization.py index 5b368a5d..c5967954 100644 --- a/tinytorch/text/tokenization.py +++ b/tinytorch/text/tokenization.py @@ -1,5 +1,19 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/10_tokenization/tokenization_dev.ipynb. - +# ╔═══════════════════════════════════════════════════════════════════════════════╗ +# ║ 🚨 CRITICAL WARNING 🚨 ║ +# ║ AUTOGENERATED! DO NOT EDIT! ║ +# ║ ║ +# ║ This file is AUTOMATICALLY GENERATED from source modules. ║ +# ║ ANY CHANGES MADE HERE WILL BE LOST when modules are re-exported! ║ +# ║ ║ +# ║ ✅ TO EDIT: modules/XX_tokenization/tokenization.py ║ +# ║ ✅ TO EXPORT: Run 'tito module complete ' ║ +# ║ ║ +# ║ 🛡️ STUDENT PROTECTION: This file contains optimized implementations. ║ +# ║ Editing it directly may break module functionality and training. ║ +# ║ ║ +# ║ 🎓 LEARNING TIP: Work in modules/ - that's where real development ║ +# ║ happens! The tinytorch/ directory is just the compiled output. ║ +# ╚═══════════════════════════════════════════════════════════════════════════════╝ # %% auto 0 __all__ = ['Tokenizer', 'CharTokenizer', 'BPETokenizer']