refactor(tinytorch): update test files and READMEs for module renumbering

- Update MODULE_DEPENDENCIES in test files for new ordering
- Rename test_module_05_autograd.py to test_module_06_autograd.py
- Update tinytorch/README.md with correct module structure
- Foundation tier now 01-08, Architecture tier 09-13
Vijay Janapa Reddi
2025-12-18 13:14:50 -05:00
parent 365bfc616d
commit 86b437db2a
4 changed files with 21 additions and 21 deletions

tinytorch/README.md

@@ -102,8 +102,8 @@ Build your framework through four progressive parts:
 | Part | Modules | What You Build |
 |------|---------|----------------|
-| **I. Foundations** | 01-07 | Tensors, activations, layers, losses, autograd, optimizers, training |
-| **II. Vision** | 08-09 | DataLoaders, Conv2d, CNNs for image classification |
+| **I. Foundations** | 01-08 | Tensors, activations, layers, losses, dataloader, autograd, optimizers, training |
+| **II. Vision** | 09 | Conv2d, CNNs for image classification |
 | **III. Language** | 10-13 | Tokenization, embeddings, attention, transformers |
 | **IV. Optimization** | 14-20 | Profiling, quantization, compression, acceleration, benchmarking, capstone |
@@ -173,10 +173,10 @@ TinyTorch/
 │   ├── 02_activations/  # Module 02: ReLU, Softmax activations
 │   ├── 03_layers/       # Module 03: Linear layers, Module system
 │   ├── 04_losses/       # Module 04: MSE, CrossEntropy losses
-│   ├── 05_autograd/     # Module 05: Automatic differentiation
-│   ├── 06_optimizers/   # Module 06: SGD, Adam optimizers
-│   ├── 07_training/     # Module 07: Complete training loops
-│   ├── 08_dataloader/   # Module 08: Efficient data pipelines
+│   ├── 05_dataloader/   # Module 05: Efficient data pipelines
+│   ├── 06_autograd/     # Module 06: Automatic differentiation
+│   ├── 07_optimizers/   # Module 07: SGD, Adam optimizers
+│   ├── 08_training/     # Module 08: Complete training loops
 │   ├── 09_convolutions/ # Module 09: Conv2d, MaxPool2d, CNNs
 │   ├── 10_tokenization/ # Module 10: Text processing
 │   ├── 11_embeddings/   # Module 11: Token & positional embeddings


@@ -6,8 +6,8 @@ Tests how each module interfaces with modules that came before it
 # Module dependency graph for TinyTorch
 # Current module structure:
-#   01_tensor, 02_activations, 03_layers, 04_losses, 05_autograd,
-#   06_optimizers, 07_training, 08_dataloader, 09_convolutions,
+#   01_tensor, 02_activations, 03_layers, 04_losses, 05_dataloader,
+#   06_autograd, 07_optimizers, 08_training, 09_convolutions,
 #   10_tokenization, 11_embeddings, 12_attention, 13_transformers,
 #   14_profiling, 15_quantization, 16_compression, 17_memoization,
 #   18_acceleration, 19_benchmarking, 20_capstone
@@ -16,10 +16,10 @@ MODULE_DEPENDENCIES = {
     "02_activations": ["01_tensor"],  # Needs Tensor
     "03_layers": ["01_tensor"],  # Needs Tensor
     "04_losses": ["01_tensor"],  # Needs Tensor
-    "05_autograd": ["01_tensor"],  # Core dependency on Tensor
-    "06_optimizers": ["01_tensor", "05_autograd"],  # Needs Tensor and autograd
-    "07_training": ["01_tensor", "05_autograd", "06_optimizers"],  # Training loop deps
-    "08_dataloader": ["01_tensor"],  # Needs Tensor
+    "05_dataloader": ["01_tensor"],  # Needs Tensor
+    "06_autograd": ["01_tensor"],  # Core dependency on Tensor
+    "07_optimizers": ["01_tensor", "06_autograd"],  # Needs Tensor and autograd
+    "08_training": ["01_tensor", "06_autograd", "07_optimizers"],  # Training loop deps
     "09_convolutions": ["01_tensor", "03_layers"],  # Needs Tensor and Layer base
     "10_tokenization": ["01_tensor"],  # Needs Tensor
     "11_embeddings": ["01_tensor"],  # Needs Tensor


@@ -18,22 +18,22 @@ MODULE_DEPENDENCIES = {
     "02": ["01"],  # Activations need Tensor
     "03": ["01", "02"],  # Layers need Tensor, Activations
     "04": ["01", "02", "03"],  # Losses need Tensor, Activations, Layers
-    "05": ["01", "02", "03", "04"],  # Autograd needs all foundation
-    "06": ["01", "02", "03", "04", "05"],  # Optimizers need Autograd
-    "07": ["01", "02", "03", "04", "05", "06"],  # Training needs Optimizers
-    "08": ["01"],  # DataLoader mainly needs Tensor
-    "09": ["01", "02", "03", "05"],  # Convolutions needs Tensor, Layers, Autograd
+    "05": ["01"],  # DataLoader mainly needs Tensor
+    "06": ["01", "02", "03", "04", "05"],  # Autograd needs foundation + DataLoader
+    "07": ["01", "02", "03", "04", "05", "06"],  # Optimizers need Autograd
+    "08": ["01", "02", "03", "04", "05", "06", "07"],  # Training needs Optimizers
+    "09": ["01", "02", "03", "06"],  # Convolutions needs Tensor, Layers, Autograd
     "10": ["01"],  # Tokenization mainly needs Tensor
-    "11": ["01", "05", "10"],  # Embeddings need Tensor, Autograd, Tokenization
-    "12": ["01", "03", "05", "11"],  # Attention needs Layers, Autograd, Embeddings
-    "13": ["01", "03", "05", "11", "12"],  # Transformers need Attention
+    "11": ["01", "06", "10"],  # Embeddings need Tensor, Autograd, Tokenization
+    "12": ["01", "03", "06", "11"],  # Attention needs Layers, Autograd, Embeddings
+    "13": ["01", "03", "06", "11", "12"],  # Transformers need Attention
     "14": ["01"],  # Profiling is mostly standalone
     "15": ["01", "03"],  # Quantization needs Tensor, Layers
     "16": ["01", "03"],  # Compression needs Tensor, Layers
     "17": ["01", "12", "13"],  # Memoization (KV-cache) needs Attention, Transformers
     "18": ["01"],  # Acceleration is mostly standalone
     "19": ["01"],  # Benchmarking is mostly standalone
-    "20": ["01", "02", "03", "04", "05", "06", "07"],  # Capstone needs core modules
+    "20": ["01", "02", "03", "04", "05", "06", "07", "08"],  # Capstone needs core modules
 }
 # What each module should provide (for capability testing)
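After the renumbering, comments like "Convolutions needs Tensor, Layers, Autograd" resolve autograd to "06" rather than "05". One way to sanity-check a graph like this is to walk its transitive closure; the sketch below uses a small excerpt of the dict above, and the `transitive_deps` helper is illustrative, not from the test file:

```python
# Illustrative excerpt of the numeric MODULE_DEPENDENCIES dict above.
MODULE_DEPENDENCIES = {
    "02": ["01"],
    "03": ["01", "02"],
    "06": ["01", "02", "03", "04", "05"],
    "09": ["01", "02", "03", "06"],
}

def transitive_deps(module: str) -> set[str]:
    """Every module reachable from `module` through the dependency graph."""
    seen: set[str] = set()
    stack = list(MODULE_DEPENDENCIES.get(module, []))
    while stack:
        dep = stack.pop()
        if dep not in seen:
            seen.add(dep)
            stack.extend(MODULE_DEPENDENCIES.get(dep, []))
    return seen

# Convolutions (09) should reach autograd under its new number 06,
# and through it the whole foundation tier:
assert transitive_deps("09") == {"01", "02", "03", "04", "05", "06"}
```

Because 06_autograd lists the full foundation as direct dependencies, the closure for 09 already contains 04 and 05 even though 09 does not name them directly.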