Add exported package files and cleanup

This commit includes:
- Exported tinytorch package files from nbdev (autograd, losses, optimizers, training, etc.)
- Updated activations.py and layers.py with __call__ methods
- New module exports: attention, spatial, tokenization, transformer, etc.
- Removed old _modidx.py file
- Cleanup of duplicate milestone directories

These are the generated package files that correspond to the source modules
we've been developing. Students will import from these when using TinyTorch.
This commit is contained in:
Vijay Janapa Reddi
2025-09-30 12:38:56 -04:00
parent 2377c788fe
commit 1f23035a1e
24 changed files with 478 additions and 189 deletions

0
tinytorch/optimization/__init__.py generated Normal file
View File

8
tinytorch/optimization/acceleration.py generated Normal file
View File

@@ -0,0 +1,8 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/16_acceleration/acceleration_dev.ipynb.
# %% auto 0
__all__ = []
# %% ../../modules/source/16_acceleration/acceleration_dev.ipynb 0
#| default_exp optimization.acceleration
#| export

85
tinytorch/optimization/compression.py generated Normal file
View File

@@ -0,0 +1,85 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/18_compression/compression_dev.ipynb.
# %% auto 0
__all__ = ['Tensor', 'Linear', 'Sequential']
# %% ../../modules/source/18_compression/compression_dev.ipynb 1
import numpy as np
import copy
from typing import List, Dict, Any, Tuple, Optional
import time
# Import from previous modules
# Note: In the full package, these would be imports like:
# from tinytorch.core.tensor import Tensor
# from tinytorch.core.layers import Linear
# For development, we'll create minimal implementations
class Tensor:
    """Minimal Tensor class for compression development - imports from Module 01 in practice."""

    def __init__(self, data, requires_grad=False):
        # Normalize any array-like input to an ndarray and cache its geometry.
        self.data = np.array(data)
        self.shape = self.data.shape
        self.size = self.data.size
        self.requires_grad = requires_grad
        self.grad = None

    def __add__(self, other):
        # Elementwise addition; scalars/arrays fall through to numpy broadcasting.
        rhs = other.data if isinstance(other, Tensor) else other
        return Tensor(self.data + rhs)

    def __mul__(self, other):
        # Elementwise product against a Tensor or a plain scalar/array.
        rhs = other.data if isinstance(other, Tensor) else other
        return Tensor(self.data * rhs)

    def matmul(self, other):
        # Matrix product; `other` is expected to be a Tensor.
        return Tensor(np.dot(self.data, other.data))

    def abs(self):
        # Elementwise absolute value as a new Tensor.
        return Tensor(np.abs(self.data))

    def sum(self, axis=None):
        # Reduce over `axis`; sums every element when axis is None.
        return Tensor(self.data.sum(axis=axis))

    def __repr__(self):
        return f"Tensor(shape={self.shape})"
class Linear:
    """Minimal Linear layer for compression development - imports from Module 03 in practice."""

    def __init__(self, in_features, out_features, bias=True):
        self.in_features = in_features
        self.out_features = out_features
        # He initialization: scale weights by sqrt(2 / fan_in).
        scale = np.sqrt(2.0 / in_features)
        self.weight = Tensor(np.random.randn(in_features, out_features) * scale)
        self.bias = Tensor(np.zeros(out_features)) if bias else None

    def forward(self, x):
        # y = x @ W (+ b when a bias was created at construction time).
        result = x.matmul(self.weight)
        return result if self.bias is None else result + self.bias

    def parameters(self):
        # Weight always trains; bias is included only when present.
        return [self.weight] if self.bias is None else [self.weight, self.bias]
class Sequential:
    """Minimal Sequential container for model compression."""

    def __init__(self, *layers):
        self.layers = list(layers)

    def forward(self, x):
        # Thread the input through every layer in registration order.
        out = x
        for layer in self.layers:
            out = layer.forward(out)
        return out

    def parameters(self):
        # Collect parameters from each layer that exposes a parameters() method.
        return [
            p
            for layer in self.layers
            if hasattr(layer, 'parameters')
            for p in layer.parameters()
        ]

8
tinytorch/optimization/quantization.py generated Normal file
View File

@@ -0,0 +1,8 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/17_quantization/quantization_dev.ipynb.
# %% auto 0
__all__ = []
# %% ../../modules/source/17_quantization/quantization_dev.ipynb 0
#| default_exp optimization.quantization
#| export