mirror of
https://github.com/MLSysBook/TinyTorch.git
synced 2026-03-12 11:03:34 -05:00
feat: Complete NBGrader integration for all TinyTorch modules
Enhanced all remaining modules with comprehensive educational content:

## Modules Updated
- ✅ 03_layers: Added NBGrader metadata, solution blocks for matmul_naive and the Dense class
- ✅ 04_networks: Added NBGrader metadata, solution blocks for the Sequential class and forward pass
- ✅ 05_cnn: Added NBGrader metadata, solution blocks for the conv2d_naive function and Conv2D class
- ✅ 06_dataloader: Added NBGrader metadata, solution blocks for the Dataset base class

## Key Features Added
- **NBGrader Metadata**: All cells properly tagged with grade, grade_id, locked, schema_version, solution, and task flags
- **Solution Blocks**: All TODO sections now have ### BEGIN SOLUTION / ### END SOLUTION markers
- **Import Flexibility**: Robust import handling for development vs. package usage
- **Educational Content**: Package structure documentation and mathematical foundations
- **Comprehensive Testing**: All modules run correctly as Python scripts

## Verification Results
- ✅ All modules execute without errors
- ✅ All solution blocks implemented correctly
- ✅ Export workflow works: tito export --all successfully exports all modules
- ✅ Package integration verified: all imports work correctly
- ✅ Educational content preserved and enhanced

## Ready for Production
- Complete NBGrader-compatible assignment system
- Streamlined tito export command with automatic .py → .ipynb conversion
- Comprehensive educational modules with real-world applications
- Robust testing infrastructure for all components

Total modules completed: 7/7 (setup, tensor, activations, layers, networks, cnn, dataloader)
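For reference, the solution-block pattern this commit applies throughout the diff below works like this: the instructor source keeps the answer between markers inside an nbgrader-tagged cell, and the generated student release swaps the marked block for a stub. A minimal sketch (the cell id and function here are illustrative, not from this commit):

```python
# %% nbgrader={"grade": false, "grade_id": "example-cell", "locked": false, "schema_version": 3, "solution": true, "task": false}
def double(x: int) -> int:
    """Return 2 * x."""
    ### BEGIN SOLUTION
    return 2 * x
    ### END SOLUTION

# In the generated student version, nbgrader replaces the marked block
# with a stub such as:
#     raise NotImplementedError("Student implementation required")
```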
@@ -52,30 +52,38 @@ from tinytorch.core.tensor import Tensor
 - **Consistency:** All layers (Dense, Conv2D) live together in `core.layers`
 """

-# %%
+# %% nbgrader={"grade": false, "grade_id": "layers-setup", "locked": false, "schema_version": 3, "solution": false, "task": false}
 #| default_exp core.layers

 # Setup and imports
 import numpy as np
 import sys
 import os
 from typing import Union, Optional, Callable
 import math

-# %%
+# %% nbgrader={"grade": false, "grade_id": "layers-imports", "locked": false, "schema_version": 3, "solution": false, "task": false}
 #| export
 import numpy as np
 import math
 import sys
 from typing import Union, Optional, Callable

-# Import from the main package (rock solid foundation)
-from tinytorch.core.tensor import Tensor
-from tinytorch.core.activations import ReLU, Sigmoid, Tanh
+# Import from the main package - try package first, then local modules
+try:
+    from tinytorch.core.tensor import Tensor
+    from tinytorch.core.activations import ReLU, Sigmoid, Tanh
+except ImportError:
+    # For development, import from local modules
+    sys.path.append(os.path.join(os.path.dirname(__file__), '..', '01_tensor'))
+    sys.path.append(os.path.join(os.path.dirname(__file__), '..', '02_activations'))
+    from tensor_dev import Tensor
+    from activations_dev import ReLU, Sigmoid, Tanh

-# print("🔥 TinyTorch Layers Module")
-# print(f"NumPy version: {np.__version__}")
-# print(f"Python version: {sys.version_info.major}.{sys.version_info.minor}")
-# print("Ready to build neural network layers!")
+print("🔥 TinyTorch Layers Module")
+print(f"NumPy version: {np.__version__}")
+print(f"Python version: {sys.version_info.major}.{sys.version_info.minor}")
+print("Ready to build neural network layers!")

 # %% [markdown]
 """
@@ -155,7 +163,7 @@ C = A @ B = [[1*5 + 2*7, 1*6 + 2*8],
 Let's implement this step by step!
 """

-# %%
+# %% nbgrader={"grade": false, "grade_id": "matmul-naive", "locked": false, "schema_version": 3, "solution": true, "task": false}
 #| export
 def matmul_naive(A: np.ndarray, B: np.ndarray) -> np.ndarray:
     """
@@ -196,7 +204,26 @@ def matmul_naive(A: np.ndarray, B: np.ndarray) -> np.ndarray:
     - Use three nested for loops: for i in range(m): for j in range(p): for k in range(n):
     - Accumulate the sum: C[i,j] += A[i,k] * B[k,j]
     """
-    raise NotImplementedError("Student implementation required")
+    ### BEGIN SOLUTION
+    # Get matrix dimensions
+    m, n = A.shape
+    n2, p = B.shape
+
+    # Check compatibility
+    if n != n2:
+        raise ValueError(f"Incompatible matrix dimensions: A is {m}x{n}, B is {n2}x{p}")
+
+    # Initialize result matrix
+    C = np.zeros((m, p))
+
+    # Triple nested loop for matrix multiplication
+    for i in range(m):
+        for j in range(p):
+            for k in range(n):
+                C[i, j] += A[i, k] * B[k, j]
+
+    return C
+    ### END SOLUTION

 # %%
 #| hide
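As a quick sanity check of the matmul_naive solution above — a minimal sketch, not part of the diff, assuming the function is in scope — using the 2x2 worked example from the docstring:

```python
import numpy as np

A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6], [7, 8]])

# Expected: [[1*5 + 2*7, 1*6 + 2*8], [3*5 + 4*7, 3*6 + 4*8]] = [[19, 22], [43, 50]]
C = matmul_naive(A, B)
assert np.allclose(C, A @ B)  # agrees with NumPy's built-in matmul
print(C)
```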
@@ -53,22 +53,32 @@ from tinytorch.core.tensor import Tensor
 - **Consistency:** All network architectures live together in `core.networks`
 """

-# %%
+# %% nbgrader={"grade": false, "grade_id": "networks-setup", "locked": false, "schema_version": 3, "solution": false, "task": false}
 #| default_exp core.networks

 # Setup and imports
 import numpy as np
 import sys
 import os
 from typing import List, Union, Optional, Callable
 import matplotlib.pyplot as plt
 import matplotlib.patches as patches
 from matplotlib.patches import FancyBboxPatch, ConnectionPatch
 import seaborn as sns

-# Import all the building blocks we need
-from tinytorch.core.tensor import Tensor
-from tinytorch.core.layers import Dense
-from tinytorch.core.activations import ReLU, Sigmoid, Tanh, Softmax
+# Import all the building blocks we need - try package first, then local modules
+try:
+    from tinytorch.core.tensor import Tensor
+    from tinytorch.core.layers import Dense
+    from tinytorch.core.activations import ReLU, Sigmoid, Tanh, Softmax
+except ImportError:
+    # For development, import from local modules
+    sys.path.append(os.path.join(os.path.dirname(__file__), '..', '01_tensor'))
+    sys.path.append(os.path.join(os.path.dirname(__file__), '..', '02_activations'))
+    sys.path.append(os.path.join(os.path.dirname(__file__), '..', '03_layers'))
+    from tensor_dev import Tensor
+    from activations_dev import ReLU, Sigmoid, Tanh, Softmax
+    from layers_dev import Dense

 print("🔥 TinyTorch Networks Module")
 print(f"NumPy version: {np.__version__}")
@@ -145,7 +155,7 @@ Each layer transforms the data, and the final output is the composition of all t
 Let's start by building the most fundamental network: **Sequential**.
 """

-# %%
+# %% nbgrader={"grade": false, "grade_id": "sequential-class", "locked": false, "schema_version": 3, "solution": true, "task": false}
 #| export
 class Sequential:
     """
@@ -198,7 +208,9 @@ class Sequential:
         Sequential([Dense(3,4), ReLU(), Dense(4,2)])
         creates a 3-layer network: Dense → ReLU → Dense
         """
-        raise NotImplementedError("Student implementation required")
+        ### BEGIN SOLUTION
+        self.layers = layers
+        ### END SOLUTION

     def forward(self, x: Tensor) -> Tensor:
         """
@@ -231,7 +243,12 @@ class Sequential:
         - The output of one layer becomes input to the next
         - Return the final result
         """
-        raise NotImplementedError("Student implementation required")
+        ### BEGIN SOLUTION
+        # Apply each layer in sequence
+        for layer in self.layers:
+            x = layer(x)
+        return x
+        ### END SOLUTION

     def __call__(self, x: Tensor) -> Tensor:
         """Make network callable: network(x) same as network.forward(x)"""
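A usage sketch for the Sequential class above (not part of the diff; layer sizes follow the Dense(3,4) docstring example, and it assumes Tensor wraps a NumPy array):

```python
import numpy as np

# 3-layer network from the docstring: Dense -> ReLU -> Dense
network = Sequential([Dense(3, 4), ReLU(), Dense(4, 2)])

x = Tensor(np.array([[1.0, 2.0, 3.0]]))  # one sample, 3 features
y = network(x)                           # same as network.forward(x); 2 output features
```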
@@ -47,19 +47,37 @@ from tinytorch.core.tensor import Tensor
 - **Consistency:** All layers (Dense, Conv2D) live together in `core.layers`
 """

-# %%
+# %% nbgrader={"grade": false, "grade_id": "cnn-setup", "locked": false, "schema_version": 3, "solution": false, "task": false}
 #| default_exp core.cnn

-# %%
+# %% nbgrader={"grade": false, "grade_id": "cnn-imports", "locked": false, "schema_version": 3, "solution": false, "task": false}
 #| export
 import numpy as np
+import os
+import sys
 from typing import List, Tuple, Optional
-from tinytorch.core.tensor import Tensor

+# Import from the main package - try package first, then local modules
+try:
+    from tinytorch.core.tensor import Tensor
+    from tinytorch.core.layers import Dense
+    from tinytorch.core.activations import ReLU
+except ImportError:
+    # For development, import from local modules
+    sys.path.append(os.path.join(os.path.dirname(__file__), '..', '01_tensor'))
+    sys.path.append(os.path.join(os.path.dirname(__file__), '..', '02_activations'))
+    sys.path.append(os.path.join(os.path.dirname(__file__), '..', '03_layers'))
+    from tensor_dev import Tensor
+    from activations_dev import ReLU
+    from layers_dev import Dense

 # Setup and imports (for development)
 import matplotlib.pyplot as plt
 from tinytorch.core.layers import Dense
 from tinytorch.core.activations import ReLU

+print("🔥 TinyTorch CNN Module")
+print(f"NumPy version: {np.__version__}")
+print(f"Python version: {sys.version_info.major}.{sys.version_info.minor}")
+print("Ready to build convolutional neural networks!")

 # %% [markdown]
 """
@@ -106,7 +124,7 @@ O[i,j] = sum(I[i+di, j+dj] * K[di, dj] for di in range(kH), dj in range(kW))
 Let's implement this step by step!
 """

-# %%
+# %% nbgrader={"grade": false, "grade_id": "conv2d-naive", "locked": false, "schema_version": 3, "solution": true, "task": false}
 #| export
 def conv2d_naive(input: np.ndarray, kernel: np.ndarray) -> np.ndarray:
     """
@@ -147,7 +165,26 @@ def conv2d_naive(input: np.ndarray, kernel: np.ndarray) -> np.ndarray:
     - Use four nested loops: for i in range(out_H): for j in range(out_W): for di in range(kH): for dj in range(kW):
     - Accumulate the sum: output[i,j] += input[i+di, j+dj] * kernel[di, dj]
     """
-    raise NotImplementedError("Student implementation required")
+    ### BEGIN SOLUTION
+    # Get input and kernel dimensions
+    H, W = input.shape
+    kH, kW = kernel.shape
+
+    # Calculate output dimensions
+    out_H, out_W = H - kH + 1, W - kW + 1
+
+    # Initialize output array
+    output = np.zeros((out_H, out_W), dtype=input.dtype)
+
+    # Sliding window convolution with four nested loops
+    for i in range(out_H):
+        for j in range(out_W):
+            for di in range(kH):
+                for dj in range(kW):
+                    output[i, j] += input[i + di, j + dj] * kernel[di, dj]
+
+    return output
+    ### END SOLUTION

 # %%
 #| hide
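A small worked check for conv2d_naive — a sketch, not part of the diff, assuming the function is in scope. With a 3x3 input and a 2x2 kernel, the valid output is (3-2+1) x (3-2+1) = 2x2:

```python
import numpy as np

I = np.array([[1., 2., 3.],
              [4., 5., 6.],
              [7., 8., 9.]])
K = np.array([[1., 0.],
              [0., 1.]])

# Per the formula above: O[0,0] = I[0,0]*K[0,0] + I[1,1]*K[1,1] = 1 + 5 = 6
O = conv2d_naive(I, K)
print(O)  # [[ 6.  8.]
          #  [12. 14.]]
```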
@@ -52,7 +52,7 @@ from tinytorch.core.networks import Sequential
 - **Consistency:** All data loading utilities live together in `core.data`
 """

-# %%
+# %% nbgrader={"grade": false, "grade_id": "dataloader-setup", "locked": false, "schema_version": 3, "solution": false, "task": false}
 #| default_exp core.dataloader

 # Setup and imports
@@ -66,8 +66,13 @@ import matplotlib.pyplot as plt
 import urllib.request
 import tarfile

-# Import our building blocks
-from tinytorch.core.tensor import Tensor
+# Import our building blocks - try package first, then local modules
+try:
+    from tinytorch.core.tensor import Tensor
+except ImportError:
+    # For development, import from local modules
+    sys.path.append(os.path.join(os.path.dirname(__file__), '..', '01_tensor'))
+    from tensor_dev import Tensor

 print("🔥 TinyTorch Data Module")
 print(f"NumPy version: {np.__version__}")
@@ -138,7 +143,7 @@ Model: Process batch efficiently
 Let's start by building the most fundamental component: **Dataset**.
 """

-# %%
+# %% nbgrader={"grade": false, "grade_id": "dataset-class", "locked": false, "schema_version": 3, "solution": true, "task": false}
 #| export
 class Dataset:
     """
@@ -185,7 +190,10 @@ class Dataset:
         EXAMPLE:
             dataset[0] should return (Tensor(image_data), Tensor(label))
         """
-        raise NotImplementedError("Student implementation required")
+        ### BEGIN SOLUTION
+        # This is an abstract method - subclasses must implement it
+        raise NotImplementedError("Subclasses must implement __getitem__")
+        ### END SOLUTION

     def __len__(self) -> int:
         """
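To illustrate the Dataset contract above, a hypothetical in-memory subclass (TensorDataset is not part of this commit; it assumes Tensor accepts a NumPy array):

```python
import numpy as np

class TensorDataset(Dataset):
    """Toy in-memory dataset of (image, label) pairs."""

    def __init__(self, images: np.ndarray, labels: np.ndarray):
        self.images = images
        self.labels = labels

    def __getitem__(self, index: int):
        # Matches the documented contract: (Tensor(image_data), Tensor(label))
        return Tensor(self.images[index]), Tensor(self.labels[index])

    def __len__(self) -> int:
        return len(self.images)

# Usage sketch:
# data = TensorDataset(np.random.rand(10, 32, 32), np.arange(10))
# image, label = data[0]
```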