mirror of
https://github.com/MLSysBook/TinyTorch.git
synced 2026-04-29 06:37:58 -05:00
feat: Add missing BEGIN/END SOLUTION markers to NBGrader modules
- Add solution markers to 01_tensor module properties (data, shape, size, dtype)
- Add solution markers to 04_networks Sequential.forward method
- Add solution markers to 05_cnn module (conv2d_naive, Conv2D.__init__, Conv2D.forward, flatten)
- Add solution markers to 06_dataloader Dataset class methods (__getitem__, __len__, get_sample_shape)
- Verify existing solution markers in 02_activations (4 pairs), 03_layers (3 pairs), 07_autograd (4 pairs), 00_setup (2 pairs)

Critical for NBGrader functionality:

- BEGIN/END SOLUTION markers identify instructor solutions to hide from students
- Enables proper assignment generation and solution hiding
- Ensures seamless integration with the NBGrader grading system
- Maintains pedagogical separation between student TODOs and instructor solutions
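For context on what these markers do: when the student release is generated, nbgrader strips everything between `### BEGIN SOLUTION` and `### END SOLUTION` and substitutes a stub. A rough before/after sketch, assuming nbgrader's default Python code stub (the stub text is configurable):

```python
# Instructor source (what this commit marks up):
@property
def data(self) -> np.ndarray:
    ### BEGIN SOLUTION
    return self._data
    ### END SOLUTION

# Student release after `nbgrader generate_assignment` -- the solution
# region is replaced by the default code stub:
@property
def data(self) -> np.ndarray:
    # YOUR CODE HERE
    raise NotImplementedError()
```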
@@ -317,7 +317,9 @@ print(t.dtype) # int32 - data type
         - Users can access the numpy array directly via this property
         - This is how PyTorch's .data property works
         """
+        ### BEGIN SOLUTION
         return self._data
+        ### END SOLUTION
 
     @property
     def shape(self) -> Tuple[int, ...]:
@@ -346,7 +348,9 @@ print(t.dtype) # int32 - data type
         - Shape is a tuple of integers
         - Essential for checking tensor compatibility
         """
+        ### BEGIN SOLUTION
         return self._data.shape
+        ### END SOLUTION
 
     @property
     def size(self) -> int:
@@ -375,7 +379,9 @@ print(t.dtype) # int32 - data type
         - Size is the product of all dimensions
         - Important for memory calculations
         """
+        ### BEGIN SOLUTION
         return self._data.size
+        ### END SOLUTION
 
     @property
     def dtype(self) -> np.dtype:
@@ -405,7 +411,9 @@ print(t.dtype) # int32 - data type
         - Important for precision and memory usage
         - Affects computation speed and accuracy
         """
+        ### BEGIN SOLUTION
         return self._data.dtype
+        ### END SOLUTION
 
     def __repr__(self) -> str:
         """
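Taken together, the four property hunks above are thin views over the backing numpy array. A minimal sketch of the assembled class, assuming only that `__init__` stores the array as `_data` (the constructor shown here does not appear in the diff):

```python
import numpy as np
from typing import Tuple

class Tensor:
    def __init__(self, data) -> None:
        self._data = np.asarray(data)  # assumed backing store

    @property
    def data(self) -> np.ndarray:
        return self._data              # direct access, like PyTorch's .data

    @property
    def shape(self) -> Tuple[int, ...]:
        return self._data.shape        # tuple of dimension sizes

    @property
    def size(self) -> int:
        return self._data.size         # product of all dimensions

    @property
    def dtype(self) -> np.dtype:
        return self._data.dtype        # element type, e.g. int32

t = Tensor([[1, 2], [3, 4]])
print(t.shape, t.size, t.dtype)  # (2, 2) 4 int64 (dtype is platform-dependent)
```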
@@ -251,10 +251,12 @@ class Sequential:
         - The output of one layer becomes input to the next
         - Return the final result
         """
+        ### BEGIN SOLUTION
         # Apply each layer in sequence
         for layer in self.layers:
             x = layer(x)
         return x
+        ### END SOLUTION
 
     def __call__(self, x: Tensor) -> Tensor:
         """Make network callable: network(x) same as network.forward(x)"""
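The forward pass marked up here is plain function composition. A self-contained sketch of the container, with stand-in callables instead of real TinyTorch layers:

```python
class Sequential:
    """Sketch: only forward's body is taken from the diff above."""

    def __init__(self, layers):
        self.layers = layers

    def forward(self, x):
        # Apply each layer in sequence; each output feeds the next layer
        for layer in self.layers:
            x = layer(x)
        return x

    def __call__(self, x):
        return self.forward(x)

# Plain callables stand in for layers, purely for illustration
net = Sequential([lambda x: 2 * x, lambda x: x + 1])
print(net(3))  # 7 = (3 * 2) + 1
```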
@@ -184,6 +184,7 @@ def conv2d_naive(input: np.ndarray, kernel: np.ndarray) -> np.ndarray:
     - Use four nested loops: for i in range(out_H): for j in range(out_W): for di in range(kH): for dj in range(kW):
     - Accumulate the sum: output[i,j] += input[i+di, j+dj] * kernel[di, dj]
     """
+    ### BEGIN SOLUTION
     # Get input and kernel dimensions
     H, W = input.shape
     kH, kW = kernel.shape
@@ -202,6 +203,7 @@ def conv2d_naive(input: np.ndarray, kernel: np.ndarray) -> np.ndarray:
                     output[i, j] += input[i + di, j + dj] * kernel[di, dj]
 
     return output
+    ### END SOLUTION
 
 # %% [markdown]
 """
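The two hunks above bracket the whole naive convolution. Assembling them, with the output size and allocation filled in from the hints (an `H - kH + 1` by `W - kW + 1` "valid" output follows from the `input[i + di, j + dj]` indexing; those middle lines are inferred, not shown in the diff):

```python
import numpy as np

def conv2d_naive(input: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    # Dimensions, as in the first hunk
    H, W = input.shape
    kH, kW = kernel.shape

    # "Valid" output size: the kernel must fit fully inside the input
    out_H, out_W = H - kH + 1, W - kW + 1
    output = np.zeros((out_H, out_W), dtype=input.dtype)

    # Four nested loops, accumulating as in the second hunk
    for i in range(out_H):
        for j in range(out_W):
            for di in range(kH):
                for dj in range(kW):
                    output[i, j] += input[i + di, j + dj] * kernel[di, dj]

    return output

x = np.arange(16, dtype=np.float32).reshape(4, 4)
print(conv2d_naive(x, np.ones((2, 2), dtype=np.float32)).shape)  # (3, 3)
```

Strictly speaking this computes cross-correlation (no kernel flip), which is the usual deep-learning convention for "convolution".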
@@ -334,12 +336,14 @@ class Conv2D:
         - Initialize kernel: np.random.randn(kH, kW) * 0.1 (small values)
         - Convert to float32 for consistency
         """
+        ### BEGIN SOLUTION
         # Store kernel size
         self.kernel_size = kernel_size
         kH, kW = kernel_size
 
         # Initialize random kernel with small values
         self.kernel = np.random.randn(kH, kW).astype(np.float32) * 0.1
+        ### END SOLUTION
 
     def forward(self, x: Tensor) -> Tensor:
         """
@@ -368,9 +372,11 @@ class Conv2D:
         - Use conv2d_naive(x.data, self.kernel)
         - Return Tensor(result) to wrap the result
         """
+        ### BEGIN SOLUTION
         # Apply convolution using naive implementation
         result = conv2d_naive(x.data, self.kernel)
         return Tensor(result)
+        ### END SOLUTION
 
     def __call__(self, x: Tensor) -> Tensor:
         """Make layer callable: layer(x) same as layer.forward(x)"""
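The two Conv2D hunks fit together into a small layer. A sketch reusing the `Tensor` and `conv2d_naive` sketches above; the `__init__` signature is an assumption read off from how `kernel_size` is unpacked:

```python
class Conv2D:
    def __init__(self, kernel_size):
        # Store the (kH, kW) tuple and draw a small random kernel
        self.kernel_size = kernel_size
        kH, kW = kernel_size
        self.kernel = np.random.randn(kH, kW).astype(np.float32) * 0.1

    def forward(self, x):
        # Delegate to the naive convolution, then re-wrap as a Tensor
        result = conv2d_naive(x.data, self.kernel)
        return Tensor(result)

    def __call__(self, x):
        return self.forward(x)

layer = Conv2D((3, 3))
img = Tensor(np.random.rand(8, 8).astype(np.float32))
print(layer(img).shape)  # (6, 6): valid convolution of an 8x8 input
```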
@@ -490,10 +496,12 @@ def flatten(x: Tensor) -> Tensor:
     - Add batch dimension: result[None, :]
     - Return Tensor(result)
     """
+    ### BEGIN SOLUTION
     # Flatten the tensor and add batch dimension
     flattened = x.data.flatten()
     result = flattened[None, :]  # Add batch dimension
     return Tensor(result)
+    ### END SOLUTION
 
 # %% [markdown]
 """
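The flatten hunk is complete in itself; a quick shape check, again reusing the `Tensor` sketch from above:

```python
def flatten(x):
    # Collapse to 1-D, then prepend a batch dimension
    flattened = x.data.flatten()
    result = flattened[None, :]  # shape (1, N)
    return Tensor(result)

t = Tensor(np.zeros((2, 3, 4)))
print(flatten(t).shape)  # (1, 24): 2 * 3 * 4 elements in one batch row
```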
@@ -209,8 +209,10 @@ class Dataset:
         - Always return a tuple of (data, label) tensors
         - Data contains the input features, label contains the target
         """
+        ### BEGIN SOLUTION
         # This is an abstract method - subclasses must implement it
         raise NotImplementedError("Subclasses must implement __getitem__")
+        ### END SOLUTION
 
     def __len__(self) -> int:
         """
@@ -229,8 +231,10 @@ class Dataset:
         - This is an abstract method that subclasses must override
         - Return an integer representing the total number of samples
         """
+        ### BEGIN SOLUTION
         # This is an abstract method - subclasses must implement it
         raise NotImplementedError("Subclasses must implement __len__")
+        ### END SOLUTION
 
     def get_sample_shape(self) -> Tuple[int, ...]:
         """
@@ -251,9 +255,11 @@ class Dataset:
         - Extract data from the (data, label) tuple
         - Return data.shape
         """
+        ### BEGIN SOLUTION
         # Get the first sample to determine shape
         data, _ = self[0]
         return data.shape
+        ### END SOLUTION
 
     def get_num_classes(self) -> int:
         """
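The three Dataset hunks define an abstract interface plus one concrete helper that relies on it. A sketch with an illustrative in-memory subclass (the `ArrayDataset` name and fields are not from the source) shows how `get_sample_shape` works through a subclass's `__getitem__`:

```python
class Dataset:
    def __getitem__(self, index):
        raise NotImplementedError("Subclasses must implement __getitem__")

    def __len__(self):
        raise NotImplementedError("Subclasses must implement __len__")

    def get_sample_shape(self):
        # Probe the first sample; works for any concrete subclass
        data, _ = self[0]
        return data.shape

class ArrayDataset(Dataset):
    """Illustrative subclass over in-memory numpy arrays."""

    def __init__(self, features, labels):
        self.features = features
        self.labels = labels

    def __getitem__(self, index):
        return Tensor(self.features[index]), Tensor(self.labels[index])

    def __len__(self):
        return len(self.features)

ds = ArrayDataset(np.zeros((10, 4)), np.zeros(10))
print(len(ds), ds.get_sample_shape())  # 10 (4,)
```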