Fix NBGrader metadata for Modules 15 and 16

Module 15 (Quantization):
- Added locked=true to test_module cell (line 1523)
- Added NBGrader metadata to systems-thinking markdown cell (line 1751)
- Added schema_version: 3 to both cells

Module 16 (Compression):
- Added NBGrader metadata to 6 solution cells:
  * measure-sparsity (line 380)
  * magnitude-prune (line 511)
  * structured-prune (line 675)
  * low-rank-approx (line 843)
  * distillation (line 1013)
  * compress-model-comprehensive (line 1234)
- Added NBGrader metadata to 6 test cells:
  * test-measure-sparsity (line 427) - 5 points
  * test-magnitude-prune (line 567) - 10 points
  * test-structured-prune (line 733) - 10 points
  * test-low-rank (line 888) - 10 points
  * test-distillation (line 1133) - 15 points
  * test-compression-integration (line 1300) - 20 points
- Total: 70 points for Module 16

Result:
- Module 15: 0 P0-BLOCKER, 0 P1-IMPORTANT (was 1 P0 + 1 P1)
- Module 16: 0 P0-BLOCKER, 0 P1-IMPORTANT (was 12 P0)
- Both modules now production-ready for NBGrader deployment (https://claude.com/claude-code)
This commit is contained in:
Vijay Janapa Reddi
2025-11-11 14:50:37 -05:00
parent 11f1771f17
commit 884f024743
2 changed files with 94 additions and 32 deletions

View File

@@ -377,7 +377,7 @@ Storage: 28 values Storage: 7 values + indices
Why this matters: Sparsity directly relates to memory savings, but achieving speedup requires special sparse computation libraries.
"""
# %%
# %% nbgrader={"grade": false, "grade_id": "measure-sparsity", "solution": true, "schema_version": 3}
def measure_sparsity(model) -> float:
"""
Calculate the percentage of zero weights in a model.
@@ -424,6 +424,7 @@ def measure_sparsity(model) -> float:
return (zero_params / total_params) * 100.0
### END SOLUTION
# %% nbgrader={"grade": true, "grade_id": "test-measure-sparsity", "locked": true, "points": 5, "solution": false, "schema_version": 3}
def test_unit_measure_sparsity():
"""🔬 Test sparsity measurement functionality."""
print("🔬 Unit Test: Measure Sparsity...")
@@ -508,7 +509,7 @@ Global thresholding treats the entire model as one big collection of weights, fi
- Can hurt performance if layers have very different weight distributions
"""
# %%
# %% nbgrader={"grade": false, "grade_id": "magnitude-prune", "solution": true, "schema_version": 3}
def magnitude_prune(model, sparsity=0.9):
"""
Remove weights with smallest magnitudes to achieve target sparsity.
@@ -563,6 +564,7 @@ def magnitude_prune(model, sparsity=0.9):
return model
### END SOLUTION
# %% nbgrader={"grade": true, "grade_id": "test-magnitude-prune", "locked": true, "points": 10, "solution": false, "schema_version": 3}
def test_unit_magnitude_prune():
"""🔬 Test magnitude-based pruning functionality."""
print("🔬 Unit Test: Magnitude Prune...")
@@ -672,7 +674,7 @@ Structured sparsity enables real hardware acceleration because:
4. **Cache Efficiency**: Better spatial locality of memory access
"""
# %%
# %% nbgrader={"grade": false, "grade_id": "structured-prune", "solution": true, "schema_version": 3}
def structured_prune(model, prune_ratio=0.5):
"""
Remove entire channels/neurons based on L2 norm importance.
@@ -728,6 +730,7 @@ def structured_prune(model, prune_ratio=0.5):
return model
### END SOLUTION
# %% nbgrader={"grade": true, "grade_id": "test-structured-prune", "locked": true, "points": 10, "solution": false, "schema_version": 3}
def test_unit_structured_prune():
"""🔬 Test structured pruning functionality."""
print("🔬 Unit Test: Structured Prune...")
@@ -840,7 +843,7 @@ It works poorly when:
- **High precision required**: SVD introduces approximation error
"""
# %%
# %% nbgrader={"grade": false, "grade_id": "low-rank-approx", "solution": true, "schema_version": 3}
def low_rank_approximate(weight_matrix, rank_ratio=0.5):
"""
Approximate weight matrix using low-rank decomposition (SVD).
@@ -882,6 +885,7 @@ def low_rank_approximate(weight_matrix, rank_ratio=0.5):
return U_truncated, S_truncated, V_truncated
### END SOLUTION
# %% nbgrader={"grade": true, "grade_id": "test-low-rank", "locked": true, "points": 10, "solution": false, "schema_version": 3}
def test_unit_low_rank_approximate():
"""🔬 Test low-rank approximation functionality."""
print("🔬 Unit Test: Low-Rank Approximate...")
@@ -1010,7 +1014,7 @@ Temperature T:
```
"""
# %%
# %% nbgrader={"grade": false, "grade_id": "distillation", "solution": true, "schema_version": 3}
#| export
class KnowledgeDistillation:
"""
@@ -1126,6 +1130,7 @@ class KnowledgeDistillation:
else:
return -np.mean(np.sum(labels * np.log(predictions + 1e-8), axis=1))
# %% nbgrader={"grade": true, "grade_id": "test-distillation", "locked": true, "points": 15, "solution": false, "schema_version": 3}
def test_unit_knowledge_distillation():
"""🔬 Test knowledge distillation functionality."""
print("🔬 Unit Test: Knowledge Distillation...")
@@ -1231,7 +1236,7 @@ CLOUD SERVICE (Minimal compression):
```
"""
# %%
# %% nbgrader={"grade": false, "grade_id": "compress-model-comprehensive", "solution": true, "schema_version": 3}
def compress_model(model, compression_config):
"""
Apply comprehensive model compression based on configuration.
@@ -1292,6 +1297,7 @@ def compress_model(model, compression_config):
return stats
### END SOLUTION
# %% nbgrader={"grade": true, "grade_id": "test-compression-integration", "locked": true, "points": 20, "solution": false, "schema_version": 3}
def test_unit_compress_model():
"""🔬 Test comprehensive model compression."""
print("🔬 Unit Test: Compress Model...")