refactor(tests): clean up test folder and fix gradient flow issues

Test Cleanup (113 files, -22,000 lines):
- Remove 21 redundant run_all_tests.py files
- Remove checkpoints/ folder (22 obsolete checkpoint files)
- Remove progressive/, debugging/, diagnostic/ folders
- Remove duplicate integration tests and examples
- Remove orphaned dev artifacts and generated outputs
- Consolidate test_gradient_flow_overall.py into system/

Documentation Cleanup (4 files removed):
- Remove duplicate HOW_TO_USE.md, WORKFLOW.md, SYSTEM_DESIGN.md
- Trim environment/README.md from 334 to 86 lines
- Update capstone/README.md to remove outdated bug references

Test Fixes:
- Add requires_grad=True to layer parameters in gradient tests
- Fix PositionalEncoding argument order in test_shapes.py
- Adjust performance thresholds for realistic expectations
- Fix gradient clipping to handle memoryview inputs correctly
- Update zero_grad assertions to accept either None or all-zero gradients (see the sketch after this list)
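
A minimal sketch of the last two fixes above, assuming parameters that expose a .grad buffer and a zero_grad() method; the helper names and signatures here are illustrative, not TinyTorch's actual API:

import numpy as np

def clip_gradients(grads, max_norm):
    # Gradient buffers may arrive as memoryview objects rather than
    # ndarrays; coercing with np.asarray handles both (the memoryview fix).
    arrays = [np.asarray(g, dtype=np.float64) for g in grads]
    total_norm = float(np.sqrt(sum((a ** 2).sum() for a in arrays)))
    if total_norm > max_norm:
        scale = max_norm / (total_norm + 1e-6)
        arrays = [a * scale for a in arrays]
    return arrays

def assert_grad_cleared(param):
    # zero_grad() implementations legitimately differ: some set .grad to
    # None, others zero the buffer in place. The updated assertions accept both.
    assert param.grad is None or not np.any(np.asarray(param.grad)), \
        "gradient is neither None nor all zeros after zero_grad()"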
Author: Vijay Janapa Reddi
Date: 2026-01-24 12:22:37 -05:00
parent aafd7a8c67
commit 389989ece7
113 changed files with 214 additions and 22135 deletions


@@ -1,146 +0,0 @@
#!/usr/bin/env python3
"""
Run all tests for Module XX: [Module Name]
Template test runner - copy to each module's test directory
"""
import sys
from pathlib import Path
import importlib.util
import time
from typing import List, Dict

# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))


def run_module_tests() -> Dict:
    """Run all tests for this module."""
    from rich.console import Console
    from rich.table import Table
    from rich import box
    from rich.panel import Panel

    console = Console()

    # Update module number and name
    MODULE_NUMBER = "01"
    MODULE_NAME = "Tensor"

    # Header
    console.print(Panel(f"[bold blue]Module {MODULE_NUMBER}: {MODULE_NAME} - Test Suite[/bold blue]",
                        expand=False))

    # Find all test files in this module
    test_files = list(Path(__file__).parent.glob("test_*.py"))
    test_files = [f for f in test_files if f.name != Path(__file__).name]

    if not test_files:
        console.print("[yellow]No test files found in this module![/yellow]")
        return {'status': 'NO_TESTS', 'passed': 0, 'failed': 0}

    all_results = []
    total_passed = 0
    total_failed = 0
    total_skipped = 0

    # Create results table
    table = Table(title="Test Results", box=box.ROUNDED)
    table.add_column("Test File", style="cyan")
    table.add_column("Test Class", style="yellow")
    table.add_column("Test Method", style="white")
    table.add_column("Status", justify="center")
    table.add_column("Time", justify="right")

    for test_file in sorted(test_files):
        module_name = test_file.stem
        try:
            # Import test module
            spec = importlib.util.spec_from_file_location(module_name, test_file)
            test_module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(test_module)

            # Find test classes
            for class_name in dir(test_module):
                if class_name.startswith("Test"):
                    test_class = getattr(test_module, class_name)

                    # Create instance
                    try:
                        instance = test_class()
                    except Exception as e:
                        table.add_row(
                            module_name,
                            class_name,
                            "initialization",
                            "[red]❌ ERROR[/red]",
                            "-"
                        )
                        total_failed += 1
                        continue

                    # Run test methods
                    for method_name in dir(instance):
                        if method_name.startswith("test_"):
                            method = getattr(instance, method_name)

                            # Skip template placeholder tests
                            if "pass" in str(method.__code__.co_code):
                                continue

                            # Run test
                            start = time.time()
                            try:
                                method()
                                status = "[green]✅ PASS[/green]"
                                total_passed += 1
                            except AssertionError as e:
                                status = "[red]❌ FAIL[/red]"
                                total_failed += 1
                            except ImportError:
                                status = "[yellow]⏭️ SKIP[/yellow]"
                                total_skipped += 1
                            except Exception as e:
                                status = "[red]💥 ERROR[/red]"
                                total_failed += 1

                            duration = time.time() - start
                            table.add_row(
                                module_name,
                                class_name,
                                method_name,
                                status,
                                f"{duration:.3f}s"
                            )
        except Exception as e:
            console.print(f"[red]Error loading test file {test_file}: {e}[/red]")
            total_failed += 1

    if total_passed + total_failed + total_skipped > 0:
        console.print(table)

        # Summary
        console.print(f"\n📊 Summary:")
        console.print(f" • Total: {total_passed + total_failed + total_skipped} tests")
        console.print(f" • ✅ Passed: {total_passed}")
        console.print(f" • ❌ Failed: {total_failed}")
        if total_skipped > 0:
            console.print(f" • ⏭️ Skipped: {total_skipped}")

        # Final status
        if total_failed == 0:
            console.print("\n[green bold]✅ All tests passed![/green bold]")
            return {'status': 'PASSED', 'passed': total_passed, 'failed': 0}
        else:
            console.print("\n[red]❌ Some tests failed![/red]")
            return {'status': 'FAILED', 'passed': total_passed, 'failed': total_failed}
    else:
        console.print("[yellow]No actual tests implemented yet (only templates).[/yellow]")
        return {'status': 'NO_TESTS', 'passed': 0, 'failed': 0}


if __name__ == "__main__":
    results = run_module_tests()
    sys.exit(0 if results['status'] == 'PASSED' else 1)
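
One subtlety in the template above: the placeholder check searches the string form of a method's compiled bytecode for "pass", which can match unrelated byte sequences. A more reliable detection, sketched below, compares the body's bytecode against a known-empty function (an alternative approach, not what the deleted file did):

def _empty():
    pass

def is_placeholder(func):
    # A test body that is a bare `pass` compiles to the same bytecode as an
    # empty function on the same interpreter, so comparing co_code flags
    # unimplemented placeholders. (Docstring-only bodies need extra handling.)
    return func.__code__.co_code == _empty.__code__.co_code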


@@ -1,178 +0,0 @@
"""
Module 01: Progressive Integration Tests
Tests that Module 02 (Tensor) works correctly AND that all previous modules still work.
DEPENDENCY CHAIN: 01_setup → 02_tensor
This ensures students can trace back exactly where issues originate.
"""
import numpy as np
import sys
from pathlib import Path
# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
class TestModule01Prerequisites:
"""Test that Module 01 (Setup) still works correctly."""
def test_environment_setup_working(self):
"""Verify setup module functionality is still working."""
# Python version detection
assert sys.version_info >= (3, 8), "Python 3.8+ required"
# Project structure
project_root = Path(__file__).parent.parent.parent
required_dirs = ['modules', 'tests', 'tito', 'tinytorch']
for dir_name in required_dirs:
dir_path = project_root / dir_name
assert dir_path.exists(), f"Setup failed: {dir_name} directory missing"
def test_development_environment_ready(self):
"""Verify development environment is properly configured."""
# Required packages
required_packages = ['numpy', 'pathlib']
for package in required_packages:
try:
__import__(package)
except ImportError:
assert False, f"Setup failed: {package} not available"
class TestModule02TensorCore:
"""Test that Module 02 (Tensor) core functionality works."""
def test_tensor_creation_and_basics(self):
"""Test tensor creation works correctly."""
try:
from tinytorch.core.tensor import Tensor
# Basic tensor creation
t1 = Tensor([1, 2, 3])
assert t1.shape == (3,), "Tensor creation failed"
# Numpy array integration
arr = np.array([[1, 2], [3, 4]])
t2 = Tensor(arr)
assert t2.shape == (2, 2), "Numpy integration failed"
except ImportError:
assert True, "Tensor not implemented yet (expected)"
def test_tensor_operations(self):
"""Test basic tensor operations work."""
try:
from tinytorch.core.tensor import Tensor
t1 = Tensor([1, 2, 3])
t2 = Tensor([4, 5, 6])
# Test operations if implemented
if hasattr(t1, '__add__'):
result = t1 + t2
expected = np.array([5, 7, 9])
assert np.array_equal(result.data, expected), "Tensor addition failed"
except ImportError:
assert True, "Tensor operations not implemented yet (expected)"
class TestProgressiveStack:
"""Test that the progressive stack (01→02) works together."""
def test_setup_enables_tensor(self):
"""Test that proper setup enables tensor functionality."""
# Verify setup created the foundation for tensors
# 1. Environment should support numpy (from setup)
import numpy as np
assert np.__version__ is not None, "Numpy not properly set up"
# 2. Project structure should support tensor module
tensor_module_path = Path(__file__).parent.parent.parent / "src" / "01_tensor"
assert tensor_module_path.exists(), "Setup didn't create proper module structure"
def test_end_to_end_capability(self):
"""Test end-to-end capability through Module 02."""
try:
# This should work if both setup and tensor are implemented
from tinytorch.core.tensor import Tensor
# Create tensors using environment from Module 01
data = np.random.randn(5, 10) # Uses numpy from setup
t = Tensor(data) # Uses tensor from Module 02
# Basic functionality should work
assert t.shape == (5, 10), "End-to-end stack broken"
assert isinstance(t.data, np.ndarray), "Tensor-numpy integration broken"
except ImportError:
# If tensor not implemented, that's expected
# But setup should still work
assert sys.version_info >= (3, 8), "Setup module broken"
class TestDependencyValidation:
"""Validate that dependencies are working correctly."""
def test_module_01_exports(self):
"""Test Module 01 exports are available."""
try:
# Try to import setup functionality
from tinytorch.setup import get_system_info
info = get_system_info()
assert 'platform' in info, "Module 01 exports broken"
except ImportError:
# If not implemented, verify basic setup works
import platform
assert platform.system() in ['Darwin', 'Linux', 'Windows'], "Basic setup broken"
def test_module_02_builds_on_01(self):
"""Test Module 02 correctly uses Module 01 foundation."""
try:
from tinytorch.core.tensor import Tensor
# Tensor should use numpy (set up by Module 01)
t = Tensor(np.array([1, 2, 3]))
# Should use system info for optimization hints
if hasattr(t, 'device') or hasattr(t, 'dtype'):
# Advanced tensor features building on setup
assert True, "Module 02 successfully builds on Module 01"
except ImportError:
assert True, "Module 02 not implemented yet"
class TestRegressionPrevention:
"""Prevent regressions in previously working modules."""
def test_module_01_not_broken(self):
"""Ensure Module 02 development didn't break Module 01."""
# These should ALWAYS work regardless of Module 02 status
# Environment detection
assert sys.version_info.major >= 3, "Python environment broken"
# File system access
project_root = Path(__file__).parent.parent.parent
assert project_root.exists(), "Project structure broken"
# Package imports
import numpy as np
assert np is not None, "Package management broken"
def test_progressive_compatibility(self):
"""Test that progress doesn't break backwards compatibility."""
# Module 02 should not change Module 01 behavior
# Basic imports should still work
import sys
import os
from pathlib import Path
# These are Module 01 capabilities that should never break
assert callable(Path), "Path functionality broken"
assert hasattr(sys, 'version_info'), "System info broken"
assert hasattr(os, 'environ'), "Environment access broken"