mirror of
https://github.com/harvard-edge/cs249r_book.git
synced 2026-05-05 09:09:13 -05:00
style: apply consistent whitespace and formatting across codebase
This commit is contained in:
@@ -6,7 +6,7 @@ These tests focus on inter-module compatibility, not individual module functiona
|
||||
|
||||
Integration test categories:
|
||||
1. Core module integration (tensor + autograd + layers)
|
||||
2. Training pipeline integration (optimizers + training + data)
|
||||
2. Training pipeline integration (optimizers + training + data)
|
||||
3. Optimization module integration (profiler + quantization + pruning)
|
||||
4. End-to-end integration (complete model training)
|
||||
"""
|
||||
@@ -19,61 +19,61 @@ def test_core_module_integration():
|
||||
"""Test that core modules work together: tensor → autograd → layers"""
|
||||
print("🔧 Testing Core Module Integration")
|
||||
print("-" * 40)
|
||||
|
||||
|
||||
try:
|
||||
# Test tensor + autograd integration
|
||||
from tinytorch.core.tensor import Tensor
|
||||
from tinytorch.core.autograd import Variable
|
||||
|
||||
|
||||
# Create tensor and wrap in Variable
|
||||
t = Tensor([1.0, 2.0, 3.0])
|
||||
v = Variable(t, requires_grad=True)
|
||||
print("✅ Tensor + Autograd integration working")
|
||||
|
||||
|
||||
# Test tensor + layers integration
|
||||
from tinytorch.nn import Linear
|
||||
layer = Linear(3, 2)
|
||||
|
||||
|
||||
# This tests that layers can accept tensor inputs
|
||||
# result = layer(t) # Simplified test
|
||||
print("✅ Tensor + Layers integration working")
|
||||
|
||||
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Core module integration failed: {e}")
|
||||
return False
|
||||
|
||||
def test_training_pipeline_integration():
|
||||
"""Test training pipeline: data → model → optimizer → training"""
|
||||
print("\n🏋️ Testing Training Pipeline Integration")
|
||||
print("\n🏋️ Testing Training Pipeline Integration")
|
||||
print("-" * 40)
|
||||
|
||||
|
||||
try:
|
||||
# Test data + model integration
|
||||
from tinytorch.utils.data import DataLoader, SimpleDataset
|
||||
from tinytorch.nn import Linear
|
||||
from tinytorch.core.optimizers import SGD
|
||||
|
||||
|
||||
# Create simple dataset
|
||||
dataset = SimpleDataset([(i, i*2) for i in range(10)])
|
||||
dataloader = DataLoader(dataset, batch_size=2)
|
||||
print("✅ Data loading integration working")
|
||||
|
||||
|
||||
# Create model
|
||||
model = Linear(1, 1)
|
||||
optimizer = SGD([model.weight], lr=0.01)
|
||||
print("✅ Model + Optimizer integration working")
|
||||
|
||||
|
||||
# Test that training components work together
|
||||
for batch_data, batch_labels in dataloader:
|
||||
# output = model(batch_data) # Simplified
|
||||
# optimizer.step() # Simplified
|
||||
break
|
||||
print("✅ Training pipeline integration working")
|
||||
|
||||
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Training pipeline integration failed: {e}")
|
||||
return False
|
||||
@@ -82,31 +82,31 @@ def test_optimization_module_integration():
|
||||
"""Test optimization modules work with core modules"""
|
||||
print("\n⚡ Testing Optimization Module Integration")
|
||||
print("-" * 40)
|
||||
|
||||
|
||||
try:
|
||||
# Test profiler + core modules
|
||||
from tinytorch.core.tensor import Tensor
|
||||
import tinytorch.profiler
|
||||
|
||||
|
||||
# Test that profiler can analyze core operations
|
||||
def tensor_operation():
|
||||
t1 = Tensor([1, 2, 3])
|
||||
t2 = Tensor([4, 5, 6])
|
||||
return t1, t2
|
||||
|
||||
|
||||
# This tests that profiler can measure core operations
|
||||
print("✅ Profiler + Core integration working")
|
||||
|
||||
|
||||
# Test quantization + models (when available)
|
||||
import tinytorch.quantization
|
||||
from tinytorch.nn import Linear
|
||||
|
||||
|
||||
model = Linear(10, 5)
|
||||
# quantized_model = tinytorch.quantization.quantize(model) # When implemented
|
||||
print("✅ Quantization + Models integration ready")
|
||||
|
||||
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Optimization module integration failed: {e}")
|
||||
return False
|
||||
@@ -115,7 +115,7 @@ def test_import_compatibility():
|
||||
"""Test that all import paths work and don't conflict"""
|
||||
print("\n📦 Testing Import Compatibility")
|
||||
print("-" * 40)
|
||||
|
||||
|
||||
try:
|
||||
# Test PyTorch-style imports don't conflict with core
|
||||
import tinytorch.profiler
|
||||
@@ -123,17 +123,17 @@ def test_import_compatibility():
|
||||
import tinytorch.backends
|
||||
import tinytorch.experimental
|
||||
from tinytorch.nn.utils import prune
|
||||
|
||||
|
||||
# Test core imports still work
|
||||
from tinytorch.core import tensor, autograd
|
||||
from tinytorch.nn import Linear, functional
|
||||
from tinytorch.utils.data import DataLoader
|
||||
|
||||
|
||||
print("✅ All import paths compatible")
|
||||
print("✅ No namespace conflicts detected")
|
||||
|
||||
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Import compatibility failed: {e}")
|
||||
return False
|
||||
@@ -142,26 +142,26 @@ def test_cross_module_data_flow():
|
||||
"""Test data can flow between different modules correctly"""
|
||||
print("\n🌊 Testing Cross-Module Data Flow")
|
||||
print("-" * 40)
|
||||
|
||||
|
||||
try:
|
||||
from tinytorch.core.tensor import Tensor
|
||||
from tinytorch.nn import Linear
|
||||
from tinytorch.utils.data import SimpleDataset
|
||||
|
||||
|
||||
# Create data
|
||||
data = [(Tensor([i]), Tensor([i*2])) for i in range(5)]
|
||||
dataset = SimpleDataset(data)
|
||||
|
||||
|
||||
# Test data flows through model
|
||||
model = Linear(1, 1)
|
||||
sample_input, sample_target = dataset[0]
|
||||
|
||||
|
||||
# Test that tensor from data works with model
|
||||
# output = model(sample_input) # Simplified
|
||||
print("✅ Data flows correctly between modules")
|
||||
|
||||
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Cross-module data flow failed: {e}")
|
||||
return False
|
||||
@@ -170,30 +170,30 @@ def run_all_integration_tests():
|
||||
"""Run all module integration tests"""
|
||||
print("🧪 TINYTORCH MODULE INTEGRATION TESTS")
|
||||
print("=" * 60)
|
||||
|
||||
|
||||
tests = [
|
||||
test_core_module_integration,
|
||||
test_training_pipeline_integration,
|
||||
test_training_pipeline_integration,
|
||||
test_optimization_module_integration,
|
||||
test_import_compatibility,
|
||||
test_cross_module_data_flow
|
||||
]
|
||||
|
||||
|
||||
passed = 0
|
||||
total = len(tests)
|
||||
|
||||
|
||||
for test in tests:
|
||||
try:
|
||||
if test():
|
||||
passed += 1
|
||||
except Exception as e:
|
||||
print(f"❌ Test {test.__name__} crashed: {e}")
|
||||
|
||||
|
||||
print(f"\n📊 INTEGRATION TEST RESULTS")
|
||||
print("=" * 40)
|
||||
print(f"Passed: {passed}/{total}")
|
||||
print(f"Success Rate: {passed/total*100:.1f}%")
|
||||
|
||||
|
||||
if passed == total:
|
||||
print("🎉 ALL INTEGRATION TESTS PASSED!")
|
||||
print("✅ Modules integrate correctly with each other")
|
||||
@@ -204,4 +204,4 @@ def run_all_integration_tests():
|
||||
return False
|
||||
|
||||
if __name__ == "__main__":
|
||||
run_all_integration_tests()
|
||||
run_all_integration_tests()
|
||||
|
||||
Reference in New Issue
Block a user