Optimization Level 0: Baseline

Results:
- Perceptron:  (1.92s) 100.0%
- XOR:  (1.87s) 54.5%
- MNIST:  (1.96s) 11.5%
- CIFAR:  (60.00s) Timeout
- TinyGPT:  (1.92s) loss 0.3419
This commit is contained in:
Vijay Janapa Reddi
2025-09-28 21:44:07 -04:00
parent 3d686ca280
commit 852f96044a
4 changed files with 75 additions and 45 deletions

View File

@@ -14,3 +14,4 @@ Testing Optimization Level 0: Baseline
[2025-09-28 21:42:40] ✅ Complete in 1.85s
[2025-09-28 21:42:40]
Committing results for Baseline...
[2025-09-28 21:42:40] Committed results

View File

@@ -0,0 +1,19 @@
[2025-09-28 21:43:29] ================================================================================
[2025-09-28 21:43:29] PHASE 2: OPTIMIZATION TESTING
[2025-09-28 21:43:29] ================================================================================
[2025-09-28 21:43:29]
Testing Optimization Level 0: Baseline
[2025-09-28 21:43:29] Description: No optimizations
[2025-09-28 21:43:29] ------------------------------------------------------------
[2025-09-28 21:43:29] Testing Perceptron with Baseline...
[2025-09-28 21:43:31] ✅ Complete in 1.92s
[2025-09-28 21:43:31] Testing XOR with Baseline...
[2025-09-28 21:43:33] ✅ Complete in 1.87s
[2025-09-28 21:43:33] Testing MNIST with Baseline...
[2025-09-28 21:43:35] ✅ Complete in 1.96s
[2025-09-28 21:43:35] Testing CIFAR with Baseline...
[2025-09-28 21:44:05] ⏱️ Timeout after 60s
[2025-09-28 21:44:05] Testing TinyGPT with Baseline...
[2025-09-28 21:44:07] ✅ Complete in 1.92s
[2025-09-28 21:44:07]
Committing results for Baseline...

View File

@@ -41,7 +41,7 @@ class OptimizationTester:
{
'name': 'CIFAR',
'path': 'examples/cifar_cnn_modern/train_cnn.py',
'args': '--test-only', # Quick test for now
'args': '--test-only', # Architecture test only
'metrics': ['forward_pass', 'time']
},
{
@@ -52,7 +52,7 @@ class OptimizationTester:
}
]
# Define optimization levels (modules 15-20)
# Define optimization levels (modules 14-19 based on actual TinyTorch structure)
self.optimizations = [
{
'level': 0,
@@ -60,41 +60,41 @@ class OptimizationTester:
'description': 'No optimizations',
'module': None
},
{
'level': 14,
'name': 'Profiling',
'description': 'Module 14: Performance profiling and analysis',
'module': 'profiling'
},
{
'level': 15,
'name': 'Memory Optimization',
'description': 'Module 15: Memory-efficient operations',
'module': 'memory_opt'
'name': 'Acceleration',
'description': 'Module 15: Hardware acceleration optimizations',
'module': 'acceleration'
},
{
'level': 16,
'name': 'Compute Optimization',
'description': 'Module 16: Vectorization and parallelization',
'module': 'compute_opt'
'name': 'Quantization',
'description': 'Module 16: Quantization and compression',
'module': 'quantization'
},
{
'level': 17,
'name': 'Cache Optimization',
'description': 'Module 17: Cache-friendly operations',
'module': 'cache_opt'
'name': 'Compression',
'description': 'Module 17: Model compression techniques',
'module': 'compression'
},
{
'level': 18,
'name': 'Kernel Fusion',
'description': 'Module 18: Fused operations',
'module': 'kernel_fusion'
'name': 'Caching',
'description': 'Module 18: Caching and memory optimization',
'module': 'caching'
},
{
'level': 19,
'name': 'Mixed Precision',
'description': 'Module 19: FP16/BF16 operations',
'module': 'mixed_precision'
},
{
'level': 20,
'name': 'Full Optimization',
'description': 'Module 20: All optimizations combined',
'module': 'full_opt'
'name': 'Benchmarking',
'description': 'Module 19: Advanced benchmarking suite',
'module': 'benchmarking'
}
]
@@ -118,13 +118,15 @@ class OptimizationTester:
env['TINYTORCH_OPT'] = optimization['module']
try:
# Use shorter timeout for CIFAR architecture test
timeout_val = 30 if example['name'] == 'CIFAR' else 60
cmd = f"python {example['path']} {example['args']}"
result = subprocess.run(
cmd,
shell=True,
capture_output=True,
text=True,
timeout=60,
timeout=timeout_val,
env=env
)
@@ -275,19 +277,27 @@ class OptimizationTester:
self.log("\nMatrix saved to optimization_matrix.md")
if __name__ == "__main__":
import sys
tester = OptimizationTester()
# For now, just test baseline
print("\nStarting with BASELINE performance testing...")
baseline = tester.optimizations[0]
baseline_results = tester.test_optimization_level(baseline)
# Save baseline results
tester.commit_results(baseline, baseline_results)
print("\n" + "="*60)
print("BASELINE TESTING COMPLETE")
print("="*60)
print("\nBaseline results committed.")
print("Ready to proceed with optimization testing.")
print("\nTo run full suite: python optimization_test_framework.py --full")
# Check if user wants full suite
if '--full' in sys.argv:
print("\n🚀 RUNNING FULL OPTIMIZATION TEST SUITE...")
print("Testing all optimization levels: Baseline → Profiling → Acceleration → Quantization → Compression → Caching → Benchmarking")
all_results = tester.run_full_test_suite()
else:
# Just test baseline
print("\nStarting with BASELINE performance testing...")
baseline = tester.optimizations[0]
baseline_results = tester.test_optimization_level(baseline)
# Save baseline results
tester.commit_results(baseline, baseline_results)
print("\n" + "="*60)
print("BASELINE TESTING COMPLETE")
print("="*60)
print("\nBaseline results committed.")
print("Ready to proceed with optimization testing.")
print("\nTo run full suite: python optimization_test_framework.py --full")

View File

@@ -1,24 +1,24 @@
{
"Perceptron": {
"success": true,
"time": 1.8552052974700928,
"time": 1.924880027770996,
"output_preview": "ion\n\n\ud83d\ude80 Next Steps:\n \u2022 Continue to XOR 1969 milestone after Module 06 (Autograd)\n \u2022 YOUR foundation enables solving non-linear problems!\n \u2022 With 100.0% accuracy, YOUR perceptron works perfectly!\n",
"loss": 0.2038,
"accuracy": 100.0
},
"XOR": {
"success": true,
"time": 1.9177570343017578,
"time": 1.8728010654449463,
"output_preview": "ayer networks\n\n\ud83d\ude80 Next Steps:\n \u2022 Continue to MNIST MLP after Module 08 (Training)\n \u2022 YOUR XOR solution scales to real vision problems!\n \u2022 Hidden layers principle powers all modern deep learning!\n",
"loss": 0.2497,
"accuracy": 54.5
},
"MNIST": {
"success": true,
"time": 2.02604603767395,
"time": 1.9613378047943115,
"output_preview": " a scalar is deprecated, and will error in future. Ensure you extract a single element from your array before performing this operation. (Deprecated NumPy 1.25.)\n one_hot[i, int(labels_np[i])] = 1.0\n",
"loss": 0.0,
"accuracy": 15.0
"accuracy": 11.5
},
"CIFAR": {
"success": false,
@@ -27,8 +27,8 @@
},
"TinyGPT": {
"success": true,
"time": 1.851945161819458,
"time": 1.9189341068267822,
"output_preview": "ining\n \u2022 Complete transformer architecture from first principles\n\n\ud83c\udfed Production Note:\n Real PyTorch uses optimized CUDA kernels for attention,\n but you built and understand the core mathematics!\n",
"loss": 0.3688
"loss": 0.3419
}
}