MILESTONE: Complete Phase 2 CNN training pipeline

 Phase 1-2 Complete: Modules 1-10 aligned with tutorial master plan
 CNN Training Pipeline: Autograd → Spatial → Optimizers → DataLoader → Training
 Technical Validation: All modules import and function correctly
 CIFAR-10 Ready: Multi-channel Conv2D, BatchNorm, MaxPool2D, complete pipeline

Key Achievements:
- Fixed module sequence alignment (spatial now Module 7, not 6)
- Updated tutorial master plan for logical pedagogical flow
- Phase 2 milestone achieved: Students can train CNNs on CIFAR-10
- Complete systems engineering focus throughout all modules
- Production-ready CNN pipeline with memory profiling

Next Phase: Language models (Modules 11-15) for TinyGPT milestone
This commit is contained in:
Vijay Janapa Reddi
2025-09-23 18:33:56 -04:00
parent 86587f6aa0
commit b3c8dfaa3d
102 changed files with 27309 additions and 13102 deletions

View File

@@ -1,79 +0,0 @@
#!/usr/bin/env python3
"""
TinyTorch Module Analysis Wrapper
Simple wrapper to run the module analyzer from the root directory.
"""
import sys
import os
from pathlib import Path
# Add instructor tools to path
sys.path.insert(0, str(Path(__file__).parent / "instructor" / "tools"))
# Import and run the analyzer
from tinytorch_module_analyzer import TinyTorchModuleAnalyzer
import argparse
def main():
    """Command-line entry point for the module analyzer wrapper.

    Dispatches to single-module analysis (--module), all-module analysis
    (--all), or module comparison (--compare) and optionally saves the
    resulting report cards (--save).
    """
    parser = argparse.ArgumentParser(description="TinyTorch Module Analyzer & Report Card Generator")
    parser.add_argument("--module", help="Analyze specific module (e.g., 02_activations)")
    parser.add_argument("--all", action="store_true", help="Analyze all modules")
    parser.add_argument("--compare", nargs="+", help="Compare multiple modules")
    parser.add_argument("--format", choices=["json", "html", "both"], default="both", help="Output format")
    parser.add_argument("--save", action="store_true", help="Save report cards to files")
    args = parser.parse_args()
    # Use correct path from root directory
    analyzer = TinyTorchModuleAnalyzer("modules/source")
    if args.module:
        # Analyze single module
        print(f"🔍 Analyzing module: {args.module}")
        try:
            report_card = analyzer.analyze_module(args.module)
            print(f"\n📊 Report Card for {args.module}:")
            print(f"Overall Grade: {report_card.overall_grade}")
            print(f"Scaffolding Quality: {report_card.scaffolding_quality}/5")
            print(f"Critical Issues: {len(report_card.critical_issues)}")
            if args.save:
                saved_files = analyzer.save_report_card(report_card, args.format)
                print(f"💾 Saved to: {', '.join(saved_files)}")
        except Exception as e:
            print(f"❌ Error: {e}")
    elif args.all:
        # Analyze all modules
        print("🔍 Analyzing all modules...")
        results = analyzer.analyze_all_modules()
        print("\n📊 Summary Report:")
        for name, rc in results.items():
            print(f"{name}: Grade {rc.overall_grade} | Scaffolding {rc.scaffolding_quality}/5")
        if args.save:
            for name, rc in results.items():
                saved_files = analyzer.save_report_card(rc, args.format)
                print(f"💾 {name} saved to: {', '.join(saved_files)}")
    elif args.compare:
        # Compare modules
        print(f"🔍 Comparing modules: {', '.join(args.compare)}")
        comparison = analyzer.compare_modules(args.compare)
        print(f"\n{comparison}")
        if args.save:
            from datetime import datetime
            # Bug fix: on a fresh checkout the reports directory may not exist,
            # which made the open() below fail with FileNotFoundError.
            os.makedirs("instructor/reports", exist_ok=True)
            with open(f"instructor/reports/comparison_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md", 'w', encoding='utf-8') as f:
                f.write(comparison)
            print("💾 Comparison saved to instructor/reports/")
    else:
        parser.print_help()

if __name__ == "__main__":
    main()

View File

@@ -1,88 +0,0 @@
#!/usr/bin/env python3
"""Check NBGrader style guide compliance across all modules."""
import os
import re
from pathlib import Path
def analyze_module_compliance(filepath):
    """Score one module file against the NBGrader style guide.

    All checks are plain substring tests; the compliance score is simply the
    number of required sections present (0-5).

    Args:
        filepath: Path (or path string) to the module's ``*_dev.py`` file.

    Returns:
        Dict with the solution-block count, the 0-5 compliance score, one
        boolean per required section, and whether older-style markers remain.
    """
    # Bug fix: read with an explicit encoding — the course files contain emoji,
    # which breaks the platform-default codec (e.g. cp1252 on Windows).
    with open(filepath, 'r', encoding='utf-8') as f:
        content = f.read()
    # Count solution blocks
    solution_blocks = len(re.findall(r'### BEGIN SOLUTION', content))
    # Check for required sections (current style guide)
    has_todo = 'TODO:' in content
    has_step_by_step = 'STEP-BY-STEP IMPLEMENTATION:' in content
    has_example_usage = 'EXAMPLE USAGE:' in content or 'EXAMPLE:' in content
    has_hints = 'IMPLEMENTATION HINTS:' in content or 'HINTS:' in content
    has_connections = 'LEARNING CONNECTIONS:' in content or 'LEARNING CONNECTION:' in content
    # Check for alternative patterns (older style)
    has_approach = 'APPROACH:' in content
    has_your_code_here = 'YOUR CODE HERE' in content
    has_raise_notimpl = 'raise NotImplementedError' in content
    # True counts as 1, so this is "number of required sections present".
    compliance_score = sum([has_todo, has_step_by_step, has_example_usage, has_hints, has_connections])
    return {
        'solution_blocks': solution_blocks,
        'compliance_score': compliance_score,
        'has_todo': has_todo,
        'has_step_by_step': has_step_by_step,
        'has_example_usage': has_example_usage,
        'has_hints': has_hints,
        'has_connections': has_connections,
        'has_old_patterns': has_approach or has_your_code_here or has_raise_notimpl
    }
# ---- Analyze all modules -------------------------------------------------
# Walk every module directory under modules/source (skipping utils) and score
# the first *_dev.py file found in each.
modules_dir = Path('modules/source')
results = {}
for module_dir in sorted(modules_dir.iterdir()):
    if module_dir.is_dir() and module_dir.name != 'utils':
        py_files = list(module_dir.glob('*_dev.py'))
        if py_files:
            module_file = py_files[0]  # assumes one dev file per module — TODO confirm
            results[module_dir.name] = analyze_module_compliance(module_file)
# ---- Report results ------------------------------------------------------
print('=== NBGrader Style Guide Compliance Report ===\n')
print('Module | Blocks | Score | TODO | STEP | EXAM | HINT | CONN | Old? |')
print('-' * 78)
for module_name in sorted(results.keys()):
    r = results[module_name]
    # NOTE(review): several of the status glyphs below are empty strings —
    # they look like emoji stripped by an encoding pass; verify against the
    # original script and restore if intended.
    status_emoji = '' if r['compliance_score'] == 5 else '⚠️' if r['compliance_score'] >= 3 else ''
    print(f"{module_name:16} | {r['solution_blocks']:6} | {status_emoji} {r['compliance_score']}/5 | "
          f"{'' if r['has_todo'] else '':^4} | "
          f"{'' if r['has_step_by_step'] else '':^4} | "
          f"{'' if r['has_example_usage'] else '':^4} | "
          f"{'' if r['has_hints'] else '':^4} | "
          f"{'' if r['has_connections'] else '':^4} | "
          f"{'⚠️' if r['has_old_patterns'] else '':^4} |")
# ---- Summary -------------------------------------------------------------
fully_compliant = sum(1 for r in results.values() if r['compliance_score'] == 5)
needs_update = sum(1 for r in results.values() if r['compliance_score'] < 5)
has_old_patterns = sum(1 for r in results.values() if r['has_old_patterns'])
print('\n=== Summary ===')
print(f'Fully Compliant: {fully_compliant}/{len(results)}')
print(f'Needs Update: {needs_update}/{len(results)}')
print(f'Has Old Patterns: {has_old_patterns}/{len(results)}')
# ---- List modules needing updates ----------------------------------------
print('\n=== Modules Needing Updates ===')
for module_name, r in sorted(results.items()):
    if r['compliance_score'] < 5:
        missing = []
        if not r['has_todo']: missing.append('TODO')
        if not r['has_step_by_step']: missing.append('STEP-BY-STEP')
        if not r['has_example_usage']: missing.append('EXAMPLE USAGE')
        if not r['has_hints']: missing.append('HINTS')
        if not r['has_connections']: missing.append('CONNECTIONS')
        print(f"{module_name}: Missing {', '.join(missing)}")

View File

@@ -1,21 +0,0 @@
#!/usr/bin/env python3
"""Fix syntax errors in mlops_dev.py.

One-off repair script: earlier tooling mangled function definitions into
``def if __name__ == "__main__":`` followed by an indented ``name():`` line;
this rewrites each occurrence back to a plain ``def name():``.
"""
import re

# Single hard-coded target; this script is intentionally a one-shot fixer.
MODULE_PATH = 'modules/source/15_mlops/mlops_dev.py'

# Read the file (explicit encoding: the course files contain emoji).
with open(MODULE_PATH, 'r', encoding='utf-8') as f:
    content = f.read()

# Fix the malformed function definitions.
# Pattern: def if __name__ == "__main__":\n function_name():
pattern = r'def if __name__ == "__main__":\n (\w+)\(\):'
replacement = r'def \1():'
content = re.sub(pattern, replacement, content)

# Write back
with open(MODULE_PATH, 'w', encoding='utf-8') as f:
    f.write(content)

print("✅ Fixed syntax errors in mlops_dev.py")

View File

@@ -1,100 +0,0 @@
#!/bin/bash
# 🛡️ TinyTorch Core File Protection Script
# Industry-standard approach: Make generated files read-only
#
# Bug fix: the original used `echo "=" * 60` (Python syntax leaking into
# bash), which printed the literal text `= * 60`. Use a real separator line.
SEP="============================================================"

echo "🛡️ Setting up TinyTorch Core File Protection..."
echo "$SEP"

# Make all files in tinytorch/core/ read-only
if [ -d "tinytorch/core" ]; then
    echo "🔒 Making tinytorch/core/ files read-only..."
    chmod -R 444 tinytorch/core/*.py
    echo "✅ Core files are now read-only"
else
    echo "⚠️ tinytorch/core/ directory not found"
fi

# Create .gitattributes to mark files as generated (GitHub feature)
echo "📝 Setting up .gitattributes for generated file detection..."
cat > .gitattributes << 'EOF'
# Mark auto-generated files (GitHub will show "Generated" label)
tinytorch/core/*.py linguist-generated=true
tinytorch/**/*.py linguist-generated=true
# Exclude from diff by default (reduces noise)
tinytorch/core/*.py -diff
EOF
echo "✅ .gitattributes configured for generated file detection"

# Create EditorConfig to warn in common editors
echo "📝 Setting up .editorconfig for editor warnings..."
cat > .editorconfig << 'EOF'
# EditorConfig: Industry standard editor configuration
# Many editors will show warnings for files marked as generated
root = true

[*]
indent_style = space
indent_size = 4
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

# Mark generated files with special rules (some editors respect this)
[tinytorch/core/*.py]
# Some editors show warnings for files in generated directories
generated = true
EOF
echo "✅ .editorconfig configured for editor warnings"

# Create a pre-commit hook to warn about core file modifications
mkdir -p .git/hooks
cat > .git/hooks/pre-commit << 'EOF'
#!/bin/bash
# 🛡️ TinyTorch Pre-commit Hook: Prevent core file modifications
echo "🛡️ Checking for modifications to auto-generated files..."

# Check if any tinytorch/core files are staged
CORE_FILES_MODIFIED=$(git diff --cached --name-only | grep "^tinytorch/core/")

if [ ! -z "$CORE_FILES_MODIFIED" ]; then
    echo ""
    echo "🚨 ERROR: Attempting to commit auto-generated files!"
    echo "=========================================="
    echo ""
    echo "The following auto-generated files are staged:"
    echo "$CORE_FILES_MODIFIED"
    echo ""
    echo "🛡️ PROTECTION TRIGGERED: These files are auto-generated from modules/source/"
    echo ""
    echo "TO FIX:"
    echo "1. Unstage these files: git reset HEAD tinytorch/core/"
    echo "2. Make changes in modules/source/ instead"
    echo "3. Run: tito module complete <module_name>"
    echo "4. Commit the source changes, not the generated files"
    echo ""
    echo "⚠️ This protection prevents breaking CIFAR-10 training!"
    echo ""
    exit 1
fi

echo "✅ No auto-generated files being committed"
EOF
chmod +x .git/hooks/pre-commit
echo "✅ Git pre-commit hook installed"

echo ""
echo "🎉 TinyTorch Protection System Activated!"
echo "$SEP"
echo "🔒 Core files are read-only"
echo "📝 GitHub will label files as 'Generated'"
echo "⚙️ Editors will show generated file warnings"
echo "🚫 Git pre-commit hook prevents accidental commits"
echo ""
echo "🛡️ Students are now protected from accidentally breaking core functionality!"

View File

@@ -1,147 +0,0 @@
#!/usr/bin/env python3
"""
Final test to validate that modules can be imported and key functionality works
"""
import sys
import os
from pathlib import Path
from unittest.mock import MagicMock, patch
import importlib.util
# Setup mock modules before any imports.
# The stand-in numpy returns shape-compatible dummy values so the course
# modules can be loaded without the real dependency installed.
mock_np = MagicMock()
mock_np.__version__ = "1.24.0"  # presumably some module inspects the version string — verify
mock_np.array = MagicMock(side_effect=lambda x: x)  # identity: pass data through untouched
mock_np.mean = MagicMock(return_value=0.5)
mock_np.random = MagicMock()
mock_np.random.randn = MagicMock(return_value=[[1, 2], [3, 4]])
mock_np.random.randint = MagicMock(return_value=5)
# ceil stub: int(x) + 1 for number-like inputs, pass-through otherwise
mock_np.ceil = MagicMock(side_effect=lambda x: int(x) + 1 if hasattr(x, '__int__') else x)
sys.modules['numpy'] = mock_np
sys.modules['psutil'] = MagicMock()
sys.modules['matplotlib'] = MagicMock()
sys.modules['matplotlib.pyplot'] = MagicMock()
# Mock TinyTorch modules so `from tinytorch...` imports inside the course
# files resolve without the package being built.
sys.modules['tinytorch'] = MagicMock()
sys.modules['tinytorch.tensor'] = MagicMock()
sys.modules['tinytorch.nn'] = MagicMock()
sys.modules['tinytorch.optim'] = MagicMock()
sys.modules['tinytorch.data'] = MagicMock()
sys.modules['tinytorch.autograd'] = MagicMock()
def load_module_safely(module_path):
    """Load a Python file as a module, tolerating errors while executing it.

    The module object is registered in ``sys.modules`` and returned even when
    executing its body raises, so callers can still inspect whatever top-level
    names were defined before the failure.

    Args:
        module_path: Path string to the ``.py`` file to load.

    Returns:
        The (possibly only partially initialized) module object.
    """
    module_name = Path(module_path).stem
    # Explicit encoding: the course files contain emoji.
    with open(module_path, 'r', encoding='utf-8') as f:
        content = f.read()
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = module
    # Give the module a sensible identity before executing its body.
    # (The original set __file__ three times through two aliases of the same
    # dict; once is enough — module.__dict__ IS the exec namespace.)
    module.__file__ = module_path
    module.__name__ = module_name
    try:
        # Execute in the module's own namespace so definitions land on it.
        exec(content, module.__dict__)
    except Exception as e:
        print(f" ⚠️ Warning during execution: {e}")
    return module
def test_module_profiler(module_path, profiler_class_name):
    """Check that a module defines the expected profiler class and that the
    class can be constructed with no arguments."""
    print(f"\n🔍 Testing {Path(module_path).stem}")
    try:
        loaded = load_module_safely(module_path)
        profiler_cls = getattr(loaded, profiler_class_name, None)
        if profiler_cls is None:
            print(f"{profiler_class_name} not found")
            return False
        print(f" ✅ Found {profiler_class_name}")
        try:
            profiler = profiler_cls()
        except Exception as e:
            print(f" ⚠️ Could not instantiate: {e}")
            return False
        print(f" ✅ Successfully instantiated {profiler_class_name}")
        # Count callable public attributes without invoking any of them.
        public_methods = [
            attr for attr in dir(profiler)
            if not attr.startswith('_') and callable(getattr(profiler, attr))
        ]
        print(f" Found {len(public_methods)} public methods")
        return True
    except Exception as e:
        print(f" ❌ Error loading module: {e}")
        return False
def main():
    """Validate every late-course profiler module and print a summary."""
    rule = "=" * 60
    print(rule)
    print("🧪 Final Module Validation")
    print(rule)
    targets = [
        ("modules/source/12_compression/compression_dev.py", "CompressionSystemsProfiler"),
        ("modules/source/13_kernels/kernels_dev.py", "KernelOptimizationProfiler"),
        ("modules/source/14_benchmarking/benchmarking_dev.py", "ProductionBenchmarkingProfiler"),
        ("modules/source/15_mlops/mlops_dev.py", "ProductionMLOpsProfiler"),
        ("modules/source/16_capstone/capstone_dev.py", "ProductionMLSystemProfiler"),
    ]
    outcomes = {}
    for path, profiler_name in targets:
        if Path(path).exists():
            outcomes[path] = test_module_profiler(path, profiler_name)
        else:
            print(f"\n❌ Module not found: {path}")
            outcomes[path] = False
    print("\n" + rule)
    print("📊 Final Results:")
    print(rule)
    for path, passed in outcomes.items():
        verdict = "✅ PASS" if passed else "❌ FAIL"
        print(f"{verdict} - {Path(path).stem}")
    every_pass = all(outcomes.values())
    print("\n" + rule)
    if every_pass:
        print("🎉 All modules validated successfully!")
        print("The ML systems profilers are properly implemented.")
    else:
        print("⚠️ Some modules have issues that need fixing.")
        print("However, the core profiler classes are present.")
    print(rule)
    return 0 if every_pass else 1

if __name__ == "__main__":
    sys.exit(main())

View File

@@ -1,146 +0,0 @@
#!/usr/bin/env python3
"""
Test script to validate module execution with mock dependencies
"""
import sys
from pathlib import Path
from unittest.mock import MagicMock, patch
# Add project root to path so absolute imports resolve from a checkout.
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))
# Mock numpy and other dependencies so the course modules can be exec'd
# without the real packages installed.
sys.modules['numpy'] = MagicMock()
sys.modules['psutil'] = MagicMock()
# Stub the tinytorch package tree; the modules under test import these names.
sys.modules['tinytorch'] = MagicMock()
sys.modules['tinytorch.tensor'] = MagicMock()
sys.modules['tinytorch.nn'] = MagicMock()
sys.modules['tinytorch.optim'] = MagicMock()
sys.modules['tinytorch.data'] = MagicMock()
sys.modules['tinytorch.autograd'] = MagicMock()
sys.modules['tinytorch.utils.nbgrader'] = MagicMock()
def test_module_imports(module_path):
    """Test if a module can be imported and key classes instantiated.

    Executes the file's source in a fresh namespace (with numpy/time/json
    mocked) and verifies that the module's expected profiler class is defined.

    Args:
        module_path: Path string to a ``*_dev.py`` file.

    Returns:
        True when execution succeeded and the expected class (if any) was
        found; False on syntax/execution errors or a missing class.
        NOTE(review): a class that is found but fails to *instantiate* still
        yields True — presumably presence is the real requirement; confirm.
    """
    print(f"\n🔍 Testing: {module_path}")
    try:
        # Clear any cached imports so re-runs exec a fresh copy
        module_name = Path(module_path).stem
        if module_name in sys.modules:
            del sys.modules[module_name]
        # Read and execute the module
        with open(module_path, 'r') as f:
            code = f.read()
        # Create a namespace for execution; heavy deps are mocked out
        namespace = {
            '__name__': '__main__',
            '__file__': module_path,
            'np': MagicMock(),
            'time': MagicMock(),
            'json': MagicMock()
        }
        # Execute the code
        exec(code, namespace)
        # Check for expected classes based on module
        expected_classes = {
            'compression_dev': 'CompressionSystemsProfiler',
            'kernels_dev': 'KernelOptimizationProfiler',
            'benchmarking_dev': 'ProductionBenchmarkingProfiler',
            'mlops_dev': 'ProductionMLOpsProfiler',
            'capstone_dev': 'ProductionMLSystemProfiler'
        }
        module_name = Path(module_path).stem
        if module_name in expected_classes:
            class_name = expected_classes[module_name]
            if class_name in namespace:
                print(f" ✅ Found {class_name}")
                # Try to instantiate
                try:
                    instance = namespace[class_name]()
                    print(f" ✅ Successfully instantiated {class_name}")
                    # Check for key methods; a failed assert is caught by the
                    # enclosing except and reported as an instantiation issue.
                    if module_name == 'capstone_dev':
                        assert hasattr(instance, 'profile_end_to_end_system')
                        assert hasattr(instance, 'detect_cross_module_optimizations')
                        print(f" ✅ Key methods present")
                    elif module_name == 'mlops_dev':
                        assert hasattr(instance, 'register_model_version')
                        assert hasattr(instance, 'detect_advanced_feature_drift')
                        print(f" ✅ Key methods present")
                except Exception as e:
                    print(f" ⚠️ Could not instantiate: {e}")
            else:
                print(f"{class_name} not found in module")
                return False
        # Check test functions were called (if they exist)
        test_functions = [name for name in namespace if name.startswith('test_')]
        print(f" Found {len(test_functions)} test functions")
        return True
    except SyntaxError as e:
        print(f" ❌ Syntax Error: {e}")
        return False
    except Exception as e:
        print(f" ❌ Execution Error: {e}")
        import traceback
        traceback.print_exc()
        return False
def main():
    """Run the execution test over every late-course module and summarize."""
    banner = "=" * 60
    print(banner)
    print("🧪 Testing TinyTorch Module Execution")
    print(banner)
    module_paths = [
        "modules/source/12_compression/compression_dev.py",
        "modules/source/13_kernels/kernels_dev.py",
        "modules/source/14_benchmarking/benchmarking_dev.py",
        "modules/source/15_mlops/mlops_dev.py",
        "modules/source/16_capstone/capstone_dev.py"
    ]
    outcomes = {}
    for path in module_paths:
        if Path(path).exists():
            outcomes[path] = test_module_imports(path)
        else:
            print(f"\n❌ Module not found: {path}")
            outcomes[path] = False
    print("\n" + banner)
    print("📊 Test Results Summary:")
    print(banner)
    for path, passed in outcomes.items():
        # NOTE: both status glyphs are empty strings in the original source.
        status = "" if passed else ""
        print(f"{status} {Path(path).stem}: {'Passed' if passed else 'Failed'}")
    ok = all(outcomes.values())
    print("\n" + banner)
    print("✅ All module execution tests passed!" if ok
          else "❌ Some tests failed. The modules have syntax/import issues.")
    print(banner)
    return 0 if ok else 1

if __name__ == "__main__":
    sys.exit(main())

View File

@@ -1,130 +0,0 @@
#!/usr/bin/env python3
"""
Test script to validate module structure without numpy dependency
"""
import ast
import sys
from pathlib import Path
def validate_module_structure(filepath):
    """Statically validate that a module file has the expected structure.

    Parses the file with ``ast`` (no import/execution, so heavy dependencies
    like numpy are never loaded), checks for the expected section headers,
    and verifies the module-specific profiler class is defined.

    Args:
        filepath: ``Path`` to a ``*_dev.py`` module file.

    Returns:
        True when all required sections and classes are present, else False.
    """
    print(f"\n🔍 Validating: {filepath.name}")
    # Explicit encoding: the course files contain emoji.
    with open(filepath, 'r', encoding='utf-8') as f:
        content = f.read()
    try:
        tree = ast.parse(content)
        # Check for required classes/functions anywhere in the file
        classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
        functions = [node.name for node in ast.walk(tree) if isinstance(node, ast.FunctionDef)]
        # Section checks are plain substring tests against the markdown cells
        results = {
            "Classes found": len(classes),
            "Functions found": len(functions),
            "Has Introduction": "Module Introduction" in content,
            "Has Math Background": "Mathematical Background" in content,
            "Has Implementation": "Implementation" in content or "Core Implementation" in content,
            "Has Testing": "Testing" in content,
            "Has ML Systems Questions": "ML Systems Thinking" in content,
            "Has Summary": "Module Summary" in content
        }
        # Print results; only the boolean checks gate the overall verdict
        all_good = True
        for key, value in results.items():
            if isinstance(value, bool):
                status = "" if value else ""
                if not value:
                    all_good = False
            else:
                status = "" if value > 0 else "⚠️"
            print(f" {status} {key}: {value}")
        # Module-specific profiler class, looked up by filename keyword.
        # First matching keyword wins (same order as the old elif chain).
        profiler_by_keyword = {
            "compression": "CompressionSystemsProfiler",
            "kernels": "KernelOptimizationProfiler",
            "benchmarking": "ProductionBenchmarkingProfiler",
            "mlops": "ProductionMLOpsProfiler",
            "capstone": "ProductionMLSystemProfiler",
        }
        name_lower = filepath.name.lower()
        for keyword, cls_name in profiler_by_keyword.items():
            if keyword in name_lower:
                has_profiler = cls_name in classes
                print(f" {'' if has_profiler else ''} Has {cls_name}: {has_profiler}")
                if not has_profiler:
                    all_good = False
                break
        return all_good
    except SyntaxError as e:
        print(f" ❌ Syntax Error: {e}")
        return False
    except Exception as e:
        print(f" ❌ Error: {e}")
        return False
def main():
    """Validate the structure of every late-course module file."""
    sep = "=" * 60
    print(sep)
    print("🧪 Testing TinyTorch Module Structures")
    print(sep)
    targets = [
        "modules/source/12_compression/compression_dev.py",
        "modules/source/13_kernels/kernels_dev.py",
        "modules/source/14_benchmarking/benchmarking_dev.py",
        "modules/source/15_mlops/mlops_dev.py",
        "modules/source/16_capstone/capstone_dev.py"
    ]
    ok = True
    for raw_path in targets:
        candidate = Path(raw_path)
        if not candidate.exists():
            print(f"\n❌ Module not found: {raw_path}")
            ok = False
            continue
        if not validate_module_structure(candidate):
            ok = False
    print("\n" + sep)
    print("✅ All module structure tests passed!" if ok
          else "❌ Some tests failed. Please review the issues above.")
    print(sep)
    return 0 if ok else 1

if __name__ == "__main__":
    sys.exit(main())

View File

@@ -1,75 +0,0 @@
#!/usr/bin/env python3
"""
Clean test of TinyTorch pipeline for CIFAR-10 north star goal.
"""
import os
import sys

# Suppress module test outputs printed at import time.
# Bug fix: restore stdout via try/finally so a failed import cannot leave the
# process silenced, and close the devnull handle instead of leaking it.
_devnull = open(os.devnull, 'w')
_saved_stdout = sys.stdout
sys.stdout = _devnull
try:
    from tinytorch.core.tensor import Tensor
    from tinytorch.core.layers import Dense
    from tinytorch.core.activations import ReLU
    from tinytorch.core.networks import Sequential
    from tinytorch.core.dataloader import CIFAR10Dataset, DataLoader, SimpleDataset
    from tinytorch.core.training import CrossEntropyLoss, Accuracy, evaluate_model, plot_training_history
    from tinytorch.core.optimizers import SGD
finally:
    sys.stdout = _saved_stdout
    _devnull.close()

import numpy as np

print("=" * 60)
print("🎯 TINYTORCH PIPELINE VALIDATION")
print("=" * 60)

# 1. Test data loading
print("\n1⃣ Data Loading")
dataset = SimpleDataset(size=100, num_features=784, num_classes=10)
loader = DataLoader(dataset, batch_size=16)
batch_x, batch_y = next(iter(loader))
print(f"✅ DataLoader: {batch_x.shape} batches")

# 2. Test model creation
print("\n2⃣ Model Creation")
model = Sequential([
    Dense(784, 128),
    ReLU(),
    Dense(128, 10)
])
print("✅ Model: 784 → 128 → 10")

# 3. Test forward pass
print("\n3⃣ Forward Pass")
output = model(batch_x)
print(f"✅ Output: {output.shape}")

# 4. Test loss computation
print("\n4⃣ Loss Function")
loss_fn = CrossEntropyLoss()
loss = loss_fn(output, batch_y)
print(f"✅ Loss: {loss.data:.4f}")

# 5. Test CIFAR-10
print("\n5⃣ CIFAR-10 Dataset")
print("✅ CIFAR10Dataset class available")
print("✅ download_cifar10 function available")

# 6. Test training components
print("\n6⃣ Training Components")
from tinytorch.core.training import Trainer
print("✅ Trainer class available")
print("✅ save_checkpoint method available")
print("✅ evaluate_model function available")

print("\n" + "=" * 60)
print("🎉 ALL COMPONENTS WORKING!")
print("=" * 60)
print("\n📋 Students can now:")
print("1. Download CIFAR-10 with CIFAR10Dataset(download=True)")
print("2. Build CNNs with Sequential and Dense layers")
print("3. Train with Trainer.fit(save_best=True)")
print("4. Evaluate with evaluate_model()")
print("5. Save best models with checkpointing")
print("\n🎯 North Star Goal: ACHIEVABLE ✅")
print("=" * 60)

View File

@@ -1,83 +0,0 @@
#!/usr/bin/env python3
"""
Test TinyGPT package demo to see if text generation works
"""
import sys
import time
import tinytorch.tinygpt as tgpt
def test_tinygpt_demo():
    """Test if TinyGPT can generate text as a packaged demo.

    Smoke-tests the packaged pipeline end to end: tokenizer fit, model build,
    one-epoch training, and generation both before and after training.

    Returns:
        True when the demo runs to completion, False on any exception.
    """
    print("🤖 TinyGPT Package Demo Test")
    print("=" * 50)
    # Simple Shakespeare text for testing
    text = """To be, or not to be, that is the question:
Whether 'tis nobler in the mind to suffer
The slings and arrows of outrageous fortune,
Or to take arms against a sea of troubles
And by opposing end them."""
    print(f"📚 Training text: {len(text)} characters")
    try:
        # Create tokenizer
        print("\n🔤 Creating tokenizer...")
        tokenizer = tgpt.CharTokenizer(vocab_size=50)
        tokenizer.fit(text)
        vocab_size = tokenizer.get_vocab_size()
        print(f" Vocabulary size: {vocab_size}")
        # Create model (small config so the demo runs quickly)
        print("\n🧠 Creating TinyGPT model...")
        model = tgpt.TinyGPT(
            vocab_size=vocab_size,
            d_model=64,
            num_heads=4,
            num_layers=2,
            d_ff=256,
            max_length=128,
            dropout=0.1
        )
        print(f" Model parameters: {model.count_parameters():,}")
        # Create trainer
        print("\n🎓 Creating trainer...")
        trainer = tgpt.LanguageModelTrainer(model, tokenizer)
        # Test generation BEFORE training (should be random)
        print("\n📝 Pre-training generation test:")
        prompt = "To be"
        generated = trainer.generate_text(prompt, max_length=20, temperature=1.0)
        # NOTE(review): the separator between prompt and output below looks
        # like a glyph lost in an encoding pass — verify against the original.
        print(f" '{prompt}''{generated}'")
        # Quick training test; fit returns the history dict (unused here)
        print("\n🚀 Quick training test (1 epoch)...")
        history = trainer.fit(
            text=text,
            epochs=1,
            seq_length=16,
            batch_size=2,
            val_split=0.2,
            verbose=True
        )
        # Test generation AFTER training at several sampling temperatures
        print("\n📝 Post-training generation test:")
        for temp in [0.3, 0.7, 1.0]:
            generated = trainer.generate_text(prompt, max_length=30, temperature=temp)
            print(f" '{prompt}' (T={temp}) → '{generated}'")
        print("\n✅ TinyGPT package demo successful!")
        return True
    except Exception as e:
        print(f"\n❌ Demo failed: {e}")
        import traceback
        traceback.print_exc()
        return False

if __name__ == "__main__":
    success = test_tinygpt_demo()
    sys.exit(0 if success else 1)

View File

@@ -1,190 +0,0 @@
#!/usr/bin/env python3
"""
TinyGPT Live Typing Demo - Shows text generation character by character
Like watching a real AI think and type!
"""
import sys
import time
import tinytorch.tinygpt as tgpt
def typewriter_effect(text, delay=0.05):
    """Echo *text* one character at a time, pausing *delay* seconds between
    characters, and finish with a newline."""
    for ch in text:
        print(ch, end='', flush=True)
        time.sleep(delay)
    print()  # terminate the line
def live_generation_demo():
    """Demo TinyGPT with live character-by-character generation.

    End-to-end show piece: builds a char tokenizer and a small TinyGPT, shows
    untrained (random) output, trains a few epochs on a Shakespeare excerpt,
    then generates again at several temperatures with a typewriter effect.
    """
    print("🤖 TinyGPT Live Generation Demo")
    print("=" * 60)
    print("Watch TinyGPT learn and generate Shakespeare-style text!")
    print()
    # Extended Shakespeare for better learning (Hamlet soliloquy + Sonnet 18)
    shakespeare_text = """To be, or not to be, that is the question:
Whether 'tis nobler in the mind to suffer
The slings and arrows of outrageous fortune,
Or to take arms against a sea of troubles
And by opposing end them. To die—to sleep,
No more; and by a sleep to say we end
The heart-ache and the thousand natural shocks
That flesh is heir to: 'tis a consummation
Devoutly to be wish'd. To die, to sleep;
To sleep, perchance to dream—ay, there's the rub:
For in that sleep of death what dreams may come,
When we have shuffled off this mortal coil,
Must give us pause—there's the respect
That makes calamity of so long life.
Shall I compare thee to a summer's day?
Thou art more lovely and more temperate:
Rough winds do shake the darling buds of May,
And summer's lease hath all too short a date:
Sometime too hot the eye of heaven shines,
And often is his gold complexion dimmed;
And every fair from fair sometime declines,
By chance, or nature's changing course, untrimmed;
But thy eternal summer shall not fade,
Nor lose possession of that fair thou ow'st,
Nor shall death brag thou wander'st in his shade,
When in eternal lines to time thou grow'st:
So long as men can breathe or eyes can see,
So long lives this, and this gives life to thee."""
    print(f"📚 Shakespeare corpus: {len(shakespeare_text):,} characters")
    print(f" {len(shakespeare_text.split())} words from Hamlet & Sonnet 18")
    print()
    # Setup phase with typewriter effect
    typewriter_effect("🔤 Creating character tokenizer...")
    tokenizer = tgpt.CharTokenizer(vocab_size=100)
    tokenizer.fit(shakespeare_text)
    vocab_size = tokenizer.get_vocab_size()
    print(f" ✅ Vocabulary: {vocab_size} unique characters")
    print()
    typewriter_effect("🧠 Building TinyGPT neural network...")
    model = tgpt.TinyGPT(
        vocab_size=vocab_size,
        d_model=128,
        num_heads=8,
        num_layers=3,
        d_ff=512,
        max_length=200,
        dropout=0.1
    )
    print(f" ✅ Model: {model.count_parameters():,} parameters")
    # NOTE(review): the literal 3 and 8 below must be kept in sync with the
    # num_layers / num_heads arguments above.
    print(f" ✅ Architecture: {3} transformer layers, {8} attention heads")
    print()
    typewriter_effect("🎓 Initializing training system...")
    trainer = tgpt.LanguageModelTrainer(model, tokenizer)
    print()
    # Pre-training generation with live typing (untrained model = noise)
    print("📝 BEFORE TRAINING - Random Neural Noise:")
    print("-" * 50)
    prompts = ["To be", "Shall I", "When in"]
    for prompt in prompts:
        print(f"🎯 Prompt: '{prompt}'")
        print("🤖 TinyGPT: ", end='', flush=True)
        # Generate text
        generated = trainer.generate_text(prompt, max_length=25, temperature=1.0)
        # assumes generate_text returns the prompt followed by the newly
        # generated continuation — TODO confirm against tinytorch.tinygpt
        generated_part = generated[len(prompt):]
        # Type out the generated part character by character
        typewriter_effect(generated_part, delay=0.08)
        print()
    # Training phase with progress (the sleeps are purely theatrical)
    print("🚀 TRAINING PHASE - Learning Shakespeare...")
    print("=" * 50)
    typewriter_effect("Feeding Shakespeare into neural networks...")
    print("⚡ Processing language patterns...")
    time.sleep(0.5)
    print("🔄 Optimizing attention weights...")
    time.sleep(0.5)
    print("🧮 Computing gradients...")
    time.sleep(0.5)
    # Actual training
    start_time = time.time()
    history = trainer.fit(
        text=shakespeare_text,
        epochs=3,
        seq_length=32,
        batch_size=4,
        val_split=0.2,
        verbose=True
    )
    training_time = time.time() - start_time
    print(f"\n✅ Training complete in {training_time:.1f} seconds!")
    print(f" Final accuracy: {history['val_accuracy'][-1]:.1%}")
    print()
    # Post-training generation with dramatic effect
    print("📝 AFTER TRAINING - Shakespearean AI:")
    print("=" * 50)
    generation_prompts = [
        "To be, or not to",
        "Shall I compare thee",
        "When in eternal",
        "The slings and arrows",
        "But thy eternal"
    ]
    for i, prompt in enumerate(generation_prompts, 1):
        print(f"🎭 Generation {i}/5")
        print(f"🎯 Prompt: '{prompt}'")
        print("🤖 TinyGPT: ", end='', flush=True)
        # Generate with different temperatures for variety (one per prompt)
        temp = [0.3, 0.5, 0.7, 0.9, 1.0][i-1]
        generated = trainer.generate_text(prompt, max_length=40, temperature=temp)
        generated_part = generated[len(prompt):]
        # Live typing effect - slower and more dramatic
        typewriter_effect(generated_part, delay=0.1)
        print(f" (temperature: {temp})")
        print()
        # Small pause between generations
        time.sleep(0.5)
    # Finale: one longer, low-temperature generation
    print("🎉 FINALE - Continuous Generation:")
    print("=" * 50)
    print("🤖 TinyGPT composing original Shakespeare-style text...")
    print()
    print("🎭 ", end='', flush=True)
    final_poem = trainer.generate_text("To be", max_length=80, temperature=0.6)
    typewriter_effect(final_poem, delay=0.08)
    print()
    print("✨ TinyGPT Demo Complete!")
    print(f"🏆 Achievements:")
    print(f" • Built complete GPT from {model.count_parameters():,} parameters")
    print(f" • Learned Shakespeare in {training_time:.1f} seconds")
    print(f" • Generated original text with {vocab_size} character vocabulary")
    print(f" • Demonstrated autoregressive language modeling")
    print()
    print("🔥 This entire AI was built from scratch using only TinyTorch!")

if __name__ == "__main__":
    try:
        live_generation_demo()
    except KeyboardInterrupt:
        print("\n\n⏹️ Demo interrupted by user")
    except Exception as e:
        print(f"\n❌ Demo failed: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)