mirror of
https://github.com/MLSysBook/TinyTorch.git
synced 2026-05-06 20:14:44 -05:00
🔧 TESTING INFRASTRUCTURE FIXES: - Fixed pytest configuration (removed duplicate timeout) - Exported all modules to tinytorch package using nbdev - Converted .py files to .ipynb for proper NBDev processing - Fixed import issues in test files with fallback strategies 📊 TESTING RESULTS: - 145 tests passing, 15 failing, 16 skipped - Major improvement from previous import errors - All modules now properly exported and testable - Analysis tool working correctly on all modules 🎯 MODULE QUALITY STATUS: - Most modules: Grade C, Scaffolding 3/5 - 01_tensor: Grade C, Scaffolding 2/5 (needs improvement) - 07_autograd: Grade D, Scaffolding 2/5 (needs improvement) - Overall: Functional but needs educational enhancement ✅ RESOLVED ISSUES: - All import errors resolved - NBDev export process working - Test infrastructure functional - Analysis tools operational 🚀 READY FOR NEXT PHASE: Professional report cards and improvements
134 lines
4.6 KiB
Python
134 lines
4.6 KiB
Python
# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/source/00_setup/setup_dev.ipynb.
|
|
|
|
# %% auto 0
|
|
__all__ = ['personal_info', 'system_info']
|
|
|
|
# %% ../../modules/source/00_setup/setup_dev.ipynb 1
|
|
import sys
|
|
import platform
|
|
import psutil
|
|
import os
|
|
from typing import Dict, Any
|
|
|
|
# %% ../../modules/source/00_setup/setup_dev.ipynb 6
|
|
def personal_info() -> Dict[str, str]:
    """
    Return personal information for this TinyTorch installation.

    Every ML system should record who built it and how to reach them —
    the same idea as the 'author' field in Git commits, maintainer info
    in Docker images, or author metadata in Python packages.

    Returns
    -------
    Dict[str, str]
        A dictionary with the keys 'developer', 'email', 'institution',
        'system_name', and 'version'. All values are strings; the email
        is a valid address and 'system_name' is a unique, descriptive
        label for this installation.

    Example
    -------
    >>> personal_info()['version']
    '1.0.0'
    """
    ### BEGIN SOLUTION
    # Assemble the identity record; keyword-style construction keeps the
    # required keys visible at a glance.
    identity = dict(
        developer='Vijay Janapa Reddi',
        email='vj@eecs.harvard.edu',
        institution='Harvard University',
        system_name='VJ-TinyTorch-Dev',
        version='1.0.0',
    )
    return identity
    ### END SOLUTION
|
|
|
|
# %% ../../modules/source/00_setup/setup_dev.ipynb 8
|
|
def system_info() -> Dict[str, Any]:
    """
    Query and return system information for this TinyTorch installation.

    Gathers the hardware and software facts that matter for ML work —
    analogous to `torch.cuda.is_available()` in PyTorch or the system
    metadata MLflow records with each experiment:

    - cpu_count bounds parallel-processing capability
    - memory_gb bounds model and batch sizes
    - platform/architecture affect file handling and numeric optimization

    Returns
    -------
    Dict[str, Any]
        {
            'python_version': str,   # e.g. '3.9.7'
            'platform': str,         # e.g. 'Darwin'
            'architecture': str,     # e.g. 'arm64'
            'cpu_count': int,
            'memory_gb': float,      # total RAM, rounded to 1 decimal
        }
    """
    ### BEGIN SOLUTION
    vi = sys.version_info
    # Total RAM in bytes -> GiB (1024**3), rounded for readability.
    total_ram_bytes = psutil.virtual_memory().total
    return {
        'python_version': f"{vi.major}.{vi.minor}.{vi.micro}",
        'platform': platform.system(),
        'architecture': platform.machine(),
        'cpu_count': psutil.cpu_count(),
        'memory_gb': round(total_ram_bytes / (1024 ** 3), 1),
    }
    ### END SOLUTION
|