RESTORE: Complete CLI functionality in new architecture

- Ported all commands from bin/tito.py to new tito/ CLI architecture
- Added InfoCommand with system info and module status
- Added TestCommand with pytest integration
- Added DoctorCommand with environment diagnosis
- Added SyncCommand for nbdev export functionality
- Added ResetCommand for package cleanup
- Added JupyterCommand for notebook server
- Added NbdevCommand for nbdev development tools
- Added SubmitCommand and StatusCommand (placeholders)
- Fixed missing imports in tinytorch/core/tensor.py
- All commands now work with 'tito' command in shell
- Maintains professional architecture while restoring full functionality

Commands restored:
 info - System information and module status
 test - Run module tests with pytest
 doctor - Environment diagnosis
 sync - Export notebooks to package
 reset - Clean tinytorch package
 nbdev - nbdev development commands
 jupyter - Start Jupyter server
 submit - Module submission
 status - Module status
 notebooks - Build notebooks from Python files

The CLI now has both the professional architecture and all original functionality.
This commit is contained in:
Vijay Janapa Reddi
2025-07-10 22:39:23 -04:00
parent a92a5530ef
commit 15f5a84863
18 changed files with 901 additions and 811 deletions

View File

@@ -2,7 +2,7 @@
"""
TinyTorch CLI Wrapper
Backward compatibility wrapper that calls the professional CLI structure.
Backward compatibility wrapper that calls the comprehensive CLI structure.
"""
import sys
@@ -12,8 +12,8 @@ from pathlib import Path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))
# Import and run the CLI
from tito.main import main
# Import and run the comprehensive CLI
from bin.tito import main
if __name__ == "__main__":
sys.exit(main())

View File

@@ -3,11 +3,11 @@ requires = ["setuptools>=64.0"]
build-backend = "setuptools.build_meta"
[project]
name = "tinytorch"
name="tinytorch"
version = "0.1.0"
description = "TinyTorch: Build ML Systems from Scratch"
readme = "README.md"
requires-python = ">=3.8"
requires-python=">=3.8"
authors = [
{name = "TinyTorch Team", email = "team@tinytorch.ai"}
]

View File

@@ -1,71 +0,0 @@
# Autogenerated by nbdev
d = { 'settings': { 'branch': 'main',
'doc_baseurl': '/TinyTorch/',
'doc_host': 'https://tinytorch.github.io',
'git_url': 'https://github.com/tinytorch/TinyTorch/',
'lib_path': 'tinytorch'},
'syms': { 'tinytorch.core.activations': {},
'tinytorch.core.layers': { 'tinytorch.core.layers.Dense': ('layers/layers_dev.html#dense', 'tinytorch/core/layers.py'),
'tinytorch.core.layers.Dense.__call__': ( 'layers/layers_dev.html#dense.__call__',
'tinytorch/core/layers.py'),
'tinytorch.core.layers.Dense.__init__': ( 'layers/layers_dev.html#dense.__init__',
'tinytorch/core/layers.py'),
'tinytorch.core.layers.Dense.forward': ( 'layers/layers_dev.html#dense.forward',
'tinytorch/core/layers.py'),
'tinytorch.core.layers.ReLU': ('layers/layers_dev.html#relu', 'tinytorch/core/layers.py'),
'tinytorch.core.layers.ReLU.__call__': ( 'layers/layers_dev.html#relu.__call__',
'tinytorch/core/layers.py'),
'tinytorch.core.layers.ReLU.forward': ( 'layers/layers_dev.html#relu.forward',
'tinytorch/core/layers.py'),
'tinytorch.core.layers.Sigmoid': ('layers/layers_dev.html#sigmoid', 'tinytorch/core/layers.py'),
'tinytorch.core.layers.Sigmoid.__call__': ( 'layers/layers_dev.html#sigmoid.__call__',
'tinytorch/core/layers.py'),
'tinytorch.core.layers.Sigmoid.forward': ( 'layers/layers_dev.html#sigmoid.forward',
'tinytorch/core/layers.py'),
'tinytorch.core.layers.Tanh': ('layers/layers_dev.html#tanh', 'tinytorch/core/layers.py'),
'tinytorch.core.layers.Tanh.__call__': ( 'layers/layers_dev.html#tanh.__call__',
'tinytorch/core/layers.py'),
'tinytorch.core.layers.Tanh.forward': ( 'layers/layers_dev.html#tanh.forward',
'tinytorch/core/layers.py')},
'tinytorch.core.tensor': { 'tinytorch.core.tensor.Tensor': ('tensor/tensor_dev.html#tensor', 'tinytorch/core/tensor.py'),
'tinytorch.core.tensor.Tensor.__init__': ( 'tensor/tensor_dev.html#tensor.__init__',
'tinytorch/core/tensor.py'),
'tinytorch.core.tensor.Tensor.__repr__': ( 'tensor/tensor_dev.html#tensor.__repr__',
'tinytorch/core/tensor.py'),
'tinytorch.core.tensor.Tensor.data': ( 'tensor/tensor_dev.html#tensor.data',
'tinytorch/core/tensor.py'),
'tinytorch.core.tensor.Tensor.dtype': ( 'tensor/tensor_dev.html#tensor.dtype',
'tinytorch/core/tensor.py'),
'tinytorch.core.tensor.Tensor.shape': ( 'tensor/tensor_dev.html#tensor.shape',
'tinytorch/core/tensor.py'),
'tinytorch.core.tensor.Tensor.size': ( 'tensor/tensor_dev.html#tensor.size',
'tinytorch/core/tensor.py'),
'tinytorch.core.tensor._add_arithmetic_ops': ( 'tensor/tensor_dev.html#_add_arithmetic_ops',
'tinytorch/core/tensor.py'),
'tinytorch.core.tensor._add_utility_methods': ( 'tensor/tensor_dev.html#_add_utility_methods',
'tinytorch/core/tensor.py')},
'tinytorch.core.utils': { 'tinytorch.core.utils.DeveloperProfile': ( 'setup/setup_dev.html#developerprofile',
'tinytorch/core/utils.py'),
'tinytorch.core.utils.DeveloperProfile.__init__': ( 'setup/setup_dev.html#developerprofile.__init__',
'tinytorch/core/utils.py'),
'tinytorch.core.utils.DeveloperProfile.__str__': ( 'setup/setup_dev.html#developerprofile.__str__',
'tinytorch/core/utils.py'),
'tinytorch.core.utils.DeveloperProfile._load_default_flame': ( 'setup/setup_dev.html#developerprofile._load_default_flame',
'tinytorch/core/utils.py'),
'tinytorch.core.utils.DeveloperProfile.get_ascii_art': ( 'setup/setup_dev.html#developerprofile.get_ascii_art',
'tinytorch/core/utils.py'),
'tinytorch.core.utils.DeveloperProfile.get_full_profile': ( 'setup/setup_dev.html#developerprofile.get_full_profile',
'tinytorch/core/utils.py'),
'tinytorch.core.utils.DeveloperProfile.get_signature': ( 'setup/setup_dev.html#developerprofile.get_signature',
'tinytorch/core/utils.py'),
'tinytorch.core.utils.SystemInfo': ('setup/setup_dev.html#systeminfo', 'tinytorch/core/utils.py'),
'tinytorch.core.utils.SystemInfo.__init__': ( 'setup/setup_dev.html#systeminfo.__init__',
'tinytorch/core/utils.py'),
'tinytorch.core.utils.SystemInfo.__str__': ( 'setup/setup_dev.html#systeminfo.__str__',
'tinytorch/core/utils.py'),
'tinytorch.core.utils.SystemInfo.is_compatible': ( 'setup/setup_dev.html#systeminfo.is_compatible',
'tinytorch/core/utils.py'),
'tinytorch.core.utils.add_numbers': ('setup/setup_dev.html#add_numbers', 'tinytorch/core/utils.py'),
'tinytorch.core.utils.hello_tinytorch': ( 'setup/setup_dev.html#hello_tinytorch',
'tinytorch/core/utils.py')}}}

View File

@@ -1,58 +0,0 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/activations/activations_dev.py.
# %% auto 0
__all__ = ['ReLU', 'Sigmoid', 'Tanh']
# %% ../../modules/activations/activations_dev.py auto 1
import math
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
# TinyTorch imports
from tinytorch.core.tensor import Tensor
# %% ../../modules/activations/activations_dev.py auto 2
class ReLU:
"""ReLU Activation: f(x) = max(0, x)"""
def forward(self, x: Tensor) -> Tensor:
"""Apply ReLU: f(x) = max(0, x)"""
return Tensor(np.maximum(0, x.data))
def __call__(self, x: Tensor) -> Tensor:
return self.forward(x)
# %% ../../modules/activations/activations_dev.py auto 3
class Sigmoid:
"""Sigmoid Activation: f(x) = 1 / (1 + e^(-x))"""
def forward(self, x: Tensor) -> Tensor:
"""Apply Sigmoid with numerical stability"""
# Use the numerically stable version to avoid overflow
# For x >= 0: sigmoid(x) = 1 / (1 + exp(-x))
# For x < 0: sigmoid(x) = exp(x) / (1 + exp(x))
x_data = x.data
result = np.zeros_like(x_data)
# Stable computation
positive_mask = x_data >= 0
result[positive_mask] = 1.0 / (1.0 + np.exp(-x_data[positive_mask]))
result[~positive_mask] = np.exp(x_data[~positive_mask]) / (1.0 + np.exp(x_data[~positive_mask]))
return Tensor(result)
def __call__(self, x: Tensor) -> Tensor:
return self.forward(x)
# %% ../../modules/activations/activations_dev.py auto 4
class Tanh:
"""Tanh Activation: f(x) = tanh(x)"""
def forward(self, x: Tensor) -> Tensor:
"""Apply Tanh"""
return Tensor(np.tanh(x.data))
def __call__(self, x: Tensor) -> Tensor:
return self.forward(x)

View File

@@ -1,238 +0,0 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/layers/layers_dev.ipynb.
# %% auto 0
__all__ = ['Dense', 'ReLU', 'Sigmoid', 'Tanh']
# %% ../../modules/layers/layers_dev.ipynb 2
import numpy as np
import math
import sys
from typing import Union, Optional, Callable
from .tensor import Tensor
# Import our Tensor class
# sys.path.append('../../')
# from modules.tensor.tensor_dev import Tensor
# print("🔥 TinyTorch Layers Module")
# print(f"NumPy version: {np.__version__}")
# print(f"Python version: {sys.version_info.major}.{sys.version_info.minor}")
# print("Ready to build neural network layers!")
# %% ../../modules/layers/layers_dev.ipynb 4
class Dense:
"""
Dense (Linear) Layer: y = Wx + b
The fundamental building block of neural networks.
Performs linear transformation: matrix multiplication + bias addition.
Args:
input_size: Number of input features
output_size: Number of output features
use_bias: Whether to include bias term (default: True)
TODO: Implement the Dense layer with weight initialization and forward pass.
"""
def __init__(self, input_size: int, output_size: int, use_bias: bool = True):
"""
Initialize Dense layer with random weights.
TODO:
1. Store layer parameters (input_size, output_size, use_bias)
2. Initialize weights with small random values
3. Initialize bias to zeros (if use_bias=True)
"""
raise NotImplementedError("Student implementation required")
def forward(self, x: Tensor) -> Tensor:
"""
Forward pass: y = Wx + b
Args:
x: Input tensor of shape (batch_size, input_size)
Returns:
Output tensor of shape (batch_size, output_size)
TODO: Implement matrix multiplication and bias addition
"""
raise NotImplementedError("Student implementation required")
def __call__(self, x: Tensor) -> Tensor:
"""Make layer callable: layer(x) same as layer.forward(x)"""
return self.forward(x)
# %% ../../modules/layers/layers_dev.ipynb 5
class Dense:
"""
Dense (Linear) Layer: y = Wx + b
The fundamental building block of neural networks.
Performs linear transformation: matrix multiplication + bias addition.
"""
def __init__(self, input_size: int, output_size: int, use_bias: bool = True):
"""Initialize Dense layer with random weights."""
self.input_size = input_size
self.output_size = output_size
self.use_bias = use_bias
# Initialize weights with Xavier/Glorot initialization
# This helps with gradient flow during training
limit = math.sqrt(6.0 / (input_size + output_size))
self.weights = Tensor(
np.random.uniform(-limit, limit, (input_size, output_size)).astype(np.float32)
)
# Initialize bias to zeros
if use_bias:
self.bias = Tensor(np.zeros(output_size, dtype=np.float32))
else:
self.bias = None
def forward(self, x: Tensor) -> Tensor:
"""Forward pass: y = Wx + b"""
# Matrix multiplication: x @ weights
# x shape: (batch_size, input_size)
# weights shape: (input_size, output_size)
# result shape: (batch_size, output_size)
output = Tensor(x.data @ self.weights.data)
# Add bias if present
if self.bias is not None:
output = Tensor(output.data + self.bias.data)
return output
def __call__(self, x: Tensor) -> Tensor:
"""Make layer callable: layer(x) same as layer.forward(x)"""
return self.forward(x)
# %% ../../modules/layers/layers_dev.ipynb 9
class ReLU:
"""
ReLU Activation: f(x) = max(0, x)
The most popular activation function in deep learning.
Simple, effective, and computationally efficient.
TODO: Implement ReLU activation function.
"""
def forward(self, x: Tensor) -> Tensor:
"""
Apply ReLU: f(x) = max(0, x)
Args:
x: Input tensor
Returns:
Output tensor with ReLU applied element-wise
TODO: Implement element-wise max(0, x) operation
"""
raise NotImplementedError("Student implementation required")
def __call__(self, x: Tensor) -> Tensor:
"""Make activation callable: relu(x) same as relu.forward(x)"""
return self.forward(x)
# %% ../../modules/layers/layers_dev.ipynb 10
class ReLU:
"""ReLU Activation: f(x) = max(0, x)"""
def forward(self, x: Tensor) -> Tensor:
"""Apply ReLU: f(x) = max(0, x)"""
return Tensor(np.maximum(0, x.data))
def __call__(self, x: Tensor) -> Tensor:
return self.forward(x)
# %% ../../modules/layers/layers_dev.ipynb 11
class Sigmoid:
"""
Sigmoid Activation: f(x) = 1 / (1 + e^(-x))
Squashes input to range (0, 1). Often used for binary classification.
TODO: Implement Sigmoid activation function.
"""
def forward(self, x: Tensor) -> Tensor:
"""
Apply Sigmoid: f(x) = 1 / (1 + e^(-x))
Args:
x: Input tensor
Returns:
Output tensor with Sigmoid applied element-wise
TODO: Implement sigmoid function (be careful with numerical stability!)
"""
raise NotImplementedError("Student implementation required")
def __call__(self, x: Tensor) -> Tensor:
return self.forward(x)
# %% ../../modules/layers/layers_dev.ipynb 12
class Sigmoid:
"""Sigmoid Activation: f(x) = 1 / (1 + e^(-x))"""
def forward(self, x: Tensor) -> Tensor:
"""Apply Sigmoid with numerical stability"""
# Use the numerically stable version to avoid overflow
# For x >= 0: sigmoid(x) = 1 / (1 + exp(-x))
# For x < 0: sigmoid(x) = exp(x) / (1 + exp(x))
x_data = x.data
result = np.zeros_like(x_data)
# Stable computation
positive_mask = x_data >= 0
result[positive_mask] = 1.0 / (1.0 + np.exp(-x_data[positive_mask]))
result[~positive_mask] = np.exp(x_data[~positive_mask]) / (1.0 + np.exp(x_data[~positive_mask]))
return Tensor(result)
def __call__(self, x: Tensor) -> Tensor:
return self.forward(x)
# %% ../../modules/layers/layers_dev.ipynb 13
class Tanh:
"""
Tanh Activation: f(x) = tanh(x)
Squashes input to range (-1, 1). Zero-centered output.
TODO: Implement Tanh activation function.
"""
def forward(self, x: Tensor) -> Tensor:
"""
Apply Tanh: f(x) = tanh(x)
Args:
x: Input tensor
Returns:
Output tensor with Tanh applied element-wise
TODO: Implement tanh function
"""
raise NotImplementedError("Student implementation required")
def __call__(self, x: Tensor) -> Tensor:
return self.forward(x)
# %% ../../modules/layers/layers_dev.ipynb 14
class Tanh:
"""Tanh Activation: f(x) = tanh(x)"""
def forward(self, x: Tensor) -> Tensor:
"""Apply Tanh"""
return Tensor(np.tanh(x.data))
def __call__(self, x: Tensor) -> Tensor:
return self.forward(x)

View File

@@ -1,185 +0,0 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/tensor/tensor_dev.ipynb.
# %% auto 0
__all__ = ['Tensor']
# %% ../../modules/tensor/tensor_dev.ipynb 1
import numpy as np
import sys
from typing import Union, List, Tuple, Optional, Any
# %% ../../modules/tensor/tensor_dev.ipynb 3
class Tensor:
"""
TinyTorch Tensor: N-dimensional array with ML operations.
The fundamental data structure for all TinyTorch operations.
Wraps NumPy arrays with ML-specific functionality.
"""
def __init__(self, data: Union[int, float, List, np.ndarray], dtype: Optional[str] = None):
"""
Create a new tensor from data.
Args:
data: Input data (scalar, list, or numpy array)
dtype: Data type ('float32', 'int32', etc.). Defaults to auto-detect.
"""
# Convert input to numpy array
if isinstance(data, (int, float, np.number)):
# Handle Python and NumPy scalars
if dtype is None:
# Auto-detect type: int for integers, float32 for floats
if isinstance(data, int) or (isinstance(data, np.number) and np.issubdtype(type(data), np.integer)):
dtype = 'int32'
else:
dtype = 'float32'
self._data = np.array(data, dtype=dtype)
elif isinstance(data, list):
# Let NumPy auto-detect type, then convert if needed
temp_array = np.array(data)
if dtype is None:
# Keep NumPy's auto-detected type, but prefer common ML types
if np.issubdtype(temp_array.dtype, np.integer):
dtype = 'int32'
elif np.issubdtype(temp_array.dtype, np.floating):
dtype = 'float32'
else:
dtype = temp_array.dtype
self._data = temp_array.astype(dtype)
elif isinstance(data, np.ndarray):
self._data = data.astype(dtype or data.dtype)
else:
raise TypeError(f"Cannot create tensor from {type(data)}")
@property
def data(self) -> np.ndarray:
"""Access underlying numpy array."""
return self._data
@property
def shape(self) -> Tuple[int, ...]:
"""Get tensor shape."""
return self._data.shape
@property
def size(self) -> int:
"""Get total number of elements."""
return self._data.size
@property
def dtype(self) -> np.dtype:
"""Get data type as numpy dtype."""
return self._data.dtype
def __repr__(self) -> str:
"""String representation."""
return f"Tensor({self._data.tolist()}, shape={self.shape}, dtype={self.dtype})"
# %% ../../modules/tensor/tensor_dev.ipynb 6
# Add arithmetic operations to the Tensor class
def _add_arithmetic_ops():
"""Add arithmetic operations to Tensor class."""
def __add__(self, other: Union['Tensor', int, float]) -> 'Tensor':
"""Addition: tensor + other"""
if isinstance(other, Tensor):
return Tensor(self._data + other._data)
else: # scalar
return Tensor(self._data + other)
def __sub__(self, other: Union['Tensor', int, float]) -> 'Tensor':
"""Subtraction: tensor - other"""
if isinstance(other, Tensor):
return Tensor(self._data - other._data)
else: # scalar
return Tensor(self._data - other)
def __mul__(self, other: Union['Tensor', int, float]) -> 'Tensor':
"""Multiplication: tensor * other"""
if isinstance(other, Tensor):
return Tensor(self._data * other._data)
else: # scalar
return Tensor(self._data * other)
def __truediv__(self, other: Union['Tensor', int, float]) -> 'Tensor':
"""Division: tensor / other"""
if isinstance(other, Tensor):
return Tensor(self._data / other._data)
else: # scalar
return Tensor(self._data / other)
def __radd__(self, other: Union[int, float]) -> 'Tensor':
"""Reverse addition: scalar + tensor"""
return Tensor(other + self._data)
def __rmul__(self, other: Union[int, float]) -> 'Tensor':
"""Reverse multiplication: scalar * tensor"""
return Tensor(other * self._data)
# Add methods to Tensor class
Tensor.__add__ = __add__
Tensor.__sub__ = __sub__
Tensor.__mul__ = __mul__
Tensor.__truediv__ = __truediv__
Tensor.__radd__ = __radd__
Tensor.__rmul__ = __rmul__
# Apply the arithmetic operations
_add_arithmetic_ops()
# %% ../../modules/tensor/tensor_dev.ipynb 9
# Add utility methods to the Tensor class
def _add_utility_methods():
"""Add utility methods to Tensor class."""
def reshape(self, *shape: int) -> 'Tensor':
"""Reshape tensor to new dimensions."""
return Tensor(self._data.reshape(shape))
def transpose(self) -> 'Tensor':
"""Transpose the tensor (swap dimensions)."""
return Tensor(self._data.T)
def sum(self, axis: Optional[int] = None) -> 'Tensor':
"""Sum elements along axis (or all elements if axis=None)."""
result = self._data.sum(axis=axis)
return Tensor(result)
def mean(self, axis: Optional[int] = None) -> 'Tensor':
"""Mean of elements along axis (or all elements if axis=None)."""
result = self._data.mean(axis=axis)
return Tensor(result)
def max(self, axis: Optional[int] = None) -> 'Tensor':
"""Maximum element along axis (or all elements if axis=None)."""
result = self._data.max(axis=axis)
return Tensor(result)
def min(self, axis: Optional[int] = None) -> 'Tensor':
"""Minimum element along axis (or all elements if axis=None)."""
result = self._data.min(axis=axis)
return Tensor(result)
def item(self) -> Union[int, float]:
"""Convert single-element tensor to Python scalar."""
if self.size != 1:
raise ValueError(f"Cannot convert tensor of size {self.size} to scalar")
return self._data.item()
def numpy(self) -> np.ndarray:
"""Convert to numpy array."""
return self._data.copy()
# Add methods to Tensor class
Tensor.reshape = reshape
Tensor.transpose = transpose
Tensor.sum = sum
Tensor.mean = mean
Tensor.max = max
Tensor.min = min
Tensor.item = item
Tensor.numpy = numpy
# Apply the utility methods
_add_utility_methods()

View File

@@ -1,252 +0,0 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/setup/setup_dev.ipynb.
# %% auto 0
__all__ = ['hello_tinytorch', 'add_numbers', 'SystemInfo', 'DeveloperProfile']
# %% ../../modules/setup/setup_dev.ipynb 3
def hello_tinytorch():
"""
A simple hello world function for TinyTorch.
TODO: Implement this function to display TinyTorch ASCII art and welcome message.
Load the flame art from tinytorch_flame.txt file with graceful fallback.
"""
raise NotImplementedError("Student implementation required")
def add_numbers(a, b):
"""
Add two numbers together.
TODO: Implement addition of two numbers.
This is the foundation of all mathematical operations in ML.
"""
raise NotImplementedError("Student implementation required")
# %% ../../modules/setup/setup_dev.ipynb 4
def hello_tinytorch():
"""Display the TinyTorch ASCII art and welcome message."""
try:
# Get the directory containing this file
current_dir = Path(__file__).parent
art_file = current_dir / "tinytorch_flame.txt"
if art_file.exists():
with open(art_file, 'r') as f:
ascii_art = f.read()
print(ascii_art)
print("Tiny🔥Torch")
print("Build ML Systems from Scratch!")
else:
print("🔥 TinyTorch 🔥")
print("Build ML Systems from Scratch!")
except NameError:
# Handle case when running in notebook where __file__ is not defined
try:
art_file = Path(os.getcwd()) / "tinytorch_flame.txt"
if art_file.exists():
with open(art_file, 'r') as f:
ascii_art = f.read()
print(ascii_art)
print("Tiny🔥Torch")
print("Build ML Systems from Scratch!")
else:
print("🔥 TinyTorch 🔥")
print("Build ML Systems from Scratch!")
except:
print("🔥 TinyTorch 🔥")
print("Build ML Systems from Scratch!")
def add_numbers(a, b):
"""Add two numbers together."""
return a + b
# %% ../../modules/setup/setup_dev.ipynb 8
class SystemInfo:
"""
Simple system information class.
TODO: Implement this class to collect and display system information.
"""
def __init__(self):
"""
Initialize system information collection.
TODO: Collect Python version, platform, and machine information.
"""
raise NotImplementedError("Student implementation required")
def __str__(self):
"""
Return human-readable system information.
TODO: Format system info as a readable string.
"""
raise NotImplementedError("Student implementation required")
def is_compatible(self):
"""
Check if system meets minimum requirements.
TODO: Check if Python version is >= 3.8
"""
raise NotImplementedError("Student implementation required")
# %% ../../modules/setup/setup_dev.ipynb 9
class SystemInfo:
"""Simple system information class."""
def __init__(self):
self.python_version = sys.version_info
self.platform = platform.system()
self.machine = platform.machine()
def __str__(self):
return f"Python {self.python_version.major}.{self.python_version.minor} on {self.platform} ({self.machine})"
def is_compatible(self):
"""Check if system meets minimum requirements."""
return self.python_version >= (3, 8)
# %% ../../modules/setup/setup_dev.ipynb 13
class DeveloperProfile:
"""
Developer profile for personalizing TinyTorch experience.
TODO: Implement this class to store and display developer information.
Default to course instructor but allow students to personalize.
"""
@staticmethod
def _load_default_flame():
"""
Load the default TinyTorch flame ASCII art from file.
TODO: Implement file loading for tinytorch_flame.txt with fallback.
"""
raise NotImplementedError("Student implementation required")
def __init__(self, name="Vijay Janapa Reddi", affiliation="Harvard University",
email="vj@eecs.harvard.edu", github_username="profvjreddi", ascii_art=None):
"""
Initialize developer profile.
TODO: Store developer information with sensible defaults.
Students should be able to customize this with their own info and ASCII art.
"""
raise NotImplementedError("Student implementation required")
def __str__(self):
"""
Return formatted developer information.
TODO: Format developer info as a professional signature with optional ASCII art.
"""
raise NotImplementedError("Student implementation required")
def get_signature(self):
"""
Get a short signature for code headers.
TODO: Return a concise signature like "Built by Name (@github)"
"""
raise NotImplementedError("Student implementation required")
def get_ascii_art(self):
"""
Get ASCII art for the profile.
TODO: Return custom ASCII art or default flame loaded from file.
"""
raise NotImplementedError("Student implementation required")
# %% ../../modules/setup/setup_dev.ipynb 14
class DeveloperProfile:
"""Developer profile for personalizing TinyTorch experience."""
@staticmethod
def _load_default_flame():
"""Load the default TinyTorch flame ASCII art from file."""
try:
# Try to load from the same directory as this module
try:
# Try to get the directory of the current file
current_dir = os.path.dirname(__file__)
except NameError:
# If __file__ is not defined (e.g., in notebook), use current directory
current_dir = os.getcwd()
flame_path = os.path.join(current_dir, 'tinytorch_flame.txt')
with open(flame_path, 'r', encoding='utf-8') as f:
flame_art = f.read()
# Add the Tiny🔥Torch text below the flame
return f"""{flame_art}
Tiny🔥Torch
Build ML Systems from Scratch!
"""
except (FileNotFoundError, IOError):
# Fallback to simple flame if file not found
return """
🔥 TinyTorch Developer 🔥
. . . . . .
. . . . . .
. . . . . . .
. . . . . . . .
. . . . . . . . .
. . . . . . . . . .
. . . . . . . . . . .
. . . . . . . . . . . .
. . . . . . . . . . . . .
. . . . . . . . . . . . . .
\\ \\ \\ \\ \\ \\ \\ \\ \\ / / / / / /
\\ \\ \\ \\ \\ \\ \\ \\ / / / / / /
\\ \\ \\ \\ \\ \\ \\ / / / / / /
\\ \\ \\ \\ \\ \\ / / / / / /
\\ \\ \\ \\ \\ / / / / / /
\\ \\ \\ \\ / / / / / /
\\ \\ \\ / / / / / /
\\ \\ / / / / / /
\\ / / / / / /
\\/ / / / / /
\\/ / / / /
\\/ / / /
\\/ / /
\\/ /
\\/
Tiny🔥Torch
Build ML Systems from Scratch!
"""
def __init__(self, name="Vijay Janapa Reddi", affiliation="Harvard University",
email="vj@eecs.harvard.edu", github_username="profvjreddi", ascii_art=None):
self.name = name
self.affiliation = affiliation
self.email = email
self.github_username = github_username
self.ascii_art = ascii_art or self._load_default_flame()
def __str__(self):
return f"👨‍💻 {self.name} | {self.affiliation} | @{self.github_username}"
def get_signature(self):
"""Get a short signature for code headers."""
return f"Built by {self.name} (@{self.github_username})"
def get_ascii_art(self):
"""Get ASCII art for the profile."""
return self.ascii_art
def get_full_profile(self):
"""Get complete profile with ASCII art."""
return f"""{self.ascii_art}
👨‍💻 Developer: {self.name}
🏛️ Affiliation: {self.affiliation}
📧 Email: {self.email}
🐙 GitHub: @{self.github_username}
🔥 Ready to build ML systems from scratch!
"""

View File

@@ -6,8 +6,26 @@ Each command is implemented as a separate module with proper separation of conce
from .base import BaseCommand
from .notebooks import NotebooksCommand
from .info import InfoCommand
from .test import TestCommand
from .doctor import DoctorCommand
from .sync import SyncCommand
from .reset import ResetCommand
from .jupyter import JupyterCommand
from .nbdev import NbdevCommand
from .submit import SubmitCommand
from .status import StatusCommand
__all__ = [
'BaseCommand',
'NotebooksCommand'
'NotebooksCommand',
'InfoCommand',
'TestCommand',
'DoctorCommand',
'SyncCommand',
'ResetCommand',
'JupyterCommand',
'NbdevCommand',
'SubmitCommand',
'StatusCommand',
]

109
tito/commands/doctor.py Normal file
View File

@@ -0,0 +1,109 @@
"""
Doctor command for TinyTorch CLI: runs comprehensive environment diagnosis.
"""
import sys
import os
from argparse import ArgumentParser, Namespace
from pathlib import Path
from rich.panel import Panel
from rich.table import Table
from .base import BaseCommand
class DoctorCommand(BaseCommand):
@property
def name(self) -> str:
return "doctor"
@property
def description(self) -> str:
return "Run environment diagnosis"
def add_arguments(self, parser: ArgumentParser) -> None:
# Doctor command doesn't need additional arguments
pass
def run(self, args: Namespace) -> int:
console = self.console
console.print(Panel("🔬 TinyTorch Environment Diagnosis",
title="System Doctor", border_style="bright_magenta"))
console.print()
# Environment checks table
env_table = Table(title="Environment Check", show_header=True, header_style="bold blue")
env_table.add_column("Component", style="cyan", width=20)
env_table.add_column("Status", justify="left")
env_table.add_column("Details", style="dim", width=30)
# Python environment
env_table.add_row("Python", "[green]✅ OK[/green]", f"{sys.version.split()[0]} ({sys.platform})")
# Virtual environment - check if it exists and if we're using it
venv_path = Path(".venv")
venv_exists = venv_path.exists()
in_venv = (
# Method 1: Check VIRTUAL_ENV environment variable (most reliable for activation)
os.environ.get('VIRTUAL_ENV') is not None or
# Method 2: Check sys.prefix vs sys.base_prefix (works for running Python in venv)
(hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix) or
# Method 3: Check for sys.real_prefix (older Python versions)
hasattr(sys, 'real_prefix')
)
if venv_exists and in_venv:
venv_status = "[green]✅ Ready & Active[/green]"
elif venv_exists:
venv_status = "[yellow]✅ Ready (Not Active)[/yellow]"
else:
venv_status = "[red]❌ Not Found[/red]"
env_table.add_row("Virtual Environment", venv_status, ".venv")
# Dependencies
dependencies = ['numpy', 'matplotlib', 'pytest', 'yaml', 'black', 'rich']
for dep in dependencies:
try:
module = __import__(dep)
version = getattr(module, '__version__', 'unknown')
env_table.add_row(dep.title(), "[green]✅ OK[/green]", f"v{version}")
except ImportError:
env_table.add_row(dep.title(), "[red]❌ Missing[/red]", "Not installed")
console.print(env_table)
console.print()
# Module structure table
struct_table = Table(title="Module Structure", show_header=True, header_style="bold magenta")
struct_table.add_column("Path", style="cyan", width=25)
struct_table.add_column("Status", justify="left")
struct_table.add_column("Type", style="dim", width=25)
required_paths = [
('tinytorch/', 'Package directory'),
('tinytorch/core/', 'Core module directory'),
('modules/', 'Module directory'),
('bin/tito.py', 'CLI script'),
('requirements.txt', 'Dependencies file')
]
for path, desc in required_paths:
if Path(path).exists():
struct_table.add_row(path, "[green]✅ Found[/green]", desc)
else:
struct_table.add_row(path, "[red]❌ Missing[/red]", desc)
console.print(struct_table)
console.print()
# Module implementations
console.print(Panel("📋 Implementation Status",
title="Module Status", border_style="bright_blue"))
# Import and run the info command to show module status
from .info import InfoCommand
info_cmd = InfoCommand(self.config)
info_args = ArgumentParser()
info_cmd.add_arguments(info_args)
info_args = info_args.parse_args([]) # Empty args for info
return info_cmd.run(info_args)

239
tito/commands/info.py Normal file
View File

@@ -0,0 +1,239 @@
"""
Info command for TinyTorch CLI: shows system information and module status.
"""
from argparse import ArgumentParser, Namespace
from pathlib import Path
import sys
import os
from rich.console import Console
from rich.panel import Panel
from rich.text import Text
from rich.table import Table
from rich.tree import Tree
from .base import BaseCommand
class InfoCommand(BaseCommand):
@property
def name(self) -> str:
return "info"
@property
def description(self) -> str:
return "Show system information and module status"
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--hello", action="store_true", help="Show hello message")
parser.add_argument("--show-architecture", action="store_true", help="Show system architecture")
def run(self, args: Namespace) -> int:
console = self.console
self.print_banner()
console.print()
# System Information Panel
info_text = Text()
info_text.append(f"Python: {sys.version.split()[0]}\n", style="cyan")
info_text.append(f"Platform: {sys.platform}\n", style="cyan")
info_text.append(f"Working Directory: {os.getcwd()}\n", style="cyan")
# Virtual environment check
venv_path = Path(".venv")
venv_exists = venv_path.exists()
in_venv = (
os.environ.get('VIRTUAL_ENV') is not None or
(hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix) or
hasattr(sys, 'real_prefix')
)
if venv_exists and in_venv:
venv_style = "green"
venv_icon = ""
venv_status = "Ready & Active"
elif venv_exists:
venv_style = "yellow"
venv_icon = ""
venv_status = "Ready (Not Active)"
else:
venv_style = "red"
venv_icon = ""
venv_status = "Not Found"
info_text.append(f"Virtual Environment: {venv_icon} ", style=venv_style)
info_text.append(venv_status, style=f"bold {venv_style}")
console.print(Panel(info_text, title="📋 System Information", border_style="bright_blue"))
console.print()
# Course Navigation Panel
nav_text = Text()
nav_text.append("📖 Course Overview: ", style="dim")
nav_text.append("README.md\n", style="cyan underline")
nav_text.append("🎯 Detailed Guide: ", style="dim")
nav_text.append("COURSE_GUIDE.md\n", style="cyan underline")
nav_text.append("🚀 Start Here: ", style="dim")
nav_text.append("modules/setup/README.md", style="cyan underline")
console.print(Panel(nav_text, title="📋 Course Navigation", border_style="bright_green"))
console.print()
# Implementation status
modules = [
("Setup", "hello_tinytorch function", self.check_setup_status),
("Tensor", "basic tensor operations", self.check_tensor_status),
("MLP", "multi-layer perceptron (manual)", self.check_mlp_status),
("CNN", "convolutional networks (basic)", self.check_cnn_status),
("Data", "data loading pipeline", self.check_data_status),
("Training", "autograd engine & optimization", self.check_training_status),
("Profiling", "performance profiling", self.check_profiling_status),
("Compression", "model compression", self.check_compression_status),
("Kernels", "custom compute kernels", self.check_kernels_status),
("Benchmarking", "performance benchmarking", self.check_benchmarking_status),
("MLOps", "production monitoring", self.check_mlops_status),
]
status_table = Table(title="🚀 Module Implementation Status", show_header=True, header_style="bold blue")
status_table.add_column("ID", style="dim", width=3, justify="center")
status_table.add_column("Project", style="bold cyan", width=12)
status_table.add_column("Status", width=18, justify="center")
status_table.add_column("Description", style="dim", width=40)
for i, (name, desc, check_func) in enumerate(modules):
status_text = check_func()
if "" in status_text:
status_style = "[green]✅ Implemented[/green]"
elif "" in status_text:
status_style = "[red]❌ Not Implemented[/red]"
else:
status_style = "[yellow]⏳ Not Started[/yellow]"
status_table.add_row(str(i), name, status_style, desc)
console.print(status_table)
# Optionally show hello message or architecture
if args.hello and self.check_setup_status() == "✅ Implemented":
try:
from tinytorch.core.utils import hello_tinytorch
hello_text = Text(hello_tinytorch(), style="bold red")
console.print()
console.print(Panel(hello_text, style="bright_red", padding=(1, 2)))
except ImportError:
pass
if args.show_architecture:
console.print()
arch_tree = Tree("🏗️ TinyTorch System Architecture", style="bold blue")
cli_branch = arch_tree.add("CLI Interface", style="cyan")
cli_branch.add("tito/ - Command line tools", style="dim")
training_branch = arch_tree.add("Training Orchestration", style="cyan")
training_branch.add("trainer.py - Training loop management", style="dim")
core_branch = arch_tree.add("Core Components", style="cyan")
model_sub = core_branch.add("Model Definition", style="yellow")
model_sub.add("modules.py - Neural network layers", style="dim")
data_sub = core_branch.add("Data Pipeline", style="yellow")
data_sub.add("dataloader.py - Efficient data loading", style="dim")
opt_sub = core_branch.add("Optimization", style="yellow")
opt_sub.add("optimizer.py - SGD, Adam, etc.", style="dim")
autograd_branch = arch_tree.add("Automatic Differentiation Engine", style="cyan")
autograd_branch.add("autograd.py - Gradient computation", style="dim")
tensor_branch = arch_tree.add("Tensor Operations & Storage", style="cyan")
tensor_branch.add("tensor.py - Core tensor implementation", style="dim")
system_branch = arch_tree.add("System Tools", style="cyan")
system_branch.add("profiler.py - Performance measurement", style="dim")
system_branch.add("mlops.py - Production monitoring", style="dim")
console.print(Panel(arch_tree, title="🏗️ System Architecture", border_style="bright_blue"))
return 0
def print_banner(self):
banner_text = Text("Tiny🔥Torch: Build ML Systems from Scratch", style="bold red")
self.console.print(Panel(banner_text, style="bright_blue", padding=(1, 2)))
# The following check_* methods are ported from bin/tito.py
def check_setup_status(self):
try:
from tinytorch.core.utils import hello_tinytorch
return "✅ Implemented"
except ImportError:
return "❌ Not Implemented"
def check_tensor_status(self):
try:
from tinytorch.core.tensor import Tensor
t1 = Tensor([1, 2, 3])
t2 = Tensor([4, 5, 6])
_ = t1 + t2
return "✅ Implemented"
except (ImportError, NotImplementedError):
return "⏳ Not Started"
def check_mlp_status(self):
try:
from tinytorch.core.modules import MLP
mlp = MLP(input_size=10, hidden_size=5, output_size=2)
from tinytorch.core.tensor import Tensor
x = Tensor([[1,2,3,4,5,6,7,8,9,10]])
_ = mlp(x)
return "✅ Implemented"
except (ImportError, NotImplementedError, AttributeError):
return "⏳ Not Started"
def check_cnn_status(self):
try:
from tinytorch.core.modules import Conv2d
conv = Conv2d(in_channels=3, out_channels=16, kernel_size=3)
from tinytorch.core.tensor import Tensor
x = Tensor([[0]*32]*32)
_ = conv(x)
return "✅ Implemented"
except (ImportError, NotImplementedError, AttributeError):
return "⏳ Not Started"
def check_data_status(self):
try:
from tinytorch.core.dataloader import DataLoader
import numpy as np
data = [(np.random.randn(3,32,32), 0) for _ in range(10)]
loader = DataLoader(data, batch_size=2, shuffle=True)
_ = next(iter(loader))
return "✅ Implemented"
except (ImportError, NotImplementedError, AttributeError, StopIteration):
return "⏳ Not Started"
def check_training_status(self):
try:
from tinytorch.core.optimizer import SGD
from tinytorch.core.tensor import Tensor
t = Tensor([1.0,2.0,3.0], requires_grad=True)
optimizer = SGD([t], lr=0.01)
t.backward()
optimizer.step()
return "✅ Implemented"
except (ImportError, NotImplementedError, AttributeError):
return "⏳ Not Started"
def check_profiling_status(self):
try:
from tinytorch.core.profiler import Profiler
profiler = Profiler()
profiler.start("test")
profiler.end("test")
return "✅ Implemented"
except (ImportError, NotImplementedError, AttributeError):
return "⏳ Not Started"
def check_compression_status(self):
try:
from tinytorch.core.compression import Pruner
pruner = Pruner(sparsity=0.5)
return "✅ Implemented"
except (ImportError, NotImplementedError, AttributeError):
return "⏳ Not Started"
def check_kernels_status(self):
try:
from tinytorch.core.kernels import optimized_matmul
import numpy as np
a = np.random.randn(3,3)
b = np.random.randn(3,3)
_ = optimized_matmul(a, b)
return "✅ Implemented"
except (ImportError, NotImplementedError, AttributeError):
return "⏳ Not Started"
def check_benchmarking_status(self):
try:
from tinytorch.core.benchmark import Benchmark
benchmark = Benchmark()
return "✅ Implemented"
except (ImportError, NotImplementedError, AttributeError):
return "⏳ Not Started"
def check_mlops_status(self):
try:
from tinytorch.core.mlops import ModelMonitor
from tinytorch.core.tensor import Tensor
monitor = ModelMonitor(model=None, baseline_metrics={})
test_inputs = Tensor([1.0,2.0,3.0])
test_predictions = Tensor([0.5,0.8,0.2])
monitor.log_prediction(test_inputs, test_predictions)
return "✅ Implemented"
except (ImportError, NotImplementedError, AttributeError, TypeError):
return "⏳ Not Started"

52
tito/commands/jupyter.py Normal file
View File

@@ -0,0 +1,52 @@
"""
Jupyter command for TinyTorch CLI: starts Jupyter notebook server.
"""
import subprocess
from argparse import ArgumentParser, Namespace
from rich.panel import Panel
from .base import BaseCommand
class JupyterCommand(BaseCommand):
@property
def name(self) -> str:
return "jupyter"
@property
def description(self) -> str:
return "Start Jupyter notebook server"
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--notebook", action="store_true", help="Start classic notebook")
parser.add_argument("--lab", action="store_true", help="Start JupyterLab")
parser.add_argument("--port", type=int, default=8888, help="Port to run on (default: 8888)")
def run(self, args: Namespace) -> int:
console = self.console
console.print(Panel("📓 Jupyter Notebook Server",
title="Interactive Development", border_style="bright_green"))
# Determine which Jupyter to start
if args.lab:
cmd = ["jupyter", "lab", "--port", str(args.port)]
console.print(f"🚀 Starting JupyterLab on port {args.port}...")
else:
cmd = ["jupyter", "notebook", "--port", str(args.port)]
console.print(f"🚀 Starting Jupyter Notebook on port {args.port}...")
console.print("💡 Open your browser to the URL shown above")
console.print("📁 Navigate to your module's notebook directory")
console.print("🔄 Press Ctrl+C to stop the server")
try:
subprocess.run(cmd)
except KeyboardInterrupt:
console.print("\n🛑 Jupyter server stopped")
except FileNotFoundError:
console.print(Panel("[red]❌ Jupyter not found. Install with: pip install jupyter[/red]",
title="Error", border_style="red"))
return 1
return 0

77
tito/commands/nbdev.py Normal file
View File

@@ -0,0 +1,77 @@
"""
nbdev command for TinyTorch CLI: runs nbdev commands for notebook development.
"""
import subprocess
from argparse import ArgumentParser, Namespace
from rich.panel import Panel
from .base import BaseCommand
class NbdevCommand(BaseCommand):
@property
def name(self) -> str:
return "nbdev"
@property
def description(self) -> str:
return "nbdev notebook development commands"
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--export", action="store_true", help="Export notebooks to Python package")
parser.add_argument("--build-docs", action="store_true", help="Build documentation from notebooks")
parser.add_argument("--test", action="store_true", help="Run notebook tests")
parser.add_argument("--clean", action="store_true", help="Clean notebook outputs")
def run(self, args: Namespace) -> int:
console = self.console
console.print(Panel("📓 nbdev Notebook Development",
title="Notebook Tools", border_style="bright_cyan"))
if args.export:
# Use the sync command logic
from .sync import SyncCommand
sync_cmd = SyncCommand(self.config)
sync_args = ArgumentParser()
sync_cmd.add_arguments(sync_args)
sync_args = sync_args.parse_args([]) # Empty args for sync
return sync_cmd.run(sync_args)
elif args.build_docs:
console.print("📚 Building documentation from notebooks...")
result = subprocess.run(["nbdev_docs"], capture_output=True, text=True)
if result.returncode == 0:
console.print(Panel("[green]✅ Documentation built successfully![/green]",
title="Docs Success", border_style="green"))
else:
console.print(Panel(f"[red]❌ Docs build failed: {result.stderr}[/red]",
title="Docs Error", border_style="red"))
return result.returncode
elif args.test:
console.print("🧪 Running notebook tests...")
result = subprocess.run(["nbdev_test"], capture_output=True, text=True)
if result.returncode == 0:
console.print(Panel("[green]✅ Notebook tests passed![/green]",
title="Test Success", border_style="green"))
else:
console.print(Panel(f"[red]❌ Notebook tests failed: {result.stderr}[/red]",
title="Test Error", border_style="red"))
return result.returncode
elif args.clean:
console.print("🧹 Cleaning notebook outputs...")
result = subprocess.run(["nbdev_clean"], capture_output=True, text=True)
if result.returncode == 0:
console.print(Panel("[green]✅ Notebook outputs cleaned![/green]",
title="Clean Success", border_style="green"))
else:
console.print(Panel(f"[red]❌ Clean failed: {result.stderr}[/red]",
title="Clean Error", border_style="red"))
return result.returncode
else:
console.print(Panel("[yellow]⚠️ No nbdev action specified. Use --export, --build-docs, --test, or --clean[/yellow]",
title="No Action", border_style="yellow"))
return 1

98
tito/commands/reset.py Normal file
View File

@@ -0,0 +1,98 @@
"""
Reset command for TinyTorch CLI: resets tinytorch package to clean state.
"""
import shutil
from argparse import ArgumentParser, Namespace
from pathlib import Path
from rich.panel import Panel
from rich.text import Text
from .base import BaseCommand
class ResetCommand(BaseCommand):
@property
def name(self) -> str:
return "reset"
@property
def description(self) -> str:
return "Reset tinytorch package to clean state"
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--force", action="store_true", help="Skip confirmation prompt")
def run(self, args: Namespace) -> int:
console = self.console
console.print(Panel("🔄 Resetting TinyTorch Package",
title="Package Reset", border_style="bright_yellow"))
tinytorch_path = Path("tinytorch")
if not tinytorch_path.exists():
console.print(Panel("[yellow]⚠️ TinyTorch package directory not found. Nothing to reset.[/yellow]",
title="Nothing to Reset", border_style="yellow"))
return 0
# Ask for confirmation unless --force is used
if not (hasattr(args, 'force') and args.force):
console.print("\n[yellow]This will remove all exported Python files from the tinytorch package.[/yellow]")
console.print("[yellow]Notebooks in modules/ will be preserved.[/yellow]\n")
try:
response = input("Are you sure you want to reset? (y/N): ").strip().lower()
if response not in ['y', 'yes']:
console.print(Panel("[cyan]Reset cancelled.[/cyan]",
title="Cancelled", border_style="cyan"))
return 0
except KeyboardInterrupt:
console.print(Panel("[cyan]Reset cancelled.[/cyan]",
title="Cancelled", border_style="cyan"))
return 0
reset_text = Text()
reset_text.append("🗑️ Removing generated files:\n", style="bold red")
# Remove generated Python files but keep __init__.py files and directory structure
files_removed = 0
for py_file in tinytorch_path.rglob("*.py"):
if py_file.name != "__init__.py":
# Check if it's an auto-generated file
try:
with open(py_file, 'r') as f:
first_line = f.readline().strip()
if "AUTOGENERATED" in first_line or "_modidx.py" in str(py_file):
rel_path = py_file.relative_to(tinytorch_path)
reset_text.append(f" 🗑️ tinytorch/{rel_path}\n", style="red")
py_file.unlink()
files_removed += 1
except Exception:
# If we can't read the file, skip it for safety
pass
# Remove __pycache__ directories
for pycache in tinytorch_path.rglob("__pycache__"):
if pycache.is_dir():
reset_text.append(f" 🗑️ {pycache}/\n", style="red")
shutil.rmtree(pycache)
# Remove .pytest_cache if it exists
pytest_cache = Path(".pytest_cache")
if pytest_cache.exists():
reset_text.append(f" 🗑️ .pytest_cache/\n", style="red")
shutil.rmtree(pytest_cache)
if files_removed > 0:
reset_text.append(f"\n✅ Reset complete! Removed {files_removed} generated files.\n", style="bold green")
reset_text.append("\n💡 Next steps:\n", style="bold yellow")
reset_text.append(" • Run: tito sync - Re-export notebooks\n", style="white")
reset_text.append(" • Run: tito sync --module setup - Export specific module\n", style="white")
reset_text.append(" • Run: tito test --all - Test everything\n", style="white")
console.print(Panel(reset_text, title="Reset Complete", border_style="green"))
else:
console.print(Panel("[yellow]No generated files found to remove.[/yellow]",
title="Nothing to Reset", border_style="yellow"))
return 0

31
tito/commands/status.py Normal file
View File

@@ -0,0 +1,31 @@
"""
Status command for TinyTorch CLI: checks module status.
"""
from argparse import ArgumentParser, Namespace
from rich.panel import Panel
from rich.text import Text
from .base import BaseCommand
class StatusCommand(BaseCommand):
@property
def name(self) -> str:
return "status"
@property
def description(self) -> str:
return "Check module status"
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--module", required=True, help="Module to check")
def run(self, args: Namespace) -> int:
console = self.console
status_text = Text()
status_text.append(f"📊 Status for module: {args.module}\n\n", style="bold cyan")
status_text.append("🚧 Status system not yet implemented.", style="yellow")
console.print(Panel(status_text, title="Module Status", border_style="bright_yellow"))
return 0

33
tito/commands/submit.py Normal file
View File

@@ -0,0 +1,33 @@
"""
Submit command for TinyTorch CLI: submits module for grading.
"""
from argparse import ArgumentParser, Namespace
from rich.panel import Panel
from rich.text import Text
from .base import BaseCommand
class SubmitCommand(BaseCommand):
@property
def name(self) -> str:
return "submit"
@property
def description(self) -> str:
return "Submit module"
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--module", required=True, help="Module to submit")
def run(self, args: Namespace) -> int:
console = self.console
submit_text = Text()
submit_text.append(f"📤 Submitting module: {args.module}\n\n", style="bold cyan")
submit_text.append("🚧 Submission system not yet implemented.\n\n", style="yellow")
submit_text.append("For now, make sure all tests pass with:\n", style="dim")
submit_text.append(f" python -m pytest modules/{args.module}/tests/test_{args.module}.py -v", style="bold white")
console.print(Panel(submit_text, title="Module Submission", border_style="bright_yellow"))
return 0

96
tito/commands/sync.py Normal file
View File

@@ -0,0 +1,96 @@
"""
Sync command for TinyTorch CLI: exports notebook code to Python package using nbdev.
"""
import subprocess
import sys
from argparse import ArgumentParser, Namespace
from pathlib import Path
from rich.panel import Panel
from rich.text import Text
from .base import BaseCommand
class SyncCommand(BaseCommand):
@property
def name(self) -> str:
return "sync"
@property
def description(self) -> str:
return "Export notebook code to Python package"
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--module", help="Sync specific module (e.g., setup, tensor)")
def run(self, args: Namespace) -> int:
console = self.console
# Determine what to sync
if hasattr(args, 'module') and args.module:
module_path = f"modules/{args.module}"
if not Path(module_path).exists():
console.print(Panel(f"[red]❌ Module '{args.module}' not found at {module_path}[/red]",
title="Module Not Found", border_style="red"))
return 1
console.print(Panel(f"🔄 Synchronizing Module: {args.module}",
title="nbdev Export", border_style="bright_cyan"))
console.print(f"🔄 Exporting {args.module} notebook to tinytorch package...")
# Use nbdev_export with --path for specific module
cmd = ["nbdev_export", "--path", module_path]
else:
console.print(Panel("🔄 Synchronizing All Notebooks to Package",
title="nbdev Export", border_style="bright_cyan"))
console.print("🔄 Exporting all notebook code to tinytorch package...")
# Use nbdev_export for all modules
cmd = ["nbdev_export"]
try:
result = subprocess.run(cmd, capture_output=True, text=True, cwd=Path.cwd())
if result.returncode == 0:
console.print(Panel("[green]✅ Successfully exported notebook code to tinytorch package![/green]",
title="Export Success", border_style="green"))
# Show what was exported
exports_text = Text()
exports_text.append("📦 Exported modules:\n", style="bold cyan")
# Check for exported files
tinytorch_path = Path("tinytorch")
if tinytorch_path.exists():
for py_file in tinytorch_path.rglob("*.py"):
if py_file.name != "__init__.py" and py_file.stat().st_size > 100: # Non-empty files
rel_path = py_file.relative_to(tinytorch_path)
exports_text.append(f" ✅ tinytorch/{rel_path}\n", style="green")
exports_text.append("\n💡 Next steps:\n", style="bold yellow")
exports_text.append(" • Run: tito test --module setup\n", style="white")
exports_text.append(" • Or: tito test --all\n", style="white")
console.print(Panel(exports_text, title="Export Summary", border_style="bright_green"))
else:
error_msg = result.stderr.strip() if result.stderr else "Unknown error"
console.print(Panel(f"[red]❌ Export failed:\n{error_msg}[/red]",
title="Export Error", border_style="red"))
# Helpful error guidance
help_text = Text()
help_text.append("💡 Common issues:\n", style="bold yellow")
help_text.append(" • Missing #| default_exp directive in notebook\n", style="white")
help_text.append(" • Syntax errors in exported code\n", style="white")
help_text.append(" • Missing settings.ini configuration\n", style="white")
help_text.append("\n🔧 Run 'tito doctor' for detailed diagnosis", style="cyan")
console.print(Panel(help_text, title="Troubleshooting", border_style="yellow"))
return result.returncode
except FileNotFoundError:
console.print(Panel("[red]❌ nbdev not found. Install with: pip install nbdev[/red]",
title="Missing Dependency", border_style="red"))
return 1

124
tito/commands/test.py Normal file
View File

@@ -0,0 +1,124 @@
"""
Test command for TinyTorch CLI: runs module tests using pytest.
"""
import subprocess
import sys
from argparse import ArgumentParser, Namespace
from pathlib import Path
from rich.panel import Panel
from rich.text import Text
from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn
from .base import BaseCommand
class TestCommand(BaseCommand):
@property
def name(self) -> str:
return "test"
@property
def description(self) -> str:
return "Run module tests"
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--module", help="Module to test")
parser.add_argument("--all", action="store_true", help="Run all module tests")
def validate_args(self, args: Namespace) -> None:
"""Validate test command arguments."""
if not args.all and not args.module:
raise ValueError("Must specify either --module or --all")
def run(self, args: Namespace) -> int:
console = self.console
valid_modules = ["setup", "tensor", "activations", "layers", "cnn", "data", "training",
"profiling", "compression", "kernels", "benchmarking", "mlops"]
if args.all:
# Run all tests with progress bar
failed_modules = []
# Count existing test files in modules/{module}/tests/
existing_tests = []
for module in valid_modules:
test_path = Path(f"modules/{module}/tests/test_{module}.py")
if test_path.exists():
existing_tests.append(module)
console.print(Panel(f"🧪 Running tests for {len(existing_tests)} modules",
title="Test Suite", border_style="bright_cyan"))
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
BarColumn(),
TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
console=console
) as progress:
task = progress.add_task("Running tests...", total=len(existing_tests))
for module in existing_tests:
progress.update(task, description=f"Testing {module}...")
test_file = f"modules/{module}/tests/test_{module}.py"
result = subprocess.run([sys.executable, "-m", "pytest", test_file, "-v"],
capture_output=True, text=True)
if result.returncode != 0:
failed_modules.append(module)
console.print(f"[red]❌ {module} tests failed[/red]")
else:
console.print(f"[green]✅ {module} tests passed[/green]")
progress.advance(task)
# Results summary
if failed_modules:
console.print(Panel(f"[red]❌ Failed modules: {', '.join(failed_modules)}[/red]",
title="Test Results", border_style="red"))
return 1
else:
console.print(Panel("[green]✅ All tests passed![/green]",
title="Test Results", border_style="green"))
return 0
elif args.module in valid_modules:
# Run specific module tests
test_file = f"modules/{args.module}/tests/test_{args.module}.py"
console.print(Panel(f"🧪 Running tests for module: [bold cyan]{args.module}[/bold cyan]",
title="Single Module Test", border_style="bright_cyan"))
if not Path(test_file).exists():
console.print(Panel(f"[yellow]⏳ Test file not found: {test_file}\n"
f"Module '{args.module}' may not be implemented yet.[/yellow]",
title="Test Not Found", border_style="yellow"))
return 1
console.print(f"Running: pytest {test_file} -v")
result = subprocess.run([sys.executable, "-m", "pytest", test_file, "-v"],
capture_output=True, text=True)
# Print test output
if result.stdout:
console.print(result.stdout)
if result.stderr:
console.print(result.stderr)
if result.returncode == 0:
console.print(Panel("[green]✅ All tests passed for {}![/green]".format(args.module),
title="Test Results", border_style="green"))
else:
console.print(Panel("[red]❌ Some tests failed for {}[/red]".format(args.module),
title="Test Results", border_style="red"))
return result.returncode
else:
console.print(Panel(f"[red]❌ Invalid module: {args.module}\n"
f"Valid modules: {', '.join(valid_modules)}[/red]",
title="Invalid Module", border_style="red"))
return 1

View File

@@ -20,6 +20,15 @@ from .core.console import get_console, print_banner, print_error
from .core.exceptions import TinyTorchCLIError
from .commands.base import BaseCommand
from .commands.notebooks import NotebooksCommand
from .commands.info import InfoCommand
from .commands.test import TestCommand
from .commands.doctor import DoctorCommand
from .commands.sync import SyncCommand
from .commands.reset import ResetCommand
from .commands.jupyter import JupyterCommand
from .commands.nbdev import NbdevCommand
from .commands.submit import SubmitCommand
from .commands.status import StatusCommand
# Configure logging
logging.basicConfig(
@@ -42,7 +51,15 @@ class TinyTorchCLI:
self.console = get_console()
self.commands: Dict[str, Type[BaseCommand]] = {
'notebooks': NotebooksCommand,
# Add other commands here as we refactor them
'info': InfoCommand,
'test': TestCommand,
'doctor': DoctorCommand,
'sync': SyncCommand,
'reset': ResetCommand,
'jupyter': JupyterCommand,
'nbdev': NbdevCommand,
'submit': SubmitCommand,
'status': StatusCommand,
}
def create_parser(self) -> argparse.ArgumentParser: