Files
TinyTorch/tinytorch/__init__.py
Vijay Janapa Reddi 00019408b0 Add tito verify command and expand package exports
- Add tito verify command for setup validation and community registration
- Fix broken Dense import in tinytorch/__init__.py (class does not exist)
- Clean up layers.py __all__ to remove non-existent Dense and internal constants
- Add commonly used components to top-level exports:
  - AvgPool2d, BatchNorm2d (spatial operations)
  - RandomHorizontalFlip, RandomCrop, Compose (data augmentation)
- Total exports now 41 (was 35)
2025-12-02 15:56:32 -05:00

100 lines
3.8 KiB
Python
Generated

"""
TinyTorch - Build ML Systems From First Principles
A complete educational ML framework for learning neural network internals
by implementing everything from scratch.
Top-level exports provide easy access to commonly used components.
For advanced modules (optimization, profiling), import from submodules:
from tinytorch.profiling.profiler import Profiler
from tinytorch.optimization.quantization import quantize_int8
from tinytorch.generation.kv_cache import enable_kv_cache
"""
# Package version (PEP 440 single-source); bump here on each release.
__version__ = "0.1.0"
# ============================================================================
# Core Functionality (Modules 01-07)
# ============================================================================
from .core.tensor import Tensor
from .core.activations import Sigmoid, ReLU, Tanh, GELU, Softmax
from .core.layers import Layer, Linear, Dropout
from .core.losses import MSELoss, CrossEntropyLoss, BinaryCrossEntropyLoss
from .core.optimizers import SGD, Adam, AdamW
from .core.training import Trainer, CosineSchedule, clip_grad_norm
# ============================================================================
# Data Loading (Module 08)
# ============================================================================
from .data.loader import Dataset, TensorDataset, DataLoader
from .data.loader import RandomHorizontalFlip, RandomCrop, Compose # Augmentation
# ============================================================================
# Spatial Operations (Module 09)
# ============================================================================
from .core.spatial import Conv2d, MaxPool2d, AvgPool2d, BatchNorm2d
# ============================================================================
# Text Processing (Modules 10-11)
# ============================================================================
from .text.tokenization import Tokenizer, CharTokenizer, BPETokenizer
from .text.embeddings import Embedding, PositionalEncoding, EmbeddingLayer
# ============================================================================
# Attention & Transformers (Modules 12-13)
# ============================================================================
from .core.attention import MultiHeadAttention, scaled_dot_product_attention
from .models.transformer import LayerNorm, MLP, TransformerBlock, GPT
# ============================================================================
# Enable Autograd (CRITICAL - must happen after imports)
# ============================================================================
import os
from .core.autograd import enable_autograd

# CLI tools set TINYTORCH_QUIET (to "1"/"true"/"yes") so importing the
# package does not print the autograd activation banner.
_quiet_env = os.environ.get('TINYTORCH_QUIET', '').lower()
enable_autograd(quiet=_quiet_env in {'1', 'true', 'yes'})
# ============================================================================
# Public API
# ============================================================================
# Public API: the 41 names re-exported by `from tinytorch import *`.
# Kept in the same order as the imports above; advanced modules
# (optimization, profiling, generation) are intentionally excluded —
# import those from their submodules directly.
__all__ = [
    '__version__',
    # Tensor core
    'Tensor',
    # Activation functions
    'Sigmoid', 'ReLU', 'Tanh', 'GELU', 'Softmax',
    # Layers
    'Layer', 'Linear', 'Dropout',
    # Loss functions
    'MSELoss', 'CrossEntropyLoss', 'BinaryCrossEntropyLoss',
    # Optimizers
    'SGD', 'Adam', 'AdamW',
    # Training utilities
    'Trainer', 'CosineSchedule', 'clip_grad_norm',
    # Datasets and loading
    'Dataset', 'TensorDataset', 'DataLoader',
    # Data augmentation
    'RandomHorizontalFlip', 'RandomCrop', 'Compose',
    # Convolutional / spatial ops
    'Conv2d', 'MaxPool2d', 'AvgPool2d', 'BatchNorm2d',
    # Tokenization
    'Tokenizer', 'CharTokenizer', 'BPETokenizer',
    # Embeddings
    'Embedding', 'PositionalEncoding', 'EmbeddingLayer',
    # Attention
    'MultiHeadAttention', 'scaled_dot_product_attention',
    # Transformer models
    'LayerNorm', 'MLP', 'TransformerBlock', 'GPT',
]