Mirror of https://github.com/MLSysBook/TinyTorch.git, synced 2025-12-05 19:17:52 -06:00
Fix tagline consistency: Don't import it. Build it.
- Remove 'just' from all instances of the tagline
- Update banner to lead with tagline
- Consistent branding across docs, CLI, and demos
Makefile (new file, 162 lines)
@@ -0,0 +1,162 @@
# TinyTorch Makefile
# ==================
# Simple commands for common development tasks.
#
# Usage:
#   make help       # Show all commands
#   make test       # Run all tests
#   make preflight  # Quick verification before work
#   make release    # Full release validation
#

.PHONY: help test preflight release clean lint

# Default target
help:
	@echo ""
	@echo "🔥 TinyTorch Development Commands"
	@echo "=================================="
	@echo ""
	@echo "Quick Commands:"
	@echo "  make preflight       Quick check (~1 min) - run before starting work"
	@echo "  make test            Run main test suite"
	@echo "  make test-quick      Fast smoke tests only (~30s)"
	@echo ""
	@echo "Release Validation:"
	@echo "  make release         Full release validation (~10 min)"
	@echo "  make release-check   Pre-release checklist"
	@echo ""
	@echo "Development:"
	@echo "  make lint            Check code style"
	@echo "  make clean           Remove generated files"
	@echo "  make setup           Install development dependencies"
	@echo ""
	@echo "Testing Levels:"
	@echo "  make test-e2e-quick     E2E quick tests (~30s)"
	@echo "  make test-e2e-module    E2E module flow tests (~2min)"
	@echo "  make test-e2e-full      E2E complete journey (~10min)"
	@echo "  make test-milestones    Milestone learning tests (~90s)"
	@echo ""

# ============================================================================
# QUICK COMMANDS (daily use)
# ============================================================================

# Quick preflight check - run this before starting work
preflight:
	@chmod +x scripts/preflight.sh
	@./scripts/preflight.sh

# Standard test suite
test:
	python -m pytest tests/ -v --ignore=tests/e2e --ignore=tests/milestones -q

# Fast smoke tests only
test-quick:
	python -m pytest tests/e2e/test_user_journey.py -k quick -v

# ============================================================================
# E2E TESTING (by level)
# ============================================================================

# E2E quick verification (~30 seconds)
test-e2e-quick:
	python -m pytest tests/e2e/test_user_journey.py -k quick -v

# E2E module workflow tests (~2 minutes)
test-e2e-module:
	python -m pytest tests/e2e/test_user_journey.py -k module_flow -v

# E2E milestone tests
test-e2e-milestone:
	python -m pytest tests/e2e/test_user_journey.py -k milestone_flow -v

# E2E complete journey (~10 minutes)
test-e2e-full:
	python -m pytest tests/e2e/test_user_journey.py -v

# ============================================================================
# SPECIALIZED TESTS
# ============================================================================

# Milestone learning verification (actually trains models, ~90 seconds)
test-milestones:
	python -m pytest tests/milestones/test_learning_verification.py -v

# CLI tests
test-cli:
	python -m pytest tests/cli/ -v

# Module-specific tests
test-module-%:
	python -m pytest tests/$*/ -v

# ============================================================================
# RELEASE VALIDATION
# ============================================================================

# Full release validation - run this before any release
release:
	@chmod +x scripts/preflight.sh
	@./scripts/preflight.sh --full

# Pre-release checklist (manual verification)
release-check:
	@echo ""
	@echo "📋 Pre-Release Checklist"
	@echo "========================"
	@echo ""
	@echo "Run each of these commands and verify they pass:"
	@echo ""
	@echo "  1. make preflight          # Quick sanity check"
	@echo "  2. make test-e2e-full      # E2E user journey"
	@echo "  3. make test-milestones    # ML actually learns"
	@echo "  4. make test               # Full test suite"
	@echo ""
	@echo "Manual checks:"
	@echo "  □ README.md is up to date"
	@echo "  □ Version number bumped in pyproject.toml"
	@echo "  □ CHANGELOG updated"
	@echo "  □ Git status is clean"
	@echo ""
	@echo "Then run: make release"
	@echo ""

# ============================================================================
# DEVELOPMENT UTILITIES
# ============================================================================

# Install development dependencies
setup:
	pip install -e ".[dev]"
	pip install pytest pytest-cov rich

# Lint code
lint:
	python -m py_compile tito/main.py
	@echo "✓ No syntax errors"

# Clean generated files
clean:
	find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true
	find . -type f -name "*.pyc" -delete 2>/dev/null || true
	find . -type d -name ".pytest_cache" -exec rm -rf {} + 2>/dev/null || true
	find . -type d -name "*.egg-info" -exec rm -rf {} + 2>/dev/null || true
	@echo "✓ Cleaned generated files"

# ============================================================================
# CI/CD TARGETS (used by GitHub Actions)
# ============================================================================

# CI smoke test (fast, for every commit)
ci-smoke:
	python -m pytest tests/e2e/test_user_journey.py -k quick --tb=short -q

# CI full test (for PRs)
ci-full:
	python -m pytest tests/ -v --ignore=tests/milestones --tb=short

# CI release validation (for releases)
ci-release:
	@./scripts/preflight.sh --full

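Aside: test-module-% above is a GNU Make pattern rule, so per-module test runs need no dedicated targets. A quick sketch of how it expands; the directory names are assumptions taken from the preflight script and test-cli target elsewhere in this commit:

    # hypothetical invocations, not part of the Makefile itself
    make test-module-cli          # runs: python -m pytest tests/cli/ -v
    make test-module-01_tensor    # runs: python -m pytest tests/01_tensor/ -v
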
docs/_static/demos/tapes/05-logo.tape (vendored, 2 lines changed)
@@ -89,5 +89,5 @@ Sleep 4s # Longer pause for the final message
 # CLOSING: Reinforce the core message
 # ==============================================================================

-Type "# Don't just import it. Build it. ✨🔥"
+Type "# Don't import it. Build it. ✨🔥"
 Sleep 3s
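Aside: .tape files with Type/Sleep directives like this one are typically rendered with the charmbracelet vhs tool; that is an assumption, since the renderer is not named in this commit:

    # hypothetical rendering command
    vhs docs/_static/demos/tapes/05-logo.tape
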
docs/_static/wip-banner.js (vendored, 6 lines changed)
@@ -15,9 +15,11 @@ document.addEventListener('DOMContentLoaded', function() {
 <span>Tiny🔥Torch</span>
 </div>
 <div class="wip-banner-description">
-Hands-on labs for <a href="https://mlsysbook.ai" target="_blank">MLSysBook</a>
+Don't import it. Build it.
 <span class="separator">•</span>
-<a href="https://tinytorch.ai/join" target="_blank">Join the Community</a>
+<a href="https://mlsysbook.ai" target="_blank">MLSysBook</a>
+<span class="separator">•</span>
+<a href="https://tinytorch.ai/join" target="_blank">Join Community</a>
 <span class="separator">•</span>
 <a href="https://github.com/harvard-edge/TinyTorch" target="_blank">GitHub ⭐</a>
 </div>
@@ -10,7 +10,7 @@ Hands-on labs for the <span style="font-weight: 600; color: #475569;">Machine Le
 </p>

 <h2 style="background: linear-gradient(135deg, #E74C3C 0%, #E67E22 50%, #F39C12 100%); -webkit-background-clip: text; -webkit-text-fill-color: transparent; background-clip: text; text-align: center; font-size: 2.5rem; margin: 1.5rem 0 1rem 0; font-weight: 700;">
-Don't just import it. Build it.
+Don't import it. Build it.
 </h2>

 <!-- Enhanced description: Added "machine learning (ML)" clarification and "under the hood"
scripts/preflight.sh (new executable file, 265 lines)
@@ -0,0 +1,265 @@
#!/bin/bash
#
# TinyTorch Pre-flight Check
# ===========================
# Run this before releases to verify everything works.
#
# Usage:
#   ./scripts/preflight.sh          # Quick check (~1 min)
#   ./scripts/preflight.sh --full   # Full validation (~10 min)
#
# What professionals call this:
#   - "Smoke tests" (quick)
#   - "Release validation" (full)
#   - "Preflight checks" (before deployment)
#

set -e  # Exit on first error

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
BOLD='\033[1m'

# Counters
PASSED=0
FAILED=0
SKIPPED=0

# Print functions
print_header() {
    echo ""
    echo -e "${BOLD}${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
    echo -e "${BOLD}${BLUE}  $1${NC}"
    echo -e "${BOLD}${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
}

print_section() {
    echo ""
    echo -e "${CYAN}▶ $1${NC}"
}

# Counters use plain assignment: ((VAR++)) evaluates to 0 on the first call,
# which returns a non-zero status and would abort the script under `set -e`.
check_pass() {
    echo -e "  ${GREEN}✓${NC} $1"
    PASSED=$((PASSED + 1))
}

check_fail() {
    echo -e "  ${RED}✗${NC} $1"
    FAILED=$((FAILED + 1))
}

check_skip() {
    echo -e "  ${YELLOW}○${NC} $1 (skipped)"
    SKIPPED=$((SKIPPED + 1))
}

# Determine test level
FULL_TEST=false
if [[ "$1" == "--full" ]]; then
    FULL_TEST=true
fi

# Start
print_header "🔥 TinyTorch Pre-flight Check"
echo ""
if $FULL_TEST; then
    echo -e "${YELLOW}Mode: FULL VALIDATION (~10 minutes)${NC}"
else
    echo -e "${YELLOW}Mode: QUICK CHECK (~1 minute)${NC}"
    echo -e "${YELLOW}      Run with --full for complete validation${NC}"
fi

START_TIME=$(date +%s)

# ============================================================================
# PHASE 1: Environment Checks
# ============================================================================
print_section "Phase 1: Environment"

# Check Python version
PYTHON_VERSION=$(python3 --version 2>&1)
if [[ $? -eq 0 ]]; then
    check_pass "Python installed: $PYTHON_VERSION"
else
    check_fail "Python not found"
fi

# Check we're in the right directory
if [[ -f "pyproject.toml" && -d "tito" ]]; then
    check_pass "In TinyTorch project root"
else
    check_fail "Not in TinyTorch project root"
    echo -e "${RED}   Run from the TinyTorch directory${NC}"
    exit 1
fi

# Check virtual environment
if [[ -n "$VIRTUAL_ENV" ]]; then
    check_pass "Virtual environment active: $(basename $VIRTUAL_ENV)"
else
    check_skip "No virtual environment active"
fi

# ============================================================================
# PHASE 2: Package Structure
# ============================================================================
print_section "Phase 2: Package Structure"

# Check critical directories
for dir in "src" "modules" "milestones" "tito" "tinytorch" "tests"; do
    if [[ -d "$dir" ]]; then
        check_pass "Directory exists: $dir/"
    else
        check_fail "Directory missing: $dir/"
    fi
done

# Check critical files
for file in "pyproject.toml" "requirements.txt" "README.md"; do
    if [[ -f "$file" ]]; then
        check_pass "File exists: $file"
    else
        check_fail "File missing: $file"
    fi
done

# ============================================================================
# PHASE 3: TinyTorch Package Import
# ============================================================================
print_section "Phase 3: Package Import"

# Test tinytorch import
if python3 -c "import tinytorch" 2>/dev/null; then
    check_pass "import tinytorch"
else
    check_fail "import tinytorch"
fi

# Test core imports (these may fail if modules not exported)
for module in "Tensor" "ReLU" "Linear"; do
    if python3 -c "from tinytorch import $module" 2>/dev/null; then
        check_pass "from tinytorch import $module"
    else
        check_skip "from tinytorch import $module (not exported yet)"
    fi
done

# ============================================================================
# PHASE 4: CLI Commands (tito)
# ============================================================================
print_section "Phase 4: CLI Commands"

# Test basic tito commands
if python3 -m tito.main --version >/dev/null 2>&1; then
    check_pass "tito --version"
else
    check_fail "tito --version"
fi

if python3 -m tito.main --help >/dev/null 2>&1; then
    check_pass "tito --help"
else
    check_fail "tito --help"
fi

# Test key subcommands
for cmd in "module status" "milestones list --simple" "system info"; do
    if python3 -m tito.main $cmd >/dev/null 2>&1; then
        check_pass "tito $cmd"
    else
        check_fail "tito $cmd"
    fi
done

# ============================================================================
# PHASE 5: E2E Quick Tests (pytest)
# ============================================================================
print_section "Phase 5: E2E Quick Tests"

if [[ -f "tests/e2e/test_user_journey.py" ]]; then
    if python3 -m pytest tests/e2e/test_user_journey.py -k quick -q --tb=no 2>/dev/null; then
        check_pass "E2E quick tests passed"
    else
        check_fail "E2E quick tests failed"
    fi
else
    check_skip "E2E tests not found"
fi

# ============================================================================
# PHASE 6: Full Validation (if --full)
# ============================================================================
if $FULL_TEST; then
    print_section "Phase 6: Module Tests"

    # Run module 01 tests
    if python3 -m pytest tests/01_tensor/ -q --tb=no 2>/dev/null; then
        check_pass "Module 01 (Tensor) tests"
    else
        check_fail "Module 01 (Tensor) tests"
    fi

    # Run CLI tests
    if python3 -m pytest tests/cli/ -q --tb=no 2>/dev/null; then
        check_pass "CLI tests"
    else
        check_fail "CLI tests"
    fi

    print_section "Phase 7: Module Flow Tests"

    if python3 -m pytest tests/e2e/test_user_journey.py -k module_flow -q --tb=short 2>/dev/null; then
        check_pass "Module flow E2E tests"
    else
        check_fail "Module flow E2E tests"
    fi

    print_section "Phase 8: Milestone Verification"

    # Check if milestone tests exist and can run
    if [[ -f "tests/milestones/test_learning_verification.py" ]]; then
        echo -e "  ${YELLOW}⏳${NC} Running milestone learning tests (this takes ~90s)..."
        if python3 -m pytest tests/milestones/test_learning_verification.py -q --tb=no 2>/dev/null; then
            check_pass "Milestone learning verification"
        else
            check_fail "Milestone learning verification"
        fi
    else
        check_skip "Milestone tests not found"
    fi
fi

# ============================================================================
# Summary
# ============================================================================
END_TIME=$(date +%s)
DURATION=$((END_TIME - START_TIME))

print_header "📊 Pre-flight Summary"
echo ""
echo -e "  ${GREEN}Passed:${NC}  $PASSED"
echo -e "  ${RED}Failed:${NC}  $FAILED"
echo -e "  ${YELLOW}Skipped:${NC} $SKIPPED"
echo ""
echo -e "  ${CYAN}Duration:${NC} ${DURATION}s"
echo ""

if [[ $FAILED -eq 0 ]]; then
    echo -e "${GREEN}${BOLD}✅ PRE-FLIGHT CHECK PASSED${NC}"
    echo ""
    if ! $FULL_TEST; then
        echo -e "${YELLOW}💡 For complete validation, run: ./scripts/preflight.sh --full${NC}"
    fi
    exit 0
else
    echo -e "${RED}${BOLD}❌ PRE-FLIGHT CHECK FAILED${NC}"
    echo ""
    echo -e "${YELLOW}Fix the issues above before release.${NC}"
    exit 1
fi

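Usage sketch, based on the Makefile and CI targets earlier in this same commit: the script is invoked in two modes and signals the result through its exit code.

    # quick check (~1 min); `make preflight` runs this
    ./scripts/preflight.sh

    # full validation (~10 min); `make release` and the ci-release target run this
    ./scripts/preflight.sh --full

    # exit status is 0 only when no check failed, so it can gate later steps
    ./scripts/preflight.sh --full && echo "ready to release"
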
tests/e2e/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
# E2E Test Package

tests/e2e/conftest.py (new file, 18 lines)
@@ -0,0 +1,18 @@
"""
E2E Test Configuration

Registers pytest markers for categorizing tests by speed and purpose.
"""

import pytest


def pytest_configure(config):
    """Register custom markers for E2E tests."""
    config.addinivalue_line("markers", "quick: Quick verification tests (~30s total)")
    config.addinivalue_line("markers", "module_flow: Module workflow tests (~2min)")
    config.addinivalue_line("markers", "milestone_flow: Milestone workflow tests")
    config.addinivalue_line("markers", "full_journey: Complete journey tests (~10min)")
    config.addinivalue_line("markers", "slow: Slow tests that train models")
    config.addinivalue_line("markers", "release: Release validation tests")

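Selection sketch: registering these markers lets pytest filter tests with -m, while the Makefile targets in this commit filter by test-name substring with -k; both are standard pytest flags.

    # select by registered marker
    python -m pytest tests/e2e -m quick -q

    # select by name substring, as the Makefile targets do
    python -m pytest tests/e2e/test_user_journey.py -k module_flow -v

    # skip anything marked slow
    python -m pytest tests/e2e -m "not slow" -q
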
tests/e2e/test_user_journey.py (new file, 440 lines)
@@ -0,0 +1,440 @@
"""
End-to-End User Journey Tests for TinyTorch

These tests simulate the complete student experience:
1. Fresh start (setup)
2. Module workflow (start → work → complete)
3. Progress tracking
4. Milestone unlocking

Run with:
    pytest tests/e2e/test_user_journey.py -v

Categories:
    -k quick         # Fast CLI verification (~30s)
    -k module_flow   # Module workflow tests (~2min)
    -k full_journey  # Complete journey test (~10min)
"""

import pytest
import subprocess
import sys
import json
import shutil
import tempfile
from pathlib import Path
from typing import Optional, Tuple

# Project root
PROJECT_ROOT = Path(__file__).parent.parent.parent


def run_tito(args: list, cwd: Optional[Path] = None, timeout: int = 60) -> Tuple[int, str, str]:
    """Run a tito command and return (exit_code, stdout, stderr)."""
    cmd = [sys.executable, "-m", "tito.main"] + args
    result = subprocess.run(
        cmd,
        cwd=cwd or PROJECT_ROOT,
        capture_output=True,
        text=True,
        timeout=timeout
    )
    return result.returncode, result.stdout, result.stderr


def run_python_script(script_path: Path, timeout: int = 120) -> Tuple[int, str, str]:
    """Run a Python script and return (exit_code, stdout, stderr)."""
    result = subprocess.run(
        [sys.executable, str(script_path)],
        cwd=PROJECT_ROOT,
        capture_output=True,
        text=True,
        timeout=timeout
    )
    return result.returncode, result.stdout, result.stderr


class TestQuickVerification:
    """Quick tests to verify CLI and structure (~30 seconds total)."""

    @pytest.mark.quick
    def test_tito_bare_command_works(self):
        """Bare 'tito' shows welcome screen."""
        code, stdout, stderr = run_tito([])
        assert code == 0, f"Bare tito failed: {stderr}"
        assert "Welcome" in stdout or "Quick Start" in stdout

    @pytest.mark.quick
    def test_tito_help_works(self):
        """'tito --help' shows help."""
        code, stdout, stderr = run_tito(["--help"])
        assert code == 0, f"tito --help failed: {stderr}"
        assert "usage" in stdout.lower() or "COMMAND" in stdout

    @pytest.mark.quick
    def test_tito_version_works(self):
        """'tito --version' shows version."""
        code, stdout, stderr = run_tito(["--version"])
        assert code == 0
        assert "Tiny" in stdout or "CLI" in stdout

    @pytest.mark.quick
    def test_module_command_help(self):
        """'tito module' shows module help."""
        code, stdout, stderr = run_tito(["module"])
        assert code == 0
        # Should show module subcommands
        assert "start" in stdout or "complete" in stdout

    @pytest.mark.quick
    def test_milestone_command_help(self):
        """'tito milestones' shows milestone help."""
        code, stdout, stderr = run_tito(["milestones"])
        assert code == 0
        # Should show milestone subcommands
        assert "list" in stdout or "run" in stdout or "status" in stdout

    @pytest.mark.quick
    def test_module_status_works(self):
        """'tito module status' runs without error."""
        code, stdout, stderr = run_tito(["module", "status"])
        assert code == 0, f"module status failed: {stderr}"

    @pytest.mark.quick
    def test_system_info_works(self):
        """'tito system info' runs without error."""
        code, stdout, stderr = run_tito(["system", "info"])
        assert code == 0, f"system info failed: {stderr}"

    @pytest.mark.quick
    def test_milestone_list_works(self):
        """'tito milestones list' shows available milestones."""
        code, stdout, stderr = run_tito(["milestones", "list", "--simple"])
        assert code == 0, f"milestones list failed: {stderr}"
        # Should show milestone names
        assert "Perceptron" in stdout or "1957" in stdout

    @pytest.mark.quick
    def test_modules_directory_exists(self):
        """Modules directory structure exists."""
        modules_dir = PROJECT_ROOT / "modules"
        assert modules_dir.exists(), "modules/ directory missing"

        # Check first few modules exist
        for num in ["01", "02", "03"]:
            module_dirs = list(modules_dir.glob(f"{num}_*"))
            assert len(module_dirs) > 0, f"Module {num} directory missing"

    @pytest.mark.quick
    def test_milestones_directory_exists(self):
        """Milestones directory structure exists."""
        milestones_dir = PROJECT_ROOT / "milestones"
        assert milestones_dir.exists(), "milestones/ directory missing"

        # Check milestone directories
        assert (milestones_dir / "01_1957_perceptron").exists(), "Milestone 01 missing"

    @pytest.mark.quick
    def test_tinytorch_package_importable(self):
        """TinyTorch package can be imported."""
        # Run the import in a subprocess so a broken package cannot break pytest itself
        result = subprocess.run(
            [sys.executable, "-c", "import tinytorch; print('OK')"],
            cwd=PROJECT_ROOT,
            capture_output=True,
            text=True
        )
        assert result.returncode == 0, f"Cannot import tinytorch: {result.stderr}"
        assert "OK" in result.stdout


class TestModuleFlow:
    """Test module workflow: start → complete → progress tracking."""

    @pytest.fixture(autouse=True)
    def backup_progress(self):
        """Backup and restore progress.json around tests."""
        progress_file = PROJECT_ROOT / "progress.json"
        backup_file = PROJECT_ROOT / "progress.json.e2e_backup"

        # Backup existing progress
        if progress_file.exists():
            shutil.copy(progress_file, backup_file)

        yield

        # Restore original progress
        if backup_file.exists():
            shutil.copy(backup_file, progress_file)
            backup_file.unlink()
        elif progress_file.exists():
            # If there was no original, remove the test progress
            # Actually, keep it - don't delete real progress
            pass

    @pytest.mark.module_flow
    def test_module_01_start_works(self):
        """'tito module start 01' works (first module, no prerequisites)."""
        # Note: This opens Jupyter, but should not block
        # We test the command doesn't error on already-started modules
        code, stdout, stderr = run_tito(["module", "status"])
        assert code == 0

    @pytest.mark.module_flow
    def test_module_02_blocked_without_01(self):
        """Cannot start module 02 without completing 01 first."""
        # Create clean progress state
        progress_file = PROJECT_ROOT / "progress.json"
        progress_file.write_text(json.dumps({
            "started_modules": [],
            "completed_modules": [],
            "last_worked": None
        }))

        code, stdout, stderr = run_tito(["module", "start", "02"])

        # Should fail or show locked message
        combined = stdout + stderr
        assert "Locked" in combined or "prerequisite" in combined.lower() or code != 0

    @pytest.mark.module_flow
    def test_module_complete_runs_tests(self):
        """'tito module complete 01 --skip-export' runs tests."""
        # This tests that the complete command works (skip export to be faster)
        code, stdout, stderr = run_tito(
            ["module", "complete", "01", "--skip-export"],
            timeout=120  # Tests may take a while
        )
        # Check that tests ran (may pass or fail depending on state)
        combined = stdout + stderr
        assert "Test" in combined or "test" in combined or code in [0, 1]

    @pytest.mark.module_flow
    def test_progress_tracking_persists(self):
        """Progress is saved and persisted across commands."""
        progress_file = PROJECT_ROOT / "progress.json"

        # Set a known state
        progress_file.write_text(json.dumps({
            "started_modules": ["01"],
            "completed_modules": [],
            "last_worked": "01"
        }))

        # Run status command
        code, stdout, stderr = run_tito(["module", "status"])
        assert code == 0

        # Check progress file still exists and has data
        assert progress_file.exists()
        data = json.loads(progress_file.read_text())
        assert "started_modules" in data

    @pytest.mark.module_flow
    def test_module_test_command_works(self):
        """'tito module test 01' runs module tests."""
        code, stdout, stderr = run_tito(
            ["module", "test", "01"],
            timeout=120
        )
        # Should run tests (may pass or fail)
        combined = stdout + stderr
        # Test command should produce some output
        assert len(combined) > 0


class TestMilestoneFlow:
    """Test milestone workflow: prerequisites → run → completion tracking."""

    @pytest.mark.milestone_flow
    def test_milestone_list_shows_all(self):
        """Milestone list shows all available milestones."""
        code, stdout, stderr = run_tito(["milestones", "list"])
        assert code == 0

        # Check for expected milestones
        expected = ["Perceptron", "XOR", "MLP", "CNN", "Transformer"]
        found = sum(1 for m in expected if m in stdout)
        assert found >= 3, f"Expected milestones not shown. Got: {stdout}"

    @pytest.mark.milestone_flow
    def test_milestone_info_works(self):
        """'tito milestones info 01' shows milestone details."""
        code, stdout, stderr = run_tito(["milestones", "info", "01"])
        assert code == 0
        assert "Perceptron" in stdout or "1957" in stdout

    @pytest.mark.milestone_flow
    def test_milestone_status_works(self):
        """'tito milestones status' shows progress."""
        code, stdout, stderr = run_tito(["milestones", "status"])
        assert code == 0

    @pytest.mark.milestone_flow
    def test_milestone_01_script_exists(self):
        """Milestone 01 script file exists."""
        script_path = PROJECT_ROOT / "milestones" / "01_1957_perceptron" / "02_rosenblatt_trained.py"
        assert script_path.exists(), f"Milestone script missing: {script_path}"

    @pytest.mark.milestone_flow
    def test_milestone_run_checks_prerequisites(self):
        """'tito milestone run' checks prerequisites before running."""
        # Create clean state with no completed modules
        tito_dir = PROJECT_ROOT / ".tito"
        tito_dir.mkdir(exist_ok=True)
        progress_file = tito_dir / "progress.json"
        progress_file.write_text(json.dumps({
            "completed_modules": []
        }))

        # Try to run milestone 03 (requires many modules)
        code, stdout, stderr = run_tito(["milestones", "run", "03", "--skip-checks"], timeout=5)

        # With --skip-checks it might try to run; without it should check prereqs
        # Either way, the command should not crash
        assert code in [0, 1, 130]  # 130 = user interrupt


class TestFullJourney:
    """Complete end-to-end journey test (slow, thorough)."""

    @pytest.mark.full_journey
    @pytest.mark.slow
    def test_complete_module_01_journey(self):
        """
        Test complete journey for module 01:
        1. Start module
        2. Complete module (with tests)
        3. Verify progress updated
        4. Verify export worked
        """
        # Step 1: Check initial state
        code, stdout, stderr = run_tito(["module", "status"])
        assert code == 0

        # Step 2: Test the module
        code, stdout, stderr = run_tito(
            ["module", "test", "01"],
            timeout=180
        )
        # Tests should run (may pass or fail based on implementation)
        combined = stdout + stderr
        assert "test" in combined.lower() or "Test" in combined

        # Step 3: Verify tinytorch imports work
        result = subprocess.run(
            [sys.executable, "-c", "from tinytorch import Tensor; print('OK')"],
            cwd=PROJECT_ROOT,
            capture_output=True,
            text=True
        )
        # This tests that the package structure is correct
        # May fail if module not exported yet - that's informative
        if result.returncode != 0:
            pytest.skip("Tensor not yet exported - run tito module complete 01 first")

    @pytest.mark.full_journey
    @pytest.mark.slow
    def test_milestone_01_runs_successfully(self):
        """
        Test that milestone 01 can run successfully.
        Requires: Module 01-07 completed and exported.
        """
        # Check if prerequisite modules are available
        try:
            result = subprocess.run(
                [sys.executable, "-c", """
from tinytorch import Tensor, ReLU, Linear
print('OK')
"""],
                cwd=PROJECT_ROOT,
                capture_output=True,
                text=True,
                timeout=10
            )
            if result.returncode != 0:
                pytest.skip("Required modules not exported yet")
        except Exception:
            pytest.skip("Cannot import required modules")

        # Run milestone 01 with skip-checks (we verified prereqs above)
        script_path = PROJECT_ROOT / "milestones" / "01_1957_perceptron" / "02_rosenblatt_trained.py"
        if not script_path.exists():
            pytest.skip("Milestone script not found")

        code, stdout, stderr = run_python_script(script_path, timeout=120)

        # Should complete successfully or with informative error
        combined = stdout + stderr
        assert code == 0 or "Error" in combined, f"Milestone failed unexpectedly: {combined}"


class TestErrorHandling:
    """Test that errors are handled gracefully."""

    @pytest.mark.quick
    def test_invalid_command_shows_error(self):
        """Invalid commands show helpful error messages."""
        code, stdout, stderr = run_tito(["nonexistent_command"])
        assert code != 0
        combined = stdout + stderr
        assert "invalid" in combined.lower() or "error" in combined.lower()

    @pytest.mark.quick
    def test_invalid_module_number_handled(self):
        """Invalid module numbers are handled gracefully."""
        code, stdout, stderr = run_tito(["module", "start", "99"])
        assert code != 0
        combined = stdout + stderr
        assert "not found" in combined.lower() or "invalid" in combined.lower() or "99" in combined

    @pytest.mark.quick
    def test_invalid_milestone_handled(self):
        """Invalid milestone IDs are handled gracefully."""
        code, stdout, stderr = run_tito(["milestones", "info", "99"])
        assert code != 0
        combined = stdout + stderr
        assert "invalid" in combined.lower() or "not found" in combined.lower()


class TestInstallationPaths:
    """Test different installation/usage paths."""

    @pytest.mark.quick
    def test_src_directory_exists(self):
        """Source directory for development exists."""
        src_dir = PROJECT_ROOT / "src"
        assert src_dir.exists(), "src/ directory missing"

    @pytest.mark.quick
    def test_pyproject_exists(self):
        """pyproject.toml exists for pip installation."""
        pyproject = PROJECT_ROOT / "pyproject.toml"
        assert pyproject.exists(), "pyproject.toml missing"

    @pytest.mark.quick
    def test_requirements_exists(self):
        """requirements.txt exists for dependency installation."""
        requirements = PROJECT_ROOT / "requirements.txt"
        assert requirements.exists(), "requirements.txt missing"


# Pytest configuration
def pytest_configure(config):
    """Register custom markers."""
    config.addinivalue_line("markers", "quick: Quick verification tests (~30s)")
    config.addinivalue_line("markers", "module_flow: Module workflow tests (~2min)")
    config.addinivalue_line("markers", "milestone_flow: Milestone workflow tests")
    config.addinivalue_line("markers", "full_journey: Complete journey tests (~10min)")
    config.addinivalue_line("markers", "slow: Slow tests that train models")


if __name__ == "__main__":
    pytest.main([__file__, "-v"])

tito/commands/dev/__init__.py (new file, 7 lines)
@@ -0,0 +1,7 @@
"""Developer command group for TinyTorch CLI."""

from .dev import DevCommand
from .preflight import PreflightCommand

__all__ = ['DevCommand', 'PreflightCommand']

@@ -33,7 +33,7 @@ def print_banner(compact: bool = False):
     banner_text.append("Tiny", style="dim cyan")
     banner_text.append("🔥", style="red")
     banner_text.append("TORCH", style="bold orange1")
-    banner_text.append(": Don't just import it. Build it.", style="dim")
+    banner_text.append(": Don't import it. Build it.", style="dim")
     console.print(Panel(banner_text, style="bright_blue", padding=(1, 2)))

 def print_compact_banner():
@@ -44,7 +44,7 @@ def print_compact_banner():
     banner_text.append("Tiny", style="dim cyan")
     banner_text.append("\n🔥", style="red")
     banner_text.append("TORCH", style="bold orange1")
-    banner_text.append(": Don't just import it. Build it.", style="dim")
+    banner_text.append(": Don't import it. Build it.", style="dim")
     console.print(Panel(banner_text, style="bright_blue", padding=(1, 2)))

 def print_ascii_logo(compact: bool = False):