diff --git a/.cursor/rules/module-development.md b/.cursor/rules/module-development.md new file mode 100644 index 00000000..54608f2a --- /dev/null +++ b/.cursor/rules/module-development.md @@ -0,0 +1,32 @@ +# Module Development Rules + +**Authoritative Source**: `docs/development/module-rules.md` +**Reference Implementation**: `modules/source/08_optimizers/optimizers_dev.py` + +All TinyTorch module development rules, patterns, conventions, and examples are maintained in the comprehensive documentation file: + +๐Ÿ“– **[docs/development/module-rules.md](../../docs/development/module-rules.md)** + +## ๐ŸŽฏ **Use Living Examples, Not Static Documentation** + +Instead of maintaining separate documentation that can get out of sync, **use `08_optimizers` as your reference implementation** - it follows all current patterns perfectly and serves as the living example of proper module structure. + +## ๐Ÿ“‹ **What's Covered** + +- ๐Ÿ“ File structure and organization +- ๐ŸŽ“ Educational patterns and pedagogical framework +- ๐Ÿงช Testing conventions and inline test standards +- ๐Ÿ“ฆ NBDev integration and export patterns +- ๐ŸŽฏ Complete module template with educational content +- ๐Ÿ—๏ธ Development workflow and best practices +- ๐Ÿ“‹ Module metadata and configuration +- โœ… Quality standards and checklist + +## ๐Ÿ”— **Quick Reference Approach** + +1. **Check `08_optimizers`** for educational content structure +2. **Follow exact patterns** for module summaries and takeaways +3. **Use the rules file** for technical specifications +4. **Reference living code** instead of static documentation + +**Remember**: When in doubt, reference `08_optimizers` - it follows all patterns perfectly! \ No newline at end of file diff --git a/docs/development/module-rules.md b/docs/development/module-rules.md new file mode 100644 index 00000000..1211c1a7 --- /dev/null +++ b/docs/development/module-rules.md @@ -0,0 +1,443 @@ +# TinyTorch Module Development Rules + +**Version**: 2.0 +**Date**: January 2025 +**Status**: Complete Reference Guide +**Reference Implementation**: `modules/source/08_optimizers/optimizers_dev.py` + +This document defines the complete set of rules, patterns, and conventions for developing TinyTorch modules. Instead of maintaining separate documentation, **use `08_optimizers` as your reference implementation** - it follows all current patterns perfectly. + +## ๐Ÿ“š Educational Philosophy + +### Core Principles +1. **Educational First**: Every module is designed for learning, not just functionality +2. **Progressive Complexity**: Start simple, build complexity step by step +3. **Real-World Connection**: Connect concepts to practical ML applications +4. **Standalone Learning**: Each module should be self-contained +5. **Professional Standards**: Use industry-standard patterns and practices + +### "Build โ†’ Use โ†’ [Understand/Reflect/Analyze/Optimize]" Framework +Each module follows this pedagogical pattern: +- **Build**: Implement the component from scratch +- **Use**: Apply it to real data and problems +- **Third Stage**: Varies by module (Understand/Reflect/Analyze/Optimize) + +## ๐Ÿ“ File Structure and Organization + +### 1. **File Naming Convention** +``` +modules/source/NN_modulename/ +โ”œโ”€โ”€ modulename_dev.py # Main development file (Python source) +โ”œโ”€โ”€ modulename_dev.ipynb # Generated notebook (temporary) +โ”œโ”€โ”€ module.yaml # Module configuration +โ”œโ”€โ”€ README.md # Module documentation +โ””โ”€โ”€ tests/ # External tests (if any) + โ””โ”€โ”€ test_modulename.py +``` + +### 2. **File Format: Jupytext Percent Format** +All `*_dev.py` files MUST use Jupytext percent format: + +```python +# --- +# jupyter: +# jupytext: +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.17.1 +# --- +``` + +## ๐Ÿ—๏ธ Module Template Structure + +**Follow this exact structure** (see `08_optimizers` for reference): + +### A. **Header Section** +```python +# %% [markdown] +""" +# Module N: Title - Brief Description + +## Learning Goals +- Goal 1: Specific outcome +- Goal 2: Another objective +- Goal 3: Connection to ML concepts + +## Build โ†’ Use โ†’ [Understand/Reflect/Analyze/Optimize] +1. **Build**: What students implement +2. **Use**: How they apply it +3. **[Third Stage]**: Deeper engagement +""" +``` + +### B. **Setup and Imports** +```python +# %% nbgrader={"grade": false, "grade_id": "modulename-imports", "locked": false, "schema_version": 3, "solution": false, "task": false} +#| default_exp core.modulename + +#| export +import numpy as np +import sys +from typing import Union, List, Tuple, Optional, Any + +# %% nbgrader={"grade": false, "grade_id": "modulename-setup", "locked": false, "schema_version": 3, "solution": false, "task": false} +print("๐Ÿ”ฅ TinyTorch [Module] Module") +print(f"Python version: {sys.version_info.major}.{sys.version_info.minor}") +print("Ready to [action]!") +``` + +### C. **Package Location** +```python +# %% [markdown] +""" +## ๐Ÿ“ฆ Where This Code Lives in the Final Package + +**Learning Side:** You work in `modules/source/NN_modulename/modulename_dev.py` +**Building Side:** Code exports to `tinytorch.core.modulename` + +```python +# Final package structure: +from tinytorch.core.modulename import ComponentName # Main functionality! +from tinytorch.core.tensor import Tensor # Foundation +``` + +**Why this matters:** +- **Learning:** Focused modules for deep understanding +- **Production:** Proper organization like PyTorch's structure +- **Consistency:** All [module] operations live together +- **Foundation:** Connection to broader ML systems +""" +``` + +### D. **Educational Content Structure** +```python +# %% [markdown] +""" +## What Are [Components]? + +### The Problem/Motivation +Explain why this module exists and what problem it solves. + +### The Solution +Describe the approach and key insights. + +### Real-World Impact +Show concrete applications and industry relevance. + +### What We'll Build +1. **Component 1**: Brief description +2. **Component 2**: Brief description +3. **Integration**: How components work together +""" +``` + +### E. **Implementation Sections** +```python +# %% [markdown] +""" +## Step N: [Component Name] + +### Mathematical Foundation +Mathematical explanation with formulas and intuition. + +### Implementation Strategy +Step-by-step approach to building the component. +""" + +# %% nbgrader={"grade": false, "grade_id": "component-implementation", "locked": false, "schema_version": 3, "solution": true, "task": false} +#| export +class ComponentName: + """ + Brief description of the component. + + TODO: Student implementation guidance + + APPROACH: + 1. [First step with specific guidance] + 2. [Second step with specific guidance] + 3. [Third step with specific guidance] + + EXAMPLE: + Input: [concrete example] + Expected: [concrete expected output] + """ + def __init__(self, parameter1, parameter2): + ### BEGIN SOLUTION + # Complete implementation (hidden from students) + ### END SOLUTION + raise NotImplementedError("Student implementation required") +``` + +### F. **Test Functions** +```python +# %% [markdown] +""" +### ๐Ÿงช Unit Test: Component Name + +**Description**: Brief explanation of what is tested + +**This is a unit test** - it tests [specific functionality] in isolation. +""" + +# %% nbgrader={"grade": true, "grade_id": "test-component", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false} +def test_component_function(): + """Test the component functionality.""" + print("๐Ÿ”ฌ Unit Test: Component Function...") + + # Test implementation + try: + # Test logic + assert condition, "Error message" + print("โœ… Component test works") + print("๐Ÿ“ˆ Progress: Component โœ“") + return True + except Exception as e: + print(f"โŒ Component test failed: {e}") + return False + +# Test execution +if __name__ == "__main__": + test_component_function() +``` + +### G. **Module Summary** (CRITICAL) +```python +# %% [markdown] +""" +## ๐ŸŽฏ Module Summary + +Congratulations! You've successfully implemented [module description]: + +### โœ… What You've Built +- **Component 1**: Description of accomplishment +- **Component 2**: Description of accomplishment +- **Integration**: How components work together +- **Complete System**: End-to-end functionality + +### โœ… Key Learning Outcomes +- **Understanding**: Core concepts mastered +- **Implementation**: Technical skills developed +- **Mathematical mastery**: Formulas and algorithms implemented +- **Real-world application**: Practical applications understood + +### โœ… Mathematical Foundations Mastered +- **Formula 1**: Mathematical concept with notation +- **Formula 2**: Another key mathematical insight +- **Algorithm**: Implementation of key algorithm + +### โœ… Professional Skills Developed +- **Skill 1**: Technical capability gained +- **Skill 2**: Another professional competency +- **Integration**: Systems thinking and design + +### โœ… Ready for Advanced Applications +Your implementations now enable: +- **Application 1**: What students can build next +- **Application 2**: Another capability unlocked +- **Real Systems**: Connection to production applications + +### ๐Ÿ”— Connection to Real ML Systems +Your implementations mirror production systems: +- **PyTorch**: `torch.component` provides identical functionality +- **TensorFlow**: Similar concepts in TensorFlow +- **Industry Standard**: Used in major ML frameworks + +### ๐ŸŽฏ The Power of [Technology] +You've unlocked the key technology that [impact description]: +- **Capability 1**: What this enables +- **Capability 2**: Another important capability +- **Scale**: How this technology scales + +### ๐Ÿง  Deep Learning Revolution/Impact +You now understand the technology that [revolutionary impact]: +- **Historical context**: Before/after this technology +- **Modern applications**: Current uses +- **Future implications**: What this enables + +### ๐Ÿš€ What's Next +Your implementations are the foundation for: +- **Next Module**: Natural progression +- **Advanced Topics**: Related advanced concepts +- **Research**: Opportunities for exploration + +**Next Module**: [Description of next module and its connection] + +[Motivational closing emphasizing what students have accomplished] +""" +``` + +## ๐Ÿงช Testing Standards + +### 1. **Test Function Naming** +All test functions MUST follow this pattern: +```python +def test_component_name(): + """Test the component functionality.""" +``` + +### 2. **Test Function Structure** +```python +def test_component_function(): + """Test description.""" + print("๐Ÿ”ฌ Unit Test: Component Function...") + + ### ๐Ÿงช Unit Test: Component Function + + **Description**: Brief explanation of what is tested + + **This is a unit test** - it tests [specific functionality] in isolation. + + try: + # Test logic + print("โœ… [check] works") + print("๐Ÿ“ˆ Progress: Component โœ“") + return True + except Exception as e: + print(f"โŒ Test failed: {e}") + return False +``` + +### 3. **Test Execution** +```python +if __name__ == "__main__": + test_function_1() + test_function_2() + test_function_3() +``` + +## ๐Ÿ“ฆ NBDev Integration + +### 1. **Export Directives** +```python +#| export +def function_to_export(): + """Function that becomes part of tinytorch package.""" + pass +``` + +### 2. **Default Export Target** +```python +#| default_exp core.modulename +``` + +### 3. **NBGrader Integration** +```python +# %% nbgrader={"grade": false, "grade_id": "unique-id", "locked": false, "schema_version": 3, "solution": true, "task": false} +``` + +### 4. **Solution Hiding (NBGrader)** +```python +def student_function(): + """ + Student implementation function. + + TODO: Implementation guidance for students. + """ + ### BEGIN SOLUTION + # Complete implementation hidden from students + ### END SOLUTION + raise NotImplementedError("Student implementation required") +``` + +## ๐Ÿ”ง Development Workflow + +### 1. **Python-First Development** +- Work in `.py` files (source of truth) +- Generate `.ipynb` with `tito nbgrader generate` +- Never commit `.ipynb` files to version control + +### 2. **Testing Integration** +- Use inline tests for immediate feedback +- All tests must pass before module completion +- Use `pytest` for any external testing + +## ๐Ÿ“‹ Module Metadata (module.yaml) + +```yaml +name: "modulename" +title: "Module Title" +description: "Brief description of module functionality" +version: "1.0.0" +author: "TinyTorch Team" + +learning_objectives: + - "Objective 1" + - "Objective 2" + +prerequisites: + - "prerequisite_module" + +metadata: + difficulty: "intermediate" + time_estimate: "4-6 hours" + pedagogical_framework: "Build โ†’ Use โ†’ Understand" + +concepts: + - "concept1" + - "concept2" + +exports: + - "ComponentName" + - "helper_function" + +files: + main: "modulename_dev.py" + readme: "README.md" + +assessment: + total_points: 50 + breakdown: + component1: 20 + component2: 20 + integration: 10 + +next_modules: + - "next_module" +``` + +## โœ… Quality Checklist + +Before completing a module: + +### Content Requirements +- [ ] Jupytext percent format header +- [ ] Educational content with clear explanations +- [ ] Step-by-step implementation guidance +- [ ] Mathematical foundations explained +- [ ] Real-world applications discussed +- [ ] Complete module summary (following 08_optimizers pattern) + +### Technical Requirements +- [ ] All functions have docstrings +- [ ] NBGrader cells properly configured +- [ ] NBDev export directives in place +- [ ] Solution blocks use `### BEGIN SOLUTION` / `### END SOLUTION` +- [ ] Error handling implemented +- [ ] Type hints where appropriate + +### Testing Requirements +- [ ] All inline tests pass +- [ ] Test functions use standard naming (`test_*`) +- [ ] Test output follows emoji standards +- [ ] `if __name__ == "__main__":` block present +- [ ] Tests provide educational feedback + +### Documentation Requirements +- [ ] module.yaml properly configured +- [ ] README.md updated +- [ ] Learning objectives clear +- [ ] Prerequisites documented +- [ ] Export list accurate + +## ๐Ÿ“š Additional Resources + +- **Reference Implementation**: `modules/source/08_optimizers/optimizers_dev.py` +- **NBGrader Documentation**: [NBGrader docs](https://nbgrader.readthedocs.io/) +- **NBDev Documentation**: [NBDev docs](https://nbdev.fast.ai/) +- **TinyTorch CLI**: Use `tito --help` for development commands + +--- + +**Remember**: When in doubt, reference `08_optimizers` - it follows all these patterns perfectly and serves as the living example of proper module structure. diff --git a/docs/development/module-structure-design.md b/docs/development/module-structure-design.md deleted file mode 100644 index 6782b238..00000000 --- a/docs/development/module-structure-design.md +++ /dev/null @@ -1,591 +0,0 @@ -# TinyTorch Module Structure Design Document - -## Overview - -This document defines the standard structure for TinyTorch educational modules, ensuring consistency, educational effectiveness, and maintainability across all components. - -## Module Architecture Philosophy - -### Core Principles - -1. **Educational First**: Every module is designed for learning, not just functionality -2. **Progressive Complexity**: Start simple, build complexity step by step -3. **Real-World Connection**: Connect concepts to practical ML applications -4. **Standalone Learning**: Each module should be self-contained -5. **Professional Standards**: Use industry-standard patterns and practices - -### "Build โ†’ Use โ†’ Understand" Framework - -Each module follows this pedagogical pattern: -- **Build**: Implement the component from scratch -- **Use**: Apply it to real data and problems -- **Understand**: Analyze behavior, trade-offs, and connections - -## Standard Module Structure - -### File Organization - -``` -modules/source/{module_name}/ -โ”œโ”€โ”€ {module_name}_dev.py # Main development file (Jupytext format) -โ”œโ”€โ”€ README.md # Module documentation and guide -โ”œโ”€โ”€ tests/ # Module-specific tests (if needed) -โ”‚ โ””โ”€โ”€ test_{module_name}.py # Comprehensive test suite -โ”œโ”€โ”€ data/ # Module-specific data files (if needed) -โ”‚ โ””โ”€โ”€ sample_data.npy -โ””โ”€โ”€ assets/ # Images, diagrams, etc. (if needed) - โ””โ”€โ”€ architecture_diagram.png -``` - -### Development File Structure (`*_dev.py`) - -Every module development file follows this standardized structure: - -```python -# --- -# jupyter: -# jupytext: -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.17.1 -# --- - -# %% [markdown] -""" -# Module {N}: {Title} - {Brief Description} - -## ๐ŸŽฏ Learning Objectives -- โœ… Build {core_concept} from scratch -- โœ… Use it with real data ({specific_dataset}) -- โœ… Understand {key_insight} -- โœ… Connect to {next_module} and production systems - -## ๐Ÿ“š What You'll Learn -- **Conceptual**: {concept_explanation} -- **Technical**: {implementation_details} -- **Practical**: {real_world_applications} - -## ๐Ÿ› ๏ธ What You'll Build -- **Core Component**: {main_class_or_function} -- **Supporting Functions**: {helper_functions} -- **Integration Points**: {connections_to_other_modules} - -## ๐Ÿ“Š Module Info -- **Difficulty**: {โญโญโญ} (1-5 stars) -- **Time Estimate**: {X-Y hours} -- **Prerequisites**: {previous_modules} -- **Next Steps**: {next_modules} -""" - -# %% -#| default_exp core.{module_name} - -# Standard imports -import numpy as np -import matplotlib.pyplot as plt -from typing import Union, List, Tuple, Optional, Any -import warnings -warnings.filterwarnings('ignore') - -# Module-specific imports -from pathlib import Path -import sys - -# Add project root to path for imports -project_root = Path(__file__).parent.parent.parent -sys.path.insert(0, str(project_root)) - -# %% [markdown] -""" -## Step 1: Conceptual Foundation - -### What is {Concept}? - -**Definition**: {Clear, simple definition with examples} - -**Why it matters**: {Real-world motivation and ML context} - -**How it works**: {Intuitive explanation before math} - -**Visual examples**: {Concrete examples, diagrams, analogies} - -**Connection**: {How it builds on previous modules} - -### Mathematical Foundation - -{Mathematical concepts explained intuitively} - -### Real-World Applications - -{Specific examples in ML and AI} -""" - -# %% [markdown] -""" -## Step 2: Implementation Planning - -### Design Decisions - -Before we implement, let's think about: -1. **Interface Design**: How should users interact with this component? -2. **Data Structures**: What internal representation makes sense? -3. **Error Handling**: What can go wrong and how do we handle it? -4. **Performance**: What are the computational considerations? -5. **Integration**: How does this connect to other modules? - -### Implementation Strategy - -We'll build this component in stages: -1. **Core Functionality**: {basic_implementation} -2. **Enhanced Features**: {advanced_features} -3. **Integration Points**: {connections} -4. **Optimization**: {performance_improvements} -""" - -# %% [markdown] -""" -## Step 3: Core Implementation - -### {Component Name} - -Let's implement the core component step by step. -""" - -# %% -#| export -class {ComponentName}: - """ - {Component description and purpose} - - This class implements {specific_functionality} for the TinyTorch framework. - - Args: - {parameter_descriptions} - - Example: - >>> {usage_example} - - Note: - {important_notes_or_warnings} - """ - - def __init__(self, {parameters}): - """ - Initialize the {component_name}. - - TODO: Implement initialization logic - - APPROACH: - 1. {step_1_description} - 2. {step_2_description} - 3. {step_3_description} - - EXAMPLE: - Input: {input_example} - Expected: {expected_behavior} - - HINTS: - - {hint_1} - - {hint_2} - - {hint_3} - """ - ### BEGIN SOLUTION - {instructor_implementation} - ### END SOLUTION - - def {method_name}(self, {parameters}) -> {return_type}: - """ - {Method description} - - TODO: Implement {method_functionality} - - APPROACH: - 1. {implementation_step_1} - 2. {implementation_step_2} - 3. {implementation_step_3} - - EXAMPLE: - Input: {concrete_input_example} - Expected output: {concrete_output_example} - Your code should: {specific_behavior_description} - - HINTS: - - {specific_hint_1} - - {specific_hint_2} - - {specific_hint_3} - """ - ### BEGIN SOLUTION - {instructor_implementation} - ### END SOLUTION - -# %% [markdown] -""" -### ๐Ÿงช Comprehensive Test: {Component Name} - -Let's test our implementation thoroughly to make sure it works correctly. -""" - -# %% nbgrader={"grade": true, "grade_id": "test-{component}-comprehensive", "locked": true, "points": 25, "schema_version": 3, "solution": false, "task": false} -import pytest - -class Test{ComponentName}: - """Comprehensive test suite for {ComponentName}.""" - - def test_initialization(self): - """Test component initialization.""" - # Test basic initialization - component = {ComponentName}({basic_params}) - assert {basic_assertion} - - # Test with different parameters - component2 = {ComponentName}({different_params}) - assert {different_assertion} - - def test_core_functionality(self): - """Test core component functionality.""" - component = {ComponentName}({params}) - - # Test basic operation - result = component.{method_name}({input_data}) - expected = {expected_result} - assert {assertion}, f"Expected {expected}, got {result}" - - # Test with different inputs - result2 = component.{method_name}({different_input}) - assert {different_assertion} - - def test_edge_cases(self): - """Test edge cases and boundary conditions.""" - component = {ComponentName}({params}) - - # Test empty input - {edge_case_tests} - - # Test large input - {large_input_tests} - - # Test invalid input - with pytest.raises({ExpectedException}): - component.{method_name}({invalid_input}) - - def test_integration(self): - """Test integration with other components.""" - {integration_tests} - -def run_comprehensive_tests(): - """Run all tests with educational feedback.""" - print("๐Ÿ”ฌ Running comprehensive {component_name} tests...") - - test_class = Test{ComponentName}() - tests = [ - ('Initialization', test_class.test_initialization), - ('Core Functionality', test_class.test_core_functionality), - ('Edge Cases', test_class.test_edge_cases), - ('Integration', test_class.test_integration) - ] - - passed = 0 - total = len(tests) - - for test_name, test_func in tests: - try: - test_func() - print(f"โœ… {test_name}: PASSED") - passed += 1 - except Exception as e: - print(f"โŒ {test_name}: FAILED - {e}") - - print(f"\n๐Ÿ“Š Results: {passed}/{total} tests passed") - if passed == total: - print("๐ŸŽ‰ All {component_name} tests passed!") - print("๐Ÿ“ˆ Progress: {ComponentName} โœ“") - return True - else: - print("โš ๏ธ Some tests failed - check your implementation") - return False - -# Execute tests -success = run_comprehensive_tests() - -# %% [markdown] -""" -## Step 4: Real-World Application - -### Using {ComponentName} with Real Data - -Let's see how our component works with actual data from {dataset_name}. -""" - -# %% -# Load real data for demonstration -{real_data_loading_code} - -# Apply our component -print("๐Ÿ”ฌ Testing with real data...") -component = {ComponentName}({real_params}) -result = component.{method_name}(real_data) - -print(f"โœ… Real data processing successful!") -print(f"Input shape: {real_data.shape}") -print(f"Output shape: {result.shape}") -print(f"Sample output: {result[:5]}") # Show first 5 elements - -# %% [markdown] -""" -### Visualization and Analysis - -Let's visualize what our component does to understand it better. -""" - -# %% -# Create visualization -plt.figure(figsize=(12, 4)) - -# Input visualization -plt.subplot(1, 3, 1) -{input_visualization_code} -plt.title('Input Data') - -# Process visualization -plt.subplot(1, 3, 2) -{process_visualization_code} -plt.title('{Component} Processing') - -# Output visualization -plt.subplot(1, 3, 3) -{output_visualization_code} -plt.title('Output Data') - -plt.tight_layout() -plt.show() - -print("๐Ÿ“Š Visualization shows how {component_name} transforms the data") - -# %% [markdown] -""" -## Step 5: Integration and Next Steps - -### Connection to Other Modules - -This {component_name} connects to the broader TinyTorch ecosystem: - -- **Previous modules**: {previous_connections} -- **Next modules**: {next_connections} -- **Production use**: {production_applications} - -### Performance Considerations - -{performance_analysis} - -### Advanced Features (Optional) - -{advanced_features_description} -""" - -# %% [markdown] -""" -## ๐ŸŽฏ Module Summary - -### What You've Built -- โœ… **{ComponentName}**: {achievement_1} -- โœ… **Real Data Integration**: {achievement_2} -- โœ… **Comprehensive Testing**: {achievement_3} -- โœ… **Visualization**: {achievement_4} - -### Key Insights -- **Technical**: {technical_insight} -- **Practical**: {practical_insight} -- **Conceptual**: {conceptual_insight} - -### Next Steps -- **Immediate**: {next_immediate_step} -- **Advanced**: {next_advanced_step} -- **Integration**: {next_integration_step} - -### Success Criteria -Your module is complete when: -1. **All tests pass**: Comprehensive testing shows everything works -2. **Real data works**: Component processes actual ML data correctly -3. **Integration ready**: Component exports to `tinytorch.core.{module_name}` -4. **Understanding**: You can explain how and why it works - -Ready to move to the next module? Let's go! ๐Ÿš€ -""" -``` - -## README Structure - -Every module should have a comprehensive README following this template: - -```markdown -# {Module Name} Module - -## ๐Ÿ“Š Module Info -- **Difficulty**: {โญโญโญ} (1-5 stars) -- **Time Estimate**: {X-Y hours} -- **Prerequisites**: {previous_modules} -- **Next Steps**: {next_modules} - -## Overview - -{Brief description of what this module teaches and why it matters} - -## Learning Goals - -{Specific learning objectives} - -## What You'll Implement - -{Detailed description of components to build} - -## Files - -{Description of all files in the module} - -## Usage - -{Code examples showing how to use the module} - -## Testing - -{Instructions for running tests} - -## Development Workflow - -{Step-by-step development process} - -## Key Concepts - -{Important concepts and takeaways} - -## Troubleshooting - -{Common issues and solutions} -``` - -## Testing Integration - -### Comprehensive Notebook Testing - -Each module includes comprehensive tests within the notebook: - -1. **Immediate Feedback**: Tests run as students implement -2. **Educational Context**: Tests explain what they're checking -3. **Professional Structure**: Uses pytest patterns -4. **Visual Feedback**: Clear pass/fail indicators -5. **Progress Tracking**: Shows completion status - -### Test Categories - -1. **Initialization Tests**: Component creation and setup -2. **Functionality Tests**: Core operations and methods -3. **Edge Case Tests**: Boundary conditions and error handling -4. **Integration Tests**: Connections to other modules -5. **Real Data Tests**: Performance with actual datasets - -## Visual Design Guidelines - -### Progress Indicators -- ๐Ÿ”ฌ Testing phase -- โœ… Success indicators -- โŒ Failure indicators -- ๐Ÿ“Š Results summary -- ๐ŸŽ‰ Completion celebration -- ๐Ÿ“ˆ Progress tracking - -### Educational Formatting -- **Bold** for key concepts -- `Code` for technical terms -- > Quotes for important notes -- Lists for step-by-step processes -- Tables for comparisons - -## Data Integration Standards - -### Real Data Requirements -- Use production datasets (CIFAR-10, ImageNet, etc.) -- Include data loading and preprocessing -- Show performance with realistic scales -- Demonstrate practical applications - -### Visualization Standards -- Input/Process/Output flow diagrams -- Before/after comparisons -- Performance metrics -- Error analysis plots - -## Export and Integration - -### NBDev Integration -- `#| default_exp core.{module_name}` for package destination -- `#| export` for production code -- `#| hide` for instructor solutions -- Proper imports and dependencies - -### Package Structure -``` -tinytorch/ -โ”œโ”€โ”€ core/ -โ”‚ โ”œโ”€โ”€ {module_name}.py # Exported module code -โ”‚ โ””โ”€โ”€ __init__.py # Package initialization -โ””โ”€โ”€ __init__.py # Main package init -``` - -## Quality Checklist - -### Before Module Completion -- [ ] All learning objectives addressed -- [ ] Comprehensive tests implemented and passing -- [ ] Real data integration working -- [ ] Visualization and analysis included -- [ ] README documentation complete -- [ ] Code exports to package correctly -- [ ] Integration with other modules tested -- [ ] Performance considerations addressed - -### Educational Quality -- [ ] Concepts explained clearly -- [ ] Step-by-step implementation guidance -- [ ] Real-world connections made -- [ ] Visual learning aids included -- [ ] Progressive complexity maintained -- [ ] Student success criteria defined - -## Examples - -### Tensor Module Structure -```python -# Core tensor operations with comprehensive testing -# Real data integration with NumPy arrays -# Visual demonstrations of tensor operations -# Integration with activation functions -``` - -### Activation Module Structure -```python -# Mathematical foundations explained -# Multiple activation functions implemented -# Real neural network data processing -# Visualization of activation behaviors -``` - -### Layer Module Structure -```python -# Linear algebra foundations -# Dense layer implementation -# Real image classification example -# Integration with tensor and activation modules -``` - -## Conclusion - -This standardized module structure ensures: -- **Consistency** across all TinyTorch modules -- **Educational effectiveness** through proven patterns -- **Professional quality** with industry standards -- **Maintainability** through clear organization -- **Scalability** for future module additions - -Every module following this structure provides students with a complete, professional learning experience that builds both understanding and practical skills. \ No newline at end of file diff --git a/docs/development/testing-design.md b/docs/development/testing-design.md deleted file mode 100644 index fe5a3269..00000000 --- a/docs/development/testing-design.md +++ /dev/null @@ -1,326 +0,0 @@ -# TinyTorch Testing Design Document - -## Overview - -This document defines the **inline-first testing architecture** for TinyTorch, prioritizing student learning effectiveness and flow state over context switching overhead. - -## Core Philosophy: Learning-First Testing - -**Primary Goal**: Student learning and confidence building -**Secondary Goal**: Comprehensive validation for grading -**Tertiary Goal**: Professional testing practices - -### Key Insight -Students learning ML concepts are already at cognitive capacity. Every context switch is expensive and disrupts the learning flow. We prioritize immediate feedback and confidence building over professional tool complexity. - -## Three-Tier Testing Architecture - -### 1. Inline Tests (Primary - In Notebooks) -**Goal**: Immediate feedback and confidence building during development - -**Location**: Embedded in `*_dev.py` files as NBGrader cells -**Dependencies**: None (or minimal, well-controlled) -**Scope**: Comprehensive testing with educational context -**Purpose**: Build confidence step-by-step while ensuring correctness - -**Example**: -```python -# %% [markdown] -""" -### ๐Ÿงช Test Your ReLU Implementation - -Let's verify your ReLU function works correctly with various inputs. -This tests the core functionality you just implemented. -""" - -# %% nbgrader={"grade": true, "grade_id": "test-relu-comprehensive", "locked": true, "points": 15, "schema_version": 3, "solution": false, "task": false} -def test_relu_comprehensive(): - """Comprehensive test of ReLU function.""" - print("๐Ÿ”ฌ Testing ReLU function...") - - # Test 1: Basic positive numbers - try: - result = relu([1, 2, 3]) - expected = [1, 2, 3] - assert result == expected, f"Positive numbers: expected {expected}, got {result}" - print("โœ… Positive numbers work correctly") - except Exception as e: - print(f"โŒ Positive numbers failed: {e}") - return False - - # Test 2: Negative numbers (should become 0) - try: - result = relu([-1, -2, -3]) - expected = [0, 0, 0] - assert result == expected, f"Negative numbers: expected {expected}, got {result}" - print("โœ… Negative numbers correctly clipped to 0") - except Exception as e: - print(f"โŒ Negative numbers failed: {e}") - return False - - # Test 3: Mixed positive and negative - try: - result = relu([-2, -1, 0, 1, 2]) - expected = [0, 0, 0, 1, 2] - assert result == expected, f"Mixed numbers: expected {expected}, got {result}" - print("โœ… Mixed positive/negative numbers work correctly") - except Exception as e: - print(f"โŒ Mixed numbers failed: {e}") - return False - - # Test 4: Edge case - zero - try: - result = relu([0]) - expected = [0] - assert result == expected, f"Zero: expected {expected}, got {result}" - print("โœ… Zero handled correctly") - except Exception as e: - print(f"โŒ Zero failed: {e}") - return False - - print("๐ŸŽ‰ All ReLU tests passed! Your implementation works correctly.") - print("๐Ÿ“ˆ Progress: ReLU function โœ“") - return True - -# Run the test -success = test_relu_comprehensive() -if not success: - print("\n๐Ÿ’ก Hints for fixing ReLU:") - print("- ReLU should return max(0, x) for each element") - print("- Negative numbers should become 0") - print("- Positive numbers should stay unchanged") - print("- Zero should remain zero") -``` - -**Characteristics**: -- **Comprehensive**: Test all functionality, edge cases, error conditions -- **Educational**: Explain what's being tested and why -- **Visual**: Clear pass/fail feedback with emojis and progress tracking -- **Immediate**: No context switching required -- **Encouraging**: Build confidence with positive reinforcement -- **Helpful**: Provide hints and guidance when tests fail - -### 2. Module Tests (For Grading - Separate Files with Mocks) -**Goal**: Comprehensive validation for instructor grading using simple, visible mocks - -**Location**: `tests/test_{module}.py` files -**Dependencies**: Simple, visible mock objects (no cross-module dependencies) -**Scope**: Complete module functionality with professional structure -**Purpose**: Verify module works correctly for grading and assessment - -**Example**: -```python -# tests/test_layers.py -""" -Comprehensive Layers Module Tests - -Tests Dense layer functionality using simple mock objects. -Used for instructor grading and comprehensive validation. -""" - -class SimpleTensor: - """ - Simple mock tensor for testing layers. - - Shows exactly what interface the Dense layer expects: - - .data (numpy array): The actual numerical data - - .shape (tuple): Dimensions of the data - """ - def __init__(self, data): - self.data = np.array(data) - self.shape = self.data.shape - - def __repr__(self): - return f"SimpleTensor(shape={self.shape})" - -class TestDenseLayer: - """Comprehensive tests for Dense layer - used for grading.""" - - def test_initialization(self): - """Test Dense layer creation and weight initialization.""" - layer = Dense(input_size=3, output_size=2) - - assert hasattr(layer, 'weights'), "Dense layer should have weights" - assert hasattr(layer, 'bias'), "Dense layer should have bias" - assert layer.weights.shape == (3, 2), f"Expected weights shape (3, 2), got {layer.weights.shape}" - assert layer.bias.shape == (2,), f"Expected bias shape (2,), got {layer.bias.shape}" - - def test_forward_pass_comprehensive(self): - """Comprehensive test of Dense layer forward pass.""" - layer = Dense(input_size=3, output_size=2) - - # Test single sample - input_tensor = SimpleTensor([[1.0, 2.0, 3.0]]) - output = layer(input_tensor) - - assert hasattr(output, 'data'), "Layer should return tensor-like object with .data" - assert hasattr(output, 'shape'), "Layer should return tensor-like object with .shape" - assert output.shape == (1, 2), f"Expected output shape (1, 2), got {output.shape}" - - # Verify computation (y = Wx + b) - expected = np.dot(input_tensor.data, layer.weights) + layer.bias - np.testing.assert_array_almost_equal(output.data, expected) - - # Test batch processing - batch_input = SimpleTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) - batch_output = layer(batch_input) - assert batch_output.shape == (2, 2), f"Expected batch output shape (2, 2), got {batch_output.shape}" - - # Test edge cases - edge_input = SimpleTensor([[0.0, 0.0, 0.0]]) - edge_output = layer(edge_input) - assert edge_output.shape == (1, 2), "Should handle zero input" -``` - -**Characteristics**: -- **Self-contained**: No dependencies on other TinyTorch modules -- **Mock-based**: Simple, visible mocks that document interfaces -- **Comprehensive**: Test all functionality, edge cases, error conditions -- **Professional**: Use pytest structure and best practices -- **Grading-focused**: Designed for instructor assessment - -### 3. Integration Tests (Cross-Module Validation) -**Goal**: Verify modules work together using vetted solutions - -**Location**: `tests/integration/` directory -**Dependencies**: Instructor-provided working implementations -**Scope**: Cross-module workflows and realistic ML scenarios -**Purpose**: Ensure modules compose correctly without cascade failures - -**Example**: -```python -# tests/integration/test_basic_ml_pipeline.py -""" -Integration Tests - Basic ML Pipeline - -Tests how student modules work together with vetted solutions. -No cascade failures - student code tested with known-working dependencies. -""" - -from tinytorch.solutions.tensor import Tensor # Working implementation -from tinytorch.solutions.activations import ReLU # Working implementation -from student_layers import Dense # Student's implementation - -class TestBasicMLPipeline: - """Test student modules in realistic ML workflows.""" - - def test_neural_network_forward_pass(self): - """Test complete neural network using student's Dense layer.""" - # Create network with student's Dense layers - layer1 = Dense(input_size=4, output_size=3) # Student implementation - activation = ReLU() # Working implementation - layer2 = Dense(input_size=3, output_size=2) # Student implementation - - # Create input with working Tensor - x = Tensor([[1.0, 2.0, 3.0, 4.0]]) # Working tensor - - # Forward pass through network - h1 = layer1(x) # Student layer with working tensor - h1_activated = activation(h1) # Working activation - output = layer2(h1_activated) # Student layer - - # Verify pipeline works end-to-end - assert output.shape == (1, 2), "Network should produce correct output shape" - assert isinstance(output, Tensor), "Network should produce Tensor output" - - print("โœ… Student's Dense layers work in complete neural network!") -``` - -## Testing Workflow - -### For Students (Primary Path) -1. **Implement in notebook**: Write code with educational guidance -2. **Test inline**: Get immediate feedback without leaving notebook -3. **Build confidence**: See progress with visual indicators -4. **Complete module**: All inline tests pass before moving on - -### For Instructors (Grading Path) -1. **Run module tests**: Comprehensive validation with mocks -2. **Run integration tests**: Verify cross-module functionality -3. **Grade systematically**: Clear separation of concerns - -## Key Principles - -### 1. **Learning-First Design** -- Prioritize student understanding over tool complexity -- Minimize context switching and cognitive overhead -- Provide immediate, encouraging feedback -- Build confidence step by step - -### 2. **Flow State Preservation** -- Keep students in their notebooks -- Provide instant validation -- Use visual, encouraging feedback -- Avoid workflow disruption - -### 3. **Comprehensive Coverage** -- Inline tests are thorough, not just quick checks -- Test functionality, edge cases, and error conditions -- Provide educational context for each test -- Explain what's being tested and why - -### 4. **Professional Structure (Where Appropriate)** -- Module tests use professional pytest structure -- Integration tests mirror real-world workflows -- Maintain quality standards for grading -- Prepare students for industry practices - -## Implementation Guidelines - -### Inline Test Design -1. **Comprehensive**: Test all functionality thoroughly -2. **Educational**: Explain what's being tested -3. **Visual**: Use emojis and clear progress indicators -4. **Helpful**: Provide hints when tests fail -5. **Encouraging**: Build confidence with positive feedback - -### Mock Design for Module Tests -1. **Simple**: Only implement what's needed -2. **Visible**: Put mocks at top of files with clear documentation -3. **Educational**: Show exactly what interfaces are expected -4. **Minimal**: Don't over-engineer the mocks - -### Test Organization -``` -modules/source/{module}/{module}_dev.py # Implementation + comprehensive inline tests -tests/test_{module}.py # Package tests for exported functionality -tests/integration/ # Cross-module tests with vetted solutions -``` - -### CLI Integration -```bash -# Primary workflow - students work in notebooks -# Tests run automatically as cells execute - -# Instructor grading workflow -tito test --module tensor # Run module tests with mocks -tito test --integration # Run integration tests -tito test --all # Run all tests -``` - -## Benefits - -### For Students -- **No context switching**: Stay in flow state -- **Immediate feedback**: Know instantly if code works -- **Confidence building**: Step-by-step validation -- **Clear guidance**: Helpful error messages and hints -- **Educational value**: Learn what good testing looks like - -### For Instructors -- **Comprehensive validation**: Thorough testing for grading -- **Clear diagnostics**: Know exactly what's working/broken -- **Independent assessment**: Module tests don't depend on student's other modules -- **Professional standards**: Maintain quality without overwhelming students - -### For the System -- **Maintainable**: Clear separation of learning vs grading concerns -- **Scalable**: Easy to add new modules -- **Educational**: Every test serves a learning purpose -- **Practical**: Balances learning effectiveness with quality assurance - -## Conclusion - -The inline-first testing approach prioritizes student learning effectiveness over tool complexity. Students get comprehensive testing within their learning context, building confidence and understanding step by step. Instructors maintain professional testing standards for grading while avoiding the cognitive overhead that disrupts the learning process. - -**Key insight**: Context switching is expensive for learners. Keep them in flow state while ensuring comprehensive validation. \ No newline at end of file diff --git a/modules/source/00_setup/README.md b/modules/source/00_setup/README.md index 8d05c48c..ce933040 100644 --- a/modules/source/00_setup/README.md +++ b/modules/source/00_setup/README.md @@ -146,7 +146,7 @@ my_profile = DeveloperProfile( ## What You'll Learn This comprehensive module introduces: -- **NBDev educational patterns** - `#| export`, `#| hide` directives +- **NBDev educational patterns** - `#| export` directives and NBGrader solution markers - **File I/O operations** - Loading ASCII art with error handling - **Object-oriented programming** - Classes, methods, and properties - **System programming** - Platform detection and compatibility diff --git a/modules/source/02_activations/activations_dev.py b/modules/source/02_activations/activations_dev.py index 8e7396ce..3254ebd1 100644 --- a/modules/source/02_activations/activations_dev.py +++ b/modules/source/02_activations/activations_dev.py @@ -829,7 +829,7 @@ def test_activations_integration(): print(f"โœ… Ready for neural network integration!") # Run the integration test - test_activations_integration() +test_activations_integration() # %% [markdown] """ diff --git a/modules/source/04_networks/networks_dev.py b/modules/source/04_networks/networks_dev.py index 0dc3d83a..290e00f8 100644 --- a/modules/source/04_networks/networks_dev.py +++ b/modules/source/04_networks/networks_dev.py @@ -197,28 +197,31 @@ class Sequential: Applies layers in order: f(x) = layer_n(...layer_2(layer_1(x))) """ - def __init__(self, layers: List): + def __init__(self, layers: Optional[List] = None): """ Initialize Sequential network with layers. Args: - layers: List of layers to compose in order + layers: List of layers to compose in order (optional, defaults to empty list) TODO: Store the layers and implement forward pass APPROACH: 1. Store the layers list as an instance variable - 2. This creates the network architecture ready for forward pass + 2. Initialize empty list if no layers provided + 3. Prepare for forward pass implementation EXAMPLE: Sequential([Dense(3,4), ReLU(), Dense(4,2)]) creates a 3-layer network: Dense โ†’ ReLU โ†’ Dense HINTS: - - Store layers in self.layers - - This is the foundation for all network architectures + - Use self.layers to store the layers + - Handle empty initialization case """ - self.layers = layers + ### BEGIN SOLUTION + self.layers = layers if layers is not None else [] + ### END SOLUTION def forward(self, x: Tensor) -> Tensor: """ @@ -259,8 +262,12 @@ class Sequential: ### END SOLUTION def __call__(self, x: Tensor) -> Tensor: - """Make network callable: network(x) same as network.forward(x)""" + """Make the network callable: sequential(x) instead of sequential.forward(x)""" return self.forward(x) + + def add(self, layer): + """Add a layer to the network.""" + self.layers.append(layer) # %% [markdown] """ @@ -726,61 +733,37 @@ except Exception as e: print("๐Ÿ“ˆ Final Progress: Complete network architectures ready for real ML applications!") -# %% [markdown] -""" -## ๐ŸŽฏ Module Summary +# %% nbgrader={"grade": false, "grade_id": "networks-compatibility", "locked": false, "schema_version": 3, "solution": false, "task": false} +#| export +def MLP(input_size: int, hidden_size: int, output_size: int, + activation=ReLU, output_activation=Sigmoid) -> Sequential: + """ + MLP function wrapper for test compatibility. + + This function provides compatibility with external tests that expect an MLP function + with singular hidden_size parameter instead of hidden_sizes list. + + Args: + input_size: Number of input features + hidden_size: Size of the single hidden layer + output_size: Number of output features + activation: Activation function for hidden layer (default: ReLU) + output_activation: Activation function for output layer (default: Sigmoid) + + Returns: + Sequential network with MLP architecture (input โ†’ hidden โ†’ output) + """ + layers = [] + + # Input to hidden layer + layers.append(Dense(input_size, hidden_size)) + layers.append(activation()) + + # Hidden to output layer + layers.append(Dense(hidden_size, output_size)) + if output_activation is not None: + layers.append(output_activation()) + + return Sequential(layers) -Congratulations! You've successfully implemented complete neural network architectures: - -### What You've Accomplished -โœ… **Sequential Networks**: The fundamental architecture for composing layers -โœ… **Function Composition**: Understanding how layers combine to create complex behaviors -โœ… **MLP Creation**: Building Multi-Layer Perceptrons with flexible architectures -โœ… **Architecture Patterns**: Creating shallow, deep, and wide networks -โœ… **Forward Pass**: Complete inference through multi-layer networks -โœ… **Real Applications**: Classification, regression, and deep learning patterns - -### Key Concepts You've Learned -- **Networks are function composition**: Complex behavior from simple building blocks -- **Sequential architecture**: The foundation of most neural networks -- **MLP patterns**: Dense โ†’ Activation โ†’ Dense โ†’ Activation โ†’ Output -- **Architecture design**: How depth and width affect network capability -- **Forward pass**: How data flows through complete networks - -### Mathematical Foundations -- **Function composition**: f(x) = f_n(...f_2(f_1(x))) -- **Universal approximation**: MLPs can approximate any continuous function -- **Hierarchical learning**: Early layers learn simple features, later layers learn complex patterns -- **Nonlinearity**: Activation functions enable complex decision boundaries - -### Real-World Applications -- **Classification**: Image recognition, spam detection, medical diagnosis -- **Regression**: Price prediction, time series forecasting -- **Feature learning**: Extracting meaningful representations from raw data -- **Transfer learning**: Using pre-trained networks for new tasks - -### Architecture Insights -- **Shallow networks**: Good for simple patterns, fast training -- **Deep networks**: Better for complex patterns, hierarchical learning -- **Wide networks**: More parallel processing, good for diverse features -- **Activation choice**: ReLU for most cases, Tanh for centered data, Softmax for classification - -### Next Steps -1. **Export your code**: Use NBDev to export to the `tinytorch` package -2. **Test your implementation**: Run the complete test suite -3. **Use your networks**: - ```python - from tinytorch.core.networks import Sequential, create_mlp - from tinytorch.core.layers import Dense - from tinytorch.core.activations import ReLU - - # Create custom network - network = Sequential([Dense(10, 5), ReLU(), Dense(5, 1)]) - - # Create MLP - mlp = create_mlp(10, [20, 10], 1) - ``` -4. **Build more complex architectures**: CNNs, RNNs, Transformers! - -**Ready for the next challenge?** Let's add convolutional layers for image processing and build CNNs! -""" \ No newline at end of file +# %% [markdown] \ No newline at end of file diff --git a/modules/source/06_dataloader/dataloader_dev.py b/modules/source/06_dataloader/dataloader_dev.py index 56aadd85..95993688 100644 --- a/modules/source/06_dataloader/dataloader_dev.py +++ b/modules/source/06_dataloader/dataloader_dev.py @@ -944,14 +944,14 @@ try: for epoch in range(3): epoch_samples = 0 - for batch_data, batch_labels in loader: + for batch_data, batch_labels in loader: epoch_samples += batch_data.shape[0] - # Verify shapes remain consistent across epochs - assert batch_data.shape[1] == 6, f"Features should be 6 in epoch {epoch}" - assert len(batch_labels.shape) == 1, f"Labels should be 1D in epoch {epoch}" - - assert epoch_samples == 60, f"Should process 60 samples in epoch {epoch}, got {epoch_samples}" + # Verify shapes remain consistent across epochs + assert batch_data.shape[1] == 6, f"Features should be 6 in epoch {epoch}" + assert len(batch_labels.shape) == 1, f"Labels should be 1D in epoch {epoch}" + + assert epoch_samples == 60, f"Should process 60 samples in epoch {epoch}, got {epoch_samples}" print("โœ… Multi-epoch training works correctly") @@ -962,7 +962,7 @@ try: print(" โ€ข Memory-efficient processing") print(" โ€ข Multi-epoch training scenarios") - except Exception as e: +except Exception as e: print(f"โŒ Integration test failed: {e}") raise diff --git a/modules/source/07_autograd/autograd_dev.py b/modules/source/07_autograd/autograd_dev.py index 5e5d557c..641caae8 100644 --- a/modules/source/07_autograd/autograd_dev.py +++ b/modules/source/07_autograd/autograd_dev.py @@ -345,8 +345,9 @@ def test_variable_class(): print(f"โœ… Data access and properties working") print(f"โœ… Gradient management working") -# Run the test -test_variable_class() +# Run inline tests when module is executed directly +if __name__ == "__main__": + test_variable_class() # %% [markdown] """ @@ -487,8 +488,9 @@ def test_add_operation(): print(f"โœ… Backward pass computing correct gradients") print(f"โœ… Scalar addition working correctly") -# Run the test -test_add_operation() +# Run inline tests when module is executed directly +if __name__ == "__main__": + test_add_operation() # %% [markdown] """ @@ -620,8 +622,9 @@ def test_multiply_operation(): print(f"โœ… Backward pass implementing product rule correctly") print(f"โœ… Scalar multiplication working correctly") -# Run the test -test_multiply_operation() +# Run inline tests when module is executed directly +if __name__ == "__main__": + test_multiply_operation() # %% nbgrader={"grade": false, "grade_id": "subtract-operation", "locked": false, "schema_version": 3, "solution": true, "task": false} #| export @@ -717,8 +720,9 @@ def test_subtract_operation(): print(f"โœ… Backward pass implementing subtraction rule correctly") print(f"โœ… Scalar subtraction working correctly") -# Run the test -test_subtract_operation() +# Run inline tests when module is executed directly +if __name__ == "__main__": + test_subtract_operation() # %% [markdown] """ @@ -801,8 +805,9 @@ def test_chain_rule(): print(f"โœ… Automatic gradient computation working correctly") print(f"โœ… Chain rule implemented correctly") -# Run the test -test_chain_rule() +# Run inline tests when module is executed directly +if __name__ == "__main__": + test_chain_rule() # %% [markdown] """ @@ -916,8 +921,9 @@ def test_neural_network_training(): print(f"โœ… Autograd enables automatic training") print(f"โœ… Ready for complex neural network architectures!") -# Run the test -test_neural_network_training() +# Run inline tests when module is executed directly +if __name__ == "__main__": + test_neural_network_training() # %% [markdown] """ diff --git a/modules/source/08_optimizers/optimizers_dev.py b/modules/source/08_optimizers/optimizers_dev.py index ab3c805b..2d7a0667 100644 --- a/modules/source/08_optimizers/optimizers_dev.py +++ b/modules/source/08_optimizers/optimizers_dev.py @@ -43,11 +43,49 @@ try: from tinytorch.core.tensor import Tensor from tinytorch.core.autograd import Variable except ImportError: - # For development, import from local modules - sys.path.append(os.path.join(os.path.dirname(__file__), '..', '01_tensor')) - from tensor_dev import Tensor - sys.path.append(os.path.join(os.path.dirname(__file__), '..', '07_autograd')) - from autograd_dev import Variable + # For development, try local imports + try: + import sys + import os + + # Add module directories to path + base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + tensor_dir = os.path.join(base_dir, '01_tensor') + autograd_dir = os.path.join(base_dir, '07_autograd') + + if tensor_dir not in sys.path: + sys.path.append(tensor_dir) + if autograd_dir not in sys.path: + sys.path.append(autograd_dir) + + from tensor_dev import Tensor + from autograd_dev import Variable + except ImportError: + # Create minimal fallback classes for testing + print("Warning: Using fallback classes for testing") + + class Tensor: + def __init__(self, data): + self.data = np.array(data) + self.shape = self.data.shape + + def __str__(self): + return f"Tensor({self.data})" + + class Variable: + def __init__(self, data, requires_grad=True): + if isinstance(data, (int, float)): + self.data = Tensor([data]) + else: + self.data = Tensor(data) + self.requires_grad = requires_grad + self.grad = None + + def zero_grad(self): + self.grad = None + + def __str__(self): + return f"Variable({self.data.data})" # %% nbgrader={"grade": false, "grade_id": "optimizers-setup", "locked": false, "schema_version": 3, "solution": false, "task": false} print("๐Ÿ”ฅ TinyTorch Optimizers Module") diff --git a/tests/test_networks.py b/tests/test_networks.py index 1cc016ef..9d7b35e3 100644 --- a/tests/test_networks.py +++ b/tests/test_networks.py @@ -21,7 +21,7 @@ import os # Add the module source directory to the path sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'modules', 'source', '04_networks')) -from networks_dev import Sequential, create_mlp as MLP +from networks_dev import Sequential, MLP class MockTensor: