Mirror of https://github.com/MLSysBook/TinyTorch.git (synced 2026-04-29 06:32:55 -05:00)
Refactor notebook generation to use separate files for better architecture
- Restored tools/py_to_notebook.py as a focused, standalone tool
- Updated the tito notebooks command to use subprocess to call the separate tool (see the sketch below)
- Maintains clean separation of concerns: tito.py for CLI orchestration, py_to_notebook.py for conversion logic
- Updated documentation to use the 'tito notebooks' command instead of direct tool calls
- Benefits: easier debugging, better maintainability, focused single-responsibility modules
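A minimal sketch of the wiring this describes, not taken from the repository: only tools/py_to_notebook.py and the tito notebooks command come from the message above; the function name, argument, and error handling are illustrative assumptions.

# Hypothetical sketch of the orchestration described above: the `tito notebooks`
# subcommand shells out to the standalone converter rather than importing it.
# Function name, argument, and tool path handling are assumptions.
import subprocess
import sys
from pathlib import Path

def run_notebooks_command(module_dir: str) -> int:
    """Delegate .py -> .ipynb conversion to the standalone tool (assumed CLI)."""
    tool = Path("tools") / "py_to_notebook.py"
    # Running the converter in a separate process keeps tito.py limited to
    # CLI orchestration, the separation of concerns described above.
    result = subprocess.run([sys.executable, str(tool), module_dir])
    return result.returncode

Running the converter as a subprocess rather than an import means a crash or traceback in the conversion logic surfaces as an ordinary non-zero exit code, which is the debugging benefit the message claims.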
@@ -1,7 +1,7 @@
 # AUTOGENERATED! DO NOT EDIT! File to edit: ../../modules/layers/layers_dev.ipynb.
 
 # %% auto 0
-__all__ = ['Dense']
+__all__ = ['Dense', 'ReLU', 'Sigmoid', 'Tanh']
 
 # %% ../../modules/layers/layers_dev.ipynb 2
 import numpy as np
@@ -10,9 +10,6 @@ import sys
 from typing import Union, Optional, Callable
 from .tensor import Tensor
 
-# Import activation functions from the activations module
-from .activations import ReLU, Sigmoid, Tanh
-
 # Import our Tensor class
 # sys.path.append('../../')
 # from modules.tensor.tensor_dev import Tensor
@@ -112,3 +109,130 @@ class Dense:
     def __call__(self, x: Tensor) -> Tensor:
         """Make layer callable: layer(x) same as layer.forward(x)"""
         return self.forward(x)
+
+# %% ../../modules/layers/layers_dev.ipynb 9
+class ReLU:
+    """
+    ReLU Activation: f(x) = max(0, x)
+
+    The most popular activation function in deep learning.
+    Simple, effective, and computationally efficient.
+
+    TODO: Implement ReLU activation function.
+    """
+
+    def forward(self, x: Tensor) -> Tensor:
+        """
+        Apply ReLU: f(x) = max(0, x)
+
+        Args:
+            x: Input tensor
+
+        Returns:
+            Output tensor with ReLU applied element-wise
+
+        TODO: Implement element-wise max(0, x) operation
+        """
+        raise NotImplementedError("Student implementation required")
+
+    def __call__(self, x: Tensor) -> Tensor:
+        """Make activation callable: relu(x) same as relu.forward(x)"""
+        return self.forward(x)
+
+# %% ../../modules/layers/layers_dev.ipynb 10
+class ReLU:
+    """ReLU Activation: f(x) = max(0, x)"""
+
+    def forward(self, x: Tensor) -> Tensor:
+        """Apply ReLU: f(x) = max(0, x)"""
+        return Tensor(np.maximum(0, x.data))
+
+    def __call__(self, x: Tensor) -> Tensor:
+        return self.forward(x)
+
+# %% ../../modules/layers/layers_dev.ipynb 11
+class Sigmoid:
+    """
+    Sigmoid Activation: f(x) = 1 / (1 + e^(-x))
+
+    Squashes input to range (0, 1). Often used for binary classification.
+
+    TODO: Implement Sigmoid activation function.
+    """
+
+    def forward(self, x: Tensor) -> Tensor:
+        """
+        Apply Sigmoid: f(x) = 1 / (1 + e^(-x))
+
+        Args:
+            x: Input tensor
+
+        Returns:
+            Output tensor with Sigmoid applied element-wise
+
+        TODO: Implement sigmoid function (be careful with numerical stability!)
+        """
+        raise NotImplementedError("Student implementation required")
+
+    def __call__(self, x: Tensor) -> Tensor:
+        return self.forward(x)
+
+# %% ../../modules/layers/layers_dev.ipynb 12
+class Sigmoid:
+    """Sigmoid Activation: f(x) = 1 / (1 + e^(-x))"""
+
+    def forward(self, x: Tensor) -> Tensor:
+        """Apply Sigmoid with numerical stability"""
+        # Use the numerically stable version to avoid overflow
+        # For x >= 0: sigmoid(x) = 1 / (1 + exp(-x))
+        # For x < 0:  sigmoid(x) = exp(x) / (1 + exp(x))
+        x_data = x.data
+        result = np.zeros_like(x_data)
+
+        # Stable computation
+        positive_mask = x_data >= 0
+        result[positive_mask] = 1.0 / (1.0 + np.exp(-x_data[positive_mask]))
+        result[~positive_mask] = np.exp(x_data[~positive_mask]) / (1.0 + np.exp(x_data[~positive_mask]))
+
+        return Tensor(result)
+
+    def __call__(self, x: Tensor) -> Tensor:
+        return self.forward(x)
+
+# %% ../../modules/layers/layers_dev.ipynb 13
+class Tanh:
+    """
+    Tanh Activation: f(x) = tanh(x)
+
+    Squashes input to range (-1, 1). Zero-centered output.
+
+    TODO: Implement Tanh activation function.
+    """
+
+    def forward(self, x: Tensor) -> Tensor:
+        """
+        Apply Tanh: f(x) = tanh(x)
+
+        Args:
+            x: Input tensor
+
+        Returns:
+            Output tensor with Tanh applied element-wise
+
+        TODO: Implement tanh function
+        """
+        raise NotImplementedError("Student implementation required")
+
+    def __call__(self, x: Tensor) -> Tensor:
+        return self.forward(x)
+
+# %% ../../modules/layers/layers_dev.ipynb 14
+class Tanh:
+    """Tanh Activation: f(x) = tanh(x)"""
+
+    def forward(self, x: Tensor) -> Tensor:
+        """Apply Tanh"""
+        return Tensor(np.tanh(x.data))
+
+    def __call__(self, x: Tensor) -> Tensor:
+        return self.forward(x)
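As a quick sanity check of the activation classes added above, the following standalone snippet (not part of the commit) re-creates the implemented versions against a minimal stand-in Tensor; the real TinyTorch Tensor API may differ. The extreme inputs exercise the numerically stable sigmoid:

# Smoke test for the activations added in this diff. The Tensor class below
# is an assumed minimal stand-in, not TinyTorch's actual implementation.
import numpy as np

class Tensor:
    """Minimal stand-in: wraps a NumPy array in a .data attribute."""
    def __init__(self, data):
        self.data = np.asarray(data, dtype=np.float64)

class ReLU:
    def __call__(self, x: Tensor) -> Tensor:
        return Tensor(np.maximum(0, x.data))

class Sigmoid:
    def __call__(self, x: Tensor) -> Tensor:
        # Mirror of the stable implementation above: exp() is only ever
        # evaluated on non-positive arguments, so it cannot overflow.
        d = x.data
        out = np.zeros_like(d)
        pos = d >= 0
        out[pos] = 1.0 / (1.0 + np.exp(-d[pos]))
        out[~pos] = np.exp(d[~pos]) / (1.0 + np.exp(d[~pos]))
        return Tensor(out)

class Tanh:
    def __call__(self, x: Tensor) -> Tensor:
        return Tensor(np.tanh(x.data))

x = Tensor([-1000.0, -1.0, 0.0, 1.0, 1000.0])
print(ReLU()(x).data)     # [0. 0. 0. 1. 1000.]
print(Sigmoid()(x).data)  # approx [0. 0.2689 0.5 0.7311 1.], no overflow warnings
print(Tanh()(x).data)     # approx [-1. -0.7616 0. 0.7616 1.]

A naive 1.0 / (1.0 + np.exp(-x)) would emit an overflow warning at x = -1000, because exp(1000) exceeds the float64 range even though the limit works out to 0; splitting on the sign of x, as the diff does, avoids that.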