diff --git a/tinytorch/milestones/05_2017_transformer/01_vaswani_attention.py b/tinytorch/milestones/05_2017_transformer/01_vaswani_attention.py
index ee9509aad..d190d7b54 100755
--- a/tinytorch/milestones/05_2017_transformer/01_vaswani_attention.py
+++ b/tinytorch/milestones/05_2017_transformer/01_vaswani_attention.py
@@ -376,7 +376,7 @@ def train_epoch(model, dataloader, optimizer, loss_fn):
         pred = np.argmax(logits.data, axis=-1)
         for i in range(batch_size):
             if np.array_equal(pred[i], target_batch.data[i]):
-                    correct_sequences += 1
+                correct_sequences += 1
         total_samples += batch_size
 
     return total_loss / total_samples, (correct_sequences / total_samples) * 100
diff --git a/tinytorch/tests/integration/integration_mnist_test.py b/tinytorch/tests/integration/integration_mnist_test.py
index 25773e3c6..6671c1a0a 100644
--- a/tinytorch/tests/integration/integration_mnist_test.py
+++ b/tinytorch/tests/integration/integration_mnist_test.py
@@ -21,7 +21,7 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 import numpy as np
 from tinytorch.core.tensor import Tensor
-from tinytorch.core.layers import Linear as Dense
+from tinytorch.core.layers import Linear
 from tinytorch.core.activations import ReLU
 from tinytorch.core.training import CrossEntropyLoss
 
diff --git a/tinytorch/tests/integration/test_dataloader_integration.py b/tinytorch/tests/integration/test_dataloader_integration.py
index b3d1cebb3..594373d67 100644
--- a/tinytorch/tests/integration/test_dataloader_integration.py
+++ b/tinytorch/tests/integration/test_dataloader_integration.py
@@ -12,16 +12,9 @@ import os
 # Add project root to path
 sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
 
-# Try to import from package, fall back to dev module if not exported yet
-try:
-    from tinytorch import Tensor
-    from tinytorch.data.loader import Dataset, TensorDataset, DataLoader
-except (ImportError, ModuleNotFoundError):
-    # Module not exported yet, use dev version
-    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'modules', 'source', '05_dataloader'))
-    from dataloader_dev import Dataset, TensorDataset, DataLoader
-    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'modules', 'source', '01_tensor'))
-    from tensor_dev import Tensor
+# Import from TinyTorch package
+from tinytorch import Tensor
+from tinytorch.core.dataloader import Dataset, TensorDataset, DataLoader
 
 
 def test_training_workflow_integration():
diff --git a/tinytorch/tests/integration/test_module_05_dense.py b/tinytorch/tests/integration/test_module_05_dense.py
index a6314879d..928b757c5 100644
--- a/tinytorch/tests/integration/test_module_05_dense.py
+++ b/tinytorch/tests/integration/test_module_05_dense.py
@@ -24,7 +24,7 @@ class TestDenseModuleExports:
     def test_dense_is_callable(self):
         """Test Dense can be instantiated."""
         from tinytorch.core.layers import Linear as Dense
-        layer = Linear(10, 5)
+        layer = Dense(10, 5)
 
         assert layer is not None, "Should create Dense layer instance"
         assert hasattr(layer, 'forward'), "Dense should have forward method"
@@ -38,7 +38,7 @@ class TestDenseLayerFunctionality:
         from tinytorch.core.tensor import Tensor
 
         # Create layer
-        layer = Linear(10, 5)
+        layer = Dense(10, 5)
 
         # Create input
         batch_size = 32
@@ -56,7 +56,7 @@ class TestDenseLayerFunctionality:
         from tinytorch.core.layers import Linear as Dense
         from tinytorch.core.tensor import Tensor
 
-        layer = Linear(10, 5, bias=True)
+        layer = Dense(10, 5, bias=True)
 
         assert hasattr(layer, 'bias'), "Layer should have bias"
         assert layer.bias is not None, "Bias should be initialized"
@@ -69,7 +69,7 @@ class TestDenseLayerFunctionality:
         from tinytorch.core.layers import Linear as Dense
         from tinytorch.core.tensor import Tensor
 
-        layer = Linear(10, 5, bias=False)
+        layer = Dense(10, 5, bias=False)
 
         assert layer.bias is None, "Bias should be None when disabled"
         x = Tensor(np.random.randn(1, 10))
@@ -81,7 +81,7 @@ class TestDenseLayerFunctionality:
         from tinytorch.core.layers import Linear as Dense
         from tinytorch.core.tensor import Tensor
 
-        layer = Linear(10, 5)
+        layer = Dense(10, 5)
 
         x = Tensor(np.random.randn(4, 10))
         # Test both forward() and __call__()
@@ -114,9 +114,9 @@ class TestNetworkComposition:
         from tinytorch.core.tensor import Tensor
 
         # Build network manually (without Sequential)
-        layer1 = Linear(784, 128)
+        layer1 = Dense(784, 128)
         relu = ReLU()
-        layer2 = Linear(128, 10)
+        layer2 = Dense(128, 10)
         sigmoid = Sigmoid()
 
         # Test forward pass through all layers
@@ -146,9 +146,9 @@ class TestXORCapability:
         from tinytorch.core.tensor import Tensor
 
         # XOR network: 2 -> 4 -> 1
-        hidden = Linear(2, 4)
+        hidden = Dense(2, 4)
         relu = ReLU()
-        output = Linear(4, 1)
+        output = Dense(4, 1)
         sigmoid = Sigmoid()
 
         # XOR inputs