Updated for WSL (Windows) login features

This commit is contained in:
jettythek
2025-12-07 11:29:45 -08:00
parent cf3cfd02ed
commit 3e779340ec
7 changed files with 206 additions and 404 deletions

50
=2.3.0 Normal file
View File

@@ -0,0 +1,50 @@
Collecting nbdev
Downloading nbdev-2.4.6-py3-none-any.whl.metadata (10 kB)
Requirement already satisfied: packaging in ./.venv/lib/python3.12/site-packages (from nbdev) (25.0)
Collecting fastcore>=1.8.0 (from nbdev)
Downloading fastcore-1.8.16-py3-none-any.whl.metadata (3.7 kB)
Collecting execnb>=0.1.12 (from nbdev)
Downloading execnb-0.1.15-py3-none-any.whl.metadata (3.7 kB)
Collecting astunparse (from nbdev)
Downloading astunparse-1.6.3-py2.py3-none-any.whl.metadata (4.4 kB)
Collecting ghapi>=1.0.3 (from nbdev)
Downloading ghapi-1.0.8-py3-none-any.whl.metadata (14 kB)
Collecting watchdog (from nbdev)
Downloading watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl.metadata (44 kB)
Requirement already satisfied: asttokens in ./.venv/lib/python3.12/site-packages (from nbdev) (3.0.1)
Requirement already satisfied: setuptools in ./.venv/lib/python3.12/site-packages (from nbdev) (80.9.0)
Collecting build (from nbdev)
Using cached build-1.3.0-py3-none-any.whl.metadata (5.6 kB)
Requirement already satisfied: PyYAML in ./.venv/lib/python3.12/site-packages (from nbdev) (6.0.3)
Requirement already satisfied: ipython in ./.venv/lib/python3.12/site-packages (from execnb>=0.1.12->nbdev) (9.8.0)
Collecting wheel<1.0,>=0.23.0 (from astunparse->nbdev)
Using cached wheel-0.45.1-py3-none-any.whl.metadata (2.3 kB)
Requirement already satisfied: six<2.0,>=1.6.1 in ./.venv/lib/python3.12/site-packages (from astunparse->nbdev) (1.17.0)
Collecting pyproject_hooks (from build->nbdev)
Using cached pyproject_hooks-1.2.0-py3-none-any.whl.metadata (1.3 kB)
Requirement already satisfied: decorator>=4.3.2 in ./.venv/lib/python3.12/site-packages (from ipython->execnb>=0.1.12->nbdev) (5.2.1)
Requirement already satisfied: ipython-pygments-lexers>=1.0.0 in ./.venv/lib/python3.12/site-packages (from ipython->execnb>=0.1.12->nbdev) (1.1.1)
Requirement already satisfied: jedi>=0.18.1 in ./.venv/lib/python3.12/site-packages (from ipython->execnb>=0.1.12->nbdev) (0.19.2)
Requirement already satisfied: matplotlib-inline>=0.1.5 in ./.venv/lib/python3.12/site-packages (from ipython->execnb>=0.1.12->nbdev) (0.2.1)
Requirement already satisfied: pexpect>4.3 in ./.venv/lib/python3.12/site-packages (from ipython->execnb>=0.1.12->nbdev) (4.9.0)
Requirement already satisfied: prompt_toolkit<3.1.0,>=3.0.41 in ./.venv/lib/python3.12/site-packages (from ipython->execnb>=0.1.12->nbdev) (3.0.52)
Requirement already satisfied: pygments>=2.11.0 in ./.venv/lib/python3.12/site-packages (from ipython->execnb>=0.1.12->nbdev) (2.19.2)
Requirement already satisfied: stack_data>=0.6.0 in ./.venv/lib/python3.12/site-packages (from ipython->execnb>=0.1.12->nbdev) (0.6.3)
Requirement already satisfied: traitlets>=5.13.0 in ./.venv/lib/python3.12/site-packages (from ipython->execnb>=0.1.12->nbdev) (5.14.3)
Requirement already satisfied: wcwidth in ./.venv/lib/python3.12/site-packages (from prompt_toolkit<3.1.0,>=3.0.41->ipython->execnb>=0.1.12->nbdev) (0.2.14)
Requirement already satisfied: parso<0.9.0,>=0.8.4 in ./.venv/lib/python3.12/site-packages (from jedi>=0.18.1->ipython->execnb>=0.1.12->nbdev) (0.8.5)
Requirement already satisfied: ptyprocess>=0.5 in ./.venv/lib/python3.12/site-packages (from pexpect>4.3->ipython->execnb>=0.1.12->nbdev) (0.7.0)
Requirement already satisfied: executing>=1.2.0 in ./.venv/lib/python3.12/site-packages (from stack_data>=0.6.0->ipython->execnb>=0.1.12->nbdev) (2.2.1)
Requirement already satisfied: pure-eval in ./.venv/lib/python3.12/site-packages (from stack_data>=0.6.0->ipython->execnb>=0.1.12->nbdev) (0.2.3)
Downloading nbdev-2.4.6-py3-none-any.whl (70 kB)
Downloading execnb-0.1.15-py3-none-any.whl (13 kB)
Downloading fastcore-1.8.16-py3-none-any.whl (86 kB)
Downloading ghapi-1.0.8-py3-none-any.whl (68 kB)
Downloading astunparse-1.6.3-py2.py3-none-any.whl (12 kB)
Using cached wheel-0.45.1-py3-none-any.whl (72 kB)
Using cached build-1.3.0-py3-none-any.whl (23 kB)
Using cached pyproject_hooks-1.2.0-py3-none-any.whl (10 kB)
Downloading watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl (79 kB)
Installing collected packages: wheel, watchdog, pyproject_hooks, fastcore, ghapi, build, astunparse, execnb, nbdev
Successfully installed astunparse-1.6.3 build-1.3.0 execnb-0.1.15 fastcore-1.8.16 ghapi-1.0.8 nbdev-2.4.6 pyproject_hooks-1.2.0 watchdog-6.0.0 wheel-0.45.1

View File

@@ -29,6 +29,9 @@ pytest-cov>=4.0.0
jupytext>=1.16.0
nbformat>=5.10.0
# nbdev - Export notebooks to Python modules
nbdev>=2.3.0
# ============================================================================
# Optional Dependencies (Uncomment if needed)
# ============================================================================

395
tinytorch/_modidx.py generated
View File

@@ -51,56 +51,6 @@ d = { 'settings': { 'branch': 'main',
'tinytorch/applications/tinygpt.py'),
'tinytorch.applications.tinygpt.test_unit_training_pipeline': ( '20_capstone/capstone.html#test_unit_training_pipeline',
'tinytorch/applications/tinygpt.py')},
'tinytorch.bench': { 'tinytorch.bench.Benchmark': ('19_benchmarking/benchmarking.html#benchmark', 'tinytorch/bench.py'),
'tinytorch.bench.Benchmark.__init__': ( '19_benchmarking/benchmarking.html#benchmark.__init__',
'tinytorch/bench.py'),
'tinytorch.bench.Benchmark.compare_models': ( '19_benchmarking/benchmarking.html#benchmark.compare_models',
'tinytorch/bench.py'),
'tinytorch.bench.Benchmark.run_accuracy_benchmark': ( '19_benchmarking/benchmarking.html#benchmark.run_accuracy_benchmark',
'tinytorch/bench.py'),
'tinytorch.bench.Benchmark.run_latency_benchmark': ( '19_benchmarking/benchmarking.html#benchmark.run_latency_benchmark',
'tinytorch/bench.py'),
'tinytorch.bench.Benchmark.run_memory_benchmark': ( '19_benchmarking/benchmarking.html#benchmark.run_memory_benchmark',
'tinytorch/bench.py'),
'tinytorch.bench.BenchmarkResult': ( '19_benchmarking/benchmarking.html#benchmarkresult',
'tinytorch/bench.py'),
'tinytorch.bench.BenchmarkResult.__post_init__': ( '19_benchmarking/benchmarking.html#benchmarkresult.__post_init__',
'tinytorch/bench.py'),
'tinytorch.bench.BenchmarkResult.__str__': ( '19_benchmarking/benchmarking.html#benchmarkresult.__str__',
'tinytorch/bench.py'),
'tinytorch.bench.BenchmarkResult.to_dict': ( '19_benchmarking/benchmarking.html#benchmarkresult.to_dict',
'tinytorch/bench.py'),
'tinytorch.bench.BenchmarkSuite': ( '19_benchmarking/benchmarking.html#benchmarksuite',
'tinytorch/bench.py'),
'tinytorch.bench.BenchmarkSuite.__init__': ( '19_benchmarking/benchmarking.html#benchmarksuite.__init__',
'tinytorch/bench.py'),
'tinytorch.bench.BenchmarkSuite._estimate_energy_efficiency': ( '19_benchmarking/benchmarking.html#benchmarksuite._estimate_energy_efficiency',
'tinytorch/bench.py'),
'tinytorch.bench.BenchmarkSuite.generate_report': ( '19_benchmarking/benchmarking.html#benchmarksuite.generate_report',
'tinytorch/bench.py'),
'tinytorch.bench.BenchmarkSuite.plot_pareto_frontier': ( '19_benchmarking/benchmarking.html#benchmarksuite.plot_pareto_frontier',
'tinytorch/bench.py'),
'tinytorch.bench.BenchmarkSuite.plot_results': ( '19_benchmarking/benchmarking.html#benchmarksuite.plot_results',
'tinytorch/bench.py'),
'tinytorch.bench.BenchmarkSuite.run_full_benchmark': ( '19_benchmarking/benchmarking.html#benchmarksuite.run_full_benchmark',
'tinytorch/bench.py'),
'tinytorch.bench.TinyMLPerf': ('19_benchmarking/benchmarking.html#tinymlperf', 'tinytorch/bench.py'),
'tinytorch.bench.TinyMLPerf.__init__': ( '19_benchmarking/benchmarking.html#tinymlperf.__init__',
'tinytorch/bench.py'),
'tinytorch.bench.TinyMLPerf.generate_compliance_report': ( '19_benchmarking/benchmarking.html#tinymlperf.generate_compliance_report',
'tinytorch/bench.py'),
'tinytorch.bench.TinyMLPerf.run_all_benchmarks': ( '19_benchmarking/benchmarking.html#tinymlperf.run_all_benchmarks',
'tinytorch/bench.py'),
'tinytorch.bench.TinyMLPerf.run_standard_benchmark': ( '19_benchmarking/benchmarking.html#tinymlperf.run_standard_benchmark',
'tinytorch/bench.py'),
'tinytorch.bench.test_unit_benchmark': ( '19_benchmarking/benchmarking.html#test_unit_benchmark',
'tinytorch/bench.py'),
'tinytorch.bench.test_unit_benchmark_result': ( '19_benchmarking/benchmarking.html#test_unit_benchmark_result',
'tinytorch/bench.py'),
'tinytorch.bench.test_unit_benchmark_suite': ( '19_benchmarking/benchmarking.html#test_unit_benchmark_suite',
'tinytorch/bench.py'),
'tinytorch.bench.test_unit_tinymlperf': ( '19_benchmarking/benchmarking.html#test_unit_tinymlperf',
'tinytorch/bench.py')},
'tinytorch.benchmarking.benchmark': { 'tinytorch.benchmarking.benchmark.Benchmark': ( '19_benchmarking/benchmarking.html#benchmark',
'tinytorch/benchmarking/benchmark.py'),
'tinytorch.benchmarking.benchmark.Benchmark.__init__': ( '19_benchmarking/benchmarking.html#benchmark.__init__',
@@ -153,27 +103,6 @@ d = { 'settings': { 'branch': 'main',
'tinytorch/benchmarking/benchmark.py'),
'tinytorch.benchmarking.benchmark.test_unit_tinymlperf': ( '19_benchmarking/benchmarking.html#test_unit_tinymlperf',
'tinytorch/benchmarking/benchmark.py')},
'tinytorch.capstone': { 'tinytorch.capstone.BenchmarkReport': ( '20_capstone/capstone.html#benchmarkreport',
'tinytorch/capstone.py'),
'tinytorch.capstone.BenchmarkReport.__init__': ( '20_capstone/capstone.html#benchmarkreport.__init__',
'tinytorch/capstone.py'),
'tinytorch.capstone.BenchmarkReport._get_system_info': ( '20_capstone/capstone.html#benchmarkreport._get_system_info',
'tinytorch/capstone.py'),
'tinytorch.capstone.BenchmarkReport.benchmark_model': ( '20_capstone/capstone.html#benchmarkreport.benchmark_model',
'tinytorch/capstone.py'),
'tinytorch.capstone.SimpleMLP': ('20_capstone/capstone.html#simplemlp', 'tinytorch/capstone.py'),
'tinytorch.capstone.SimpleMLP.__init__': ( '20_capstone/capstone.html#simplemlp.__init__',
'tinytorch/capstone.py'),
'tinytorch.capstone.SimpleMLP.count_parameters': ( '20_capstone/capstone.html#simplemlp.count_parameters',
'tinytorch/capstone.py'),
'tinytorch.capstone.SimpleMLP.forward': ( '20_capstone/capstone.html#simplemlp.forward',
'tinytorch/capstone.py'),
'tinytorch.capstone.SimpleMLP.parameters': ( '20_capstone/capstone.html#simplemlp.parameters',
'tinytorch/capstone.py'),
'tinytorch.capstone.generate_submission': ( '20_capstone/capstone.html#generate_submission',
'tinytorch/capstone.py'),
'tinytorch.capstone.save_submission': ( '20_capstone/capstone.html#save_submission',
'tinytorch/capstone.py')},
'tinytorch.competition.submit': { 'tinytorch.competition.submit.generate_baseline': ( 'source/20_competition/competition_dev.html#generate_baseline',
'tinytorch/competition/submit.py'),
'tinytorch.competition.submit.generate_submission': ( 'source/20_competition/competition_dev.html#generate_submission',
@@ -188,56 +117,6 @@ d = { 'settings': { 'branch': 'main',
'tinytorch/competition/submit.py'),
'tinytorch.competition.submit.worked_example_optimization': ( 'source/20_competition/competition_dev.html#worked_example_optimization',
'tinytorch/competition/submit.py')},
'tinytorch.core.activations': { 'tinytorch.core.activations.GELU': ( '02_activations/activations.html#gelu',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.GELU.__call__': ( '02_activations/activations.html#gelu.__call__',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.GELU.backward': ( '02_activations/activations.html#gelu.backward',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.GELU.forward': ( '02_activations/activations.html#gelu.forward',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.GELU.parameters': ( '02_activations/activations.html#gelu.parameters',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.ReLU': ( '02_activations/activations.html#relu',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.ReLU.__call__': ( '02_activations/activations.html#relu.__call__',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.ReLU.backward': ( '02_activations/activations.html#relu.backward',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.ReLU.forward': ( '02_activations/activations.html#relu.forward',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.ReLU.parameters': ( '02_activations/activations.html#relu.parameters',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Sigmoid': ( '02_activations/activations.html#sigmoid',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Sigmoid.__call__': ( '02_activations/activations.html#sigmoid.__call__',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Sigmoid.backward': ( '02_activations/activations.html#sigmoid.backward',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Sigmoid.forward': ( '02_activations/activations.html#sigmoid.forward',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Sigmoid.parameters': ( '02_activations/activations.html#sigmoid.parameters',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Softmax': ( '02_activations/activations.html#softmax',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Softmax.__call__': ( '02_activations/activations.html#softmax.__call__',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Softmax.backward': ( '02_activations/activations.html#softmax.backward',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Softmax.forward': ( '02_activations/activations.html#softmax.forward',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Softmax.parameters': ( '02_activations/activations.html#softmax.parameters',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Tanh': ( '02_activations/activations.html#tanh',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Tanh.__call__': ( '02_activations/activations.html#tanh.__call__',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Tanh.backward': ( '02_activations/activations.html#tanh.backward',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Tanh.forward': ( '02_activations/activations.html#tanh.forward',
'tinytorch/core/activations.py'),
'tinytorch.core.activations.Tanh.parameters': ( '02_activations/activations.html#tanh.parameters',
'tinytorch/core/activations.py')},
'tinytorch.core.attention': { 'tinytorch.core.attention.MultiHeadAttention': ( '12_attention/attention.html#multiheadattention',
'tinytorch/core/attention.py'),
'tinytorch.core.attention.MultiHeadAttention.__call__': ( '12_attention/attention.html#multiheadattention.__call__',
@@ -251,86 +130,6 @@ d = { 'settings': { 'branch': 'main',
'tinytorch.core.attention.scaled_dot_product_attention': ( '12_attention/attention.html#scaled_dot_product_attention',
'tinytorch/core/attention.py')},
'tinytorch.core.autograd': {},
'tinytorch.core.dataloader': { 'tinytorch.core.dataloader.Compose': ( '08_dataloader/dataloader.html#compose',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.Compose.__call__': ( '08_dataloader/dataloader.html#compose.__call__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.Compose.__init__': ( '08_dataloader/dataloader.html#compose.__init__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.DataLoader': ( '08_dataloader/dataloader.html#dataloader',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.DataLoader.__init__': ( '08_dataloader/dataloader.html#dataloader.__init__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.DataLoader.__iter__': ( '08_dataloader/dataloader.html#dataloader.__iter__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.DataLoader.__len__': ( '08_dataloader/dataloader.html#dataloader.__len__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.DataLoader._collate_batch': ( '08_dataloader/dataloader.html#dataloader._collate_batch',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.Dataset': ( '08_dataloader/dataloader.html#dataset',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.Dataset.__getitem__': ( '08_dataloader/dataloader.html#dataset.__getitem__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.Dataset.__len__': ( '08_dataloader/dataloader.html#dataset.__len__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.RandomCrop': ( '08_dataloader/dataloader.html#randomcrop',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.RandomCrop.__call__': ( '08_dataloader/dataloader.html#randomcrop.__call__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.RandomCrop.__init__': ( '08_dataloader/dataloader.html#randomcrop.__init__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.RandomHorizontalFlip': ( '08_dataloader/dataloader.html#randomhorizontalflip',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.RandomHorizontalFlip.__call__': ( '08_dataloader/dataloader.html#randomhorizontalflip.__call__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.RandomHorizontalFlip.__init__': ( '08_dataloader/dataloader.html#randomhorizontalflip.__init__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.TensorDataset': ( '08_dataloader/dataloader.html#tensordataset',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.TensorDataset.__getitem__': ( '08_dataloader/dataloader.html#tensordataset.__getitem__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.TensorDataset.__init__': ( '08_dataloader/dataloader.html#tensordataset.__init__',
'tinytorch/core/dataloader.py'),
'tinytorch.core.dataloader.TensorDataset.__len__': ( '08_dataloader/dataloader.html#tensordataset.__len__',
'tinytorch/core/dataloader.py')},
'tinytorch.core.embeddings': { 'tinytorch.core.embeddings.Embedding': ( '11_embeddings/embeddings.html#embedding',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.Embedding.__call__': ( '11_embeddings/embeddings.html#embedding.__call__',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.Embedding.__init__': ( '11_embeddings/embeddings.html#embedding.__init__',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.Embedding.__repr__': ( '11_embeddings/embeddings.html#embedding.__repr__',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.Embedding.forward': ( '11_embeddings/embeddings.html#embedding.forward',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.Embedding.parameters': ( '11_embeddings/embeddings.html#embedding.parameters',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.EmbeddingLayer': ( '11_embeddings/embeddings.html#embeddinglayer',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.EmbeddingLayer.__call__': ( '11_embeddings/embeddings.html#embeddinglayer.__call__',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.EmbeddingLayer.__init__': ( '11_embeddings/embeddings.html#embeddinglayer.__init__',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.EmbeddingLayer.__repr__': ( '11_embeddings/embeddings.html#embeddinglayer.__repr__',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.EmbeddingLayer.forward': ( '11_embeddings/embeddings.html#embeddinglayer.forward',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.EmbeddingLayer.parameters': ( '11_embeddings/embeddings.html#embeddinglayer.parameters',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.PositionalEncoding': ( '11_embeddings/embeddings.html#positionalencoding',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.PositionalEncoding.__call__': ( '11_embeddings/embeddings.html#positionalencoding.__call__',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.PositionalEncoding.__init__': ( '11_embeddings/embeddings.html#positionalencoding.__init__',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.PositionalEncoding.__repr__': ( '11_embeddings/embeddings.html#positionalencoding.__repr__',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.PositionalEncoding.forward': ( '11_embeddings/embeddings.html#positionalencoding.forward',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.PositionalEncoding.parameters': ( '11_embeddings/embeddings.html#positionalencoding.parameters',
'tinytorch/core/embeddings.py'),
'tinytorch.core.embeddings.create_sinusoidal_embeddings': ( '11_embeddings/embeddings.html#create_sinusoidal_embeddings',
'tinytorch/core/embeddings.py')},
'tinytorch.core.layers': { 'tinytorch.core.layers.Dropout': ('03_layers/layers.html#dropout', 'tinytorch/core/layers.py'),
'tinytorch.core.layers.Dropout.__call__': ( '03_layers/layers.html#dropout.__call__',
'tinytorch/core/layers.py'),
@@ -485,6 +284,8 @@ d = { 'settings': { 'branch': 'main',
'tinytorch/core/tensor.py'),
'tinytorch.core.tensor.Tensor.__init__': ( '01_tensor/tensor.html#tensor.__init__',
'tinytorch/core/tensor.py'),
'tinytorch.core.tensor.Tensor.__matmul__': ( '01_tensor/tensor.html#tensor.__matmul__',
'tinytorch/core/tensor.py'),
'tinytorch.core.tensor.Tensor.__mul__': ( '01_tensor/tensor.html#tensor.__mul__',
'tinytorch/core/tensor.py'),
'tinytorch.core.tensor.Tensor.__repr__': ( '01_tensor/tensor.html#tensor.__repr__',
@@ -509,40 +310,6 @@ d = { 'settings': { 'branch': 'main',
'tinytorch.core.tensor.Tensor.sum': ('01_tensor/tensor.html#tensor.sum', 'tinytorch/core/tensor.py'),
'tinytorch.core.tensor.Tensor.transpose': ( '01_tensor/tensor.html#tensor.transpose',
'tinytorch/core/tensor.py')},
'tinytorch.core.tokenization': { 'tinytorch.core.tokenization.BPETokenizer': ( '10_tokenization/tokenization.html#bpetokenizer',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.BPETokenizer.__init__': ( '10_tokenization/tokenization.html#bpetokenizer.__init__',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.BPETokenizer._apply_merges': ( '10_tokenization/tokenization.html#bpetokenizer._apply_merges',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.BPETokenizer._build_mappings': ( '10_tokenization/tokenization.html#bpetokenizer._build_mappings',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.BPETokenizer._get_pairs': ( '10_tokenization/tokenization.html#bpetokenizer._get_pairs',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.BPETokenizer._get_word_tokens': ( '10_tokenization/tokenization.html#bpetokenizer._get_word_tokens',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.BPETokenizer.decode': ( '10_tokenization/tokenization.html#bpetokenizer.decode',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.BPETokenizer.encode': ( '10_tokenization/tokenization.html#bpetokenizer.encode',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.BPETokenizer.train': ( '10_tokenization/tokenization.html#bpetokenizer.train',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.CharTokenizer': ( '10_tokenization/tokenization.html#chartokenizer',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.CharTokenizer.__init__': ( '10_tokenization/tokenization.html#chartokenizer.__init__',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.CharTokenizer.build_vocab': ( '10_tokenization/tokenization.html#chartokenizer.build_vocab',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.CharTokenizer.decode': ( '10_tokenization/tokenization.html#chartokenizer.decode',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.CharTokenizer.encode': ( '10_tokenization/tokenization.html#chartokenizer.encode',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.Tokenizer': ( '10_tokenization/tokenization.html#tokenizer',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.Tokenizer.decode': ( '10_tokenization/tokenization.html#tokenizer.decode',
'tinytorch/core/tokenization.py'),
'tinytorch.core.tokenization.Tokenizer.encode': ( '10_tokenization/tokenization.html#tokenizer.encode',
'tinytorch/core/tokenization.py')},
'tinytorch.core.training': { 'tinytorch.core.training.CosineSchedule': ( '07_training/training.html#cosineschedule',
'tinytorch/core/training.py'),
'tinytorch.core.training.CosineSchedule.__init__': ( '07_training/training.html#cosineschedule.__init__',
@@ -575,52 +342,6 @@ d = { 'settings': { 'branch': 'main',
'tinytorch/core/training.py'),
'tinytorch.core.training.clip_grad_norm': ( '07_training/training.html#clip_grad_norm',
'tinytorch/core/training.py')},
'tinytorch.core.transformer': { 'tinytorch.core.transformer.GPT': ( '13_transformers/transformers.html#gpt',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.GPT.__call__': ( '13_transformers/transformers.html#gpt.__call__',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.GPT.__init__': ( '13_transformers/transformers.html#gpt.__init__',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.GPT._create_causal_mask': ( '13_transformers/transformers.html#gpt._create_causal_mask',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.GPT.forward': ( '13_transformers/transformers.html#gpt.forward',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.GPT.generate': ( '13_transformers/transformers.html#gpt.generate',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.GPT.parameters': ( '13_transformers/transformers.html#gpt.parameters',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.LayerNorm': ( '13_transformers/transformers.html#layernorm',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.LayerNorm.__call__': ( '13_transformers/transformers.html#layernorm.__call__',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.LayerNorm.__init__': ( '13_transformers/transformers.html#layernorm.__init__',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.LayerNorm.forward': ( '13_transformers/transformers.html#layernorm.forward',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.LayerNorm.parameters': ( '13_transformers/transformers.html#layernorm.parameters',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.MLP': ( '13_transformers/transformers.html#mlp',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.MLP.__call__': ( '13_transformers/transformers.html#mlp.__call__',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.MLP.__init__': ( '13_transformers/transformers.html#mlp.__init__',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.MLP.forward': ( '13_transformers/transformers.html#mlp.forward',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.MLP.parameters': ( '13_transformers/transformers.html#mlp.parameters',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.TransformerBlock': ( '13_transformers/transformers.html#transformerblock',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.TransformerBlock.__call__': ( '13_transformers/transformers.html#transformerblock.__call__',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.TransformerBlock.__init__': ( '13_transformers/transformers.html#transformerblock.__init__',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.TransformerBlock.forward': ( '13_transformers/transformers.html#transformerblock.forward',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.TransformerBlock.parameters': ( '13_transformers/transformers.html#transformerblock.parameters',
'tinytorch/core/transformer.py'),
'tinytorch.core.transformer.create_causal_mask': ( '13_transformers/transformers.html#create_causal_mask',
'tinytorch/core/transformer.py')},
'tinytorch.data.loader': { 'tinytorch.data.loader.Compose': ( '08_dataloader/dataloader.html#compose',
'tinytorch/data/loader.py'),
'tinytorch.data.loader.Compose.__call__': ( '08_dataloader/dataloader.html#compose.__call__',
@@ -779,118 +500,6 @@ d = { 'settings': { 'branch': 'main',
'tinytorch/optimization/quantization.py'),
'tinytorch.optimization.quantization.quantize_model': ( 'source/16_quantization/quantization_dev.html#quantize_model',
'tinytorch/optimization/quantization.py')},
'tinytorch.perf.acceleration': { 'tinytorch.perf.acceleration.fused_gelu': ( '18_acceleration/acceleration.html#fused_gelu',
'tinytorch/perf/acceleration.py'),
'tinytorch.perf.acceleration.tiled_matmul': ( '18_acceleration/acceleration.html#tiled_matmul',
'tinytorch/perf/acceleration.py'),
'tinytorch.perf.acceleration.vectorized_matmul': ( '18_acceleration/acceleration.html#vectorized_matmul',
'tinytorch/perf/acceleration.py')},
'tinytorch.perf.compression': { 'tinytorch.perf.compression.Compressor': ( '16_compression/compression.html#compressor',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.Compressor.compress_model': ( '16_compression/compression.html#compressor.compress_model',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.Compressor.magnitude_prune': ( '16_compression/compression.html#compressor.magnitude_prune',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.Compressor.measure_sparsity': ( '16_compression/compression.html#compressor.measure_sparsity',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.Compressor.structured_prune': ( '16_compression/compression.html#compressor.structured_prune',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.KnowledgeDistillation': ( '16_compression/compression.html#knowledgedistillation',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.KnowledgeDistillation.__init__': ( '16_compression/compression.html#knowledgedistillation.__init__',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.KnowledgeDistillation._cross_entropy': ( '16_compression/compression.html#knowledgedistillation._cross_entropy',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.KnowledgeDistillation._kl_divergence': ( '16_compression/compression.html#knowledgedistillation._kl_divergence',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.KnowledgeDistillation._softmax': ( '16_compression/compression.html#knowledgedistillation._softmax',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.KnowledgeDistillation.distillation_loss': ( '16_compression/compression.html#knowledgedistillation.distillation_loss',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.compress_model': ( '16_compression/compression.html#compress_model',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.low_rank_approximate': ( '16_compression/compression.html#low_rank_approximate',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.magnitude_prune': ( '16_compression/compression.html#magnitude_prune',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.measure_sparsity': ( '16_compression/compression.html#measure_sparsity',
'tinytorch/perf/compression.py'),
'tinytorch.perf.compression.structured_prune': ( '16_compression/compression.html#structured_prune',
'tinytorch/perf/compression.py')},
'tinytorch.perf.memoization': { 'tinytorch.perf.memoization.KVCache': ( '17_memoization/memoization.html#kvcache',
'tinytorch/perf/memoization.py'),
'tinytorch.perf.memoization.KVCache.__init__': ( '17_memoization/memoization.html#kvcache.__init__',
'tinytorch/perf/memoization.py'),
'tinytorch.perf.memoization.KVCache.advance': ( '17_memoization/memoization.html#kvcache.advance',
'tinytorch/perf/memoization.py'),
'tinytorch.perf.memoization.KVCache.get': ( '17_memoization/memoization.html#kvcache.get',
'tinytorch/perf/memoization.py'),
'tinytorch.perf.memoization.KVCache.get_memory_usage': ( '17_memoization/memoization.html#kvcache.get_memory_usage',
'tinytorch/perf/memoization.py'),
'tinytorch.perf.memoization.KVCache.reset': ( '17_memoization/memoization.html#kvcache.reset',
'tinytorch/perf/memoization.py'),
'tinytorch.perf.memoization.KVCache.update': ( '17_memoization/memoization.html#kvcache.update',
'tinytorch/perf/memoization.py'),
'tinytorch.perf.memoization.create_kv_cache': ( '17_memoization/memoization.html#create_kv_cache',
'tinytorch/perf/memoization.py'),
'tinytorch.perf.memoization.disable_kv_cache': ( '17_memoization/memoization.html#disable_kv_cache',
'tinytorch/perf/memoization.py'),
'tinytorch.perf.memoization.enable_kv_cache': ( '17_memoization/memoization.html#enable_kv_cache',
'tinytorch/perf/memoization.py')},
'tinytorch.perf.profiling': { 'tinytorch.perf.profiling.Profiler': ( '14_profiling/profiling.html#profiler',
'tinytorch/perf/profiling.py'),
'tinytorch.perf.profiling.Profiler.__init__': ( '14_profiling/profiling.html#profiler.__init__',
'tinytorch/perf/profiling.py'),
'tinytorch.perf.profiling.Profiler.count_flops': ( '14_profiling/profiling.html#profiler.count_flops',
'tinytorch/perf/profiling.py'),
'tinytorch.perf.profiling.Profiler.count_parameters': ( '14_profiling/profiling.html#profiler.count_parameters',
'tinytorch/perf/profiling.py'),
'tinytorch.perf.profiling.Profiler.measure_latency': ( '14_profiling/profiling.html#profiler.measure_latency',
'tinytorch/perf/profiling.py'),
'tinytorch.perf.profiling.Profiler.measure_memory': ( '14_profiling/profiling.html#profiler.measure_memory',
'tinytorch/perf/profiling.py'),
'tinytorch.perf.profiling.Profiler.profile_backward_pass': ( '14_profiling/profiling.html#profiler.profile_backward_pass',
'tinytorch/perf/profiling.py'),
'tinytorch.perf.profiling.Profiler.profile_forward_pass': ( '14_profiling/profiling.html#profiler.profile_forward_pass',
'tinytorch/perf/profiling.py'),
'tinytorch.perf.profiling.Profiler.profile_layer': ( '14_profiling/profiling.html#profiler.profile_layer',
'tinytorch/perf/profiling.py'),
'tinytorch.perf.profiling.analyze_weight_distribution': ( '14_profiling/profiling.html#analyze_weight_distribution',
'tinytorch/perf/profiling.py'),
'tinytorch.perf.profiling.quick_profile': ( '14_profiling/profiling.html#quick_profile',
'tinytorch/perf/profiling.py')},
'tinytorch.perf.quantization': { 'tinytorch.perf.quantization.QuantizedLinear': ( '15_quantization/quantization.html#quantizedlinear',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.QuantizedLinear.__call__': ( '15_quantization/quantization.html#quantizedlinear.__call__',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.QuantizedLinear.__init__': ( '15_quantization/quantization.html#quantizedlinear.__init__',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.QuantizedLinear.calibrate': ( '15_quantization/quantization.html#quantizedlinear.calibrate',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.QuantizedLinear.forward': ( '15_quantization/quantization.html#quantizedlinear.forward',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.QuantizedLinear.memory_usage': ( '15_quantization/quantization.html#quantizedlinear.memory_usage',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.QuantizedLinear.parameters': ( '15_quantization/quantization.html#quantizedlinear.parameters',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.Quantizer': ( '15_quantization/quantization.html#quantizer',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.Quantizer.compare_models': ( '15_quantization/quantization.html#quantizer.compare_models',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.Quantizer.dequantize_tensor': ( '15_quantization/quantization.html#quantizer.dequantize_tensor',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.Quantizer.quantize_model': ( '15_quantization/quantization.html#quantizer.quantize_model',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.Quantizer.quantize_tensor': ( '15_quantization/quantization.html#quantizer.quantize_tensor',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.compare_model_sizes': ( '15_quantization/quantization.html#compare_model_sizes',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.dequantize_int8': ( '15_quantization/quantization.html#dequantize_int8',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.quantize_int8': ( '15_quantization/quantization.html#quantize_int8',
'tinytorch/perf/quantization.py'),
'tinytorch.perf.quantization.quantize_model': ( '15_quantization/quantization.html#quantize_model',
'tinytorch/perf/quantization.py')},
'tinytorch.profiling.profiler': { 'tinytorch.profiling.profiler.Profiler': ( '14_profiling/profiling.html#profiler',
'tinytorch/profiling/profiler.py'),
'tinytorch.profiling.profiler.Profiler.__init__': ( '14_profiling/profiling.html#profiler.__init__',

View File

@@ -17,7 +17,7 @@
# %% auto 0
__all__ = ['BYTES_PER_FLOAT32', 'KB_TO_BYTES', 'MB_TO_BYTES', 'Tensor']
# %% ../../modules/01_tensor/tensor.ipynb 1
# %% ../../modules/01_tensor/01_tensor.ipynb 1
import numpy as np
# Constants for memory calculations
@@ -25,7 +25,7 @@ BYTES_PER_FLOAT32 = 4 # Standard float32 size in bytes
KB_TO_BYTES = 1024 # Kilobytes to bytes conversion
MB_TO_BYTES = 1024 * 1024 # Megabytes to bytes conversion
# %% ../../modules/01_tensor/tensor.ipynb 7
# %% ../../modules/01_tensor/01_tensor.ipynb 7
class Tensor:
"""Educational tensor that grows with student knowledge.
@@ -112,10 +112,38 @@ class Tensor:
f"Cannot perform matrix multiplication: {self.shape} @ {other.shape}. "
f"Inner dimensions must match: {self.shape[-1]}{other.shape[-2]}"
)
result_data = np.matmul(self.data, other.data)
# Educational implementation: explicit loops to show what matrix multiplication does
# This is intentionally slower than np.matmul to demonstrate the value of vectorization
# In Module 18 (Acceleration), students will learn to use optimized BLAS operations
a = self.data
b = other.data
# Handle 2D matrices with explicit loops (educational)
if len(a.shape) == 2 and len(b.shape) == 2:
M, K = a.shape
K2, N = b.shape
result_data = np.zeros((M, N), dtype=a.dtype)
# Explicit nested loops - students can see exactly what's happening!
# Each output element is a dot product of a row from A and a column from B
for i in range(M):
for j in range(N):
# Dot product of row i from A with column j from B
result_data[i, j] = np.dot(a[i, :], b[:, j])
else:
# For batched operations (3D+), use np.matmul for correctness
# Students will understand this once they grasp the 2D case
result_data = np.matmul(a, b)
return Tensor(result_data)
### END SOLUTION
def __matmul__(self, other):
"""Enable @ operator for matrix multiplication."""
return self.matmul(other)
def __getitem__(self, key):
"""Enable indexing and slicing operations on Tensors."""
### BEGIN SOLUTION
@@ -146,8 +174,9 @@ class Tensor:
new_shape[unknown_idx] = unknown_dim
new_shape = tuple(new_shape)
if np.prod(new_shape) != self.size:
target_size = int(np.prod(new_shape))
raise ValueError(
f"Cannot reshape tensor of size {self.size} to shape {new_shape}"
f"Total elements must match: {self.size} {target_size}"
)
reshaped_data = np.reshape(self.data, new_shape)
result = Tensor(reshaped_data, requires_grad=self.requires_grad)

View File

@@ -1,5 +1,4 @@
# tito/commands/login.py
import webbrowser
import time
import json
import urllib.parse
@@ -8,6 +7,7 @@ from argparse import ArgumentParser, Namespace
from rich.prompt import Confirm
from tito.commands.base import BaseCommand
from tito.core.auth import AuthReceiver, save_credentials, delete_credentials, ENDPOINTS, is_logged_in
from tito.core.browser import open_url
class LoginCommand(BaseCommand):
@property
@@ -73,9 +73,10 @@ class LoginCommand(BaseCommand):
query_string = urllib.parse.urlencode(params)
target_url = f"{ENDPOINTS['cli_login']}?{query_string}"
self.console.print(f"Opening browser to: [cyan]{target_url}[/cyan]")
self.console.print("Waiting for authentication...")
webbrowser.open(target_url)
# Use cross-platform browser opener
open_url(target_url, self.console, show_manual_fallback=True)
self.console.print("\n[dim]Waiting for authentication...[/dim]")
tokens = receiver.wait_for_tokens()
if tokens:
save_credentials(tokens)
@@ -110,7 +111,7 @@ class LogoutCommand(BaseCommand):
# Open browser to local logout endpoint
logout_url = f"http://127.0.0.1:{port}/logout"
self.console.print(f"Opening browser to complete logout...")
webbrowser.open(logout_url)
open_url(logout_url, self.console, show_manual_fallback=False)
# Give browser time to redirect and close
time.sleep(2.0)

View File

@@ -13,7 +13,6 @@ import sys
import os
import platform
import datetime
import webbrowser
from pathlib import Path
from argparse import ArgumentParser, Namespace
from typing import Dict, Any, Optional
@@ -28,6 +27,7 @@ from .base import BaseCommand
from .login import LoginCommand
from ..core.console import get_console
from ..core.auth import is_logged_in
from ..core.browser import open_url
def _print_file_update(console, file_path: Path) -> None:
"""Print a notification when a file is created or updated."""
@@ -402,7 +402,7 @@ class SetupCommand(BaseCommand):
))
if Confirm.ask("[bold]Update your community profile?[/bold]", default=True):
self.console.print("[dim]Opening profile editor...[/dim]")
webbrowser.open("https://tinytorch.ai/community/?action=profile")
open_url("https://tinytorch.ai/community/?action=profile", self.console, show_manual_fallback=True)
else:
self.console.print("[yellow]⚠️ Community connection failed or was cancelled. You can try again later with 'tito login'.[/yellow]")
except Exception as e:

110
tito/core/browser.py Normal file
View File

@@ -0,0 +1,110 @@
"""
Cross-platform browser opening utility for TinyTorch CLI.
Handles WSL, macOS, Linux, and Windows environments gracefully.
"""
import webbrowser
import subprocess
import platform
from typing import Optional
from rich.console import Console
from rich.panel import Panel
def is_wsl() -> bool:
    """Return True when running under WSL (Windows Subsystem for Linux).

    WSL kernels report "Microsoft" in /proc/version, so a case-insensitive
    substring check is the standard detection method. On non-Linux systems
    (or restricted environments) the file is missing/unreadable, which we
    treat as "not WSL".
    """
    try:
        with open('/proc/version', 'r') as f:
            return 'microsoft' in f.read().lower()
    # Narrow except: only file-access failures mean "not WSL". A bare
    # except here would also swallow KeyboardInterrupt/SystemExit.
    except OSError:
        return False
def open_url(url: str, console: Optional[Console] = None, show_manual_fallback: bool = True) -> bool:
    """
    Open *url* in a browser with cross-platform support (WSL, macOS, others).

    Args:
        url: The URL to open.
        console: Optional Rich console for output; a fresh one is created
            when omitted.
        show_manual_fallback: Whether to show manual copy/paste instructions
            if no browser could be launched.

    Returns:
        True if a browser was opened successfully, False otherwise.
    """
    out = console if console is not None else Console()

    # Platform-specific launchers first; generic webbrowser module as fallback.
    opened = False
    if is_wsl():
        out.print("[cyan]Detected WSL environment - opening Windows browser...[/cyan]")
        opened = _open_url_wsl(url)
    elif platform.system() == "Darwin":
        opened = _open_url_macos(url)

    if not opened:
        try:
            opened = webbrowser.open(url)
        except Exception:
            # webbrowser can raise on exotic/headless setups; treat as failure.
            pass

    if opened:
        out.print(f"[green]✓[/green] Browser opened to: [cyan]{url}[/cyan]")
        return True

    # Nothing worked — tell the user how to proceed manually.
    if show_manual_fallback:
        out.print()
        out.print(Panel(
            f"[yellow]⚠️ Could not open browser automatically[/yellow]\n\n"
            f"Please manually open this URL in your browser:\n\n"
            f"[cyan]{url}[/cyan]\n\n"
            f"Copy and paste this link into your browser to continue.",
            title="Manual Browser Access Required",
            border_style="yellow"
        ))
        out.print()
    else:
        out.print(f"[yellow]⚠️ Could not open browser. Please manually visit:[/yellow] [cyan]{url}[/cyan]")
    return False
def _open_url_wsl(url: str) -> bool:
    """Open *url* in the Windows default browser from inside WSL.

    Tries the Windows launchers in order: cmd.exe's ``start``, then
    PowerShell's ``Start-Process``. Returns True as soon as one succeeds,
    False if both fail or if invoking the Windows interop layer raises.
    """
    launchers = [
        ['cmd.exe', '/c', 'start', url],
        ['powershell.exe', '-Command', f'Start-Process "{url}"'],
    ]
    for argv in launchers:
        try:
            completed = subprocess.run(
                argv,
                capture_output=True,
                timeout=5
            )
        except Exception:
            # Interop binary missing or timed out — give up entirely,
            # matching the single try/except of the original flow.
            return False
        if completed.returncode == 0:
            return True
    return False
def _open_url_macos(url: str) -> bool:
    """Open *url* via macOS's ``open`` command; return True on success."""
    try:
        proc = subprocess.run(
            ['open', url],
            capture_output=True,
            timeout=5
        )
    except Exception:
        # `open` unavailable or timed out — report failure to the caller.
        return False
    return proc.returncode == 0