mirror of
https://github.com/MLSysBook/TinyTorch.git
synced 2026-03-12 02:09:16 -05:00
Add live spinner to milestone training loops
Use rich.live.Live to show real-time progress indicator during epoch training. This gives visual feedback that code is running during potentially slow operations.
This commit is contained in:
@@ -40,6 +40,9 @@ from tinytorch import Tensor, Linear, Sigmoid, BinaryCrossEntropyLoss, SGD
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.table import Table
|
||||
from rich.live import Live
|
||||
from rich.spinner import Spinner
|
||||
from rich.text import Text
|
||||
from rich import box
|
||||
|
||||
console = Console()
|
||||
@@ -107,40 +110,48 @@ def generate_data(n_samples=100, seed=None):
|
||||
|
||||
def train_perceptron(model, X, y, epochs=100, lr=0.1):
    """Train the perceptron on (X, y) with full-batch SGD.

    Shows a live spinner line (via rich.live.Live) so the user gets
    real-time feedback while epochs run, plus a persistent progress
    line every 10 epochs.

    Args:
        model: Callable model; model(X) returns sigmoid probabilities.
        X: Input Tensor, shape (n_samples, n_features) — assumed; confirm at caller.
        y: Target Tensor of 0/1 labels matching X's first dimension.
        epochs: Number of full-batch training iterations (default 100).
        lr: SGD learning rate (default 0.1).

    Returns:
        dict with keys "loss" and "accuracy", each a list with one
        entry per epoch.
    """
    # Setup training components
    loss_fn = BinaryCrossEntropyLoss()
    optimizer = SGD(model.parameters(), lr=lr)

    console.print("\n[bold cyan]🔥 Starting Training...[/bold cyan]\n")

    history = {"loss": [], "accuracy": []}

    # NOTE(review): the rendered diff contained both the old plain loop and
    # the new Live loop as sibling code, which would train for 2*epochs and
    # double-fill history. Only the Live-based loop (the commit's intent,
    # per its message) is kept here.
    with Live(console=console, refresh_per_second=10) as live:
        for epoch in range(epochs):
            # Forward pass
            predictions = model(X)
            loss = loss_fn(predictions, y)

            # Backward pass
            loss.backward()

            # Update weights, then clear gradients for the next epoch
            optimizer.step()
            optimizer.zero_grad()

            # Calculate accuracy: threshold sigmoid outputs at 0.5
            pred_classes = (predictions.data > 0.5).astype(int)
            accuracy = (pred_classes == y.data).mean()

            history["loss"].append(loss.data.item())
            history["accuracy"].append(accuracy)

            # Update spinner with current progress
            spinner_text = Text()
            spinner_text.append("⠋ ", style="cyan")
            spinner_text.append(f"Epoch {epoch+1:3d}/{epochs} Loss: {loss.data:.4f} Accuracy: {accuracy:.1%}")
            live.update(spinner_text)

            # Print a persistent progress line every 10 epochs
            # (live.console.print keeps it above the transient spinner)
            if (epoch + 1) % 10 == 0:
                live.console.print(f"Epoch {epoch+1:3d}/{epochs} Loss: {loss.data:.4f} Accuracy: {accuracy:.1%}")

    console.print("\n[bold green]✅ Training Complete![/bold green]\n")

    return history
@@ -64,6 +64,8 @@ import numpy as np
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from rich.panel import Panel
|
||||
from rich.live import Live
|
||||
from rich.text import Text
|
||||
from rich import box
|
||||
|
||||
# Add project root to path
|
||||
@@ -160,40 +162,50 @@ class XORNetwork:
|
||||
def train_network(model, X, y, epochs=500, lr=0.5):
    """Train a multi-layer network on XOR with full-batch SGD.

    This WILL succeed - hidden layers solve the problem!

    A rich Live spinner gives real-time feedback during training, and a
    persistent progress line is printed every 100 epochs.

    Args:
        model: Callable multi-layer model; model(X) returns sigmoid probabilities.
        X: Input Tensor of XOR samples — assumed shape (n_samples, 2); confirm at caller.
        y: Target Tensor of 0/1 labels.
        epochs: Number of full-batch training iterations (default 500).
        lr: SGD learning rate (default 0.5).

    Returns:
        dict with keys "loss" and "accuracy", each a list with one
        entry per epoch.
    """
    loss_fn = BinaryCrossEntropyLoss()
    optimizer = SGD(model.parameters(), lr=lr)

    console.print("\n[bold cyan]🔥 Training Multi-Layer Network...[/bold cyan]")
    console.print("[dim](This will work - hidden layers solve XOR!)[/dim]\n")

    history = {"loss": [], "accuracy": []}

    # NOTE(review): the rendered diff contained the pre-change loop and the
    # Live-based loop back to back, which would train for 2*epochs. Only the
    # Live-based loop (the commit's intent) is kept here.
    with Live(console=console, refresh_per_second=10) as live:
        for epoch in range(epochs):
            # Forward pass
            predictions = model(X)
            loss = loss_fn(predictions, y)

            # Backward pass (through hidden layers!)
            loss.backward()

            # Update weights, then clear gradients for the next epoch
            optimizer.step()
            optimizer.zero_grad()

            # Calculate accuracy: threshold sigmoid outputs at 0.5
            pred_classes = (predictions.data > 0.5).astype(int)
            accuracy = (pred_classes == y.data).mean()

            history["loss"].append(loss.data.item())
            history["accuracy"].append(accuracy)

            # Update spinner with current progress
            spinner_text = Text()
            spinner_text.append("⠋ ", style="cyan")
            spinner_text.append(f"Epoch {epoch+1:3d}/{epochs} Loss: {loss.data:.4f} Accuracy: {accuracy:.1%}")
            live.update(spinner_text)

            # Print a persistent progress line every 100 epochs
            # (live.console.print keeps it above the transient spinner)
            if (epoch + 1) % 100 == 0:
                live.console.print(f"Epoch {epoch+1:3d}/{epochs} Loss: {loss.data:.4f} Accuracy: {accuracy:.1%}")

    console.print("\n[green]✅ Training Complete - XOR Solved![/green]")

    return history
@@ -58,6 +58,8 @@ from tinytorch.data.loader import TensorDataset, DataLoader
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.table import Table
|
||||
from rich.live import Live
|
||||
from rich.text import Text
|
||||
from rich import box
|
||||
|
||||
console = Console()
|
||||
@@ -374,51 +376,59 @@ def train_mlp():
|
||||
"train_accuracy": [],
|
||||
"test_accuracy": []
|
||||
}
|
||||
|
||||
for epoch in range(epochs):
|
||||
epoch_loss = 0.0
|
||||
batch_count = 0
|
||||
|
||||
for batch_images, batch_labels in train_loader:
|
||||
# Forward pass
|
||||
logits = model(batch_images)
|
||||
loss = loss_fn(logits, batch_labels)
|
||||
|
||||
# Backward pass
|
||||
loss.backward()
|
||||
|
||||
# Update weights
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
epoch_loss += loss.data
|
||||
batch_count += 1
|
||||
|
||||
avg_loss = epoch_loss / batch_count
|
||||
|
||||
# Evaluate on both train and test to detect overfitting
|
||||
train_acc, _ = evaluate_accuracy(model, train_images, train_labels)
|
||||
test_acc, _ = evaluate_accuracy(model, test_images, test_labels)
|
||||
|
||||
history["train_loss"].append(avg_loss)
|
||||
history["train_accuracy"].append(train_acc)
|
||||
history["test_accuracy"].append(test_acc)
|
||||
|
||||
if initial_loss is None:
|
||||
initial_loss = avg_loss
|
||||
|
||||
# Print progress every 5 epochs
|
||||
if (epoch + 1) % 5 == 0:
|
||||
gap = train_acc - test_acc
|
||||
gap_indicator = "⚠️" if gap > 10 else "✓"
|
||||
console.print(
|
||||
f"Epoch {epoch+1:2d}/{epochs} "
|
||||
f"Loss: {avg_loss:.4f} "
|
||||
f"Train: {train_acc:.1f}% "
|
||||
f"Test: {test_acc:.1f}% "
|
||||
f"{gap_indicator} Gap: {gap:.1f}%"
|
||||
)
|
||||
|
||||
|
||||
# Use Live display with spinner for real-time feedback
|
||||
with Live(console=console, refresh_per_second=10) as live:
|
||||
for epoch in range(epochs):
|
||||
epoch_loss = 0.0
|
||||
batch_count = 0
|
||||
|
||||
for batch_images, batch_labels in train_loader:
|
||||
# Forward pass
|
||||
logits = model(batch_images)
|
||||
loss = loss_fn(logits, batch_labels)
|
||||
|
||||
# Backward pass
|
||||
loss.backward()
|
||||
|
||||
# Update weights
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
epoch_loss += loss.data
|
||||
batch_count += 1
|
||||
|
||||
# Update spinner with current batch progress
|
||||
spinner_text = Text()
|
||||
spinner_text.append("⠋ ", style="cyan")
|
||||
spinner_text.append(f"Epoch {epoch+1:2d}/{epochs} Batch {batch_count}/{len(train_loader)}")
|
||||
live.update(spinner_text)
|
||||
|
||||
avg_loss = epoch_loss / batch_count
|
||||
|
||||
# Evaluate on both train and test to detect overfitting
|
||||
train_acc, _ = evaluate_accuracy(model, train_images, train_labels)
|
||||
test_acc, _ = evaluate_accuracy(model, test_images, test_labels)
|
||||
|
||||
history["train_loss"].append(avg_loss)
|
||||
history["train_accuracy"].append(train_acc)
|
||||
history["test_accuracy"].append(test_acc)
|
||||
|
||||
if initial_loss is None:
|
||||
initial_loss = avg_loss
|
||||
|
||||
# Print progress every 5 epochs
|
||||
if (epoch + 1) % 5 == 0:
|
||||
gap = train_acc - test_acc
|
||||
gap_indicator = "⚠️" if gap > 10 else "✓"
|
||||
live.console.print(
|
||||
f"Epoch {epoch+1:2d}/{epochs} "
|
||||
f"Loss: {avg_loss:.4f} "
|
||||
f"Train: {train_acc:.1f}% "
|
||||
f"Test: {test_acc:.1f}% "
|
||||
f"{gap_indicator} Gap: {gap:.1f}%"
|
||||
)
|
||||
|
||||
console.print("\n[green]✅ Training Complete![/green]")
|
||||
|
||||
final_train_acc = history["train_accuracy"][-1]
|
||||
|
||||
Reference in New Issue
Block a user