Improve MLPerf milestone and add centralized progress sync

MLPerf changes:
- Show quantization and pruning individually (not combined)
- Added 'Challenge: Combine Both' as future competition
- Clearer output showing each technique's impact

Progress sync:
- Added _offer_progress_sync() to milestone completion
- Uses centralized SubmissionHandler (same as module completion)
- Prompts user to sync achievement after milestone success
- Single endpoint for all progress updates
This commit is contained in:
Vijay Janapa Reddi
2025-12-02 23:40:57 -08:00
parent 7f6dd19c10
commit 9eabcbab89
2 changed files with 59 additions and 50 deletions

View File

@@ -451,27 +451,6 @@ def main():
console.print(table)
console.print()
# ========================================================================
# STEP 4: COMBINED
# ========================================================================
console.print(Panel(
"[bold green]🎯 STEP 4: Combined Optimization[/bold green]\n"
"Apply BOTH quantization AND pruning",
border_style="green"
))
# Apply both
combined_weights = prune_weights(baseline_weights, sparsity=0.5)
combined_weights = quantize_weights(combined_weights, bits=8)
model.set_weights(combined_weights)
combined_size = quant_size # Still quantized
combined_acc = evaluate_accuracy(model, X_test, y_test)
# Calculate effective compression (quantization + sparsity)
effective_compression = 4 * 2 # 4× from quantization, potential 2× from sparsity
console.print()
# ========================================================================
@@ -483,36 +462,30 @@ def main():
console.print()
# Final comparison table
table = Table(title="🎖️ Final Standings", box=box.DOUBLE)
table.add_column("Configuration", style="cyan", width=20)
table = Table(title="🎖️ Optimization Results", box=box.DOUBLE)
table.add_column("Technique", style="cyan", width=20)
table.add_column("Size", style="yellow", justify="right")
table.add_column("Accuracy", style="green", justify="right")
table.add_column("Compression", style="bold magenta", justify="right")
table.add_row(
"🥇 Baseline (FP32)",
"📊 Baseline (FP32)",
f"{baseline_size:,} B",
f"{baseline_acc:.1f}%",
"1×"
)
table.add_row(
"🥈 + Quantization",
"🗜️ Quantization (INT8)",
f"{quant_size:,} B",
f"{quant_acc:.1f}%",
"[green]4×[/green]"
)
table.add_row(
"🥉 + Pruning",
"✂️ Pruning (50%)",
f"~{baseline_size//2:,} B*",
f"{pruned_acc:.1f}%",
"[green]2×[/green]"
)
table.add_row(
"🏆 Combined",
f"~{baseline_size//8:,} B*",
f"{combined_acc:.1f}%",
f"[bold green]{effective_compression}×[/bold green]"
)
console.print(table)
console.print("[dim]* Effective size with sparse storage[/dim]")
@@ -529,32 +502,33 @@ def main():
f"{sparsity:.0f}% weights removed\n"
f"{abs(baseline_acc - pruned_acc):.1f}% accuracy impact\n"
f" • [dim]Used by: Mobile models, edge deployment[/dim]\n\n"
f" [cyan]Combined:[/cyan]\n"
f"{effective_compression}× total compression\n"
f"{abs(baseline_acc - combined_acc):.1f}% accuracy impact\n"
f" • [dim]The secret sauce of production ML![/dim]",
f"💡 [yellow]Challenge: Combine Both![/yellow]\n"
f"Can you achieve 8× compression with <5% accuracy loss?\n"
f"[dim]This is a future competition track![/dim]",
border_style="cyan",
box=box.ROUNDED
))
# Verdict
accuracy_drop = baseline_acc - combined_acc
# Verdict - based on best individual technique
best_compression = max(4, int(sparsity / 25)) # Rough estimate
accuracy_drop = max(abs(baseline_acc - quant_acc), abs(baseline_acc - pruned_acc))
if accuracy_drop < 5:
verdict = "[bold green]🏆 EXCELLENT![/bold green] Great compression with minimal accuracy loss!"
elif accuracy_drop < 10:
elif accuracy_drop < 15:
verdict = "[bold yellow]🥈 GOOD![/bold yellow] Solid compression, acceptable accuracy tradeoff."
else:
verdict = "[bold red]⚠️ HIGH LOSS[/bold red] - Consider less aggressive settings."
verdict = "[bold red]⚠️ HIGH LOSS[/bold red] - The model may need more training first."
console.print(Panel(
f"{verdict}\n\n"
f"[dim]You achieved {effective_compression}× compression with {accuracy_drop:.1f}% accuracy loss.[/dim]\n\n"
f"[dim]Quantization: 4× compression, {abs(baseline_acc - quant_acc):.1f}% accuracy change[/dim]\n"
f"[dim]Pruning: {sparsity:.0f}% sparsity, {abs(baseline_acc - pruned_acc):.1f}% accuracy change[/dim]\n\n"
"[bold cyan]What you learned:[/bold cyan]\n"
" ✅ How to profile ML models\n"
" ✅ How to profile ML models (parameters, size, latency)\n"
" ✅ Quantization: reduce precision for smaller models\n"
" ✅ Pruning: remove weights for sparser models\n"
" ✅ The accuracy-efficiency tradeoff\n\n"
"[bold]This is how production ML systems are deployed![/bold]",
" ✅ The accuracy-efficiency tradeoff in production ML\n\n"
"[bold]This is how production ML systems are optimized![/bold]",
title="🎯 Milestone 06 Complete",
border_style="green",
box=box.DOUBLE

View File

@@ -1045,10 +1045,6 @@ class MilestoneCommand(BaseCommand):
if result.returncode == 0:
# Success! Mark milestone as complete
self._mark_milestone_complete(milestone_id)
# Progress tracking is handled by _mark_milestone_complete
# which updates .tito/milestones.json
pass
console.print(Panel(
f"[bold green]🏆 MILESTONE ACHIEVED![/bold green]\n\n"
@@ -1058,11 +1054,14 @@ class MilestoneCommand(BaseCommand):
f"• Every line of code: YOUR implementations\n"
f"• Every tensor operation: YOUR Tensor class\n"
f"• Every gradient: YOUR autograd\n\n"
f"[cyan]Achievement saved to your progress![/cyan]",
f"[cyan]Achievement saved locally![/cyan]",
title="✨ Achievement Unlocked ✨",
border_style="bright_green",
padding=(1, 2)
))
# Offer to sync progress (uses centralized SubmissionHandler)
self._offer_progress_sync(milestone_id, milestone['name'])
# Show next steps
next_id = str(int(milestone_id) + 1).zfill(2)
@@ -1221,4 +1220,40 @@ class MilestoneCommand(BaseCommand):
with open(progress_file, 'w') as f:
json.dump(milestone_data, f, indent=2)
except IOError:
pass
pass
def _offer_progress_sync(self, milestone_id: str, milestone_name: str) -> None:
    """
    Offer to sync progress after a milestone completes successfully.

    Uses the centralized SubmissionHandler so milestone syncing goes
    through the same endpoint as module completion.

    Args:
        milestone_id: Zero-padded milestone identifier (e.g. "06").
        milestone_name: Human-readable milestone name. NOTE(review):
            currently unused in the output; kept for interface stability.
    """
    # Local imports keep CLI startup fast and avoid import cycles.
    from ..core import auth
    from ..core.submission import SubmissionHandler
    from rich.prompt import Confirm

    console = self.console

    # Guard clause: without a login we can only hint at the feature.
    if not auth.is_logged_in():
        console.print()
        console.print("[dim]💡 Run 'tito login' to sync your achievements to the leaderboard![/dim]")
        return

    console.print()
    should_sync = Confirm.ask(
        "[cyan]Would you like to sync this achievement to your profile?[/cyan]",
        default=True
    )
    if not should_sync:
        return

    try:
        # Centralized handler: reads both progress.json and
        # .tito/milestones.json, so one call covers modules and milestones.
        handler = SubmissionHandler(self.config, console)
        handler.sync_progress()
        console.print(f"[green]✅ Milestone {milestone_id} synced to your profile![/green]")
    except Exception as e:
        # Best-effort: a sync failure must not mask the milestone success;
        # local state is already saved by _mark_milestone_complete.
        console.print(f"[yellow]⚠️ Could not sync: {e}[/yellow]")
        console.print("[dim]Your progress is saved locally and will sync next time.[/dim]")