mirror of
https://github.com/harvard-edge/cs249r_book.git
synced 2026-03-11 17:49:25 -05:00
Complete MLSYSIM v0.1.0 implementation with: - Documentation website (Quarto): landing page with animated hero and capability carousel, 4 tutorials (hello world, LLM serving, distributed training, sustainability), hardware/model/fleet/infra catalogs, solver guide, whitepaper, math foundations, glossary, and full quartodoc API reference - Typed registry system: Hardware (18 devices across 5 tiers), Models (15 workloads), Systems (fleets, clusters, fabrics), Infrastructure (grid profiles, rack configs, datacenters) - Core types: Pint-backed Quantity, Metadata provenance tracking, custom exception hierarchy (OOMError, SLAViolation) - SimulationConfig with YAML/JSON loading and pre-validation - Scenario system tying workloads to systems with SLA constraints - Multi-level evaluation scorecard (feasibility, performance, macro) - Examples, tests, and Jetson Orin NX spec fix (100 → 25 TFLOP/s) Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
51 lines
1.6 KiB
Python
"""
|
|
Hello World: Ten Minutes to mlsysim
|
|
===================================
|
|
This tutorial demonstrates the end-to-end workflow of mlsysim:
|
|
1. Load a Model and Hardware.
|
|
2. Solve single-node performance.
|
|
3. Scale to a fleet.
|
|
4. Calculate Sustainability and Economics.
|
|
"""
import mlsysim
from mlsysim import load_config
def main():
    """Run the end-to-end mlsysim hello-world tutorial.

    Walks through the full workflow: define a simulation, solve
    single-node performance, evaluate a vetted scenario, and render
    a visual scorecard. Prints progress to stdout; returns None.
    """
    print("--- 1. Define Your Simulation ---")
    user_choice = {
        "model": "ResNet50",
        "hardware": "A100",
        "batch_size": 32,
        "fleet_size": 128,
        "region": "Quebec",
    }

    # load_config automatically validates physical feasibility!
    config = load_config(user_choice)
    print(f"Config Validated: {config.model} on {config.hardware} in {config.region}\n")

    print("--- 2. Single-Node Performance (The Iron Law) ---")
    # Resolve the registry entries named in the validated config.
    model = getattr(mlsysim.Models, config.model)
    hardware = getattr(mlsysim.Hardware, config.hardware)

    perf = mlsysim.Engine.solve(model, hardware, batch_size=config.batch_size)
    print(f"Latency: {perf.latency}")
    print(f"Throughput: {perf.throughput}")
    print(f"Bottleneck: {perf.bottleneck}\n")

    print("--- 3. Scenario Evaluation & Visualization ---")
    # Using a vetted lighthouse scenario
    scenario = mlsysim.Applications.AutoDrive
    evaluation = scenario.evaluate()
    print(evaluation.scorecard())

    # Visual Scorecard — fig/ax are kept so readers can customize the plot.
    fig, ax = mlsysim.plot_evaluation_scorecard(evaluation)
    print("\nVisual Scorecard generated.")

    print("\nSimulation Complete. Check mlsysbook.ai for advanced labs!")
if __name__ == "__main__":
|
|
main()
|