mirror of
https://github.com/harvard-edge/cs249r_book.git
synced 2026-04-29 17:20:21 -05:00
refactor(maintenance): consolidate release notes scripts into unified tool
Addresses script organization and maintainability: - Merged generate_release_notes.py and release_notes.py into changelog-releasenotes.py - Removed deprecated change_log.py (superseded by changelog-releasenotes.py) - Added diagram-*.pdf to .gitignore (Quarto auto-generated cache files) This consolidation simplifies the release workflow and eliminates duplicate code.
This commit is contained in:
3
.gitignore
vendored
3
.gitignore
vendored
@@ -43,6 +43,9 @@ __pycache__/
|
||||
_quarto_test.yml
|
||||
swap_quarto.bat
|
||||
|
||||
# Diagram cache files (auto-generated by Quarto)
|
||||
diagram-*.pdf
|
||||
|
||||
# Script backup files
|
||||
*.bak
|
||||
*.backup
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -347,9 +347,9 @@ chapter_lookup = [
|
||||
("contents/core/ops/ops.qmd", "ML Operations", 13),
|
||||
("contents/core/ondevice_learning/ondevice_learning.qmd", "On-Device Learning", 14),
|
||||
("contents/core/privacy_security/privacy_security.qmd", "Security & Privacy", 15),
|
||||
("contents/core/responsible_ai/responsible_ai.qmd", "Responsible AI", 16),
|
||||
("contents/core/sustainable_ai/sustainable_ai.qmd", "Sustainable AI", 17),
|
||||
("contents/core/robust_ai/robust_ai.qmd", "Robust AI", 18),
|
||||
("contents/core/robust_ai/robust_ai.qmd", "Robust AI", 16),
|
||||
("contents/core/responsible_ai/responsible_ai.qmd", "Responsible AI", 17),
|
||||
("contents/core/sustainable_ai/sustainable_ai.qmd", "Sustainable AI", 18),
|
||||
("contents/core/ai_for_good/ai_for_good.qmd", "AI for Good", 19),
|
||||
("contents/core/frontiers/frontiers.qmd", "Frontiers", 20),
|
||||
("contents/core/conclusion/conclusion.qmd", "Conclusion", 21),
|
||||
@@ -483,6 +483,19 @@ def extract_chapter_title(file_path):
|
||||
else:
|
||||
return base.replace('_', ' ').replace('.qmd', '').title()
|
||||
|
||||
def generate_impact_bar(change_count):
    """Render a five-segment bar visualizing the size of a change.

    Args:
        change_count: Total number of changed lines (added + removed).

    Returns:
        A five-character bar string; more filled segments mean a larger
        change.
    """
    # (minimum line count, bar) pairs, largest bucket first.
    buckets = (
        (225, "█████"),  # Major: 225+ lines
        (72, "████░"),   # Large: 72-224 lines
        (15, "███░░"),   # Medium: 15-71 lines
        (5, "██░░░"),    # Small: 5-14 lines
    )
    for minimum, bar in buckets:
        if change_count >= minimum:
            return bar
    return "█░░░░"  # Tiny: 1-4 lines
|
||||
|
||||
def sort_by_impact_level(updates):
|
||||
def extract_impact_level(update):
|
||||
# Extract impact bars from the start of each update
|
||||
@@ -671,14 +684,17 @@ def generate_entry(start_date, end_date=None, verbose=False, is_latest=False, ai
|
||||
|
||||
# Generate summary based on AI mode
|
||||
chapter_title = extract_chapter_title(file_path)
|
||||
total_changes = added + removed
|
||||
impact_bar = generate_impact_bar(total_changes)
|
||||
|
||||
if ai_mode:
|
||||
summary_text = generate_ai_summary(chapter_title, commit_msgs, file_path, verbose=verbose)
|
||||
summary = f"- **{chapter_title}**: {summary_text}"
|
||||
summary = f"- `{impact_bar}` **{chapter_title}**: {summary_text}"
|
||||
else:
|
||||
# Create simple summary based on file path and commit count
|
||||
commit_count = len([msg for msg in commit_msgs.split('\n') if msg.strip()])
|
||||
summary_text = f"Updated content with {commit_count} changes"
|
||||
summary = f"- **{chapter_title}**: {summary_text}"
|
||||
summary = f"- `{impact_bar}` **{chapter_title}**: {summary_text}"
|
||||
|
||||
# Show the generated summary
|
||||
print(f" 📝 {summary_text}")
|
||||
@@ -969,7 +985,7 @@ if __name__ == "__main__":
|
||||
parser.add_argument("--demo", action="store_true", help="Generate a demo changelog entry with sample data.")
|
||||
parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output.")
|
||||
parser.add_argument("-q", "--quarto-config", type=str, help="Path to quarto config file (default: quarto/config/_quarto-pdf.yml)")
|
||||
parser.add_argument("--ai-mode", action="store_true", help="Enable AI-generated summaries instead of simple change counts.")
|
||||
parser.add_argument("--ai-mode", type=lambda x: x.lower() == 'true', default=True, help="Enable AI-generated summaries with detailed breakdowns (default: true). Use --ai-mode=false to disable.")
|
||||
parser.add_argument("--ollama-url", default="http://localhost:11434", help="Ollama API URL for AI summaries.")
|
||||
parser.add_argument("--ollama-model", default="gemma2:9b", help="Ollama model to use for AI summaries.")
|
||||
|
||||
|
||||
@@ -1,232 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Generate comprehensive release notes for the Machine Learning Systems textbook.
|
||||
|
||||
This script helps create detailed release notes by analyzing:
|
||||
- Git commits since last release
|
||||
- Changed files and directories
|
||||
- Content updates and improvements
|
||||
- Technical changes and infrastructure updates
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import json
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
def get_git_commits_since_tag(tag):
    """Return one-line commit summaries made since *tag*.

    Args:
        tag: Git tag (or any ref) to compare against HEAD.

    Returns:
        List of "<sha> <subject>" strings with merges excluded, or an
        empty list when git fails or there are no new commits.
    """
    try:
        result = subprocess.run(
            ['git', 'log', f'{tag}..HEAD', '--oneline', '--no-merges'],
            capture_output=True, text=True, check=True
        )
        return result.stdout.strip().split('\n') if result.stdout.strip() else []
    except (subprocess.CalledProcessError, FileNotFoundError):
        # CalledProcessError: unknown tag / not a git repo.
        # FileNotFoundError: git binary not installed — previously crashed.
        return []
|
||||
|
||||
def get_changed_files_since_tag(tag):
    """Return paths of files changed between *tag* and HEAD.

    Args:
        tag: Git tag (or any ref) to diff against HEAD.

    Returns:
        List of repository-relative paths, or an empty list when git
        fails or nothing changed.
    """
    try:
        result = subprocess.run(
            ['git', 'diff', '--name-only', f'{tag}..HEAD'],
            capture_output=True, text=True, check=True
        )
        return result.stdout.strip().split('\n') if result.stdout.strip() else []
    except (subprocess.CalledProcessError, FileNotFoundError):
        # CalledProcessError: unknown tag / not a git repo.
        # FileNotFoundError: git binary not installed — previously crashed.
        return []
|
||||
|
||||
def categorize_changes(files):
    """Group changed file paths into broad change categories.

    Args:
        files: Iterable of repository-relative paths; empty entries are
            ignored.

    Returns:
        Dict mapping category name ('content', 'infrastructure',
        'documentation', 'labs', 'scripts', 'workflows', 'other') to the
        list of matching paths, in input order.
    """
    categories = {name: [] for name in (
        'content', 'infrastructure', 'documentation',
        'labs', 'scripts', 'workflows', 'other',
    )}

    def bucket_for(path):
        # Check order matters: more specific prefixes come before broader
        # ones (e.g. tools/scripts/ is a workflow, remaining tools/ are scripts).
        if path.startswith(('contents/core/', 'contents/frontmatter/')):
            return 'content'
        if path.startswith('contents/labs/'):
            return 'labs'
        if path.startswith(('.github/workflows/', 'tools/scripts/')):
            return 'workflows'
        if path.startswith('docs/') or path.endswith('.md'):
            return 'documentation'
        if path.startswith(('binder', 'netlify.toml', '.gitignore')):
            return 'infrastructure'
        if path.startswith('tools/'):
            return 'scripts'
        return 'other'

    for path in files:
        if path:
            categories[bucket_for(path)].append(path)

    return categories
|
||||
|
||||
def analyze_commit_messages(commits):
    """Bucket commits into themes based on keywords in their messages.

    Args:
        commits: Iterable of "<sha> <subject>" one-line commit strings;
            empty entries are ignored.

    Returns:
        Dict mapping theme name ('content', 'infrastructure', 'bugfixes',
        'features', 'documentation', 'other') to the matching commits.
        Themes are checked in priority order (bugfixes first); the first
        match wins and unmatched commits land in 'other'.
    """
    themes = {
        'content': [],
        'infrastructure': [],
        'bugfixes': [],
        'features': [],
        'documentation': [],
        'other': []
    }

    # Keyword groups in priority order; mirrors the original elif chain.
    theme_keywords = [
        ('bugfixes', ('fix', 'bug', 'error', 'issue')),
        ('features', ('feat', 'add', 'new', 'implement')),
        ('content', ('content', 'chapter', 'section')),
        ('infrastructure', ('workflow', 'ci', 'deploy', 'build')),
        ('documentation', ('doc', 'readme', 'guide')),
    ]

    for commit in commits:
        if not commit:
            continue

        # Strip the leading SHA ("<sha> <subject>") when present.
        msg = commit.split(' ', 1)[1] if ' ' in commit else commit
        lowered = msg.lower()  # lowercase once instead of per keyword group

        for theme, keywords in theme_keywords:
            if any(keyword in lowered for keyword in keywords):
                themes[theme].append(commit)
                break
        else:
            themes['other'].append(commit)

    return themes
|
||||
|
||||
def generate_release_notes(version, description, previous_version):
    """Build the full markdown release-notes document.

    Args:
        version: New release version string (e.g. "v1.2.0").
        description: One-line human-written release description.
        previous_version: Tag to diff against when collecting changes.

    Returns:
        The release notes as a markdown string.
    """

    print(f"🔍 Analyzing changes since {previous_version}...")

    # Get commits and files changed since the previous release.
    commits = get_git_commits_since_tag(previous_version)
    files = get_changed_files_since_tag(previous_version)

    # Bucket the changed files by repository area.
    # (The commit-theme analysis was computed here before but never used
    # in the output, so the pure call has been removed.)
    file_categories = categorize_changes(files)

    # Header and release metadata.
    notes = f"""## 📚 Release {version}

**{description}**

### 📋 Release Information
- **Type**: Release
- **Previous Version**: {previous_version}
- **Generated at**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
- **Total Changes**: {len(commits)} commits, {len(files)} files modified

### 📝 Summary of Changes

"""

    # Content changes
    if file_categories['content']:
        notes += "#### 📖 Content Updates\n"
        notes += f"- **Modified chapters**: {len([f for f in file_categories['content'] if f.endswith('.qmd')])} files\n"
        notes += f"- **Bibliography updates**: {len([f for f in file_categories['content'] if f.endswith('.bib')])} files\n"
        notes += "\n"

    # Lab changes
    if file_categories['labs']:
        notes += "#### 🧪 Lab Updates\n"
        notes += f"- **Lab materials**: {len(file_categories['labs'])} files modified\n"
        notes += "\n"

    # Infrastructure changes
    if file_categories['infrastructure'] or file_categories['workflows']:
        notes += "#### 🔧 Infrastructure & Workflow Updates\n"
        if file_categories['workflows']:
            notes += f"- **CI/CD workflows**: {len(file_categories['workflows'])} files updated\n"
        if file_categories['infrastructure']:
            notes += f"- **Build system**: {len(file_categories['infrastructure'])} files modified\n"
        notes += "\n"

    # Documentation changes
    if file_categories['documentation']:
        notes += "#### 📚 Documentation Updates\n"
        notes += f"- **Documentation**: {len(file_categories['documentation'])} files updated\n"
        notes += "\n"

    # Script changes
    if file_categories['scripts']:
        notes += "#### 🛠️ Tool & Script Updates\n"
        notes += f"- **Tools and scripts**: {len(file_categories['scripts'])} files modified\n"
        notes += "\n"

    # Key commits (capped at ten so the notes stay readable).
    if commits:
        notes += "#### 🔑 Key Changes\n"
        for i, commit in enumerate(commits[:10], 1):  # Show first 10 commits
            notes += f"{i}. {commit}\n"
        if len(commits) > 10:
            notes += f"... and {len(commits) - 10} more commits\n"
        notes += "\n"

    # Static footer: quick links and build details.
    notes += """### 🔗 Quick Links
- 🌐 [Read Online](https://mlsysbook.ai)
- 📄 [Download PDF](https://mlsysbook.ai/pdf)
- 🧪 [Labs & Exercises](https://mlsysbook.ai/labs)
- 📚 [GitHub Repository](https://github.com/harvard-edge/cs249r_book)

### 📊 Technical Details
- **Build System**: Quarto with custom extensions
- **Deployment**: GitHub Pages + Netlify
- **PDF Generation**: LaTeX with compression
- **Content**: Markdown with interactive elements

---
*Generated automatically by the release notes generator*
"""

    return notes
|
||||
|
||||
def main():
    """CLI entry point: validate argv, generate release notes, save and preview them."""
    if len(sys.argv) < 4:
        print("Usage: python generate_release_notes.py <version> <description> <previous_version>")
        print("Example: python generate_release_notes.py v1.2.0 'Add new chapter on TinyML' v1.1.0")
        sys.exit(1)

    version, description, previous_version = sys.argv[1], sys.argv[2], sys.argv[3]

    print(f"📝 Generating release notes for {version}...")
    print(f"📋 Description: {description}")
    print(f"🔄 Previous version: {previous_version}")
    print()

    notes_text = generate_release_notes(version, description, previous_version)

    # Write the notes next to the working directory for easy upload.
    output_file = f"release_notes_{version}.md"
    with open(output_file, 'w') as handle:
        handle.write(notes_text)

    print(f"✅ Release notes saved to: {output_file}")
    print()

    divider = "=" * 50
    print("📄 Preview:")
    print(divider)
    print(notes_text)
    print(divider)
    print()

    print("💡 Next steps:")
    for step in ("1. Review and edit the release notes",
                 "2. Copy the content to your GitHub release",
                 "3. Publish the release"):
        print(step)


if __name__ == "__main__":
    main()
|
||||
@@ -1,338 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Generate release notes with exact behavior from original unified script.
|
||||
|
||||
This script generates release notes for GitHub releases, matching the exact
|
||||
behavior of the original changelog-releasenotes.py script.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import requests
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
# =============================================================================
# GLOBAL CONFIGURATION
# =============================================================================
# Default changelog consumed when building release notes.
CHANGELOG_FILE = "CHANGELOG.md"
# Output filename template; `version` is substituted at generation time.
RELEASE_NOTES_FILE = "release_notes_v{version}.md"
|
||||
|
||||
def extract_latest_changelog_section(changelog_file="CHANGELOG.md"):
    """Pull the newest changelog entry out of *changelog_file*.

    The file is split on "## YYYY Updates" year headers; the most recent
    section's first "### 📅" entry is returned with the marker stripped.

    Returns:
        The entry text, or None (after printing a diagnostic) when the
        file is missing, unreadable, or has no recognizable entries.
    """
    if not os.path.exists(changelog_file):
        print(f"❌ Changelog file not found: {changelog_file}")
        return None

    try:
        with open(changelog_file, 'r', encoding='utf-8') as handle:
            text = handle.read()

        # Year sections are delimited by "## <year> Updates" headers.
        year_sections = re.split(r'## \d{4} Updates', text)
        if len(year_sections) < 2:
            print("❌ No changelog sections found")
            return None

        newest_section = year_sections[-1].strip()

        # Dated entries within a section start with "### 📅".
        dated_entries = re.split(r'### 📅', newest_section)
        if len(dated_entries) < 2:
            print("❌ No changelog entries found in latest section")
            return None

        newest_entry = dated_entries[1].strip()

        # Defensive trim: drop anything past a following entry marker.
        if "### 📅" in newest_entry:
            newest_entry = newest_entry.split("### 📅")[0].strip()

        print(f"✅ Extracted latest changelog entry ({len(newest_entry)} characters)")
        return newest_entry

    except Exception as e:
        print(f"❌ Error reading changelog: {e}")
        return None
|
||||
|
||||
def call_ollama(prompt, model="gemma2:9b", url="http://localhost:11434"):
    """Send *prompt* to a local Ollama server and return its response text.

    Returns None (after printing a diagnostic) on any HTTP or connection
    failure so callers can fall back to non-AI behavior.
    """
    try:
        request_body = {
            "model": model,
            "prompt": prompt,
            "stream": False
        }

        response = requests.post(f"{url}/api/generate", json=request_body, timeout=30)
        if response.status_code != 200:
            print(f"⚠️ Ollama API error: {response.status_code}")
            return None

        return response.json().get('response', '').strip()
    except Exception as e:
        print(f"⚠️ Error calling Ollama: {e}")
        return None
|
||||
|
||||
def generate_ai_release_summary(changelog_content, version, description, model="gemma2:9b", url="http://localhost:11434"):
    """Ask Ollama for an academic-style summary of *changelog_content*.

    Falls back to a generic one-line summary when the model call fails
    or returns an empty response.
    """
    prompt = f"""As a professor addressing students and faculty, provide a concise, professional summary of the changes in version {version}.

Version: {version}
Description: {description}

Changelog Content:
{changelog_content}

Write a clear, academic-style summary that:
1. Highlights the most significant updates and their educational value
2. Emphasizes improvements to learning outcomes and practical applications
3. Maintains a professional tone suitable for academic communication
4. Focuses on content quality and pedagogical enhancements

Keep it concise but comprehensive, as if explaining to colleagues and students what has been improved in this release:"""

    model_reply = call_ollama(prompt, model, url)
    if model_reply:
        return model_reply

    # Fallback to simple summary when the model is unavailable.
    return "This release includes various improvements and updates to the Machine Learning Systems textbook."
|
||||
|
||||
def generate_release_notes_from_changelog(version, previous_version, description, changelog_entry, verbose=False, ai_mode=False, ollama_model="gemma2:9b", ollama_url="http://localhost:11434"):
    """Render release-notes markdown from an extracted changelog entry.

    Args:
        version: New release version string.
        previous_version: Prior release tag shown in the metadata section.
        description: Human-written release description (fed to the AI prompt).
        changelog_entry: Latest changelog entry text; falsy skips it.
        verbose: When True, echo the inputs before generating.
        ai_mode: When True, summarize the changelog via Ollama; when False,
            embed the raw changelog entry verbatim instead.
        ollama_model: Model name passed to the Ollama API.
        ollama_url: Base URL of the Ollama server.

    Returns:
        The complete release notes as a markdown string.
    """

    if verbose:
        print(f"📝 Generating release notes...")
        print(f"📋 Version: {version}")
        print(f"📋 Previous: {previous_version}")
        print(f"📋 Description: {description}")
        print(f"📋 Changelog entry length: {len(changelog_entry)} characters")
        print(f"🤖 AI Mode: {'ON' if ai_mode else 'OFF'}")

    # Generate AI summary if enabled; fall back to static bullets when the
    # model returns nothing or AI mode is off.
    # NOTE(review): the fallback bullet string is duplicated in both
    # branches below — keep them in sync if edited.
    if ai_mode and changelog_entry:
        print("🤖 Generating AI-powered release summary...")
        ai_summary = generate_ai_release_summary(changelog_entry, version, description, ollama_model, ollama_url)
        if ai_summary:
            key_updates = ai_summary
        else:
            key_updates = "- Repository restructuring for better organization\n- Enhanced learning with integrated quizzes\n- Improved content clarity and navigation"
    else:
        key_updates = "- Repository restructuring for better organization\n- Enhanced learning with integrated quizzes\n- Improved content clarity and navigation"

    # Add changelog content to release notes (only in non-AI mode, where the
    # raw entry is embedded as a fenced markdown block).
    changelog_section = ""
    if not ai_mode and changelog_entry:
        changelog_section = f"\n### 📋 Detailed Changelog\n\n```markdown\n{changelog_entry}\n```"

    # Create release notes template
    release_notes = f"""## 📚 Release {version}

### 🎯 Key Updates
{key_updates}

### 📋 Release Information
- **Type**: Release
- **Previous Version**: {previous_version}
- **Published at**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
- **Build Platform**: Linux (HTML + PDF)

### 🔗 Quick Links
- 🌐 [Web](https://mlsysbook.ai)
- 📄 [PDF](https://mlsysbook.ai/pdf)

### 📖 Detailed Changes
For a complete list of all changes, improvements, and updates, see the [detailed changelog](https://www.mlsysbook.ai/contents/frontmatter/changelog/changelog).{changelog_section}

### 🏗️ Build Information
- **Platform**: Linux
- **Outputs**: HTML + PDF
- **Deployment**: GitHub Pages
- **PDF Generation**: Quarto with LaTeX
"""

    return release_notes
|
||||
|
||||
def generate_release_notes(version, previous_version, description, verbose=False, ai_mode=False, ollama_model="gemma2:9b", ollama_url="http://localhost:11434", changelog_input=None):
    """Generate release notes and save them to a versioned markdown file.

    Args:
        version: New release version string.
        previous_version: Prior release tag.
        description: Human-written release description.
        verbose: Forwarded to helpers for extra logging.
        ai_mode: When True, AI-summarize the changelog via Ollama.
        ollama_model: Ollama model name for AI summaries.
        ollama_url: Ollama server URL for AI summaries.
        changelog_input: Optional explicit changelog path; falls back to
            CHANGELOG_FILE when absent or unreadable.

    Returns:
        Path of the release-notes file that was written.
    """

    print(f"📝 Generating release notes for version {version}...")

    # First, ensure we have a changelog; failure is non-fatal because the
    # basic template below works without changelog data.
    if not os.path.exists(CHANGELOG_FILE):
        print(f"📝 Changelog not found, generating incremental changelog...")
        # NOTE(review): change_log.py was removed in favor of
        # changelog-releasenotes.py per the commit message — confirm path.
        import subprocess
        cmd = ["python3", "tools/scripts/maintenance/change_log.py", "--incremental"]
        if verbose:
            cmd.append("--verbose")
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode != 0:
            print(f"⚠️ Failed to generate changelog: {result.stderr}")

    # Determine changelog source and processing method
    if changelog_input:
        # Use provided changelog file directly
        print(f"📄 Using direct changelog input: {changelog_input}")
        changelog_entry = extract_latest_changelog_section(changelog_input)
        if not changelog_entry:
            print(f"❌ Could not extract from {changelog_input}, falling back to default")
            changelog_entry = extract_latest_changelog_section(CHANGELOG_FILE)
    else:
        # Use default CHANGELOG.md
        changelog_entry = extract_latest_changelog_section(CHANGELOG_FILE)

    if changelog_entry:
        print("📝 Using changelog data for release notes generation...")
        release_notes = generate_release_notes_from_changelog(
            version=version,
            previous_version=previous_version,
            description=description,
            changelog_entry=changelog_entry,
            verbose=verbose,
            ai_mode=ai_mode,
            ollama_model=ollama_model,
            ollama_url=ollama_url
        )
    else:
        print("⚠️ No changelog data found, using basic generation...")

        # Fallback to basic generation
        if verbose:
            print(f"📋 Version: {version}")
            print(f"📋 Previous: {previous_version}")
            print(f"📋 Description: {description}")
            print("🧪 TEST MODE - Using basic template")

        # Create the final release notes (basic template)
        release_notes = f"""## 📚 Release {version}

### 🎯 Key Updates
- Repository restructuring for better organization
- Enhanced learning with integrated quizzes
- Improved content clarity and navigation

### 📋 Release Information
- **Type**: Release
- **Previous Version**: {previous_version}
- **Published at**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
- **Build Platform**: Linux (HTML + PDF)

### 🔗 Quick Links
- 🌐 [Web](https://mlsysbook.ai)
- 📄 [PDF](https://mlsysbook.ai/pdf)

### 📖 Detailed Changes
For a complete list of all changes, improvements, and updates, see the [detailed changelog](https://www.mlsysbook.ai/contents/frontmatter/changelog/changelog).

### 🏗️ Build Information
- **Platform**: Linux
- **Outputs**: HTML + PDF
- **Deployment**: GitHub Pages
- **PDF Generation**: Quarto with LaTeX

---
*Basic template - no changelog data available*
"""

    # Save release notes to file
    filename = RELEASE_NOTES_FILE.format(version=version)
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(release_notes)

    # Fix: report the actual output path instead of a stale placeholder.
    print(f"✅ Release notes saved to: {filename}")
    return filename
|
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate release notes with exact behavior from original unified script.")

    # Release notes arguments
    parser.add_argument("--version", type=str, required=True, help="Version number for release notes.")
    parser.add_argument("--previous-version", type=str, required=True, help="Previous version number for release notes.")
    parser.add_argument("--description", type=str, required=True, help="Release description for release notes.")

    # Options
    parser.add_argument("--dry-run", action="store_true", help="Show output without writing to file.")
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output.")
    parser.add_argument("--changelog-file", default="CHANGELOG.md", help="Path to changelog file.")

    # AI options
    parser.add_argument("--ai-mode", action="store_true", help="Enable AI-generated summaries from changelog.")
    parser.add_argument("--changelog-input", type=str, help="Path to changelog file to use directly (if not provided, uses AI to process default CHANGELOG.md).")
    parser.add_argument("--ollama-url", default="http://localhost:11434", help="Ollama API URL for AI summaries.")
    parser.add_argument("--ollama-model", default="gemma2:9b", help="Ollama model to use for AI summaries.")

    args = parser.parse_args()

    try:
        # Print configuration header so runs are easy to audit in CI logs.
        print("=" * 60)
        print("📝 RELEASE NOTES GENERATION CONFIG")
        print("=" * 60)
        print(f"🎯 Version: {args.version}")
        print(f"📋 Previous: {args.previous_version}")
        print(f"📢 Description: {args.description}")
        print(f"🔧 Dry Run Mode: {'ON' if args.dry_run else 'OFF'}")
        print(f"📢 Verbose: {'ON' if args.verbose else 'OFF'}")
        print(f"📄 Changelog File: {args.changelog_file}")
        print(f"🤖 AI Mode: {'ON' if args.ai_mode else 'OFF'}")
        if args.ai_mode:
            print(f"🤖 AI Model: {args.ollama_model}")
            print(f"🤖 AI URL: {args.ollama_url}")
            if args.changelog_input:
                print(f"📄 Direct Changelog Input: {args.changelog_input}")
            else:
                print(f"📄 AI Processing: Latest from {args.changelog_file}")
        print("=" * 60)
        print()

        print("🚀 Starting release notes generation...")

        # Generate release notes
        filename = generate_release_notes(
            version=args.version,
            previous_version=args.previous_version,
            description=args.description,
            verbose=args.verbose,
            ai_mode=args.ai_mode,
            ollama_model=args.ollama_model,
            ollama_url=args.ollama_url,
            changelog_input=args.changelog_input
        )

        if filename and os.path.exists(filename):
            if args.dry_run:
                # Read and display the content for dry run mode
                with open(filename, 'r', encoding='utf-8') as f:
                    content = f.read()
                print("🧪 DRY RUN - Release notes content:")
                print("=" * 60)
                print(content)
                print("=" * 60)
                print(f"📊 File size: {len(content)} characters")
                # Clean up dry run file
                os.remove(filename)
                print("🧹 Dry run file cleaned up")
            else:
                # Fix: report the real output path rather than a placeholder.
                print(f"✅ Release notes saved to: {filename}")
                print(f"📊 File size: {os.path.getsize(filename)} bytes")
        else:
            print("❌ Failed to generate release notes")
            exit(1)

    except KeyboardInterrupt:
        print(f"\n⚠️ Process interrupted by user")
    except Exception as e:
        print(f"❌ Error: {e}")
        import traceback
        traceback.print_exc()
        exit(1)
|
||||
Reference in New Issue
Block a user