From 6819ee0f9b3d9aa4ae6c96ce8a62a3eebb6d9ba6 Mon Sep 17 00:00:00 2001 From: STiFLeR7 Date: Mon, 9 Feb 2026 11:17:10 +0530 Subject: [PATCH] feat(devpulse_ai): add streamlit UI and faster demo defaults --- .../multi_agent_apps/devpulse_ai/README.md | 19 +- .../multi_agent_apps/devpulse_ai/main.py | 23 +- .../devpulse_ai/requirements.txt | 1 + .../devpulse_ai/streamlit_app.py | 216 ++++++++++++++++++ 4 files changed, 250 insertions(+), 9 deletions(-) create mode 100644 advanced_ai_agents/multi_agent_apps/devpulse_ai/streamlit_app.py diff --git a/advanced_ai_agents/multi_agent_apps/devpulse_ai/README.md b/advanced_ai_agents/multi_agent_apps/devpulse_ai/README.md index d66d32b..bf210e1 100644 --- a/advanced_ai_agents/multi_agent_apps/devpulse_ai/README.md +++ b/advanced_ai_agents/multi_agent_apps/devpulse_ai/README.md @@ -87,6 +87,22 @@ python verify.py python main.py ``` +### Streamlit Demo + +A modern, interactive dashboard is included to visualize the multi-agent pipeline: + +1. Launch the app: + +```bash +streamlit run streamlit_app.py +``` + +2. Configure sources and signal counts in the sidebar. +3. Provide an OpenAI API key (optional) to use full LLM intelligence. +4. View real-time progress as agents collaborate. + +> **Note**: The default configuration is optimized for fast demo runs. 
+ ### Verification Script The `verify.py` script tests the entire pipeline using **mock data only** - no network calls or API keys required: @@ -129,7 +145,8 @@ devpulse_ai/ │ └── synthesis_agent.py ├── workflows/ │ └── signal-intelligence-pipeline.json -├── main.py # Full pipeline demo +├── main.py # Full pipeline demo (CLI) +├── streamlit_app.py # Interactive dashboard (UI) ├── verify.py # Mock data verification ├── requirements.txt └── README.md diff --git a/advanced_ai_agents/multi_agent_apps/devpulse_ai/main.py b/advanced_ai_agents/multi_agent_apps/devpulse_ai/main.py index bc14815..275a67b 100644 --- a/advanced_ai_agents/multi_agent_apps/devpulse_ai/main.py +++ b/advanced_ai_agents/multi_agent_apps/devpulse_ai/main.py @@ -13,7 +13,10 @@ Requirements: """ import os -from typing import List, Dict, Any +from typing import List, Dict, Any, Optional + +# Reduced default signal count for faster demo execution +DEFAULT_SIGNAL_LIMIT = 8 # Import adapters from adapters.github import fetch_github_trending @@ -31,32 +34,36 @@ from agents import ( ) -def collect_signals() -> List[Dict[str, Any]]: +def collect_signals(limit: Optional[int] = None) -> List[Dict[str, Any]]: """ Collect signals from all configured sources. + Args: + limit: Maximum signals to fetch per source. Defaults to DEFAULT_SIGNAL_LIMIT. + Returns: Combined list of signals from all adapters. 
""" - print("\n📡 [1/4] Collecting Signals...") + fetch_limit = limit if limit is not None else DEFAULT_SIGNAL_LIMIT + print(f"\n📡 [1/4] Collecting Signals (limit: {fetch_limit} per source)...") signals = [] # Fetch from each source print(" → Fetching GitHub trending repos...") - signals.extend(fetch_github_trending(limit=5)) + signals.extend(fetch_github_trending(limit=fetch_limit)) print(" → Fetching ArXiv papers...") - signals.extend(fetch_arxiv_papers(limit=5)) + signals.extend(fetch_arxiv_papers(limit=fetch_limit)) print(" → Fetching HackerNews stories...") - signals.extend(fetch_hackernews_stories(limit=5)) + signals.extend(fetch_hackernews_stories(limit=fetch_limit)) print(" → Fetching Medium blogs...") - signals.extend(fetch_medium_blogs(limit=3)) + signals.extend(fetch_medium_blogs(limit=min(fetch_limit, 3))) # Medium feeds are usually denser print(" → Fetching HuggingFace models...") - signals.extend(fetch_huggingface_models(limit=5)) + signals.extend(fetch_huggingface_models(limit=fetch_limit)) print(f" ✓ Collected {len(signals)} raw signals") return signals diff --git a/advanced_ai_agents/multi_agent_apps/devpulse_ai/requirements.txt b/advanced_ai_agents/multi_agent_apps/devpulse_ai/requirements.txt index 2b93919..77fa635 100644 --- a/advanced_ai_agents/multi_agent_apps/devpulse_ai/requirements.txt +++ b/advanced_ai_agents/multi_agent_apps/devpulse_ai/requirements.txt @@ -2,3 +2,4 @@ agno httpx openai feedparser +streamlit>=1.30 diff --git a/advanced_ai_agents/multi_agent_apps/devpulse_ai/streamlit_app.py b/advanced_ai_agents/multi_agent_apps/devpulse_ai/streamlit_app.py new file mode 100644 index 0000000..acfee07 --- /dev/null +++ b/advanced_ai_agents/multi_agent_apps/devpulse_ai/streamlit_app.py @@ -0,0 +1,216 @@ +import streamlit as st +import os +from typing import List, Dict, Any + +# Import pipeline components from main.py and agents +from main import collect_signals, DEFAULT_SIGNAL_LIMIT +from agents import ( + SignalCollectorAgent, + 
RelevanceAgent, + RiskAgent, + SynthesisAgent +) + +# Page Config +st.set_page_config( + page_title="DevPulseAI – Signal Intelligence Demo", + page_icon="🧠", + layout="wide" +) + +# Custom CSS for glassmorphism and premium feel +st.markdown(""" + +""", unsafe_allow_html=True) + +# Title and Description +st.title("🧠 DevPulseAI – Signal Intelligence Demo") +st.markdown(""" +This demo showcases a **multi-agent system** that aggregates technical signals from various developer sources, +scores them for relevance, identifies potential risks, and synthesizes a final intelligence digest. +""") + +# Sidebar Configuration +st.sidebar.header("⚙️ Pipeline Configuration") + +# API Key +api_key = st.sidebar.text_input("OpenAI API Key (optional)", type="password", help="If not provided, agents will use fallback heuristic logic.") +if api_key: + os.environ["OPENAI_API_KEY"] = api_key + +# Source Selection +sources = st.sidebar.multiselect( + "Signal Sources", + ["GitHub", "ArXiv", "HackerNews", "Medium", "HuggingFace"], + default=["GitHub", "ArXiv", "HackerNews", "Medium", "HuggingFace"] +) + +# Signal Count Slider +signal_count = st.sidebar.slider( + "Signals per source", + min_value=4, + max_value=32, + value=DEFAULT_SIGNAL_LIMIT, + step=4 +) + +run_button = st.sidebar.button("🚀 Run Intelligence Pipeline", use_container_width=True) + +# Main Area Logic +if run_button: + if not sources: + st.warning("Please select at least one signal source.") + else: + # Initialize Agents + collector = SignalCollectorAgent() + relevance = RelevanceAgent() + risk = RiskAgent() + synthesis = SynthesisAgent() + + # Step 1: Collection + with st.status("📡 Collecting and normalizing signals...", expanded=True) as status: + st.write("Fetching raw data from sources...") + + # Map selected sources to fetch calls (simplified reuse) + # We use the collect_signals logic but filter by selected sources + raw_signals = [] + from adapters.github import fetch_github_trending + from adapters.arxiv import 
fetch_arxiv_papers + from adapters.hackernews import fetch_hackernews_stories + from adapters.medium import fetch_medium_blogs + from adapters.huggingface import fetch_huggingface_models + + if "GitHub" in sources: + st.write("Fetching GitHub trending...") + raw_signals.extend(fetch_github_trending(limit=signal_count)) + if "ArXiv" in sources: + st.write("Fetching ArXiv papers...") + raw_signals.extend(fetch_arxiv_papers(limit=signal_count)) + if "HackerNews" in sources: + st.write("Fetching HackerNews stories...") + raw_signals.extend(fetch_hackernews_stories(limit=signal_count)) + if "Medium" in sources: + st.write("Fetching Medium blogs...") + raw_signals.extend(fetch_medium_blogs(limit=min(signal_count, 3))) + if "HuggingFace" in sources: + st.write("Fetching HuggingFace models...") + raw_signals.extend(fetch_huggingface_models(limit=signal_count)) + + st.write(f"Normalizing {len(raw_signals)} raw signals...") + normalized = collector.collect(raw_signals) + status.update(label=f"✅ {len(normalized)} unique signals collected", state="complete") + + # Step 2: Analysis + col1, col2 = st.columns(2) + + with col1: + with st.status("📊 Scoring Relevance...") as status: + scored = relevance.score_batch(normalized) + status.update(label="✅ Relevance scoring complete", state="complete") + + with col2: + with st.status("⚠️ Assessing Security Risks...") as status: + assessed = risk.assess_batch(scored) + status.update(label="✅ Risk assessment complete", state="complete") + + # Step 3: Synthesis + with st.status("📋 Generating Intelligence Digest...") as status: + digest = synthesis.synthesize(assessed) + status.update(label="✅ Final synthesis complete", state="complete") + + # Display Results + st.divider() + st.header("📄 Intelligence Digest") + + # Executive Summary + st.info(f"**Executive Summary:** {digest['executive_summary']}") + + # Recommendations + st.subheader("💡 Recommendations") + for rec in digest['recommendations']: + st.write(f"• {rec}") + + st.divider() + 
st.subheader("🎯 Priority Signals") + + # Display signals in expandable sections + for signal in assessed: + rel = signal.get("relevance", {}) + risk_info = signal.get("risk", {}) + risk_level = risk_info.get("risk_level", "UNKNOWN") + + with st.expander(f"[{signal['source'].upper()}] {signal['title']}"): + col_a, col_b = st.columns([3, 1]) + + with col_a: + st.write(f"**Description:** {signal['description']}") + st.write(f"**URL:** [{signal['url']}]({signal['url']})") + if risk_info.get("concerns"): + st.markdown("**Security Concerns:**") + for concern in risk_info["concerns"]: + st.write(f"- {concern}") + + with col_b: + st.markdown("
", unsafe_allow_html=True) + st.markdown(f"
{rel.get('score', 0)}
", unsafe_allow_html=True) + st.markdown("RELEVANCE", unsafe_allow_html=True) + + risk_class = f"risk-{risk_level.lower()}" + st.markdown(f"
{risk_level} RISK
", unsafe_allow_html=True) + st.markdown("
", unsafe_allow_html=True) + + if rel.get("reasoning"): + st.caption(f"Reason: {rel['reasoning']}") + +else: + # Landing state + st.image("https://raw.githubusercontent.com/Shubhamsaboo/awesome-llm-apps/main/advanced_ai_agents/multi_agent_apps/devpulse_ai/assets/logo.png", width=200) # Placeholder for logo logic + st.info("👈 Use the sidebar to configure the pipeline and click 'Run' to begin.") + + # Educational Section + with st.expander("🛠️ How it works", expanded=True): + st.markdown(""" + 1. **Collector Agent**: Gathers data from GitHub, ArXiv, HN, Medium, and HuggingFace. + 2. **Relevance Agent**: LLM analysis to score each signal for developer impact. + 3. **Risk Agent**: Scans for breaking changes, vulnerabilities, or deprecations. + 4. **Synthesis Agent**: Combines all findings into an actionable report. + """)