Merge pull request #110 from Madhuvod/phidata-agno

Move phidata to Agno + few old error corrections
This commit is contained in:
Shubham Saboo
2025-02-03 08:52:12 -06:00
committed by GitHub
69 changed files with 276 additions and 275 deletions

View File

@@ -0,0 +1,7 @@
scrapegraphai
playwright
langchain-community
streamlit-chat
streamlit
crewai
ollama

View File

@@ -1,2 +1,3 @@
streamlit
"routellm[serve,eval]"
"routellm[serve,eval]"
routellm

View File

@@ -1,9 +1,9 @@
import streamlit as st
import os
from phi.assistant import Assistant
from phi.llm.ollama import Ollama
from phi.tools.yfinance import YFinanceTools
from phi.tools.serpapi_tools import SerpApiTools
from agno.agent import Agent
from agno.models.ollama import Ollama
from agno.tools.yfinance import YFinanceTools
from agno.tools.serpapi import SerpApiTools
st.set_page_config(page_title="Llama-3 Tool Use", page_icon="🦙")
@@ -13,9 +13,9 @@ if 'SERPAPI_API_KEY' not in os.environ:
st.stop()
def get_assistant(tools):
return Assistant(
return Agent(
name="llama3_assistant",
llm=Ollama(model="llama3"),
model=Ollama(id="llama3.1:8b"),
tools=tools,
description="You are a helpful assistant that can access specific tools based on user selection.",
show_tool_calls=True,
@@ -25,7 +25,7 @@ def get_assistant(tools):
)
st.title("🦙 Local Llama-3 Tool Use")
st.title("🦙 Local Llama-3.1 Tool Use")
st.markdown("""
This app demonstrates function calling with the local Llama3 model using Ollama.
Select tools in the sidebar and ask relevant questions!

View File

@@ -1,3 +1,3 @@
streamlit
ollama
phidata
agno

View File

@@ -1,8 +1,8 @@
# Import the required libraries
import streamlit as st
from phi.assistant import Assistant
from phi.tools.duckduckgo import DuckDuckGo
from phi.llm.anthropic import Claude
from agno.agent import Agent
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.models.anthropic import Claude
# Set up the Streamlit app
st.title("Claude Sonnet + AI Web Search 🤖")
@@ -13,12 +13,12 @@ anthropic_api_key = st.text_input("Anthropic's Claude API Key", type="password")
# If Anthropic API key is provided, create an instance of Assistant
if anthropic_api_key:
assistant = Assistant(
llm=Claude(
model="claude-3-5-sonnet-20240620",
assistant = Agent(
model=Claude(
id="claude-3-5-sonnet-20240620",
max_tokens=1024,
temperature=0.9,
api_key=anthropic_api_key) , tools=[DuckDuckGo()], show_tool_calls=True
temperature=0.3,
api_key=anthropic_api_key) , tools=[DuckDuckGoTools()], show_tool_calls=True
)
# Get the search query from the user
query= st.text_input("Enter the Search Query", type="default")
@@ -26,4 +26,4 @@ if anthropic_api_key:
if query:
# Search the web using the AI Assistant
response = assistant.run(query, stream=False)
st.write(response)
st.write(response.content)

View File

@@ -1,8 +1,8 @@
# Import the required libraries
import streamlit as st
from phi.assistant import Assistant
from phi.tools.duckduckgo import DuckDuckGo
from phi.llm.openai import OpenAIChat
from agno.agent import Agent
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.models.openai import OpenAIChat
# Set up the Streamlit app
st.title("AI Web Search Assistant 🤖")
@@ -14,12 +14,12 @@ openai_access_token = st.text_input("OpenAI API Key", type="password")
# If OpenAI API key is provided, create an instance of Assistant
if openai_access_token:
# Create an instance of the Assistant
assistant = Assistant(
llm=OpenAIChat(
model="gpt-4o",
assistant = Agent(
model=OpenAIChat(
id="gpt-4o",
max_tokens=1024,
temperature=0.9,
api_key=openai_access_token) , tools=[DuckDuckGo()], show_tool_calls=True
api_key=openai_access_token) , tools=[DuckDuckGoTools()], show_tool_calls=True
)
# Get the search query from the user
@@ -28,4 +28,4 @@ if openai_access_token:
if query:
# Search the web using the AI Assistant
response = assistant.run(query, stream=False)
st.write(response)
st.write(response.content)

View File

@@ -1,4 +1,4 @@
streamlit
openai
phidata
agno
duckduckgo-search

View File

@@ -1,6 +1,6 @@
# 📊 AI Data Analysis Agent
An AI data analysis Agent built using the phidata Agent framework and Openai's gpt-4o model. This agent helps users analyze their data - csv, excel files through natural language queries, powered by OpenAI's language models and DuckDB for efficient data processing - making data analysis accessible to users regardless of their SQL expertise.
An AI data analysis Agent built using the Agno Agent framework and Openai's gpt-4o model. This agent helps users analyze their data - csv, excel files through natural language queries, powered by OpenAI's language models and DuckDB for efficient data processing - making data analysis accessible to users regardless of their SQL expertise.
## Features

View File

@@ -3,9 +3,9 @@ import tempfile
import csv
import streamlit as st
import pandas as pd
from phi.model.openai import OpenAIChat
from agno.models.openai import OpenAIChat
from phi.agent.duckdb import DuckDbAgent
from phi.tools.pandas import PandasTools
from agno.tools.pandas import PandasTools
import re
# Function to preprocess and save the uploaded file

View File

@@ -1,6 +1,7 @@
phidata==2.7.3
phidata
streamlit==1.41.1
openai==1.58.1
duckdb==1.1.3
pandas
numpy==1.26.4
numpy==1.26.4
agno

View File

@@ -1,16 +1,16 @@
from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.storage.agent.sqlite import SqlAgentStorage
from phi.tools.duckduckgo import DuckDuckGo
from phi.tools.yfinance import YFinanceTools
from phi.playground import Playground, serve_playground_app
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.storage.agent.sqlite import SqliteAgentStorage
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.tools.yfinance import YFinanceTools
from agno.playground import Playground, serve_playground_app
web_agent = Agent(
name="Web Agent",
role="Search the web for information",
model=OpenAIChat(id="gpt-4o"),
tools=[DuckDuckGo()],
storage=SqlAgentStorage(table_name="web_agent", db_file="agents.db"),
tools=[DuckDuckGoTools()],
storage=SqliteAgentStorage(table_name="web_agent", db_file="agents.db"),
add_history_to_messages=True,
markdown=True,
)
@@ -21,7 +21,7 @@ finance_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[YFinanceTools(stock_price=True, analyst_recommendations=True, company_info=True, company_news=True)],
instructions=["Always use tables to display data"],
storage=SqlAgentStorage(table_name="finance_agent", db_file="agents.db"),
storage=SqliteAgentStorage(table_name="finance_agent", db_file="agents.db"),
add_history_to_messages=True,
markdown=True,
)

View File

@@ -1,5 +1,5 @@
openai
phidata
agno
duckduckgo-search
yfinance
fastapi[standard]

View File

@@ -1,6 +1,6 @@
# AI Health & Fitness Planner Agent 🏋️‍♂️
The **AI Health & Fitness Planner** is a personalized health and fitness Agent powered by Phidata's AI Agent framework. This app generates tailored dietary and fitness plans based on user inputs such as age, weight, height, activity level, dietary preferences, and fitness goals.
The **AI Health & Fitness Planner** is a personalized health and fitness Agent powered by Agno AI Agent framework. This app generates tailored dietary and fitness plans based on user inputs such as age, weight, height, activity level, dietary preferences, and fitness goals.
## Features
@@ -24,7 +24,7 @@ The **AI Health & Fitness Planner** is a personalized health and fitness Agent p
The application requires the following Python libraries:
- `phidata`
- `agno`
- `google-generativeai`
- `streamlit`

View File

@@ -1,6 +1,6 @@
import streamlit as st
from phi.agent import Agent
from phi.model.google import Gemini
from agno.agent import Agent
from agno.models.google import Gemini
st.set_page_config(
page_title="AI Health & Fitness Planner",

View File

@@ -1,3 +1,3 @@
phidata==2.5.33
google-generativeai==0.8.3
streamlit==1.40.2
streamlit==1.40.2
agno

View File

@@ -1,5 +1,5 @@
## 📈 AI Investment Agent
This Streamlit app is an AI-powered investment agent that compares the performance of two stocks and generates detailed reports. By using GPT-4o with Yahoo Finance data, this app provides valuable insights to help you make informed investment decisions.
This Streamlit app is an AI-powered investment agent built with Agno's AI Agent framework that compares the performance of two stocks and generates detailed reports. By using GPT-4o with Yahoo Finance data, this app provides valuable insights to help you make informed investment decisions.
### Features
- Compare the performance of two stocks
@@ -32,7 +32,7 @@ streamlit run investment_agent.py
### How it Works?
- Upon running the app, you will be prompted to enter your OpenAI API key. This key is used to authenticate and access the OpenAI language model.
- Once you provide a valid API key, an instance of the Assistant class is created. This assistant utilizes the GPT-4 language model from OpenAI and the YFinanceTools for accessing stock data.
- Once you provide a valid API key, an instance of the Assistant class is created. This assistant utilizes the GPT-4o language model from OpenAI and the YFinanceTools for accessing stock data.
- Enter the stock symbols of the two companies you want to compare in the provided text input fields.
- The assistant will perform the following steps:
- Retrieve real-time stock prices and historical data using YFinanceTools

View File

@@ -1,8 +1,8 @@
# Import the required libraries
import streamlit as st
from phi.assistant import Assistant
from phi.llm.openai import OpenAIChat
from phi.tools.yfinance import YFinanceTools
from agno.agent import Assistant
from agno.models.openai import OpenAIChat
from agno.tools.yfinance import YFinanceTools
# Set up the Streamlit app
st.title("AI Investment Agent 📈🤖")
@@ -27,4 +27,4 @@ if openai_api_key:
# Get the response from the assistant
query = f"Compare {stock1} to {stock2}. Use every tool you have."
response = assistant.run(query, stream=False)
st.write(response)
st.write(response.content)

View File

@@ -1,4 +1,4 @@
streamlit
phidata
agno
openai
yfinance

View File

@@ -1,10 +1,10 @@
# Import the required libraries
from textwrap import dedent
from phi.assistant import Assistant
from phi.tools.serpapi_tools import SerpApiTools
from phi.tools.newspaper4k import Newspaper4k as NewspaperToolkit
from agno.agent import Agent
from agno.tools.serpapi import SerpApiTools
from agno.tools.newspaper4k import Newspaper4kTools
import streamlit as st
from phi.llm.openai import OpenAIChat
from agno.models.openai import OpenAIChat
# Set up the Streamlit app
st.title("AI Journalist Agent 🗞️")
@@ -17,10 +17,10 @@ openai_api_key = st.text_input("Enter OpenAI API Key to access GPT-4o", type="pa
serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password")
if openai_api_key and serp_api_key:
searcher = Assistant(
searcher = Agent(
name="Searcher",
role="Searches for top URLs based on a topic",
llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key),
model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
description=dedent(
"""\
You are a world-class journalist for the New York Times. Given a topic, generate a list of 3 search terms
@@ -37,10 +37,10 @@ if openai_api_key and serp_api_key:
tools=[SerpApiTools(api_key=serp_api_key)],
add_datetime_to_instructions=True,
)
writer = Assistant(
writer = Agent(
name="Writer",
role="Retrieves text from URLs and writes a high-quality article",
llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key),
model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
description=dedent(
"""\
You are a senior writer for the New York Times. Given a topic and a list of URLs,
@@ -57,15 +57,14 @@ if openai_api_key and serp_api_key:
"Focus on clarity, coherence, and overall quality.",
"Never make up facts or plagiarize. Always provide proper attribution.",
],
tools=[NewspaperToolkit()],
tools=[Newspaper4kTools()],
add_datetime_to_instructions=True,
add_chat_history_to_prompt=True,
num_history_messages=3,
markdown=True,
)
editor = Assistant(
editor = Agent(
name="Editor",
llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key),
model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
team=[searcher, writer],
description="You are a senior NYT editor. Given a topic, your goal is to write a NYT worthy article.",
instructions=[
@@ -88,4 +87,4 @@ if openai_api_key and serp_api_key:
with st.spinner("Processing..."):
# Get the response from the assistant
response = editor.run(query, stream=False)
st.write(response)
st.write(response.content)

View File

@@ -1,5 +1,5 @@
streamlit
phidata
agno
openai
google-search-results
newspaper4k

View File

@@ -1,8 +1,8 @@
import streamlit as st
import requests
from phi.agent import Agent
from phi.tools.firecrawl import FirecrawlTools
from phi.model.openai import OpenAIChat
from agno.agent import Agent
from agno.tools.firecrawl import FirecrawlTools
from agno.models.openai import OpenAIChat
from firecrawl import FirecrawlApp
from pydantic import BaseModel, Field
from typing import List

View File

@@ -1,6 +1,6 @@
firecrawl-py==1.9.0
phidata==2.7.3
composio-phidata==0.6.15
agno
composio-phidata
composio==0.1.1
pydantic==2.10.5
streamlit

View File

@@ -1,10 +1,10 @@
import streamlit as st
from phi.agent import Agent
from phi.knowledge.pdf import PDFKnowledgeBase, PDFReader
from phi.vectordb.qdrant import Qdrant
from phi.tools.duckduckgo import DuckDuckGo
from phi.model.openai import OpenAIChat
from phi.embedder.openai import OpenAIEmbedder
from agno.agent import Agent
from agno.knowledge.pdf import PDFKnowledgeBase, PDFReader
from agno.vectordb.qdrant import Qdrant
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.models.openai import OpenAIChat
from agno.embedder.openai import OpenAIEmbedder
import tempfile
import os
@@ -133,7 +133,7 @@ def main():
name="Legal Researcher",
role="Legal research specialist",
model=OpenAIChat(model="gpt-4o"),
tools=[DuckDuckGo()],
tools=[DuckDuckGoTools()],
knowledge=st.session_state.knowledge_base,
search_knowledge=True,
instructions=[

View File

@@ -1,9 +1,9 @@
import streamlit as st
from phi.agent import Agent
from phi.knowledge.pdf import PDFKnowledgeBase, PDFReader
from phi.vectordb.qdrant import Qdrant
from phi.model.ollama import Ollama
from phi.embedder.ollama import OllamaEmbedder
from agno.agent import Agent
from agno.knowledge.pdf import PDFKnowledgeBase, PDFReader
from agno.vectordb.qdrant import Qdrant
from agno.models.ollama import Ollama
from agno.embedder.ollama import OllamaEmbedder
import tempfile
import os

View File

@@ -1,4 +1,4 @@
phidata==2.6.7
agno
streamlit==1.40.2
qdrant-client==1.12.1
ollama==0.4.4

View File

@@ -1,4 +1,4 @@
phidata==2.5.33
agno
streamlit==1.40.2
qdrant-client==1.12.1
openai

View File

@@ -1,6 +1,6 @@
# 🩻 Medical Imaging Diagnosis Agent
A Medical Imaging Diagnosis Agent build on phidata powered by Gemini 2.0 Flash Experimental that provides AI-assisted analysis of medical images of various scans. The agent acts as a medical imaging diagnosis expert to analyze various types of medical images and videos, providing detailed diagnostic insights and explanations.
A Medical Imaging Diagnosis Agent build on agno powered by Gemini 2.0 Flash Experimental that provides AI-assisted analysis of medical images of various scans. The agent acts as a medical imaging diagnosis expert to analyze various types of medical images and videos, providing detailed diagnostic insights and explanations.
## Features

View File

@@ -1,9 +1,9 @@
import os
from PIL import Image
from phi.agent import Agent
from phi.model.google import Gemini
from agno.agent import Agent
from agno.models.google import Gemini
import streamlit as st
from phi.tools.duckduckgo import DuckDuckGo
from agno.tools.duckduckgo import DuckDuckGoTools
if "GOOGLE_API_KEY" not in st.session_state:
st.session_state.GOOGLE_API_KEY = None
@@ -45,7 +45,7 @@ medical_agent = Agent(
api_key=st.session_state.GOOGLE_API_KEY,
id="gemini-2.0-flash-exp"
),
tools=[DuckDuckGo()],
tools=[DuckDuckGoTools()],
markdown=True
) if st.session_state.GOOGLE_API_KEY else None

View File

@@ -1,5 +1,5 @@
streamlit==1.40.2
phidata==2.7.3
agno
Pillow==10.0.0
duckduckgo-search==6.4.1
google-generativeai==0.8.3

View File

@@ -1,8 +1,8 @@
# Import the required libraries
import streamlit as st
from phi.assistant import Assistant
from phi.tools.serpapi_tools import SerpApiTools
from phi.llm.anthropic import Claude
from agno.agent import Agent
from agno.tools.serpapi import SerpApiTools
from agno.models.anthropic import Claude
from textwrap import dedent
# Set up the Streamlit app
@@ -15,9 +15,9 @@ anthropic_api_key = st.text_input("Enter Anthropic API Key to access Claude Sonn
serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password")
if anthropic_api_key and serp_api_key:
script_writer = Assistant(
script_writer = Agent(
name="ScriptWriter",
llm=Claude(model="claude-3-5-sonnet-20240620", api_key=anthropic_api_key),
model=Claude(id="claude-3-5-sonnet-20240620", api_key=anthropic_api_key),
description=dedent(
"""\
You are an expert screenplay writer. Given a movie idea and genre,
@@ -31,9 +31,9 @@ if anthropic_api_key and serp_api_key:
],
)
casting_director = Assistant(
casting_director = Agent(
name="CastingDirector",
llm=Claude(model="claude-3-5-sonnet-20240620", api_key=anthropic_api_key),
model=Claude(id="claude-3-5-sonnet-20240620", api_key=anthropic_api_key),
description=dedent(
"""\
You are a talented casting director. Given a script outline and character descriptions,
@@ -49,9 +49,9 @@ if anthropic_api_key and serp_api_key:
tools=[SerpApiTools(api_key=serp_api_key)],
)
movie_producer = Assistant(
movie_producer = Agent(
name="MovieProducer",
llm=Claude(model="claude-3-5-sonnet-20240620", api_key=anthropic_api_key),
model=Claude(id="claude-3-5-sonnet-20240620", api_key=anthropic_api_key),
team=[script_writer, casting_director],
description="Experienced movie producer overseeing script and casting.",
instructions=[

View File

@@ -1,5 +1,5 @@
streamlit
phidata
agno
anthropic
google-search-results
lxml_html_clean

View File

@@ -1,8 +1,8 @@
from textwrap import dedent
from phi.assistant import Assistant
from phi.tools.serpapi_tools import SerpApiTools
from agno.agent import Agent
from agno.tools.serpapi import SerpApiTools
import streamlit as st
from phi.llm.openai import OpenAIChat
from agno.models.openai import OpenAIChat
# Set up the Streamlit app
st.title("AI Personal Finance Planner 💰")
@@ -15,10 +15,10 @@ openai_api_key = st.text_input("Enter OpenAI API Key to access GPT-4o", type="pa
serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password")
if openai_api_key and serp_api_key:
researcher = Assistant(
researcher = Agent(
name="Researcher",
role="Searches for financial advice, investment opportunities, and savings strategies based on user preferences",
llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key),
model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
description=dedent(
"""\
You are a world-class financial researcher. Given a user's financial goals and current financial situation,
@@ -35,10 +35,10 @@ if openai_api_key and serp_api_key:
tools=[SerpApiTools(api_key=serp_api_key)],
add_datetime_to_instructions=True,
)
planner = Assistant(
planner = Agent(
name="Planner",
role="Generates a personalized financial plan based on user preferences and research results",
llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key),
model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
description=dedent(
"""\
You are a senior financial planner. Given a user's financial goals, current financial situation, and a list of research results,
@@ -54,8 +54,6 @@ if openai_api_key and serp_api_key:
"Never make up facts or plagiarize. Always provide proper attribution.",
],
add_datetime_to_instructions=True,
add_chat_history_to_prompt=True,
num_history_messages=3,
)
# Input fields for the user's financial goals and current financial situation
@@ -66,4 +64,4 @@ if openai_api_key and serp_api_key:
with st.spinner("Processing..."):
# Get the response from the assistant
response = planner.run(f"Financial goals: {financial_goals}, Current situation: {current_situation}", stream=False)
st.write(response)
st.write(response.content)

View File

@@ -1,4 +1,4 @@
streamlit
phidata
agno
openai
google-search-results

View File

@@ -1,6 +1,6 @@
from phi.agent import Agent
from phi.model.ollama import Ollama
from phi.playground import Playground, serve_playground_app
from agno.agent import Agent
from agno.models.ollama import Ollama
from agno.playground import Playground, serve_playground_app
reasoning_agent = Agent(name="Reasoning Agent", model=Ollama(id="qwq:32b"), markdown=True)

View File

@@ -1,9 +1,9 @@
from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.cli.console import console
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from rich.console import Console
regular_agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), markdown=True)
console = Console()
reasoning_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
reasoning=True,

View File

@@ -8,9 +8,9 @@ from datetime import datetime, timedelta
import pytz
import streamlit as st
from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.tools.email import EmailTools
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.email import EmailTools
from phi.tools.zoom import ZoomTool
from phi.utils.log import logger
from streamlit_pdf_viewer import pdf_viewer

View File

@@ -1,5 +1,6 @@
# Core dependencies
phidata==2.7.3
phidata
agno
streamlit==1.40.2
PyPDF2==3.0.1
streamlit-pdf-viewer==0.0.19

View File

@@ -1,4 +1,4 @@
phidata==2.5.33
agno
streamlit==1.40.2
duckduckgo_search==6.3.7
newspaper4k==0.9.3.1

View File

@@ -1,9 +1,9 @@
import streamlit as st
from phi.agent import Agent
from phi.tools.duckduckgo import DuckDuckGo
from phi.model.anthropic import Claude
from phi.tools.newspaper4k import Newspaper4k
from phi.tools import Tool
from agno.agent import Agent
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.models.anthropic import Claude
from agno.tools.newspaper4k import Newspaper4kTools
from agno.tools import Tool
import logging
logging.basicConfig(level=logging.DEBUG)
@@ -25,7 +25,7 @@ if st.button("Generate Analysis"):
anthropic_model = Claude(id ="claude-3-5-sonnet-20240620",api_key=anthropic_api_key)
# Define News Collector Agent - Duckduckgo_search tool enables an Agent to search the web for information.
search_tool = DuckDuckGo(search=True, news=True, fixed_max_results=5)
search_tool = DuckDuckGoTools(search=True, news=True, fixed_max_results=5)
news_collector = Agent(
name="News Collector",
role="Collects recent news articles on the given topic",
@@ -37,7 +37,7 @@ if st.button("Generate Analysis"):
)
# Define Summary Writer Agent
news_tool = Newspaper4k(read_article=True, include_summary=True)
news_tool = Newspaper4kTools(read_article=True, include_summary=True)
summary_writer = Agent(
name="Summary Writer",
role="Summarizes collected news articles",

View File

@@ -2,7 +2,7 @@ streamlit==1.41.1
openai==1.58.1
duckduckgo-search==6.4.1
typing-extensions>=4.5.0
phidata==2.7.3
agno
composio-phidata==0.6.9
composio_core
composio==0.1.1

View File

@@ -1,11 +1,11 @@
import streamlit as st
from phi.agent import Agent, RunResponse
from phi.model.openai import OpenAIChat
from agno.agent import Agent, RunResponse
from agno.models.openai import OpenAIChat
from composio_phidata import Action, ComposioToolSet
import os
from phi.tools.arxiv_toolkit import ArxivToolkit
from phi.utils.pprint import pprint_run_response
from phi.tools.serpapi_tools import SerpApiTools
from agno.tools.arxiv import ArxivTools
from agno.utils.pprint import pprint_run_response
from agno.tools.serpapi import SerpApiTools
# Set page configuration
st.set_page_config(page_title="👨‍🏫 AI Teaching Agent Team", layout="centered")

View File

@@ -1,6 +1,6 @@
# 🎮 Agent X vs Agent O: Tic-Tac-Toe Game
An interactive Tic-Tac-Toe game where two AI agents powered by different language models compete against each other built on phidata Agent Framework and Streamlit as UI. Watch as GPT-4O battles against either DeepSeek V3 or Google's Gemini 1.5 Flash in this classic game.
An interactive Tic-Tac-Toe game where two AI agents powered by different language models compete against each other built on Agno Agent Framework and Streamlit as UI. Watch as GPT-4O battles against either DeepSeek V3 or Google's Gemini 1.5 Flash in this classic game.
## Features

View File

@@ -1,9 +1,9 @@
import re
import streamlit as st
from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.model.deepseek import DeepSeekChat
from phi.model.google import Gemini
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.models.deepseek import DeepSeek
from agno.models.google import Gemini
# Streamlit App Title
st.title("🎮 Agent X vs Agent O: Tic-Tac-Toe Game")
@@ -122,7 +122,7 @@ if 'openai_api_key' in st.session_state:
if 'deepseek_api_key' in st.session_state:
player_o = Agent(
name="Player O",
model=DeepSeekChat(api_key=st.session_state.deepseek_api_key),
model=DeepSeek(id="deepseek-chat", api_key=st.session_state.deepseek_api_key),
instructions=[
"You are a Tic-Tac-Toe player using the symbol 'O'.",
"Your opponent is using the symbol 'X'. Block their potential winning moves.",

View File

@@ -1,4 +1,4 @@
streamlit==1.41.1
phidata==2.7.3
agno
openai==1.58.1
google-generativeai==0.8.3
google-generativeai==0.8.3

View File

@@ -1,21 +1,21 @@
from textwrap import dedent
from phi.assistant import Assistant
from phi.tools.serpapi_tools import SerpApiTools
from agno.agent import Agent
from agno.tools.serpapi import SerpApiTools
import streamlit as st
from phi.llm.ollama import Ollama
from agno.models.ollama import Ollama
# Set up the Streamlit app
st.title("AI Travel Planner using Llama-3 ✈️")
st.title("AI Travel Planner using Llama-3.2 ✈️")
st.caption("Plan your next adventure with AI Travel Planner by researching and planning a personalized itinerary on autopilot using local Llama-3")
# Get SerpAPI key from the user
serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password")
if serp_api_key:
researcher = Assistant(
researcher = Agent(
name="Researcher",
role="Searches for travel destinations, activities, and accommodations based on user preferences",
llm=Ollama(model="llama3:instruct", max_tokens=1024),
model=Ollama(id="llama3.2", max_tokens=1024),
description=dedent(
"""\
You are a world-class travel researcher. Given a travel destination and the number of days the user wants to travel for,
@@ -32,10 +32,10 @@ if serp_api_key:
tools=[SerpApiTools(api_key=serp_api_key)],
add_datetime_to_instructions=True,
)
planner = Assistant(
planner = Agent(
name="Planner",
role="Generates a draft itinerary based on user preferences and research results",
llm=Ollama(model="llama3:instruct", max_tokens=1024),
model=Ollama(id="llama3.2", max_tokens=1024),
description=dedent(
"""\
You are a senior travel planner. Given a travel destination, the number of days the user wants to travel for, and a list of research results,
@@ -51,8 +51,6 @@ if serp_api_key:
"Never make up facts or plagiarize. Always provide proper attribution.",
],
add_datetime_to_instructions=True,
add_chat_history_to_prompt=True,
num_history_messages=3,
)
# Input fields for the user's destination and the number of days they want to travel for
@@ -63,4 +61,4 @@ if serp_api_key:
with st.spinner("Processing..."):
# Get the response from the assistant
response = planner.run(f"{destination} for {num_days} days", stream=False)
st.write(response)
st.write(response.content)

View File

@@ -1,4 +1,4 @@
streamlit
phidata
agno
openai
google-search-results

View File

@@ -1,8 +1,8 @@
from textwrap import dedent
from phi.assistant import Assistant
from phi.tools.serpapi_tools import SerpApiTools
from agno.agent import Agent
from agno.tools.serpapi import SerpApiTools
import streamlit as st
from phi.llm.openai import OpenAIChat
from agno.models.openai import OpenAIChat
# Set up the Streamlit app
st.title("AI Travel Planner ✈️")
@@ -15,10 +15,10 @@ openai_api_key = st.text_input("Enter OpenAI API Key to access GPT-4o", type="pa
serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password")
if openai_api_key and serp_api_key:
researcher = Assistant(
researcher = Agent(
name="Researcher",
role="Searches for travel destinations, activities, and accommodations based on user preferences",
llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key),
model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
description=dedent(
"""\
You are a world-class travel researcher. Given a travel destination and the number of days the user wants to travel for,
@@ -35,10 +35,10 @@ if openai_api_key and serp_api_key:
tools=[SerpApiTools(api_key=serp_api_key)],
add_datetime_to_instructions=True,
)
planner = Assistant(
planner = Agent(
name="Planner",
role="Generates a draft itinerary based on user preferences and research results",
llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key),
model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
description=dedent(
"""\
You are a senior travel planner. Given a travel destination, the number of days the user wants to travel for, and a list of research results,
@@ -54,8 +54,6 @@ if openai_api_key and serp_api_key:
"Never make up facts or plagiarize. Always provide proper attribution.",
],
add_datetime_to_instructions=True,
add_chat_history_to_prompt=True,
num_history_messages=3,
)
# Input fields for the user's destination and the number of days they want to travel for
@@ -66,4 +64,4 @@ if openai_api_key and serp_api_key:
with st.spinner("Processing..."):
# Get the response from the assistant
response = planner.run(f"{destination} for {num_days} days", stream=False)
st.write(response)
st.write(response.content)

View File

@@ -1,11 +1,11 @@
from phi.agent import Agent
from phi.model.google import Gemini
from phi.tools.duckduckgo import DuckDuckGo
from agno.agent import Agent
from agno.models.google import Gemini
from agno.tools.duckduckgo import DuckDuckGoTools
from google.generativeai import upload_file, get_file
import time
# 1. Initialize the Multimodal Agent
agent = Agent(model=Gemini(id="gemini-2.0-flash-exp"), tools=[DuckDuckGo()], markdown=True)
agent = Agent(model=Gemini(id="gemini-2.0-flash-exp"), tools=[DuckDuckGoTools()], markdown=True)
# 2. Image Input
image_url = "https://example.com/sample_image.jpg"

View File

@@ -1,3 +1,3 @@
streamlit
phidata
agno
openai

View File

@@ -1,8 +1,8 @@
# Import the required libraries
import streamlit as st
from phi.assistant import Assistant
from phi.tools.hackernews import HackerNews
from phi.llm.openai import OpenAIChat
from agno.agent import Agent
from agno.tools.hackernews import HackerNewsTools
from agno.models.openai import OpenAIChat
# Set up the Streamlit app
st.title("Multi-Agent AI Researcher 🔍🤖")
@@ -13,23 +13,23 @@ openai_api_key = st.text_input("OpenAI API Key", type="password")
if openai_api_key:
# Create instances of the Assistant
story_researcher = Assistant(
story_researcher = Agent(
name="HackerNews Story Researcher",
role="Researches hackernews stories and users.",
tools=[HackerNews()],
tools=[HackerNewsTools()],
)
user_researcher = Assistant(
user_researcher = Agent(
name="HackerNews User Researcher",
role="Reads articles from URLs.",
tools=[HackerNews()],
tools=[HackerNewsTools()],
)
hn_assistant = Assistant(
hn_assistant = Agent(
name="Hackernews Team",
team=[story_researcher, user_researcher],
llm=OpenAIChat(
model="gpt-4o",
model=OpenAIChat(
id="gpt-4o",
max_tokens=1024,
temperature=0.5,
api_key=openai_api_key
@@ -42,4 +42,4 @@ if openai_api_key:
if query:
# Get the response from the assistant
response = hn_assistant.run(query, stream=False)
st.write(response)
st.write(response.content)

View File

@@ -1,32 +1,32 @@
# Import the required libraries
import streamlit as st
from phi.assistant import Assistant
from phi.tools.hackernews import HackerNews
from phi.llm.ollama import Ollama
from agno.agent import Agent
from agno.tools.hackernews import HackerNews
from agno.models.ollama import Ollama
# Set up the Streamlit app
st.title("Multi-Agent AI Researcher using Llama-3 🔍🤖")
st.caption("This app allows you to research top stories and users on HackerNews and write blogs, reports and social posts.")
# Create instances of the Assistant
story_researcher = Assistant(
story_researcher = Agent(
name="HackerNews Story Researcher",
role="Researches hackernews stories and users.",
tools=[HackerNews()],
llm=Ollama(model="llama3:instruct", max_tokens=1024)
model=Ollama(id="llama3.2", max_tokens=1024)
)
user_researcher = Assistant(
user_researcher = Agent(
name="HackerNews User Researcher",
role="Reads articles from URLs.",
tools=[HackerNews()],
llm=Ollama(model="llama3:instruct", max_tokens=1024)
model=Ollama(id="llama3.2", max_tokens=1024)
)
hn_assistant = Assistant(
hn_assistant = Agent(
name="Hackernews Team",
team=[story_researcher, user_researcher],
llm=Ollama(model="llama3:instruct", max_tokens=1024)
model=Ollama(id="llama3.2", max_tokens=1024)
)
# Input field for the report query
@@ -35,4 +35,4 @@ query = st.text_input("Enter your report query")
if query:
# Get the response from the assistant
response = hn_assistant.run(query, stream=False)
st.write(response)
st.write(response.content)

View File

@@ -1,6 +1,6 @@
import streamlit as st
from phi.agent import Agent
from phi.model.google import Gemini
from agno.agent import Agent
from agno.models.google import Gemini
import tempfile
import os

View File

@@ -1,7 +1,7 @@
import streamlit as st
from phi.agent import Agent
from phi.model.google import Gemini
from phi.tools.duckduckgo import DuckDuckGo
from agno.agent import Agent
from agno.models.google import Gemini
from agno.tools.duckduckgo import DuckDuckGoTools
from google.generativeai import upload_file, get_file
import time
from pathlib import Path
@@ -21,7 +21,7 @@ def initialize_agent():
return Agent(
name="Multimodal Analyst",
model=Gemini(id="gemini-2.0-flash-exp"),
tools=[DuckDuckGo()],
tools=[DuckDuckGoTools()],
markdown=True,
)

View File

@@ -1,3 +1,3 @@
phidata==2.7.2
agno
google-generativeai==0.8.3
streamlit==1.40.2

View File

@@ -1,6 +1,6 @@
from phi.agent import Agent
from phi.model.google import Gemini
from phi.tools.duckduckgo import DuckDuckGo
from agno.agent import Agent
from agno.models.google import Gemini
from agno.tools.duckduckgo import DuckDuckGoTools
import streamlit as st
from PIL import Image
from typing import List, Optional
@@ -37,7 +37,7 @@ def initialize_agents(api_key: str) -> tuple[Agent, Agent, Agent]:
market_agent = Agent(
model=model,
tools=[DuckDuckGo(search=True)],
tools=[DuckDuckGoTools()],
instructions=[
"You are a market research expert that:",
"1. Identifies market trends and competitor patterns",

View File

@@ -1,6 +1,6 @@
google-generativeai==0.8.3
streamlit==1.41.1
phidata==2.7.2
agno
Pillow==11.0.0
duckduckgo-search==6.3.7

View File

@@ -1,4 +1,4 @@
phidata
agno
duckduckgo-search
yfinance
fastapi[standard]

View File

@@ -1,15 +1,15 @@
# import necessary python libraries
from phi.agent import Agent
from phi.model.xai import xAI
from phi.tools.yfinance import YFinanceTools
from phi.tools.duckduckgo import DuckDuckGo
from phi.playground import Playground, serve_playground_app
from agno.agent import Agent
from agno.models.xai import xAI
from agno.tools.yfinance import YFinanceTools
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.playground import Playground, serve_playground_app
# create the AI finance agent
agent = Agent(
name="xAI Finance Agent",
model = xAI(id="grok-beta"),
tools=[DuckDuckGo(), YFinanceTools(stock_price=True, analyst_recommendations=True, stock_fundamentals=True)],
tools=[DuckDuckGoTools(), YFinanceTools(stock_price=True, analyst_recommendations=True, stock_fundamentals=True)],
instructions = ["Always use tables to display financial/numerical data. For text data use bullet points and small paragraphs."],
show_tool_calls = True,
markdown = True,

View File

@@ -1,2 +1,3 @@
streamlit
embedchain
embedchain
streamlit-chat

View File

@@ -1,8 +1,8 @@
# Import the required libraries
import streamlit as st
from phi.assistant import Assistant
from phi.llm.openai import OpenAIChat
from phi.tools.arxiv_toolkit import ArxivToolkit
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.arxiv import ArxivTools
# Set up the Streamlit app
st.title("Chat with Research Papers 🔎🤖")
@@ -14,12 +14,12 @@ openai_access_token = st.text_input("OpenAI API Key", type="password")
# If OpenAI API key is provided, create an instance of Assistant
if openai_access_token:
# Create an instance of the Assistant
assistant = Assistant(
llm=OpenAIChat(
model="gpt-4o",
assistant = Agent(
model=OpenAIChat(
id="gpt-4o",
max_tokens=1024,
temperature=0.9,
api_key=openai_access_token) , tools=[ArxivToolkit()]
api_key=openai_access_token) , tools=[ArxivTools()]
)
# Get the search query from the user
@@ -28,4 +28,4 @@ if openai_access_token:
if query:
# Search the web using the AI Assistant
response = assistant.run(query, stream=False)
st.write(response)
st.write(response.content)

View File

@@ -1,17 +1,17 @@
# Import the required libraries
import streamlit as st
from phi.assistant import Assistant
from phi.llm.ollama import Ollama
from phi.tools.arxiv_toolkit import ArxivToolkit
from agno.agent import Agent
from agno.models.ollama import Ollama
from agno.tools.arxiv import ArxivTools
# Set up the Streamlit app
st.title("Chat with Research Papers 🔎🤖")
st.caption("This app allows you to chat with arXiv research papers using Llama-3 running locally.")
# Create an instance of the Assistant
assistant = Assistant(
llm=Ollama(
model="llama3:instruct") , tools=[ArxivToolkit()], show_tool_calls=True
assistant = Agent(
model=Ollama(
id="llama3.1:8b") , tools=[ArxivTools()], show_tool_calls=True
)
# Get the search query from the user
@@ -20,4 +20,4 @@ query= st.text_input("Enter the Search Query", type="default")
if query:
# Search the web using the AI Assistant
response = assistant.run(query, stream=False)
st.write(response)
st.write(response.content)

View File

@@ -1,5 +1,5 @@
streamlit
phidata
agno
arxiv
openai
pypdf

View File

@@ -1,9 +1,9 @@
from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.knowledge.pdf import PDFUrlKnowledgeBase
from phi.vectordb.lancedb import LanceDb, SearchType
from phi.playground import Playground, serve_playground_app
from phi.tools.duckduckgo import DuckDuckGo
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.knowledge.pdf_url import PDFUrlKnowledgeBase
from agno.vectordb.lancedb import LanceDb, SearchType
from agno.playground import Playground, serve_playground_app
from agno.tools.duckduckgo import DuckDuckGoTools
db_uri = "tmp/lancedb"
# Create a knowledge base from a PDF
@@ -19,7 +19,7 @@ rag_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
agent_id="rag-agent",
knowledge=knowledge_base, # Add the knowledge base to the agent
tools=[DuckDuckGo()],
tools=[DuckDuckGoTools()],
show_tool_calls=True,
markdown=True,
)

View File

@@ -1,4 +1,4 @@
phidata
agno
openai
lancedb
tantivy

View File

@@ -1,14 +1,14 @@
import streamlit as st
import nest_asyncio
from io import BytesIO
from phi.assistant import Assistant
from phi.document.reader.pdf import PDFReader
from phi.llm.openai import OpenAIChat
from phi.knowledge import AssistantKnowledge
from phi.tools.duckduckgo import DuckDuckGo
from phi.embedder.openai import OpenAIEmbedder
from phi.vectordb.pgvector import PgVector2
from phi.storage.assistant.postgres import PgAssistantStorage
from agno.agent import Agent
from agno.document.reader.pdf_reader import PDFReader
from agno.models.openai import OpenAIChat
from agno.knowledge.pdf_url import PDFUrlKnowledgeBase
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.embedder.openai import OpenAIEmbedder
from agno.vectordb.pgvector import PgVector, SearchType
from agno.storage.agent.postgres import PostgresAgentStorage
# Apply nest_asyncio to allow nested event loops, required for running async functions in Streamlit
nest_asyncio.apply()
@@ -18,22 +18,22 @@ DB_URL = "postgresql+psycopg://ai:ai@localhost:5532/ai"
# Function to set up the Assistant, utilizing caching for resource efficiency
@st.cache_resource
def setup_assistant(api_key: str) -> Assistant:
llm = OpenAIChat(model="gpt-4o-mini", api_key=api_key)
def setup_assistant(api_key: str) -> Agent:
llm = OpenAIChat(id="gpt-4o-mini", api_key=api_key)
# Set up the Assistant with storage, knowledge base, and tools
return Assistant(
name="auto_rag_assistant", # Name of the Assistant
llm=llm, # Language model to be used
storage=PgAssistantStorage(table_name="auto_rag_storage", db_url=DB_URL),
knowledge_base=AssistantKnowledge(
vector_db=PgVector2(
return Agent(
id="auto_rag_agent", # Name of the Assistant
model=llm, # Language model to be used
storage=PostgresAgentStorage(table_name="auto_rag_storage", db_url=DB_URL),
knowledge_base=PDFUrlKnowledgeBase(
vector_db=PgVector(
db_url=DB_URL,
collection="auto_rag_docs",
embedder=OpenAIEmbedder(model="text-embedding-ada-002", dimensions=1536, api_key=api_key),
embedder=OpenAIEmbedder(id="text-embedding-ada-002", dimensions=1536, api_key=api_key),
),
num_documents=3,
),
tools=[DuckDuckGo()], # Additional tool for web search via DuckDuckGo
tools=[DuckDuckGoTools()], # Additional tool for web search via DuckDuckGo
instructions=[
"Search your knowledge base first.",
"If not found, search the internet.",
@@ -41,24 +41,23 @@ def setup_assistant(api_key: str) -> Assistant:
],
show_tool_calls=True,
search_knowledge=True,
read_chat_history=True,
markdown=True,
debug_mode=True,
)
# Function to add a PDF document to the knowledge base
def add_document(assistant: Assistant, file: BytesIO):
def add_document(agent: Agent, file: BytesIO):
reader = PDFReader()
docs = reader.read(file)
if docs:
assistant.knowledge_base.load_documents(docs, upsert=True)
agent.knowledge_base.load_documents(docs, upsert=True)
st.success("Document added to the knowledge base.")
else:
st.error("Failed to read the document.")
# Function to query the Assistant and return a response
def query_assistant(assistant: Assistant, question: str) -> str:
return "".join([delta for delta in assistant.run(question)])
def query_assistant(agent: Agent, question: str) -> str:
return "".join([delta for delta in agent.run(question)])
# Main function to handle Streamlit app layout and interactions
def main():
@@ -87,7 +86,7 @@ def main():
with st.spinner("🤔 Thinking..."):
# Query the assistant and display the response
answer = query_assistant(assistant, question)
st.write("📝 **Response:**", answer)
st.write("📝 **Response:**", answer.content)
else:
# Show an error if the question input is empty
st.error("Please enter a question.")

View File

@@ -1,5 +1,5 @@
streamlit
phidata
agno
openai
psycopg-binary
pgvector

View File

@@ -1,10 +1,10 @@
# Import necessary libraries
from phi.agent import Agent
from phi.model.ollama import Ollama
from phi.knowledge.pdf import PDFUrlKnowledgeBase
from phi.vectordb.qdrant import Qdrant
from phi.embedder.ollama import OllamaEmbedder
from phi.playground import Playground, serve_playground_app
from agno.agent import Agent
from agno.models.ollama import Ollama
from agno.knowledge.pdf_url import PDFUrlKnowledgeBase
from agno.vectordb.qdrant import Qdrant
from agno.embedder.ollama import OllamaEmbedder
from agno.playground import Playground, serve_playground_app
# Define the collection name for the vector database
collection_name = "thai-recipe-index"

View File

@@ -1,4 +1,4 @@
phidata
agno
qdrant-client
ollama
pypdf

View File

@@ -10,8 +10,6 @@ from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from dotenv import load_dotenv
load_dotenv()
# Initialize embedding model
embedding_model = GoogleGenerativeAIEmbeddings(model="models/embedding-001")