From 3277f702bb8e355f80d8e584a4ffc335289f6d1b Mon Sep 17 00:00:00 2001 From: Madhu Date: Mon, 3 Feb 2025 00:11:12 +0530 Subject: [PATCH] complete phidata -> Agno + few old error corrections --- .../cursor_ai_experiments/requirements.txt | 7 +++ .../llm_router_app/requirements.txt | 3 +- .../llama3_tool_use.py | 14 +++--- .../local_llama3.1_tool_use/requirements.txt | 2 +- .../claude_websearch.py | 18 +++---- .../web_search_ai_assistant/gpt4_websearch.py | 16 +++---- .../web_search_ai_assistant/requirements.txt | 2 +- .../ai_data_analysis_agent/README.md | 2 +- .../ai_data_analysis_agent/ai_data_analyst.py | 4 +- .../ai_data_analysis_agent/requirements.txt | 5 +- .../finance_agent_team.py | 18 +++---- .../ai_finance_agent_team/requirements.txt | 2 +- .../ai_health_fitness_agent/README.md | 4 +- .../ai_health_fitness_agent/health_agent.py | 4 +- .../ai_health_fitness_agent/requirements.txt | 4 +- .../ai_investment_agent/README.md | 4 +- .../ai_investment_agent/investment_agent.py | 8 ++-- .../ai_investment_agent/requirements.txt | 2 +- .../ai_journalist_agent/journalist_agent.py | 27 +++++------ .../ai_journalist_agent/requirements.txt | 2 +- .../ai_lead_generation_agent.py | 6 +-- .../ai_lead_generation_agent/requirements.txt | 4 +- .../ai_legal_agent_team/legal_agent_team.py | 14 +++--- .../local_legal_agent.py | 10 ++-- .../requirements.txt | 2 +- .../ai_legal_agent_team/requirements.txt | 2 +- .../ai_medical_imaging_agent/README.md | 2 +- .../ai_medical_imaging.py | 8 ++-- .../ai_medical_imaging_agent/requirements.txt | 2 +- .../movie_production_agent.py | 18 +++---- .../requirements.txt | 2 +- .../finance_agent.py | 18 ++++--- .../requirements.txt | 2 +- .../local_ai_reasoning_agent.py | 6 +-- .../ai_reasoning_agent/reasoning_agent.py | 8 ++-- .../ai_recruitment_agent_team.py | 6 +-- .../requirements.txt | 3 +- .../requirements.txt | 2 +- .../startup_trends_agent.py | 14 +++--- .../ai_teaching_agent_team/requirements.txt | 2 +- .../teaching_agent_team.py | 10 
++-- .../ai_tic_tac_toe_agent/README.md | 2 +- .../ai_tic_tac_toe_agent.py | 10 ++-- .../ai_tic_tac_toe_agent/requirements.txt | 4 +- .../ai_travel_agent/local_travel_agent.py | 20 ++++---- .../ai_travel_agent/requirements.txt | 2 +- .../ai_travel_agent/travel_agent.py | 18 ++++--- .../multimodal_ai_agent.py | 8 ++-- .../multi_agent_researcher/requirements.txt | 2 +- .../multi_agent_researcher/research_agent.py | 22 ++++----- .../research_agent_llama3.py | 20 ++++---- .../multimodal_reasoning_agent.py | 4 +- .../multimodal_ai_agent/mutimodal_agent.py | 8 ++-- .../multimodal_ai_agent/requirements.txt | 2 +- .../design_agent_team.py | 8 ++-- .../requirements.txt | 2 +- .../xai_finance_agent/requirements.txt | 2 +- .../xai_finance_agent/xai_finance_agent.py | 12 ++--- .../chat_with_pdf/requirements.txt | 3 +- .../chat_with_research_papers/chat_arxiv.py | 16 +++---- .../chat_arxiv_llama3.py | 14 +++--- .../requirements.txt | 2 +- rag_tutorials/agentic_rag/rag_agent.py | 14 +++--- rag_tutorials/agentic_rag/requirements.txt | 2 +- rag_tutorials/autonomous_rag/autorag.py | 47 +++++++++---------- rag_tutorials/autonomous_rag/requirements.txt | 2 +- .../local_rag_agent/local_rag_agent.py | 12 ++--- .../local_rag_agent/requirements.txt | 2 +- rag_tutorials/rag_chain/app.py | 2 - 69 files changed, 276 insertions(+), 275 deletions(-) create mode 100644 advanced_tools_frameworks/cursor_ai_experiments/requirements.txt diff --git a/advanced_tools_frameworks/cursor_ai_experiments/requirements.txt b/advanced_tools_frameworks/cursor_ai_experiments/requirements.txt new file mode 100644 index 0000000..ee215a2 --- /dev/null +++ b/advanced_tools_frameworks/cursor_ai_experiments/requirements.txt @@ -0,0 +1,7 @@ +scrapegraphai +playwright +langchain-community +streamlit-chat +streamlit +crewai +ollama \ No newline at end of file diff --git a/advanced_tools_frameworks/llm_router_app/requirements.txt b/advanced_tools_frameworks/llm_router_app/requirements.txt index a8f8745..e3defc0 100644 
--- a/advanced_tools_frameworks/llm_router_app/requirements.txt +++ b/advanced_tools_frameworks/llm_router_app/requirements.txt @@ -1,2 +1,3 @@ streamlit -"routellm[serve,eval]" \ No newline at end of file +"routellm[serve,eval]" +routellm \ No newline at end of file diff --git a/advanced_tools_frameworks/local_llama3.1_tool_use/llama3_tool_use.py b/advanced_tools_frameworks/local_llama3.1_tool_use/llama3_tool_use.py index f42872c..a1a16e2 100644 --- a/advanced_tools_frameworks/local_llama3.1_tool_use/llama3_tool_use.py +++ b/advanced_tools_frameworks/local_llama3.1_tool_use/llama3_tool_use.py @@ -1,9 +1,9 @@ import streamlit as st import os -from phi.assistant import Assistant -from phi.llm.ollama import Ollama -from phi.tools.yfinance import YFinanceTools -from phi.tools.serpapi_tools import SerpApiTools +from agno.agent import Agent +from agno.models.ollama import Ollama +from agno.tools.yfinance import YFinanceTools +from agno.tools.serpapi import SerpApiTools st.set_page_config(page_title="Llama-3 Tool Use", page_icon="🦙") @@ -13,9 +13,9 @@ if 'SERPAPI_API_KEY' not in os.environ: st.stop() def get_assistant(tools): - return Assistant( + return Agent( name="llama3_assistant", - llm=Ollama(model="llama3"), + model=Ollama(id="llama3.1:8b"), tools=tools, description="You are a helpful assistant that can access specific tools based on user selection.", show_tool_calls=True, @@ -25,7 +25,7 @@ def get_assistant(tools): ) -st.title("🦙 Local Llama-3 Tool Use") +st.title("🦙 Local Llama-3.1 Tool Use") st.markdown(""" This app demonstrates function calling with the local Llama3 model using Ollama. Select tools in the sidebar and ask relevant questions! 
diff --git a/advanced_tools_frameworks/local_llama3.1_tool_use/requirements.txt b/advanced_tools_frameworks/local_llama3.1_tool_use/requirements.txt index f3bea12..c103f25 100644 --- a/advanced_tools_frameworks/local_llama3.1_tool_use/requirements.txt +++ b/advanced_tools_frameworks/local_llama3.1_tool_use/requirements.txt @@ -1,3 +1,3 @@ streamlit ollama -phidata \ No newline at end of file +agno \ No newline at end of file diff --git a/advanced_tools_frameworks/web_search_ai_assistant/claude_websearch.py b/advanced_tools_frameworks/web_search_ai_assistant/claude_websearch.py index 9d6a506..6dd9e63 100644 --- a/advanced_tools_frameworks/web_search_ai_assistant/claude_websearch.py +++ b/advanced_tools_frameworks/web_search_ai_assistant/claude_websearch.py @@ -1,8 +1,8 @@ # Import the required libraries import streamlit as st -from phi.assistant import Assistant -from phi.tools.duckduckgo import DuckDuckGo -from phi.llm.anthropic import Claude +from agno.agent import Agent +from agno.tools.duckduckgo import DuckDuckGoTools +from agno.models.anthropic import Claude # Set up the Streamlit app st.title("Claude Sonnet + AI Web Search 🤖") @@ -13,12 +13,12 @@ anthropic_api_key = st.text_input("Anthropic's Claude API Key", type="password") # If Anthropic API key is provided, create an instance of Assistant if anthropic_api_key: - assistant = Assistant( - llm=Claude( - model="claude-3-5-sonnet-20240620", + assistant = Agent( + model=Claude( + id="claude-3-5-sonnet-20240620", max_tokens=1024, - temperature=0.9, - api_key=anthropic_api_key) , tools=[DuckDuckGo()], show_tool_calls=True + temperature=0.3, + api_key=anthropic_api_key) , tools=[DuckDuckGoTools()], show_tool_calls=True ) # Get the search query from the user query= st.text_input("Enter the Search Query", type="default") @@ -26,4 +26,4 @@ if anthropic_api_key: if query: # Search the web using the AI Assistant response = assistant.run(query, stream=False) - st.write(response) \ No newline at end of file + 
st.write(response.content) \ No newline at end of file diff --git a/advanced_tools_frameworks/web_search_ai_assistant/gpt4_websearch.py b/advanced_tools_frameworks/web_search_ai_assistant/gpt4_websearch.py index aab2563..d805671 100644 --- a/advanced_tools_frameworks/web_search_ai_assistant/gpt4_websearch.py +++ b/advanced_tools_frameworks/web_search_ai_assistant/gpt4_websearch.py @@ -1,8 +1,8 @@ # Import the required libraries import streamlit as st -from phi.assistant import Assistant -from phi.tools.duckduckgo import DuckDuckGo -from phi.llm.openai import OpenAIChat +from agno.agent import Agent +from agno.tools.duckduckgo import DuckDuckGoTools +from agno.models.openai import OpenAIChat # Set up the Streamlit app st.title("AI Web Search Assistant 🤖") @@ -14,12 +14,12 @@ openai_access_token = st.text_input("OpenAI API Key", type="password") # If OpenAI API key is provided, create an instance of Assistant if openai_access_token: # Create an instance of the Assistant - assistant = Assistant( - llm=OpenAIChat( - model="gpt-4o", + assistant = Agent( + model=OpenAIChat( + id="gpt-4o", max_tokens=1024, temperature=0.9, - api_key=openai_access_token) , tools=[DuckDuckGo()], show_tool_calls=True + api_key=openai_access_token) , tools=[DuckDuckGoTools()], show_tool_calls=True ) # Get the search query from the user @@ -28,4 +28,4 @@ if openai_access_token: if query: # Search the web using the AI Assistant response = assistant.run(query, stream=False) - st.write(response) \ No newline at end of file + st.write(response.content) \ No newline at end of file diff --git a/advanced_tools_frameworks/web_search_ai_assistant/requirements.txt b/advanced_tools_frameworks/web_search_ai_assistant/requirements.txt index be21749..dd38cb3 100644 --- a/advanced_tools_frameworks/web_search_ai_assistant/requirements.txt +++ b/advanced_tools_frameworks/web_search_ai_assistant/requirements.txt @@ -1,4 +1,4 @@ streamlit openai -phidata +agno duckduckgo-search \ No newline at end of file diff 
--git a/ai_agent_tutorials/ai_data_analysis_agent/README.md b/ai_agent_tutorials/ai_data_analysis_agent/README.md index 2cfc970..bdd53a0 100644 --- a/ai_agent_tutorials/ai_data_analysis_agent/README.md +++ b/ai_agent_tutorials/ai_data_analysis_agent/README.md @@ -1,6 +1,6 @@ # 📊 AI Data Analysis Agent -An AI data analysis Agent built using the phidata Agent framework and Openai's gpt-4o model. This agent helps users analyze their data - csv, excel files through natural language queries, powered by OpenAI's language models and DuckDB for efficient data processing - making data analysis accessible to users regardless of their SQL expertise. +An AI data analysis Agent built using the Agno Agent framework and Openai's gpt-4o model. This agent helps users analyze their data - csv, excel files through natural language queries, powered by OpenAI's language models and DuckDB for efficient data processing - making data analysis accessible to users regardless of their SQL expertise. ## Features diff --git a/ai_agent_tutorials/ai_data_analysis_agent/ai_data_analyst.py b/ai_agent_tutorials/ai_data_analysis_agent/ai_data_analyst.py index dcc8ce6..cfc90af 100644 --- a/ai_agent_tutorials/ai_data_analysis_agent/ai_data_analyst.py +++ b/ai_agent_tutorials/ai_data_analysis_agent/ai_data_analyst.py @@ -3,9 +3,9 @@ import tempfile import csv import streamlit as st import pandas as pd -from phi.model.openai import OpenAIChat +from agno.models.openai import OpenAIChat from phi.agent.duckdb import DuckDbAgent -from phi.tools.pandas import PandasTools +from agno.tools.pandas import PandasTools import re # Function to preprocess and save the uploaded file diff --git a/ai_agent_tutorials/ai_data_analysis_agent/requirements.txt b/ai_agent_tutorials/ai_data_analysis_agent/requirements.txt index 48230a7..ed751ae 100644 --- a/ai_agent_tutorials/ai_data_analysis_agent/requirements.txt +++ b/ai_agent_tutorials/ai_data_analysis_agent/requirements.txt @@ -1,6 +1,7 @@ -phidata==2.7.3 +phidata 
streamlit==1.41.1 openai==1.58.1 duckdb==1.1.3 pandas -numpy==1.26.4 \ No newline at end of file +numpy==1.26.4 +agno \ No newline at end of file diff --git a/ai_agent_tutorials/ai_finance_agent_team/finance_agent_team.py b/ai_agent_tutorials/ai_finance_agent_team/finance_agent_team.py index f8aa3b3..8758cf8 100644 --- a/ai_agent_tutorials/ai_finance_agent_team/finance_agent_team.py +++ b/ai_agent_tutorials/ai_finance_agent_team/finance_agent_team.py @@ -1,16 +1,16 @@ -from phi.agent import Agent -from phi.model.openai import OpenAIChat -from phi.storage.agent.sqlite import SqlAgentStorage -from phi.tools.duckduckgo import DuckDuckGo -from phi.tools.yfinance import YFinanceTools -from phi.playground import Playground, serve_playground_app +from agno.agent import Agent +from agno.models.openai import OpenAIChat +from agno.storage.agent.sqlite import SqliteAgentStorage +from agno.tools.duckduckgo import DuckDuckGoTools +from agno.tools.yfinance import YFinanceTools +from agno.playground import Playground, serve_playground_app web_agent = Agent( name="Web Agent", role="Search the web for information", model=OpenAIChat(id="gpt-4o"), - tools=[DuckDuckGo()], - storage=SqlAgentStorage(table_name="web_agent", db_file="agents.db"), + tools=[DuckDuckGoTools()], + storage=SqliteAgentStorage(table_name="web_agent", db_file="agents.db"), add_history_to_messages=True, markdown=True, ) @@ -21,7 +21,7 @@ finance_agent = Agent( model=OpenAIChat(id="gpt-4o"), tools=[YFinanceTools(stock_price=True, analyst_recommendations=True, company_info=True, company_news=True)], instructions=["Always use tables to display data"], - storage=SqlAgentStorage(table_name="finance_agent", db_file="agents.db"), + storage=SqliteAgentStorage(table_name="finance_agent", db_file="agents.db"), add_history_to_messages=True, markdown=True, ) diff --git a/ai_agent_tutorials/ai_finance_agent_team/requirements.txt b/ai_agent_tutorials/ai_finance_agent_team/requirements.txt index 0638842..958c9bb 100644 --- 
a/ai_agent_tutorials/ai_finance_agent_team/requirements.txt +++ b/ai_agent_tutorials/ai_finance_agent_team/requirements.txt @@ -1,5 +1,5 @@ openai -phidata +agno duckduckgo-search yfinance fastapi[standard] diff --git a/ai_agent_tutorials/ai_health_fitness_agent/README.md b/ai_agent_tutorials/ai_health_fitness_agent/README.md index 6b7bac9..2151ae8 100644 --- a/ai_agent_tutorials/ai_health_fitness_agent/README.md +++ b/ai_agent_tutorials/ai_health_fitness_agent/README.md @@ -1,6 +1,6 @@ # AI Health & Fitness Planner Agent 🏋️‍♂️ -The **AI Health & Fitness Planner** is a personalized health and fitness Agent powered by Phidata's AI Agent framework. This app generates tailored dietary and fitness plans based on user inputs such as age, weight, height, activity level, dietary preferences, and fitness goals. +The **AI Health & Fitness Planner** is a personalized health and fitness Agent powered by Agno AI Agent framework. This app generates tailored dietary and fitness plans based on user inputs such as age, weight, height, activity level, dietary preferences, and fitness goals. 
## Features @@ -24,7 +24,7 @@ The **AI Health & Fitness Planner** is a personalized health and fitness Agent p The application requires the following Python libraries: -- `phidata` +- `agno` - `google-generativeai` - `streamlit` diff --git a/ai_agent_tutorials/ai_health_fitness_agent/health_agent.py b/ai_agent_tutorials/ai_health_fitness_agent/health_agent.py index 84783c0..30dc5dc 100644 --- a/ai_agent_tutorials/ai_health_fitness_agent/health_agent.py +++ b/ai_agent_tutorials/ai_health_fitness_agent/health_agent.py @@ -1,6 +1,6 @@ import streamlit as st -from phi.agent import Agent -from phi.model.google import Gemini +from agno.agent import Agent +from agno.models.google import Gemini st.set_page_config( page_title="AI Health & Fitness Planner", diff --git a/ai_agent_tutorials/ai_health_fitness_agent/requirements.txt b/ai_agent_tutorials/ai_health_fitness_agent/requirements.txt index 86a802a..0762e21 100644 --- a/ai_agent_tutorials/ai_health_fitness_agent/requirements.txt +++ b/ai_agent_tutorials/ai_health_fitness_agent/requirements.txt @@ -1,3 +1,3 @@ -phidata==2.5.33 google-generativeai==0.8.3 -streamlit==1.40.2 \ No newline at end of file +streamlit==1.40.2 +agno \ No newline at end of file diff --git a/ai_agent_tutorials/ai_investment_agent/README.md b/ai_agent_tutorials/ai_investment_agent/README.md index fe085b7..6ce5973 100644 --- a/ai_agent_tutorials/ai_investment_agent/README.md +++ b/ai_agent_tutorials/ai_investment_agent/README.md @@ -1,5 +1,5 @@ ## 📈 AI Investment Agent -This Streamlit app is an AI-powered investment agent that compares the performance of two stocks and generates detailed reports. By using GPT-4o with Yahoo Finance data, this app provides valuable insights to help you make informed investment decisions. +This Streamlit app is an AI-powered investment agent built with Agno's AI Agent framework that compares the performance of two stocks and generates detailed reports. 
By using GPT-4o with Yahoo Finance data, this app provides valuable insights to help you make informed investment decisions. ### Features - Compare the performance of two stocks @@ -32,7 +32,7 @@ streamlit run investment_agent.py ### How it Works? - Upon running the app, you will be prompted to enter your OpenAI API key. This key is used to authenticate and access the OpenAI language model. -- Once you provide a valid API key, an instance of the Assistant class is created. This assistant utilizes the GPT-4 language model from OpenAI and the YFinanceTools for accessing stock data. +- Once you provide a valid API key, an instance of the Assistant class is created. This assistant utilizes the GPT-4o language model from OpenAI and the YFinanceTools for accessing stock data. - Enter the stock symbols of the two companies you want to compare in the provided text input fields. - The assistant will perform the following steps: - Retrieve real-time stock prices and historical data using YFinanceTools diff --git a/ai_agent_tutorials/ai_investment_agent/investment_agent.py b/ai_agent_tutorials/ai_investment_agent/investment_agent.py index 2bd474d..acfef2c 100644 --- a/ai_agent_tutorials/ai_investment_agent/investment_agent.py +++ b/ai_agent_tutorials/ai_investment_agent/investment_agent.py @@ -1,8 +1,8 @@ # Import the required libraries import streamlit as st -from phi.assistant import Assistant -from phi.llm.openai import OpenAIChat -from phi.tools.yfinance import YFinanceTools +from agno.agent import Agent as Assistant +from agno.models.openai import OpenAIChat +from agno.tools.yfinance import YFinanceTools # Set up the Streamlit app st.title("AI Investment Agent 📈🤖") @@ -27,4 +27,4 @@ if openai_api_key: # Get the response from the assistant query = f"Compare {stock1} to {stock2}. Use every tool you have."
response = assistant.run(query, stream=False) - st.write(response) \ No newline at end of file + st.write(response.content) \ No newline at end of file diff --git a/ai_agent_tutorials/ai_investment_agent/requirements.txt b/ai_agent_tutorials/ai_investment_agent/requirements.txt index fe27582..b25ad27 100644 --- a/ai_agent_tutorials/ai_investment_agent/requirements.txt +++ b/ai_agent_tutorials/ai_investment_agent/requirements.txt @@ -1,4 +1,4 @@ streamlit -phidata +agno openai yfinance diff --git a/ai_agent_tutorials/ai_journalist_agent/journalist_agent.py b/ai_agent_tutorials/ai_journalist_agent/journalist_agent.py index 3bc8ab5..af7390b 100644 --- a/ai_agent_tutorials/ai_journalist_agent/journalist_agent.py +++ b/ai_agent_tutorials/ai_journalist_agent/journalist_agent.py @@ -1,10 +1,10 @@ # Import the required libraries from textwrap import dedent -from phi.assistant import Assistant -from phi.tools.serpapi_tools import SerpApiTools -from phi.tools.newspaper4k import Newspaper4k as NewspaperToolkit +from agno.agent import Agent +from agno.tools.serpapi import SerpApiTools +from agno.tools.newspaper4k import Newspaper4kTools import streamlit as st -from phi.llm.openai import OpenAIChat +from agno.models.openai import OpenAIChat # Set up the Streamlit app st.title("AI Journalist Agent 🗞️") @@ -17,10 +17,10 @@ openai_api_key = st.text_input("Enter OpenAI API Key to access GPT-4o", type="pa serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password") if openai_api_key and serp_api_key: - searcher = Assistant( + searcher = Agent( name="Searcher", role="Searches for top URLs based on a topic", - llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key), + model=OpenAIChat(id="gpt-4o", api_key=openai_api_key), description=dedent( """\ You are a world-class journalist for the New York Times. 
Given a topic, generate a list of 3 search terms @@ -37,10 +37,10 @@ if openai_api_key and serp_api_key: tools=[SerpApiTools(api_key=serp_api_key)], add_datetime_to_instructions=True, ) - writer = Assistant( + writer = Agent( name="Writer", role="Retrieves text from URLs and writes a high-quality article", - llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key), + model=OpenAIChat(id="gpt-4o", api_key=openai_api_key), description=dedent( """\ You are a senior writer for the New York Times. Given a topic and a list of URLs, @@ -57,15 +57,14 @@ if openai_api_key and serp_api_key: "Focus on clarity, coherence, and overall quality.", "Never make up facts or plagiarize. Always provide proper attribution.", ], - tools=[NewspaperToolkit()], + tools=[Newspaper4kTools()], add_datetime_to_instructions=True, - add_chat_history_to_prompt=True, - num_history_messages=3, + markdown=True, ) - editor = Assistant( + editor = Agent( name="Editor", - llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key), + model=OpenAIChat(id="gpt-4o", api_key=openai_api_key), team=[searcher, writer], description="You are a senior NYT editor. 
Given a topic, your goal is to write a NYT worthy article.", instructions=[ @@ -88,4 +87,4 @@ if openai_api_key and serp_api_key: with st.spinner("Processing..."): # Get the response from the assistant response = editor.run(query, stream=False) - st.write(response) \ No newline at end of file + st.write(response.content) \ No newline at end of file diff --git a/ai_agent_tutorials/ai_journalist_agent/requirements.txt b/ai_agent_tutorials/ai_journalist_agent/requirements.txt index 56f1049..c97808a 100644 --- a/ai_agent_tutorials/ai_journalist_agent/requirements.txt +++ b/ai_agent_tutorials/ai_journalist_agent/requirements.txt @@ -1,5 +1,5 @@ streamlit -phidata +agno openai google-search-results newspaper4k diff --git a/ai_agent_tutorials/ai_lead_generation_agent/ai_lead_generation_agent.py b/ai_agent_tutorials/ai_lead_generation_agent/ai_lead_generation_agent.py index dd48829..2814167 100644 --- a/ai_agent_tutorials/ai_lead_generation_agent/ai_lead_generation_agent.py +++ b/ai_agent_tutorials/ai_lead_generation_agent/ai_lead_generation_agent.py @@ -1,8 +1,8 @@ import streamlit as st import requests -from phi.agent import Agent -from phi.tools.firecrawl import FirecrawlTools -from phi.model.openai import OpenAIChat +from agno.agent import Agent +from agno.tools.firecrawl import FirecrawlTools +from agno.models.openai import OpenAIChat from firecrawl import FirecrawlApp from pydantic import BaseModel, Field from typing import List diff --git a/ai_agent_tutorials/ai_lead_generation_agent/requirements.txt b/ai_agent_tutorials/ai_lead_generation_agent/requirements.txt index a7bebc9..5fa3753 100644 --- a/ai_agent_tutorials/ai_lead_generation_agent/requirements.txt +++ b/ai_agent_tutorials/ai_lead_generation_agent/requirements.txt @@ -1,6 +1,6 @@ firecrawl-py==1.9.0 -phidata==2.7.3 -composio-phidata==0.6.15 +agno +composio-phidata composio==0.1.1 pydantic==2.10.5 streamlit \ No newline at end of file diff --git a/ai_agent_tutorials/ai_legal_agent_team/legal_agent_team.py 
b/ai_agent_tutorials/ai_legal_agent_team/legal_agent_team.py index 319d570..c8673fc 100644 --- a/ai_agent_tutorials/ai_legal_agent_team/legal_agent_team.py +++ b/ai_agent_tutorials/ai_legal_agent_team/legal_agent_team.py @@ -1,10 +1,10 @@ import streamlit as st -from phi.agent import Agent -from phi.knowledge.pdf import PDFKnowledgeBase, PDFReader -from phi.vectordb.qdrant import Qdrant -from phi.tools.duckduckgo import DuckDuckGo -from phi.model.openai import OpenAIChat -from phi.embedder.openai import OpenAIEmbedder +from agno.agent import Agent +from agno.knowledge.pdf import PDFKnowledgeBase, PDFReader +from agno.vectordb.qdrant import Qdrant +from agno.tools.duckduckgo import DuckDuckGoTools +from agno.models.openai import OpenAIChat +from agno.embedder.openai import OpenAIEmbedder import tempfile import os @@ -133,7 +133,7 @@ def main(): name="Legal Researcher", role="Legal research specialist", model=OpenAIChat(model="gpt-4o"), - tools=[DuckDuckGo()], + tools=[DuckDuckGoTools()], knowledge=st.session_state.knowledge_base, search_knowledge=True, instructions=[ diff --git a/ai_agent_tutorials/ai_legal_agent_team/local_ai_legal_agent_team/local_legal_agent.py b/ai_agent_tutorials/ai_legal_agent_team/local_ai_legal_agent_team/local_legal_agent.py index 8de227b..16c0e8f 100644 --- a/ai_agent_tutorials/ai_legal_agent_team/local_ai_legal_agent_team/local_legal_agent.py +++ b/ai_agent_tutorials/ai_legal_agent_team/local_ai_legal_agent_team/local_legal_agent.py @@ -1,9 +1,9 @@ import streamlit as st -from phi.agent import Agent -from phi.knowledge.pdf import PDFKnowledgeBase, PDFReader -from phi.vectordb.qdrant import Qdrant -from phi.model.ollama import Ollama -from phi.embedder.ollama import OllamaEmbedder +from agno.agent import Agent +from agno.knowledge.pdf import PDFKnowledgeBase, PDFReader +from agno.vectordb.qdrant import Qdrant +from agno.models.ollama import Ollama +from agno.embedder.ollama import OllamaEmbedder import tempfile import os diff --git 
a/ai_agent_tutorials/ai_legal_agent_team/local_ai_legal_agent_team/requirements.txt b/ai_agent_tutorials/ai_legal_agent_team/local_ai_legal_agent_team/requirements.txt index ceca432..e23a639 100644 --- a/ai_agent_tutorials/ai_legal_agent_team/local_ai_legal_agent_team/requirements.txt +++ b/ai_agent_tutorials/ai_legal_agent_team/local_ai_legal_agent_team/requirements.txt @@ -1,4 +1,4 @@ -phidata==2.6.7 +agno streamlit==1.40.2 qdrant-client==1.12.1 ollama==0.4.4 diff --git a/ai_agent_tutorials/ai_legal_agent_team/requirements.txt b/ai_agent_tutorials/ai_legal_agent_team/requirements.txt index 35e0aa7..c6cda18 100644 --- a/ai_agent_tutorials/ai_legal_agent_team/requirements.txt +++ b/ai_agent_tutorials/ai_legal_agent_team/requirements.txt @@ -1,4 +1,4 @@ -phidata==2.5.33 +agno streamlit==1.40.2 qdrant-client==1.12.1 openai diff --git a/ai_agent_tutorials/ai_medical_imaging_agent/README.md b/ai_agent_tutorials/ai_medical_imaging_agent/README.md index 3a4ca7c..e58104e 100644 --- a/ai_agent_tutorials/ai_medical_imaging_agent/README.md +++ b/ai_agent_tutorials/ai_medical_imaging_agent/README.md @@ -1,6 +1,6 @@ # 🩻 Medical Imaging Diagnosis Agent -A Medical Imaging Diagnosis Agent build on phidata powered by Gemini 2.0 Flash Experimental that provides AI-assisted analysis of medical images of various scans. The agent acts as a medical imaging diagnosis expert to analyze various types of medical images and videos, providing detailed diagnostic insights and explanations. +A Medical Imaging Diagnosis Agent build on agno powered by Gemini 2.0 Flash Experimental that provides AI-assisted analysis of medical images of various scans. The agent acts as a medical imaging diagnosis expert to analyze various types of medical images and videos, providing detailed diagnostic insights and explanations. 
## Features diff --git a/ai_agent_tutorials/ai_medical_imaging_agent/ai_medical_imaging.py b/ai_agent_tutorials/ai_medical_imaging_agent/ai_medical_imaging.py index c42c379..bc86a87 100644 --- a/ai_agent_tutorials/ai_medical_imaging_agent/ai_medical_imaging.py +++ b/ai_agent_tutorials/ai_medical_imaging_agent/ai_medical_imaging.py @@ -1,9 +1,9 @@ import os from PIL import Image -from phi.agent import Agent -from phi.model.google import Gemini +from agno.agent import Agent +from agno.models.google import Gemini import streamlit as st -from phi.tools.duckduckgo import DuckDuckGo +from agno.tools.duckduckgo import DuckDuckGoTools if "GOOGLE_API_KEY" not in st.session_state: st.session_state.GOOGLE_API_KEY = None @@ -45,7 +45,7 @@ medical_agent = Agent( api_key=st.session_state.GOOGLE_API_KEY, id="gemini-2.0-flash-exp" ), - tools=[DuckDuckGo()], + tools=[DuckDuckGoTools()], markdown=True ) if st.session_state.GOOGLE_API_KEY else None diff --git a/ai_agent_tutorials/ai_medical_imaging_agent/requirements.txt b/ai_agent_tutorials/ai_medical_imaging_agent/requirements.txt index 69bc025..9eb6d27 100644 --- a/ai_agent_tutorials/ai_medical_imaging_agent/requirements.txt +++ b/ai_agent_tutorials/ai_medical_imaging_agent/requirements.txt @@ -1,5 +1,5 @@ streamlit==1.40.2 -phidata==2.7.3 +agno Pillow==10.0.0 duckduckgo-search==6.4.1 google-generativeai==0.8.3 \ No newline at end of file diff --git a/ai_agent_tutorials/ai_movie_production_agent/movie_production_agent.py b/ai_agent_tutorials/ai_movie_production_agent/movie_production_agent.py index 064d2c2..b324bca 100644 --- a/ai_agent_tutorials/ai_movie_production_agent/movie_production_agent.py +++ b/ai_agent_tutorials/ai_movie_production_agent/movie_production_agent.py @@ -1,8 +1,8 @@ # Import the required libraries import streamlit as st -from phi.assistant import Assistant -from phi.tools.serpapi_tools import SerpApiTools -from phi.llm.anthropic import Claude +from agno.agent import Agent +from agno.tools.serpapi import 
SerpApiTools +from agno.models.anthropic import Claude from textwrap import dedent # Set up the Streamlit app @@ -15,9 +15,9 @@ anthropic_api_key = st.text_input("Enter Anthropic API Key to access Claude Sonn serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password") if anthropic_api_key and serp_api_key: - script_writer = Assistant( + script_writer = Agent( name="ScriptWriter", - llm=Claude(model="claude-3-5-sonnet-20240620", api_key=anthropic_api_key), + model=Claude(id="claude-3-5-sonnet-20240620", api_key=anthropic_api_key), description=dedent( """\ You are an expert screenplay writer. Given a movie idea and genre, @@ -31,9 +31,9 @@ if anthropic_api_key and serp_api_key: ], ) - casting_director = Assistant( + casting_director = Agent( name="CastingDirector", - llm=Claude(model="claude-3-5-sonnet-20240620", api_key=anthropic_api_key), + model=Claude(id="claude-3-5-sonnet-20240620", api_key=anthropic_api_key), description=dedent( """\ You are a talented casting director. 
Given a script outline and character descriptions, @@ -49,9 +49,9 @@ if anthropic_api_key and serp_api_key: tools=[SerpApiTools(api_key=serp_api_key)], ) - movie_producer = Assistant( + movie_producer = Agent( name="MovieProducer", - llm=Claude(model="claude-3-5-sonnet-20240620", api_key=anthropic_api_key), + model=Claude(id="claude-3-5-sonnet-20240620", api_key=anthropic_api_key), team=[script_writer, casting_director], description="Experienced movie producer overseeing script and casting.", instructions=[ diff --git a/ai_agent_tutorials/ai_movie_production_agent/requirements.txt b/ai_agent_tutorials/ai_movie_production_agent/requirements.txt index 2c59945..fc46b55 100644 --- a/ai_agent_tutorials/ai_movie_production_agent/requirements.txt +++ b/ai_agent_tutorials/ai_movie_production_agent/requirements.txt @@ -1,5 +1,5 @@ streamlit -phidata +agno anthropic google-search-results lxml_html_clean \ No newline at end of file diff --git a/ai_agent_tutorials/ai_personal_finance_agent/finance_agent.py b/ai_agent_tutorials/ai_personal_finance_agent/finance_agent.py index 55be0a4..4d59936 100644 --- a/ai_agent_tutorials/ai_personal_finance_agent/finance_agent.py +++ b/ai_agent_tutorials/ai_personal_finance_agent/finance_agent.py @@ -1,8 +1,8 @@ from textwrap import dedent -from phi.assistant import Assistant -from phi.tools.serpapi_tools import SerpApiTools +from agno.agent import Agent +from agno.tools.serpapi import SerpApiTools import streamlit as st -from phi.llm.openai import OpenAIChat +from agno.models.openai import OpenAIChat # Set up the Streamlit app st.title("AI Personal Finance Planner 💰") @@ -15,10 +15,10 @@ openai_api_key = st.text_input("Enter OpenAI API Key to access GPT-4o", type="pa serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password") if openai_api_key and serp_api_key: - researcher = Assistant( + researcher = Agent( name="Researcher", role="Searches for financial advice, investment opportunities, and savings 
strategies based on user preferences", - llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key), + model=OpenAIChat(id="gpt-4o", api_key=openai_api_key), description=dedent( """\ You are a world-class financial researcher. Given a user's financial goals and current financial situation, @@ -35,10 +35,10 @@ if openai_api_key and serp_api_key: tools=[SerpApiTools(api_key=serp_api_key)], add_datetime_to_instructions=True, ) - planner = Assistant( + planner = Agent( name="Planner", role="Generates a personalized financial plan based on user preferences and research results", - llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key), + model=OpenAIChat(id="gpt-4o", api_key=openai_api_key), description=dedent( """\ You are a senior financial planner. Given a user's financial goals, current financial situation, and a list of research results, @@ -54,8 +54,6 @@ if openai_api_key and serp_api_key: "Never make up facts or plagiarize. Always provide proper attribution.", ], add_datetime_to_instructions=True, - add_chat_history_to_prompt=True, - num_history_messages=3, ) # Input fields for the user's financial goals and current financial situation @@ -66,4 +64,4 @@ if openai_api_key and serp_api_key: with st.spinner("Processing..."): # Get the response from the assistant response = planner.run(f"Financial goals: {financial_goals}, Current situation: {current_situation}", stream=False) - st.write(response) + st.write(response.content) diff --git a/ai_agent_tutorials/ai_personal_finance_agent/requirements.txt b/ai_agent_tutorials/ai_personal_finance_agent/requirements.txt index 549573b..ffff278 100644 --- a/ai_agent_tutorials/ai_personal_finance_agent/requirements.txt +++ b/ai_agent_tutorials/ai_personal_finance_agent/requirements.txt @@ -1,4 +1,4 @@ streamlit -phidata +agno openai google-search-results \ No newline at end of file diff --git a/ai_agent_tutorials/ai_reasoning_agent/local_ai_reasoning_agent.py b/ai_agent_tutorials/ai_reasoning_agent/local_ai_reasoning_agent.py index 
8f58d97..d31910c 100644 --- a/ai_agent_tutorials/ai_reasoning_agent/local_ai_reasoning_agent.py +++ b/ai_agent_tutorials/ai_reasoning_agent/local_ai_reasoning_agent.py @@ -1,6 +1,6 @@ -from phi.agent import Agent -from phi.model.ollama import Ollama -from phi.playground import Playground, serve_playground_app +from agno.agent import Agent +from agno.models.ollama import Ollama +from agno.playground import Playground, serve_playground_app reasoning_agent = Agent(name="Reasoning Agent", model=Ollama(id="qwq:32b"), markdown=True) diff --git a/ai_agent_tutorials/ai_reasoning_agent/reasoning_agent.py b/ai_agent_tutorials/ai_reasoning_agent/reasoning_agent.py index 4f55d79..137b5d3 100644 --- a/ai_agent_tutorials/ai_reasoning_agent/reasoning_agent.py +++ b/ai_agent_tutorials/ai_reasoning_agent/reasoning_agent.py @@ -1,9 +1,9 @@ -from phi.agent import Agent -from phi.model.openai import OpenAIChat -from phi.cli.console import console +from agno.agent import Agent +from agno.models.openai import OpenAIChat +from rich.console import Console regular_agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), markdown=True) - +console = Console() reasoning_agent = Agent( model=OpenAIChat(id="gpt-4o"), reasoning=True, diff --git a/ai_agent_tutorials/ai_recruitment_agent_team/ai_recruitment_agent_team.py b/ai_agent_tutorials/ai_recruitment_agent_team/ai_recruitment_agent_team.py index aa63971..59e56dc 100644 --- a/ai_agent_tutorials/ai_recruitment_agent_team/ai_recruitment_agent_team.py +++ b/ai_agent_tutorials/ai_recruitment_agent_team/ai_recruitment_agent_team.py @@ -8,9 +8,9 @@ from datetime import datetime, timedelta import pytz import streamlit as st -from phi.agent import Agent -from phi.model.openai import OpenAIChat -from phi.tools.email import EmailTools +from agno.agent import Agent +from agno.models.openai import OpenAIChat +from agno.tools.email import EmailTools from phi.tools.zoom import ZoomTool from phi.utils.log import logger from streamlit_pdf_viewer import pdf_viewer 
diff --git a/ai_agent_tutorials/ai_recruitment_agent_team/requirements.txt b/ai_agent_tutorials/ai_recruitment_agent_team/requirements.txt index ac1d845..caaaa4e 100644 --- a/ai_agent_tutorials/ai_recruitment_agent_team/requirements.txt +++ b/ai_agent_tutorials/ai_recruitment_agent_team/requirements.txt @@ -1,5 +1,6 @@ # Core dependencies -phidata==2.7.3 +phidata +agno streamlit==1.40.2 PyPDF2==3.0.1 streamlit-pdf-viewer==0.0.19 diff --git a/ai_agent_tutorials/ai_startup_trend_analysis_agent/requirements.txt b/ai_agent_tutorials/ai_startup_trend_analysis_agent/requirements.txt index 8e7848d..00ecb40 100644 --- a/ai_agent_tutorials/ai_startup_trend_analysis_agent/requirements.txt +++ b/ai_agent_tutorials/ai_startup_trend_analysis_agent/requirements.txt @@ -1,4 +1,4 @@ -phidata==2.5.33 +agno streamlit==1.40.2 duckduckgo_search==6.3.7 newspaper4k==0.9.3.1 diff --git a/ai_agent_tutorials/ai_startup_trend_analysis_agent/startup_trends_agent.py b/ai_agent_tutorials/ai_startup_trend_analysis_agent/startup_trends_agent.py index 6c46cf7..8b9a722 100644 --- a/ai_agent_tutorials/ai_startup_trend_analysis_agent/startup_trends_agent.py +++ b/ai_agent_tutorials/ai_startup_trend_analysis_agent/startup_trends_agent.py @@ -1,9 +1,9 @@ import streamlit as st -from phi.agent import Agent -from phi.tools.duckduckgo import DuckDuckGo -from phi.model.anthropic import Claude -from phi.tools.newspaper4k import Newspaper4k -from phi.tools import Tool +from agno.agent import Agent +from agno.tools.duckduckgo import DuckDuckGoTools +from agno.models.anthropic import Claude +from agno.tools.newspaper4k import Newspaper4kTools +from agno.tools import Tool import logging logging.basicConfig(level=logging.DEBUG) @@ -25,7 +25,7 @@ if st.button("Generate Analysis"): anthropic_model = Claude(id ="claude-3-5-sonnet-20240620",api_key=anthropic_api_key) # Define News Collector Agent - Duckduckgo_search tool enables an Agent to search the web for information. 
- search_tool = DuckDuckGo(search=True, news=True, fixed_max_results=5) + search_tool = DuckDuckGoTools(search=True, news=True, fixed_max_results=5) news_collector = Agent( name="News Collector", role="Collects recent news articles on the given topic", @@ -37,7 +37,7 @@ if st.button("Generate Analysis"): ) # Define Summary Writer Agent - news_tool = Newspaper4k(read_article=True, include_summary=True) + news_tool = Newspaper4kTools(read_article=True, include_summary=True) summary_writer = Agent( name="Summary Writer", role="Summarizes collected news articles", diff --git a/ai_agent_tutorials/ai_teaching_agent_team/requirements.txt b/ai_agent_tutorials/ai_teaching_agent_team/requirements.txt index d7222b6..7b16c6c 100644 --- a/ai_agent_tutorials/ai_teaching_agent_team/requirements.txt +++ b/ai_agent_tutorials/ai_teaching_agent_team/requirements.txt @@ -2,7 +2,7 @@ streamlit==1.41.1 openai==1.58.1 duckduckgo-search==6.4.1 typing-extensions>=4.5.0 -phidata==2.7.3 +agno composio-phidata==0.6.9 composio_core composio==0.1.1 diff --git a/ai_agent_tutorials/ai_teaching_agent_team/teaching_agent_team.py b/ai_agent_tutorials/ai_teaching_agent_team/teaching_agent_team.py index 30d015f..8306ae3 100644 --- a/ai_agent_tutorials/ai_teaching_agent_team/teaching_agent_team.py +++ b/ai_agent_tutorials/ai_teaching_agent_team/teaching_agent_team.py @@ -1,11 +1,11 @@ import streamlit as st -from phi.agent import Agent, RunResponse -from phi.model.openai import OpenAIChat +from agno.agent import Agent, RunResponse +from agno.models.openai import OpenAIChat from composio_phidata import Action, ComposioToolSet import os -from phi.tools.arxiv_toolkit import ArxivToolkit -from phi.utils.pprint import pprint_run_response -from phi.tools.serpapi_tools import SerpApiTools +from agno.tools.arxiv import ArxivTools +from agno.utils.pprint import pprint_run_response +from agno.tools.serpapi import SerpApiTools # Set page configuration st.set_page_config(page_title="👨‍🏫 AI Teaching Agent Team", 
layout="centered") diff --git a/ai_agent_tutorials/ai_tic_tac_toe_agent/README.md b/ai_agent_tutorials/ai_tic_tac_toe_agent/README.md index d65a178..b5744d9 100644 --- a/ai_agent_tutorials/ai_tic_tac_toe_agent/README.md +++ b/ai_agent_tutorials/ai_tic_tac_toe_agent/README.md @@ -1,6 +1,6 @@ # 🎮 Agent X vs Agent O: Tic-Tac-Toe Game -An interactive Tic-Tac-Toe game where two AI agents powered by different language models compete against each other built on phidata Agent Framework and Streamlit as UI. Watch as GPT-4O battles against either DeepSeek V3 or Google's Gemini 1.5 Flash in this classic game. +An interactive Tic-Tac-Toe game where two AI agents powered by different language models compete against each other built on Agno Agent Framework and Streamlit as UI. Watch as GPT-4O battles against either DeepSeek V3 or Google's Gemini 1.5 Flash in this classic game. ## Features diff --git a/ai_agent_tutorials/ai_tic_tac_toe_agent/ai_tic_tac_toe_agent.py b/ai_agent_tutorials/ai_tic_tac_toe_agent/ai_tic_tac_toe_agent.py index ae5aa15..084d344 100644 --- a/ai_agent_tutorials/ai_tic_tac_toe_agent/ai_tic_tac_toe_agent.py +++ b/ai_agent_tutorials/ai_tic_tac_toe_agent/ai_tic_tac_toe_agent.py @@ -1,9 +1,9 @@ import re import streamlit as st -from phi.agent import Agent -from phi.model.openai import OpenAIChat -from phi.model.deepseek import DeepSeekChat -from phi.model.google import Gemini +from agno.agent import Agent +from agno.models.openai import OpenAIChat +from agno.models.deepseek import DeepSeek +from agno.models.google import Gemini # Streamlit App Title st.title("🎮 Agent X vs Agent O: Tic-Tac-Toe Game") @@ -122,7 +122,7 @@ if 'openai_api_key' in st.session_state: if 'deepseek_api_key' in st.session_state: player_o = Agent( name="Player O", - model=DeepSeekChat(api_key=st.session_state.deepseek_api_key), + model=DeepSeek(id="deepseek-chat", api_key=st.session_state.deepseek_api_key), instructions=[ "You are a Tic-Tac-Toe player using the symbol 'O'.", "Your opponent 
is using the symbol 'X'. Block their potential winning moves.", diff --git a/ai_agent_tutorials/ai_tic_tac_toe_agent/requirements.txt b/ai_agent_tutorials/ai_tic_tac_toe_agent/requirements.txt index 4129d50..e7c8c1c 100644 --- a/ai_agent_tutorials/ai_tic_tac_toe_agent/requirements.txt +++ b/ai_agent_tutorials/ai_tic_tac_toe_agent/requirements.txt @@ -1,4 +1,4 @@ streamlit==1.41.1 -phidata==2.7.3 +agno openai==1.58.1 -google-generativeai==0.8.3 +google-generativeai==0.8.3 \ No newline at end of file diff --git a/ai_agent_tutorials/ai_travel_agent/local_travel_agent.py b/ai_agent_tutorials/ai_travel_agent/local_travel_agent.py index fb0b856..e56a91c 100644 --- a/ai_agent_tutorials/ai_travel_agent/local_travel_agent.py +++ b/ai_agent_tutorials/ai_travel_agent/local_travel_agent.py @@ -1,21 +1,21 @@ from textwrap import dedent -from phi.assistant import Assistant -from phi.tools.serpapi_tools import SerpApiTools +from agno.agent import Agent +from agno.tools.serpapi import SerpApiTools import streamlit as st -from phi.llm.ollama import Ollama +from agno.models.ollama import Ollama # Set up the Streamlit app -st.title("AI Travel Planner using Llama-3 ✈️") +st.title("AI Travel Planner using Llama-3.2 ✈️") st.caption("Plan your next adventure with AI Travel Planner by researching and planning a personalized itinerary on autopilot using local Llama-3") # Get SerpAPI key from the user serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password") if serp_api_key: - researcher = Assistant( + researcher = Agent( name="Researcher", role="Searches for travel destinations, activities, and accommodations based on user preferences", - llm=Ollama(model="llama3:instruct", max_tokens=1024), + model=Ollama(id="llama3.2", max_tokens=1024), description=dedent( """\ You are a world-class travel researcher. 
Given a travel destination and the number of days the user wants to travel for, @@ -32,10 +32,10 @@ if serp_api_key: tools=[SerpApiTools(api_key=serp_api_key)], add_datetime_to_instructions=True, ) - planner = Assistant( + planner = Agent( name="Planner", role="Generates a draft itinerary based on user preferences and research results", - llm=Ollama(model="llama3:instruct", max_tokens=1024), + model=Ollama(id="llama3.2", max_tokens=1024), description=dedent( """\ You are a senior travel planner. Given a travel destination, the number of days the user wants to travel for, and a list of research results, @@ -51,8 +51,6 @@ if serp_api_key: "Never make up facts or plagiarize. Always provide proper attribution.", ], add_datetime_to_instructions=True, - add_chat_history_to_prompt=True, - num_history_messages=3, ) # Input fields for the user's destination and the number of days they want to travel for @@ -63,4 +61,4 @@ if serp_api_key: with st.spinner("Processing..."): # Get the response from the assistant response = planner.run(f"{destination} for {num_days} days", stream=False) - st.write(response) \ No newline at end of file + st.write(response.content) \ No newline at end of file diff --git a/ai_agent_tutorials/ai_travel_agent/requirements.txt b/ai_agent_tutorials/ai_travel_agent/requirements.txt index 549573b..ffff278 100644 --- a/ai_agent_tutorials/ai_travel_agent/requirements.txt +++ b/ai_agent_tutorials/ai_travel_agent/requirements.txt @@ -1,4 +1,4 @@ streamlit -phidata +agno openai google-search-results \ No newline at end of file diff --git a/ai_agent_tutorials/ai_travel_agent/travel_agent.py b/ai_agent_tutorials/ai_travel_agent/travel_agent.py index 5a3fee6..18db2cf 100644 --- a/ai_agent_tutorials/ai_travel_agent/travel_agent.py +++ b/ai_agent_tutorials/ai_travel_agent/travel_agent.py @@ -1,8 +1,8 @@ from textwrap import dedent -from phi.assistant import Assistant -from phi.tools.serpapi_tools import SerpApiTools +from agno.agent import Agent +from 
agno.tools.serpapi import SerpApiTools import streamlit as st -from phi.llm.openai import OpenAIChat +from agno.models.openai import OpenAIChat # Set up the Streamlit app st.title("AI Travel Planner ✈️") @@ -15,10 +15,10 @@ openai_api_key = st.text_input("Enter OpenAI API Key to access GPT-4o", type="pa serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password") if openai_api_key and serp_api_key: - researcher = Assistant( + researcher = Agent( name="Researcher", role="Searches for travel destinations, activities, and accommodations based on user preferences", - llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key), + model=OpenAIChat(id="gpt-4o", api_key=openai_api_key), description=dedent( """\ You are a world-class travel researcher. Given a travel destination and the number of days the user wants to travel for, @@ -35,10 +35,10 @@ if openai_api_key and serp_api_key: tools=[SerpApiTools(api_key=serp_api_key)], add_datetime_to_instructions=True, ) - planner = Assistant( + planner = Agent( name="Planner", role="Generates a draft itinerary based on user preferences and research results", - llm=OpenAIChat(model="gpt-4o", api_key=openai_api_key), + model=OpenAIChat(id="gpt-4o", api_key=openai_api_key), description=dedent( """\ You are a senior travel planner. Given a travel destination, the number of days the user wants to travel for, and a list of research results, @@ -54,8 +54,6 @@ if openai_api_key and serp_api_key: "Never make up facts or plagiarize. 
Always provide proper attribution.", ], add_datetime_to_instructions=True, - add_chat_history_to_prompt=True, - num_history_messages=3, ) # Input fields for the user's destination and the number of days they want to travel for @@ -66,4 +64,4 @@ if openai_api_key and serp_api_key: with st.spinner("Processing..."): # Get the response from the assistant response = planner.run(f"{destination} for {num_days} days", stream=False) - st.write(response) \ No newline at end of file + st.write(response.content) \ No newline at end of file diff --git a/ai_agent_tutorials/gemini_multimodal_agent_demo/multimodal_ai_agent.py b/ai_agent_tutorials/gemini_multimodal_agent_demo/multimodal_ai_agent.py index ce79a64..1a6a3cd 100644 --- a/ai_agent_tutorials/gemini_multimodal_agent_demo/multimodal_ai_agent.py +++ b/ai_agent_tutorials/gemini_multimodal_agent_demo/multimodal_ai_agent.py @@ -1,11 +1,11 @@ -from phi.agent import Agent -from phi.model.google import Gemini -from phi.tools.duckduckgo import DuckDuckGo +from agno.agent import Agent +from agno.models.google import Gemini +from agno.tools.duckduckgo import DuckDuckGoTools from google.generativeai import upload_file, get_file import time # 1. Initialize the Multimodal Agent -agent = Agent(model=Gemini(id="gemini-2.0-flash-exp"), tools=[DuckDuckGo()], markdown=True) +agent = Agent(model=Gemini(id="gemini-2.0-flash-exp"), tools=[DuckDuckGoTools()], markdown=True) # 2. 
Image Input image_url = "https://example.com/sample_image.jpg" diff --git a/ai_agent_tutorials/multi_agent_researcher/requirements.txt b/ai_agent_tutorials/multi_agent_researcher/requirements.txt index a0e8efb..c54384d 100644 --- a/ai_agent_tutorials/multi_agent_researcher/requirements.txt +++ b/ai_agent_tutorials/multi_agent_researcher/requirements.txt @@ -1,3 +1,3 @@ streamlit -phidata +agno openai \ No newline at end of file diff --git a/ai_agent_tutorials/multi_agent_researcher/research_agent.py b/ai_agent_tutorials/multi_agent_researcher/research_agent.py index a5a6fcb..e1c06dd 100644 --- a/ai_agent_tutorials/multi_agent_researcher/research_agent.py +++ b/ai_agent_tutorials/multi_agent_researcher/research_agent.py @@ -1,8 +1,8 @@ # Import the required libraries import streamlit as st -from phi.assistant import Assistant -from phi.tools.hackernews import HackerNews -from phi.llm.openai import OpenAIChat +from agno.agent import Agent +from agno.tools.hackernews import HackerNewsTools +from agno.models.openai import OpenAIChat # Set up the Streamlit app st.title("Multi-Agent AI Researcher 🔍🤖") @@ -13,23 +13,23 @@ openai_api_key = st.text_input("OpenAI API Key", type="password") if openai_api_key: # Create instances of the Assistant - story_researcher = Assistant( + story_researcher = Agent( name="HackerNews Story Researcher", role="Researches hackernews stories and users.", - tools=[HackerNews()], + tools=[HackerNewsTools()], ) - user_researcher = Assistant( + user_researcher = Agent( name="HackerNews User Researcher", role="Reads articles from URLs.", - tools=[HackerNews()], + tools=[HackerNewsTools()], ) - hn_assistant = Assistant( + hn_assistant = Agent( name="Hackernews Team", team=[story_researcher, user_researcher], - llm=OpenAIChat( - model="gpt-4o", + model=OpenAIChat( + id="gpt-4o", max_tokens=1024, temperature=0.5, api_key=openai_api_key @@ -42,4 +42,4 @@ if openai_api_key: if query: # Get the response from the assistant response = 
hn_assistant.run(query, stream=False) - st.write(response) \ No newline at end of file + st.write(response.content) \ No newline at end of file diff --git a/ai_agent_tutorials/multi_agent_researcher/research_agent_llama3.py b/ai_agent_tutorials/multi_agent_researcher/research_agent_llama3.py index 96d55e0..1be2d52 100644 --- a/ai_agent_tutorials/multi_agent_researcher/research_agent_llama3.py +++ b/ai_agent_tutorials/multi_agent_researcher/research_agent_llama3.py @@ -1,32 +1,32 @@ # Import the required libraries import streamlit as st -from phi.assistant import Assistant -from phi.tools.hackernews import HackerNews -from phi.llm.ollama import Ollama +from agno.agent import Agent +from agno.tools.hackernews import HackerNewsTools as HackerNews +from agno.models.ollama import Ollama # Set up the Streamlit app st.title("Multi-Agent AI Researcher using Llama-3 🔍🤖") st.caption("This app allows you to research top stories and users on HackerNews and write blogs, reports and social posts.") # Create instances of the Assistant -story_researcher = Assistant( +story_researcher = Agent( name="HackerNews Story Researcher", role="Researches hackernews stories and users.", tools=[HackerNews()], - llm=Ollama(model="llama3:instruct", max_tokens=1024) + model=Ollama(id="llama3.2", max_tokens=1024) ) -user_researcher = Assistant( +user_researcher = Agent( name="HackerNews User Researcher", role="Reads articles from URLs.", tools=[HackerNews()], - llm=Ollama(model="llama3:instruct", max_tokens=1024) + model=Ollama(id="llama3.2", max_tokens=1024) ) -hn_assistant = Assistant( +hn_assistant = Agent( name="Hackernews Team", team=[story_researcher, user_researcher], - llm=Ollama(model="llama3:instruct", max_tokens=1024) + model=Ollama(id="llama3.2", max_tokens=1024) ) # Input field for the report query @@ -35,4 +35,4 @@ query = st.text_input("Enter your report query") if query: # Get the response from the assistant response = hn_assistant.run(query, stream=False) - st.write(response) \ No newline at end of
file + st.write(response.content) \ No newline at end of file diff --git a/ai_agent_tutorials/multimodal_ai_agent/multimodal_reasoning_agent.py b/ai_agent_tutorials/multimodal_ai_agent/multimodal_reasoning_agent.py index cb12428..428805a 100644 --- a/ai_agent_tutorials/multimodal_ai_agent/multimodal_reasoning_agent.py +++ b/ai_agent_tutorials/multimodal_ai_agent/multimodal_reasoning_agent.py @@ -1,6 +1,6 @@ import streamlit as st -from phi.agent import Agent -from phi.model.google import Gemini +from agno.agent import Agent +from agno.models.google import Gemini import tempfile import os diff --git a/ai_agent_tutorials/multimodal_ai_agent/mutimodal_agent.py b/ai_agent_tutorials/multimodal_ai_agent/mutimodal_agent.py index a2d0190..79ee9e3 100644 --- a/ai_agent_tutorials/multimodal_ai_agent/mutimodal_agent.py +++ b/ai_agent_tutorials/multimodal_ai_agent/mutimodal_agent.py @@ -1,7 +1,7 @@ import streamlit as st -from phi.agent import Agent -from phi.model.google import Gemini -from phi.tools.duckduckgo import DuckDuckGo +from agno.agent import Agent +from agno.models.google import Gemini +from agno.tools.duckduckgo import DuckDuckGoTools from google.generativeai import upload_file, get_file import time from pathlib import Path @@ -21,7 +21,7 @@ def initialize_agent(): return Agent( name="Multimodal Analyst", model=Gemini(id="gemini-2.0-flash-exp"), - tools=[DuckDuckGo()], + tools=[DuckDuckGoTools()], markdown=True, ) diff --git a/ai_agent_tutorials/multimodal_ai_agent/requirements.txt b/ai_agent_tutorials/multimodal_ai_agent/requirements.txt index 6e0d8b8..fad806f 100644 --- a/ai_agent_tutorials/multimodal_ai_agent/requirements.txt +++ b/ai_agent_tutorials/multimodal_ai_agent/requirements.txt @@ -1,3 +1,3 @@ -phidata==2.7.2 +agno google-generativeai==0.8.3 streamlit==1.40.2 \ No newline at end of file diff --git a/ai_agent_tutorials/multimodal_design_agent_team/design_agent_team.py b/ai_agent_tutorials/multimodal_design_agent_team/design_agent_team.py index 
ffa520c..32145d5 100644 --- a/ai_agent_tutorials/multimodal_design_agent_team/design_agent_team.py +++ b/ai_agent_tutorials/multimodal_design_agent_team/design_agent_team.py @@ -1,6 +1,6 @@ -from phi.agent import Agent -from phi.model.google import Gemini -from phi.tools.duckduckgo import DuckDuckGo +from agno.agent import Agent +from agno.models.google import Gemini +from agno.tools.duckduckgo import DuckDuckGoTools import streamlit as st from PIL import Image from typing import List, Optional @@ -37,7 +37,7 @@ def initialize_agents(api_key: str) -> tuple[Agent, Agent, Agent]: market_agent = Agent( model=model, - tools=[DuckDuckGo(search=True)], + tools=[DuckDuckGoTools()], instructions=[ "You are a market research expert that:", "1. Identifies market trends and competitor patterns", diff --git a/ai_agent_tutorials/multimodal_design_agent_team/requirements.txt b/ai_agent_tutorials/multimodal_design_agent_team/requirements.txt index 6ec9d03..6cb878b 100644 --- a/ai_agent_tutorials/multimodal_design_agent_team/requirements.txt +++ b/ai_agent_tutorials/multimodal_design_agent_team/requirements.txt @@ -1,6 +1,6 @@ google-generativeai==0.8.3 streamlit==1.41.1 -phidata==2.7.2 +agno Pillow==11.0.0 duckduckgo-search==6.3.7 diff --git a/ai_agent_tutorials/xai_finance_agent/requirements.txt b/ai_agent_tutorials/xai_finance_agent/requirements.txt index a391d95..d0527a4 100644 --- a/ai_agent_tutorials/xai_finance_agent/requirements.txt +++ b/ai_agent_tutorials/xai_finance_agent/requirements.txt @@ -1,4 +1,4 @@ -phidata +agno duckduckgo-search yfinance fastapi[standard] diff --git a/ai_agent_tutorials/xai_finance_agent/xai_finance_agent.py b/ai_agent_tutorials/xai_finance_agent/xai_finance_agent.py index ac03077..ccde613 100644 --- a/ai_agent_tutorials/xai_finance_agent/xai_finance_agent.py +++ b/ai_agent_tutorials/xai_finance_agent/xai_finance_agent.py @@ -1,15 +1,15 @@ # import necessary python libraries -from phi.agent import Agent -from phi.model.xai import xAI -from 
phi.tools.yfinance import YFinanceTools -from phi.tools.duckduckgo import DuckDuckGo -from phi.playground import Playground, serve_playground_app +from agno.agent import Agent +from agno.models.xai import xAI +from agno.tools.yfinance import YFinanceTools +from agno.tools.duckduckgo import DuckDuckGoTools +from agno.playground import Playground, serve_playground_app # create the AI finance agent agent = Agent( name="xAI Finance Agent", model = xAI(id="grok-beta"), - tools=[DuckDuckGo(), YFinanceTools(stock_price=True, analyst_recommendations=True, stock_fundamentals=True)], + tools=[DuckDuckGoTools(), YFinanceTools(stock_price=True, analyst_recommendations=True, stock_fundamentals=True)], instructions = ["Always use tables to display financial/numerical data. For text data use bullet points and small paragrpahs."], show_tool_calls = True, markdown = True, diff --git a/chat_with_X_tutorials/chat_with_pdf/requirements.txt b/chat_with_X_tutorials/chat_with_pdf/requirements.txt index ca9e4b6..47fe46a 100644 --- a/chat_with_X_tutorials/chat_with_pdf/requirements.txt +++ b/chat_with_X_tutorials/chat_with_pdf/requirements.txt @@ -1,2 +1,3 @@ streamlit -embedchain \ No newline at end of file +embedchain +streamlit-chat \ No newline at end of file diff --git a/chat_with_X_tutorials/chat_with_research_papers/chat_arxiv.py b/chat_with_X_tutorials/chat_with_research_papers/chat_arxiv.py index a06e956..0467829 100644 --- a/chat_with_X_tutorials/chat_with_research_papers/chat_arxiv.py +++ b/chat_with_X_tutorials/chat_with_research_papers/chat_arxiv.py @@ -1,8 +1,8 @@ # Import the required libraries import streamlit as st -from phi.assistant import Assistant -from phi.llm.openai import OpenAIChat -from phi.tools.arxiv_toolkit import ArxivToolkit +from agno.agent import Agent +from agno.models.openai import OpenAIChat +from agno.tools.arxiv import ArxivTools # Set up the Streamlit app st.title("Chat with Research Papers 🔎🤖") @@ -14,12 +14,12 @@ openai_access_token = 
st.text_input("OpenAI API Key", type="password") # If OpenAI API key is provided, create an instance of Assistant if openai_access_token: # Create an instance of the Assistant - assistant = Assistant( - llm=OpenAIChat( - model="gpt-4o", + assistant = Agent( + model=OpenAIChat( + id="gpt-4o", max_tokens=1024, temperature=0.9, - api_key=openai_access_token) , tools=[ArxivToolkit()] + api_key=openai_access_token) , tools=[ArxivTools()] ) # Get the search query from the user @@ -28,4 +28,4 @@ if openai_access_token: if query: # Search the web using the AI Assistant response = assistant.run(query, stream=False) - st.write(response) \ No newline at end of file + st.write(response.content) \ No newline at end of file diff --git a/chat_with_X_tutorials/chat_with_research_papers/chat_arxiv_llama3.py b/chat_with_X_tutorials/chat_with_research_papers/chat_arxiv_llama3.py index 5e1181d..8de78b8 100644 --- a/chat_with_X_tutorials/chat_with_research_papers/chat_arxiv_llama3.py +++ b/chat_with_X_tutorials/chat_with_research_papers/chat_arxiv_llama3.py @@ -1,17 +1,17 @@ # Import the required libraries import streamlit as st -from phi.assistant import Assistant -from phi.llm.ollama import Ollama -from phi.tools.arxiv_toolkit import ArxivToolkit +from agno.agent import Agent +from agno.models.ollama import Ollama +from agno.tools.arxiv import ArxivTools # Set up the Streamlit app st.title("Chat with Research Papers 🔎🤖") st.caption("This app allows you to chat with arXiv research papers using Llama-3 running locally.") # Create an instance of the Assistant -assistant = Assistant( -llm=Ollama( - model="llama3:instruct") , tools=[ArxivToolkit()], show_tool_calls=True +assistant = Agent( +model=Ollama( + id="llama3.1:8b") , tools=[ArxivTools()], show_tool_calls=True ) # Get the search query from the user @@ -20,4 +20,4 @@ query= st.text_input("Enter the Search Query", type="default") if query: # Search the web using the AI Assistant response = assistant.run(query, stream=False) - 
st.write(response) \ No newline at end of file + st.write(response.content) \ No newline at end of file diff --git a/chat_with_X_tutorials/chat_with_research_papers/requirements.txt b/chat_with_X_tutorials/chat_with_research_papers/requirements.txt index 88f1d68..bcb7b91 100644 --- a/chat_with_X_tutorials/chat_with_research_papers/requirements.txt +++ b/chat_with_X_tutorials/chat_with_research_papers/requirements.txt @@ -1,5 +1,5 @@ streamlit -phidata +agno arxiv openai pypdf \ No newline at end of file diff --git a/rag_tutorials/agentic_rag/rag_agent.py b/rag_tutorials/agentic_rag/rag_agent.py index 118bcaa..6c4ebaf 100644 --- a/rag_tutorials/agentic_rag/rag_agent.py +++ b/rag_tutorials/agentic_rag/rag_agent.py @@ -1,9 +1,9 @@ -from phi.agent import Agent -from phi.model.openai import OpenAIChat -from phi.knowledge.pdf import PDFUrlKnowledgeBase -from phi.vectordb.lancedb import LanceDb, SearchType -from phi.playground import Playground, serve_playground_app -from phi.tools.duckduckgo import DuckDuckGo +from agno.agent import Agent +from agno.models.openai import OpenAIChat +from agno.knowledge.pdf_url import PDFUrlKnowledgeBase +from agno.vectordb.lancedb import LanceDb, SearchType +from agno.playground import Playground, serve_playground_app +from agno.tools.duckduckgo import DuckDuckGoTools db_uri = "tmp/lancedb" # Create a knowledge base from a PDF @@ -19,7 +19,7 @@ rag_agent = Agent( model=OpenAIChat(id="gpt-4o"), agent_id="rag-agent", knowledge=knowledge_base, # Add the knowledge base to the agent - tools=[DuckDuckGo()], + tools=[DuckDuckGoTools()], show_tool_calls=True, markdown=True, ) diff --git a/rag_tutorials/agentic_rag/requirements.txt b/rag_tutorials/agentic_rag/requirements.txt index 845536c..9a87d2a 100644 --- a/rag_tutorials/agentic_rag/requirements.txt +++ b/rag_tutorials/agentic_rag/requirements.txt @@ -1,4 +1,4 @@ -phidata +agno openai lancedb tantivy diff --git a/rag_tutorials/autonomous_rag/autorag.py b/rag_tutorials/autonomous_rag/autorag.py 
index 76d8a19..82d35fd 100644 --- a/rag_tutorials/autonomous_rag/autorag.py +++ b/rag_tutorials/autonomous_rag/autorag.py @@ -1,14 +1,14 @@ import streamlit as st import nest_asyncio from io import BytesIO -from phi.assistant import Assistant -from phi.document.reader.pdf import PDFReader -from phi.llm.openai import OpenAIChat -from phi.knowledge import AssistantKnowledge -from phi.tools.duckduckgo import DuckDuckGo -from phi.embedder.openai import OpenAIEmbedder -from phi.vectordb.pgvector import PgVector2 -from phi.storage.assistant.postgres import PgAssistantStorage +from agno.agent import Agent +from agno.document.reader.pdf_reader import PDFReader +from agno.models.openai import OpenAIChat +from agno.knowledge.pdf_url import PDFUrlKnowledgeBase +from agno.tools.duckduckgo import DuckDuckGoTools +from agno.embedder.openai import OpenAIEmbedder +from agno.vectordb.pgvector import PgVector, SearchType +from agno.storage.agent.postgres import PostgresAgentStorage # Apply nest_asyncio to allow nested event loops, required for running async functions in Streamlit nest_asyncio.apply() @@ -18,22 +18,22 @@ DB_URL = "postgresql+psycopg://ai:ai@localhost:5532/ai" # Function to set up the Assistant, utilizing caching for resource efficiency @st.cache_resource -def setup_assistant(api_key: str) -> Assistant: - llm = OpenAIChat(model="gpt-4o-mini", api_key=api_key) +def setup_assistant(api_key: str) -> Agent: + llm = OpenAIChat(id="gpt-4o-mini", api_key=api_key) # Set up the Assistant with storage, knowledge base, and tools - return Assistant( - name="auto_rag_assistant", # Name of the Assistant - llm=llm, # Language model to be used - storage=PgAssistantStorage(table_name="auto_rag_storage", db_url=DB_URL), - knowledge_base=AssistantKnowledge( - vector_db=PgVector2( + return Agent( + id="auto_rag_agent", # Name of the Assistant + model=llm, # Language model to be used + storage=PostgresAgentStorage(table_name="auto_rag_storage", db_url=DB_URL), + 
knowledge_base=PDFUrlKnowledgeBase( + vector_db=PgVector( db_url=DB_URL, collection="auto_rag_docs", - embedder=OpenAIEmbedder(model="text-embedding-ada-002", dimensions=1536, api_key=api_key), + embedder=OpenAIEmbedder(id="text-embedding-ada-002", dimensions=1536, api_key=api_key), ), num_documents=3, ), - tools=[DuckDuckGo()], # Additional tool for web search via DuckDuckGo + tools=[DuckDuckGoTools()], # Additional tool for web search via DuckDuckGo instructions=[ "Search your knowledge base first.", "If not found, search the internet.", @@ -41,24 +41,23 @@ def setup_assistant(api_key: str) -> Assistant: ], show_tool_calls=True, search_knowledge=True, - read_chat_history=True, markdown=True, debug_mode=True, ) # Function to add a PDF document to the knowledge base -def add_document(assistant: Assistant, file: BytesIO): +def add_document(agent: Agent, file: BytesIO): reader = PDFReader() docs = reader.read(file) if docs: - assistant.knowledge_base.load_documents(docs, upsert=True) + agent.knowledge_base.load_documents(docs, upsert=True) st.success("Document added to the knowledge base.") else: st.error("Failed to read the document.") # Function to query the Assistant and return a response -def query_assistant(assistant: Assistant, question: str) -> str: - return "".join([delta for delta in assistant.run(question)]) +def query_assistant(agent: Agent, question: str) -> str: + return "".join([delta for delta in agent.run(question)]) # Main function to handle Streamlit app layout and interactions def main(): @@ -87,7 +86,7 @@ def main(): with st.spinner("🤔 Thinking..."): # Query the assistant and display the response answer = query_assistant(assistant, question) - st.write("📝 **Response:**", answer) + st.write("📝 **Response:**", answer.content) else: # Show an error if the question input is empty st.error("Please enter a question.") diff --git a/rag_tutorials/autonomous_rag/requirements.txt b/rag_tutorials/autonomous_rag/requirements.txt index 365f9d3..6abc006 100644 
--- a/rag_tutorials/autonomous_rag/requirements.txt +++ b/rag_tutorials/autonomous_rag/requirements.txt @@ -1,5 +1,5 @@ streamlit -phidata +agno openai psycopg-binary pgvector diff --git a/rag_tutorials/local_rag_agent/local_rag_agent.py b/rag_tutorials/local_rag_agent/local_rag_agent.py index 6ffdf72..8e3c272 100644 --- a/rag_tutorials/local_rag_agent/local_rag_agent.py +++ b/rag_tutorials/local_rag_agent/local_rag_agent.py @@ -1,10 +1,10 @@ # Import necessary libraries -from phi.agent import Agent -from phi.model.ollama import Ollama -from phi.knowledge.pdf import PDFUrlKnowledgeBase -from phi.vectordb.qdrant import Qdrant -from phi.embedder.ollama import OllamaEmbedder -from phi.playground import Playground, serve_playground_app +from agno.agent import Agent +from agno.models.ollama import Ollama +from agno.knowledge.pdf_url import PDFUrlKnowledgeBase +from agno.vectordb.qdrant import Qdrant +from agno.embedder.ollama import OllamaEmbedder +from agno.playground import Playground, serve_playground_app # Define the collection name for the vector database collection_name = "thai-recipe-index" diff --git a/rag_tutorials/local_rag_agent/requirements.txt b/rag_tutorials/local_rag_agent/requirements.txt index fce98c3..38393d7 100644 --- a/rag_tutorials/local_rag_agent/requirements.txt +++ b/rag_tutorials/local_rag_agent/requirements.txt @@ -1,4 +1,4 @@ -phidata +agno qdrant-client ollama pypdf diff --git a/rag_tutorials/rag_chain/app.py b/rag_tutorials/rag_chain/app.py index 2a16b73..92eea3f 100644 --- a/rag_tutorials/rag_chain/app.py +++ b/rag_tutorials/rag_chain/app.py @@ -10,8 +10,6 @@ from langchain_google_genai import ChatGoogleGenerativeAI from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import RunnablePassthrough -from dotenv import load_dotenv -load_dotenv() # Initialize embedding model embedding_model = GoogleGenerativeAIEmbeddings(model="models/embedding-001")