From f51e282f6139d27e91076fb09da1f1a4639039ed Mon Sep 17 00:00:00 2001
From: ShubhamSaboo
Date: Sat, 9 Nov 2024 18:50:46 -0600
Subject: [PATCH] Added new tutorial

---
 rag_tutorials/local_rag_agent/README.md  | 45 +++++++++++++++++++
 .../local_rag_agent/local_rag_agent.py    | 40 +++++++++++++++++
 .../local_rag_agent/requirements.txt      |  4 ++
 3 files changed, 89 insertions(+)
 create mode 100644 rag_tutorials/local_rag_agent/README.md
 create mode 100644 rag_tutorials/local_rag_agent/local_rag_agent.py
 create mode 100644 rag_tutorials/local_rag_agent/requirements.txt

diff --git a/rag_tutorials/local_rag_agent/README.md b/rag_tutorials/local_rag_agent/README.md
new file mode 100644
index 0000000..f5ff1f8
--- /dev/null
+++ b/rag_tutorials/local_rag_agent/README.md
@@ -0,0 +1,45 @@
+## 🦙 Local RAG Agent with Llama 3.2
+This application implements a Retrieval-Augmented Generation (RAG) system using Llama 3.2 via Ollama, with Qdrant as the vector database.
+
+
+### Features
+- Fully local RAG implementation
+- Powered by Llama 3.2 through Ollama
+- Vector search using Qdrant
+- Interactive playground interface
+- No external API dependencies
+
+### How to Get Started?
+
+1. Clone the GitHub repository
+```bash
+git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git
+```
+
+2. Install the required dependencies:
+
+```bash
+cd rag_tutorials/local_rag_agent
+pip install -r requirements.txt
+```
+
+3. Install and start the [Qdrant](https://qdrant.tech/) vector database locally
+
+```bash
+docker pull qdrant/qdrant
+docker run -p 6333:6333 qdrant/qdrant
+```
+
+4. Install [Ollama](https://ollama.com/download) and pull Llama 3.2
+```bash
+ollama pull llama3.2
+```
+
+5. Run the AI RAG Agent
+```bash
+python local_rag_agent.py
+```
+
+6. Open your web browser and navigate to the URL provided in the console output to interact with the RAG agent through the playground interface.
+
+
diff --git a/rag_tutorials/local_rag_agent/local_rag_agent.py b/rag_tutorials/local_rag_agent/local_rag_agent.py
new file mode 100644
index 0000000..6ffdf72
--- /dev/null
+++ b/rag_tutorials/local_rag_agent/local_rag_agent.py
@@ -0,0 +1,40 @@
+# Import necessary libraries
+from phi.agent import Agent
+from phi.model.ollama import Ollama
+from phi.knowledge.pdf import PDFUrlKnowledgeBase
+from phi.vectordb.qdrant import Qdrant
+from phi.embedder.ollama import OllamaEmbedder
+from phi.playground import Playground, serve_playground_app
+
+# Define the collection name for the vector database
+collection_name = "thai-recipe-index"
+
+# Set up Qdrant as the vector database with the embedder
+vector_db = Qdrant(
+    collection=collection_name,
+    url="http://localhost:6333/",
+    embedder=OllamaEmbedder()
+)
+
+# Define the knowledge base with the specified PDF URL
+knowledge_base = PDFUrlKnowledgeBase(
+    urls=["https://phi-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
+    vector_db=vector_db,
+)
+
+# Load the knowledge base, comment out after the first run to avoid reloading
+knowledge_base.load(recreate=True, upsert=True)
+
+# Create the Agent using Ollama's llama3.2 model and the knowledge base
+agent = Agent(
+    name="Local RAG Agent",
+    model=Ollama(id="llama3.2"),
+    knowledge=knowledge_base,
+)
+
+# UI for RAG agent
+app = Playground(agents=[agent]).get_app()
+
+# Run the Playground app
+if __name__ == "__main__":
+    serve_playground_app("local_rag_agent:app", reload=True)
diff --git a/rag_tutorials/local_rag_agent/requirements.txt b/rag_tutorials/local_rag_agent/requirements.txt
new file mode 100644
index 0000000..527985b
--- /dev/null
+++ b/rag_tutorials/local_rag_agent/requirements.txt
@@ -0,0 +1,4 @@
+phidata
+qdrant-client
+ollama
+pypdf
\ No newline at end of file
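
As an optional alternative to the Playground UI, the same agent can be queried from a terminal script. The sketch below is a minimal example under the same setup as `local_rag_agent.py`; the `query_agent.py` filename and the sample question are illustrative rather than part of the patch, and `recreate=False` assumes the PDF was already indexed on a first run of the tutorial script.

```python
# query_agent.py -- optional sanity check: query the RAG agent from the terminal
# instead of the Playground. Assumes Qdrant and Ollama are running locally and
# that local_rag_agent.py has already indexed the PDF once.
from phi.agent import Agent
from phi.model.ollama import Ollama
from phi.knowledge.pdf import PDFUrlKnowledgeBase
from phi.vectordb.qdrant import Qdrant
from phi.embedder.ollama import OllamaEmbedder

# Point at the same Qdrant collection the tutorial script created.
vector_db = Qdrant(
    collection="thai-recipe-index",
    url="http://localhost:6333/",
    embedder=OllamaEmbedder(),
)

knowledge_base = PDFUrlKnowledgeBase(
    urls=["https://phi-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
    vector_db=vector_db,
)
# recreate=False reuses the existing index instead of re-embedding the PDF.
knowledge_base.load(recreate=False)

agent = Agent(
    name="Local RAG Agent",
    model=Ollama(id="llama3.2"),
    knowledge=knowledge_base,
)

# The question is only an example; chunks retrieved from Qdrant ground the llama3.2 answer.
agent.print_response("What ingredients do I need for Pad Thai?", stream=True)
```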