mirror of
https://github.com/Shubhamsaboo/awesome-llm-apps.git
synced 2026-04-29 22:59:12 -05:00
chore: Updated the structure
45  rag_tutorials/agentic_rag/README.md  Normal file
@@ -0,0 +1,45 @@
## 🗃️ AI RAG Agent with Web Access

This script demonstrates how to build a Retrieval-Augmented Generation (RAG) agent with web access using GPT-4o in about 30 lines of Python code. The agent uses a PDF knowledge base and can search the web using DuckDuckGo.

### Features

- Creates a RAG agent using GPT-4o
- Incorporates a PDF-based knowledge base
- Uses LanceDB as the vector database for efficient similarity search
- Includes web search capability through DuckDuckGo
- Provides a playground interface for easy interaction

### How to Get Started?

1. Clone the GitHub repository

```bash
git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git
cd awesome-llm-apps/rag_tutorials/agentic_rag
```

2. Install the required dependencies:

```bash
pip install -r requirements.txt
```

3. Get your OpenAI API key

- Sign up for an [OpenAI account](https://platform.openai.com/) (or the LLM provider of your choice) and obtain your API key.
- Set your OpenAI API key as an environment variable:

```bash
export OPENAI_API_KEY='your-api-key-here'
```

4. Run the AI RAG Agent

```bash
python3 rag_agent.py
```

5. Open your web browser and navigate to the URL shown in the console output to interact with the RAG agent through the playground interface.

### How It Works?

1. **Knowledge Base Creation:** The script builds a knowledge base from a PDF file hosted online.
2. **Vector Database Setup:** LanceDB serves as the vector database for efficient similarity search within the knowledge base.
3. **Agent Configuration:** An AI agent is created with GPT-4o as the underlying model, the PDF knowledge base, and the DuckDuckGo search tool.
4. **Playground Setup:** A playground interface is set up for easy interaction with the RAG agent. You can also query the agent headlessly from Python, as sketched below.
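If you want to query the agent without the playground UI, a minimal sketch follows. It reuses only constructs from `rag_agent.py`; the question string is illustrative, and it assumes the knowledge base was already indexed by a first run of the script.

```python
from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.knowledge.pdf import PDFUrlKnowledgeBase
from phi.vectordb.lancedb import LanceDb, SearchType

# Reuse the LanceDB table populated by rag_agent.py (no re-indexing here)
knowledge_base = PDFUrlKnowledgeBase(
    urls=["https://phi-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
    vector_db=LanceDb(table_name="recipes", uri="tmp/lancedb", search_type=SearchType.vector),
)

agent = Agent(model=OpenAIChat(id="gpt-4o"), knowledge=knowledge_base)

# Streams a markdown answer grounded in the PDF to stdout
agent.print_response("How do I make Pad Thai?", stream=True)
```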
30  rag_tutorials/agentic_rag/rag_agent.py  Normal file
@@ -0,0 +1,30 @@
from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.knowledge.pdf import PDFUrlKnowledgeBase
from phi.vectordb.lancedb import LanceDb, SearchType
from phi.playground import Playground, serve_playground_app
from phi.tools.duckduckgo import DuckDuckGo

db_uri = "tmp/lancedb"

# Create a knowledge base from a PDF
knowledge_base = PDFUrlKnowledgeBase(
    urls=["https://phi-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
    # Use LanceDB as the vector database
    vector_db=LanceDb(table_name="recipes", uri=db_uri, search_type=SearchType.vector),
)
# Load the knowledge base: comment out after the first run to avoid re-indexing
knowledge_base.load(upsert=True)

rag_agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    agent_id="rag-agent",
    knowledge=knowledge_base,  # Add the knowledge base to the agent
    tools=[DuckDuckGo()],      # Give the agent web search via DuckDuckGo
    show_tool_calls=True,      # Surface tool calls in the responses
    markdown=True,
)

app = Playground(agents=[rag_agent]).get_app()

if __name__ == "__main__":
    serve_playground_app("rag_agent:app", reload=True)
8  rag_tutorials/agentic_rag/requirements.txt  Normal file
@@ -0,0 +1,8 @@
phidata
openai
lancedb
tantivy
pypdf
sqlalchemy
pgvector
psycopg[binary]
43  rag_tutorials/autonomous_rag/README.md  Normal file
@@ -0,0 +1,43 @@
## 🤖 AutoRAG: Autonomous RAG with GPT-4o and Vector Database

This Streamlit application implements an Autonomous Retrieval-Augmented Generation (RAG) system using OpenAI's GPT-4o model family (the script defaults to gpt-4o-mini) and a PgVector database. It lets users upload PDF documents, add them to a knowledge base, and query the AI assistant with context drawn from both the knowledge base and web searches.

### Features

- Chat interface for interacting with the AI assistant
- PDF document upload and processing
- Knowledge base integration using PostgreSQL and PgVector
- Web search capability using DuckDuckGo
- Persistent storage of assistant data and conversations

### How to Get Started?

1. Clone the GitHub repository

```bash
git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git
cd awesome-llm-apps/rag_tutorials/autonomous_rag
```

2. Install the required dependencies:

```bash
pip install -r requirements.txt
```

3. Ensure the PgVector database is running:

The app expects PgVector to be listening on `localhost:5532`. Adjust the configuration in the code if your setup is different.

```bash
docker run -d \
  -e POSTGRES_DB=ai \
  -e POSTGRES_USER=ai \
  -e POSTGRES_PASSWORD=ai \
  -e PGDATA=/var/lib/postgresql/data/pgdata \
  -v pgvolume:/var/lib/postgresql/data \
  -p 5532:5432 \
  --name pgvector \
  phidata/pgvector:16
```
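To confirm the database is reachable before launching the app, a quick sanity check (assuming the container name and credentials from the command above):

```bash
# Should print the PostgreSQL version if the container is healthy
docker exec -it pgvector psql -U ai -d ai -c "SELECT version();"
```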
4. Run the Streamlit App

```bash
streamlit run autorag.py
```

The app asks for your OpenAI API key in the sidebar before it starts the assistant.
97  rag_tutorials/autonomous_rag/autorag.py  Normal file
@@ -0,0 +1,97 @@
import streamlit as st
import nest_asyncio
from io import BytesIO
from phi.assistant import Assistant
from phi.document.reader.pdf import PDFReader
from phi.llm.openai import OpenAIChat
from phi.knowledge import AssistantKnowledge
from phi.tools.duckduckgo import DuckDuckGo
from phi.embedder.openai import OpenAIEmbedder
from phi.vectordb.pgvector import PgVector2
from phi.storage.assistant.postgres import PgAssistantStorage

# Apply nest_asyncio to allow nested event loops, required for running async functions in Streamlit
nest_asyncio.apply()

# Database connection string for PostgreSQL (matches the PgVector container from the README)
DB_URL = "postgresql+psycopg://ai:ai@localhost:5532/ai"

# Function to set up the Assistant, utilizing caching for resource efficiency
@st.cache_resource
def setup_assistant(api_key: str) -> Assistant:
    llm = OpenAIChat(model="gpt-4o-mini", api_key=api_key)
    # Set up the Assistant with storage, knowledge base, and tools
    return Assistant(
        name="auto_rag_assistant",  # Name of the Assistant
        llm=llm,  # Language model to be used
        storage=PgAssistantStorage(table_name="auto_rag_storage", db_url=DB_URL),
        knowledge_base=AssistantKnowledge(
            vector_db=PgVector2(
                db_url=DB_URL,
                collection="auto_rag_docs",
                embedder=OpenAIEmbedder(model="text-embedding-ada-002", dimensions=1536, api_key=api_key),
            ),
            num_documents=3,  # Number of documents to retrieve per query
        ),
        tools=[DuckDuckGo()],  # Additional tool for web search via DuckDuckGo
        instructions=[
            "Search your knowledge base first.",
            "If not found, search the internet.",
            "Provide clear and concise answers.",
        ],
        show_tool_calls=True,
        search_knowledge=True,
        read_chat_history=True,
        markdown=True,
        debug_mode=True,
    )

# Function to add a PDF document to the knowledge base
def add_document(assistant: Assistant, file: BytesIO):
    reader = PDFReader()
    docs = reader.read(file)
    if docs:
        assistant.knowledge_base.load_documents(docs, upsert=True)
        st.success("Document added to the knowledge base.")
    else:
        st.error("Failed to read the document.")

# Function to query the Assistant and return a response
def query_assistant(assistant: Assistant, question: str) -> str:
    # assistant.run() streams the answer as deltas; join them into a single string
    return "".join([delta for delta in assistant.run(question)])

# Main function to handle Streamlit app layout and interactions
def main():
    st.set_page_config(page_title="AutoRAG", layout="wide")
    st.title("🤖 Auto-RAG: Autonomous RAG with GPT-4o")

    api_key = st.sidebar.text_input("Enter your OpenAI API Key 🔑", type="password")

    if not api_key:
        st.sidebar.warning("Enter your OpenAI API Key to proceed.")
        st.stop()

    assistant = setup_assistant(api_key)

    uploaded_file = st.sidebar.file_uploader("📄 Upload PDF", type=["pdf"])

    if uploaded_file and st.sidebar.button("🛠️ Add to Knowledge Base"):
        add_document(assistant, BytesIO(uploaded_file.read()))

    question = st.text_input("💬 Ask Your Question:")

    # When the user submits a question, query the assistant for an answer
    if st.button("🔍 Get Answer"):
        # Ensure the question is not empty
        if question.strip():
            with st.spinner("🤔 Thinking..."):
                # Query the assistant and display the response
                answer = query_assistant(assistant, question)
            st.write("📝 **Response:**", answer)
        else:
            # Show an error if the question input is empty
            st.error("Please enter a question.")

# Entry point of the application
if __name__ == "__main__":
    main()
9  rag_tutorials/autonomous_rag/requirements.txt  Normal file
@@ -0,0 +1,9 @@
streamlit
phidata
openai
psycopg[binary]
pgvector
requests
sqlalchemy
pypdf
duckduckgo-search
34  rag_tutorials/llama3.1_local_rag/README.md  Normal file
@@ -0,0 +1,34 @@
## 💻 Local Llama-3.1 with RAG

Streamlit app that lets you chat with any webpage using a local Llama-3.1 model and Retrieval-Augmented Generation (RAG). The model runs entirely on your computer, making it 100% free; an internet connection is only needed to fetch the webpage itself.

### Features

- Input a webpage URL
- Ask questions about the content of the webpage
- Get accurate answers using RAG and the Llama-3.1 model running locally on your computer

### How to Get Started?

1. Clone the GitHub repository

```bash
git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git
cd awesome-llm-apps/rag_tutorials/llama3.1_local_rag
```

2. Install the required dependencies:

```bash
pip install -r requirements.txt
```

3. Run the Streamlit App

```bash
streamlit run llama3.1_local_rag.py
```
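Note: the app assumes a local [Ollama](https://ollama.com/) server is running with the `llama3.1` model available. If you haven't pulled the model yet:

```bash
ollama pull llama3.1
```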
### How It Works?

- The app loads the webpage data using WebBaseLoader and splits it into chunks using RecursiveCharacterTextSplitter.
- It creates Ollama embeddings and a vector store using Chroma.
- The app sets up a RAG (Retrieval-Augmented Generation) chain, which retrieves relevant documents based on the user's question.
- The Llama-3.1 model is called to generate an answer using the retrieved context.
- The app displays the answer to the user's question.
50  rag_tutorials/llama3.1_local_rag/llama3.1_local_rag.py  Normal file
@@ -0,0 +1,50 @@
import streamlit as st
import ollama
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import OllamaEmbeddings

st.title("Chat with Webpage 🌐")
st.caption("This app allows you to chat with a webpage using local Llama-3.1 and RAG")

# Get the webpage URL from the user
webpage_url = st.text_input("Enter Webpage URL", type="default")

if webpage_url:
    # 1. Load the webpage and split it into overlapping chunks
    loader = WebBaseLoader(webpage_url)
    docs = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=10)
    splits = text_splitter.split_documents(docs)

    # 2. Create Ollama embeddings and vector store
    embeddings = OllamaEmbeddings(model="llama3.1")
    vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)

    # 3. Call the local Llama-3.1 model via Ollama
    def ollama_llm(question, context):
        formatted_prompt = f"Question: {question}\n\nContext: {context}"
        response = ollama.chat(model='llama3.1', messages=[{'role': 'user', 'content': formatted_prompt}])
        return response['message']['content']

    # 4. RAG setup: retrieve relevant chunks and feed them to the model
    retriever = vectorstore.as_retriever()

    def combine_docs(docs):
        return "\n\n".join(doc.page_content for doc in docs)

    def rag_chain(question):
        retrieved_docs = retriever.invoke(question)
        formatted_context = combine_docs(retrieved_docs)
        return ollama_llm(question, formatted_context)

    st.success(f"Loaded {webpage_url} successfully!")

    # Ask a question about the webpage
    prompt = st.text_input("Ask any question about the webpage")

    # Chat with the webpage
    if prompt:
        result = rag_chain(prompt)
        st.write(result)
4  rag_tutorials/llama3.1_local_rag/requirements.txt  Normal file
@@ -0,0 +1,4 @@
streamlit
ollama
langchain
langchain_community
33  rag_tutorials/rag-as-a-service/README.md  Normal file
@@ -0,0 +1,33 @@
## 🖇️ RAG-as-a-Service with Claude 3.5 Sonnet

Build and deploy a production-ready Retrieval-Augmented Generation (RAG) service using Claude 3.5 Sonnet and Ragie.ai. This implementation lets you create a document-querying system with a user-friendly Streamlit interface in under 200 lines of Python code.

### Features

- Production-ready RAG pipeline
- Integration with Claude 3.5 Sonnet for response generation
- Document upload from URLs
- Real-time document querying
- Support for both fast and accurate document processing modes

### How to Get Started?

1. Clone the GitHub repository

```bash
git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git
cd awesome-llm-apps/rag_tutorials/rag-as-a-service
```

2. Install the required dependencies:

```bash
pip install -r requirements.txt
```

3. Get your Anthropic and Ragie API keys

- Sign up for an [Anthropic account](https://console.anthropic.com/) and get your API key
- Sign up for a [Ragie account](https://www.ragie.ai/) and get your API key
- You will enter both keys in the app's API Keys Configuration panel when it starts

4. Run the Streamlit app

```bash
streamlit run rag_app.py
```
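Under the hood, the app calls Ragie's REST API directly. For reference, the retrieval request that `rag_app.py` sends is roughly equivalent to the following sketch (assuming your key is exported as `RAGIE_API_KEY`; the query string is illustrative):

```bash
curl -s -X POST https://api.ragie.ai/retrievals \
  -H "Authorization: Bearer $RAGIE_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{"query": "What is this document about?", "filters": {"scope": "tutorial"}}'
```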
190  rag_tutorials/rag-as-a-service/rag_app.py  Normal file
@@ -0,0 +1,190 @@
import streamlit as st
import requests
from anthropic import Anthropic
import time
from typing import List, Dict, Optional
from urllib.parse import urlparse


class RAGPipeline:
    def __init__(self, ragie_api_key: str, anthropic_api_key: str):
        """Initialize the RAG pipeline with API keys."""
        self.ragie_api_key = ragie_api_key
        self.anthropic_api_key = anthropic_api_key
        self.anthropic_client = Anthropic(api_key=anthropic_api_key)

        # Ragie API endpoints
        self.RAGIE_UPLOAD_URL = "https://api.ragie.ai/documents/url"
        self.RAGIE_RETRIEVAL_URL = "https://api.ragie.ai/retrievals"

    def upload_document(self, url: str, name: Optional[str] = None, mode: str = "fast") -> Dict:
        """Upload a document to Ragie from a URL."""
        if not name:
            name = urlparse(url).path.split('/')[-1] or "document"

        payload = {
            "mode": mode,
            "name": name,
            "url": url
        }

        headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {self.ragie_api_key}"
        }

        response = requests.post(self.RAGIE_UPLOAD_URL, json=payload, headers=headers)

        if not response.ok:
            raise Exception(f"Document upload failed: {response.status_code} {response.reason}")

        return response.json()

    def retrieve_chunks(self, query: str, scope: str = "tutorial") -> List[str]:
        """Retrieve relevant chunks from Ragie for a given query."""
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.ragie_api_key}"
        }

        payload = {
            "query": query,
            "filters": {
                "scope": scope
            }
        }

        response = requests.post(
            self.RAGIE_RETRIEVAL_URL,
            headers=headers,
            json=payload
        )

        if not response.ok:
            raise Exception(f"Retrieval failed: {response.status_code} {response.reason}")

        data = response.json()
        return [chunk["text"] for chunk in data["scored_chunks"]]

    def create_system_prompt(self, chunk_texts: List[str]) -> str:
        """Create the system prompt with the retrieved chunks."""
        return f"""These are very important to follow: You are "Ragie AI", a professional but friendly AI chatbot working as an assistant to the user. Your current task is to help the user based on all of the information available to you shown below. Answer informally, directly, and concisely without a heading or greeting but include everything relevant. Use richtext Markdown when appropriate including bold, italic, paragraphs, and lists when helpful. If using LaTeX, use double $$ as delimiter instead of single $. Use $$...$$ instead of parentheses. Organize information into multiple sections or points when appropriate. Don't include raw item IDs or other raw fields from the source. Don't use XML or other markup unless requested by the user. Here is all of the information available to answer the user: === {chunk_texts} === If the user asked for a search and there are no results, make sure to let the user know that you couldn't find anything, and what they might be able to do to find the information they need. END SYSTEM INSTRUCTIONS"""

    def generate_response(self, system_prompt: str, query: str) -> str:
        """Generate a response using Claude 3.5 Sonnet."""
        message = self.anthropic_client.messages.create(
            # Claude 3.5 Sonnet, as the README advertises; the original code
            # pinned "claude-3-sonnet-20240229", an older Claude 3 model
            model="claude-3-5-sonnet-20240620",
            max_tokens=1024,
            system=system_prompt,
            messages=[
                {
                    "role": "user",
                    "content": query
                }
            ]
        )

        return message.content[0].text

    def process_query(self, query: str, scope: str = "tutorial") -> str:
        """Process a query through the complete RAG pipeline."""
        chunks = self.retrieve_chunks(query, scope)

        if not chunks:
            return "No relevant information found for your query."

        system_prompt = self.create_system_prompt(chunks)
        return self.generate_response(system_prompt, query)


def initialize_session_state():
    """Initialize session state variables."""
    if 'pipeline' not in st.session_state:
        st.session_state.pipeline = None
    if 'document_uploaded' not in st.session_state:
        st.session_state.document_uploaded = False
    if 'api_keys_submitted' not in st.session_state:
        st.session_state.api_keys_submitted = False


def main():
    st.set_page_config(page_title="RAG-as-a-Service", layout="wide")
    initialize_session_state()

    st.title("🖇️ RAG-as-a-Service")

    # API Keys Section
    with st.expander("🔑 API Keys Configuration", expanded=not st.session_state.api_keys_submitted):
        col1, col2 = st.columns(2)
        with col1:
            ragie_key = st.text_input("Ragie API Key", type="password", key="ragie_key")
        with col2:
            anthropic_key = st.text_input("Anthropic API Key", type="password", key="anthropic_key")

        if st.button("Submit API Keys"):
            if ragie_key and anthropic_key:
                try:
                    st.session_state.pipeline = RAGPipeline(ragie_key, anthropic_key)
                    st.session_state.api_keys_submitted = True
                    st.success("API keys configured successfully!")
                except Exception as e:
                    st.error(f"Error configuring API keys: {str(e)}")
            else:
                st.error("Please provide both API keys.")

    # Document Upload Section
    if st.session_state.api_keys_submitted:
        st.markdown("### 📄 Document Upload")
        doc_url = st.text_input("Enter document URL")
        doc_name = st.text_input("Document name (optional)")

        col1, col2 = st.columns([1, 3])
        with col1:
            upload_mode = st.selectbox("Upload mode", ["fast", "accurate"])

        if st.button("Upload Document"):
            if doc_url:
                try:
                    with st.spinner("Uploading document..."):
                        st.session_state.pipeline.upload_document(
                            url=doc_url,
                            name=doc_name if doc_name else None,
                            mode=upload_mode
                        )
                        time.sleep(5)  # Give Ragie a moment to index the document
                        st.session_state.document_uploaded = True
                        st.success("Document uploaded and indexed successfully!")
                except Exception as e:
                    st.error(f"Error uploading document: {str(e)}")
            else:
                st.error("Please provide a document URL.")

    # Query Section
    if st.session_state.document_uploaded:
        st.markdown("### 🔍 Query Document")
        query = st.text_input("Enter your query")

        if st.button("Generate Response"):
            if query:
                try:
                    with st.spinner("Generating response..."):
                        response = st.session_state.pipeline.process_query(query)
                        st.markdown("### Response:")
                        st.markdown(response)
                except Exception as e:
                    st.error(f"Error generating response: {str(e)}")
            else:
                st.error("Please enter a query.")


if __name__ == "__main__":
    main()
3  rag_tutorials/rag-as-a-service/requirements.txt  Normal file
@@ -0,0 +1,3 @@
streamlit
anthropic
requests