Added New Examples

This commit is contained in:
ShubhamSaboo
2024-04-29 15:01:47 -05:00
parent b3a8f66583
commit 097504f6a9
8 changed files with 112 additions and 0 deletions

BIN
chat_with_pdf/.DS_Store vendored Normal file

Binary file not shown.

30
chat_with_pdf/README.md Normal file
View File

@@ -0,0 +1,30 @@
## Chat with PDF 📚
LLM app with RAG to chat with a PDF in just 30 lines of Python code. The app uses Retrieval Augmented Generation (RAG) to provide accurate answers to questions based on the content of the uploaded PDF.
### Features
- Upload a PDF document
- Ask questions about the content of the PDF
- Get accurate answers using RAG and the selected LLM
### How to Get Started
1. Clone the GitHub repository
```bash
git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git
```
2. Install the required dependencies:
```bash
pip install -r requirements.txt
```
3. Get your OpenAI API Key
- Sign up for an [OpenAI account](https://platform.openai.com/) (or the LLM provider of your choice) and obtain your API key.
4. Run the Streamlit App
```bash
streamlit run chat_pdf.py
```

Binary file not shown.

38
chat_with_pdf/chat_pdf.py Normal file
View File

@@ -0,0 +1,38 @@
import os
import tempfile
import streamlit as st
from embedchain import App
def embedchain_bot(db_path, api_key):
    """Build an Embedchain App backed by OpenAI with a local Chroma store.

    Args:
        db_path: Directory where the Chroma vector database is kept.
        api_key: OpenAI API key, used by both the LLM and the embedder.

    Returns:
        A configured embedchain ``App`` instance.
    """
    app_config = {
        "llm": {"provider": "openai", "config": {"api_key": api_key}},
        "vectordb": {"provider": "chroma", "config": {"dir": db_path}},
        "embedder": {"provider": "openai", "config": {"api_key": api_key}},
    }
    return App.from_config(config=app_config)
# --- Streamlit UI ---------------------------------------------------------
st.title("Chat with PDF")

openai_access_token = st.text_input("OpenAI API Key", type="password")

if openai_access_token:
    # Streamlit re-runs this entire script on every widget interaction.
    # The original code therefore re-created the temp DB and re-embedded the
    # PDF on every question; cache the app in session_state so the vector
    # store survives reruns.
    if "app" not in st.session_state:
        db_path = tempfile.mkdtemp()
        st.session_state.app = embedchain_bot(db_path, openai_access_token)
        st.session_state.added_files = set()
    app = st.session_state.app

    pdf_file = st.file_uploader("Upload a PDF file", type="pdf")
    if pdf_file:
        # Embed each uploaded file only once per session.
        if pdf_file.name not in st.session_state.added_files:
            with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as f:
                f.write(pdf_file.getvalue())
            try:
                app.add(f.name, data_type="pdf_file")
            finally:
                # Always remove the temp copy, even if embedding fails.
                os.remove(f.name)
            st.session_state.added_files.add(pdf_file.name)
            st.success(f"Added {pdf_file.name} to knowledge base!")

    prompt = st.text_input("Ask a question about the PDF")
    if prompt:
        answer = app.chat(prompt)
        st.write(answer)

View File

@@ -0,0 +1,2 @@
streamlit
embedchain

View File

@@ -0,0 +1,42 @@
# Import the required libraries
import tempfile
import streamlit as st
from embedchain import App
# Build an Embedchain App that answers with GPT-4 Turbo and keeps its Chroma
# vector store under ``db_path``; ``api_key`` authenticates both the LLM and
# the embedder.
def embedchain_bot(db_path, api_key):
    llm_config = {"model": "gpt-4-turbo", "temperature": 0.5, "api_key": api_key}
    return App.from_config(
        config={
            "llm": {"provider": "openai", "config": llm_config},
            "vectordb": {"provider": "chroma", "config": {"dir": db_path}},
            "embedder": {"provider": "openai", "config": {"api_key": api_key}},
        }
    )
# --- Streamlit UI ---------------------------------------------------------
st.title("Chat with YouTube Video 📺")
st.caption("This app allows you to chat with a YouTube video using OpenAI API")

# Get the OpenAI API key from the user
openai_access_token = st.text_input("OpenAI API Key", type="password")

if openai_access_token:
    # Streamlit re-runs the whole script on each interaction; the original
    # code rebuilt the temp DB and re-embedded the video on every question.
    # Cache the app in session_state so the vector store survives reruns.
    if "yt_app" not in st.session_state:
        db_path = tempfile.mkdtemp()
        st.session_state.yt_app = embedchain_bot(db_path, openai_access_token)
        st.session_state.added_videos = set()
    app = st.session_state.yt_app

    # Get the YouTube video URL from the user
    video_url = st.text_input("Enter YouTube Video URL", type="default")
    if video_url:
        # Embed each URL only once per session instead of on every rerun.
        if video_url not in st.session_state.added_videos:
            app.add(video_url, data_type="youtube_video")
            st.session_state.added_videos.add(video_url)
            st.success(f"Added {video_url} to knowledge base!")

    # Chat with the video
    prompt = st.text_input("Ask any question about the YouTube Video")
    if prompt:
        answer = app.chat(prompt)
        st.write(answer)

BIN
docs/.DS_Store vendored Normal file

Binary file not shown.

BIN
docs/banner/unwind.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1015 KiB