diff --git a/chat_with_pdf/.DS_Store b/chat_with_pdf/.DS_Store new file mode 100644 index 0000000..d845118 Binary files /dev/null and b/chat_with_pdf/.DS_Store differ diff --git a/chat_with_pdf/README.md b/chat_with_pdf/README.md new file mode 100644 index 0000000..8d4df03 --- /dev/null +++ b/chat_with_pdf/README.md @@ -0,0 +1,30 @@ +## Chat with PDF 📚 + +LLM app with RAG to chat with a PDF in just 30 lines of Python code. The app uses Retrieval Augmented Generation (RAG) to provide accurate answers to questions based on the content of the uploaded PDF. + +### Features + +- Upload a PDF document +- Ask questions about the content of the PDF +- Get accurate answers using RAG and the selected LLM + +### How to Get Started + +1. Clone the GitHub repository + +```bash +git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git +``` +2. Install the required dependencies: + +```bash +pip install -r requirements.txt +``` +3. Get your OpenAI API Key + +- Sign up for an [OpenAI account](https://platform.openai.com/) (or the LLM provider of your choice) and obtain your API key. + +4. 
Run the Streamlit App +```bash +streamlit run chat_pdf.py +``` diff --git a/chat_with_pdf/assets/chatwithpdf.mov b/chat_with_pdf/assets/chatwithpdf.mov new file mode 100644 index 0000000..ba1384e Binary files /dev/null and b/chat_with_pdf/assets/chatwithpdf.mov differ diff --git a/chat_with_pdf/chat_pdf.py b/chat_with_pdf/chat_pdf.py new file mode 100644 index 0000000..ac1719f --- /dev/null +++ b/chat_with_pdf/chat_pdf.py @@ -0,0 +1,38 @@ +import os +import tempfile +import streamlit as st +from embedchain import App + +def embedchain_bot(db_path, api_key): + return App.from_config( + config={ + "llm": {"provider": "openai", "config": {"api_key": api_key}}, + "vectordb": {"provider": "chroma", "config": {"dir": db_path}}, + "embedder": {"provider": "openai", "config": {"api_key": api_key}}, + } + ) + +st.title("Chat with PDF") + +openai_access_token = st.text_input("OpenAI API Key", type="password") + +if openai_access_token: + db_path = tempfile.mkdtemp() + app = embedchain_bot(db_path, openai_access_token) + + pdf_file = st.file_uploader("Upload a PDF file", type="pdf") + + if pdf_file: + with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as f: + f.write(pdf_file.getvalue()) + app.add(f.name, data_type="pdf_file") + os.remove(f.name) + st.success(f"Added {pdf_file.name} to knowledge base!") + + prompt = st.text_input("Ask a question about the PDF") + + if prompt: + answer = app.chat(prompt) + st.write(answer) + + \ No newline at end of file diff --git a/chat_with_pdf/requirements.txt b/chat_with_pdf/requirements.txt new file mode 100644 index 0000000..ca9e4b6 --- /dev/null +++ b/chat_with_pdf/requirements.txt @@ -0,0 +1,2 @@ +streamlit +embedchain \ No newline at end of file diff --git a/chat_with_youtube_videos/chat_youtube.py b/chat_with_youtube_videos/chat_youtube.py new file mode 100644 index 0000000..61bbd7d --- /dev/null +++ b/chat_with_youtube_videos/chat_youtube.py @@ -0,0 +1,42 @@ +# Import the required libraries +import tempfile +import 
streamlit as st +from embedchain import App + +# Define the embedchain_bot function +def embedchain_bot(db_path, api_key): + return App.from_config( + config={ + "llm": {"provider": "openai", "config": {"model": "gpt-4-turbo", "temperature": 0.5, "api_key": api_key}}, + "vectordb": {"provider": "chroma", "config": {"dir": db_path}}, + "embedder": {"provider": "openai", "config": {"api_key": api_key}}, + } + ) + +# Create Streamlit app +st.title("Chat with YouTube Video 📺") +st.caption("This app allows you to chat with a YouTube video using OpenAI API") + +# Get OpenAI API key from user +openai_access_token = st.text_input("OpenAI API Key", type="password") + +# If OpenAI API key is provided, create an instance of App +if openai_access_token: + # Create a temporary directory to store the database + db_path = tempfile.mkdtemp() + # Create an instance of Embedchain App + app = embedchain_bot(db_path, openai_access_token) + # Get the YouTube video URL from the user + video_url = st.text_input("Enter YouTube Video URL", type="default") + # Add the video to the knowledge base + if video_url: + app.add(video_url, data_type="youtube_video") + st.success(f"Added {video_url} to knowledge base!") + # Ask a question about the video + prompt = st.text_input("Ask any question about the YouTube Video") + # Chat with the video + if prompt: + answer = app.chat(prompt) + st.write(answer) + + \ No newline at end of file diff --git a/docs/.DS_Store b/docs/.DS_Store new file mode 100644 index 0000000..27503ba Binary files /dev/null and b/docs/.DS_Store differ diff --git a/docs/banner/unwind.png b/docs/banner/unwind.png new file mode 100644 index 0000000..ef9f06a Binary files /dev/null and b/docs/banner/unwind.png differ