Mirror of https://github.com/Shubhamsaboo/awesome-llm-apps.git (synced 2026-04-29 22:59:12 -05:00)

Commit: chore: Updated the structure

chat_with_X_tutorials/chat_with_github/README.md (new file, 48 lines)
@@ -0,0 +1,48 @@
## 💬 Chat with GitHub Repo

LLM app with RAG to chat with a GitHub repository in just 30 lines of Python code. The app uses Retrieval Augmented Generation (RAG) to provide accurate answers to questions based on the content of the specified GitHub repository.

### Features

- Provide the name of a GitHub repository as input
- Ask questions about the content of the GitHub repository
- Get accurate answers using OpenAI's API and Embedchain

### How to get Started?

1. Clone the GitHub repository

```bash
git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git
```

2. Install the required dependencies:

```bash
pip install -r requirements.txt
```

3. Get your OpenAI API Key

- Sign up for an [OpenAI account](https://platform.openai.com/) (or the LLM provider of your choice) and obtain your API key.

4. Get your GitHub Access Token

- Create a [personal access token](https://docs.github.com/en/enterprise-server@3.6/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token) with the necessary permissions to access the desired GitHub repository.

5. Run the Streamlit App

```bash
streamlit run chat_github.py
```

### How it Works?

- The app prompts the user to enter their OpenAI API key, which is used to authenticate requests to the OpenAI API.
- It initializes an instance of the Embedchain App class and a GithubLoader with the provided GitHub Access Token.
- The user is prompted to enter a GitHub repository URL, which is then added to the Embedchain app's knowledge base using the GithubLoader.
- The user can ask questions about the GitHub repository using the text input.
- When a question is asked, the app uses the chat method of the Embedchain app to generate an answer based on the content of the repository, as shown in the sketch below.
- The app displays the generated answer to the user.
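Stripped of the Streamlit UI, the RAG flow in `chat_github.py` below reduces to three Embedchain calls. A minimal sketch (the token and the `owner/name` repo are placeholders; `OPENAI_API_KEY` is assumed to be set in the environment):

```python
from embedchain import App
from embedchain.loaders.github import GithubLoader

# The loader authenticates against GitHub with a personal access token
loader = GithubLoader(config={"token": "<your-github-token>"})

app = App()  # picks up OPENAI_API_KEY from the environment

# Index the repository, then ask questions grounded in its content
app.add("repo:owner/name type:repo", data_type="github", loader=loader)
print(app.chat("What does this repository do?"))
```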

chat_with_X_tutorials/chat_with_github/chat_github.py (new file, 36 lines)
@@ -0,0 +1,36 @@
# Import the required libraries
from embedchain.pipeline import Pipeline as App
from embedchain.loaders.github import GithubLoader
import streamlit as st
import os

# Configure the GitHub loader with a personal access token
# (replace the placeholder with your own token)
loader = GithubLoader(
    config={
        "token": "Your GitHub Token",
    }
)

# Create Streamlit app
st.title("Chat with GitHub Repository 💬")
st.caption("This app allows you to chat with a GitHub Repo using OpenAI API")

# Get OpenAI API key from user
openai_access_token = st.text_input("OpenAI API Key", type="password")

# If OpenAI API key is provided, create an instance of App
if openai_access_token:
    os.environ["OPENAI_API_KEY"] = openai_access_token
    # Create an instance of Embedchain App
    app = App()
    # Get the GitHub repo from the user
    git_repo = st.text_input("Enter the GitHub Repo", type="default")
    if git_repo:
        # Add the repo to the knowledge base
        app.add(f"repo:{git_repo} type:repo", data_type="github", loader=loader)
        st.success(f"Added {git_repo} to knowledge base!")
        # Ask a question about the GitHub repo
        prompt = st.text_input("Ask any question about the GitHub Repo")
        # Chat with the GitHub repo
        if prompt:
            answer = app.chat(prompt)
            st.write(answer)

chat_with_X_tutorials/chat_with_github/chat_github_llama3.py (new file, 72 lines)
@@ -0,0 +1,72 @@
# Import the required libraries
import tempfile
from embedchain import App
from embedchain.loaders.github import GithubLoader
import streamlit as st
import os

# Read the GitHub personal access token from the GITHUB_TOKEN environment
# variable (os.getenv expects the variable name, not the token itself)
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")

def get_loader():
    loader = GithubLoader(
        config={
            "token": GITHUB_TOKEN
        }
    )
    return loader

if "loader" not in st.session_state:
    st.session_state['loader'] = get_loader()

loader = st.session_state.loader

# Define the embedchain_bot function
def embedchain_bot(db_path):
    return App.from_config(
        config={
            "llm": {"provider": "ollama", "config": {"model": "llama3:instruct", "max_tokens": 250, "temperature": 0.5, "stream": True, "base_url": 'http://localhost:11434'}},
            "vectordb": {"provider": "chroma", "config": {"dir": db_path}},
            "embedder": {"provider": "ollama", "config": {"model": "llama3:instruct", "base_url": 'http://localhost:11434'}},
        }
    )

def load_repo(git_repo):
    global app
    # Add the repo to the knowledge base
    print(f"Adding {git_repo} to knowledge base!")
    app.add(f"repo:{git_repo} type:repo", data_type="github", loader=loader)
    st.success(f"Added {git_repo} to knowledge base!")

def make_db_path():
    ret = tempfile.mkdtemp(suffix="chroma")
    print(f"Created Chroma DB at {ret}")
    return ret

# Create Streamlit app
st.title("Chat with GitHub Repository 💬")
st.caption("This app allows you to chat with a GitHub Repo using Llama-3 running with Ollama")

# Initialize the Embedchain App
if "app" not in st.session_state:
    st.session_state['app'] = embedchain_bot(make_db_path())

app = st.session_state.app

# Get the GitHub repo from the user
git_repo = st.text_input("Enter the GitHub Repo", type="default")

# Only load each repo once per session
if git_repo and ("repos" not in st.session_state or git_repo not in st.session_state.repos):
    if "repos" not in st.session_state:
        st.session_state["repos"] = [git_repo]
    else:
        st.session_state.repos.append(git_repo)
    load_repo(git_repo)

# Ask a question about the GitHub repo
prompt = st.text_input("Ask any question about the GitHub Repo")
# Chat with the GitHub repo
if prompt:
    answer = st.session_state.app.chat(prompt)
    st.write(answer)
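The local variants in this commit (`chat_github_llama3.py` above and the `chat_pdf_llama3*.py` files below) all point at an Ollama server on `http://localhost:11434`, Ollama's default address. A minimal setup sketch, assuming a standard Ollama install:

```bash
# Fetch the model tag referenced in the embedchain config above
ollama pull llama3:instruct

# Start the server if it is not already running (listens on port 11434 by default)
ollama serve
```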

chat_with_X_tutorials/chat_with_github/requirements.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
streamlit
embedchain[github]

chat_with_X_tutorials/chat_with_gmail/README.md (new file, 42 lines)
@@ -0,0 +1,42 @@
## 📨 Chat with Gmail Inbox

LLM app with RAG to chat with Gmail in just 30 lines of Python Code. The app uses Retrieval Augmented Generation (RAG) to provide accurate answers to questions based on the content of your Gmail Inbox.

### Features

- Connect to your Gmail Inbox
- Ask questions about the content of your emails
- Get accurate answers using RAG and the selected LLM

### Installation

1. Clone the repository

```bash
git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git
```

2. Install the required dependencies

```bash
pip install -r requirements.txt
```

3. Set up your Google Cloud project and enable the Gmail API:

- Go to the [Google Cloud Console](https://console.cloud.google.com/) and create a new project.
- Navigate to "APIs & Services > OAuth consent screen" and configure the OAuth consent screen.
- Publish the OAuth consent screen by providing the necessary app information.
- Enable the Gmail API and create OAuth client ID credentials.
- Download the credentials in JSON format and save them as `credentials.json` in your working directory.

4. Get your OpenAI API Key

- Sign up for an [OpenAI account](https://platform.openai.com/) (or the LLM provider of your choice) and obtain your API key.

5. Run the Streamlit App

```bash
streamlit run chat_gmail.py
```
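Note that `chat_gmail.py` below hardcodes the filter `"to: me label:inbox"`. The string passed to `app.add(..., data_type="gmail")` is an ordinary Gmail search query, so the knowledge base can be narrowed with standard Gmail operators. A sketch (the sender and date are hypothetical examples; the config mirrors the app below):

```python
import tempfile
from embedchain import App

api_key = "<your-openai-api-key>"  # placeholder
app = App.from_config(
    config={
        "llm": {"provider": "openai", "config": {"model": "gpt-4-turbo", "temperature": 0.5, "api_key": api_key}},
        "vectordb": {"provider": "chroma", "config": {"dir": tempfile.mkdtemp()}},
        "embedder": {"provider": "openai", "config": {"api_key": api_key}},
    }
)

# Standard Gmail search operators scope what gets embedded
app.add("from:newsletter@example.com after:2024/01/01 label:inbox", data_type="gmail")
print(app.query("Summarize the newsletters from January."))
```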

chat_with_X_tutorials/chat_with_gmail/chat_gmail.py (new file, 40 lines)
@@ -0,0 +1,40 @@
import tempfile
import streamlit as st
from embedchain import App

# Define the embedchain_bot function
def embedchain_bot(db_path, api_key):
    return App.from_config(
        config={
            "llm": {"provider": "openai", "config": {"model": "gpt-4-turbo", "temperature": 0.5, "api_key": api_key}},
            "vectordb": {"provider": "chroma", "config": {"dir": db_path}},
            "embedder": {"provider": "openai", "config": {"api_key": api_key}},
        }
    )

# Create Streamlit app
st.title("Chat with your Gmail Inbox 📧")
st.caption("This app allows you to chat with your Gmail inbox using OpenAI API")

# Get the OpenAI API key from the user
openai_access_token = st.text_input("Enter your OpenAI API Key", type="password")

# Set the Gmail filter statically
gmail_filter = "to: me label:inbox"

# Add the Gmail data to the knowledge base if the OpenAI API key is provided
if openai_access_token:
    # Create a temporary directory to store the database
    db_path = tempfile.mkdtemp()
    # Create an instance of Embedchain App
    app = embedchain_bot(db_path, openai_access_token)
    app.add(gmail_filter, data_type="gmail")
    st.success("Added emails from Inbox to the knowledge base!")

    # Ask a question about the emails
    prompt = st.text_input("Ask any question about your emails")

    # Chat with the emails
    if prompt:
        answer = app.query(prompt)
        st.write(answer)

chat_with_X_tutorials/chat_with_gmail/requirements.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
streamlit
embedchain[gmail]

chat_with_X_tutorials/chat_with_pdf/README.md (new file, 33 lines)
@@ -0,0 +1,33 @@
## 📄 Chat with PDF

LLM app with RAG to chat with PDF in just 30 lines of Python Code. The app uses Retrieval Augmented Generation (RAG) to provide accurate answers to questions based on the content of the uploaded PDF.

### Features

- Upload a PDF document
- Ask questions about the content of the PDF
- Get accurate answers using RAG and the selected LLM

### How to get Started?

1. Clone the GitHub repository

```bash
git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git
```

2. Install the required dependencies

```bash
pip install -r requirements.txt
```

3. Get your OpenAI API Key

- Sign up for an [OpenAI account](https://platform.openai.com/) (or the LLM provider of your choice) and obtain your API key.

4. Run the Streamlit App

```bash
streamlit run chat_pdf.py
```

### Interactive Application Demo

https://github.com/Shubhamsaboo/awesome-llm-apps/assets/31396011/12bdfc11-c877-4fc7-9e70-63f21d2eb977

chat_with_X_tutorials/chat_with_pdf/chat_pdf.py (new file, 38 lines)
@@ -0,0 +1,38 @@
import os
import tempfile
import streamlit as st
from embedchain import App

def embedchain_bot(db_path, api_key):
    return App.from_config(
        config={
            "llm": {"provider": "openai", "config": {"api_key": api_key}},
            "vectordb": {"provider": "chroma", "config": {"dir": db_path}},
            "embedder": {"provider": "openai", "config": {"api_key": api_key}},
        }
    )

st.title("Chat with PDF")

openai_access_token = st.text_input("OpenAI API Key", type="password")

if openai_access_token:
    db_path = tempfile.mkdtemp()
    app = embedchain_bot(db_path, openai_access_token)

    pdf_file = st.file_uploader("Upload a PDF file", type="pdf")

    if pdf_file:
        # Write the upload to a temp file (closed before reading so the
        # contents are flushed to disk), then index and clean up
        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as f:
            f.write(pdf_file.getvalue())
        app.add(f.name, data_type="pdf_file")
        os.remove(f.name)
        st.success(f"Added {pdf_file.name} to knowledge base!")

    prompt = st.text_input("Ask a question about the PDF")

    if prompt:
        answer = app.chat(prompt)
        st.write(answer)
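One design note on `chat_pdf.py` above: `tempfile.mkdtemp()` creates a fresh Chroma directory on every run, so the indexed PDFs are discarded between sessions. Pointing the `vectordb` config at a fixed directory persists the index across restarts. A sketch using the same config shape (the path is an arbitrary example):

```python
from embedchain import App

def embedchain_bot(api_key):
    # Fixed on-disk directory instead of a throwaway tempdir, so
    # previously added PDFs survive app restarts
    return App.from_config(
        config={
            "llm": {"provider": "openai", "config": {"api_key": api_key}},
            "vectordb": {"provider": "chroma", "config": {"dir": "./chroma_db"}},
            "embedder": {"provider": "openai", "config": {"api_key": api_key}},
        }
    )
```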

chat_with_X_tutorials/chat_with_pdf/chat_pdf_llama3.2.py (new file, 69 lines)
@@ -0,0 +1,69 @@
# Import necessary libraries
import os
import tempfile
import streamlit as st
from embedchain import App
import base64
from streamlit_chat import message

# Define the embedchain_bot function
def embedchain_bot(db_path):
    return App.from_config(
        config={
            "llm": {"provider": "ollama", "config": {"model": "llama3.2:latest", "max_tokens": 250, "temperature": 0.5, "stream": True, "base_url": 'http://localhost:11434'}},
            "vectordb": {"provider": "chroma", "config": {"dir": db_path}},
            "embedder": {"provider": "ollama", "config": {"model": "llama3.2:latest", "base_url": 'http://localhost:11434'}},
        }
    )

# Add a function to display the PDF inline as a base64-encoded iframe
def display_pdf(file):
    base64_pdf = base64.b64encode(file.read()).decode('utf-8')
    pdf_display = f'<iframe src="data:application/pdf;base64,{base64_pdf}" width="100%" height="400" type="application/pdf"></iframe>'
    st.markdown(pdf_display, unsafe_allow_html=True)

st.title("Chat with PDF using Llama 3.2")
st.caption("This app allows you to chat with a PDF using Llama 3.2 running locally with Ollama!")

# Define the database path
db_path = tempfile.mkdtemp()

# Create a session state to store the app instance and chat history
if 'app' not in st.session_state:
    st.session_state.app = embedchain_bot(db_path)
if 'messages' not in st.session_state:
    st.session_state.messages = []

# Sidebar for PDF upload and preview
with st.sidebar:
    st.header("PDF Upload")
    pdf_file = st.file_uploader("Upload a PDF file", type="pdf")

    if pdf_file:
        st.subheader("PDF Preview")
        display_pdf(pdf_file)

        if st.button("Add to Knowledge Base"):
            with st.spinner("Adding PDF to knowledge base..."):
                # Write the upload to a temp file (closed before reading
                # so the contents are flushed), then index and clean up
                with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as f:
                    f.write(pdf_file.getvalue())
                st.session_state.app.add(f.name, data_type="pdf_file")
                os.remove(f.name)
                st.success(f"Added {pdf_file.name} to knowledge base!")

# Chat interface
for i, msg in enumerate(st.session_state.messages):
    message(msg["content"], is_user=msg["role"] == "user", key=str(i))

if prompt := st.chat_input("Ask a question about the PDF"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    message(prompt, is_user=True)

    with st.spinner("Thinking..."):
        response = st.session_state.app.chat(prompt)
        st.session_state.messages.append({"role": "assistant", "content": response})
        message(response)

# Clear chat history button
if st.button("Clear Chat History"):
    st.session_state.messages = []

chat_with_X_tutorials/chat_with_pdf/chat_pdf_llama3.py (new file, 41 lines)
@@ -0,0 +1,41 @@
# Import necessary libraries
import os
import tempfile
import streamlit as st
from embedchain import App

# Define the embedchain_bot function
def embedchain_bot(db_path):
    return App.from_config(
        config={
            "llm": {"provider": "ollama", "config": {"model": "llama3:instruct", "max_tokens": 250, "temperature": 0.5, "stream": True, "base_url": 'http://localhost:11434'}},
            "vectordb": {"provider": "chroma", "config": {"dir": db_path}},
            "embedder": {"provider": "ollama", "config": {"model": "llama3:instruct", "base_url": 'http://localhost:11434'}},
        }
    )

st.title("Chat with PDF")
st.caption("This app allows you to chat with a PDF using Llama3 running locally with Ollama!")

# Create a temporary directory to store the database
db_path = tempfile.mkdtemp()
# Create an instance of the Embedchain App
app = embedchain_bot(db_path)

# Upload a PDF file
pdf_file = st.file_uploader("Upload a PDF file", type="pdf")

# If a PDF file is uploaded, add it to the knowledge base
if pdf_file:
    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as f:
        f.write(pdf_file.getvalue())
    app.add(f.name, data_type="pdf_file")
    os.remove(f.name)
    st.success(f"Added {pdf_file.name} to knowledge base!")

# Ask a question about the PDF
prompt = st.text_input("Ask a question about the PDF")
# Display the answer
if prompt:
    answer = app.chat(prompt)
    st.write(answer)

chat_with_X_tutorials/chat_with_pdf/requirements.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
streamlit
embedchain

chat_with_X_tutorials/chat_with_research_papers/README.md (new file, 28 lines)
@@ -0,0 +1,28 @@
## 🔎 Chat with Arxiv Research Papers

This Streamlit app enables you to engage in interactive conversations with arXiv, a vast repository of scholarly articles, using GPT-4o. With this RAG application, you can easily access and explore the wealth of knowledge contained within arXiv.

### Features

- Engage in conversational interactions with arXiv
- Access and explore a vast collection of research papers
- Utilize OpenAI GPT-4o for intelligent responses

### How to get Started?

1. Clone the GitHub repository

```bash
git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git
```

2. Install the required dependencies:

```bash
pip install -r requirements.txt
```

3. Get your OpenAI API Key

- Sign up for an [OpenAI account](https://platform.openai.com/) (or the LLM provider of your choice) and obtain your API key.

4. Run the Streamlit App

```bash
streamlit run chat_arxiv.py
```

chat_with_X_tutorials/chat_with_research_papers/chat_arxiv.py (new file, 31 lines)
@@ -0,0 +1,31 @@
# Import the required libraries
import streamlit as st
from phi.assistant import Assistant
from phi.llm.openai import OpenAIChat
from phi.tools.arxiv_toolkit import ArxivToolkit

# Set up the Streamlit app
st.title("Chat with Research Papers 🔎🤖")
st.caption("This app allows you to chat with arXiv research papers using the OpenAI GPT-4o model.")

# Get OpenAI API key from user
openai_access_token = st.text_input("OpenAI API Key", type="password")

# If OpenAI API key is provided, create an instance of Assistant
if openai_access_token:
    # Create an instance of the Assistant with the arXiv toolkit
    assistant = Assistant(
        llm=OpenAIChat(
            model="gpt-4o",
            max_tokens=1024,
            temperature=0.9,
            api_key=openai_access_token,
        ),
        tools=[ArxivToolkit()],
    )

    # Get the search query from the user
    query = st.text_input("Enter the Search Query", type="default")

    if query:
        # Search arXiv using the AI Assistant
        response = assistant.run(query, stream=False)
        st.write(response)

@@ -0,0 +1,23 @@
# Import the required libraries
import streamlit as st
from phi.assistant import Assistant
from phi.llm.ollama import Ollama
from phi.tools.arxiv_toolkit import ArxivToolkit

# Set up the Streamlit app
st.title("Chat with Research Papers 🔎🤖")
st.caption("This app allows you to chat with arXiv research papers using Llama-3 running locally.")

# Create an instance of the Assistant with the arXiv toolkit
assistant = Assistant(
    llm=Ollama(model="llama3:instruct"),
    tools=[ArxivToolkit()],
    show_tool_calls=True,
)

# Get the search query from the user
query = st.text_input("Enter the Search Query", type="default")

if query:
    # Search arXiv using the AI Assistant
    response = assistant.run(query, stream=False)
    st.write(response)

chat_with_X_tutorials/chat_with_research_papers/requirements.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
streamlit
phidata
arxiv
openai
pypdf

chat_with_X_tutorials/chat_with_substack/README.md (new file, 28 lines)
@@ -0,0 +1,28 @@
## 📝 Chat with Substack Newsletter

Streamlit app that allows you to chat with a Substack newsletter using OpenAI's API and the Embedchain library. This app leverages GPT-4 to provide accurate answers to questions based on the content of the specified Substack newsletter.

### Features

- Input a Substack blog URL
- Ask questions about the content of the Substack newsletter
- Get accurate answers using OpenAI's API and Embedchain

### How to get Started?

1. Clone the GitHub repository

```bash
git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git
```

2. Install the required dependencies:

```bash
pip install -r requirements.txt
```

3. Get your OpenAI API Key

- Sign up for an [OpenAI account](https://platform.openai.com/) (or the LLM provider of your choice) and obtain your API key.

4. Run the Streamlit App

```bash
streamlit run chat_substack.py
```

chat_with_X_tutorials/chat_with_substack/chat_substack.py (new file, 41 lines)
@@ -0,0 +1,41 @@
import streamlit as st
from embedchain import App
import tempfile

# Define the embedchain_bot function
def embedchain_bot(db_path, api_key):
    return App.from_config(
        config={
            "llm": {"provider": "openai", "config": {"model": "gpt-4-turbo", "temperature": 0.5, "api_key": api_key}},
            "vectordb": {"provider": "chroma", "config": {"dir": db_path}},
            "embedder": {"provider": "openai", "config": {"api_key": api_key}},
        }
    )

st.title("Chat with Substack Newsletter 📝")
st.caption("This app allows you to chat with a Substack newsletter using OpenAI API")

# Get OpenAI API key from user
openai_access_token = st.text_input("OpenAI API Key", type="password")

if openai_access_token:
    # Create a temporary directory to store the database
    db_path = tempfile.mkdtemp()
    # Create an instance of Embedchain App
    app = embedchain_bot(db_path, openai_access_token)

    # Get the Substack blog URL from the user
    substack_url = st.text_input("Enter Substack Newsletter URL", type="default")

    if substack_url:
        # Add the Substack blog to the knowledge base
        app.add(substack_url, data_type='substack')
        st.success(f"Added {substack_url} to knowledge base!")

        # Ask a question about the Substack blog
        query = st.text_input("Ask any question about the substack newsletter!")

        # Query the Substack blog
        if query:
            result = app.query(query)
            st.write(result)

chat_with_X_tutorials/chat_with_substack/requirements.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
streamlit
embedchain

chat_with_X_tutorials/chat_with_youtube_videos/README.md (new file, 30 lines)
@@ -0,0 +1,30 @@
## 📽️ Chat with YouTube Videos

LLM app with RAG to chat with YouTube Videos in just 30 lines of Python Code. The app uses Retrieval Augmented Generation (RAG) to provide accurate answers to questions based on the content of the selected video.

### Features

- Input a YouTube video URL
- Ask questions about the content of the video
- Get accurate answers using RAG and the selected LLM

### How to get Started?

1. Clone the GitHub repository

```bash
git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git
```

2. Install the required dependencies:

```bash
pip install -r requirements.txt
```

3. Get your OpenAI API Key

- Sign up for an [OpenAI account](https://platform.openai.com/) (or the LLM provider of your choice) and obtain your API key.

4. Run the Streamlit App

```bash
streamlit run chat_youtube.py
```

chat_with_X_tutorials/chat_with_youtube_videos/chat_youtube.py (new file, 42 lines)
@@ -0,0 +1,42 @@
# Import the required libraries
import tempfile
import streamlit as st
from embedchain import App

# Define the embedchain_bot function
def embedchain_bot(db_path, api_key):
    return App.from_config(
        config={
            "llm": {"provider": "openai", "config": {"model": "gpt-4o", "temperature": 0.5, "api_key": api_key}},
            "vectordb": {"provider": "chroma", "config": {"dir": db_path}},
            "embedder": {"provider": "openai", "config": {"api_key": api_key}},
        }
    )

# Create Streamlit app
st.title("Chat with YouTube Video 📺")
st.caption("This app allows you to chat with a YouTube video using OpenAI API")

# Get OpenAI API key from user
openai_access_token = st.text_input("OpenAI API Key", type="password")

# If OpenAI API key is provided, create an instance of App
if openai_access_token:
    # Create a temporary directory to store the database
    db_path = tempfile.mkdtemp()
    # Create an instance of Embedchain App
    app = embedchain_bot(db_path, openai_access_token)
    # Get the YouTube video URL from the user
    video_url = st.text_input("Enter YouTube Video URL", type="default")
    # Add the video to the knowledge base
    if video_url:
        app.add(video_url, data_type="youtube_video")
        st.success(f"Added {video_url} to knowledge base!")
        # Ask a question about the video
        prompt = st.text_input("Ask any question about the YouTube Video")
        # Chat with the video
        if prompt:
            answer = app.chat(prompt)
            st.write(answer)

chat_with_X_tutorials/chat_with_youtube_videos/requirements.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
streamlit
embedchain[youtube]