diff --git a/ai_agent_tutorials/ai_customer_support_agent/customer_support_agent.py b/ai_agent_tutorials/ai_customer_support_agent/customer_support_agent.py index a6fd43f..e9fc39c 100644 --- a/ai_agent_tutorials/ai_customer_support_agent/customer_support_agent.py +++ b/ai_agent_tutorials/ai_customer_support_agent/customer_support_agent.py @@ -34,8 +34,10 @@ if openai_api_key: def handle_query(self, query, user_id=None): relevant_memories = self.memory.search(query=query, user_id=user_id) context = "Relevant past information:\n" - for mem in relevant_memories: - context += f"- {mem['text']}\n" + if relevant_memories and "results" in relevant_memories: + for memory in relevant_memories["results"]: + if "memory" in memory: + context += f"- {memory['memory']}\n" full_prompt = f"{context}\nCustomer: {query}\nSupport Agent:" @@ -126,8 +128,10 @@ if openai_api_key: memories = support_agent.get_memories(user_id=customer_id) if memories: st.sidebar.write(f"Memory for customer **{customer_id}**:") - for mem in memories: - st.sidebar.write(f"- {mem['text']}") + if memories and "results" in memories: + for memory in memories["results"]: + if "memory" in memory: + st.sidebar.write(f"- {memory['memory']}") else: st.sidebar.info("No memory found for this customer ID.") else: diff --git a/ai_agent_tutorials/ai_customer_support_agent/requirements.txt b/ai_agent_tutorials/ai_customer_support_agent/requirements.txt index 088b5ab..c7be07b 100644 --- a/ai_agent_tutorials/ai_customer_support_agent/requirements.txt +++ b/ai_agent_tutorials/ai_customer_support_agent/requirements.txt @@ -1,3 +1,3 @@ streamlit openai -mem0ai \ No newline at end of file +mem0ai==0.1.29 \ No newline at end of file diff --git a/ai_agent_tutorials/legal_ai_agent/legal_agent.py b/ai_agent_tutorials/legal_ai_agent/legal_agent.py deleted file mode 100644 index 7bdb400..0000000 --- a/ai_agent_tutorials/legal_ai_agent/legal_agent.py +++ /dev/null @@ -1,36 +0,0 @@ -from phi.agent import Agent -from phi.model.openai import 
OpenAIChat -from phi.knowledge.pdf import PDFKnowledgeBase, PDFReader -from phi.vectordb.lancedb import LanceDb, SearchType -from phi.playground import Playground, serve_playground_app -from phi.tools.duckduckgo import DuckDuckGo - -# Set up configurations -DB_URI = "tmp/legal_docs_db" - -# Create a knowledge base for legal documents -knowledge_base = PDFKnowledgeBase( - path="tmp/legal_docs", - vector_db=LanceDb( - table_name="legal_documents", - uri=DB_URI, - search_type=SearchType.vector - ), - reader=PDFReader(chunk=True), - num_documents=5 -) - -# Create the agent -agent = Agent( - model=OpenAIChat(id="gpt-4"), - agent_id="legal-analysis-agent", - knowledge=knowledge_base, - tools=[DuckDuckGo()], - show_tool_calls=True, - markdown=True, -) - -app = Playground(agents=[agent]).get_app() - -if __name__ == "__main__": - serve_playground_app("legal_agent:app", reload=True) \ No newline at end of file diff --git a/ai_agent_tutorials/legal_ai_agent/requirements.txt b/ai_agent_tutorials/legal_ai_agent/requirements.txt deleted file mode 100644 index 53c213c..0000000 --- a/ai_agent_tutorials/legal_ai_agent/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -streamlit -phidata -openai -lancedb -tantivy -pypdf -duckduckgo-search \ No newline at end of file diff --git a/llm_apps_with_memory_tutorials/ai_travel_agent_memory/requirements.txt b/llm_apps_with_memory_tutorials/ai_travel_agent_memory/requirements.txt index 088b5ab..c7be07b 100644 --- a/llm_apps_with_memory_tutorials/ai_travel_agent_memory/requirements.txt +++ b/llm_apps_with_memory_tutorials/ai_travel_agent_memory/requirements.txt @@ -1,3 +1,3 @@ streamlit openai -mem0ai \ No newline at end of file +mem0ai==0.1.29 \ No newline at end of file diff --git a/llm_apps_with_memory_tutorials/ai_travel_agent_memory/travel_agent_memory.py b/llm_apps_with_memory_tutorials/ai_travel_agent_memory/travel_agent_memory.py index f257a1e..ed57cfa 100644 --- 
a/llm_apps_with_memory_tutorials/ai_travel_agent_memory/travel_agent_memory.py +++ b/llm_apps_with_memory_tutorials/ai_travel_agent_memory/travel_agent_memory.py @@ -34,17 +34,19 @@ if openai_api_key: st.session_state.messages = [] st.session_state.previous_user_id = user_id - if st.sidebar.button("View Memory Info"): - if user_id: - memories = memory.get_all(user_id=user_id) - if memories: - st.sidebar.write(f"Memory for user **{user_id}**:") - for mem in memories: - st.sidebar.write(f"- {mem['text']}") - else: - st.sidebar.info("No memory found for this user ID.") + # Sidebar option to show memory + st.sidebar.title("Memory Info") + if st.button("View My Memory"): + memories = memory.get_all(user_id=user_id) + if memories and "results" in memories: + st.write(f"Memory history for **{user_id}**:") + for mem in memories["results"]: + if "memory" in mem: + st.write(f"- {mem['memory']}") else: - st.sidebar.error("Please enter a username to view memory info.") + st.sidebar.info("No learning history found for this user ID.") + else: + st.sidebar.error("Please enter a username to view memory info.") # Initialize the chat history if "messages" not in st.session_state: @@ -67,8 +69,10 @@ if openai_api_key: # Retrieve relevant memories relevant_memories = memory.search(query=prompt, user_id=user_id) context = "Relevant past information:\n" - for mem in relevant_memories: - context += f"- {mem['text']}\n" + if relevant_memories and "results" in relevant_memories: + for mem in relevant_memories["results"]: + if "memory" in mem: + context += f"- {mem['memory']}\n" # Prepare the full prompt full_prompt = f"{context}\nHuman: {prompt}\nAI:" diff --git a/llm_apps_with_memory_tutorials/llama3_stateful_chat/local_llama3_chat.py b/llm_apps_with_memory_tutorials/llama3_stateful_chat/local_llama3_chat.py new file mode 100644 index 0000000..e0cca8f --- /dev/null +++ b/llm_apps_with_memory_tutorials/llama3_stateful_chat/local_llama3_chat.py @@ -0,0 +1,37 @@ +import streamlit as 
st +from openai import OpenAI + +# Set up the Streamlit App +st.title("Local ChatGPT with Memory 🦙") +st.caption("Chat with locally hosted memory-enabled Llama-3 using the LM Studio 💯") + +# Point to the local server setup using LM Studio +client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio") + +# Initialize the chat history +if "messages" not in st.session_state: + st.session_state.messages = [] + +# Display the chat history +for message in st.session_state.messages: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + +# Accept user input +if prompt := st.chat_input("What is up?"): + st.session_state.messages.append({"role": "system", "content": "When the input starts with /add, don't follow up with a prompt."}) + # Add user message to chat history + st.session_state.messages.append({"role": "user", "content": prompt}) + # Display user message in chat message container + with st.chat_message("user"): + st.markdown(prompt) + # Generate response + response = client.chat.completions.create( + model="lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF", + messages=st.session_state.messages, temperature=0.7 + ) + # Add assistant response to chat history + st.session_state.messages.append({"role": "assistant", "content": response.choices[0].message.content}) + # Display assistant response in chat message container + with st.chat_message("assistant"): + st.markdown(response.choices[0].message.content) diff --git a/llm_apps_with_memory_tutorials/llama3_stateful_chat/requirements.txt b/llm_apps_with_memory_tutorials/llama3_stateful_chat/requirements.txt new file mode 100644 index 0000000..959b0d7 --- /dev/null +++ b/llm_apps_with_memory_tutorials/llama3_stateful_chat/requirements.txt @@ -0,0 +1,2 @@ +streamlit +openai \ No newline at end of file diff --git a/llm_apps_with_memory_tutorials/llm_app_personalized_memory/llm_app_memory.py b/llm_apps_with_memory_tutorials/llm_app_personalized_memory/llm_app_memory.py index 
4712147..c355131 100644 --- a/llm_apps_with_memory_tutorials/llm_app_personalized_memory/llm_app_memory.py +++ b/llm_apps_with_memory_tutorials/llm_app_personalized_memory/llm_app_memory.py @@ -1,3 +1,4 @@ +import os import streamlit as st from mem0 import Memory from openai import OpenAI @@ -6,6 +7,7 @@ st.title("LLM App with Memory 🧠") st.caption("LLM App with personalized memory layer that remembers ever user's choice and interests") openai_api_key = st.text_input("Enter OpenAI API Key", type="password") +os.environ["OPENAI_API_KEY"] = openai_api_key if openai_api_key: # Initialize OpenAI client @@ -16,6 +18,7 @@ if openai_api_key: "vector_store": { "provider": "qdrant", "config": { + "collection_name": "llm_app_memory", "host": "localhost", "port": 6333, } @@ -59,11 +62,12 @@ if openai_api_key: # Sidebar option to show memory st.sidebar.title("Memory Info") - if st.sidebar.button("View Memory Info"): - memories = memory.get_all(user_id=user_id) - if memories: - st.sidebar.write(f"You are viewing memory for user **{user_id}**") - for mem in memories: - st.sidebar.write(f"- {mem['text']}") - else: - st.sidebar.info("No learning history found for this user ID.") \ No newline at end of file + if st.button("View My Memory"): + memories = memory.get_all(user_id=user_id) + if memories and "results" in memories: + st.write(f"Memory history for **{user_id}**:") + for mem in memories["results"]: + if "memory" in mem: + st.write(f"- {mem['memory']}") + else: + st.sidebar.info("No learning history found for this user ID.") \ No newline at end of file diff --git a/llm_apps_with_memory_tutorials/llm_app_personalized_memory/requirements.txt b/llm_apps_with_memory_tutorials/llm_app_personalized_memory/requirements.txt index 88649cc..c7be07b 100644 --- a/llm_apps_with_memory_tutorials/llm_app_personalized_memory/requirements.txt +++ b/llm_apps_with_memory_tutorials/llm_app_personalized_memory/requirements.txt @@ -1,4 +1,3 @@ streamlit openai -mem0ai -litellm \ No newline at end 
of file +mem0ai==0.1.29 \ No newline at end of file diff --git a/llm_apps_with_memory_tutorials/local_chatgpt_with_memory/README.md b/llm_apps_with_memory_tutorials/local_chatgpt_with_memory/README.md new file mode 100644 index 0000000..41baaa1 --- /dev/null +++ b/llm_apps_with_memory_tutorials/local_chatgpt_with_memory/README.md @@ -0,0 +1,40 @@ +## 🧠 Local ChatGPT using Llama 3.1 with Personal Memory +This Streamlit application implements a fully local ChatGPT-like experience using Llama 3.1, featuring personalized memory storage for each user. All components, including the language model, embeddings, and vector store, run locally without requiring external API keys. + +### Features +- Fully local implementation with no external API dependencies +- Powered by Llama 3.1 via Ollama +- Personal memory space for each user +- Local embedding generation using Nomic Embed +- Vector storage with Qdrant + +### How to get Started? + +1. Clone the GitHub repository +```bash +git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git +``` + +2. Install the required dependencies: + +```bash +cd llm_apps_with_memory_tutorials/local_chatgpt_with_memory +pip install -r requirements.txt +``` + +3. Install and start [Qdrant](https://qdrant.tech/) vector database locally + +```bash +docker pull qdrant/qdrant +docker run -p 6333:6333 qdrant/qdrant +``` + +4. Install [Ollama](https://ollama.com/download) and pull Llama 3.1 +```bash +ollama pull llama3.1 +``` + +5. 
Run the Streamlit App +```bash +streamlit run local_chatgpt_memory.py +``` \ No newline at end of file diff --git a/llm_apps_with_memory_tutorials/local_chatgpt_with_memory/local_chatgpt_memory.py b/llm_apps_with_memory_tutorials/local_chatgpt_with_memory/local_chatgpt_memory.py index e0cca8f..6b14e11 100644 --- a/llm_apps_with_memory_tutorials/local_chatgpt_with_memory/local_chatgpt_memory.py +++ b/llm_apps_with_memory_tutorials/local_chatgpt_with_memory/local_chatgpt_memory.py @@ -1,37 +1,137 @@ import streamlit as st -from openai import OpenAI +from mem0 import Memory +from litellm import completion -# Set up the Streamlit App -st.title("Local ChatGPT with Memory 🦙") -st.caption("Chat with locally hosted memory-enabled Llama-3 using the LM Studio 💯") +# Configuration for Memory +config = { + "vector_store": { + "provider": "qdrant", + "config": { + "collection_name": "local-chatgpt-memory", + "host": "localhost", + "port": 6333, + "embedding_model_dims": 768, + }, + }, + "llm": { + "provider": "ollama", + "config": { + "model": "llama3.1:latest", + "temperature": 0, + "max_tokens": 8000, + "ollama_base_url": "http://localhost:11434", # Ensure this URL is correct + }, + }, + "embedder": { + "provider": "ollama", + "config": { + "model": "nomic-embed-text:latest", + # Alternatively, you can use "snowflake-arctic-embed:latest" + "ollama_base_url": "http://localhost:11434", + }, + }, + "version": "v1.1" +} -# Point to the local server setup using LM Studio -client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio") +st.title("Local ChatGPT using Llama 3.1 with Personal Memory 🧠") +st.caption("Each user gets their own personalized memory space!") -# Initialize the chat history +# Initialize session state for chat history and previous user ID if "messages" not in st.session_state: st.session_state.messages = [] +if "previous_user_id" not in st.session_state: + st.session_state.previous_user_id = None -# Display the chat history -for message in 
st.session_state.messages: - with st.chat_message(message["role"]): - st.markdown(message["content"]) +# Sidebar for user authentication +with st.sidebar: + st.title("User Settings") + user_id = st.text_input("Enter your Username", key="user_id") + + # Check if user ID has changed + if user_id != st.session_state.previous_user_id: + st.session_state.messages = [] # Clear chat history + st.session_state.previous_user_id = user_id # Update previous user ID + + if user_id: + st.success(f"Logged in as: {user_id}") + + # Initialize Memory with the configuration + m = Memory.from_config(config) + + # Memory viewing section + st.header("Memory Context") + if st.button("View My Memory"): + memories = m.get_all(user_id=user_id) + if memories and "results" in memories: + st.write(f"Memory history for **{user_id}**:") + for memory in memories["results"]: + if "memory" in memory: + st.write(f"- {memory['memory']}") -# Accept user input -if prompt := st.chat_input("What is up?"): - st.session_state.messages.append({"role": "system", "content": "When the input starts with /add, don't follow up with a prompt."}) - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt}) - # Display user message in chat message container - with st.chat_message("user"): - st.markdown(prompt) - # Generate response - response = client.chat.completions.create( - model="lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF", - messages=st.session_state.messages, temperature=0.7 - ) - # Add assistant response to chat history - st.session_state.messages.append({"role": "assistant", "content": response.choices[0].message.content}) - # Display assistant response in chat message container - with st.chat_message("assistant"): - st.markdown(response.choices[0].message.content) +# Main chat interface +if user_id: # Only show chat interface if user is "logged in" + # Display chat history + for message in st.session_state.messages: + with 
st.chat_message(message["role"]): + st.markdown(message["content"]) + + # User input + if prompt := st.chat_input("What is your message?"): + # Add user message to chat history + st.session_state.messages.append({"role": "user", "content": prompt}) + + # Display user message + with st.chat_message("user"): + st.markdown(prompt) + + # Add to memory + m.add(prompt, user_id=user_id) + + # Get context from memory + memories = m.get_all(user_id=user_id) + context = "" + if memories and "results" in memories: + for memory in memories["results"]: + if "memory" in memory: + context += f"- {memory['memory']}\n" + + # Generate assistant response + with st.chat_message("assistant"): + message_placeholder = st.empty() + full_response = "" + + # Stream the response + try: + response = completion( + model="ollama/llama3.1:latest", + messages=[ + {"role": "system", "content": "You are a helpful assistant with access to past conversations. Use the context provided to give personalized responses."}, + {"role": "user", "content": f"Context from previous conversations with {user_id}: {context}\nCurrent message: {prompt}"} + ], + api_base="http://localhost:11434", + stream=True + ) + + # Process streaming response + for chunk in response: + if hasattr(chunk, 'choices') and len(chunk.choices) > 0: + content = chunk.choices[0].delta.get('content', '') + if content: + full_response += content + message_placeholder.markdown(full_response + "▌") + + # Final update + message_placeholder.markdown(full_response) + except Exception as e: + st.error(f"Error generating response: {str(e)}") + full_response = "I apologize, but I encountered an error generating the response." 
+ message_placeholder.markdown(full_response) + + # Add assistant response to chat history + st.session_state.messages.append({"role": "assistant", "content": full_response}) + + # Add response to memory + m.add(f"Assistant: {full_response}", user_id=user_id) + +else: + st.info("👈 Please enter your username in the sidebar to start chatting!") \ No newline at end of file diff --git a/llm_apps_with_memory_tutorials/local_chatgpt_with_memory/requirements.txt b/llm_apps_with_memory_tutorials/local_chatgpt_with_memory/requirements.txt index 959b0d7..0999609 100644 --- a/llm_apps_with_memory_tutorials/local_chatgpt_with_memory/requirements.txt +++ b/llm_apps_with_memory_tutorials/local_chatgpt_with_memory/requirements.txt @@ -1,2 +1,4 @@ streamlit -openai \ No newline at end of file +openai +mem0ai==0.1.29 +litellm \ No newline at end of file diff --git a/llm_apps_with_memory_tutorials/multi_llm_memory/README.md b/llm_apps_with_memory_tutorials/multi_llm_memory/README.md new file mode 100644 index 0000000..6cf3945 --- /dev/null +++ b/llm_apps_with_memory_tutorials/multi_llm_memory/README.md @@ -0,0 +1,39 @@ +## 🧠 Multi-LLM App with Shared Memory +This Streamlit application demonstrates a multi-LLM system with a shared memory layer, allowing users to interact with different language models while maintaining conversation history and context across sessions. + +### Features + +- Support for multiple LLMs: + - OpenAI's GPT-4o + - Anthropic's Claude 3.5 Sonnet + +- Persistent memory using Qdrant vector store +- User-specific conversation history +- Memory retrieval for contextual responses +- User-friendly interface with LLM selection + +### How to get Started? + +1. Clone the GitHub repository +```bash +git clone https://github.com/Shubhamsaboo/awesome-llm-apps.git +``` + +2. Install the required dependencies: + +```bash +pip install -r requirements.txt +``` + +3. Ensure Qdrant is running: +The app expects Qdrant to be running on localhost:6333. 
Adjust the configuration in the code if your setup is different. + +```bash +docker pull qdrant/qdrant +docker run -p 6333:6333 qdrant/qdrant +``` + +4. Run the Streamlit App +```bash +streamlit run multi_llm_memory.py +``` \ No newline at end of file diff --git a/llm_apps_with_memory_tutorials/llm_app_personalized_memory/multi_llm_memory.py b/llm_apps_with_memory_tutorials/multi_llm_memory/multi_llm_memory.py similarity index 78% rename from llm_apps_with_memory_tutorials/llm_app_personalized_memory/multi_llm_memory.py rename to llm_apps_with_memory_tutorials/multi_llm_memory/multi_llm_memory.py index c62b7ea..a251c1e 100644 --- a/llm_apps_with_memory_tutorials/llm_app_personalized_memory/multi_llm_memory.py +++ b/llm_apps_with_memory_tutorials/multi_llm_memory/multi_llm_memory.py @@ -4,7 +4,7 @@ from openai import OpenAI import os from litellm import completion -st.title("LLM App with Shared Memory 🧠") +st.title("Multi-LLM App with Shared Memory 🧠") st.caption("LLM App with a personalized memory layer that remembers each user's choices and interests across multiple users and LLMs") openai_api_key = st.text_input("Enter OpenAI API Key", type="password") @@ -50,9 +50,10 @@ if openai_api_key and anthropic_api_key: with st.spinner('Searching...'): relevant_memories = memory.search(query=prompt, user_id=user_id) context = "Relevant past information:\n" - - for mem in relevant_memories: - context += f"- {mem['text']}\n" + if relevant_memories and "results" in relevant_memories: + for mem in relevant_memories["results"]: + if "memory" in mem: + context += f"- {mem['memory']}\n" full_prompt = f"{context}\nHuman: {prompt}\nAI:" @@ -76,12 +77,14 @@ if openai_api_key and anthropic_api_key: memory.add(answer, user_id=user_id) + # Sidebar option to show memory st.sidebar.title("Memory Info") - if st.sidebar.button("View Memory Info"): - memories = memory.get_all(user_id=user_id) - if memories: - st.sidebar.write(f"You are viewing memory for user **{user_id}**") - for 
mem in memories: - st.sidebar.write(f"- {mem['text']}") - else: - st.sidebar.info("No learning history found for this user ID.") \ No newline at end of file + if st.button("View My Memory"): + memories = memory.get_all(user_id=user_id) + if memories and "results" in memories: + st.write(f"Memory history for **{user_id}**:") + for mem in memories["results"]: + if "memory" in mem: + st.write(f"- {mem['memory']}") + else: + st.sidebar.info("No learning history found for this user ID.") \ No newline at end of file diff --git a/llm_apps_with_memory_tutorials/multi_llm_memory/requirements.txt b/llm_apps_with_memory_tutorials/multi_llm_memory/requirements.txt new file mode 100644 index 0000000..0999609 --- /dev/null +++ b/llm_apps_with_memory_tutorials/multi_llm_memory/requirements.txt @@ -0,0 +1,4 @@ +streamlit +openai +mem0ai==0.1.29 +litellm \ No newline at end of file diff --git a/rag_tutorials/local_rag_agent/README.md b/rag_tutorials/local_rag_agent/README.md index f5ff1f8..7ae6777 100644 --- a/rag_tutorials/local_rag_agent/README.md +++ b/rag_tutorials/local_rag_agent/README.md @@ -33,7 +33,6 @@ docker run -p 6333:6333 qdrant/qdrant 4. Install [Ollama](https://ollama.com/download) and pull Llama 3.2 ```bash ollama pull llama3.2 - ``` 4. Run the AI RAG Agent