Update llama3.1_local_rag.py

Fixes: the class `OllamaEmbeddings` was deprecated in LangChain 0.3.1 and will be removed in 1.0.0. An updated version of the class exists in the `langchain-ollama` package and should be used instead.
This commit is contained in:
Sebastian Schmidt
2025-02-02 09:46:27 +11:00
committed by publicarray
parent 610c8adee8
commit dad10e82cb
2 changed files with 12 additions and 7 deletions

View File

@@ -1,15 +1,19 @@
import streamlit as st
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_ollama import OllamaEmbeddings
from langchain_ollama import ChatOllama

st.title("Chat with Webpage 🌐")
st.caption("This app allows you to chat with a webpage using local llama3 and RAG")

# Get the webpage URL from the user
webpage_url = st.text_input("Enter Webpage URL", type="default")

# Connect to Ollama
ollama_endpoint = "http://127.0.0.1:11434"
ollama_model = "llama3.1"
ollama = ChatOllama(model=ollama_model, base_url=ollama_endpoint)

if webpage_url:
    # 1. Load the data
@@ -19,14 +23,14 @@ if webpage_url:
    splits = text_splitter.split_documents(docs)

    # 2. Create Ollama embeddings and vector store
    embeddings = OllamaEmbeddings(model=ollama_model, base_url=ollama_endpoint)
    vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)

    # 3. Call Ollama Llama3 model
    def ollama_llm(question, context):
        formatted_prompt = f"Question: {question}\n\nContext: {context}"
        response = ollama.invoke([('human', formatted_prompt)])
        return response.content.strip()

    # 4. RAG Setup
    retriever = vectorstore.as_retriever()

View File

@@ -2,3 +2,4 @@ streamlit
ollama
langchain
langchain_community
langchain_ollama