From 21cd0cd7b55631025a12fad0e0a0df6fa3e516e2 Mon Sep 17 00:00:00 2001 From: RajaBarhoumi Date: Sun, 10 Aug 2025 14:48:03 +0100 Subject: [PATCH 1/2] migrate RAG pipeline to Agentic RAG for dynamic planning, tool use, and adaptive responses --- query_data.py | 77 +++++++++++++++++++++++++----------------------- requirements.txt | 16 +++++----- 2 files changed, 48 insertions(+), 45 deletions(-) diff --git a/query_data.py b/query_data.py index 43ed9a5e6..349eb1fcd 100644 --- a/query_data.py +++ b/query_data.py @@ -1,52 +1,55 @@ +import os import argparse -# from dataclasses import dataclass -from langchain_community.vectorstores import Chroma -from langchain_openai import OpenAIEmbeddings -from langchain_openai import ChatOpenAI -from langchain.prompts import ChatPromptTemplate +from dotenv import load_dotenv -CHROMA_PATH = "chroma" - -PROMPT_TEMPLATE = """ -Answer the question based only on the following context: - -{context} +# Load environment variables +load_dotenv() +if not os.getenv("OPENAI_API_KEY"): + raise ValueError("OPENAI_API_KEY not found in .env") ---- - -Answer the question based on the above context: {question} -""" +from langchain_openai import ChatOpenAI, OpenAIEmbeddings +from langchain_chroma import Chroma +from langchain.tools import Tool +from langchain.agents import initialize_agent, AgentType +from langchain.prompts import PromptTemplate +CHROMA_PATH = "chroma" def main(): - # Create CLI. parser = argparse.ArgumentParser() parser.add_argument("query_text", type=str, help="The query text.") args = parser.parse_args() query_text = args.query_text - # Prepare the DB. + # Prepare DB + retriever embedding_function = OpenAIEmbeddings() - db = Chroma(persist_directory=CHROMA_PATH, embedding_function=embedding_function) - - # Search the DB. 
- results = db.similarity_search_with_relevance_scores(query_text, k=3) - if len(results) == 0 or results[0][1] < 0.7: - print(f"Unable to find matching results.") - return - - context_text = "\n\n---\n\n".join([doc.page_content for doc, _score in results]) - prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE) - prompt = prompt_template.format(context=context_text, question=query_text) - print(prompt) - - model = ChatOpenAI() - response_text = model.predict(prompt) - - sources = [doc.metadata.get("source", None) for doc, _score in results] - formatted_response = f"Response: {response_text}\nSources: {sources}" - print(formatted_response) - + db = Chroma( + persist_directory=CHROMA_PATH, + embedding_function=embedding_function + ) + retriever = db.as_retriever(search_kwargs={"k": 3}) + + # Define a retrieval tool + retrieval_tool = Tool( + name="Document Search", + func=lambda q: "\n".join([doc.page_content for doc in retriever.get_relevant_documents(q)]), + description="Searches the knowledge base for relevant information to answer a question." 
+ ) + + # LLM + llm = ChatOpenAI(model="gpt-4o-mini", temperature=0) + + # Agent + agent = initialize_agent( + tools=[retrieval_tool], + llm=llm, + agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, + verbose=True + ) + + # Ask the agent + answer = agent.run(query_text) + print("\nFinal Answer:\n", answer) if __name__ == "__main__": main() diff --git a/requirements.txt b/requirements.txt index 6578074e3..a445347a2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,13 +1,13 @@ -python-dotenv==1.0.1 # For reading environment variables stored in .env file -langchain==0.2.2 -langchain-community==0.2.3 -langchain-openai==0.1.8 # For embeddings -unstructured==0.14.4 # Document loading +python-dotenv==1.1.1 # For reading environment variables stored in .env file +langchain==0.3.27 +langchain-community==0.3.27 +langchain-openai==0.3.29 # For embeddings +unstructured==0.18.11 # Document loading # onnxruntime==1.17.1 # chromadb dependency: on Mac use `conda install onnxruntime -c conda-forge` # For Windows users, install Microsoft Visual C++ Build Tools first # install onnxruntime before installing `chromadb` -chromadb==0.5.0 # Vector storage -openai==1.31.1 # For embeddings -tiktoken==0.7.0 # For embeddings +chromadb==1.0.16 # Vector storage +openai==1.99.6 # For embeddings +tiktoken==0.11.0 # For embeddings # install markdown depenendies with: `pip install "unstructured[md]"` after install the requirements file. Leave this line commented out. 
From 55b48cb737094998e770b10c6664e04fd3aa1522 Mon Sep 17 00:00:00 2001 From: RajaBarhoumi Date: Sun, 10 Aug 2025 14:55:29 +0100 Subject: [PATCH 2/2] update README for Agentic RAG architecture and usage --- README.md | 73 +++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 47 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index 140f0c019..c8c665aef 100644 --- a/README.md +++ b/README.md @@ -1,47 +1,68 @@ -# Langchain RAG Tutorial +# Agentic RAG with LangChain -## Install dependencies +This project demonstrates how to build an Agentic Retrieval-Augmented Generation (RAG) pipeline using LangChain, where an LLM-powered agent can decide when and how to retrieve information from your knowledge base, instead of blindly fetching context every time. The agent dynamically chooses retrieval, reasoning, and tool usage, allowing more efficient and context-aware responses. -1. Do the following before installing the dependencies found in `requirements.txt` file because of current challenges installing `onnxruntime` through `pip install onnxruntime`. +## 🛠 Installation - - For MacOS users, a workaround is to first install `onnxruntime` dependency for `chromadb` using: - - ```python - conda install onnxruntime -c conda-forge - ``` - See this [thread](https://github.com/microsoft/onnxruntime/issues/11037) for additonal help if needed. - - - For Windows users, follow the guide [here](https://github.com/bycloudai/InstallVSBuildToolsWindows?tab=readme-ov-file) to install the Microsoft C++ Build Tools. Be sure to follow through to the last step to set the enviroment variable path. +### 1. Environment Setup +Some dependencies (like `onnxruntime`) can be tricky to install depending on your OS. Follow the steps for your platform before running `pip install`. +#### MacOS +```bash +conda install onnxruntime -c conda-forge +``` +See the [onnxruntime GitHub issue](https://github.com/microsoft/onnxruntime) for extra help. -2. 
Now run this command to install dependenies in the `requirements.txt` file. -```python +### 2. Install Requirements +```bash pip install -r requirements.txt ``` -3. Install markdown depenendies with: - -```python +### 3. Install Markdown Dependencies +```bash pip install "unstructured[md]" ``` -## Create database +## 📂 Building the Knowledge Base +This step ingests your documents into a Chroma vector database so the agent can retrieve relevant chunks when needed. -Create the Chroma DB. - -```python python create_database.py ``` -## Query the database +## 💬 Running the Agentic RAG +Instead of a simple retrieval pipeline, here the agent: +- Reads your query. +- Decides whether retrieval is needed. +- Queries Chroma DB only if relevant. +- Combines retrieved context with its own reasoning. + +Example query: +```bash +python query_data.py "How does Alice meet the Mad Hatter?" +``` + +## 🔑 API Keys +You’ll need an OpenAI API key set in your environment variables: -Query the Chroma DB. +```bash +# Mac/Linux +export OPENAI_API_KEY="your_api_key_here" -```python -python query_data.py "How does Alice meet the Mad Hatter?" +# Windows +setx OPENAI_API_KEY "your_api_key_here" ``` -> You'll also need to set up an OpenAI account (and set the OpenAI key in your environment variable) for this to work. +## 📺 Reference Tutorial +While this project is based on traditional RAG examples like [Pixegami’s LangChain RAG tutorial](https://github.com/pixegami/langchain-rag-tutorial), the code here has been adapted for Agentic RAG — allowing more intelligent, context-aware querying. -Here is a step-by-step tutorial video: [RAG+Langchain Python Project: Easy AI/Chat For Your Docs](https://www.youtube.com/watch?v=tcqEUSNCn8I&ab_channel=pixegami). 
+## 📜 Features of Agentic RAG vs Traditional RAG +| Feature | Traditional RAG | Agentic RAG | +|-----------------------------|-------------------------|------------------------| +| Always fetches context | ✅ Always | 🔄 Only when needed | +| Reasoning before retrieval | ❌ No | ✅ Yes | +| Multi-tool orchestration | ❌ Limited | ✅ Yes | +| Efficiency | ⚠️ Sometimes redundant | 🚀 Optimized | \ No newline at end of file