-
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrag_core.py
More file actions
69 lines (53 loc) · 2.46 KB
/
rag_core.py
File metadata and controls
69 lines (53 loc) · 2.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
# rag_core.py
# Module setup for the RAG core: stdlib, LangChain, and local-project imports,
# plus .env loading and a module-level logger.
import os
from typing import Any, Dict
from langchain_openai import ChatOpenAI
from langchain.chains import RetrievalQA
from indexing import load_and_index_documents
from langchain_core.language_models import BaseChatModel
from dotenv import load_dotenv
import logging
# Load .env at import time so RAGCore's os.getenv calls see the variables.
load_dotenv()
logger = logging.getLogger(__name__)
class RAGCore:
    """Retrieval-augmented generation core.

    Wires a vector-store retriever (built by ``indexing.load_and_index_documents``)
    to a chat model served through an OpenAI-compatible endpoint (configured via
    environment variables), using a LangChain ``RetrievalQA`` chain.
    """

    def __init__(self):
        """Build the retriever, the LLM client, and the QA chain.

        Reads ``RAG_LLM_MODEL``, ``RAG_LLM_BASE_URL`` and ``OPENROUTER_API_KEY``
        from the environment (loaded from .env at module import).

        Raises:
            RuntimeError: if the document index could not be loaded.
            ValueError: if any required environment variable is missing.
        """
        logger.info("Initializing RAGCore...")
        vector_store = load_and_index_documents()
        if not vector_store:
            raise RuntimeError("RAG system failed to load documents. Check indexing.py logs.")
        logger.info("Vector store loaded successfully")
        # Top-2 most similar chunks per query.
        self.retriever = vector_store.as_retriever(search_kwargs={"k": 2})
        rag_model = os.getenv("RAG_LLM_MODEL")
        rag_base_url = os.getenv("RAG_LLM_BASE_URL")
        openrouter_api_key = os.getenv("OPENROUTER_API_KEY")
        if not rag_model or not rag_base_url:
            raise ValueError("RAG LLM configuration (RAG_LLM_MODEL / RAG_LLM_BASE_URL) not set in .env.")
        if not openrouter_api_key:
            raise ValueError("OPENROUTER_API_KEY not set in .env. Get one from https://openrouter.ai/keys")
        # Lazy %-style args: the message is only formatted if INFO is enabled.
        logger.info("Initializing LLM with model: %s, base_url: %s", rag_model, rag_base_url)
        # temperature=0 for deterministic, grounded answers.
        self.llm: BaseChatModel = ChatOpenAI(
            model=rag_model,
            openai_api_key=openrouter_api_key,
            base_url=rag_base_url,
            temperature=0,
        )
        # return_source_documents=True makes the chain return the documents it
        # actually stuffed into the prompt, so query() needs no second
        # retriever call.
        self.qa_chain = RetrievalQA.from_chain_type(
            llm=self.llm,
            chain_type="stuff",
            retriever=self.retriever,
            return_source_documents=True,
        )
        logger.info("RAGCore initialized successfully")

    async def query(self, question: str) -> Dict[str, Any]:
        """Run the RAG chain asynchronously and return answer plus context.

        Args:
            question: the user's natural-language question.

        Returns:
            dict with:
              - "answer": the chain's answer string (fallback message if the
                chain returned no "result" key);
              - "context": list of page_content strings for the documents the
                chain actually used.

        Raises:
            Exception: any chain failure is logged (with traceback) and re-raised.
        """
        try:
            result = await self.qa_chain.ainvoke({"query": question})
            # FIX: the original issued a SECOND retrieval via
            # retriever.aget_relevant_documents (a deprecated API), doubling
            # retrieval work and potentially returning documents different
            # from those the chain answered with. Reuse the chain's own
            # source_documents (enabled by return_source_documents=True).
            source_docs = result.get("source_documents", [])
            context = [doc.page_content for doc in source_docs]
            return {
                "answer": result.get("result", "Sorry, I couldn't find an answer."),
                "context": context,
            }
        except Exception as e:
            logger.error(f"Query error: {e}", exc_info=True)
            raise