# rag_tool.py
from smolagents import tool
from langchain_chroma import Chroma
from langchain_huggingface import HuggingFaceEmbeddings
from reasoning_model import reasoner  # Import the reasoner agent from reasoning_model.py
import os

# Initialize the embedding model used for retrieval
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-mpnet-base-v2",
    model_kwargs={'device': 'cpu'}
)

# Directory for the persisted vector database
db_dir = os.path.join(os.path.dirname(__file__), "chroma_db")

# Initialize the vector store
vectordb = Chroma(persist_directory=db_dir, embedding_function=embeddings)


@tool
def rag_with_reasoner(user_query: str) -> str:
    """
    Searches the vector database for relevant context and generates a response
    using a reasoning model.

    Args:
        user_query: The user's question.
    """
    # Retrieve the three most relevant documents
    docs = vectordb.similarity_search(user_query, k=3)
    context = "\n\n".join(doc.page_content for doc in docs)

    # Build the prompt for the reasoning model
    prompt = f"""Based on the following context, answer the user's question concisely.
If insufficient information is found, suggest a better query for RAG.

Context:
{context}

Question: {user_query}

Answer:"""

    # Generate a response using the reasoner (DeepSeek-R1)
    response = reasoner.run(prompt, reset=False)
    return response
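

# Minimal usage sketch, assuming the chroma_db directory has already been
# populated with embedded documents (e.g. via vectordb.add_texts([...]) or a
# separate ingestion script) and that reasoning_model.py exposes a working
# `reasoner` agent. The sample query below is hypothetical.
if __name__ == "__main__":
    print(rag_with_reasoner("What topics are covered in the indexed documents?"))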