ScholarBot / llm / query_refiner.py
vinny4's picture
initial commit
9c37331
raw
history blame contribute delete
675 Bytes
from langchain_groq import ChatGroq
from src.utils import load_config
from langchain.prompts import ChatPromptTemplate
class QueryRefiner:
    """Rewrites a raw user query into a refined form using a Groq-hosted LLM.

    The model name, sampling temperature, token limit, and system prompt are
    all read from a YAML config file, so prompt/model tuning requires no code
    change.
    """

    def __init__(self, config_path: str = "./configs/llm_refiner.yaml"):
        """Build the LLM client and prompt template from *config_path*.

        Args:
            config_path: Path to the refiner YAML config. Defaults to the
                project's standard location, so existing zero-arg callers
                are unaffected.
        """
        config = load_config(config_path)
        self.model = ChatGroq(
            model=config["model_name"],
            temperature=config["temperature"],
            max_tokens=config["max_tokens"],
        )
        self.prompt = ChatPromptTemplate.from_messages([
            ("system", config["system_prompt"]),
            ("human", "{query}"),
        ])
        # Prompt and model are fixed for the object's lifetime, so compose
        # the runnable chain once here instead of on every refine() call.
        self._chain = self.prompt | self.model

    def refine(self, query: str) -> str:
        """Return the LLM-refined version of *query*.

        Args:
            query: The raw user query to refine.

        Returns:
            The text content of the model's response.
        """
        return self._chain.invoke({"query": query}).content