# modules/general_chat.py
# NOTE(review): Hugging Face page residue ("NeerajRavi's picture",
# "Update modules/general_chat.py", commit 11aa6fe) converted to this
# comment header so the file is valid Python.
# General module
from helpers.hf_llm import generate_text
# System prompt prepended to every LLM call made by this module: frames the
# assistant as a polite general-purpose helper, asks for clarification on
# ambiguous questions, and forbids claiming access to private/real-time systems.
GENERAL_SYSTEM_PROMPT = (
    "You are a knowledgeable, helpful AI assistant.\n"
    "You can answer general questions, explain concepts clearly, "
    "and respond politely in a conversational manner.\n"
    "If a question is ambiguous, ask for clarification.\n"
    "Do not claim access to private, real-time, or restricted systems."
)
def get_relevance(route, module_name):
    """Return the router's relevance score for *module_name*.

    Scans route["module_preferences"] (a list of
    {"module": ..., "relevance": ...} dicts) and returns the relevance of
    the first matching entry, or 0.0 when the module is not listed (or the
    key is absent from *route* entirely).
    """
    preferences = route.get("module_preferences", [])
    scores = (pref["relevance"] for pref in preferences
              if pref["module"] == module_name)
    return next(scores, 0.0)
def answer_general_query(query, route, mode="module"):
    """Answer a general-knowledge question when this module is the best route.

    Parameters
    ----------
    query : str
        The user's question.
    route : dict
        Router output; its "module_preferences" list is read via
        get_relevance().
    mode : str
        "module" (default) applies the relevance gating below;
        "failsafe" answers unconditionally (used when no other module
        produced an answer).

    Returns
    -------
    dict
        {"answer": str | None, "has_answer": bool, "meta": dict}.
    """
    if mode == "failsafe":
        # Failsafe path: skip all routing checks and always respond.
        answer_text = _generate_answer(query, "Answer politely and clearly.")
        return {
            "answer": answer_text,
            "has_answer": True,
            "meta": {"mode": "failsafe"},
        }

    gen_rel = get_relevance(route, "general")
    rag_rel = get_relevance(route, "railway_rag")
    api_rel = get_relevance(route, "live_data_apis")

    # Gating thresholds: the general module needs a minimum absolute
    # relevance, AND must beat the best specialised module (RAG / live
    # APIs) by DOMINANCE_MARGIN before it is allowed to answer.
    MIN_GENERAL_RELEVANCE = 0.30
    DOMINANCE_MARGIN = 0.10

    if gen_rel < MIN_GENERAL_RELEVANCE:
        return {"answer": None, "has_answer": False, "meta": {}}
    if gen_rel < max(rag_rel, api_rel) + DOMINANCE_MARGIN:
        # A specialised module is (nearly) as relevant — defer to it.
        return {"answer": None, "has_answer": False, "meta": {}}

    answer_text = _generate_answer(
        query, "Answer clearly, politely, and conversationally."
    )
    return {
        "answer": answer_text,
        "has_answer": True,
        "meta": {},
    }


def _generate_answer(query, instruction):
    """Build the shared prompt and call the LLM.

    Extracted because the failsafe and module paths previously duplicated
    the prompt template and generate_text() call verbatim, differing only
    in one instruction line.
    """
    prompt = f"""
{GENERAL_SYSTEM_PROMPT}
User question:
{query}
{instruction}
Keep the answer concise.
"""
    return generate_text(
        prompt=prompt,
        max_new_tokens=300,
        temperature=0.6,
        top_p=0.9,
    )