# General module: answers broad, conversational questions when the router
# prefers this module over the railway-specific RAG and live-data modules.
from helpers.hf_llm import generate_text

GENERAL_SYSTEM_PROMPT = (
    "You are a knowledgeable, helpful AI assistant.\n"
    "You can answer general questions, explain concepts clearly, "
    "and respond politely in a conversational manner.\n"
    "If a question is ambiguous, ask for clarification.\n"
    "Do not claim access to private, real-time, or restricted systems."
)


def get_relevance(route, module_name):
    """Return the router's relevance score for `module_name`, or 0.0 if absent."""
    for m in route.get("module_preferences", []):
        if m["module"] == module_name:
            return m["relevance"]
    return 0.0


def answer_general_query(query, route, mode="module"):
    """Answer a general query.

    In "failsafe" mode the query is answered unconditionally; in the default
    "module" mode it is answered only when the router scored this module as
    both sufficiently relevant and dominant over the domain-specific modules.
    """
    if mode == "failsafe":
        # Failsafe: no other module produced an answer, so respond regardless
        # of relevance scores.
        prompt = (
            GENERAL_SYSTEM_PROMPT
            + "\n\nUser question:\n"
            + query
            + "\n\nAnswer politely and clearly.\nKeep the answer concise.\n"
        )
        answer_text = generate_text(
            prompt=prompt,
            max_new_tokens=300,
            temperature=0.6,
        )
        return {
            "answer": answer_text,
            "has_answer": True,
            "meta": {"mode": "failsafe"},
        }

    # Gate on the router's scores: answer only when the general module is
    # relevant enough and clearly ahead of the domain-specific modules.
    gen_rel = get_relevance(route, "general")
    rag_rel = get_relevance(route, "railway_rag")
    api_rel = get_relevance(route, "live_data_apis")

    MIN_GENERAL_RELEVANCE = 0.30  # abstain below this score
    DOMINANCE_MARGIN = 0.10  # required lead over the best domain module

    if gen_rel < MIN_GENERAL_RELEVANCE:
        return {
            "answer": None,
            "has_answer": False,
            "meta": {},
        }
    if gen_rel < max(rag_rel, api_rel) + DOMINANCE_MARGIN:
        return {
            "answer": None,
            "has_answer": False,
            "meta": {},
        }
    # The general module won the routing decision: answer directly.
    prompt = (
        GENERAL_SYSTEM_PROMPT
        + "\n\nUser question:\n"
        + query
        + "\n\nAnswer clearly, politely, and conversationally.\nKeep the answer concise.\n"
    )
    answer_text = generate_text(
        prompt=prompt,
        max_new_tokens=300,
        temperature=0.6,
    )
    return {
        "answer": answer_text,
        "has_answer": True,
        "meta": {},
    }
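

if __name__ == "__main__":
    # Usage sketch (illustrative only): the `route` dict is assumed to come
    # from the project's router; this hand-written sample merely shows the
    # expected "module_preferences" shape and scores are made up.
    sample_route = {
        "module_preferences": [
            {"module": "general", "relevance": 0.70},
            {"module": "railway_rag", "relevance": 0.20},
            {"module": "live_data_apis", "relevance": 0.10},
        ]
    }
    # General relevance (0.70) clears the 0.30 floor and beats the best
    # domain score (0.20) by more than the 0.10 margin, so this answers.
    result = answer_general_query("What is a narrow-gauge railway?", sample_route)
    print(result["has_answer"], result["answer"])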