File size: 3,871 Bytes
6cf6a92
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
"""
Answer Generation Module for Retrieval-based Medical QA Chatbot
=================================================================
This module handles answer generation using Groq API with proper error handling.
"""

import os
from openai import OpenAI

# Get API key from environment
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")

# Build the client once at import time. Groq exposes an OpenAI-compatible
# endpoint, so the standard OpenAI SDK is pointed at Groq's base URL.
if GROQ_API_KEY is None:
    # Missing key: warn and leave `client` as None so callers can degrade
    # gracefully (see query_groq) instead of crashing at import time.
    print("[Warning] GROQ_API_KEY not set!")
    client = None
else:
    client = OpenAI(
        api_key=GROQ_API_KEY,
        base_url="https://api.groq.com/openai/v1"
    )

# -------------------------------
# Function: Query Groq API
# -------------------------------

def query_groq(prompt, model="meta-llama/llama-4-scout-17b-16e-instruct", max_tokens=300, temperature=0.7):
    """
    Sends a prompt to Groq API and returns the generated response.

    Parameters:
        prompt (str): The text prompt for the model.
        model (str): Model name deployed on Groq API.
        max_tokens (int): Maximum tokens allowed in the output.
        temperature (float): Sampling temperature (0.0 = deterministic,
            higher = more varied). Defaults to 0.7, matching prior behavior.

    Returns:
        str: Model-generated response text, or a user-facing error string
        when the client is unconfigured or the API call fails.
    """
    # Graceful degradation: module-level `client` is None when GROQ_API_KEY
    # was not set at import time.
    if client is None:
        return "⚠️ Error: API key not configured. Please contact the administrator."

    try:
        response = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": "You are a helpful biomedical assistant providing accurate drug information."},
                {"role": "user", "content": prompt}
            ],
            temperature=temperature,
            max_tokens=max_tokens
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        # Broad catch is deliberate: this is the user-facing boundary, so any
        # API/network failure becomes a readable message instead of a traceback.
        print(f"[Answer Generation] Error calling Groq API: {e}")
        return f"⚠️ Error generating answer: {str(e)}"

# -------------------------------
# Function: Build Prompt
# -------------------------------

def build_prompt(question, context):
    """
    Assemble the full model prompt from the user question and retrieved context.

    Parameters:
        question (str): User's question.
        context (str): Retrieved relevant text chunks.

    Returns:
        str: Complete prompt text (ends with a trailing newline).
    """
    # Build the prompt line by line; the trailing "" yields the final newline.
    parts = [
        "Based strictly on the following medical information, answer the question clearly and concisely.",
        "",
        f"Question: {question}",
        "",
        "Context:",
        f"{context}",
        "",
        "Instructions:",
        "- Provide a direct, accurate answer based only on the context",
        "- Use clear, simple language",
        "- If the context doesn't contain enough information, say so",
        "- Do not add information not present in the context",
        "",
    ]
    return "\n".join(parts)

# -------------------------------
# Function: Answer Generation
# -------------------------------

def answer_generation(question, top_chunks, top_k=3):
    """
    Generates an answer based on retrieved top chunks.

    Parameters:
        question (str): User's question.
        top_chunks (DataFrame): Retrieved top chunks with context; expected to
            carry 'drug_name', 'section', and 'chunk_text' columns.
        top_k (int): Number of top chunks to use for answer generation.

    Returns:
        str: Final generated answer, or a user-facing error/notice string.
    """
    try:
        # Keep only the highest-ranked rows.
        selected = top_chunks.head(top_k)
        print(f"[Answer Generation] Using top {len(selected)} chunks")

        if selected.empty:
            return "⚠️ No relevant information found. Please try rephrasing your question."

        # Render each retrieved row as a labelled entry, then merge the
        # entries into a single context string for the prompt.
        pieces = []
        for _, row in selected.iterrows():
            pieces.append(
                f"Drug: {row['drug_name']}\n"
                f"Section: {row['section']}\n"
                f"Info: {row['chunk_text']}"
            )
        context = "\n\n".join(pieces)

        # Construct the prompt and ask the model.
        return query_groq(build_prompt(question, context))

    except Exception as e:
        # Boundary-level catch: surface any failure as a readable message.
        print(f"[Answer Generation] Error: {e}")
        return f"⚠️ Error generating answer: {str(e)}"