Update response_generator.py
Browse files- response_generator.py +19 -35
response_generator.py
CHANGED
|
@@ -1,5 +1,4 @@
|
|
| 1 |
-
#
|
| 2 |
-
from openai import OpenAI
|
| 3 |
from utils import setup_logger
|
| 4 |
from config import Config
|
| 5 |
|
|
@@ -7,63 +6,48 @@ logger = setup_logger('response_generator')
|
|
| 7 |
|
| 8 |
class ResponseGenerator:
|
| 9 |
def __init__(self):
|
| 10 |
-
""
|
| 11 |
-
Initialize connection to OpenAI
|
| 12 |
-
"""
|
| 13 |
-
logger.info("Response generator initialized (LLM mode)")
|
| 14 |
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
logger.error("OPENAI_API_KEY is missing in Config or Environment variables.")
|
| 18 |
self.client = None
|
| 19 |
else:
|
| 20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
|
| 22 |
def generate_response(self, query, relevant_docs):
|
| 23 |
-
"""
|
| 24 |
-
Generate a formalized short answer using LLM based on retrieved docs
|
| 25 |
-
"""
|
| 26 |
-
# 1. Handle no results found
|
| 27 |
if len(relevant_docs) == 0:
|
| 28 |
return "عذرًا، لم أجد أي معلومات ذات صلة في المستندات."
|
| 29 |
|
| 30 |
-
# 2. Handle missing API Key gracefully
|
| 31 |
if not self.client:
|
| 32 |
-
return "عذرًا، لم يتم إعداد مفتاح API
|
| 33 |
|
| 34 |
try:
|
| 35 |
-
# 3. Construct the Context
|
| 36 |
-
# We combine the content of the top retrieved chunks
|
| 37 |
context_text = "\n\n".join(relevant_docs['content'].tolist())
|
| 38 |
|
| 39 |
-
# 4. Define the System Prompt
|
| 40 |
-
# Instructions: Act as a helpful assistant, use Arabic, be formal and short.
|
| 41 |
system_instruction = (
|
| 42 |
-
"أنت مساعد ذكي
|
| 43 |
-
"ا
|
| 44 |
-
"
|
| 45 |
-
"اجعل إجابتك قصيرة، رسمية، ومباشرة."
|
| 46 |
)
|
| 47 |
|
| 48 |
-
# 5. Define the User Message
|
| 49 |
user_message = f"السياق:\n{context_text}\n\nالسؤال: {query}"
|
| 50 |
|
| 51 |
-
# 6. Call OpenAI API
|
| 52 |
response = self.client.chat.completions.create(
|
| 53 |
-
model=Config.
|
| 54 |
messages=[
|
| 55 |
{"role": "system", "content": system_instruction},
|
| 56 |
{"role": "user", "content": user_message}
|
| 57 |
],
|
| 58 |
-
temperature=0.3,
|
| 59 |
-
max_tokens=
|
| 60 |
)
|
| 61 |
|
| 62 |
-
|
| 63 |
-
answer = response.choices[0].message.content.strip()
|
| 64 |
-
|
| 65 |
-
return answer
|
| 66 |
|
| 67 |
except Exception as e:
|
| 68 |
-
logger.error(f"Error generating
|
| 69 |
-
return "عذرًا،
|
|
|
|
| 1 |
+
from openai import OpenAI # نستخدم نفس المكتبة!
|
|
|
|
| 2 |
from utils import setup_logger
|
| 3 |
from config import Config
|
| 4 |
|
|
|
|
| 6 |
|
| 7 |
class ResponseGenerator:
    """Generate short, formal Arabic answers from retrieved documents.

    Uses the OpenAI SDK pointed at Groq's OpenAI-compatible endpoint
    (Llama-3 models). All user-facing messages are in Arabic.
    """

    def __init__(self):
        """Initialize the LLM client.

        If ``Config.GROQ_API_KEY`` is missing, ``self.client`` is set to
        ``None`` and ``generate_response`` degrades gracefully instead of
        raising.
        """
        logger.info("Response generator initialized (Groq Llama-3 mode)")

        if not Config.GROQ_API_KEY:
            logger.error("GROQ_API_KEY is missing.")
            self.client = None
        else:
            # Reuse the OpenAI SDK by pointing it at Groq's
            # OpenAI-compatible base URL.
            self.client = OpenAI(
                api_key=Config.GROQ_API_KEY,
                base_url="https://api.groq.com/openai/v1"
            )

    def generate_response(self, query, relevant_docs):
        """Answer *query* using only the retrieved context in *relevant_docs*.

        Args:
            query: The user's question as a plain string.
            relevant_docs: Retrieved chunks; presumably a pandas DataFrame
                with a ``content`` column (``['content'].tolist()`` is used)
                — TODO confirm against the retriever's return type.

        Returns:
            The model's answer stripped of surrounding whitespace, or an
            Arabic fallback message when no documents were found, the client
            is unconfigured, or the API call fails.
        """
        # 1. Handle no results found. The explicit None check prevents a
        #    TypeError when the retriever returns nothing at all.
        if relevant_docs is None or len(relevant_docs) == 0:
            return "عذرًا، لم أجد أي معلومات ذات صلة في المستندات."

        # 2. Handle a missing API key gracefully instead of raising.
        if not self.client:
            return "عذرًا، لم يتم إعداد مفتاح API."

        try:
            # 3. Build the context by joining the top retrieved chunks.
            context_text = "\n\n".join(relevant_docs['content'].tolist())

            # 4. System prompt: answer in Arabic, strictly from the context,
            #    formal and short; say "no information available" otherwise.
            system_instruction = (
                "أنت مساعد ذكي تتحدث العربية بطلاقة. مهمتك الإجابة بدقة بناءً على السياق فقط."
                "إذا لم تجد الإجابة في السياق، قل 'لا تتوفر معلومات'."
                "اجعل إجابتك رسمية، قصيرة، ومباشرة."
            )

            # 5. User message carries the context followed by the question.
            user_message = f"السياق:\n{context_text}\n\nالسؤال: {query}"

            # 6. Call the chat-completions endpoint; low temperature keeps
            #    answers deterministic and grounded.
            response = self.client.chat.completions.create(
                model=Config.LLM_MODEL,
                messages=[
                    {"role": "system", "content": system_instruction},
                    {"role": "user", "content": user_message}
                ],
                temperature=0.3,
                max_tokens=300
            )

            return response.choices[0].message.content.strip()

        except Exception as e:
            # Boundary handler: log (lazy %-args, not an f-string) and return
            # a user-facing Arabic error instead of propagating.
            logger.error("Error generating response: %s", e)
            return "عذرًا، حدث خطأ أثناء المعالجة."