ytrsoymr committed on
Commit
6e6cc9e
·
verified ·
1 Parent(s): 8e12ecf

Update utils/llm.py

Browse files
Files changed (1) hide show
  1. utils/llm.py +34 -34
utils/llm.py CHANGED
@@ -1,34 +1,34 @@
1
- import os
2
- from langchain_google_genai import ChatGoogleGenerativeAI
3
- from langchain_core.prompts import ChatPromptTemplate
4
- from config import GOOGLE_API_KEY
5
-
6
-
7
- # Load Gemini LLM
8
- llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro", google_api_key=GOOGLE_API_KEY)
9
-
10
- # Define a structured prompt template
11
- PROMPT_TEMPLATE = """
12
- You are an AI assistant that answers user queries based strictly on the given context.
13
-
14
- CONTEXT INFORMATION:
15
- {context}
16
-
17
- QUESTION:
18
- {question}
19
-
20
- INSTRUCTIONS:
21
- - Answer the question based ONLY on the context.
22
- - Do NOT add any information beyond what is in the context.
23
- - Do NOT include phrases like "according to the context" or "mentioned in the context".
24
- - Provide a direct and precise response.
25
- """
26
-
27
-
28
- prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
29
-
30
- def generate_answer(query: str, context: str):
31
- """Generate AI response based on retrieved context."""
32
- prompt = prompt_template.format(context=context, question=query)
33
- response = llm.invoke(prompt)
34
- return response.content
 
1
+ import os
2
+ from langchain_google_genai import ChatGoogleGenerativeAI
3
+ from langchain_core.prompts import ChatPromptTemplate
4
+ from config import GOOGLE_API_KEY
5
+
6
+
7
# Gemini chat model shared by every request handled in this module.
llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro", google_api_key=GOOGLE_API_KEY)

# Prompt contract: the model answers from {context} alone — detailed and
# multi-point when the context supports it, but never drawing on outside
# knowledge. The {question} slot carries the user's query.
PROMPT_TEMPLATE = """
You are an AI assistant that answers user queries based on the given context.

CONTEXT INFORMATION:
{context}

QUESTION:
{question}

INSTRUCTIONS:
- Provide a detailed answer based on the context.
- If multiple relevant points exist, include them.
- Keep the response informative but concise.
- Do NOT add external information beyond the given context.
"""

# Template is parsed once at import time; generate_answer() fills it per call.
prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
29
+
30
+ def generate_answer(query: str, context: str):
31
+ """Generate AI response based on retrieved context."""
32
+ prompt = prompt_template.format(context=context, question=query)
33
+ response = llm.invoke(prompt)
34
+ return response.content