Ajey95 committed
Commit b8c4827 · 1 parent: 8e2f4c8

Fix: chat_history addition
Files changed:
- agents/academic_agent.py  +58 -8
- agents/drug_info_agent.py  +2 -2
- agents/mnemonic_agent.py  +2 -2
- agents/quiz_agent.py  +2 -2
- agents/router_agent.py  +2 -2
- app.py  +18 -3
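In short: this commit threads a session-backed chat_history (a list of Gemini-style {'role', 'parts': [...]} turns) from the Flask /chat route through MyPharmaAI.process_query and RouterAgent.route_query into the individual agents, so the tutor can resolve follow-up questions against prior conversation context.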
agents/academic_agent.py
CHANGED

@@ -290,13 +290,63 @@ class AcademicAgent:
             with open(knowledge_file, 'r') as f: return json.load(f)
         except: return {}
 
-    def process_with_ai(self, query, file_context=""):
-        """Use Gemini AI to provide comprehensive, context-aware answers."""
-        if not self.model: return None
+    # def process_with_ai(self, query, file_context=""):
+    #     """Use Gemini AI to provide comprehensive, context-aware answers."""
+    #     if not self.model: return None
+    #     try:
+    #         # (Content of this method is unchanged)
+    #         context_section = f"UPLOADED FILE CONTEXT:\n{file_context}" if file_context else ""
+    #         prompt = f"You are an expert pharmacy educator... STUDENT QUESTION: {query}\n{context_section} ..."
+    #         response = self.model.generate_content(prompt)
+    #         return response.text
+    #     except Exception as e:
+    #         print(f"Gemini API error in Academic Agent: {e}")
+    #         return None
+    # In agents/academic_agent.py -> class AcademicAgent
+
+    def process_with_ai(self, query, file_context="", chat_history=None):
+        """Use Gemini AI with conversation history and file context."""
+        if not self.model:
+            return None
+
+        # --- NEW HISTORY AND PROMPT LOGIC ---
+
+        # Format the past conversation for the prompt
+        history_for_prompt = ""
+        if chat_history:
+            for turn in chat_history:
+                # Ensure 'parts' is a list and not empty before accessing
+                if turn.get('parts') and isinstance(turn.get('parts'), list):
+                    role = "User" if turn['role'] == 'user' else "AI"
+                    history_for_prompt += f"{role}: {turn['parts'][0]}\n"
+
+        # Format the file context
+        context_section = ""
+        if file_context:
+            context_section = f"""
+---
+CONTEXT FROM UPLOADED FILE:
+{file_context}
+---
+Use the context from the uploaded file above to answer the user's current question if it is relevant.
+"""
+
+        # The new prompt structure
+        prompt = f"""You are a helpful and knowledgeable AI pharmacy tutor for a student in India.
+
+CONVERSATION HISTORY:
+{history_for_prompt}
+{context_section}
+CURRENT QUESTION:
+User: {query}
+
+Please provide a helpful and accurate answer to the user's CURRENT QUESTION.
+- If the question is a follow-up, use the CONVERSATION HISTORY to understand the context.
+- If the question relates to the UPLOADED FILE, prioritize information from that context.
+- Keep the tone encouraging and professional.
+"""
         try:
-            #
-            context_section = f"UPLOADED FILE CONTEXT:\n{file_context}" if file_context else ""
-            prompt = f"You are an expert pharmacy educator... STUDENT QUESTION: {query}\n{context_section} ..."
+            # This is a more direct and robust way to send the complete context
             response = self.model.generate_content(prompt)
             return response.text
         except Exception as e:

@@ -335,7 +385,7 @@ class AcademicAgent:
 
     # --- THIS IS THE ONLY METHOD THAT CHANGES ---
 
-    def process_query(self, query: str, file_context: str = ""):
+    def process_query(self, query: str, file_context: str = "", chat_history: list = None):
         """
         Main method to process academic queries.
         It now returns a standardized dictionary.

@@ -344,7 +394,7 @@ class AcademicAgent:
         try:
             # Priority 1: Use AI for a comprehensive response if available.
             if self.model:
-                ai_response = self.process_with_ai(query, file_context)
+                ai_response = self.process_with_ai(query, file_context, chat_history)
                 if ai_response:
                     response_message = f"🤖 **AI-Powered Response**\n\n{ai_response}"
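The turn format that process_with_ai expects mirrors what app.py stores in the session. A minimal, standalone sketch of the history-flattening step above (the question and answer strings are illustrative only):

    # Hypothetical history, in the same {'role', 'parts'} shape app.py stores in the session.
    chat_history = [
        {'role': 'user', 'parts': ['What is the mechanism of action of aspirin?']},
        {'role': 'model', 'parts': ['Aspirin irreversibly inhibits COX-1 and COX-2.']},
    ]

    history_for_prompt = ""
    for turn in chat_history:
        # Same guard as the agent: only read parts[0] when 'parts' is a non-empty list
        if turn.get('parts') and isinstance(turn.get('parts'), list):
            role = "User" if turn['role'] == 'user' else "AI"
            history_for_prompt += f"{role}: {turn['parts'][0]}\n"

    print(history_for_prompt)
    # User: What is the mechanism of action of aspirin?
    # AI: Aspirin irreversibly inhibits COX-1 and COX-2.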
agents/drug_info_agent.py
CHANGED

@@ -32,7 +32,7 @@ class DrugInfoAgent:
         # Clean up any extra whitespace
         return drug_name.strip().title()  # Capitalize for better recognition
 
-    def process_query(self, query: str, file_context: str = ""):
+    def process_query(self, query: str, file_context: str = "", chat_history: list = None):
         """
         Processes a query to retrieve information about a specific drug.
 

@@ -84,7 +84,7 @@ Use clear headings (like "🔬 Mechanism of Action") and bullet points for readability.
 
         try:
             # Generate content using the AI model
-            ai_response = self.model.generate_content(prompt)
+            ai_response = self.model.generate_content((chat_history or []) + [{'role': 'user', 'parts': [prompt]}])
             return {
                 'message': ai_response.text,
                 'agent_used': 'drug_info',
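Here, and in the identical call sites in mnemonic_agent.py and quiz_agent.py below, the history is folded into generate_content's single contents argument. Assuming self.model is a google-generativeai GenerativeModel, generate_content takes one contents value (a string, or a list of {'role', 'parts'} messages) plus keyword-only options, so there is no separate positional history parameter; that SDK's chat-session API is an equivalent option. A sketch under that assumption:

    # Sketch, assuming the google-generativeai SDK (import google.generativeai as genai).
    # Option 1: one-shot call with the stored history prepended to the contents list.
    contents = (chat_history or []) + [{'role': 'user', 'parts': [prompt]}]
    ai_response = self.model.generate_content(contents)

    # Option 2: a chat session that carries the history for you.
    chat = self.model.start_chat(history=chat_history or [])
    ai_response = chat.send_message(prompt)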
agents/mnemonic_agent.py
CHANGED

@@ -31,7 +31,7 @@ class MnemonicAgent:
         # Clean up any extra whitespace
         return topic.strip()
 
-    def process_query(self, query: str, file_context: str = ""):
+    def process_query(self, query: str, file_context: str = "", chat_history: list = None):
         """
         Processes a query to generate a mnemonic.
 

@@ -98,7 +98,7 @@ Keep up the great work! You've got this! 💪
 
         try:
             # Generate content using the AI model
-            ai_response = self.model.generate_content(prompt)
+            ai_response = self.model.generate_content((chat_history or []) + [{'role': 'user', 'parts': [prompt]}])
             return {
                 'message': ai_response.text,
                 'agent_used': 'mnemonic_creation',
agents/quiz_agent.py
CHANGED

@@ -32,7 +32,7 @@ class QuizAgent:
         # Clean up any extra whitespace
         return topic.strip()
 
-    def process_query(self, query: str, file_context: str = ""):
+    def process_query(self, query: str, file_context: str = "", chat_history: list = None):
         """
         Processes a query to generate a quiz. The agent prioritizes file_context if provided.
 

@@ -103,7 +103,7 @@ Let's test your knowledge! Good luck! 🍀
 
         try:
             # Generate content using the AI model
-            ai_response = self.model.generate_content(prompt)
+            ai_response = self.model.generate_content((chat_history or []) + [{'role': 'user', 'parts': [prompt]}])
             return {
                 'message': ai_response.text,
                 'agent_used': 'quiz_generation',
agents/router_agent.py
CHANGED

@@ -295,7 +295,7 @@ class RouterAgent:
         self.viva_agent = VivaAgent(gemini_model)
         self.default_agent = self.academic_agent
 
-    def route_query(self, query: str, file_context: str = "", viva_state: dict = None):
+    def route_query(self, query: str, file_context: str = "", viva_state: dict = None, chat_history: list = None):
         """
         Determines the user's intent and routes the query to the correct agent.
 

@@ -336,4 +336,4 @@ class RouterAgent:
 
         # 5. Default to Academic Agent
         # If no other intent is detected, it's likely a general academic question.
-        return self.academic_agent.process_query(query, file_context)
+        return self.academic_agent.process_query(query, file_context, chat_history)
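Only the default academic branch appears in this hunk; for the quiz, mnemonic, and drug-info routes to actually receive the history their new signatures accept, each branch inside route_query needs the same forwarding. Hypothetically (those branches are not part of this diff):

    return self.quiz_agent.process_query(query, file_context, chat_history)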
app.py
CHANGED

@@ -443,7 +443,7 @@ class MyPharmaAI:
         self.quotes = load_quotes()
         self.file_processor = FileProcessor()
 
-    def process_query(self, query, user_name="Student", viva_state=None, uploaded_files=None):
+    def process_query(self, query, user_name="Student", viva_state=None, uploaded_files=None, chat_history=None):
         """Routes a user's query to the appropriate agent, handling context."""
         try:
             file_context = ""

@@ -454,7 +454,7 @@ class MyPharmaAI:
             print(f"--- DEBUG: Here is the text extracted from the file ---\n{file_context[:1000]}\n--- END DEBUG ---")
 
         # Pass all context to the router
-        response_data = self.router.route_query(query, file_context, viva_state)
+        response_data = self.router.route_query(query, file_context, viva_state, chat_history)
 
         return {
             'success': True,

@@ -512,13 +512,28 @@ def chat():
     query = data.get('query', '').strip()
     if not query:
         return jsonify({'success': False, 'error': 'Empty query'}), 400
+
+    # --- HISTORY MANAGEMENT START ---
+
+    # Get the conversation history from the session (or start a new one)
+    chat_history = session.get('chat_history', [])
 
     # Get current viva state from session for the Viva Agent
     viva_state = session.get('viva_state', None)
     uploaded_files = session.get('uploaded_files', None)
 
     # Process the query through the main orchestrator
-    result = pharma_ai.process_query(query, viva_state=viva_state, uploaded_files=uploaded_files)
+    result = pharma_ai.process_query(query, viva_state=viva_state, uploaded_files=uploaded_files, chat_history=chat_history)
+    # If the query was successful, update the history
+    if result.get('success'):
+        # Add the user's query and the AI's message to the history
+        chat_history.append({'role': 'user', 'parts': [query]})
+        chat_history.append({'role': 'model', 'parts': [result.get('message', '')]})
+
+        # Keep the history from getting too long (e.g., last 10 exchanges)
+        session['chat_history'] = chat_history[-20:]
+
+    # --- HISTORY MANAGEMENT END ---
 
     # If the Viva agent returns an updated state, save it to the session
    if 'viva_state' in result:
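The round-trip above relies on Flask's cookie-backed session, so the stored history must stay JSON-serializable and small; trimming to the last 20 entries keeps 10 user/model exchanges. A minimal, self-contained sketch of the same pattern (the app name and secret key are placeholders):

    from flask import Flask, session

    app = Flask(__name__)
    app.secret_key = "change-me"  # placeholder; sessions require a real secret key

    MAX_ENTRIES = 20  # 10 exchanges, matching chat_history[-20:] above

    def remember_turn(query: str, answer: str) -> None:
        """Append one user/model exchange and trim the stored history."""
        history = session.get('chat_history', [])
        history.append({'role': 'user', 'parts': [query]})
        history.append({'role': 'model', 'parts': [answer]})
        session['chat_history'] = history[-MAX_ENTRIES:]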