Spaces:
Build error
Build error
Upload 11 files
Browse files- .gitattributes +1 -0
- agent/composer_agent.py +47 -0
- agent/generic_agent.py +111 -0
- agent/planning_agent.py +198 -0
- agent/product_review_agent.py +212 -0
- agent/router_agent.py +151 -0
- app.py +97 -7
- data/cleaned_dataset_full.csv +0 -0
- data/embeddings.py +3 -0
- documents.pkl +3 -0
- interface.py +45 -0
- requirements.txt +27 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
data/embeddings.py filter=lfs diff=lfs merge=lfs -text
|
agent/composer_agent.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
# Configure logging
|
| 4 |
+
logging.basicConfig(level=logging.INFO)
|
| 5 |
+
logger = logging.getLogger(__name__)
|
| 6 |
+
|
| 7 |
+
def compose_response(response: str) -> str:
    """Process and enhance the final response.

    Strips chat-role artifacts and applies standard formatting. On any
    failure the original text is returned unchanged (best-effort cleanup).
    """
    try:
        # Fix: removed leftover debug print() statements that leaked
        # intermediate responses to stdout in production.
        response = remove_system_artifacts(response)
        response = format_response(response)
        return response
    except Exception as e:
        logger.error(f"Error in composition: {str(e)}")
        return response  # Fallback to original

def remove_system_artifacts(text: str) -> str:
    """Remove any system artifacts or unwanted patterns (chat-role prefixes)."""
    artifacts = ["Assistant:", "AI:", "Human:", "User:"]
    cleaned = text
    for artifact in artifacts:
        cleaned = cleaned.replace(artifact, "")
    return cleaned.strip()

def format_response(text: str) -> str:
    """Apply standard formatting: spacing, sentence case, terminal punctuation."""
    # Collapse triple newlines into double (paragraph spacing).
    formatted = text.replace("\n\n\n", "\n\n")

    # Fix: uppercase only the first character of each sentence.
    # The previous str.capitalize() also LOWERCASED the rest of the
    # sentence, corrupting proper nouns (e.g. "Amazon" -> "amazon").
    sentences = [s.strip() for s in formatted.split(". ")]
    formatted = ". ".join(s[:1].upper() + s[1:] for s in sentences)

    # Ensure proper ending punctuation.
    if formatted and formatted[-1] not in ['.', '!', '?']:
        formatted += '.'

    return formatted
|
| 47 |
+
|
agent/generic_agent.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain_openai import ChatOpenAI
|
| 2 |
+
from langchain.prompts import ChatPromptTemplate
|
| 3 |
+
from langchain.memory import ConversationBufferMemory
|
| 4 |
+
import logging
|
| 5 |
+
|
| 6 |
+
# Configure logging
|
| 7 |
+
logging.basicConfig(level=logging.INFO)
|
| 8 |
+
logger = logging.getLogger(__name__)
|
| 9 |
+
|
| 10 |
+
# Global variables
|
| 11 |
+
llm = None
|
| 12 |
+
memory = None
|
| 13 |
+
prompt = None
|
| 14 |
+
|
| 15 |
+
# System prompt for the generic customer-support agent.
# Fix: corrected numerous typos in the prompt text ("checkpints",
# "conext", "undersatnd", "resasssurance", "haave", "furter",
# "qiestions") and a missing closing quote in the greeting example —
# misspellings in a prompt degrade model output quality.
system_prompt = """
Role
You are a knowledgeable and compassionate customer support chatbot specializing in various
products available in Amazon product catalogue. Your goal is to provide accurate, detailed
and empathetic information in response to the customer queries on various issues, challenges
faced by customer strictly related to the products available in Amazon catalogue.
Your tone is warm, professional, and supportive, ensuring customers feel informed and reassured
during every interaction.

Instructions
Shipment Tracking: When a customer asks about their shipment, request the tracking number and
tell them you will call back in 1 hour and provide the status on customer's callback number.
Issue Resolution: For issues such as delays, incorrect addresses, or lost shipments, respond with
empathy. Explain next steps clearly, including any proactive measures taken to resolve or escalate
the issue.
Proactive Alerts: Offer customers the option to receive notifications about key updates, such as
when shipments reach major checkpoints or encounter delays.
FAQ Handling: Address frequently asked questions about handling products, special packaging
requirements, and preferred delivery times with clarity and simplicity.
Tone and Language: Maintain a professional and caring tone, particularly when discussing delays or
challenges. Show understanding and reassurance.

Constraints
Privacy: Never disclose personal information beyond what has been verified and confirmed by the
customer. Always ask for consent before discussing details about shipments.
Conciseness: Ensure responses are clear and detailed, avoiding jargon unless necessary for context.
Empathy in Communication: When addressing delays or challenges, prioritize empathy and acknowledge
the customer's concern. Provide next steps and reassurance.
Accuracy: Ensure all information shared with customer are accurate and up-to-date. If the query is
outside Amazon's products and services, clearly say I do not know.
Jargon-Free Language: Use simple language to explain logistics terms or processes to customers,
particularly when dealing with customer on sensitive matter.

Examples

Greetings

User: "Hi, I am John."
AI: "Hi John. How can I assist you today?"

Issue Resolution for Delayed product Shipment

User: "I am worried about the delayed Amazon shipment."
AI: "I understand your concern, and I'm here to help. Let me check the
status of your shipment. If needed, we'll coordinate with the carrier to ensure
your product's safety and provide you with updates along the way."

Proactive Update Offer

User: "Can I get updates on my product shipment's address."
AI: "Absolutely! I can send you notification whenever your product's shipment
reaches a checkpoint or if there are any major updates. Would you like to set that
up ?"

Out of context question

User: "What is the capital city of Nigeria ?"
AI: "Sorry, I do not know. I know only about Amazon products. In case you have any further
questions on the products and services of Amazon, I can help you."

Closure

User: "No Thank you."
AI: "Thank you for contacting Amazon. Have a nice day!"
"""
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def initialize_generic_agent(llm_instance, memory_instance):
    """Wire the module-level LLM, memory and chat prompt for the generic agent.

    Must be called once before process(); stores the injected instances in
    module globals and builds the system/human prompt template.
    """
    global llm, memory, prompt
    llm, memory = llm_instance, memory_instance
    message_spec = [
        ("system", system_prompt),
        ("human", "{input}"),
    ]
    prompt = ChatPromptTemplate.from_messages(message_spec)
    logger.info("generic agent initialized successfully")
|
| 91 |
+
|
| 92 |
+
def process(query):
    """Answer a generic (non-product) query via the prompt | LLM chain.

    Saves the exchange into the conversation memory when one is configured,
    then returns the model's text content.
    """
    pipeline = prompt | llm
    result = pipeline.invoke({"input": query})

    # Persist the turn only when a memory backend was injected.
    if memory:
        memory.save_context({"input": query}, {"output": result.content})
    return result.content
|
| 100 |
+
|
| 101 |
+
def clear_context():
    """Clear the conversation memory."""
    try:
        if not memory:
            # Nothing was ever initialized — warn rather than fail.
            logger.warning("No memory instance available to clear")
            return
        memory.clear()
        logger.info("Conversation context cleared successfully")
    except Exception as e:
        logger.error(f"Error clearing context: {str(e)}")
        raise
|
agent/planning_agent.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain_openai import ChatOpenAI
|
| 2 |
+
from langchain.agents import initialize_agent, AgentType
|
| 3 |
+
from langchain.tools import Tool
|
| 4 |
+
from langchain.memory import ConversationBufferMemory, SimpleMemory
|
| 5 |
+
import agent.router_agent as router_agent
|
| 6 |
+
import agent.product_review_agent as product_review_agent
|
| 7 |
+
import agent.generic_agent as generic_agent
|
| 8 |
+
import agent.composer_agent as composer_agent
|
| 9 |
+
import logging
|
| 10 |
+
|
| 11 |
+
# Set httpx (HTTP request) logging to WARNING or ERROR level
|
| 12 |
+
# This will hide the HTTP request logs while keeping agent thoughts visible
|
| 13 |
+
logging.getLogger("httpx").setLevel(logging.WARNING) # added on 23-Nob
|
| 14 |
+
|
| 15 |
+
# Configure logging
|
| 16 |
+
logging.basicConfig(level=logging.INFO)
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
# Global variables
|
| 20 |
+
llm = None
|
| 21 |
+
chat_memory = None
|
| 22 |
+
query_memory = None
|
| 23 |
+
agent = None
|
| 24 |
+
|
| 25 |
+
def initialize_planning_agent(llm_instance, chat_memory_instance, query_memory_instance):
    """Wire up the planner: initialize the sub-agents, register their tools and
    build the ZERO_SHOT_REACT_DESCRIPTION agent in the module-level `agent`.

    Args:
        llm_instance: shared chat LLM used by the planner and every sub-agent.
        chat_memory_instance: conversation memory shared across agents.
        query_memory_instance: simple key/value memory used to stash the
            user's original query (read back by the tool functions below).
    """
    global llm, chat_memory, query_memory, agent

    llm = llm_instance
    chat_memory = chat_memory_instance
    query_memory = query_memory_instance

    # Initialize agents
    router_agent.initialize_router_agent(llm, chat_memory)
    product_review_agent.initialize_product_review_agent(llm, chat_memory)
    generic_agent.initialize_generic_agent(llm, chat_memory)
    # composer_agent.initialize_composer_agent(llm, memory)

    # Tools exposed to the ReAct agent; each func is a module-level helper
    # defined later in this file. The descriptions drive tool selection.
    tools = [
        Tool(
            name="route_query",
            func=route_query,
            description="Determine query type. Returns either 'product_review' or 'generic'"
        ),
        Tool(
            name="get_product_info",
            func=get_product_info,
            description="Use this to get product-related data such as features, prices, availability, or reviews"
        ),
        Tool(
            name="handle_generic_query",
            func=handle_generic_query,
            description="Use this to get response to user queries which are generic and where the retrieval of product details are not required"
        ),
        Tool(
            name="compose_response",
            func=compose_response,
            description="Use this to only format the response. After this step, return the formatted response to main.py"
        )
    ]

    # NOTE(review): the worked examples below use "compose_responses" (plural)
    # while the registered tool is named "compose_response" — the model may
    # emit an unknown tool name when imitating the examples; verify intended.
    system_prompt = """You are an efficient AI planning agent. Follow these rules strictly:

CRITICAL INSTRUCTION:
For simple queries listed below, skip the route_query and directly go to handle_generic_query.

SIMPLE QUERIES (NEVER use tools):
1. Greetings: "hi", "hello", "hey", "good morning", "good evening", "good afternoon"
2. Farewells: "bye", "goodbye", "see you", "take care"
3. Thank you messages: "thanks", "thank you", "thanks a lot", "appreciate it"
4. Simple confirmations: "okay", "yes", "no", "sure", "alright"
5. Basic courtesy: "how are you?", "how are you doing?", "what's up?", "what are you doing?"
6. Simple acknowledgments: "got it", "understood", "I see"

FOR ALL OTHER QUERIES:
1. Use route_query to determine if query is product_review or generic
2. If route_query returns 'generic', use handle_generic_query and STOP
3. If route_query returns 'product_review', use get_product_info and STOP

EXAMPLES:

User: "Hi"
Thought: This is a Simple greeting, I will use handle_generic_query to get appropriate response
Action: handle_generic_query
Observation: "Hi! How can I help you today?"
Thought: I have got the final answer. I will use compose_responses to format the response.
Action: compose_responses
Final Answer: "Hi! How can I help you today?"

User: "I got my package delivered yesterday. It was delivered very late. I want to file a complaint."
Thought: This is a generic query that does not require product details. I will use handle_generic_query to get appropriate response.
Action: handle_generic_query
Action Input: User query: I got my package delivered yesterday. It was delivered very late. I want to file a complaint.
Observation: {'intermediate_steps': [], 'output': "I'm sorry to hear about the delay in your package delivery. I understand your frustration and I'm here to assist you with filing a complaint. To better assist you, could you please provide me with the tracking number of your package? Once I have that information, I will look into the matter and ensure that your feedback is addressed appropriately.", 'action': 'Final Answer', 'action_input': "I'm sorry to hear about the delay in your package delivery. I understand your frustration and I'm here to assist you with filing a complaint. To better assist you, could you please provide me with the tracking number of your package? Once I have that information, I will look into the matter and ensure that your feedback is addressed appropriately."}
Thought:I have got the final answer. I will use compose_responses to format the response.
Action: compose_responses
Final Answer: I'm sorry to hear about the delay in your package delivery. I understand your frustration and I'm here to assist you with filing a complaint. To better assist you, could you please provide me with the tracking number of your package? Once I have that information, I will look into the matter and ensure that your feedback is addressed appropriately.

Remember: For simple queries listed above, respond immediately with Final Answer WITHOUT using tools.
"""

    # ZERO_SHOT_REACT_DESCRIPTION chooses tools purely from the descriptions
    # above; max_iterations=2 keeps each run to route + answer at most.
    agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
        memory=chat_memory,
        system_message=system_prompt,
        early_stopping_method="generate",
        max_iterations=2
    )
    logger.info("Planning agent initialized successfully")
|
| 113 |
+
|
| 114 |
+
def summarize_chat_history():
    """Summarize the chat history to retain context without overwhelming memory."""
    try:
        if chat_memory:
            # Retrieve the chat history as a list of messages
            # NOTE(review): chat_memory.buffer may be a plain string rather than
            # a list of message objects depending on the memory implementation /
            # LangChain version — TODO confirm; the hasattr guard below masks it.
            chat_history = chat_memory.buffer  # Assuming this is a list of messages

            logger.info(f"Chat history type: {type(chat_history)}")

            if chat_history:
                # Extract text from AIMessage objects
                text_messages = [msg.content for msg in chat_history if hasattr(msg, 'content')]

                logger.info(f"Extracted messages: {text_messages}")

                # Get the last 5 messages for the summary
                summary = "\n".join(text_messages[-5:])  # Adjust as needed
                logger.info(f"Generated summary: {summary}")

                # Clear the old history
                chat_memory.clear()  # Clear the old history

                # If the memory allows appending, do that
                # NOTE(review): appending a plain string to .buffer presumably
                # does not restore a valid message list; any failure here is
                # swallowed by the broad except below — verify against the
                # memory class actually in use.
                chat_memory.buffer.append(summary)  # Append the summary
                # Or if there's a method to set the buffer, do that:
                # chat_memory.set_memory([summary])  # If such a method exists
    except Exception as e:
        # Broad catch: summarization is best-effort and must never break routing.
        logger.error(f"Error summarizing chat history: {str(e)}")
|
| 142 |
+
|
| 143 |
+
def route_query(query):
    """Tool: classify the user's query as 'product_review' or 'generic'.

    Compacts the chat history first, then classifies the stored original
    query (falling back to the agent-supplied text when none was stored).
    """
    summarize_chat_history()
    target = query_memory.memories.get('original_query', query)
    return router_agent.classify_query(target)
|
| 149 |
+
|
| 150 |
+
def get_product_info(query):
    """Tool: fetch product details and wrap them as a final-answer payload.

    Uses the stored original query when available so the product agent sees
    the user's wording rather than the planner's rewrite.
    """
    summarize_chat_history()
    target = query_memory.memories.get('original_query', query)
    answer = product_review_agent.process(target)

    # Shape mirrors an AgentExecutor result so the planner treats it as final.
    payload = {
        "intermediate_steps": [],
        "output": answer,
        "action": "Final Answer",
        "action_input": answer,
    }
    return payload
|
| 163 |
+
|
| 164 |
+
def handle_generic_query(query):
    """Tool: answer a non-product query via the generic agent, as a final answer."""
    summarize_chat_history()
    # Prefer the user's original wording stored in query memory.
    target = query_memory.memories.get('original_query', query)
    answer = generic_agent.process(target)

    # Same final-answer envelope as get_product_info.
    payload = {
        "intermediate_steps": [],
        "output": answer,
        "action": "Final Answer",
        "action_input": answer,
    }
    return payload
|
| 176 |
+
|
| 177 |
+
def compose_response(response):
    """Tool: format the final answer by delegating to the composer agent.

    Thin wrapper registered as the 'compose_response' tool; the actual
    artifact-stripping and formatting live in composer_agent.compose_response.
    """
    return composer_agent.compose_response(response)
|
| 179 |
+
|
| 180 |
+
def execute(query):
    """Entry point: stash the raw query, then run the planning agent on it.

    Returns the agent's answer, or an error string (never raises) so the
    caller always receives displayable text.
    """
    try:
        # Keep the unmodified query so tools can recover the user's wording.
        query_memory.memories['original_query'] = query
        return agent.run(f"Process this user query: {query}")
    except Exception as e:
        logger.error(f"Error in planning agent: {str(e)}")
        return f"Error in planning agent: {str(e)}"
|
| 190 |
+
|
| 191 |
+
def clear_context():
    """Reset all conversation state across the planner and its sub-agents."""
    # Clear the planner's own chat history, if one was initialized.
    if chat_memory:
        chat_memory.clear()
    # Drop the stored 'original_query' (and any other stashed values).
    if query_memory:
        query_memory.memories.clear()
    # Cascade the reset to the downstream agents' memories.
    product_review_agent.clear_context()
    generic_agent.clear_context()
|
| 198 |
+
|
agent/product_review_agent.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ***********************************************************************************************
|
| 2 |
+
# Instruction for using the program
|
| 3 |
+
# ***********************************************************************************************
|
| 4 |
+
# Please make sure the embeddings.npy file is available in data folder
|
| 5 |
+
# Please make sure the documents.pkl file is available in data folder
|
| 6 |
+
# Please set the path appropriately inside the program. You will find the below two statements
|
| 7 |
+
# where you need to mention the correct path name.
|
| 8 |
+
# embedding_path = '/workspaces/IISC_cap_langchain/data/embeddings.npy'
|
| 9 |
+
# documents_path = '/workspaces/IISC_cap_langchain/documents.pkl'
|
| 10 |
+
# ***********************************************************************************************
|
| 11 |
+
|
| 12 |
+
import openai
|
| 13 |
+
import numpy as np
|
| 14 |
+
import pandas as pd
|
| 15 |
+
from openai import OpenAI
|
| 16 |
+
from langchain_community.chat_models import ChatOpenAI
|
| 17 |
+
from langchain_community.document_loaders import CSVLoader
|
| 18 |
+
from langchain_community.embeddings import OpenAIEmbeddings
|
| 19 |
+
from langchain_community.vectorstores import Chroma
|
| 20 |
+
from langchain.prompts import ChatPromptTemplate
|
| 21 |
+
from langchain.schema.output_parser import StrOutputParser
|
| 22 |
+
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
|
| 23 |
+
|
| 24 |
+
import faiss
|
| 25 |
+
import warnings
|
| 26 |
+
import os
|
| 27 |
+
|
| 28 |
+
warnings.filterwarnings("ignore")
|
| 29 |
+
import pickle
|
| 30 |
+
import logging
|
| 31 |
+
|
| 32 |
+
# Configure logging
|
| 33 |
+
logging.basicConfig(level=logging.INFO)
|
| 34 |
+
logger = logging.getLogger(__name__)
|
| 35 |
+
|
| 36 |
+
# Global variables
|
| 37 |
+
llm = None
|
| 38 |
+
chat_memory = None
|
| 39 |
+
# vectorstore = None
|
| 40 |
+
|
| 41 |
+
def initialize_product_review_agent(llm_instance, memory_instance):
    """Initialize the product review agent with LLM and memory instances."""
    global llm, chat_memory
    # Stash the injected collaborators in module globals for process() to use.
    llm, chat_memory = llm_instance, memory_instance
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def process(query):
    """Answer a product query via retrieval-augmented generation.

    Pipeline: embed the query -> FAISS nearest-neighbour search over
    precomputed document embeddings -> build a prompt from the retrieved
    context plus chat history -> ask the chat model -> record the turn in
    memory and return the response text.

    Requires ./data/embeddings.npy and ./documents.pkl on disk; raises
    FileNotFoundError when either is missing.
    """

    # Initialize the OpenAIEmbeddings
    embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")

    System_Prompt = """
Role and Capabilities:
You are an AI customer service specialist for Amazon, focusing on the various products available in Amazon. Your primary functions are:
1. Providing accurate product information including cost, availability, features, top review
2. Handling delivery-related queries
3. Addressing product availability
4. Offering technical support for electronics

Core Instructions:
1. Product Information:
- Provide detailed specifications and features
- Compare similar products when relevant
- Only discuss products found in the provided context
- Highlight key benefits and limitations
- Top review from customer

2. Price & Availability:
- Quote exact prices from the provided context
- Explain any pricing variations or discounts
- Provide clear stock availability information
- Mention delivery timeframes when available

3. Query Handling:
- Address the main query first, then provide additional relevant information
- For multi-part questions, structure answers in bullet points
- If information is missing from context, explicitly state this
- Suggest alternatives when a product is unavailable

Communication Guidelines:
1. Response Structure:
- Start with a direct answer to the query
- Provide supporting details and context
- End with a clear next step or call to action
- Include standard closing: "Thank you for choosing Amazon. Is there anything else I can help you with?"

2. Tone and Style:
- Professional yet friendly
- Clear and jargon-free language
- Empathetic and patient
- Concise but comprehensive

Limitations and Restrictions:
1. Only provide information present in the given context
2. Clearly state when information is not available
3. Never share personal or sensitive information
4. Don't make promises about delivery times unless explicitly stated in context

Error Handling:
1. Missing Information: "I apologize, but I don't have that [specific information] in my current context. Would you like me to provide related details about [alternative topic]?"
2. Out of Scope: "While I can't assist with [topic], I'd be happy to help you with electronics or home care products."
3. Technical Issues: "I apologize for any inconvenience. Could you please rephrase your question or provide more details?"

Response Format:
1. For product queries:
- Product name and model
- Price and availability
- Key features
- Top review
- Comparison among similar products (example : cell phone with cell phone)
- Recommendations if relevant

2. For service queries:
- Current status
- Next steps
- Timeline (if available)
- Contact options

Remember: Always verify information against the provided context before responding. Don't make assumptions or provide speculative information.

"""

    # Get existing chat history from memory
    # Messages are assumed to alternate Human/Assistant in pairs — TODO confirm.
    chat_history = ""
    if chat_memory:
        messages = chat_memory.chat_memory.messages
        if messages:
            chat_history = "\nPrevious conversation:\n"
            for i in range(0, len(messages), 2):
                if i + 1 < len(messages):
                    chat_history += f"Human: {messages[i].content}\n"
                    chat_history += f"Assistant: {messages[i+1].content}\n"

    # Check if embeddings already exist
    embedding_path = './data/embeddings.npy'
    documents_path = './documents.pkl'

    # Modify the get_embedding function to use LangChain's OpenAIEmbeddings
    # NOTE(review): the `engine` parameter is accepted but never used — the
    # model is fixed by the `embeddings` instance created above.
    def get_embedding(text, engine="text-embedding-ada-002"):
        return embeddings.embed_query(text)

    try:
        if not os.path.exists(embedding_path):
            raise FileNotFoundError(f"Embedding file not found at: {embedding_path}")
        if not os.path.exists(documents_path):
            raise FileNotFoundError(f"Documents file not found at: {documents_path}")
    except FileNotFoundError as e:
        logger.error(str(e))
        raise

    # NOTE(review): this condition is always True after the checks above
    # (missing files already raised) — the guard is redundant.
    if os.path.exists(embedding_path) and os.path.exists(documents_path):
        # Load existing embeddings and documents
        embeddings_list = np.load(embedding_path)
        with open(documents_path, 'rb') as f:
            documents = pickle.load(f)


        # Create FAISS index with faster search
        # Exact L2 search over all vectors; index is rebuilt on every call —
        # NOTE(review): consider caching it at module level for performance.
        embeddings_np = np.array(embeddings_list).astype('float32')
        index=faiss.IndexFlatL2(len(embeddings_list[0]))
        index.add(embeddings_np)

        query_embedding = get_embedding(query)
        query_embedding_np = np.array([query_embedding]).astype('float32')

        # Retrieve the 2 nearest documents for the query.
        _, indices = index.search(query_embedding_np, 2)
        retrieved_docs = [documents[i] for i in indices[0]]
        context = ' '.join(retrieved_docs)
        print("context retrieved :", context)
        print('*' * 100)

        # Include chat history in the prompt for context
        structured_prompt = f"""
Context:
{context}

{chat_history}

Current Query:
{query}
"""

        print("structured prompt created :", structured_prompt)
        print('*' * 100)
        # Create messages for the chat model
        messages = [
            {"role": "system", "content": System_Prompt},
            {"role": "user", "content": structured_prompt}
        ]

        # For chat completion, you can use LangChain's ChatOpenAI
        # NOTE(review): hardcodes gpt-3.5-turbo instead of using the module's
        # injected `llm` from initialize_product_review_agent — verify intended.
        chat_model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.5)
        response = chat_model.invoke(messages).content

        # Update memory
        if chat_memory:
            chat_memory.chat_memory.add_user_message(query)
            chat_memory.chat_memory.add_ai_message(response)

        logger.info(f"Successfully processed query: {query}")
        print("response returned by product_review_agent", response)
        return response
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def clear_context():
    """Clear the conversation memory"""
    # Only clears when a memory instance was injected via
    # initialize_product_review_agent; otherwise a silent no-op.
    if chat_memory:
        chat_memory.clear()
        logger.info("Conversation context cleared")
|
agent/router_agent.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain_openai import ChatOpenAI
|
| 2 |
+
from langchain.prompts import ChatPromptTemplate
|
| 3 |
+
from langchain.memory import ConversationBufferMemory, SimpleMemory
|
| 4 |
+
import logging
|
| 5 |
+
|
| 6 |
+
# Configure logging
|
| 7 |
+
logging.basicConfig(level=logging.INFO)
|
| 8 |
+
logger = logging.getLogger(__name__)
|
| 9 |
+
|
| 10 |
+
# Global variables — populated by initialize_router_agent(); None until then.
llm = None          # shared ChatOpenAI instance
chat_memory = None  # ConversationBufferMemory holding the chat history
query_memory = None  # not used in this module — presumably kept for symmetry with other agents; TODO confirm
prompt = None       # ChatPromptTemplate built from the classification system prompt
|
| 15 |
+
|
| 16 |
+
def initialize_router_agent(llm_instance, chat_memory_instance):
    """Store the shared LLM and memory and build the classification prompt.

    Must be called once before classify_query(); it populates the module-level
    ``llm``, ``chat_memory`` and ``prompt`` globals used by the other functions.
    """
    global llm, chat_memory, prompt
    llm = llm_instance
    chat_memory = chat_memory_instance

    # Few-shot system prompt: the model is instructed to answer with exactly
    # one of the two routing labels, "product_review" or "generic".
    system_prompt = """You are an intelligent query classification system for an e-commerce platform.
Your role is to accurately categorize incoming customer queries into one of two categories:

1. product_review:
- Queries about product features, specifications, or capabilities
- Questions about product prices and availability
- Requests for product reviews or comparisons
- Questions about product warranties or guarantees
- Inquiries about product shipping or delivery
- Questions about product compatibility or dimensions
- Requests for recommendations between products

2. generic:
- General customer service inquiries
- Account-related questions
- Technical support issues not related to specific products
- Website navigation help
- Payment or billing queries
- Return policy questions
- Company information requests
- Non-product related shipping questions
- Any other queries not directly related to specific products

INSTRUCTIONS:
- Analyze the input query carefully
- Respond ONLY with either "product_review" or "generic"
- Do not include any other text in your response
- If unsure, classify as "generic"

EXAMPLES:

User: "What are the features of the Samsung Galaxy S21?"
Assistant: product_review

User: "How much does the iPhone 13 Pro Max cost?"
Assistant: product_review

User: "Can you compare the Dell XPS 15 with the MacBook Pro?"
Assistant: product_review

User: "Is the Sony WH-1000XM4 headphone available in black?"
Assistant: product_review

User: "What's the battery life of the iPad Pro?"
Assistant: product_review

User: "I need help resetting my password"
Assistant: generic

User: "Where can I view my order history?"
Assistant: generic

User: "How do I update my shipping address?"
Assistant: generic

User: "What are your return policies?"
Assistant: generic

User: "I haven't received my refund yet"
Assistant: generic

User: "Do you ship internationally?"
Assistant: generic

User: "Can you recommend a good gaming laptop under $1000?"
Assistant: product_review

User: "What's the warranty period for electronics?"
Assistant: generic

User: "Is the Instant Pot dishwasher safe?"
Assistant: product_review

User: "How do I track my order?"
Assistant: generic
"""

    # Pair the fixed system prompt with the incoming user query.
    prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        ("human", "{input}")
    ])
    logger.info("Router agent initialized successfully")
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def classify_query(query):
    """Classify a customer query as "product_review" or "generic".

    Requires initialize_router_agent() to have been called first.  On any
    failure the function logs the error and falls back to "generic" so that
    routing never blocks a request.

    Args:
        query: the raw customer query string.

    Returns:
        "product_review" or "generic".
    """
    try:
        # Build the classification chain from the initialized prompt + LLM.
        chain = prompt | llm

        # Record the user query in conversation memory before classification.
        if chat_memory and hasattr(chat_memory, 'chat_memory'):
            chat_memory.chat_memory.add_user_message(query)

        # Run the model and normalize its answer.
        response = chain.invoke({"input": query})
        category = response.content.strip().lower()

        # Guard against unexpected model output.
        if category not in ("product_review", "generic"):
            category = "generic"  # Default fallback

        # Record the classification result in conversation memory too.
        if chat_memory and hasattr(chat_memory, 'chat_memory'):
            chat_memory.chat_memory.add_ai_message(f"Query classified as: {category}")

        logger.info(f"Query: {query}")
        logger.info(f"Classification: {category}")

        return category

    except Exception as e:
        # Was a bare print(); use the module logger like the rest of the file.
        logger.error(f"Error in routing: {str(e)}")
        return "generic"  # Default fallback on error
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def get_classification_history():
    """Return all messages recorded so far, or [] if memory is unavailable."""
    if not (chat_memory and hasattr(chat_memory, 'chat_memory')):
        return []
    return chat_memory.chat_memory.messages
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def clear_context():
    """Wipe the router agent's memory context, if one exists."""
    if not chat_memory:
        return
    chat_memory.clear()
    logger.info("Router agent context cleared")
|
app.py
CHANGED
|
@@ -1,7 +1,97 @@
|
|
| 1 |
-
import gradio as gr
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from langchain_community.chat_models import ChatOpenAI
|
| 3 |
+
from langchain.memory import ConversationBufferMemory, SimpleMemory
|
| 4 |
+
from langchain.agents import initialize_agent, AgentType
|
| 5 |
+
from dotenv import load_dotenv
|
| 6 |
+
import os
|
| 7 |
+
import agent.planning_agent as planning_agent
|
| 8 |
+
import logging
|
| 9 |
+
|
| 10 |
+
# Configure logging
|
| 11 |
+
logging.basicConfig(level=logging.INFO)
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
# Global variables — populated by initialize_components(); None until then.
llm = None          # shared ChatOpenAI instance used by all agents
chat_memory = None  # ConversationBufferMemory holding the conversation
query_memory = None  # SimpleMemory scratch store (holds 'original_query')
|
| 18 |
+
|
| 19 |
+
def initialize_components():
    """Create the shared LLM and memory objects and wire up the planning agent.

    Reads the OpenAI API key from the OA_API environment variable (loaded via
    .env) and mirrors it into OPENAI_API_KEY for downstream libraries.

    Raises:
        RuntimeError: if OA_API is not set (previously a bare KeyError).
    """
    global llm, chat_memory, query_memory
    load_dotenv()
    # Fail fast with a clear message instead of an opaque KeyError.
    api_key = os.environ.get('OA_API')
    if not api_key:
        raise RuntimeError("OA_API environment variable is not set; add it to your .env file")
    os.environ['OPENAI_API_KEY'] = api_key

    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0,
        api_key=api_key
    )

    # Initialize memories: conversation history plus a per-request scratch store.
    chat_memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True
    )
    query_memory = SimpleMemory()

    # Initialize planning agent with both memories.
    planning_agent.initialize_planning_agent(llm, chat_memory, query_memory)

    logger.info("Components initialized successfully")
|
| 42 |
+
|
| 43 |
+
def process_query(query, history):
    """Handle one chat turn: restore memory, run the planning agent, record it.

    Args:
        query: the user's current message.
        history: Gradio chat history as a list of (user, assistant) pairs.

    Returns:
        The agent's response string, or an error message on failure.
    """
    try:
        # Rebuild LangChain memory from Gradio's history.  Clear first so the
        # restore is idempotent: the previous version re-appended the entire
        # history on every call, duplicating the conversation each turn.
        if chat_memory and hasattr(chat_memory, 'chat_memory'):
            chat_memory.clear()
            if history:
                for human_msg, ai_msg in history:
                    chat_memory.chat_memory.add_user_message(human_msg)
                    chat_memory.chat_memory.add_ai_message(ai_msg)

        # Store original query in query memory for downstream agents.
        query_memory.memories['original_query'] = query

        # Execute query through planning agent
        response = planning_agent.execute(query)

        # Add current interaction to chat memory
        if chat_memory and hasattr(chat_memory, 'chat_memory'):
            chat_memory.chat_memory.add_user_message(query)
            chat_memory.chat_memory.add_ai_message(response)

        return response

    except Exception as e:
        error_msg = f"Error processing query: {str(e)}"
        logger.error(f"Error details: {str(e)}")

        # Record the failed turn so the conversation stays consistent.
        if chat_memory and hasattr(chat_memory, 'chat_memory'):
            chat_memory.chat_memory.add_user_message(query)
            chat_memory.chat_memory.add_ai_message(error_msg)

        return error_msg
| 74 |
+
|
| 75 |
+
def clear_context():
    """Wipe all agent state; returns ([], []) to reset the chatbot and textbox."""
    # Clear downstream agent context first, then the app-level memories.
    planning_agent.clear_context()
    chat_memory.clear()
    query_memory.memories.clear()
    # Empty values for the two Gradio output components.
    return ([], [])
|
| 80 |
+
|
| 81 |
+
def create_gradio_app():
    """Build the Gradio UI wired to this module's query/clear handlers."""
    # Local import — presumably avoids an import-time circular dependency; TODO confirm.
    from interface import create_interface
    ui = create_interface(process_query, clear_context)
    return ui
|
| 84 |
+
|
| 85 |
+
def main():
    """Main application entry point: initialize components, launch the web UI."""
    try:
        initialize_components()
        demo = create_gradio_app()
        demo.queue()
        # Bind to all interfaces on the standard HF Spaces port.
        demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
    except Exception as exc:
        logger.error(f"Error in main: {str(exc)}")
        raise
|
| 95 |
+
|
| 96 |
+
# Run the application only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
data/cleaned_dataset_full.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/embeddings.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:27b62b1c4d0fdddeb34c0985059ac7782e0d4ff385b8f27e87ed41ab1e0a83a3
|
| 3 |
+
size 12288128
|
documents.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0132d52582d53422098dfaec3a361e369e791db0a137d8b012ad920851f18b3f
|
| 3 |
+
size 2382882
|
interface.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
|
| 3 |
+
def create_interface(process_query, clear_context):
    """Assemble the chat UI; all behavior is delegated to the given callables.

    Args:
        process_query: fn(message, history) -> response string.
        clear_context: fn() -> (history, textbox) reset values.

    Returns:
        The gr.Blocks app.
    """
    with gr.Blocks(title="AI Assistant") as ui:
        # Conversation display.
        chat_window = gr.Chatbot(
            [],
            elem_id="chatbot",
            bubble_full_width=False,
            height=400
        )

        with gr.Row():
            user_box = gr.Textbox(
                label="Your Message",
                placeholder="Type your message here...",
                scale=8
            )
            send_btn = gr.Button("Submit", scale=1)

        reset_btn = gr.Button("Clear")

        def _on_send(message, history):
            # Get the reply, append the turn, and clear the textbox.
            reply = process_query(message, history)
            history.append((message, reply))
            return "", history

        # Enter key and Submit button share the same handler and I/O wiring.
        io_spec = dict(fn=_on_send, inputs=[user_box, chat_window], outputs=[user_box, chat_window])
        user_box.submit(**io_spec)
        send_btn.click(**io_spec)

        reset_btn.click(fn=clear_context, inputs=None, outputs=[chat_window, user_box])

    return ui
|
requirements.txt
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LangChain Ecosystem
|
| 2 |
+
langchain==0.1.3
|
| 3 |
+
langchain_openai==0.0.5
|
| 4 |
+
langchain-community
|
| 5 |
+
langchain-core
|
| 6 |
+
|
| 7 |
+
# Vector Databases & Similarity Search
|
| 8 |
+
chromadb==0.5.0
|
| 9 |
+
faiss-cpu
|
| 10 |
+
|
| 11 |
+
# OpenAI
|
| 12 |
+
openai==1.10
|
| 13 |
+
tiktoken>=0.5.2
|
| 14 |
+
|
| 15 |
+
# UI Framework
|
| 16 |
+
gradio==4.44.1
|
| 17 |
+
setuptools>=65.5.1
|
| 18 |
+
|
| 19 |
+
# Data Processing & Utils
|
| 20 |
+
numpy==1.26.2
|
| 21 |
+
pandas==2.1.3
|
| 22 |
+
python-dotenv==1.0.0
|
| 23 |
+
|
| 24 |
+
# Core Dependencies
|
| 25 |
+
#pydantic==2.5.2
|
| 26 |
+
typing-extensions==4.8.0
|
| 27 |
+
requests==2.31.0
|