Chidamma committed on
Commit
a9f1d74
·
verified ·
1 Parent(s): 5bd134f

Upload 5 files

Browse files
agent/composer_agent.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+
3
# Module-wide logging setup: one root config at import time plus a
# module-scoped logger (idiomatic `logging.getLogger(__name__)`).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
6
+
7
def compose_response(response: str) -> str:
    """Post-process the final agent response before it is returned to the user.

    Strips known role-label artifacts and applies standard formatting.

    Args:
        response: Raw response text produced by an upstream agent.

    Returns:
        The cleaned and formatted response; on any processing error the
        original text is returned unchanged (best-effort behavior).
    """
    try:
        # Use the module logger instead of bare print() so debug output
        # respects the configured log level and destination.
        logger.debug("composer agent input: %s", response)

        # Remove any system artifacts or unwanted patterns.
        response = remove_system_artifacts(response)

        # Apply standard formatting.
        response = format_response(response)

        return response

    except Exception as e:
        logger.error(f"Error in composition: {str(e)}")
        return response  # Fallback to original
25
+
26
def remove_system_artifacts(text: str) -> str:
    """Strip known role labels ("Assistant:", "AI:", "Human:", "User:")
    from *text* and trim surrounding whitespace.

    Note: labels are removed wherever they occur, not only as prefixes.
    """
    result = text
    for label in ("Assistant:", "AI:", "Human:", "User:"):
        result = result.replace(label, "")
    return result.strip()
33
+
34
def format_response(text: str) -> str:
    """Apply standard presentation formatting to *text*.

    - Collapses runs of three or more consecutive newlines down to two.
    - Uppercases the first letter of each ". "-separated sentence without
      lowercasing the rest (``str.capitalize`` would mangle acronyms such
      as "NASA" or mid-sentence "I").
    - Ensures the text ends with terminal punctuation.

    Args:
        text: The response text to format.

    Returns:
        The formatted text; an empty input is returned unchanged.
    """
    # Loop until no triple-newline remains: a single .replace() pass would
    # leave residue for runs of four or more newlines.
    formatted = text
    while "\n\n\n" in formatted:
        formatted = formatted.replace("\n\n\n", "\n\n")

    # Capitalize only the first character of each sentence.
    formatted = ". ".join(
        s.strip()[:1].upper() + s.strip()[1:] for s in formatted.split(". ")
    )

    # Ensure proper ending punctuation.
    if formatted and formatted[-1] not in ('.', '!', '?'):
        formatted += '.'

    return formatted
47
+
agent/generic_agent.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_openai import ChatOpenAI
2
+ from langchain.prompts import ChatPromptTemplate
3
+ from langchain.memory import ConversationBufferMemory
4
+ import logging
5
+
6
+ # Configure logging
7
+ logging.basicConfig(level=logging.INFO)
8
+ logger = logging.getLogger(__name__)
9
+
10
# Module-level singletons, populated by initialize_generic_agent().
llm = None     # shared chat LLM instance
memory = None  # shared conversation memory
prompt = None  # ChatPromptTemplate built at initialization

# System prompt for the generic customer-support persona.
# Spelling and punctuation errors in the original prompt ("undersatnd",
# "checkpints", "conext", "haave", "furter", "qiestions", "resasssurance",
# and an unterminated quote in the greeting example) have been corrected,
# since they are fed verbatim to the LLM and degrade response quality.
system_prompt = """
Role
You are a knowledgeable and compassionate customer support chatbot specializing in various
products available in Amazon product catalogue. Your goal is to provide accurate, detailed
and empathetic information in response to the customer queries on various issues, challenges
faced by customer strictly related to the products available in Amazon catalogue.
Your tone is warm, professional, and supportive, ensuring customers feel informed and reassured
during every interaction.

Instructions
Shipment Tracking: When a customer asks about their shipment, request the tracking number and
tell them you will call back in 1 hour and provide the status on customer's callback number.
Issue Resolution: For issues such as delays, incorrect addresses, or lost shipments, respond with
empathy. Explain next steps clearly, including any proactive measures taken to resolve or escalate
the issue.
Proactive Alerts: Offer customers the option to receive notifications about key updates, such as
when shipments reach major checkpoints or encounter delays.
FAQ Handling: Address frequently asked questions about handling products, special packaging
requirements, and preferred delivery times with clarity and simplicity.
Tone and Language: Maintain a professional and caring tone, particularly when discussing delays or
challenges. Show understanding and reassurance.

Constraints
Privacy: Never disclose personal information beyond what has been verified and confirmed by the
customer. Always ask for consent before discussing details about shipments.
Conciseness: Ensure responses are clear and detailed, avoiding jargon unless necessary for context.
Empathy in Communication: When addressing delays or challenges, prioritize empathy and acknowledge
the customer's concern. Provide next steps and reassurance.
Accuracy: Ensure all information shared with customer are accurate and up-to-date. If the query is
outside Amazon's products and services, clearly say I do not know.
Jargon-Free Language: Use simple language to explain logistics terms or processes to customers,
particularly when dealing with customer on sensitive matter.

Examples

Greetings

User: "Hi, I am John."
AI: "Hi John. How can I assist you today?"

Issue Resolution for Delayed product Shipment

User: "I am worried about the delayed Amazon shipment."
AI: "I understand your concern, and I'm here to help. Let me check the
status of your shipment. If needed, we'll coordinate with the carrier to ensure
your product's safety and provide you with updates along the way."

Proactive Update Offer

User: "Can I get updates on my product shipment's address."
AI: "Absolutely! I can send you notification whenever your product's shipment
reaches a checkpoint or if there are any major updates. Would you like to set that
up ?"

Out of context question

User: "What is the capital city of Nigeria ?"
AI: "Sorry, I do not know. I know only about Amazon products. In case you have any further
questions on the products and services of Amazon, I can help you."

Closure

User: "No Thank you."
AI: "Thank you for contacting Amazon. Have a nice day!"
"""
80
+
81
+
82
def initialize_generic_agent(llm_instance, memory_instance):
    """Bind the shared LLM and memory, then build the chat prompt template.

    Must be called once before process() is used.
    """
    global llm, memory, prompt

    llm = llm_instance
    memory = memory_instance
    # System persona first, then the user's turn as a template slot.
    prompt = ChatPromptTemplate.from_messages(
        [("system", system_prompt), ("human", "{input}")]
    )
    logger.info("generic agent initialized successfully")
91
+
92
def process(query):
    """Run the generic-support chain on *query* and return the reply text."""
    chain = prompt | llm
    answer = chain.invoke({"input": query}).content

    # Persist the exchange so subsequent turns retain conversational context.
    if memory:
        memory.save_context({"input": query}, {"output": answer})
    return answer
100
+
101
def clear_context():
    """Reset the conversation memory, logging the outcome.

    Raises:
        Exception: re-raised after logging if clearing the memory fails.
    """
    try:
        if not memory:
            logger.warning("No memory instance available to clear")
            return
        memory.clear()
        logger.info("Conversation context cleared successfully")
    except Exception as e:
        logger.error(f"Error clearing context: {str(e)}")
        raise
agent/planning_agent.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_openai import ChatOpenAI
2
+ from langchain.agents import initialize_agent, AgentType
3
+ from clearml import Logger, Task
4
+ from langchain.tools import Tool
5
+ from langchain.memory import ConversationBufferMemory, SimpleMemory
6
+ from langchain_community.callbacks import ClearMLCallbackHandler
7
+ from langchain_core.callbacks import StdOutCallbackHandler
8
+ from langchain_community.utilities import SerpAPIWrapper
9
+ import agent.router_agent as router_agent
10
+ import agent.product_review_agent as product_review_agent
11
+ import agent.generic_agent as generic_agent
12
+ import agent.composer_agent as composer_agent
13
+ import logging
14
+
15
# Silence httpx's per-request INFO logs so agent reasoning stays readable
# in the console output.
logging.getLogger("httpx").setLevel(logging.WARNING)  # added on 23-Nob

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Module-level singletons, populated by initialize_planning_agent().
llm = None               # shared chat LLM instance
chat_memory = None       # conversation memory shared with sub-agents
query_memory = None      # simple memory holding the original user query
agent = None             # the initialized LangChain ReAct agent
clearml_callback = None  # ClearML callback handler(s) for tracking
29
+
30
def initialize_planning_agent(llm_instance, chat_memory_instance, query_memory_instance, clearml_instance):
    """Wire up the planner: bind shared state, initialize sub-agents, and
    build the ReAct agent with its tool set and system prompt.

    Args:
        llm_instance: Shared chat LLM used by all agents.
        chat_memory_instance: Conversation memory shared across agents.
        query_memory_instance: Simple memory for stashing the original query.
        clearml_instance: ClearML callback handler(s) for run tracking.
    """
    global llm, chat_memory, query_memory, agent, clearml_callback

    llm = llm_instance
    chat_memory = chat_memory_instance
    query_memory = query_memory_instance
    clearml_callback = clearml_instance
    # NOTE(review): a hard-coded SerpAPI key that was assigned to an unused
    # local here has been removed -- it was dead code and secrets must never
    # be committed to source control. Load such keys from the environment.

    # Initialize the downstream agents that the tools below delegate to.
    router_agent.initialize_router_agent(llm, chat_memory)
    product_review_agent.initialize_product_review_agent(llm, chat_memory)
    generic_agent.initialize_generic_agent(llm, chat_memory)

    tools = [
        Tool(
            name="route_query",
            func=route_query,
            description="Determine query type. Returns either 'product_review' or 'generic'"
        ),
        Tool(
            name="get_product_info",
            func=get_product_info,
            description="Use this to get product-related data such as features, prices, availability, or reviews"
        ),
        Tool(
            name="handle_generic_query",
            func=handle_generic_query,
            description="Use this to get response to user queries which are generic and where the retrieval of product details are not required"
        ),
        Tool(
            name="compose_response",
            func=compose_response,
            description="Use this to only format the response. After this step, return the formatted response to main.py"
        )
    ]

    # In-context instructions for the ReAct planner. The examples previously
    # referenced a nonexistent tool "compose_responses"; they now name the
    # registered tool "compose_response" so the agent can actually invoke it.
    system_prompt = """You are an efficient AI planning agent. Follow these rules strictly:

CRITICAL INSTRUCTION:
For simple queries listed below, skip the route_query and directly go to handle_generic_query.

SIMPLE QUERIES (NEVER use tools):
1. Greetings: "hi", "hello", "hey", "good morning", "good evening", "good afternoon"
2. Farewells: "bye", "goodbye", "see you", "take care"
3. Thank you messages: "thanks", "thank you", "thanks a lot", "appreciate it"
4. Simple confirmations: "okay", "yes", "no", "sure", "alright"
5. Basic courtesy: "how are you?", "how are you doing?", "what's up?", "what are you doing?"
6. Simple acknowledgments: "got it", "understood", "I see"

FOR ALL OTHER QUERIES:
1. Use route_query to determine if query is product_review or generic
2. If route_query returns 'generic', use handle_generic_query and STOP
3. If route_query returns 'product_review', use get_product_info and STOP

EXAMPLES:

User: "Hi"
Thought: This is a Simple greeting, I will use handle_generic_query to get appropriate response
Action: handle_generic_query
Observation: "Hi! How can I help you today?"
Thought: I have got the final answer. I will use compose_response to format the response.
Action: compose_response
Final Answer: "Hi! How can I help you today?"

User: "I got my package delivered yesterday. It was delivered very late. I want to file a complaint."
Thought: This is a generic query that does not require product details. I will use handle_generic_query to get appropriate response.
Action: handle_generic_query
Action Input: User query: I got my package delivered yesterday. It was delivered very late. I want to file a complaint.
Observation: {'intermediate_steps': [], 'output': "I'm sorry to hear about the delay in your package delivery. I understand your frustration and I'm here to assist you with filing a complaint. To better assist you, could you please provide me with the tracking number of your package? Once I have that information, I will look into the matter and ensure that your feedback is addressed appropriately.", 'action': 'Final Answer', 'action_input': "I'm sorry to hear about the delay in your package delivery. I understand your frustration and I'm here to assist you with filing a complaint. To better assist you, could you please provide me with the tracking number of your package? Once I have that information, I will look into the matter and ensure that your feedback is addressed appropriately."}
Thought:I have got the final answer. I will use compose_response to format the response.
Action: compose_response
Final Answer: I'm sorry to hear about the delay in your package delivery. I understand your frustration and I'm here to assist you with filing a complaint. To better assist you, could you please provide me with the tracking number of your package? Once I have that information, I will look into the matter and ensure that your feedback is addressed appropriately.

Remember: For simple queries listed above, respond immediately with Final Answer WITHOUT using tools.
"""

    agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        callbacks=clearml_callback,
        verbose=True,
        memory=chat_memory,
        system_message=system_prompt,
        early_stopping_method="generate",
        max_iterations=2
    )
    logger.info("Planning agent initialized successfully")
123
+
124
def summarize_chat_history():
    """Summarize the chat history to retain context without overwhelming memory.

    Replaces the full message buffer with a single string containing the
    last five message texts. Errors are logged and swallowed (best-effort).
    """
    try:
        if chat_memory:
            # Retrieve the chat history; assumed to be a list of LangChain
            # message objects exposing .content -- TODO confirm against the
            # concrete memory class supplied at initialization.
            chat_history = chat_memory.buffer

            logger.info(f"Chat history type: {type(chat_history)}")

            if chat_history:
                # Extract plain text from message objects, skipping any
                # entries without a .content attribute.
                text_messages = [msg.content for msg in chat_history if hasattr(msg, 'content')]

                logger.info(f"Extracted messages: {text_messages}")

                # Keep only the last 5 messages as the "summary".
                summary = "\n".join(text_messages[-5:])  # Adjust as needed
                logger.info(f"Generated summary: {summary}")

                # Drop the old history before inserting the summary.
                chat_memory.clear()

                # NOTE(review): this appends a plain str to a buffer that
                # otherwise holds message objects; downstream code that
                # expects .content on every entry would skip or break on
                # it -- verify against the memory implementation.
                chat_memory.buffer.append(summary)
                # Or if there's a method to set the buffer, do that:
                # chat_memory.set_memory([summary])  # If such a method exists
    except Exception as e:
        logger.error(f"Error summarizing chat history: {str(e)}")
152
+
153
def route_query(query):
    """Classify the user's query via the router agent.

    Prefers the original query stashed in query_memory over the (possibly
    agent-rewritten) tool input.
    """
    # Compact the dialogue history before classification.
    summarize_chat_history()
    original = query_memory.memories.get('original_query', query)
    return router_agent.classify_query(original)
159
+
160
def get_product_info(query):
    """Answer a product-related query via the product-review agent.

    Returns a ReAct-style final-answer dict so the planner stops iterating.
    """
    # Compact the dialogue history before retrieval.
    summarize_chat_history()
    original = query_memory.memories.get('original_query', query)
    answer = product_review_agent.process(original)

    return {
        "intermediate_steps": [],
        "output": answer,
        "action": "Final Answer",
        "action_input": answer,
    }
173
+
174
def handle_generic_query(query):
    """Answer a non-product query via the generic agent.

    Returns a ReAct-style final-answer dict so the planner stops iterating.
    """
    # Compact the dialogue history before delegation.
    summarize_chat_history()
    original = query_memory.memories.get('original_query', query)
    answer = generic_agent.process(original)

    return {
        "intermediate_steps": [],
        "output": answer,
        "action": "Final Answer",
        "action_input": answer,
    }
186
+
187
def compose_response(response):
    """Delegate final response formatting to the composer agent."""
    return composer_agent.compose_response(response)
189
+
190
def execute(query):
    """Planner entry point: run the ReAct agent on *query*.

    Returns the agent's final answer, or an error string on failure
    (errors are logged, never raised, so the caller always gets text).
    """
    try:
        # Stash the raw query so tools can recover it after the history
        # has been summarized or the agent has rewritten the input.
        query_memory.memories['original_query'] = query
        return agent.run(f"Process this user query: {query}")
    except Exception as e:
        logger.error(f"Error in planning agent: {str(e)}")
        return f"Error in planning agent: {str(e)}"
202
+
203
def clear_context():
    """Wipe planner-level state and propagate the reset to sub-agents."""
    if chat_memory:
        chat_memory.clear()
    if query_memory:
        query_memory.memories.clear()
    # Sub-agents keep their own references to the shared memory; let each
    # one run its own cleanup as well.
    for sub_agent in (product_review_agent, generic_agent):
        sub_agent.clear_context()
210
+
agent/product_review_agent.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ***********************************************************************************************
2
+ # Instruction for using the program
3
+ # ***********************************************************************************************
4
+ # Please make sure the embeddings.npy file is available in data folder
5
+ # Please make sure the documents.pkl file is available in data folder
6
+ # Please set the path appropriately inside the program. You will find the below two statements
7
+ # where you need to mention the correct path name.
8
+ # embedding_path = '/workspaces/IISC_cap_langchain/data/embeddings.npy'
9
+ # documents_path = '/workspaces/IISC_cap_langchain/documents.pkl'
10
+ # ***********************************************************************************************
11
+
12
+ import openai
13
+ import numpy as np
14
+ import pandas as pd
15
+ from openai import OpenAI
16
+ from langchain_community.chat_models import ChatOpenAI
17
+ from langchain_community.document_loaders import CSVLoader
18
+ from langchain_community.embeddings import OpenAIEmbeddings
19
+ from langchain_community.vectorstores import Chroma
20
+ from langchain.prompts import ChatPromptTemplate
21
+ from langchain.schema.output_parser import StrOutputParser
22
+ from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
23
+ from clearml import StorageManager, Dataset
24
+
25
+ import faiss
26
+ import warnings
27
+ import os
28
+
29
+ warnings.filterwarnings("ignore")
30
+ import pickle
31
+ import logging
32
+
33
+ # Configure logging
34
+ logging.basicConfig(level=logging.INFO)
35
+ logger = logging.getLogger(__name__)
36
+
37
# Module-level singletons, populated by initialize_product_review_agent().
llm = None          # shared chat LLM instance (currently unused by process(),
                    # which constructs its own ChatOpenAI -- see NOTE there)
chat_memory = None  # conversation memory shared with the planner
# vectorstore = None
41
+
42
def initialize_product_review_agent(llm_instance, memory_instance):
    """Store the shared LLM and memory instances for later use by process()."""
    global llm, chat_memory

    llm, chat_memory = llm_instance, memory_instance
48
+
49
+
50
+
51
def process(query):
    """Answer a product query with retrieval-augmented generation.

    Pipeline: embed the query, retrieve the 2 nearest documents from a
    FAISS index built over precomputed embeddings, and ask gpt-3.5-turbo
    to answer using that context plus the running chat history.

    Requires ./data/embeddings.npy and ./documents.pkl to exist.

    Raises:
        FileNotFoundError: if either precomputed artifact is missing.
    """
    # Initialize the OpenAIEmbeddings client used to embed the query.
    embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")

    System_Prompt = """
    Role and Capabilities:
    You are an AI customer service specialist for Amazon, focusing on the various products available in Amazon. Your primary functions are:
    1. Providing accurate product information including cost, availability, features, top review
    2. Handling delivery-related queries
    3. Addressing product availability
    4. Offering technical support for electronics

    Core Instructions:
    1. Product Information:
    - Provide detailed specifications and features
    - Compare similar products when relevant
    - Only discuss products found in the provided context
    - Highlight key benefits and limitations
    - Top review from customer

    2. Price & Availability:
    - Quote exact prices from the provided context
    - Explain any pricing variations or discounts
    - Provide clear stock availability information
    - Mention delivery timeframes when available

    3. Query Handling:
    - Address the main query first, then provide additional relevant information
    - For multi-part questions, structure answers in bullet points
    - If information is missing from context, explicitly state this
    - Suggest alternatives when a product is unavailable

    Communication Guidelines:
    1. Response Structure:
    - Start with a direct answer to the query
    - Provide supporting details and context
    - End with a clear next step or call to action
    - Include standard closing: "Thank you for choosing Amazon. Is there anything else I can help you with?"

    2. Tone and Style:
    - Professional yet friendly
    - Clear and jargon-free language
    - Empathetic and patient
    - Concise but comprehensive

    Limitations and Restrictions:
    1. Only provide information present in the given context
    2. Clearly state when information is not available
    3. Never share personal or sensitive information
    4. Don't make promises about delivery times unless explicitly stated in context

    Error Handling:
    1. Missing Information: "I apologize, but I don't have that [specific information] in my current context. Would you like me to provide related details about [alternative topic]?"
    2. Out of Scope: "While I can't assist with [topic], I'd be happy to help you with electronics or home care products."
    3. Technical Issues: "I apologize for any inconvenience. Could you please rephrase your question or provide more details?"

    Response Format:
    1. For product queries:
    - Product name and model
    - Price and availability
    - Key features
    - Top review
    - Comparison among similar products (example : cell phone with cell phone)
    - Recommendations if relevant

    2. For service queries:
    - Current status
    - Next steps
    - Timeline (if available)
    - Contact options

    Remember: Always verify information against the provided context before responding. Don't make assumptions or provide speculative information.

    """

    # Render the accumulated chat history as alternating Human/Assistant
    # lines. Assumes messages strictly alternate user/assistant pairs --
    # TODO confirm; a dangling unpaired message is silently dropped.
    chat_history = ""
    if chat_memory:
        messages = chat_memory.chat_memory.messages
        if messages:
            chat_history = "\nPrevious conversation:\n"
            for i in range(0, len(messages), 2):
                if i + 1 < len(messages):
                    chat_history += f"Human: {messages[i].content}\n"
                    chat_history += f"Assistant: {messages[i+1].content}\n"

    # Paths to the precomputed artifacts.
    # NOTE(review): hard-coded relative paths; these break when the process
    # is launched from another working directory -- consider configuration.
    embedding_path = './data/embeddings.npy'
    documents_path = './documents.pkl'

    # Thin wrapper over LangChain's OpenAIEmbeddings.
    # NOTE(review): the `engine` parameter is accepted but never used.
    def get_embedding(text, engine="text-embedding-ada-002"):
        return embeddings.embed_query(text)

    # Fail fast with a logged error when either artifact is missing.
    try:
        if not os.path.exists(embedding_path):
            raise FileNotFoundError(f"Embedding file not found at: {embedding_path}")
        if not os.path.exists(documents_path):
            raise FileNotFoundError(f"Documents file not found at: {documents_path}")
    except FileNotFoundError as e:
        logger.error(str(e))
        raise

    # NOTE(review): this existence check is redundant -- the try-block above
    # already raised if either file was missing, so this is always true here.
    if os.path.exists(embedding_path) and os.path.exists(documents_path):
        # Load existing embeddings and documents
        embeddings_list = np.load(embedding_path)
        with open(documents_path, 'rb') as f:
            documents = pickle.load(f)

    # NOTE(review): creating and uploading a ClearML dataset on EVERY query
    # is expensive and duplicative; this belongs in an offline ingestion
    # step, not the request path.
    dataset = Dataset.create(dataset_name="embeddingsdataset", dataset_project="dataset_embeddings")
    dataset.add_files(path=documents_path)
    dataset.upload()
    dataset.finalize()

    # Build an exact (brute-force L2) FAISS index over all embeddings.
    # NOTE(review): rebuilt from scratch per query -- hoist to initialization
    # for performance once the artifact paths are stable.
    embeddings_np = np.array(embeddings_list).astype('float32')
    index=faiss.IndexFlatL2(len(embeddings_list[0]))
    index.add(embeddings_np)

    # Embed the query and retrieve the 2 nearest documents.
    query_embedding = get_embedding(query)
    query_embedding_np = np.array([query_embedding]).astype('float32')

    _, indices = index.search(query_embedding_np, 2)
    retrieved_docs = [documents[i] for i in indices[0]]
    context = ' '.join(retrieved_docs)
    print("context retrieved :", context)
    print('*' * 100)

    # Assemble the user turn: retrieved context + prior dialogue + query.
    structured_prompt = f"""
    Context:
    {context}

    {chat_history}

    Current Query:
    {query}
    """

    print("structured prompt created :", structured_prompt)
    print('*' * 100)
    # Create messages for the chat model
    messages = [
        {"role": "system", "content": System_Prompt},
        {"role": "user", "content": structured_prompt}
    ]

    # NOTE(review): constructs a fresh ChatOpenAI instead of using the
    # module-level `llm` set by initialize_product_review_agent -- confirm
    # whether that is intentional.
    chat_model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.5)
    response = chat_model.invoke(messages).content

    # Record the exchange in the shared conversation memory.
    if chat_memory:
        chat_memory.chat_memory.add_user_message(query)
        chat_memory.chat_memory.add_ai_message(response)

    logger.info(f"Successfully processed query: {query}")
    print("response returned by product_review_agent", response)
    return response
211
+
212
+
213
def clear_context():
    """Wipe this agent's conversation memory, if one was configured."""
    if not chat_memory:
        return
    chat_memory.clear()
    logger.info("Conversation context cleared")
agent/router_agent.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_openai import ChatOpenAI
2
+ from langchain.prompts import ChatPromptTemplate
3
+ from langchain.memory import ConversationBufferMemory, SimpleMemory
4
+ import logging
5
+
6
+ # Configure logging
7
+ logging.basicConfig(level=logging.INFO)
8
+ logger = logging.getLogger(__name__)
9
+
10
# Module-level singletons, populated by initialize_router_agent().
llm = None           # shared chat LLM instance
chat_memory = None   # optional conversation memory
query_memory = None  # NOTE(review): declared but never assigned or read in
                     # this module -- presumably kept for interface symmetry
prompt = None        # classification ChatPromptTemplate
15
+
16
def initialize_router_agent(llm_instance, chat_memory_instance):
    """Bind the shared LLM and memory, and build the classification prompt.

    Must be called once before classify_query() is used. The system prompt
    constrains the model to answer with exactly "product_review" or
    "generic"; classify_query() still validates the output defensively.
    """
    global llm, chat_memory, prompt
    llm = llm_instance
    chat_memory = chat_memory_instance

    system_prompt = """You are an intelligent query classification system for an e-commerce platform.
    Your role is to accurately categorize incoming customer queries into one of two categories:

    1. product_review:
    - Queries about product features, specifications, or capabilities
    - Questions about product prices and availability
    - Requests for product reviews or comparisons
    - Questions about product warranties or guarantees
    - Inquiries about product shipping or delivery
    - Questions about product compatibility or dimensions
    - Requests for recommendations between products

    2. generic:
    - General customer service inquiries
    - Account-related questions
    - Technical support issues not related to specific products
    - Website navigation help
    - Payment or billing queries
    - Return policy questions
    - Company information requests
    - Non-product related shipping questions
    - Any other queries not directly related to specific products

    INSTRUCTIONS:
    - Analyze the input query carefully
    - Respond ONLY with either "product_review" or "generic"
    - Do not include any other text in your response
    - If unsure, classify as "generic"

    EXAMPLES:

    User: "What are the features of the Samsung Galaxy S21?"
    Assistant: product_review

    User: "How much does the iPhone 13 Pro Max cost?"
    Assistant: product_review

    User: "Can you compare the Dell XPS 15 with the MacBook Pro?"
    Assistant: product_review

    User: "Is the Sony WH-1000XM4 headphone available in black?"
    Assistant: product_review

    User: "What's the battery life of the iPad Pro?"
    Assistant: product_review

    User: "I need help resetting my password"
    Assistant: generic

    User: "Where can I view my order history?"
    Assistant: generic

    User: "How do I update my shipping address?"
    Assistant: generic

    User: "What are your return policies?"
    Assistant: generic

    User: "I haven't received my refund yet"
    Assistant: generic

    User: "Do you ship internationally?"
    Assistant: generic

    User: "Can you recommend a good gaming laptop under $1000?"
    Assistant: product_review

    User: "What's the warranty period for electronics?"
    Assistant: generic

    User: "Is the Instant Pot dishwasher safe?"
    Assistant: product_review

    User: "How do I track my order?"
    Assistant: generic
    """

    prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        ("human", "{input}")
    ])
    logger.info("Router agent initialized successfully")
103
+
104
+
105
def classify_query(query):
    """Classify *query* as 'product_review' or 'generic'.

    Runs the classification prompt through the shared LLM, records both
    the query and the verdict in chat memory (when available), and falls
    back to 'generic' on any unexpected output or error.

    Args:
        query: The raw user query text.

    Returns:
        str: "product_review" or "generic".
    """
    try:
        # Create chain with memory
        chain = prompt | llm

        # Record the user turn before classification.
        if chat_memory and hasattr(chat_memory, 'chat_memory'):
            chat_memory.chat_memory.add_user_message(query)

        # Classify the query.
        response = chain.invoke({"input": query})
        category = response.content.strip().lower()

        # Anything outside the two expected labels falls back to the
        # safe default bucket.
        if category not in ["product_review", "generic"]:
            category = "generic"  # Default fallback

        # Record the verdict so later turns can see the routing decision.
        if chat_memory and hasattr(chat_memory, 'chat_memory'):
            chat_memory.chat_memory.add_ai_message(f"Query classified as: {category}")

        logger.info(f"Query: {query}")
        logger.info(f"Classification: {category}")
        # Route tracing through the module logger (was bare print calls),
        # so verbosity is controlled by the logging configuration.
        logger.debug("router agent -- query: %s, category: %s", query, category)

        return category

    except Exception as e:
        # Use the module logger (was print) so failures reach the log stream.
        logger.error(f"Error in routing: {str(e)}")
        return "generic"  # Default fallback on error
138
+
139
+
140
def get_classification_history():
    """Return the recorded classification messages, or [] when no usable
    memory has been configured."""
    has_history = chat_memory and hasattr(chat_memory, 'chat_memory')
    return chat_memory.chat_memory.messages if has_history else []
145
+
146
+
147
def clear_context():
    """Wipe the router's conversation memory, if one was configured."""
    if not chat_memory:
        return
    chat_memory.clear()
    logger.info("Router agent context cleared")