Spaces:
Sleeping
Sleeping
| import streamlit as st | |
| import sqlite3 | |
| import pandas as pd | |
| import os | |
| from langchain.chat_models import ChatOpenAI | |
| from langchain.agents import Tool, initialize_agent | |
| from langchain.agents.agent_types import AgentType | |
| from langchain_community.utilities.sql_database import SQLDatabase | |
| from langchain_community.agent_toolkits import create_sql_agent | |
| from langchain.schema import HumanMessage | |
# --- Model and database setup -------------------------------------------------
# SECURITY: an API key is embedded in source below. It should live in an
# environment variable or st.secrets, and the committed key should be rotated.
# setdefault (instead of direct assignment) lets a real environment value
# override the embedded fallback instead of being silently clobbered.
os.environ.setdefault('OPENAI_API_KEY', "gl-U2FsdGVkX187N1eV0CyPv1sUJRNjeg+05MCul6Lf06cDym4PRicIyZ4g0RtQUSMl")
os.environ.setdefault('OPENAI_BASE_URL', "https://aibe.mygreatlearning.com/openai/v1")

# Chat model shared by the tools and agents defined below.
llm = ChatOpenAI(model_name="gpt-4")

# Raw sqlite3 connection used for pandas queries; check_same_thread=False
# because Streamlit reruns may touch it from different threads.
connection = sqlite3.connect("kartify.db", check_same_thread=False)

# LangChain view of the same database, driving the SQL agent.
kartify_db = SQLDatabase.from_uri("sqlite:///kartify.db")
sqlite_agent = create_sql_agent(llm, db=kartify_db, agent_type="openai-tools", verbose=False)
def policy_tool_func(input: str) -> str:
    """Ask the LLM to assess return/replacement eligibility from order context.

    `input` carries the order, shipment, and product-policy context; the reply
    is the model's plain-text eligibility answer, whitespace-stripped.
    """
    instructions = f"""Only respond about return or replacement if the user has explicitly asked about it in their query.
Use the following context from order, shipment, and product policy data:
{input}
Your task (only if return or replacement is mentioned):
1. Check eligibility based on `actual_delivery` and product policy:
- If `return_days_allowed` is 0, clearly state the product is not eligible for return.
- If within window, mention last date allowed for return and replacement.
- If the window has passed, say so.
2. Mention return conditions (e.g., “Sealed only”).
3. If `actual_delivery` is null, return/replacement is not yet applicable.
4. If any required info is missing, say so politely that i am connecting to human support.
If the query does **not** mention return or replacement, do **not** include any information about it in your response.
Respond clearly and briefly — no system or SQL steps."""
    reply = llm([HumanMessage(content=instructions)])
    return reply.content.strip()
def answer_generation_tool(input: str) -> str:
    """Draft the final, customer-facing reply from the supplied context.

    Parameters
    ----------
    input : str
        Combined order context plus the customer's query.

    Returns
    -------
    str
        A short, formal reply message (whitespace-stripped).
    """
    # FIX: the original instruction read "If you do not what to answer" —
    # corrected to "do not know what to answer" so the model parses it.
    prompt = f"""You are a polite and formal customer assistant replying to a user query to customer about return, replacement, delivery, cancellation, or address change.
Use the context provided below:
{input}
Guidelines:
- Respond in a short, formal, and factual tone.
- Do **not** add extra details that were not asked in the user's query.
- Do **not** format like an email — avoid greetings, sign-offs, or explanations.
- Do not offer return/replacement windows unless directly asked.
- Do not mention cancellation or refund policies unless cancellation is requested.
- Do not mention address change policy until asked.
- If the product has **not been delivered**, mention that return/replacement cannot be processed yet.
- If the product **has been delivered**, calculate based on delivery date whether return or replacement is still allowed.
- If the order has already **shipped**, reply that address changes are not possible.
- If any required info is missing, say so politely that i am connecting to human support.
- If you do not know what to answer now, say so politely that i am connecting to human support.
- Never ask for any bank details
Output:
- Return only a single, relevant customer-facing message — no system instructions, reasoning, or metadata.
"""
    return llm([HumanMessage(content=prompt)]).content.strip()
def output_guard_check(model_output: str) -> str:
    """Classify a single assistant reply as "SAFE" or "BLOCK" via the LLM."""
    guard_prompt = f"""
You are a content safety assistant. Your task is to classify if the assistant's response is appropriate.
If the message contains:
- Requests for bank details, OTPs, account numbers
- Harassment or offensive tone
- Privacy concerns or unsafe advice
- Misunderstanding and miscommunication word
- Phrases like "please contact customer service" or redirection to a human agent
- Escalated this to our support team
Return: BLOCK
Otherwise, return: SAFE
Response: {model_output}
Output:
"""
    verdict = llm.predict(guard_prompt)
    return verdict.strip()
def conversation_guard_check(history) -> str:
    """Review the full chat history and return "BLOCK" or "SAFE".

    `history` is a sequence of dicts with "user" and "assistant" keys.
    """
    turns = []
    for turn in history:
        turns.append(f"Customer: {turn['user']}\nAssistant: {turn['assistant']}")
    chat_summary = "\n".join(turns)
    monitor_prompt = f"""
You are a conversation monitor AI. Review the entire conversation and classify if the assistant:
- Repeatedly offered unnecessary return or replacement steps
- Gave more than what the user asked
- Missed signs of customer distress
- Ignored user's refusal of an option
If any of the above are TRUE, return BLOCK
Else, return SAFE
Conversation:
{chat_summary}
Output:
"""
    return llm.predict(monitor_prompt).strip()
# Tools exposed to the order agent: return/replacement eligibility checking
# and drafting of the final customer-facing answer.
tools = [
    Tool(name="PolicyChecker", func=policy_tool_func, description="Check return and replacement eligibility."),
    Tool(name="AnswerGenerator", func=answer_generation_tool, description="Craft final response.")
]
# OpenAI-functions agent that routes each customer query through the tools
# above; handle_parsing_errors=True keeps malformed tool calls from crashing.
order_agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=False, handle_parsing_errors=True)
# --- Streamlit UI -------------------------------------------------------------
st.title("📦 Order Query Assistant")

customer_id = st.text_input("Enter your Customer ID:")
if customer_id:
    # Parameterized query (placeholder + params) — no string interpolation,
    # so customer_id cannot inject SQL.
    query = """
    SELECT
        order_id,
        product_description
    FROM
        orders
    WHERE
        customer_id = ?
    ORDER BY order_date DESC
    """
    df = pd.read_sql_query(query, connection, params=(customer_id,))
    if not df.empty:
        selected_order = st.selectbox("Select your Order:", df["order_id"] + " - " + df["product_description"])
        order_id = selected_order.split(" - ")[0]

        # BUG FIX: the context was previously cached once per session
        # (`if "order_context" not in st.session_state`), so choosing a
        # different order kept showing the first order's details. Re-fetch
        # whenever the selected order changes.
        if st.session_state.get("order_context_id") != order_id:
            with st.spinner("Loading order details..."):
                order_context_raw = sqlite_agent.invoke(f"Fetch all columns for order ID {order_id}")
                # NOTE(review): "Date: 25 July" is a hard-coded reference date —
                # presumably standing in for "today" in eligibility math;
                # confirm and replace with a real clock if so.
                st.session_state.order_context = f"Order ID: {order_id}\n{order_context_raw}\nDate: 25 July"
                st.session_state.order_context_id = order_id

        st.markdown("### Chat with Assistant")
        if "chat_history" not in st.session_state:
            st.session_state.chat_history = []

        user_query = st.chat_input("How can I help you?")
        if user_query:
            # NOTE(review): this prompt is visibly truncated ("...") in the
            # source — the full classifier instructions appear to be missing.
            # Expected outputs: "0" -> escalate, "1" -> closing, else -> agent.
            intent_prompt = f"""You are an intent classifier...User Query: {user_query}"""
            intent = llm.predict(intent_prompt).strip()
            if intent == "0":
                st.chat_message("assistant").write("Sorry for the inconvenience. A human agent will assist you shortly.")
            elif intent == "1":
                st.chat_message("assistant").write("Thank you! I hope I was able to help.")
            else:
                full_prompt = f"""
Context:
{st.session_state.order_context}
Customer Query: {user_query}
Previous response: {st.session_state.chat_history}
Use tools to reply.
"""
                with st.spinner("Generating response..."):
                    raw_response = order_agent.run(full_prompt)
                # Per-message guardrail: swap unsafe replies for a handoff line.
                guard = output_guard_check(raw_response)
                if guard == "BLOCK":
                    response = "Sorry for the inconvenience. Connecting to human support."
                else:
                    response = raw_response
                st.chat_message("assistant").write(response)
                st.session_state.chat_history.append({"user": user_query, "assistant": response})
                # Conversation-level guardrail over the accumulated history.
                if conversation_guard_check(st.session_state.chat_history) == "BLOCK":
                    st.chat_message("assistant").write("Let me connect you with a human agent.")
                    st.stop()
    else:
        st.warning("No orders found for this customer ID.")