Spaces:
Sleeping
Sleeping
Deep committed on
Commit Β·
880d7e9
1
Parent(s): 455e197
some files added
Browse files- gemini_helper.py +428 -0
- voice_config.py +45 -0
- voice_service.py +581 -0
- voice_urls.py +9 -0
gemini_helper.py
ADDED
|
@@ -0,0 +1,428 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Groq AI Helper for Natural Language Understanding with Structured Output
|
| 2 |
+
|
| 3 |
+
from groq import Groq
|
| 4 |
+
from pydantic import BaseModel, Field
|
| 5 |
+
from typing import Optional, Literal
|
| 6 |
+
import re
|
| 7 |
+
import json
|
| 8 |
+
from voice_config import GROQ_API_KEY, USE_GROQ_AI
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Pydantic Models for Structured Output
|
| 12 |
+
class RequestTypeResponse(BaseModel):
    """Structured LLM reply: is the customer asking for an exchange or a return?"""
    intent: Literal["exchange", "return"] = Field(description="Whether customer wants exchange or return")
    confidence: Literal["high", "medium", "low"] = Field(description="Confidence level")
    reasoning: str = Field(description="Brief explanation")
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class OrderIdResponse(BaseModel):
    """Structured LLM reply for order-ID extraction from free-form speech."""
    order_id: Optional[str] = Field(description="Order ID extracted, null if not found")
    found: bool = Field(description="Whether order ID was found")
    interpretation: str = Field(description="How it was interpreted")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class ReasonResponse(BaseModel):
    """Structured LLM reply: customer's reason rewritten professionally and categorized."""
    reason: str = Field(description="Professional reason in 2nd person (You...), max 10 words")
    category: Literal["size", "defect", "wrong_item", "preference", "delivery", "other"] = Field(description="Issue category")
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class PreferenceResponse(BaseModel):
    """Structured LLM reply: what replacement product the customer wants."""
    preference: str = Field(description="Product description, max 15 words")
    type: Literal["size", "color", "model", "feature", "other"] = Field(description="Preference type")
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class ConfirmationResponse(BaseModel):
    """Structured LLM reply: yes/no interpretation of a confirmation answer."""
    confirmed: bool = Field(description="True if confirming, False if declining")
    confidence: Literal["high", "medium", "low"] = Field(description="Confidence level")
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class CorrectionField(BaseModel):
    """Structured LLM reply: which collected field the customer wants to correct."""
    field: Literal["request_type", "order_id", "reason", "exchange_preference", "everything"] = Field(description="Which field the customer wants to correct")
    reasoning: str = Field(description="Why this field needs correction")
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class GroqAI:
    """Natural-language understanding for the voice return/exchange flow.

    Every public extractor follows the same pattern: try a Groq structured-
    output (JSON mode) call first, then fall back to deterministic keyword /
    regex matching, so the caller always receives a usable value even when
    the API is disabled or unavailable.
    """

    def __init__(self):
        # Startup banner: makes a missing key or disabled config obvious in logs.
        print(f"\n{'=' * 60}")
        print("INITIALIZING GROQ AI")
        print(f"USE_GROQ_AI = {USE_GROQ_AI}")
        print(f"GROQ_API_KEY = {'SET (' + str(len(GROQ_API_KEY)) + ' chars)' if GROQ_API_KEY else 'NOT SET'}")
        print(f"{'=' * 60}\n")

        # bool(...) so `enabled` never holds the raw key string itself.
        self.enabled = bool(USE_GROQ_AI and GROQ_API_KEY)
        if self.enabled:
            try:
                self.client = Groq(api_key=GROQ_API_KEY)
                self.model_name = "llama-3.3-70b-versatile"  # fast and powerful model
                print("Groq AI initialized successfully")
            except Exception as e:
                print(f"Groq AI initialization failed: {e}")
                self.enabled = False
        else:
            print("Groq AI is DISABLED - will use basic pattern matching")
            if not USE_GROQ_AI:
                print("   Reason: USE_GROQ_AI is False")
            if not GROQ_API_KEY:
                print("   Reason: GROQ_API_KEY is not set")

    def _chat_json(self, prompt, response_model, temperature=0.1):
        """Run *prompt* through the chat API in JSON mode and validate the
        reply against *response_model* (a pydantic BaseModel subclass).

        Raises on any API or validation error; callers catch and fall back.
        Extracted here because the create/parse/validate boilerplate was
        duplicated verbatim in six methods.
        """
        response = self.client.chat.completions.create(
            model=self.model_name,
            messages=[{"role": "user", "content": prompt}],
            response_format={"type": "json_object"},
            temperature=temperature,
        )
        json_text = response.choices[0].message.content
        print(f"[LLM_OUTPUT {response_model.__name__}] {json_text}")
        return response_model.model_validate_json(json_text)

    def extract_request_type(self, user_text, question_asked="Would you like exchange or return?"):
        """Classify the customer's answer as 'exchange' or 'return'.

        Always returns one of those two strings and never raises: strong
        keywords short-circuit first, then the LLM, then basic matching.
        """
        print(f"\nEXTRACT_REQUEST_TYPE input={user_text!r} ({len(user_text)} chars) "
              f"question={question_asked!r} groq_enabled={self.enabled}")

        user_text_clean = user_text.strip().lower()

        # Fast path: deterministic keyword detection to avoid model mistakes.
        strong_return_keywords = [
            'return', 'refund', 'money back', 'send back', 'want to return',
            'need to return', 'request a return', 'two kind of return'
        ]
        strong_exchange_keywords = [
            'exchange', 'swap', 'replace', 'replacement', 'change it', 'different product'
        ]

        if any(keyword in user_text_clean for keyword in strong_return_keywords):
            print("   FAST PATH: RETURN detected via keyword match")
            return 'return'
        if any(keyword in user_text_clean for keyword in strong_exchange_keywords):
            print("   FAST PATH: EXCHANGE detected via keyword match")
            return 'exchange'

        # Try Groq AI next if enabled.
        if self.enabled:
            try:
                prompt = (
                    f"You are analyzing customer service dialogue.\n\n"
                    f"QUESTION: {question_asked}\n"
                    f"CUSTOMER SAYS: {user_text}\n\n"
                    f"TASK: Determine what the customer wants.\n\n"
                    f"YOU MUST CHOOSE EXACTLY ONE:\n"
                    f"A) 'exchange' - if they want to swap/replace/exchange the product\n"
                    f"B) 'return' - if they want to send back/refund/return the product\n\n"
                    f"EXAMPLES:\n"
                    f"'exchange' -> exchange\n"
                    f"'return' -> return\n"
                    f"'I want to exchange this' -> exchange\n"
                    f"'can I get a refund' -> return\n"
                    f"'I would like you to process an exchange' -> exchange\n"
                    f"'swap it for another one' -> exchange\n\n"
                    f"Look for these keywords:\n"
                    f"- exchange, swap, replace, change, different = EXCHANGE\n"
                    f"- return, refund, send back, money back = RETURN\n\n"
                    f"RESPOND WITH JSON ONLY:\n"
                    f'{{"intent": "exchange", "confidence": "high", "reasoning": "..."}}\n'
                    f'OR\n'
                    f'{{"intent": "return", "confidence": "high", "reasoning": "..."}}\n\n'
                    f"The 'intent' MUST be EXACTLY either 'exchange' or 'return' (lowercase).\n"
                    f"Choose the BEST match even if you're uncertain. NEVER return anything other than 'exchange' or 'return'."
                )
                # temperature=0.0: more deterministic classification.
                result = self._chat_json(prompt, RequestTypeResponse, temperature=0.0)
                print(f"Groq Result: {result.intent} ({result.confidence}) - {result.reasoning}")
                if result.intent in ('exchange', 'return'):
                    return result.intent
                print(f"Invalid intent from Groq: {result.intent}, falling back")
            except Exception as e:
                # BUGFIX: the original returned 'exchange' here, silently
                # skipping the keyword fallback below despite its "ALWAYS
                # returns a result" contract.  Fall through instead so API
                # outages still get the full deterministic matching.
                print(f"Groq Error: {e}")

        # Fallback: basic keyword matching (ALWAYS returns a result).
        print("\nBASIC KEYWORD MATCHING")
        print(f"   Cleaned input: '{user_text_clean}'")

        # Check for return keywords first.
        return_keywords = ['return', 'refund', 'money back', 'send back', 'don\'t want', 'cancel']
        for keyword in return_keywords:
            if keyword in user_text_clean:
                print(f"   RETURN detected (keyword: '{keyword}')")
                return 'return'

        # Check for exchange keywords.
        exchange_keywords = ['exchange', 'swap', 'replace', 'change', 'different']
        for keyword in exchange_keywords:
            if keyword in user_text_clean:
                print(f"   EXCHANGE detected (keyword: '{keyword}')")
                return 'exchange'

        # No exact keywords: make a best guess from partial (speech-mangled) matches.
        print("   No exact keywords found, checking partial matches...")
        if any(word in user_text_clean for word in ['exch', 'swp', 'replac']):
            print("   EXCHANGE detected (partial match)")
            return 'exchange'
        if any(word in user_text_clean for word in ['retur', 'refun', 'back']):
            print("   RETURN detected (partial match)")
            return 'return'

        # Absolute last resort: default to exchange.
        print("   No matches found, defaulting to EXCHANGE")
        return 'exchange'

    def extract_order_id(self, user_text, question_asked="Please provide your order ID"):
        """Extract a numeric order ID from *user_text*.

        Returns the digits as a string, or None when nothing is found.
        LLM path first; regex fallback on failure or a miss.
        """
        print(f"\nEXTRACT_ORDER_ID: {user_text}")

        if not self.enabled:
            return self._basic_extract_order_id(user_text)

        try:
            prompt = (
                f"Question: {question_asked}\n"
                f"Response: {user_text}\n\n"
                "Extract any order number/ID from the customer's response.\n"
                "Numbers can be spoken as words (e.g., 'one two three' = 123).\n"
                "Look for patterns like: 'order 123', 'order number ABC', 'ORD-456', etc.\n\n"
                "You MUST respond with valid JSON in this exact format:\n"
                '{"order_id": "12345", "found": true, "interpretation": "Extracted from order 12345"}\n'
                'OR if no order ID found:\n'
                '{"order_id": null, "found": false, "interpretation": "No order ID mentioned"}'
            )
            result = self._chat_json(prompt, OrderIdResponse)
            if result.found and result.order_id:
                # Keep digits only: the model may echo 'ORD-456' style prefixes.
                order_id = re.sub(r'\D', '', result.order_id)
                if order_id:
                    print(f"Order ID: {order_id}")
                    return order_id
            return self._basic_extract_order_id(user_text)
        except Exception as e:
            print(f"Groq Error: {e}")
            return self._basic_extract_order_id(user_text)

    def extract_reason(self, user_text, request_type, question_asked=None):
        """Rewrite the customer's reason professionally (2nd person, <=10 words).

        Falls back to returning *user_text* verbatim on any failure.
        """
        if not self.enabled:
            return user_text

        if not question_asked:
            question_asked = f"Why do you want to {request_type}?"

        try:
            prompt = (
                f"Question: {question_asked}\n"
                f"Response: {user_text}\n\n"
                "Convert their reason to professional format (max 10 words).\n\n"
                "CRITICAL: Convert 1st person to 2nd person:\n"
                "- 'I don't like' -> 'You didn't like'\n"
                "- 'I changed my mind' -> 'You changed your mind'\n"
                "- 'It's too small' -> 'It's too small' (already neutral)\n"
                "- 'I ordered wrong size' -> 'You ordered wrong size'\n\n"
                "Categorize as: size, defect, wrong_item, preference, delivery, other\n\n"
                "You MUST respond with valid JSON in this exact format:\n"
                '{"reason": "You didn\'t like the product", "category": "preference"}\n'
                'OR\n'
                '{"reason": "Wrong size ordered", "category": "size"}\n\n'
                "The 'category' MUST be one of: size, defect, wrong_item, preference, delivery, other"
            )
            result = self._chat_json(prompt, ReasonResponse)
            print(f"Reason: {result.reason} ({result.category})")
            return result.reason
        except Exception as e:
            print(f"Groq Error: {e}")
            return user_text

    def extract_exchange_preference(self, user_text, question_asked="What would you prefer instead?"):
        """Summarize what replacement product the customer wants (<=15 words).

        Falls back to returning *user_text* verbatim on any failure.
        """
        if not self.enabled:
            return user_text

        try:
            prompt = (
                f"Question: {question_asked}\n"
                f"Response: {user_text}\n\n"
                "Extract what product they want (max 15 words).\n"
                "Categorize type as: size, color, model, feature, other\n\n"
                "You MUST respond with valid JSON in this exact format:\n"
                '{"preference": "Size large in black", "type": "size"}\n'
                'OR\n'
                '{"preference": "Different color - blue", "type": "color"}\n\n'
                "The 'type' MUST be one of: size, color, model, feature, other"
            )
            result = self._chat_json(prompt, PreferenceResponse)
            print(f"Preference: {result.preference} ({result.type})")
            return result.preference
        except Exception as e:
            print(f"Groq Error: {e}")
            return user_text

    def is_confirmation(self, user_text, question_asked="Is this correct?"):
        """Return True when the customer confirms, False when they decline.

        Keyword fallback is used when the LLM is disabled or errors out.
        """
        if not self.enabled:
            return self._basic_is_confirmation(user_text)

        try:
            prompt = (
                f"Question: {question_asked}\n"
                f"Response: {user_text}\n\n"
                "Is the customer confirming (yes) or declining (no)?\n\n"
                "AFFIRMATIVE (confirming): yes, yeah, yep, sure, ok, okay, correct, right, go ahead, proceed, confirm\n"
                "NEGATIVE (declining): no, nope, nah, wrong, incorrect, cancel, stop, wait\n\n"
                "You MUST respond with valid JSON in this exact format:\n"
                '{"confirmed": true, "confidence": "high"}\n'
                'OR\n'
                '{"confirmed": false, "confidence": "high"}\n\n'
                "The 'confirmed' field MUST be boolean (true or false).\n"
                "The 'confidence' MUST be 'high', 'medium', or 'low'."
            )
            result = self._chat_json(prompt, ConfirmationResponse)
            print(f"Confirmed: {result.confirmed} ({result.confidence})")
            return result.confirmed
        except Exception as e:
            print(f"Groq Error: {e}")
            return self._basic_is_confirmation(user_text)

    def identify_correction_field(self, user_text, current_data):
        """When the customer rejects the confirmation, identify which field
        they want to correct.

        *current_data* is the dict of collected answers.  Returns one of:
        request_type, order_id, reason, exchange_preference, everything.
        """
        if not self.enabled:
            return "everything"

        try:
            confirmation_summary = (
                f"Request type: {current_data.get('request_type', 'N/A')}\n"
                f"Order ID: {current_data.get('order_id', 'N/A')}\n"
                f"Reason: {current_data.get('reason', 'N/A')}\n"
            )
            if current_data.get('request_type') == 'exchange':
                confirmation_summary += f"Exchange preference: {current_data.get('exchange_preference', 'N/A')}\n"

            prompt = (
                f"The customer declined the confirmation. Here's what we have:\n"
                f"{confirmation_summary}\n"
                f"Customer said: {user_text}\n\n"
                "Which field do they want to correct?\n\n"
                "Options: request_type, order_id, reason, exchange_preference, everything\n\n"
                "Examples:\n"
                "- 'wrong order number' = order_id\n"
                "- 'I want exchange not return' = request_type\n"
                "- 'reason is wrong' = reason\n"
                "- 'start over' = everything\n\n"
                "You MUST respond with valid JSON in this exact format:\n"
                '{"field": "order_id", "reasoning": "Customer mentioned wrong order number"}\n'
                'OR\n'
                '{"field": "everything", "reasoning": "Customer wants to start over"}\n\n'
                "The 'field' MUST be one of: request_type, order_id, reason, exchange_preference, everything"
            )
            result = self._chat_json(prompt, CorrectionField)
            print(f"Correction needed for: {result.field} - {result.reasoning}")
            return result.field
        except Exception as e:
            print(f"Groq Error: {e}")
            return "everything"

    def _basic_extract_request_type(self, text):
        """Legacy keyword-only classifier - not used anymore, kept for compatibility."""
        text_lower = text.lower().strip()

        return_keywords = ['return', 'refund', 'money back', 'send back']
        for keyword in return_keywords:
            if keyword in text_lower:
                return 'return'

        exchange_keywords = ['exchange', 'swap', 'replace', 'change', 'different']
        for keyword in exchange_keywords:
            if keyword in text_lower:
                return 'exchange'

        # Default to exchange if nothing found.
        return 'exchange'

    def _basic_extract_order_id(self, text):
        """Regex fallback: first number after 'order', else any bare number."""
        print(f"Basic order ID extraction: {text}")
        match = re.search(r'order[\s#]*(\d+)|(\d+)', text.lower())
        if match:
            order_id = match.group(1) or match.group(2)
            print(f"Found: {order_id}")
            return order_id
        print("No order ID found")
        return None

    def _basic_is_confirmation(self, text):
        """Keyword fallback: substring match against common affirmatives."""
        confirmations = ['yes', 'yeah', 'yep', 'correct', 'right', 'sure', 'ok', 'okay']
        return any(word in text.lower() for word in confirmations)
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
# Global instance (constructed at import time; prints a diagnostic banner
# and, when configured, opens the Groq client).
groq_ai = GroqAI()

# Backwards compatibility alias: older modules import `gemini_ai` from the
# time this helper wrapped Google Gemini instead of Groq.
gemini_ai = groq_ai
|
voice_config.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Voice Bot Configuration with Groq TTS

import os

# Groq AI Configuration (for NLU + TTS)
# SECURITY FIX: a previous revision committed a literal API key here.  The
# key must be considered compromised and rotated; it is now read from the
# environment instead.  Get a free key at: https://console.groq.com/keys
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")
USE_GROQ_AI = True  # Set to False to use basic pattern matching

# Groq TTS Configuration (Orpheus by Canopy Labs)
GROQ_TTS_MODEL = "canopylabs/orpheus-v1-english"  # Orpheus English TTS
GROQ_TTS_VOICE = "autumn"  # Default voice (female, natural)

# Available Groq Orpheus English voices:
# Female: autumn, diana, hannah
# Male: austin, daniel, troy
#
# Vocal directions (English only): wrap in brackets, e.g. [cheerful], [whisper]
# Note: Input limit is 200 characters per API call (auto-chunked)

# Conversation Flow Configuration
# Placeholders in {braces} are filled via str.format by the voice service.
CONVERSATION_TEMPLATES = {
    'greeting': "Hello! I'm your return and exchange assistant. Would you like to process an exchange or a return?",
    'ask_exchange_or_return': "Please say 'exchange' if you want to exchange a product, or 'return' if you want to return it.",
    'ask_order_id': "Please provide your order ID. You can say something like 'order 123'.",
    'ask_reason': "Please tell me the reason for the {request_type}.",
    'ask_exchange_preference': "What product would you prefer instead? Please describe what you'd like to exchange it for.",
    'confirm_details': "Let me confirm: You want to {request_type} order {order_id} because {reason}. {extra_info}Is this correct?",
    'ask_what_wrong': "What did we get wrong?",
    'processing': "Perfect! I'm processing your {request_type} request now.",
    'exchange_pickup_scheduled': "Your exchange pickup has been scheduled for {pickup_date}.",
    'exchange_delivery_scheduled': "Your new product will be delivered by {delivery_date}.",
    'return_scheduled': "Your return pickup has been scheduled for {pickup_date}.",
    'success': "Your {request_type} request has been successfully processed! Tracking number: {tracking_number}",
    'error': "I'm sorry, I didn't understand that. Could you please repeat?",
    'goodbye': "Thank you for using our service. Have a great day!"
}

# Database Configuration
DATABASE_INTEGRATION = True  # Set to False to disable database updates

# API Endpoints (placeholders; supply real keys via environment in production)
DELHIVERY_API_KEY = "your_delhivery_api_key"  # For production
OPENLEAF_API_KEY = "your_openleaf_api_key"  # For production

# Return/Exchange Settings
DEFAULT_RETURN_DAYS = 7
EXCHANGE_DELIVERY_DAYS = 5
|
voice_service.py
ADDED
|
@@ -0,0 +1,581 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Voice Bot Service β TTS handled by browser speechSynthesis
|
| 2 |
+
|
| 3 |
+
import requests
|
| 4 |
+
import re
|
| 5 |
+
from datetime import datetime, timedelta
|
| 6 |
+
from django.apps import apps
|
| 7 |
+
from django.http import JsonResponse, HttpResponse
|
| 8 |
+
from django.views.decorators.csrf import csrf_exempt
|
| 9 |
+
from django.views.decorators.http import require_http_methods
|
| 10 |
+
from django.utils import timezone
|
| 11 |
+
import json
|
| 12 |
+
from voice_config import *
|
| 13 |
+
from gemini_helper import gemini_ai
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _get_voice_models():
    """Lazy import of VoiceSession / VoiceMessage to avoid AppRegistryNotReady.

    Resolved through Django's app registry at call time rather than at
    module import, since this module may be imported before apps are ready.
    Returns the (VoiceSession, VoiceMessage) model classes from the 'store' app.
    """
    VoiceSession = apps.get_model('store', 'VoiceSession')
    VoiceMessage = apps.get_model('store', 'VoiceMessage')
    return VoiceSession, VoiceMessage
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class GroqTTS:
    """Text-to-Speech client for the Groq Orpheus TTS API.

    The API rejects inputs longer than ``MAX_INPUT_LEN`` characters, so
    long text is split into sentence-aligned chunks, each chunk is
    synthesized separately, and the per-chunk WAV outputs are stitched
    back together into a single WAV byte string.
    """

    MAX_INPUT_LEN = 200   # Groq Orpheus hard limit per request
    REQUEST_TIMEOUT = 30  # seconds; prevents a hung worker on network stalls

    def __init__(self):
        # Credentials / model settings come from voice_config.
        self.api_key = GROQ_API_KEY
        self.model = GROQ_TTS_MODEL
        self.voice = GROQ_TTS_VOICE
        self.base_url = "https://api.groq.com/openai/v1"

    # -- internal: single chunk (<=200 chars) ---------------------------
    def _tts_chunk(self, text):
        """Send a single <=200-char chunk to Groq and return WAV bytes.

        Raises ``requests.exceptions.RequestException`` on HTTP or
        network errors (callers catch it in :meth:`text_to_speech`).
        """
        url = f"{self.base_url}/audio/speech"

        headers = {
            "Authorization": f"Bearer {self.api_key.strip()}",
            "Content-Type": "application/json",
        }

        data = {
            "model": self.model,
            "input": text,
            "voice": self.voice,
            "response_format": "wav",
        }

        # BUG FIX: the original call had no timeout, so a network stall
        # could block the worker indefinitely.
        response = requests.post(url, json=data, headers=headers,
                                 timeout=self.REQUEST_TIMEOUT)
        response.raise_for_status()
        return response.content

    # -- split long text into <=limit-char pieces at sentence boundaries --
    @staticmethod
    def _split_text(text, limit=200):
        """Break *text* into chunks of at most *limit* characters,
        splitting on sentence-ending punctuation when possible, then on
        the last space, then hard-cutting as a last resort."""
        chunks = []
        while text:
            if len(text) <= limit:
                chunks.append(text)
                break
            # Try to split at the last sentence-end within the limit.
            segment = text[:limit]
            split_pos = -1
            for sep in ['. ', '! ', '? ', '.\'', '!\"', '?\"']:
                idx = segment.rfind(sep)
                if idx > split_pos:
                    split_pos = idx + len(sep)
            if split_pos <= 0:
                # Fall back to last space.
                split_pos = segment.rfind(' ')
            if split_pos <= 0:
                split_pos = limit  # hard cut mid-word
            chunks.append(text[:split_pos].strip())
            text = text[split_pos:].strip()
        return [c for c in chunks if c]

    # -- public API (handles any length) --------------------------------
    def text_to_speech(self, text):
        """Convert *text* to speech using Groq Orpheus TTS.

        Automatically chunks text longer than ``MAX_INPUT_LEN`` and
        concatenates the per-chunk WAV output at the PCM level.

        Returns WAV bytes, or ``None`` on failure / empty input.
        """
        import struct, io
        chunks = self._split_text(text, self.MAX_INPUT_LEN)
        if not chunks:
            return None

        try:
            if len(chunks) == 1:
                return self._tts_chunk(chunks[0])

            # Multiple chunks: concatenate raw PCM from each WAV.
            pcm_parts = []
            wav_params = None  # (channels, sample_width, sample_rate)
            for chunk in chunks:
                wav_bytes = self._tts_chunk(chunk)
                buf = io.BytesIO(wav_bytes)
                # Parse minimal WAV header (44 bytes canonical).
                buf.read(4)  # b'RIFF'
                buf.read(4)  # total file size
                buf.read(4)  # b'WAVE'
                buf.read(4)  # b'fmt '
                fmt_size = struct.unpack('<I', buf.read(4))[0]
                fmt_data = buf.read(fmt_size)
                channels = struct.unpack('<H', fmt_data[2:4])[0]
                sample_rate = struct.unpack('<I', fmt_data[4:8])[0]
                bits = struct.unpack('<H', fmt_data[14:16])[0]
                if wav_params is None:
                    # NOTE(review): assumes every chunk shares the first
                    # chunk's format — plausible for one voice/model, but
                    # unverified against the Groq API contract.
                    wav_params = (channels, bits // 8, sample_rate)
                # Skip ahead to the 'data' chunk.
                while True:
                    chunk_id = buf.read(4)
                    if not chunk_id:
                        break
                    chunk_size = struct.unpack('<I', buf.read(4))[0]
                    if chunk_id == b'data':
                        pcm_parts.append(buf.read(chunk_size))
                        break
                    else:
                        buf.read(chunk_size)

            # Rebuild a single canonical 44-byte WAV header around the PCM.
            pcm = b''.join(pcm_parts)
            ch, sw, sr = wav_params
            data_size = len(pcm)
            header = struct.pack(
                '<4sI4s4sIHHIIHH4sI',
                b'RIFF', 36 + data_size, b'WAVE',
                b'fmt ', 16, 1, ch, sr, sr * ch * sw, ch * sw, sw * 8,
                b'data', data_size,
            )
            return header + pcm

        except requests.exceptions.RequestException as e:
            print(f"Groq TTS API Error: {e}")
            return None
        except Exception as e:
            print(f"Groq TTS processing error: {e}")
            return None
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
# Backward-compatible alias: existing code imports ElevenLabsTTS, which is
# now backed by the Groq implementation above.
ElevenLabsTTS = GroqTTS
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
class ConversationFlow:
    """State machine driving the voice return/exchange conversation.

    Per-session state lives in ``self.state`` keyed by session_id; each
    turn takes the user's transcribed text plus the current step name and
    yields the bot reply, the next step, and the data collected so far.
    Flow: intent -> order id -> reason -> (exchange preference) ->
    confirmation -> processing.
    """

    def __init__(self):
        # TTS client kept for compatibility; audio is produced client-side
        # in the JSON flow, so this instance is not used per turn.
        self.tts = ElevenLabsTTS()
        # session_id -> {'step': str, 'data': dict}
        self.state = {}

    def process_user_input(self, session_id, user_text, current_step):
        """Process one user turn and generate the appropriate response.

        Returns a dict with keys: ``text``, ``next_step``,
        ``session_data``, ``valid_answers``.
        """

        # Initialize session state on first contact.
        if session_id not in self.state:
            self.state[session_id] = {
                'step': 'greeting',
                'data': {}
            }

        session = self.state[session_id]

        # Process based on current step
        if current_step == 'greeting':
            # Only short-circuit when the user clearly says exchange/return.
            # Without a keyword match we ask explicitly — this prevents
            # inputs like "I would be 23" from being misclassified.
            user_lower = user_text.strip().lower()
            intent_keywords = [
                'return', 'refund', 'money back', 'send back',
                'exchange', 'swap', 'replace', 'replacement', 'different product',
            ]
            has_intent = any(kw in user_lower for kw in intent_keywords)

            if has_intent:
                question_asked = CONVERSATION_TEMPLATES['ask_exchange_or_return']
                request_type = self._extract_request_type(user_text, question_asked)
                print(f"[flow] greeting -> keyword found -> classified: {request_type}")
                session['data']['request_type'] = request_type
                response_text = CONVERSATION_TEMPLATES['ask_order_id']
                next_step = 'ask_order_id'
            else:
                print(f"[flow] greeting -> no intent keyword in '{user_text}', asking explicitly")
                response_text = CONVERSATION_TEMPLATES['ask_exchange_or_return']
                next_step = 'ask_exchange_or_return'

        elif current_step == 'ask_exchange_or_return':
            question_asked = CONVERSATION_TEMPLATES['ask_exchange_or_return']
            print(f"\n{'='*70}")
            print(f"π€ PROCESSING STEP: ask_exchange_or_return")
            print(f"π User input: '{user_text}'")
            print(f"β Question asked: '{question_asked}'")
            print(f"{'='*70}")

            request_type = self._extract_request_type(user_text, question_asked)

            print(f"\n{'='*70}")
            print(f"π EXTRACTION RESULT: '{request_type}'")
            print(f"π Result type: {type(request_type)}")
            print(f"π Result is truthy: {bool(request_type)}")
            print(f"{'='*70}\n")

            # request_type should NEVER be None now, but just in case
            if request_type and request_type in ['exchange', 'return']:
                session['data']['request_type'] = request_type
                print(f"β SUCCESS: Request type set to: {request_type}")
                response_text = CONVERSATION_TEMPLATES['ask_order_id']
                next_step = 'ask_order_id'
            else:
                print(f"β UNEXPECTED: Got invalid request_type: {request_type}")
                print(f"β οΈ This should not happen - defaulting to exchange")
                session['data']['request_type'] = 'exchange'
                response_text = CONVERSATION_TEMPLATES['ask_order_id']
                next_step = 'ask_order_id'

        elif current_step == 'ask_order_id':
            question_asked = CONVERSATION_TEMPLATES['ask_order_id']
            print(f"\nπ€ PROCESSING STEP: ask_order_id")
            print(f"π User input: '{user_text}'")

            order_id = self._extract_order_id(user_text, question_asked)

            print(f"π Extraction result: {order_id}")

            if order_id:
                session['data']['order_id'] = order_id
                print(f"β Order ID set to: {order_id}")
                request_type = session['data']['request_type']
                response_text = CONVERSATION_TEMPLATES['ask_reason'].format(request_type=request_type)
                next_step = 'ask_reason'
            else:
                print(f"β οΈ Could not extract order ID from: '{user_text}'")
                response_text = "I couldn't find an order number. Please say something like 'order 123' or 'my order number is 456'."
                next_step = 'ask_order_id'

        elif current_step == 'ask_reason':
            # Clean up and standardize the reason using Gemini
            request_type = session['data']['request_type']
            question_asked = CONVERSATION_TEMPLATES['ask_reason'].format(request_type=request_type)
            cleaned_reason = gemini_ai.extract_reason(user_text, request_type, question_asked)
            session['data']['reason'] = cleaned_reason
            print(f"π€ Gemini cleaned reason: '{user_text}' -> '{cleaned_reason}'")

            if request_type == 'exchange':
                response_text = CONVERSATION_TEMPLATES['ask_exchange_preference']
                next_step = 'ask_exchange_preference'
            else:
                response_text = self._generate_confirmation(session['data'])
                next_step = 'confirm_details'

        elif current_step == 'ask_exchange_preference':
            question_asked = CONVERSATION_TEMPLATES['ask_exchange_preference']
            session['data']['exchange_preference'] = gemini_ai.extract_exchange_preference(user_text, question_asked)
            print(f"π€ Gemini cleaned preference: '{user_text}' -> '{session['data']['exchange_preference']}'")
            response_text = self._generate_confirmation(session['data'])
            next_step = 'confirm_details'

        elif current_step == 'confirm_details':
            confirmation_question = self._generate_confirmation(session['data'])
            if self._is_confirmation(user_text, confirmation_question):
                result = self._process_request(session['data'])
                response_text = result['message']
                next_step = 'completed'

                # Save to database
                if DATABASE_INTEGRATION:
                    self._save_to_database(session['data'], result)
            else:
                # User said no - ask what was wrong
                response_text = CONVERSATION_TEMPLATES['ask_what_wrong']
                next_step = 'identify_correction'

        elif current_step == 'identify_correction':
            # User specified what they want to correct
            field_to_correct = gemini_ai.identify_correction_field(user_text, session['data'])
            print(f"π§ User wants to correct: {field_to_correct}")

            # Redirect to the appropriate question to get NEW answer
            if field_to_correct == 'request_type':
                response_text = CONVERSATION_TEMPLATES['ask_exchange_or_return']
                next_step = 'ask_exchange_or_return'
            elif field_to_correct == 'order_id':
                response_text = CONVERSATION_TEMPLATES['ask_order_id']
                next_step = 'ask_order_id'
            elif field_to_correct == 'reason':
                request_type = session['data']['request_type']
                response_text = CONVERSATION_TEMPLATES['ask_reason'].format(request_type=request_type)
                next_step = 'ask_reason'
            elif field_to_correct == 'exchange_preference':
                response_text = CONVERSATION_TEMPLATES['ask_exchange_preference']
                next_step = 'ask_exchange_preference'
            else:  # everything or unrecognized
                # Start over
                session['data'] = {}
                response_text = CONVERSATION_TEMPLATES['ask_exchange_or_return']
                next_step = 'ask_exchange_or_return'

        else:
            # Unknown step: restart at the greeting.
            response_text = CONVERSATION_TEMPLATES['greeting']
            next_step = 'greeting'

        # Update session
        session['step'] = next_step

        return {
            'text': response_text,
            'next_step': next_step,
            'session_data': session['data'],
            'valid_answers': self._get_valid_answers(next_step),
        }

    def _extract_request_type(self, text, question_asked):
        """Extract whether user wants exchange or return using Gemini AI
        with question context. Returns 'exchange'/'return', or None when
        Gemini yields nothing."""
        result = gemini_ai.extract_request_type(text, question_asked)
        if result:
            print(f"π€ Gemini understood '{text}' as: {result}")
            return result

    def _extract_order_id(self, text, question_asked):
        """Extract order ID from user input using Gemini AI with question
        context. Returns the ID, or None when nothing was found."""
        result = gemini_ai.extract_order_id(text, question_asked)
        if result:
            print(f"π€ Gemini extracted order ID from '{text}': {result}")
            return result

    def _is_confirmation(self, text, question_asked):
        """Check if user is confirming using Gemini AI with question context."""
        result = gemini_ai.is_confirmation(text, question_asked)
        print(f"π€ Gemini understood '{text}' as confirmation: {result}")
        return result

    def _get_valid_answers(self, step):
        """Return the set of valid / example answers for a given step.
        For ask_order_id we pull real order IDs from the database."""
        if step == 'greeting' or step == 'ask_exchange_or_return':
            return ['exchange', 'return', 'I want to exchange', 'I want to return']
        elif step == 'ask_order_id':
            try:
                Order = apps.get_model('store', 'Order')
                order_ids = list(
                    Order.objects.order_by('-id').values_list('id', flat=True)[:20]
                )
                return [str(oid) for oid in order_ids] if order_ids else ['No orders found']
            except Exception as e:
                print(f'[valid_answers] DB error: {e}')
                return ['order 123', '12345']
        elif step == 'ask_reason':
            return ['wrong size', 'defective product', "I don't like it", 'color mismatch', 'poor quality']
        elif step == 'ask_exchange_preference':
            return ['size large', 'different color - blue', 'size medium', 'black color']
        elif step == 'confirm_details':
            return ['yes', "yes that's correct", 'no', 'change the order number', 'wrong reason']
        return []

    def _generate_confirmation(self, data):
        """Build the confirmation message from the collected session data."""
        request_type = data['request_type']
        order_id = data['order_id']
        reason = data['reason']

        extra_info = ""
        if request_type == 'exchange':
            extra_info = f"You want {data.get('exchange_preference', 'a different product')} instead. "

        return CONVERSATION_TEMPLATES['confirm_details'].format(
            request_type=request_type,
            order_id=order_id,
            reason=reason,
            extra_info=extra_info
        )

    def _process_request(self, data):
        """Dispatch to the exchange or return processor."""
        request_type = data['request_type']

        if request_type == 'exchange':
            return self._process_exchange(data)
        else:
            return self._process_return(data)

    def _process_exchange(self, data):
        """Process exchange request: build tracking number, dates, message."""
        tracking_number = f"EXG-{data['order_id']}-{datetime.now().strftime('%Y%m%d%H%M%S')}"
        pickup_date = (datetime.now() + timedelta(days=2)).strftime('%B %d, %Y')
        delivery_date = (datetime.now() + timedelta(days=EXCHANGE_DELIVERY_DAYS)).strftime('%B %d, %Y')

        message = CONVERSATION_TEMPLATES['processing'].format(request_type='exchange')
        message += " " + CONVERSATION_TEMPLATES['exchange_pickup_scheduled'].format(pickup_date=pickup_date)
        message += " " + CONVERSATION_TEMPLATES['exchange_delivery_scheduled'].format(delivery_date=delivery_date)
        message += " " + CONVERSATION_TEMPLATES['success'].format(
            request_type='exchange',
            tracking_number=tracking_number
        )

        return {
            'message': message,
            'tracking_number': tracking_number,
            'pickup_date': pickup_date,
            'delivery_date': delivery_date
        }

    def _process_return(self, data):
        """Process return request: build tracking number, pickup date, message."""
        tracking_number = f"RET-{data['order_id']}-{datetime.now().strftime('%Y%m%d%H%M%S')}"
        pickup_date = (datetime.now() + timedelta(days=2)).strftime('%B %d, %Y')

        message = CONVERSATION_TEMPLATES['processing'].format(request_type='return')
        message += " " + CONVERSATION_TEMPLATES['return_scheduled'].format(pickup_date=pickup_date)
        message += " " + CONVERSATION_TEMPLATES['success'].format(
            request_type='return',
            tracking_number=tracking_number
        )

        return {
            'message': message,
            'tracking_number': tracking_number,
            'pickup_date': pickup_date
        }

    def _save_to_database(self, data, result):
        """Persist exchange/return details onto the order rows (best-effort)."""
        try:
            # BUG FIX: the rest of this module resolves models under the
            # 'store' app label (see _get_voice_models / _get_valid_answers);
            # this method used 'shop', which is inconsistent.
            Order = apps.get_model('store', 'Order')
            OrderDetail = apps.get_model('store', 'OrderDetail')

            order_id = data.get('order_id')

            # Find the order
            try:
                order = Order.objects.get(id=order_id)
                order_details = OrderDetail.objects.filter(order=order)

                # Update all order details
                for detail in order_details:
                    detail.return_reason = data.get('reason', '')
                    detail.return_status = 'Returned'
                    # BUG FIX: use an aware timestamp; naive datetime.now()
                    # triggers warnings / skew under Django USE_TZ.
                    detail.return_date = timezone.now()
                    detail.days_to_return = DEFAULT_RETURN_DAYS

                    if data['request_type'] == 'exchange':
                        detail.is_exchanged = True
                        detail.exchange_order = data.get('exchange_preference', '')

                    detail.save()

                print(f"β Database updated for Order {order_id}")
                print(f"   Reason: {data.get('reason')}")
                print(f"   Type: {data['request_type']}")
                print(f"   Tracking: {result.get('tracking_number')}")

            except Order.DoesNotExist:
                print(f"β Order {order_id} not found in database")

        except Exception as e:
            print(f"β Database error: {e}")
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
# Global conversation flow instance
|
| 466 |
+
conversation_flow = ConversationFlow()
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
# Django Views
|
| 470 |
+
@csrf_exempt
@require_http_methods(["POST"])
def start_conversation(request):
    """Start a new conversation session.

    Returns the session id, greeting text, initial step, and example
    answers the client may display. TTS is performed in the browser, so
    no audio is generated here.
    """
    import uuid
    session_id = str(uuid.uuid4())

    # BUG FIX: removed the unused per-request ElevenLabsTTS() instance —
    # the response is pure JSON and the client handles speech synthesis.
    greeting_text = CONVERSATION_TEMPLATES['greeting']

    # Create DB session record (best-effort; the in-memory flow still
    # works when persistence fails).
    try:
        VoiceSession, _ = _get_voice_models()
        VoiceSession.objects.create(session_id=session_id)
    except Exception as e:
        print(f'[voice] DB session create error: {e}')

    return JsonResponse({
        'session_id': session_id,
        'message': greeting_text,
        'next_step': 'greeting',
        'gemini_enabled': gemini_ai.enabled,
        'valid_answers': ['exchange', 'return', 'I want to exchange', 'I want to return'],
    })
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
@csrf_exempt
@require_http_methods(["POST"])
def process_voice(request):
    """Process one user voice turn and return the bot reply as JSON.

    Expects a JSON body with 'session_id', 'text' and 'current_step'.
    Runs the conversation flow, logs the turn to the database
    (best-effort), and returns text / next_step / session_data /
    valid_answers. TTS is handled in the browser via speechSynthesis.
    """
    try:
        data = json.loads(request.body)
        session_id = data.get('session_id')
        user_text = data.get('text')
        current_step = data.get('current_step')

        # BUG FIX: validate inputs up front so a malformed request gets a
        # clear 400 instead of an opaque 500 from deeper in the flow.
        if not session_id or user_text is None or not current_step:
            return JsonResponse(
                {'error': 'session_id, text and current_step are required'},
                status=400,
            )

        # Log what goes into the LLM (and the current state)
        print(f"[voice] LLM_INPUT step={current_step} user='{user_text}'")

        result = conversation_flow.process_user_input(session_id, user_text, current_step)
        print(
            f"[voice] session={session_id} step={current_step} -> next={result['next_step']} "
            f"user='{user_text}' response='{result['text'][:200]}'"
        )
        # Log LLM outputs / parsed session data for debugging intent extraction
        try:
            print(f"[voice] LLM_OUTPUT data={json.dumps(result.get('session_data', {}))}")
        except Exception:
            print(f"[voice] LLM_OUTPUT data={result.get('session_data')}")

        # -- Persist turn to database (best-effort) ---------------------
        try:
            VoiceSession, VoiceMessage = _get_voice_models()
            vs = VoiceSession.objects.filter(session_id=session_id).first()
            if vs:
                turn_num = vs.messages.count() + 1
                # Map each step to the session-data key it extracts, so
                # the message row records what this turn produced.
                step_to_key = {
                    'greeting': 'request_type',
                    'ask_exchange_or_return': 'request_type',
                    'ask_order_id': 'order_id',
                    'ask_reason': 'reason',
                    'ask_exchange_preference': 'exchange_preference',
                    'confirm_details': 'confirmed',
                }
                VoiceMessage.objects.create(
                    session=vs,
                    turn_number=turn_num,
                    step=current_step,
                    next_step=result['next_step'],
                    user_text=user_text,
                    bot_text=result['text'],
                    extracted_value=str(result.get('session_data', {}).get(
                        step_to_key.get(current_step, ''), '')),
                    session_data_snapshot=result.get('session_data', {}),
                )
                # Update session-level fields from collected data
                sd = result.get('session_data', {})
                vs.request_type = sd.get('request_type', vs.request_type)
                vs.order_id = sd.get('order_id', vs.order_id)
                vs.reason = sd.get('reason', vs.reason)
                vs.exchange_preference = sd.get('exchange_preference', vs.exchange_preference)
                vs.session_data_json = sd
                if result['next_step'] == 'completed':
                    vs.status = 'completed'
                    vs.ended_at = timezone.now()
                    vs.tracking_number = sd.get('tracking_number', '')
                vs.save()
        except Exception as e:
            print(f'[voice] DB log error: {e}')
        # ---------------------------------------------------------------
        # Return JSON — TTS is handled in the browser via speechSynthesis
        return JsonResponse({
            'text': result['text'],
            'next_step': result['next_step'],
            'session_data': result['session_data'],
            'valid_answers': result.get('valid_answers', []),
        })

    except Exception as e:
        return JsonResponse({'error': str(e)}, status=500)
|
| 570 |
+
|
| 571 |
+
|
| 572 |
+
@csrf_exempt
@require_http_methods(["POST"])
def get_speech(request):
    """Echo the requested text back as JSON.

    The browser performs the actual text-to-speech via speechSynthesis,
    so this endpoint only round-trips the text (empty string when the
    'text' key is absent).
    """
    try:
        payload = json.loads(request.body)
        return JsonResponse({'text': payload.get('text', '')})
    except Exception as exc:
        return JsonResponse({'error': str(exc)}, status=500)
|
voice_urls.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# URL Configuration for Voice Bot
|
| 2 |
+
from django.urls import path
|
| 3 |
+
from voice_service import start_conversation, process_voice, get_speech
|
| 4 |
+
|
| 5 |
+
urlpatterns = [
|
| 6 |
+
path('start/', start_conversation, name='start_conversation'),
|
| 7 |
+
path('process/', process_voice, name='process_voice'),
|
| 8 |
+
path('speak/', get_speech, name='get_speech'),
|
| 9 |
+
]
|