rairo commited on
Commit
c83774d
·
verified ·
1 Parent(s): c601c21

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +469 -427
main.py CHANGED
@@ -1,12 +1,12 @@
1
  """
2
- main.py — Pricelyst Shopping Advisor (Jessica Edition - Grounded Data & Memory)
3
 
4
  ✅ Flask API
5
- ✅ Firebase Admin persistence
6
- ✅ Gemini via google-genai SDK (Fixed & Robust)
7
- RAG (Retrieval Augmented Generation) for Shopping Plans
8
- Long-Term Memory (Personal Details Extraction)
9
- ✅ Real Pricing Logic (No Hallucinations)
10
 
11
  ENV VARS:
12
  - GOOGLE_API_KEY=...
@@ -20,10 +20,10 @@ import os
20
  import re
21
  import json
22
  import time
23
- import base64
24
  import logging
25
  from datetime import datetime, timezone
26
- from typing import Any, Dict, List, Optional
27
 
28
  import requests
29
  import pandas as pd
@@ -38,7 +38,7 @@ logging.basicConfig(
38
  )
39
  logger = logging.getLogger("pricelyst-advisor")
40
 
41
- # ––––– Gemini (NEW SDK) –––––
42
 
43
  try:
44
  from google import genai
@@ -65,19 +65,17 @@ from firebase_admin import credentials, firestore
65
 
66
  FIREBASE_ENV = os.environ.get("FIREBASE", "")
67
 
68
- def init_firestore_from_env() -> firestore.Client:
69
  if firebase_admin._apps:
70
  return firestore.client()
71
-
72
  if not FIREBASE_ENV:
73
  logger.warning("FIREBASE env var missing. Persistence disabled.")
74
  return None
75
-
76
  try:
77
  sa_info = json.loads(FIREBASE_ENV)
78
  cred = credentials.Certificate(sa_info)
79
  firebase_admin.initialize_app(cred)
80
- logger.info("Firebase initialized successfully.")
81
  return firestore.client()
82
  except Exception as e:
83
  logger.critical("Failed to initialize Firebase: %s", e)
@@ -85,543 +83,587 @@ def init_firestore_from_env() -> firestore.Client:
85
 
86
  db = init_firestore_from_env()
87
 
88
- # ––––– External API (Pricelyst) –––––
89
 
90
  PRICE_API_BASE = os.environ.get("PRICE_API_BASE", "https://api.pricelyst.co.zw").rstrip("/")
91
- HTTP_TIMEOUT = 25
92
-
93
- # ––––– Flask –––––
94
-
95
- app = Flask(__name__)
96
- CORS(app)
 
 
 
 
 
 
 
 
 
97
 
98
- # ––––– In-memory product cache –––––
99
 
100
- PRODUCT_CACHE_TTL_SEC = 60 * 15 # 15 minutes
101
- _product_cache: Dict[str, Any] = {
102
  "ts": 0,
103
- "df_offers": pd.DataFrame(),
104
- "raw_count": 0,
105
  }
106
 
107
- # ––––– Static Data (Fallbacks) –––––
108
-
109
- ZIM_ESSENTIALS = {
110
- "fuel": {"price": 1.58, "unit": "L", "retailer": "Pump Price"},
111
- "petrol": {"price": 1.58, "unit": "L", "retailer": "Pump Price"},
112
- "diesel": {"price": 1.65, "unit": "L", "retailer": "Pump Price"},
113
- "bread": {"price": 1.00, "unit": "loaf", "retailer": "Standard"},
114
- "gas": {"price": 2.00, "unit": "kg", "retailer": "LPG Market"},
115
- "electricity": {"price": 20.00, "unit": "est. month", "retailer": "ZESA"},
116
- "zesa": {"price": 20.00, "unit": "est. month", "retailer": "ZESA"},
117
- }
118
 
119
  # =========================
120
- # Helpers
121
  # =========================
122
 
123
- def now_utc_iso() -> str:
124
- return datetime.now(timezone.utc).isoformat()
 
 
125
 
126
- def _coerce_float(v: Any) -> float:
127
  try:
128
- if v is None: return 0.0
129
- return float(v)
130
- except Exception:
131
  return 0.0
132
 
133
- def _norm_str(s: Any) -> str:
134
- s = "" if s is None else str(s)
135
- s = s.strip().lower()
136
- s = re.sub(r"\s+", " ", s)
137
- return s
138
-
139
- def _safe_json_loads(s: str, fallback: Any):
140
- try:
141
- # Clean potential markdown wrapping
142
- if "```json" in s:
143
- s = s.split("```json")[1].split("```")[0]
144
- elif "```" in s:
145
- s = s.split("```")[0]
146
- return json.loads(s)
147
- except Exception:
148
- return fallback
149
-
150
- # =========================
151
- # Firestore
152
- # =========================
153
-
154
- def profile_ref(profile_id: str):
155
- if not db: return None
156
- return db.collection("pricelyst_profiles").document(profile_id)
157
-
158
- def get_profile(profile_id: str) -> Dict[str, Any]:
159
- if not db: return {}
160
- try:
161
- ref = profile_ref(profile_id)
162
- doc = ref.get()
163
- if doc.exists:
164
- return doc.to_dict() or {}
165
-
166
- data = {
167
- "profile_id": profile_id,
168
- "created_at": now_utc_iso(),
169
- "updated_at": now_utc_iso(),
170
- "username": None,
171
- "memory_summary": "",
172
- "preferences": {},
173
- "counters": {"chats": 0, "calls": 0}
174
- }
175
- ref.set(data)
176
- return data
177
- except Exception as e:
178
- logger.error("DB Error get_profile: %s", e)
179
- return {}
180
-
181
- def update_profile(profile_id: str, patch: Dict[str, Any]) -> None:
182
- if not db: return
183
- try:
184
- patch = dict(patch)
185
- patch["updated_at"] = now_utc_iso()
186
- profile_ref(profile_id).set(patch, merge=True)
187
- except Exception as e:
188
- logger.error("DB Error update_profile: %s", e)
189
-
190
- def log_call(profile_id: str, payload: Dict[str, Any]) -> str:
191
- if not db: return str(int(time.time()))
192
- try:
193
- ref = db.collection("pricelyst_profiles").document(profile_id).collection("call_logs").document()
194
- ref.set({
195
- **payload,
196
- "ts": now_utc_iso()
197
- })
198
- return ref.id
199
- except Exception as e:
200
- logger.error("DB Error log_call: %s", e)
201
- return ""
202
-
203
- # =========================
204
- # Data Ingestion (ETL)
205
- # =========================
206
-
207
- def fetch_products(max_pages: int = 10, per_page: int = 50) -> List[Dict[str, Any]]:
208
- """Fetch raw products from Pricelyst API."""
209
  all_products = []
210
- for p in range(1, max_pages + 1):
 
 
211
  try:
212
- url = f"{PRICE_API_BASE}/api/v1/products"
213
- r = requests.get(url, params={"page": p, "perPage": per_page}, timeout=HTTP_TIMEOUT)
 
214
  r.raise_for_status()
215
- data = r.json().get("data") or []
 
216
  if not data: break
 
217
  all_products.extend(data)
218
 
219
- # Pagination check
220
- meta = r.json()
221
- if p >= (meta.get("totalPages") or 999):
222
  break
 
223
  except Exception as e:
224
- logger.warning(f"Product fetch error page {p}: {e}")
225
  break
226
- return all_products
227
 
228
- def flatten_products_to_df(products: List[Dict[str, Any]]) -> pd.DataFrame:
229
- """
230
- Strict mapping of the nested JSON structure to a flat search index.
231
- Structure: product -> prices[] -> retailer
232
- """
233
  rows = []
234
- for p in products:
235
  try:
236
  p_id = p.get("id")
237
  p_name = p.get("name") or "Unknown"
238
- p_desc = p.get("description") or ""
239
 
240
- # Get Primary Category
241
- cat_name = "General"
242
- cats = p.get("categories") or []
243
- if cats and isinstance(cats, list) and len(cats) > 0:
244
- cat_name = cats[0].get("name") or "General"
245
-
246
- # Brand
247
- brand_name = (p.get("brand") or {}).get("brand_name") or ""
 
 
248
 
249
- # Iterate Prices (Real Offers)
250
  prices = p.get("prices") or []
251
 
252
- # Fallback if no prices found
253
  if not prices:
254
- base_price = _coerce_float(p.get("price"))
255
- if base_price > 0:
256
- rows.append({
257
- "product_id": p_id,
258
- "product_name": p_name,
259
- "clean_name": _norm_str(p_name),
260
- "description": p_desc,
261
- "category": cat_name,
262
- "brand": brand_name,
263
- "retailer": "Pricelyst Base",
264
- "price": base_price,
265
- "image": p.get("thumbnail") or p.get("image"),
266
- })
267
  continue
268
 
269
  for offer in prices:
270
- retailer_obj = offer.get("retailer") or {}
271
- retailer_name = retailer_obj.get("name") or "Unknown Store"
272
- price_val = _coerce_float(offer.get("price"))
273
 
274
  if price_val > 0:
275
  rows.append({
276
  "product_id": p_id,
277
  "product_name": p_name,
278
- "clean_name": _norm_str(p_name),
279
- "description": p_desc,
280
- "category": cat_name,
281
  "brand": brand_name,
282
- "retailer": retailer_name,
 
283
  "price": price_val,
284
- "image": p.get("thumbnail") or p.get("image"),
 
 
285
  })
286
-
287
- except Exception as e:
288
  continue
289
 
290
  df = pd.DataFrame(rows)
291
  return df
292
 
293
- def get_data_index(force_refresh: bool = False) -> pd.DataFrame:
294
- """Singleton accessor for the product Dataframe."""
295
- global _product_cache
296
-
297
- is_stale = (time.time() - _product_cache["ts"]) > PRODUCT_CACHE_TTL_SEC
298
- if force_refresh or is_stale or _product_cache["df_offers"].empty:
299
- logger.info("Refreshing Product Index...")
300
- try:
301
- raw_products = fetch_products(max_pages=15)
302
- df = flatten_products_to_df(raw_products)
303
-
304
- _product_cache["ts"] = time.time()
305
- _product_cache["df_offers"] = df
306
- _product_cache["raw_count"] = len(raw_products)
307
- logger.info(f"Index Refreshed: {len(df)} offers from {len(raw_products)} products.")
308
- except Exception as e:
309
- logger.error(f"Failed to refresh index: {e}")
310
- if isinstance(_product_cache["df_offers"], pd.DataFrame):
311
- return _product_cache["df_offers"]
312
- return pd.DataFrame()
313
-
314
- return _product_cache["df_offers"]
315
 
316
  # =========================
317
- # Search & Matching Logic
318
  # =========================
319
 
320
- def search_index(df: pd.DataFrame, query: str, limit: int = 5) -> List[Dict[str, Any]]:
321
- """
322
- Search the DF using token overlap + substring matching.
323
- """
324
- if df.empty: return []
325
 
326
- q_norm = _norm_str(query)
327
  q_tokens = set(q_norm.split())
328
 
329
- def score_text(text):
330
- if not isinstance(text, str): return 0
331
- text_tokens = set(text.split())
332
- if not text_tokens: return 0
333
- intersection = q_tokens.intersection(text_tokens)
334
- return len(intersection) / len(q_tokens)
335
-
336
- temp_df = df.copy()
337
- temp_df['score'] = temp_df['clean_name'].apply(score_text)
338
 
339
- # Filter for relevant matches
340
- matches = temp_df[ (temp_df['score'] > 0.4) | (temp_df['clean_name'].str.contains(q_norm, regex=False)) ]
 
 
 
 
 
 
 
 
 
 
341
 
342
- if matches.empty:
343
- # Fallback: Try searching category
344
- matches = temp_df[temp_df['category'].str.lower().str.contains(q_norm, na=False)]
345
 
346
- if matches.empty:
347
- return []
 
 
 
 
 
348
 
349
- # Sort by Score desc, then Price asc
350
- matches = matches.sort_values(by=['score', 'price'], ascending=[False, True])
351
-
352
- # Unique product logic
353
- unique_products = []
354
- seen_ids = set()
355
 
356
- for _, row in matches.iterrows():
357
- pid = row['product_id']
358
- if pid in seen_ids: continue
359
- seen_ids.add(pid)
 
 
360
 
361
- unique_products.append({
362
- "id": pid,
363
- "name": row['product_name'],
364
- "price": row['price'],
365
- "retailer": row['retailer'],
366
- "category": row['category'],
367
- "image": row['image']
368
  })
369
- if len(unique_products) >= limit: break
370
-
371
- return unique_products
372
 
373
- # =========================
374
- # Gemini Functions (FIXED & ROBUST)
375
- # =========================
376
-
377
- def gemini_generate_text(system_prompt: str, user_prompt: str) -> str:
378
- """Standard text generation."""
379
- if not _gemini_client: return ""
380
- try:
381
- # Simplified call using contents string directly
382
- response = _gemini_client.models.generate_content(
383
- model=GEMINI_MODEL,
384
- contents=system_prompt + "\n\n" + user_prompt,
385
- config=types.GenerateContentConfig(
386
- temperature=0.4
387
- )
388
- )
389
- return response.text or ""
390
- except Exception as e:
391
- logger.error(f"Gemini Text Error: {e}")
392
- return ""
393
 
394
- def gemini_generate_json(system_prompt: str, user_prompt: str) -> Dict[str, Any]:
395
- """JSON generation with strict parsing."""
396
- if not _gemini_client: return {}
397
- try:
398
- response = _gemini_client.models.generate_content(
399
- model=GEMINI_MODEL,
400
- contents=system_prompt + "\n\n" + user_prompt,
401
- config=types.GenerateContentConfig(
402
- response_mime_type="application/json",
403
- temperature=0.2
404
- )
405
- )
406
- return _safe_json_loads(response.text, {})
407
- except Exception as e:
408
- logger.error(f"Gemini JSON Error: {e}")
409
- return {}
410
-
411
- # =========================
412
- # Long Term Memory Engine
413
- # =========================
414
-
415
- MEMORY_SYSTEM_PROMPT = """
416
- You are the Memory Manager for Jessica, an AI Shopping Assistant.
417
- Your job is to update the User's "Memory Summary" based on their latest conversation.
418
-
419
- INPUTS:
420
- 1. Current Memory: The existing summary of what we know about the user.
421
- 2. New Transcript: The latest conversation.
422
-
423
- GOAL:
424
- Update the Current Memory to include new details. Focus on:
425
- - Names (User, Family, Friends)
426
- - Dietary preferences or allergies
427
- - Budget habits (e.g., "likes cheap meat", "buys bulk")
428
- - Life events (e.g., "hosting a braai on Friday", "wife's birthday")
429
- - Feedback (e.g., "loved the T-bone suggestion")
430
 
431
- OUTPUT:
432
- Return ONLY the updated text summary. Keep it concise (max 150 words).
433
- """
 
 
 
 
 
 
 
 
 
434
 
435
- def update_long_term_memory(profile_id: str, transcript: str) -> None:
436
- """Updates the user's profile memory summary based on the new call."""
437
- if len(transcript) < 20: return
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
438
 
439
- prof = get_profile(profile_id)
440
- current_memory = prof.get("memory_summary", "")
 
 
 
 
441
 
442
- user_prompt = f"CURRENT MEMORY:\n{current_memory}\n\nNEW TRANSCRIPT:\n{transcript}"
443
 
444
- try:
445
- new_memory = gemini_generate_text(MEMORY_SYSTEM_PROMPT, user_prompt)
446
- if new_memory and len(new_memory) > 10:
447
- update_profile(profile_id, {"memory_summary": new_memory})
448
- logger.info(f"Memory updated for {profile_id}")
449
- except Exception as e:
450
- logger.error(f"Memory update failed: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
451
 
452
  # =========================
453
- # Shopping Plan Engine (RAG)
454
  # =========================
455
 
456
- EXTRACT_SYSTEM_PROMPT = """
457
- You are a Shopping Assistant Data Extractor.
458
- Analyze the transcript and extract a list of shopping items the user implicitly or explicitly wants.
459
- Return JSON: { "items": [ { "name": "searchable term", "qty": "quantity string" } ] }
460
- If no items found, return { "items": [] }.
461
- """
462
-
463
- SYNTHESIS_SYSTEM_PROMPT = """
464
- You are Jessica, Pricelyst's Shopping Advisor.
465
- Generate a shopping plan based on the USER TRANSCRIPT and the DATA CONTEXT provided.
466
-
467
- RULES:
468
- 1. USE REAL DATA: Use the prices and retailers found in DATA CONTEXT.
469
- 2. ESTIMATES: If context says "FOUND: FALSE", use your best guess for Zimbabwe prices and mark as "(Est)".
470
- 3. FORMAT: Return strict JSON with a 'markdown_content' field containing a professional report.
471
-
472
- JSON SCHEMA:
473
- {
474
- "is_actionable": true,
475
- "title": "Short Title",
476
- "markdown_content": "# Title\n\n..."
477
- }
478
- """
479
-
480
- def build_shopping_plan(transcript: str) -> Dict[str, Any]:
481
  """
482
- RAG Pipeline: Extract -> Search -> Synthesize
 
 
483
  """
484
- if len(transcript) < 10:
485
- return {"is_actionable": False}
486
-
487
- # 1. Extract
488
- extraction = gemini_generate_json(EXTRACT_SYSTEM_PROMPT, f"TRANSCRIPT:\n{transcript}")
489
- items_requested = extraction.get("items", [])
 
 
 
 
490
 
491
- if not items_requested:
492
- return {"is_actionable": False}
 
 
 
 
 
 
 
 
 
 
493
 
494
- df = get_data_index()
 
495
 
496
- # 2. Retrieval (Grounding)
497
- context_lines = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
498
 
499
- for item in items_requested:
500
- term = item.get("name", "")
501
- qty_str = item.get("qty", "1")
502
-
503
- # Check Essentials Fallback
504
- ess_key = next((k for k in ZIM_ESSENTIALS if k in term.lower()), None)
505
- if ess_key:
506
- data = ZIM_ESSENTIALS[ess_key]
507
- context_lines.append(f"- ITEM: {term} | SOURCE: Market Rate | PRICE: ${data['price']} | RETAILER: {data['retailer']}")
508
- continue
509
-
510
- # Search DB
511
- hits = search_index(df, term, limit=1)
512
- if hits:
513
- best = hits[0]
514
- context_lines.append(f"- ITEM: {term} | FOUND: TRUE | PRODUCT: {best['name']} | PRICE: ${best['price']} | RETAILER: {best['retailer']}")
515
- else:
516
- context_lines.append(f"- ITEM: {term} | FOUND: FALSE | NOTE: Needs estimation.")
517
-
518
- data_context = "\n".join(context_lines)
519
- logger.info(f"Plan Context:\n{data_context}")
520
-
521
- # 3. Synthesis
522
- final_prompt = f"TRANSCRIPT:\n{transcript}\n\nDATA CONTEXT (Real Prices):\n{data_context}"
523
- plan = gemini_generate_json(SYNTHESIS_SYSTEM_PROMPT, final_prompt)
524
 
525
- return plan
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
526
 
527
  # =========================
528
- # API Endpoints
529
  # =========================
530
 
531
  @app.get("/health")
532
  def health():
533
- df = get_data_index()
534
  return jsonify({
535
  "ok": True,
536
- "ts": now_utc_iso(),
537
- "products_indexed": len(df)
538
  })
539
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
540
  @app.post("/api/call-briefing")
541
  def call_briefing():
542
  """
543
- Returns memory context to the frontend to pass to ElevenLabs.
 
544
  """
545
  body = request.get_json(silent=True) or {}
546
- profile_id = body.get("profile_id")
547
  username = body.get("username")
548
 
549
- if not profile_id:
550
- return jsonify({"ok": False, "error": "Missing profile_id"}), 400
551
-
552
- prof = get_profile(profile_id)
553
- if username:
554
- update_profile(profile_id, {"username": username})
555
-
556
- # Prepare intelligence payload
557
- kpi_data = {
558
- "username": username or prof.get("username") or "Friend",
559
- "market_rates": ZIM_ESSENTIALS,
560
- "tone": "practical_zimbabwe",
561
- "system_instruction": "You are Jessica. If user asks about 'how was the party?', check 'memory_summary' variable."
 
 
 
562
  }
563
-
564
  return jsonify({
565
  "ok": True,
566
  "memory_summary": prof.get("memory_summary", ""),
567
- "kpi_snapshot": json.dumps(kpi_data)
568
  })
569
 
570
  @app.post("/api/log-call-usage")
571
  def log_call_usage():
572
  """
573
- 1. Update Memory (Async logic, effectively)
574
- 2. Generate Shopping Plan (Ground Truth)
575
- 3. Persist Log
576
  """
577
  body = request.get_json(silent=True) or {}
578
- profile_id = body.get("profile_id")
579
  transcript = body.get("transcript", "")
580
 
581
- if not profile_id:
582
- return jsonify({"ok": False, "error": "Missing profile_id"}), 400
583
-
584
- logger.info(f"Processing Call for {profile_id}. Transcript Len: {len(transcript)}")
585
-
586
- # 1. Update Long Term Memory
587
- update_long_term_memory(profile_id, transcript)
588
-
589
- # 2. Generate Plan
590
- plan_data = {}
591
- plan_id = None
592
 
593
- if len(transcript) > 20:
 
594
  try:
595
- plan_data = build_shopping_plan(transcript)
 
 
 
596
 
597
- if plan_data.get("is_actionable"):
598
- plan_ref = db.collection("pricelyst_profiles").document(profile_id).collection("shopping_plans").document()
599
- plan_data["id"] = plan_ref.id
600
- plan_data["created_at"] = now_utc_iso()
601
- plan_ref.set(plan_data)
602
- plan_id = plan_ref.id
603
- logger.info(f"Plan Created: {plan_id}")
604
  except Exception as e:
605
- logger.error(f"Plan Gen Error: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
606
 
607
  # 3. Log Call
608
- log_call(profile_id, {
609
- "transcript": transcript,
610
- "duration": body.get("duration_seconds"),
611
- "plan_id": plan_id
612
- })
 
613
 
614
  return jsonify({
615
  "ok": True,
616
  "shopping_plan": plan_data if plan_data.get("is_actionable") else None
617
  })
618
 
619
- # ––––– CRUD: Shopping Plans –––––
620
 
621
  @app.get("/api/shopping-plans")
622
  def list_plans():
623
  pid = request.args.get("profile_id")
624
- if not pid: return jsonify({"ok": False}), 400
625
  try:
626
  docs = db.collection("pricelyst_profiles").document(pid).collection("shopping_plans") \
627
  .order_by("created_at", direction=firestore.Query.DESCENDING).limit(10).stream()
@@ -633,12 +675,12 @@ def list_plans():
633
  @app.delete("/api/shopping-plans/<plan_id>")
634
  def delete_plan(plan_id):
635
  pid = request.args.get("profile_id")
636
- if not pid: return jsonify({"ok": False}), 400
637
  try:
638
  db.collection("pricelyst_profiles").document(pid).collection("shopping_plans").document(plan_id).delete()
639
  return jsonify({"ok": True})
640
- except Exception as e:
641
- return jsonify({"ok": False, "error": str(e)}), 500
642
 
643
  # =========================
644
  # Main
@@ -646,9 +688,9 @@ def delete_plan(plan_id):
646
 
647
  if __name__ == "__main__":
648
  port = int(os.environ.get("PORT", 7860))
649
- # Pre-warm cache
650
  try:
651
- get_data_index(force_refresh=True)
652
  except:
653
  pass
654
  app.run(host="0.0.0.0", port=port)
 
1
  """
2
+ main.py — Pricelyst Shopping Advisor (Analyst Edition)
3
 
4
  ✅ Flask API
5
+ ✅ Firebase Admin Persistence
6
+ ✅ Gemini via google-genai SDK (Robust)
7
+ "Analyst Engine": Python Math for Baskets, ZESA, & Fuel
8
+ Ground Truth Data: Uses /api/v1/product-listing
9
+ ✅ Real-Time Basket Optimization
10
 
11
  ENV VARS:
12
  - GOOGLE_API_KEY=...
 
20
  import re
21
  import json
22
  import time
23
+ import math
24
  import logging
25
  from datetime import datetime, timezone
26
+ from typing import Any, Dict, List, Optional, Tuple
27
 
28
  import requests
29
  import pandas as pd
 
38
  )
39
  logger = logging.getLogger("pricelyst-advisor")
40
 
41
+ # ––––– Gemini SDK –––––
42
 
43
  try:
44
  from google import genai
 
65
 
66
  FIREBASE_ENV = os.environ.get("FIREBASE", "")
67
 
68
+ def init_firestore_from_env() -> Optional[firestore.Client]:
69
  if firebase_admin._apps:
70
  return firestore.client()
 
71
  if not FIREBASE_ENV:
72
  logger.warning("FIREBASE env var missing. Persistence disabled.")
73
  return None
 
74
  try:
75
  sa_info = json.loads(FIREBASE_ENV)
76
  cred = credentials.Certificate(sa_info)
77
  firebase_admin.initialize_app(cred)
78
+ logger.info("Firebase initialized.")
79
  return firestore.client()
80
  except Exception as e:
81
  logger.critical("Failed to initialize Firebase: %s", e)
 
83
 
84
  db = init_firestore_from_env()
85
 
86
+ # ––––– External API –––––
87
 
88
  PRICE_API_BASE = os.environ.get("PRICE_API_BASE", "https://api.pricelyst.co.zw").rstrip("/")
89
+ HTTP_TIMEOUT = 30
90
+
91
+ # ––––– Static Data (Zim Context) –––––
92
+
93
+ ZIM_UTILITIES = {
94
+ "fuel_petrol": 1.58, # USD per Litre
95
+ "fuel_diesel": 1.65, # USD per Litre
96
+ "gas_lpg": 2.00, # USD per kg
97
+ "bread": 1.00, # USD fixed
98
+ # ZESA Estimates (Simplified Stepped Tariff)
99
+ "zesa_step_1": {"limit": 50, "rate": 0.04}, # First 50 units (Life line)
100
+ "zesa_step_2": {"limit": 150, "rate": 0.09}, # Next 150
101
+ "zesa_step_3": {"limit": 9999, "rate": 0.14}, # Balance
102
+ "zesa_levy": 0.06 # 6% REA levy approx
103
+ }
104
 
105
+ # ––––– Cache –––––
106
 
107
+ PRODUCT_CACHE_TTL = 60 * 20 # 20 mins
108
+ _data_cache: Dict[str, Any] = {
109
  "ts": 0,
110
+ "df": pd.DataFrame(), # Columns: [id, name, clean_name, brand, category, retailer, price, views, image]
111
+ "raw_count": 0
112
  }
113
 
114
+ app = Flask(__name__)
115
+ CORS(app)
 
 
 
 
 
 
 
 
 
116
 
117
  # =========================
118
+ # 1. ETL Layer (Ingestion)
119
  # =========================
120
 
121
+ def _norm(s: Any) -> str:
122
+ """Normalize string for fuzzy search."""
123
+ if not s: return ""
124
+ return str(s).strip().lower()
125
 
126
+ def _coerce_price(v: Any) -> float:
127
  try:
128
+ return float(v) if v is not None else 0.0
129
+ except:
 
130
  return 0.0
131
 
132
+ def fetch_and_flatten_data() -> pd.DataFrame:
133
+ """
134
+ Fetches from /api/v1/product-listing and flattens into an analytical DF.
135
+ Each row represents a single 'Offer' (Product X at Retailer Y).
136
+ """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
  all_products = []
138
+ page = 1
139
+
140
+ while True:
141
  try:
142
+ # New Endpoint Structure
143
+ url = f"{PRICE_API_BASE}/api/v1/product-listing"
144
+ r = requests.get(url, params={"page": page, "perPage": 50}, timeout=HTTP_TIMEOUT)
145
  r.raise_for_status()
146
+ payload = r.json()
147
+ data = payload.get("data") or []
148
  if not data: break
149
+
150
  all_products.extend(data)
151
 
152
+ meta = payload
153
+ if page >= (meta.get("totalPages") or 99):
 
154
  break
155
+ page += 1
156
  except Exception as e:
157
+ logger.error(f"ETL Error on page {page}: {e}")
158
  break
 
159
 
160
+ # Flattening Logic
 
 
 
 
161
  rows = []
162
+ for p in all_products:
163
  try:
164
  p_id = p.get("id")
165
  p_name = p.get("name") or "Unknown"
166
+ clean_name = _norm(p_name)
167
 
168
+ # Category & Brand extraction
169
+ # Based on user JSON: 'category' is an object inside product
170
+ cat_obj = p.get("category") or {}
171
+ cat_name = cat_obj.get("name") or "General"
172
+
173
+ brand_obj = p.get("brand") or {}
174
+ brand_name = brand_obj.get("brand_name") or ""
175
+
176
+ views = int(p.get("view_count") or 0)
177
+ image = p.get("thumbnail") or p.get("image")
178
 
179
+ # Prices array
180
  prices = p.get("prices") or []
181
 
182
+ # If no prices, we still index product for "Knowledge" but with price=0
183
  if not prices:
184
+ rows.append({
185
+ "product_id": p_id,
186
+ "product_name": p_name,
187
+ "clean_name": clean_name,
188
+ "brand": brand_name,
189
+ "category": cat_name,
190
+ "retailer": "Listing",
191
+ "price": 0.0,
192
+ "views": views,
193
+ "image": image,
194
+ "is_offer": False
195
+ })
 
196
  continue
197
 
198
  for offer in prices:
199
+ retailer = offer.get("retailer") or {}
200
+ r_name = retailer.get("name") or "Unknown Store"
201
+ price_val = _coerce_price(offer.get("price"))
202
 
203
  if price_val > 0:
204
  rows.append({
205
  "product_id": p_id,
206
  "product_name": p_name,
207
+ "clean_name": clean_name,
 
 
208
  "brand": brand_name,
209
+ "category": cat_name,
210
+ "retailer": r_name,
211
  "price": price_val,
212
+ "views": views,
213
+ "image": image,
214
+ "is_offer": True
215
  })
216
+ except:
 
217
  continue
218
 
219
  df = pd.DataFrame(rows)
220
  return df
221
 
222
+ def get_market_index(force_refresh: bool = False) -> pd.DataFrame:
223
+ """Singleton access to the Dataframe."""
224
+ global _data_cache
225
+ if force_refresh or _data_cache["df"].empty or (time.time() - _data_cache["ts"] > PRODUCT_CACHE_TTL):
226
+ logger.info("ETL: Refreshing Market Index...")
227
+ df = fetch_and_flatten_data()
228
+ _data_cache["df"] = df
229
+ _data_cache["ts"] = time.time()
230
+ _data_cache["raw_count"] = len(df)
231
+ logger.info(f"ETL: Loaded {len(df)} market offers.")
232
+ return _data_cache["df"]
 
 
 
 
 
 
 
 
 
 
 
233
 
234
  # =========================
235
+ # 2. Analyst Engine (Math Logic)
236
  # =========================
237
 
238
+ def search_products_fuzzy(df: pd.DataFrame, query: str, limit: int = 10) -> pd.DataFrame:
239
+ """Finds products matching query (Name, Brand, or Category)."""
240
+ if df.empty or not query: return df
 
 
241
 
242
+ q_norm = _norm(query)
243
  q_tokens = set(q_norm.split())
244
 
245
+ # Quick filter: String contains
246
+ mask_name = df['clean_name'].str.contains(q_norm, regex=False)
247
+ mask_brand = df['brand'].str.lower().str.contains(q_norm, regex=False)
248
+ mask_cat = df['category'].str.lower().str.contains(q_norm, regex=False)
249
+
250
+ matches = df[mask_name | mask_brand | mask_cat].copy()
 
 
 
251
 
252
+ # Simple Scoring
253
+ def scorer(row):
254
+ score = 0
255
+ if q_norm in row['clean_name']: score += 10
256
+ if q_norm == row['clean_name']: score += 20
257
+ # Popularity boost
258
+ score += math.log(row['views'] + 1) * 0.5
259
+ return score
260
+
261
+ if not matches.empty:
262
+ matches['score'] = matches.apply(scorer, axis=1)
263
+ return matches.sort_values('score', ascending=False).head(limit)
264
 
265
+ return matches
 
 
266
 
267
+ def calculate_basket_optimization(item_names: List[str]) -> Dict[str, Any]:
268
+ """
269
+ Killer Question: 'Where should I buy this list?'
270
+ Returns: Best Store, Missing Items, Total Cost.
271
+ """
272
+ df = get_market_index()
273
+ if df.empty: return {"error": "No data"}
274
 
275
+ basket_results = []
276
+ missing_global = []
277
+
278
+ # 1. Resolve Items to Real Products
279
+ found_items = [] # list of (item_query, product_id, product_name)
 
280
 
281
+ for item in item_names:
282
+ # Find best matching product (using popularity tie-breaker)
283
+ hits = search_products_fuzzy(df[df['is_offer']==True], item, limit=5)
284
+ if hits.empty:
285
+ missing_global.append(item)
286
+ continue
287
 
288
+ # Pick the most popular product that matches this query
289
+ best_prod = hits.sort_values('views', ascending=False).iloc[0]
290
+ found_items.append({
291
+ "query": item,
292
+ "product_id": best_prod['product_id'],
293
+ "name": best_prod['product_name']
 
294
  })
 
 
 
295
 
296
+ if not found_items:
297
+ return {"actionable": False, "reason": "No items found in database."}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
298
 
299
+ # 2. Calculate Totals Per Retailer
300
+ # We only care about retailers that stock these products
301
+ target_pids = [x['product_id'] for x in found_items]
302
+
303
+ # Filter DF to only relevant products
304
+ relevant_offers = df[df['product_id'].isin(target_pids) & df['is_offer']]
305
+
306
+ # Group by Retailer
307
+ retailer_stats = []
308
+ all_retailers = relevant_offers['retailer'].unique()
309
+
310
+ for retailer in all_retailers:
311
+ r_df = relevant_offers[relevant_offers['retailer'] == retailer]
312
+
313
+ found_count = len(r_df)
314
+ total_price = r_df['price'].sum()
315
+
316
+ # Identify what this retailer has vs misses
317
+ retailer_pids = r_df['product_id'].tolist()
318
+ missing_in_store = [x['name'] for x in found_items if x['product_id'] not in retailer_pids]
319
+ found_names = [x['name'] for x in found_items if x['product_id'] in retailer_pids]
320
+
321
+ retailer_stats.append({
322
+ "retailer": retailer,
323
+ "total_price": float(total_price),
324
+ "item_count": found_count,
325
+ "coverage_percent": (found_count / len(found_items)) * 100,
326
+ "missing": missing_in_store,
327
+ "found_items": found_names
328
+ })
 
 
 
 
 
 
329
 
330
+ # 3. Sort by: Coverage (Desc), then Price (Asc)
331
+ retailer_stats.sort(key=lambda x: (-x['coverage_percent'], x['total_price']))
332
+
333
+ best_option = retailer_stats[0] if retailer_stats else None
334
+
335
+ return {
336
+ "actionable": True,
337
+ "basket_items": [x['name'] for x in found_items],
338
+ "global_missing": missing_global,
339
+ "best_store": best_option,
340
+ "all_stores": retailer_stats[:3] # Return top 3 for comparison
341
+ }
342
 
343
def calculate_zesa_units(amount_usd: float) -> Dict[str, Any]:
    """
    Estimate how many kWh a USD amount buys on the tiered ZESA tariff.

    Answers the "How much ZESA do I get for $20?" killer question. A ~6%
    levy is stripped off first, then the balance is walked through the
    three tariff steps defined in ZIM_UTILITIES, cheapest tier first.

    Args:
        amount_usd: Money the user intends to spend, in USD.

    Returns:
        Dict with the original amount, estimated units (kWh, 1 d.p.),
        a human-readable per-tier breakdown, and a disclaimer note.
    """
    # Strip the ~6% REA levy before applying the tariff steps.
    remaining = amount_usd / 1.06
    breakdown: List[str] = []

    t1 = ZIM_UTILITIES["zesa_step_1"]
    t2 = ZIM_UTILITIES["zesa_step_2"]
    t3 = ZIM_UTILITIES["zesa_step_3"]
    cost_t1 = t1["limit"] * t1["rate"]
    cost_t2 = t2["limit"] * t2["rate"]

    def _payload(total_units: float) -> Dict[str, Any]:
        # Shared response shape for every exit path.
        return {
            "amount_usd": amount_usd,
            "est_units_kwh": round(total_units, 1),
            "breakdown": breakdown,
            "note": "Estimates include ~6% REA levy. Actual units depend on your last purchase date."
        }

    # Everything fits inside the cheap first tier.
    if remaining <= cost_t1:
        bought = remaining / t1["rate"]
        breakdown.append(f"All {bought:.1f} units @ ${t1['rate']}")
        return _payload(bought)

    units = 0.0
    units += t1["limit"]
    remaining -= cost_t1
    breakdown.append(f"First {t1['limit']} units @ ${t1['rate']}")

    # Balance fits inside the second tier.
    if remaining <= cost_t2:
        bought = remaining / t2["rate"]
        breakdown.append(f"Balance bought {bought:.1f} units @ ${t2['rate']}")
        return _payload(units + bought)

    units += t2["limit"]
    remaining -= cost_t2
    breakdown.append(f"Next {t2['limit']} units @ ${t2['rate']}")

    # Whatever is left buys the expensive top tier.
    bought = remaining / t3["rate"]
    breakdown.append(f"Remaining ${(remaining + cost_t1 + cost_t2):.2f} bought {bought:.1f} units @ ${t3['rate']}")
    return _payload(units + bought)
390
 
391
def get_product_intelligence(query: str) -> Dict[str, Any]:
    """
    Market statistics for the single product best matching `query`.

    Answers "Is this price reasonable?" / "What's the cheapest X?" by
    fuzzy-matching the query against live offers, then summarising the
    price spread for the top hit across every retailer carrying it.

    Returns:
        {"found": False} when nothing matches; otherwise a dict with
        identity fields, price stats, the best deal, and all offers.
    """
    market = get_market_index()

    # Fuzzy-match only against rows that are actual offers.
    offer_rows = market[market['is_offer']]
    hits = search_products_fuzzy(offer_rows, query, limit=10)
    if hits.empty:
        return {"found": False}

    # Gather every offer row for the top-ranked product id.
    top_pid = hits.iloc[0]['product_id']
    offers = market[(market['product_id'] == top_pid) & (market['is_offer'])]
    if offers.empty:
        return {"found": False}

    prices = offers['price']
    lowest = prices.min()
    highest = prices.max()
    cheapest = offers.loc[prices.idxmin()]

    return {
        "found": True,
        "name": cheapest['product_name'],
        "brand": cheapest['brand'],
        "category": cheapest['category'],
        "view_count": int(cheapest['views']),
        "price_stats": {
            "min": float(lowest),
            "max": float(highest),
            "avg": float(prices.mean()),
            "spread": float(highest - lowest)
        },
        "best_deal": {
            "retailer": cheapest['retailer'],
            "price": float(lowest)
        },
        "all_offers": offers[['retailer', 'price']].to_dict('records')
    }
429
 
430
  # =========================
431
+ # 3. Gemini Context Layer
432
  # =========================
433
 
434
def generate_analyst_response(transcript: str) -> Dict[str, Any]:
    """
    Three-stage grounded analyst pipeline.

    1. Intent classification (Gemini, JSON mode): BASKET / UTILITY /
       PRODUCT_INTEL / CHAT.
    2. Deterministic Python analysis so every number comes from real
       market data rather than the LLM.
    3. Synthesis (Gemini): phrase a user-facing reply strictly from
       that data.

    Args:
        transcript: Raw user message or call transcript.

    Returns:
        Dict with "intent", "analyst_data" (the raw numbers used) and
        "message" (the user-facing text). When the Gemini client is not
        configured, only a short offline "message" is returned.
    """
    if not _gemini_client:
        return {"message": "AI Brain offline."}

    # Step A: Intent Classification
    INTENT_PROMPT = """
    Analyze the user input. Return JSON.
    Intents:
    - "BASKET": User has a list of items (e.g. "Oil, bread and rice").
    - "UTILITY": User asks about ZESA, Fuel, Gas prices or units.
    - "PRODUCT_INTEL": User asks for "Cheapest X", "Price of X", "Popular X".
    - "CHAT": General conversation.

    Output: { "intent": "...", "items": ["..."], "utility_type": "zesa/fuel/gas", "amount": number }
    """

    try:
        resp = _gemini_client.models.generate_content(
            model=GEMINI_MODEL,
            contents=INTENT_PROMPT + "\nInput: " + transcript,
            config=types.GenerateContentConfig(response_mime_type="application/json")
        )
        parsed = json.loads(resp.text)
    # FIX: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt. Catch Exception and log so failures are visible.
    except Exception as e:
        logger.warning("Intent classification failed, defaulting to CHAT: %s", e)
        parsed = {"intent": "CHAT"}

    intent = parsed.get("intent")
    data_context: Dict[str, Any] = {}

    # Step B: Execute Analyst Logic
    if intent == "BASKET":
        items = parsed.get("items", [])
        if items:
            data_context = calculate_basket_optimization(items)

    elif intent == "UTILITY":
        u_type = parsed.get("utility_type", "")
        amt = parsed.get("amount") or 0
        if "zesa" in u_type and amt > 0:
            data_context = calculate_zesa_units(float(amt))
        elif "fuel" in u_type or "petrol" in u_type:
            rate = ZIM_UTILITIES["fuel_petrol"]
            data_context = {"type": "Petrol", "rate": rate, "units": amt / rate}

    elif intent == "PRODUCT_INTEL":
        items = parsed.get("items", [])
        if items:
            data_context = get_product_intelligence(items[0])

    # Step C: Synthesis (Speak based on Data)
    SYNTHESIS_PROMPT = f"""
    You are Jessica, the Pricelyst Analyst.
    User Input: "{transcript}"

    ANALYST DATA (Strictly use this):
    {json.dumps(data_context, indent=2)}

    If 'actionable' is false or data is empty, suggest what data you need.
    If basket data exists, summarize: "The best store for your basket is [Retailer] at $[Total]."
    If ZESA data exists, be precise about units.
    Keep it helpful and Zimbabwean.
    """

    # FIX: the synthesis call was unguarded; a transient Gemini error
    # bubbled up as an unhandled exception even though the analyst data
    # had already been computed successfully.
    try:
        final_resp = _gemini_client.models.generate_content(
            model=GEMINI_MODEL,
            contents=SYNTHESIS_PROMPT
        )
        message = final_resp.text
    except Exception as e:
        logger.error("Synthesis failed: %s", e)
        message = "Sorry, I could not put that answer together right now. Please try again."

    return {
        "intent": intent,
        "analyst_data": data_context,
        "message": message
    }
511
 
512
  # =========================
513
+ # 4. Endpoints
514
  # =========================
515
 
516
@app.get("/health")
def health():
    """Liveness probe: reports how many offers are indexed and the upstream API base."""
    market = get_market_index()
    status = {
        "ok": True,
        "offers_indexed": len(market),
        "api_source": PRICE_API_BASE
    }
    return jsonify(status)
524
 
525
@app.post("/chat")
def chat():
    """
    Text chat endpoint.

    Expects JSON: {"message": str, "profile_id": str}. Runs the analyst
    pipeline and best-effort logs the exchange under the profile.
    Returns 400 when profile_id is missing.
    """
    body = request.get_json(silent=True) or {}
    msg = body.get("message", "")
    pid = body.get("profile_id")

    if not pid:
        return jsonify({"ok": False}), 400

    response_data = generate_analyst_response(msg)

    # FIX: logging is best-effort; a Firestore hiccup must not turn a
    # successfully computed answer into a 500 for the user.
    if db:
        try:
            db.collection("pricelyst_profiles").document(pid).collection("chat_logs").add({
                "message": msg,
                "response": response_data,
                "ts": datetime.now(timezone.utc).isoformat()
            })
        except Exception as e:
            logger.error("Failed to log chat for %s: %s", pid, e)

    return jsonify({"ok": True, "data": response_data})
545
+
546
@app.post("/api/call-briefing")
def call_briefing():
    """
    Pre-call context for ElevenLabs.

    Deliberately returns only the stored memory summary plus the static
    utilities table — never the whole product database.
    """
    body = request.get_json(silent=True) or {}
    pid = body.get("profile_id")
    username = body.get("username")

    if not pid:
        return jsonify({"ok": False}), 400

    prof: Dict[str, Any] = {}
    if db:
        ref = db.collection("pricelyst_profiles").document(pid)
        snapshot = ref.get()
        if snapshot.exists:
            prof = snapshot.to_dict()
        else:
            # First contact: create an empty profile shell.
            ref.set({"created_at": datetime.now(timezone.utc).isoformat()})

    display_name = username or prof.get("username", "Friend")
    kpi_snapshot = {
        "username": display_name,
        "utilities": ZIM_UTILITIES,
        "instructions": "You are Jessica. If asked for prices, say you can check the live system. For ZESA/Fuel, use the 'utilities' variable."
    }

    return jsonify({
        "ok": True,
        "memory_summary": prof.get("memory_summary", ""),
        "kpi_snapshot": json.dumps(kpi_snapshot)
    })
579
 
580
@app.post("/api/log-call-usage")
def log_call_usage():
    """
    Post-call processor.

    1. Update the profile's long-term memory summary via Gemini.
    2. Re-run the analyst pipeline on the transcript and, when a valid
       basket was discussed, persist a grounded shopping plan.
    3. Log the raw call for auditing.

    Expects JSON: {"profile_id": str, "transcript": str}.
    Returns 400 when profile_id is missing.
    """
    body = request.get_json(silent=True) or {}
    pid = body.get("profile_id")
    transcript = body.get("transcript", "")

    if not pid:
        return jsonify({"ok": False}), 400

    logger.info(f"Processing Call {pid}. Len: {len(transcript)}")

    # 1. Update Memory (Gemini)
    # FIX: also require a configured Gemini client before attempting.
    if len(transcript) > 20 and db and _gemini_client:
        try:
            prof_ref = db.collection("pricelyst_profiles").document(pid)
            # FIX: to_dict() returns None for a missing document; the old
            # code raised AttributeError on a profile's first-ever call.
            curr_mem = (prof_ref.get().to_dict() or {}).get("memory_summary", "")

            mem_prompt = f"Update this memory summary with new details from the transcript (names, preferences, budget):\nOLD: {curr_mem}\nTRANSCRIPT: {transcript}"

            resp = _gemini_client.models.generate_content(
                model=GEMINI_MODEL,
                contents=mem_prompt
            )
            prof_ref.set({"memory_summary": resp.text}, merge=True)
        except Exception as e:
            logger.error(f"Memory Update Failed: {e}")

    # 2. Generate Plan (Analyst Engine Integration)
    # Re-run the Analyst logic specifically for the plan.
    analyst_result = generate_analyst_response(transcript)
    plan_data: Dict[str, Any] = {}

    if analyst_result.get("intent") == "BASKET" and analyst_result.get("analyst_data", {}).get("actionable"):
        # We have a valid basket!
        data = analyst_result["analyst_data"]
        best = data["best_store"]

        # Markdown Generation
        md = f"# Your Shopping Plan\n\n"
        md += f"**Best Store:** {best['retailer']}\n"
        md += f"**Total Cost:** ${best['total_price']:.2f} (for {best['item_count']} items)\n\n"

        md += "| Item | Found? |\n|---|---|\n"
        for item in data['basket_items']:
            found = "✅" if item in best['found_items'] else "❌"
            md += f"| {item} | {found} |\n"

        if data['global_missing']:
            md += f"\n**Missing from Market:** {', '.join(data['global_missing'])}"

        plan_data = {
            "is_actionable": True,
            "title": f"Plan: {best['retailer']} (${best['total_price']:.2f})",
            "markdown_content": md,
            "items": data['basket_items']
        }

    # Save Plan
    # FIX: only persist when a real plan exists; previously an empty
    # placeholder document (just a timestamp) was written for every
    # non-basket call.
    if db and plan_data:
        try:
            db.collection("pricelyst_profiles").document(pid).collection("shopping_plans").add({
                **plan_data,
                "created_at": datetime.now(timezone.utc).isoformat()
            })
        except Exception as e:
            logger.error(f"Plan save failed: {e}")

    # 3. Log Call (best-effort: never fail the request on a log write)
    if db:
        try:
            db.collection("pricelyst_profiles").document(pid).collection("call_logs").add({
                "transcript": transcript,
                "analyst_result": analyst_result,
                "ts": datetime.now(timezone.utc).isoformat()
            })
        except Exception as e:
            logger.error(f"Call log failed: {e}")

    return jsonify({
        "ok": True,
        "shopping_plan": plan_data if plan_data.get("is_actionable") else None
    })
660
 
661
+ # ––––– Shopping Plan CRUD (Standard) –––––
662
 
663
  @app.get("/api/shopping-plans")
664
  def list_plans():
665
  pid = request.args.get("profile_id")
666
+ if not pid or not db: return jsonify({"ok": False}), 400
667
  try:
668
  docs = db.collection("pricelyst_profiles").document(pid).collection("shopping_plans") \
669
  .order_by("created_at", direction=firestore.Query.DESCENDING).limit(10).stream()
 
675
@app.delete("/api/shopping-plans/<plan_id>")
def delete_plan(plan_id):
    """
    Delete one saved shopping plan for a profile.

    Query args: profile_id (required). Returns 400 when it is missing
    or persistence is disabled; 500 on a Firestore failure.
    """
    pid = request.args.get("profile_id")
    if not pid or not db:
        return jsonify({"ok": False}), 400
    try:
        db.collection("pricelyst_profiles").document(pid).collection("shopping_plans").document(plan_id).delete()
        return jsonify({"ok": True})
    # FIX: was a bare `except:` that returned 500 without any trace of
    # the underlying cause. Catch Exception and log it.
    except Exception as e:
        logger.error("Failed to delete plan %s for %s: %s", plan_id, pid, e)
        return jsonify({"ok": False}), 500
684
 
685
  # =========================
686
  # Main
 
688
 
689
if __name__ == "__main__":
    port = int(os.environ.get("PORT", 7860))
    # Pre-warm the market index cache so the first request is fast.
    # FIX: was a bare `except: pass`; log the failure so a broken
    # upstream API is visible at startup instead of silently ignored.
    try:
        get_market_index(force_refresh=True)
    except Exception as e:
        logger.warning("Cache pre-warm failed: %s", e)
    app.run(host="0.0.0.0", port=port)