Pepguy committed on
Commit
ac8db0c
·
verified ·
1 Parent(s): ab5ea02

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +534 -543
app.py CHANGED
@@ -1,59 +1,114 @@
1
- # server_gemini_seg.py
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
  import os
4
- import io
5
  import json
6
- import base64
7
  import logging
8
  import uuid
9
  import time
10
  import difflib
11
- from typing import List, Dict, Any, Tuple, Optional
12
 
13
  from flask import Flask, request, jsonify
14
  from flask_cors import CORS
15
- from PIL import Image, ImageOps
16
- import numpy as np
17
- import cv2
18
 
19
- # genai client
20
- from google import genai
21
- from google.genai import types
 
 
 
 
 
 
22
 
23
- # Firebase Admin (in-memory JSON init)
24
  try:
25
  import firebase_admin
26
- from firebase_admin import credentials as fb_credentials, storage as fb_storage
27
-
28
- FIREBASE_ADMIN_AVAILABLE = True
29
  except Exception:
30
  firebase_admin = None
31
  fb_credentials = None
32
- fb_storage = None
33
- FIREBASE_ADMIN_AVAILABLE = False
34
 
35
  logging.basicConfig(level=logging.INFO)
36
- log = logging.getLogger("wardrobe-server")
37
-
38
- GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")
39
- if not GEMINI_API_KEY:
40
- log.warning("GEMINI_API_KEY not set — gemini calls will fail (but fallback still works).")
 
 
 
 
 
 
 
 
 
 
41
 
42
- client = genai.Client(api_key=GEMINI_API_KEY) if GEMINI_API_KEY else None
 
 
43
 
44
- # Firebase config (read service account JSON from env)
45
- FIREBASE_ADMIN_JSON = os.getenv("FIREBASE_ADMIN_JSON", "").strip()
46
- FIREBASE_STORAGE_BUCKET = os.getenv("FIREBASE_STORAGE_BUCKET", "").strip() # optional override
47
 
48
- if FIREBASE_ADMIN_JSON and not FIREBASE_ADMIN_AVAILABLE:
49
- log.warning("FIREBASE_ADMIN_JSON provided but firebase-admin SDK is not installed. Install firebase-admin.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
- app = Flask(__name__)
52
- CORS(app)
53
 
54
- # ---------- Categories mapping (map model 'type' to frontend categories) ----------
55
- # NOTE: If frontend has a definitive categories array, replace this list with that array.
56
- # We use difflib.get_close_matches to pick the closest category from CATEGORIES.
57
  CATEGORIES = [
58
  "top",
59
  "shirt",
@@ -82,466 +137,400 @@ CATEGORIES = [
82
 
83
 
84
  def map_type_to_category(item_type: str) -> str:
85
- """Map a model-produced type string to the closest category from CATEGORIES.
86
- Falls back to 'unknown' if no reasonable match is found.
87
- """
88
  if not item_type:
89
  return "others"
90
  t = item_type.strip().lower()
91
- # direct hit
92
  if t in CATEGORIES:
93
  return t
94
- # try splitting or common plural handling
95
  t_clean = t.rstrip("s")
96
  if t_clean in CATEGORIES:
97
  return t_clean
98
- # fuzzy match
99
  matches = difflib.get_close_matches(t, CATEGORIES, n=1, cutoff=0.6)
100
  if matches:
101
  return matches[0]
102
- # attempt to match by token intersection
103
  for token in t.replace("_", " ").split():
104
  if token in CATEGORIES:
105
  return token
106
  return "others"
107
 
108
 
109
- # ---------- Firebase init helpers ----------
110
 
111
- _firebase_app = None
 
 
 
 
 
112
 
113
 
114
- def init_firebase_admin_if_needed():
115
- global _firebase_app
116
- if _firebase_app is not None:
117
- return _firebase_app
118
- if not FIREBASE_ADMIN_JSON:
119
- log.info("No FIREBASE_ADMIN_JSON env var set; skipping Firebase admin init.")
120
- return None
121
- if not FIREBASE_ADMIN_AVAILABLE:
122
- raise RuntimeError("firebase-admin not installed (pip install firebase-admin)")
123
- try:
124
- sa_obj = json.loads(FIREBASE_ADMIN_JSON)
125
- except Exception as e:
126
- log.exception("Failed parsing FIREBASE_ADMIN_JSON: %s", e)
127
- raise
128
- bucket_name = FIREBASE_STORAGE_BUCKET or (sa_obj.get("project_id") and f"{sa_obj.get('project_id')}.appspot.com")
129
- if not bucket_name:
130
- raise RuntimeError(
131
- "Could not determine storage bucket. Set FIREBASE_STORAGE_BUCKET or include project_id in service account JSON."
132
- )
133
- try:
134
- cred = fb_credentials.Certificate(sa_obj)
135
- _firebase_app = firebase_admin.initialize_app(cred, {"storageBucket": bucket_name})
136
- log.info("Initialized firebase admin with bucket: %s", bucket_name)
137
- return _firebase_app
138
- except Exception as e:
139
- log.exception("Failed to initialize firebase admin: %s", e)
140
- raise
141
 
 
142
 
143
- def upload_b64_to_firebase(base64_str: str, path: str, content_type="image/jpeg", metadata: dict = None) -> str:
144
- """Upload base64 string to Firebase Storage at `path`. Optionally attach metadata dict (custom metadata).
145
- Returns a public URL when possible, otherwise returns gs:///.
 
 
146
  """
147
- if not FIREBASE_ADMIN_JSON:
148
- raise RuntimeError("FIREBASE_ADMIN_JSON not set")
149
- init_firebase_admin_if_needed()
150
- if not FIREBASE_ADMIN_AVAILABLE:
151
- raise RuntimeError("firebase-admin not available")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
152
 
153
- raw = base64_str
154
- if raw.startswith("data:"):
155
- raw = raw.split(",", 1)[1]
156
- raw = raw.replace("\n", "").replace("\r", "")
157
- data = base64.b64decode(raw)
158
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159
  try:
160
- bucket = fb_storage.bucket()
161
- blob = bucket.blob(path)
162
- blob.upload_from_string(data, content_type=content_type)
163
- if metadata:
164
- try:
165
- blob.metadata = {k: (json.dumps(v) if not isinstance(v, str) else v) for k, v in metadata.items()}
166
- blob.patch()
167
- except Exception as me:
168
- log.warning("Failed to patch metadata for %s: %s", path, me)
169
- try:
170
- blob.make_public()
171
- return blob.public_url
172
- except Exception as e:
173
- log.warning("Could not make blob public: %s", e)
174
- return f"gs://{bucket.name}/{path}"
 
 
 
 
 
 
 
 
175
  except Exception as e:
176
- log.exception("Firebase upload error for path %s: %s", path, e)
177
- raise
178
 
179
 
180
- # ---------- Image helpers (with EXIF transpose) ----------
181
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
182
 
183
- def read_image_bytes(file_storage) -> Tuple[np.ndarray, int, int, bytes]:
184
- """Read bytes, apply EXIF orientation, return BGR numpy, width, height and raw bytes."""
185
- data = file_storage.read()
186
- img = Image.open(io.BytesIO(data))
187
- try:
188
- img = ImageOps.exif_transpose(img)
189
- except Exception:
190
- pass
191
- img = img.convert("RGB")
192
- w, h = img.size
193
- arr = np.array(img)[:, :, ::-1] # RGB -> BGR
194
- return arr, w, h, data
195
-
196
-
197
- def crop_and_b64(bgr_img: np.ndarray, x: int, y: int, w: int, h: int, max_side=512) -> str:
198
- h_img, w_img = bgr_img.shape[:2]
199
- x = max(0, int(x))
200
- y = max(0, int(y))
201
- x2 = min(w_img, int(x + w))
202
- y2 = min(h_img, int(y + h))
203
- crop = bgr_img[y:y2, x:x2]
204
- if crop.size == 0:
205
- return ""
206
- max_dim = max(crop.shape[0], crop.shape[1])
207
- if max_dim > max_side:
208
- scale = max_side / max_dim
209
- crop = cv2.resize(crop, (int(crop.shape[1] * scale), int(crop.shape[0] * scale)), interpolation=cv2.INTER_AREA)
210
- _, jpeg = cv2.imencode(".jpg", crop, [int(cv2.IMWRITE_JPEG_QUALITY), 82])
211
- return base64.b64encode(jpeg.tobytes()).decode("ascii")
212
-
213
-
214
- def fallback_contour_crops(bgr_img, max_items=8) -> List[Dict[str, Any]]:
215
- gray = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
216
- blur = cv2.GaussianBlur(gray, (7, 7), 0)
217
- thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 15, 6)
218
- kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
219
- closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
220
- contours, _ = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
221
- h_img, w_img = bgr_img.shape[:2]
222
- min_area = (w_img * h_img) * 0.005
223
- items = []
224
- for cnt in sorted(contours, key=cv2.contourArea, reverse=True):
225
- if len(items) >= max_items:
226
- break
227
- area = cv2.contourArea(cnt)
228
- if area < min_area:
229
- continue
230
- x, y, w, h = cv2.boundingRect(cnt)
231
- pad_x, pad_y = int(w * 0.07), int(h * 0.07)
232
- x = max(0, x - pad_x)
233
- y = max(0, y - pad_y)
234
- w = min(w_img - x, w + pad_x * 2)
235
- h = min(h_img - y, h + pad_y * 2)
236
- b64 = crop_and_b64(bgr_img, x, y, w, h)
237
- if not b64:
238
- continue
239
- items.append(
240
  {
241
- "id": str(uuid.uuid4()),
242
- "label": "unknown",
243
- "confidence": min(0.95, max(0.25, area / (w_img * h_img))),
244
- "bbox": {"x": x, "y": y, "w": w, "h": h},
245
- "thumbnail_b64": b64,
246
- "source": "fallback",
247
  }
248
- )
249
- if not items:
250
- h_half, w_half = h_img // 2, w_img // 2
251
- rects = [(0, 0, w_half, h_half), (w_half, 0, w_half, h_half), (0, h_half, w_half, h_half), (w_half, h_half, w_half, h_half)]
252
- for r in rects:
253
- b64 = crop_and_b64(bgr_img, r[0], r[1], r[2], r[3])
254
- if b64:
255
- items.append(
256
- {
257
- "id": str(uuid.uuid4()),
258
- "label": "unknown",
259
- "confidence": 0.3,
260
- "bbox": {"x": r[0], "y": r[1], "w": r[2], "h": r[3]},
261
- "thumbnail_b64": b64,
262
- "source": "fallback-grid",
263
- }
264
- )
265
- return items
266
-
267
-
268
- # ---------- AI analysis helper ----------
269
-
270
-
271
- def analyze_crop_with_gemini(jpeg_b64: str) -> Dict[str, Any]:
272
- """Run Gemini on the cropped image bytes to extract:
273
- type (one-word category like 'shoe', 'jacket', 'dress'),
274
- summary (single-line description), brand (string or empty), tags (array of short descriptors)
275
- Returns dict, falls back to empty/defaults on error or missing key.
276
- """
277
  if not client:
278
- return {"type": "unknown", "summary": "", "brand": "", "tags": []}
 
 
279
  try:
280
- # prepare prompt
281
  prompt = (
282
- "You are an assistant that identifies clothing item characteristics from an image. "
283
- "Return only a JSON object with keys: type (single word like 'shoe','top','jacket'), "
284
- "summary (a single short sentence, one line), brand (brand name if visible else empty string), "
285
- "tags (an array of short single-word tags describing visible attributes, e.g. ['striped','leather','white']). "
286
- "Keep values short and concise."
287
  )
288
-
289
- contents = [types.Content(role="user", parts=[types.Part.from_text(text=prompt)])]
290
-
291
- # attach the image bytes
292
- image_bytes = base64.b64decode(jpeg_b64)
293
- contents.append(types.Content(role="user", parts=[types.Part.from_bytes(data=image_bytes, mime_type="image/jpeg")]))
294
-
295
- schema = {
296
- "type": "object",
297
- "properties": {
298
- "type": {"type": "string"},
299
- "summary": {"type": "string"},
300
- "brand": {"type": "string"},
301
- "tags": {"type": "array", "items": {"type": "string"}},
302
- },
303
- "required": ["type", "summary"],
304
- }
305
- cfg = types.GenerateContentConfig(response_mime_type="application/json", response_schema=schema)
306
-
307
- # call model (use the same model family you used before)
308
- resp = client.models.generate_content(model="gemini-2.5-flash-lite", contents=contents, config=cfg)
309
- text = resp.text or ""
310
- parsed = {}
311
- try:
312
- parsed = json.loads(text)
313
- # coerce expected shapes
314
- parsed["type"] = str(parsed.get("type", "")).strip()
315
- parsed["summary"] = str(parsed.get("summary", "")).strip()
316
- parsed["brand"] = str(parsed.get("brand", "")).strip()
317
- tags = parsed.get("tags", [])
318
- if not isinstance(tags, list):
319
- tags = []
320
- parsed["tags"] = [str(t).strip() for t in tags if str(t).strip()]
321
- except Exception as e:
322
- log.warning("Failed parsing Gemini analysis JSON: %s — raw: %s", e, (text[:300] if text else ""))
323
- parsed = {"type": "unknown", "summary": "", "brand": "", "tags": []}
324
- return {
325
- "type": parsed.get("type", "unknown") or "unknown",
326
- "summary": parsed.get("summary", "") or "",
327
- "brand": parsed.get("brand", "") or "",
328
- "tags": parsed.get("tags", []) or [],
329
- }
330
  except Exception as e:
331
- log.exception("analyze_crop_with_gemini failure: %s", e)
332
- return {"type": "unknown", "summary": "", "brand": "", "tags": []}
333
-
 
334
 
335
- # ---------- Main / processing ----------
336
 
 
337
 
338
- @app.route("/process", methods=["POST"])
339
- def process_image():
340
- if "photo" not in request.files:
341
- return jsonify({"error": "missing photo"}), 400
342
- file = request.files["photo"]
343
-
344
- uid = (request.form.get("uid") or request.args.get("uid") or "anon").strip() or "anon"
 
345
  try:
346
- bgr_img, img_w, img_h, raw_bytes = read_image_bytes(file)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
347
  except Exception as e:
348
- log.error("invalid image: %s", e)
349
- return jsonify({"error": "invalid image"}), 400
350
-
351
- session_id = str(uuid.uuid4())
352
-
353
- # Detection prompt (same as before)
354
- user_prompt = (
355
- "You are an assistant that extracts clothing detections from a single image. "
356
- "Return a JSON object with a single key 'items' which is an array. Each item must have: "
357
- "label (string, short like 'top','skirt','sneakers'), "
358
- "bbox with normalized coordinates between 0 and 1: {x, y, w, h} where x,y are top-left relative to width/height, "
359
- "confidence (0-1). Example output: {\"items\":[{\"label\":\"top\",\"bbox\":{\"x\":0.1,\"y\":0.2,\"w\":0.3,\"h\":0.4},\"confidence\":0.95}]} "
360
- "Output ONLY valid JSON. If you cannot detect any clothing confidently, return {\"items\":[]}."
361
- )
362
 
363
- try:
364
- contents = [types.Content(role="user", parts=[types.Part.from_text(text=user_prompt)])]
365
- contents.append(types.Content(role="user", parts=[types.Part.from_bytes(data=raw_bytes, mime_type="image/jpeg")]))
366
 
367
- schema = {
368
- "type": "object",
369
- "properties": {
370
- "items": {
371
- "type": "array",
372
- "items": {
373
- "type": "object",
374
- "properties": {
375
- "label": {"type": "string"},
376
- "bbox": {
377
- "type": "object",
378
- "properties": {
379
- "x": {"type": "number"},
380
- "y": {"type": "number"},
381
- "w": {"type": "number"},
382
- "h": {"type": "number"},
383
- },
384
- "required": ["x", "y", "w", "h"],
385
- },
386
- "confidence": {"type": "number"},
387
- },
388
- "required": ["label", "bbox", "confidence"],
389
- },
390
- }
391
- },
392
- "required": ["items"],
393
- }
394
-
395
- cfg = types.GenerateContentConfig(response_mime_type="application/json", response_schema=schema)
396
 
397
- log.info("Calling Gemini model for detection (gemini-2.5-flash-lite)...")
398
- model_resp = client.models.generate_content(model="gemini-2.5-flash-lite", contents=contents, config=cfg) if client else None
399
- raw_text = (model_resp.text or "") if model_resp else ""
400
- log.info("Gemini raw response length: %d", len(raw_text))
401
 
402
- parsed = None
403
- try:
404
- parsed = json.loads(raw_text) if raw_text else None
405
- except Exception as e:
406
- log.warning("Could not parse Gemini JSON: %s", e)
407
- parsed = None
408
-
409
- items_out: List[Dict[str, Any]] = []
410
- if parsed and isinstance(parsed.get("items"), list) and len(parsed["items"]) > 0:
411
- for it in parsed["items"]:
412
- try:
413
- label = str(it.get("label", "unknown"))[:48]
414
- bbox = it.get("bbox", {})
415
- nx = float(bbox.get("x", 0))
416
- ny = float(bbox.get("y", 0))
417
- nw = float(bbox.get("w", 0))
418
- nh = float(bbox.get("h", 0))
419
- nx = max(0.0, min(1.0, nx))
420
- ny = max(0.0, min(1.0, ny))
421
- nw = max(0.0, min(1.0, nw))
422
- nh = max(0.0, min(1.0, nh))
423
- px = int(nx * img_w)
424
- py = int(ny * img_h)
425
- pw = int(nw * img_w)
426
- ph = int(nh * img_h)
427
- if pw <= 8 or ph <= 8:
428
- continue
429
- b64 = crop_and_b64(bgr_img, px, py, pw, ph)
430
- if not b64:
431
- continue
432
- item_obj = {
433
- "id": str(uuid.uuid4()),
434
- "label": label,
435
- "confidence": float(it.get("confidence", 0.5)),
436
- "bbox": {"x": px, "y": py, "w": pw, "h": ph},
437
- "thumbnail_b64": b64,
438
- "source": "gemini",
439
- }
440
- # Add placeholder analysis/title; will be filled later if analysis runs
441
- item_obj["analysis"] = {"type": "unknown", "summary": "", "brand": "", "tags": []}
442
- item_obj["title"] = "unknown"
443
- items_out.append(item_obj)
444
- except Exception as e:
445
- log.warning("skipping item due to error: %s", e)
446
- else:
447
- log.info("Gemini returned no items or parse failed — using fallback contour crops.")
448
- items_out = fallback_contour_crops(bgr_img, max_items=8)
449
- # ensure analysis/title placeholders
450
- for itm in items_out:
451
- itm.setdefault("analysis", {"type": "unknown", "summary": "", "brand": "", "tags": []})
452
- itm.setdefault("title", "unknown")
453
-
454
- # Perform AI analysis per crop (if possible) and auto-upload to firebase with metadata (tmp + session)
455
- if FIREBASE_ADMIN_JSON and FIREBASE_ADMIN_AVAILABLE:
456
- try:
457
- init_firebase_admin_if_needed()
458
- bucket = fb_storage.bucket()
459
- except Exception as e:
460
- log.exception("Firebase admin init for upload failed: %s", e)
461
- bucket = None
462
 
463
- safe_uid = "".join(ch for ch in uid if ch.isalnum() or ch in ("-", "_")) or "anon"
464
- for itm in items_out:
465
- b64 = itm.get("thumbnail_b64")
466
- if not b64:
467
- continue
468
- # analyze
469
- try:
470
- analysis = analyze_crop_with_gemini(b64) if client else {"type": "unknown", "summary": "", "brand": "", "tags": []}
471
- except Exception as ae:
472
- log.warning("analysis failed: %s", ae)
473
- analysis = {"type": "unknown", "summary": "", "brand": "", "tags": []}
474
-
475
- # attach analysis and map to frontend category/title
476
- itm["analysis"] = analysis
477
- mapped_title = map_type_to_category(analysis.get("type", "") or itm.get("label", ""))
478
- itm["title"] = mapped_title
479
-
480
- item_id = itm.get("id") or str(uuid.uuid4())
481
- path = f"detected/{safe_uid}/{item_id}.jpg"
482
- try:
483
- metadata = {
484
- "tmp": "true",
485
- "session_id": session_id,
486
- "uploaded_by": safe_uid,
487
- "uploaded_at": str(int(time.time())),
488
- # store AI fields as JSON strings for later inspection
489
- "ai_type": analysis.get("type", ""),
490
- "ai_brand": analysis.get("brand", ""),
491
- "ai_summary": analysis.get("summary", ""),
492
- "ai_tags": json.dumps(analysis.get("tags", [])),
493
- }
494
- url = upload_b64_to_firebase(b64, path, content_type="image/jpeg", metadata=metadata)
495
- itm["thumbnail_url"] = url
496
- itm["thumbnail_path"] = path
497
- itm.pop("thumbnail_b64", None)
498
- itm["_session_id"] = session_id
499
- log.debug("Auto-uploaded thumbnail for %s -> %s (session=%s)", item_id, url, session_id)
500
- except Exception as up_e:
501
- log.warning("Auto-upload failed for %s: %s", item_id, up_e)
502
- # keep thumbnail_b64 and analysis for client fallback
503
- else:
504
- if not FIREBASE_ADMIN_JSON:
505
- log.info("FIREBASE_ADMIN_JSON not set; skipping server-side thumbnail upload.")
506
- else:
507
- log.info("Firebase admin SDK not available; skipping server-side thumbnail upload.")
508
- # For items without firebase upload, still attempt local analysis mapping
509
- for itm in items_out:
510
- if "analysis" not in itm or not itm["analysis"]:
511
- # attempt lightweight analysis mapping using label
512
- itm.setdefault("analysis", {"type": itm.get("label", "unknown"), "summary": "", "brand": "", "tags": []})
513
- mapped_title = map_type_to_category(itm["analysis"].get("type", "") or itm.get("label", ""))
514
- itm["title"] = mapped_title
515
-
516
- return jsonify({"ok": True, "items": items_out, "session_id": session_id, "debug": {"raw_model_text": (raw_text or "")[:1600]}}), 200
517
- except Exception as ex:
518
- log.exception("Processing error: %s", ex)
519
- try:
520
- items_out = fallback_contour_crops(bgr_img, max_items=8)
521
- for itm in items_out:
522
- itm.setdefault("analysis", {"type": "unknown", "summary": "", "brand": "", "tags": []})
523
- itm["title"] = map_type_to_category(itm["analysis"].get("type", "") or itm.get("label", ""))
524
- return jsonify({"ok": True, "items": items_out, "session_id": session_id, "debug": {"error": str(ex)}}), 200
525
- except Exception as e2:
526
- log.exception("Fallback also failed: %s", e2)
527
- return jsonify({"error": "internal failure", "detail": str(e2)}), 500
528
 
 
529
 
530
- # ---------- Finalize endpoint: keep selected and delete only session's temp files ----------
 
531
 
532
 
533
- @app.route("/finalize_detections", methods=["POST"])
534
- def finalize_detections():
535
  """
536
- Body JSON: { "uid": "user123", "keep_ids": ["id1","id2",...], "session_id": "<session id from /process>" }
537
-
538
- Server will delete only detected/<uid>/* files whose:
539
- - metadata.tmp == "true"
540
- - metadata.session_id == session_id
541
- - item_id NOT in keep_ids
542
-
543
- Returns:
544
- { ok: True, kept: [...], deleted: [...], errors: [...] }
545
  """
546
  try:
547
  body = request.get_json(force=True)
@@ -549,138 +538,140 @@ def finalize_detections():
549
  return jsonify({"error": "invalid json"}), 400
550
 
551
  uid = (body.get("uid") or request.args.get("uid") or "anon").strip() or "anon"
552
- keep_ids = set(body.get("keep_ids") or [])
553
- session_id = (body.get("session_id") or request.args.get("session_id") or "").strip()
 
554
 
555
- if not session_id:
556
- return jsonify({"error": "session_id required for finalize to avoid unsafe deletes"}), 400
557
-
558
- if not FIREBASE_ADMIN_JSON or not FIREBASE_ADMIN_AVAILABLE:
559
- return jsonify({"error": "firebase admin not configured"}), 500
560
 
 
561
  try:
562
- init_firebase_admin_if_needed()
563
- bucket = fb_storage.bucket()
564
  except Exception as e:
565
- log.exception("Firebase init error in finalize: %s", e)
566
- return jsonify({"error": "firebase admin init failed", "detail": str(e)}), 500
567
-
568
- safe_uid = "".join(ch for ch in uid if ch.isalnum() or ch in ("-", "_")) or "anon"
569
- prefix = f"detected/{safe_uid}/"
570
-
571
- kept = []
572
- deleted = []
573
- errors = []
574
 
575
  try:
576
- blobs = list(bucket.list_blobs(prefix=prefix))
577
- for blob in blobs:
578
- try:
579
- name = blob.name
580
- fname = name.split("/")[-1]
581
- if "." not in fname:
582
- continue
583
- item_id = fname.rsplit(".", 1)[0]
584
-
585
- md = blob.metadata or {}
586
- # only consider temporary files matching this session id
587
- if str(md.get("session_id", "")) != session_id or str(md.get("tmp", "")).lower() not in ("true", "1", "yes"):
588
- continue
589
 
590
- if item_id in keep_ids:
591
- # ensure public URL available if possible
592
- try:
593
- blob.make_public()
594
- url = blob.public_url
595
- except Exception:
596
- url = f"gs://{bucket.name}/{name}"
597
-
598
- # extract AI metadata (if present)
599
- ai_type = md.get("ai_type") or ""
600
- ai_brand = md.get("ai_brand") or ""
601
- ai_summary = md.get("ai_summary") or ""
602
- ai_tags_raw = md.get("ai_tags") or "[]"
603
- try:
604
- ai_tags = json.loads(ai_tags_raw) if isinstance(ai_tags_raw, str) else ai_tags_raw
605
- except Exception:
606
- ai_tags = []
607
- kept.append(
608
- {
609
- "id": item_id,
610
- "thumbnail_url": url,
611
- "thumbnail_path": name,
612
- "analysis": {"type": ai_type, "brand": ai_brand, "summary": ai_summary, "tags": ai_tags},
613
- }
614
- )
615
- else:
616
- try:
617
- blob.delete()
618
- deleted.append(item_id)
619
- except Exception as de:
620
- errors.append({"id": item_id, "error": str(de)})
621
- except Exception as e:
622
- errors.append({"blob": getattr(blob, "name", None), "error": str(e)})
623
- return jsonify({"ok": True, "kept": kept, "deleted": deleted, "errors": errors}), 200
624
  except Exception as e:
625
- log.exception("finalize_detections error: %s", e)
626
- return jsonify({"error": "internal", "detail": str(e)}), 500
627
 
 
 
 
 
628
 
629
- # ---------- Clear session: delete all temporary files for a session ----------
630
 
631
 
632
- @app.route("/clear_session", methods=["POST"])
633
- def clear_session():
634
  """
635
- Body JSON: { "session_id": "<session id from /process>", "uid": "<uid>" }
636
- Deletes all detected/<uid>/* blobs where metadata.session_id == session_id and metadata.tmp == "true".
 
 
 
 
637
  """
638
  try:
639
  body = request.get_json(force=True)
640
  except Exception:
641
  return jsonify({"error": "invalid json"}), 400
642
 
643
- session_id = (body.get("session_id") or request.args.get("session_id") or "").strip()
644
- uid = (body.get("uid") or request.args.get("uid") or "anon").strip() or "anon"
645
-
646
- if not session_id:
647
- return jsonify({"error": "session_id required"}), 400
648
 
649
- if not FIREBASE_ADMIN_JSON or not FIREBASE_ADMIN_AVAILABLE:
650
- return jsonify({"error": "firebase admin not configured"}), 500
651
 
652
  try:
653
- init_firebase_admin_if_needed()
654
- bucket = fb_storage.bucket()
655
  except Exception as e:
656
- log.exception("Firebase init error in clear_session: %s", e)
657
- return jsonify({"error": "firebase admin init failed", "detail": str(e)}), 500
658
 
659
- safe_uid = "".join(ch for ch in uid if ch.isalnum() or ch in ("-", "_")) or "anon"
660
- prefix = f"detected/{safe_uid}/"
661
 
662
- deleted = []
663
- errors = []
 
 
 
 
 
 
 
 
 
 
664
  try:
665
- blobs = list(bucket.list_blobs(prefix=prefix))
666
- for blob in blobs:
667
- try:
668
- md = blob.metadata or {}
669
- if str(md.get("session_id", "")) == session_id and str(md.get("tmp", "")).lower() in ("true", "1", "yes"):
670
- try:
671
- blob.delete()
672
- deleted.append(blob.name.split("/")[-1].rsplit(".", 1)[0])
673
- except Exception as de:
674
- errors.append({"blob": blob.name, "error": str(de)})
675
- except Exception as e:
676
- errors.append({"blob": getattr(blob, "name", None), "error": str(e)})
677
- return jsonify({"ok": True, "deleted": deleted, "errors": errors}), 200
678
- except Exception as e:
679
- log.exception("clear_session error: %s", e)
680
- return jsonify({"error": "internal", "detail": str(e)}), 500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
681
 
682
 
683
  if __name__ == "__main__":
684
  port = int(os.getenv("PORT", 7860))
685
- log.info("Starting server on 0.0.0.0:%d", port)
686
  app.run(host="0.0.0.0", port=port, debug=True)
 
1
+ # server_suggestions.py
2
+ """
3
+ Suggestion-focused server for outfit recommendations.
4
+
5
+ Endpoints:
6
+ - POST /suggest_candidates -> produce candidate outfits (Step 1)
7
+ - POST /refine_candidates -> refine candidates with constraints (Step 2)
8
+ - POST /finalize_suggestion -> finalize a candidate and return the suggestion (Step 3)
9
+
10
+ This server intentionally omits image detection and upload endpoints — it expects
11
+ the client to send pre-extracted wardrobe item summaries (id, analysis, title, thumbnail_url...).
12
+
13
+ Features:
14
+ - Optional Gemini integration (GEMINI_API_KEY)
15
+ - Optional Firestore integration (FIREBASE_ADMIN_JSON) for:
16
+ * reading/writing user summary at users/{uid}
17
+ * reading recent suggestions in collection 'suggestions' for the past week
18
+ All Firestore operations are wrapped with graceful failures so the service still works
19
+ when Firestore isn't configured.
20
+ """
21
 
22
  import os
 
23
  import json
 
24
  import logging
25
  import uuid
26
  import time
27
  import difflib
28
+ from typing import List, Dict, Any, Set, Optional
29
 
30
  from flask import Flask, request, jsonify
31
  from flask_cors import CORS
 
 
 
32
 
33
+ # optional genai client
34
+ try:
35
+ from google import genai
36
+ from google.genai import types
37
+ GENAI_AVAILABLE = True
38
+ except Exception:
39
+ genai = None
40
+ types = None
41
+ GENAI_AVAILABLE = False
42
 
43
+ # optional firebase admin (Firestore)
44
  try:
45
  import firebase_admin
46
+ from firebase_admin import credentials as fb_credentials
47
+ from firebase_admin import firestore as fb_firestore_module
48
+ FIREBASE_AVAILABLE = True
49
  except Exception:
50
  firebase_admin = None
51
  fb_credentials = None
52
+ fb_firestore_module = None
53
+ FIREBASE_AVAILABLE = False
54
 
55
  logging.basicConfig(level=logging.INFO)
56
+ log = logging.getLogger("suggestion-server")
57
+
58
+ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "").strip()
59
+ if GEMINI_API_KEY and GENAI_AVAILABLE:
60
+ client = genai.Client(api_key=GEMINI_API_KEY)
61
+ log.info("Gemini client configured.")
62
+ else:
63
+ client = None
64
+ if GEMINI_API_KEY and not GENAI_AVAILABLE:
65
+ log.warning("GEMINI_API_KEY provided but genai SDK not installed. Gemini disabled.")
66
+ else:
67
+ log.info("GEMINI_API_KEY not provided; using fallback heuristics.")
68
+
69
+ # Firestore service account JSON in env variable (stringified JSON)
70
+ FIREBASE_ADMIN_JSON = os.getenv("FIREBASE_ADMIN_JSON", "").strip()
71
 
72
+ # Firestore runtime state
73
+ _firestore_client = None
74
+ _firebase_app = None
75
 
 
 
 
76
 
77
+ def init_firestore_if_needed():
78
+ """
79
+ Initialize firebase admin and Firestore client if FIREBASE_ADMIN_JSON is set.
80
+ Returns Firestore client or None on failure/unconfigured.
81
+ """
82
+ global _firestore_client, _firebase_app
83
+ if _firestore_client is not None:
84
+ return _firestore_client
85
+ if not FIREBASE_ADMIN_JSON:
86
+ log.info("No FIREBASE_ADMIN_JSON env var set; Firestore not initialized.")
87
+ return None
88
+ if not FIREBASE_AVAILABLE:
89
+ log.warning("FIREBASE_ADMIN_JSON provided but firebase-admin SDK not installed; skip Firestore init.")
90
+ return None
91
+ try:
92
+ sa_obj = json.loads(FIREBASE_ADMIN_JSON)
93
+ except Exception as e:
94
+ log.exception("Failed parsing FIREBASE_ADMIN_JSON: %s", e)
95
+ return None
96
+ try:
97
+ cred = fb_credentials.Certificate(sa_obj)
98
+ # If app already exists, avoid re-initializing
99
+ try:
100
+ _firebase_app = firebase_admin.get_app()
101
+ except Exception:
102
+ _firebase_app = firebase_admin.initialize_app(cred)
103
+ _firestore_client = fb_firestore_module.client()
104
+ log.info("Initialized Firestore client.")
105
+ return _firestore_client
106
+ except Exception as e:
107
+ log.exception("Failed to initialize Firestore: %s", e)
108
+ return None
109
 
 
 
110
 
111
+ # ---------- Category mapping (kept small and deterministic) ----------
 
 
112
  CATEGORIES = [
113
  "top",
114
  "shirt",
 
137
 
138
 
139
  def map_type_to_category(item_type: str) -> str:
 
 
 
140
  if not item_type:
141
  return "others"
142
  t = item_type.strip().lower()
 
143
  if t in CATEGORIES:
144
  return t
 
145
  t_clean = t.rstrip("s")
146
  if t_clean in CATEGORIES:
147
  return t_clean
 
148
  matches = difflib.get_close_matches(t, CATEGORIES, n=1, cutoff=0.6)
149
  if matches:
150
  return matches[0]
 
151
  for token in t.replace("_", " ").split():
152
  if token in CATEGORIES:
153
  return token
154
  return "others"
155
 
156
 
157
+ # ---------- Lightweight helpers for brands and matching ----------
158
 
159
+ def _safe_item_brand(itm: Dict[str, Any]) -> str:
160
+ analysis = itm.get("analysis") or {}
161
+ brand = analysis.get("brand") if isinstance(analysis, dict) else None
162
+ if not brand:
163
+ brand = itm.get("brand") or ""
164
+ return str(brand).strip()
165
 
166
 
167
+ def _item_matches_brand_constraints(itm: Dict[str, Any], require_brands: Set[str], reject_brands: Set[str]) -> bool:
168
+ brand = _safe_item_brand(itm).lower()
169
+ if require_brands and brand and brand not in require_brands:
170
+ return False
171
+ if reject_brands and brand and brand in reject_brands:
172
+ return False
173
+ return True
174
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
 
176
+ # ---------- Naive candidate generator (fallback) ----------
177
 
178
def naive_generate_candidates(wardrobe_items: List[Dict[str, Any]],
                              user_inputs: Dict[str, Any],
                              user_profile: Dict[str, Any],
                              past_week_items: List[Dict[str, Any]],
                              max_candidates: int = 6) -> List[Dict[str, Any]]:
    """
    Simple combinatorial fallback generator: groups wardrobe items by category
    and composes seed (dress/top) + bottom + shoe outfits.

    Args:
        wardrobe_items: wardrobe entries ({id,title,label,analysis,confidence,...}).
        user_inputs: user preferences (currently unused by this fallback).
        user_profile: user profile (currently unused by this fallback).
        past_week_items: recently used items; reusing their ids costs 0.15 score.
        max_candidates: maximum number of outfits to return.

    Returns:
        Candidate dicts {id, items, score, reason, notes} sorted by score desc.
    """
    grouped: Dict[str, List[Dict[str, Any]]] = {}
    for itm in wardrobe_items:
        title = (itm.get("title") or (itm.get("analysis") or {}).get("type") or itm.get("label") or "")
        cat = map_type_to_category(title)
        grouped.setdefault(cat, []).append(itm)

    def pick(cat, n=3):
        # Highest-confidence items first within a category.
        arr = grouped.get(cat, [])[:]
        arr.sort(key=lambda x: float(x.get("confidence", 0.5)), reverse=True)
        return arr[:n]

    tops = pick("top", 5) + pick("shirt", 3) + pick("tshirt", 3)
    bottoms = pick("pants", 4) + pick("jeans", 3) + pick("skirt", 2)
    shoes = pick("shoe", 4) + pick("sneaker", 3) + pick("boot", 2) + pick("heels", 2)
    dresses = grouped.get("dress", [])[:4]
    # NOTE: outerwear/accessories are not yet composed into outfits here.

    seeds = dresses + tops
    if not seeds:
        seeds = wardrobe_items[:6]

    past_ids = {x.get("id") for x in (past_week_items or []) if x.get("id")}
    candidates = []
    used = set()

    for seed in seeds:
        for b in (bottoms[:3] or [None]):
            for sh in (shoes[:3] or [None]):
                items = [seed]
                if b and b.get("id") != seed.get("id"):
                    items.append(b)
                # BUGFIX: the original set literal had mismatched quotes
                # ({seed.get("id'), ...}), which was a syntax error.
                if sh and sh.get("id") not in {seed.get("id"), b.get("id") if b else None}:
                    items.append(sh)
                ids = tuple(sorted(str(x.get("id")) for x in items if x.get("id")))
                if ids in used:
                    continue
                used.add(ids)
                score = sum(float(x.get("confidence", 0.5)) for x in items) / max(1, len(items))
                # Penalize outfits that reuse anything worn in the past week.
                if any(x.get("id") in past_ids for x in items if x.get("id")):
                    score -= 0.15
                # Small jitter so ties don't always resolve identically
                # (hash() varies across processes with PYTHONHASHSEED).
                score = max(0, min(1, score + (0.05 * (0.5 - (hash(ids) % 100) / 100.0))))
                candidate = {
                    "id": str(uuid.uuid4()),
                    "items": [{"id": x.get("id"), "label": x.get("label"), "title": x.get("title"), "thumbnail_url": x.get("thumbnail_url"), "analysis": x.get("analysis", {})} for x in items],
                    "score": round(float(score), 3),
                    "reason": "Auto combo",
                    "notes": "",
                }
                candidates.append(candidate)
                if len(candidates) >= max_candidates:
                    break
            if len(candidates) >= max_candidates:
                break
        if len(candidates) >= max_candidates:
            break

    candidates.sort(key=lambda c: c.get("score", 0), reverse=True)
    return candidates
247
 
 
 
 
 
 
248
 
249
+ # ---------- Gemini-backed candidate generator (optional) ----------
250
+
251
def generate_candidates_with_gemini(wardrobe_items: List[Dict[str, Any]],
                                    user_inputs: Dict[str, Any],
                                    user_profile: Dict[str, Any],
                                    past_week_items: List[Dict[str, Any]],
                                    max_candidates: int = 6) -> List[Dict[str, Any]]:
    """
    Ask Gemini to compose candidate outfits from the wardrobe.

    Falls back to naive_generate_candidates when Gemini is unconfigured,
    errors out, or returns no usable candidates. Output shape matches the
    naive generator's: [{id, items, score, reason, notes}, ...].
    """
    if not client:
        log.info("Gemini not configured; using naive generator.")
        return naive_generate_candidates(wardrobe_items, user_inputs, user_profile, past_week_items, max_candidates)

    # Create concise wardrobe entries for the prompt (truncate long fields).
    summarized = []
    for it in wardrobe_items:
        a = it.get("analysis") or {}
        summarized.append({
            "id": it.get("id"),
            "type": a.get("type") or it.get("title") or it.get("label") or "",
            "summary": (a.get("summary") or "")[:180],
            "brand": (a.get("brand") or "")[:80],
            "tags": a.get("tags") or [],
            "thumbnail_url": it.get("thumbnail_url") or ""
        })

    # BUGFIX: the literal JSON example must escape its braces ({{ }});
    # with single braces str.format() raised KeyError('"candidates"') on
    # every call, silently forcing the naive fallback at the call site.
    prompt = (
        "You are a stylist assistant. Given a user's WARDROBE array (id,type,summary,brand,tags),\n"
        "and USER_INPUT object containing moods, appearances, events, activity, preferred/excluded colors, keyBrands, etc.,\n"
        "and a list PAST_WEEK of recently used item ids, produce up to {max} candidate outfits.\n\n"
        "Return only valid JSON: {{\"candidates\": [ {{\"id\": \"..\", \"item_ids\": [..], \"score\": 0-1, \"notes\": \"one-line explanation\", \"short_reason\": \"phrase\"}}, ... ]}}\n\n"
        "Prefer diverse, practical combinations and avoid reusing PAST_WEEK item ids when possible.\n\n"
        "WARDROBE = {wardrobe}\nUSER_INPUT = {u}\nPAST_WEEK = {p}\n"
    ).format(max=max_candidates, wardrobe=json.dumps(summarized), u=json.dumps(user_inputs), p=json.dumps([p.get("id") for p in (past_week_items or [])]))

    contents = [types.Content(role="user", parts=[types.Part.from_text(text=prompt)])]
    # Structured-output schema so the response parses deterministically.
    schema = {
        "type": "object",
        "properties": {
            "candidates": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "id": {"type": "string"},
                        "item_ids": {"type": "array", "items": {"type": "string"}},
                        "score": {"type": "number"},
                        "notes": {"type": "string"},
                        "short_reason": {"type": "string"},
                    },
                    "required": ["id", "item_ids"],
                },
            }
        },
        "required": ["candidates"],
    }
    cfg = types.GenerateContentConfig(response_mime_type="application/json", response_schema=schema)
    try:
        resp = client.models.generate_content(model="gemini-2.5-flash-lite", contents=contents, config=cfg)
        parsed = json.loads(resp.text or "")
        id_map = {str(it.get("id")): it for it in wardrobe_items}
        out = []
        for c in parsed.get("candidates", [])[:max_candidates]:
            # Resolve returned ids back to wardrobe items; unknown ids are dropped.
            items = []
            for iid in c.get("item_ids", []):
                itm = id_map.get(str(iid))
                if itm:
                    items.append({"id": itm.get("id"), "label": itm.get("label"), "title": itm.get("title"), "thumbnail_url": itm.get("thumbnail_url"), "analysis": itm.get("analysis", {})})
            out.append({
                "id": c.get("id") or str(uuid.uuid4()),
                "items": items,
                "score": float(c.get("score", 0.5)),
                "reason": c.get("short_reason") or "",
                "notes": (c.get("notes") or "")[:300],
            })
        if not out:
            log.warning("Gemini returned no candidates; falling back.")
            return naive_generate_candidates(wardrobe_items, user_inputs, user_profile, past_week_items, max_candidates)
        out.sort(key=lambda x: x.get("score", 0), reverse=True)
        return out[:max_candidates]
    except Exception as e:
        log.exception("Gemini candidate generation failed: %s", e)
        return naive_generate_candidates(wardrobe_items, user_inputs, user_profile, past_week_items, max_candidates)
331
 
332
 
333
+ # ---------- Refinement logic (Step 2) ----------
334
 
335
def refine_candidates_with_constraints(candidates: List[Dict[str, Any]],
                                       wardrobe_items: List[Dict[str, Any]],
                                       constraints: Dict[str, Any]) -> Dict[str, Any]:
    """
    Filter candidate outfits against brand and schedule constraints and
    attach full item metadata to the survivors.

    Returns a dict with keys: refined, rerun_required (bool), rerun_hint, removed.
    """
    wanted = set(b.lower() for b in (constraints.get("require_brands") or []) if b)
    banned = set(b.lower() for b in (constraints.get("reject_brands") or []) if b)
    recent_ids = set(x.get("id") for x in (constraints.get("past_week_items") or []) if x.get("id"))
    allow_rerun = bool(constraints.get("allow_rerun", False))

    by_id = {str(it.get("id")): it for it in wardrobe_items}
    kept: List[Dict[str, Any]] = []
    dropped: List[Dict[str, Any]] = []

    for outfit in candidates:
        # Resolve lightweight (id-only) item references back to full records.
        full_items = []
        for ref in (outfit.get("items") or []):
            hit = by_id.get(str(ref.get("id")))
            full_items.append(hit if hit else ref)

        # Brand constraints: need at least one required brand, no rejected brand.
        lowered = [_safe_item_brand(it).lower() for it in full_items]
        if wanted and not any(b in wanted for b in lowered):
            dropped.append({"id": outfit.get("id"), "reason": "missing required brand"})
            continue
        if banned and any(b in banned for b in lowered):
            dropped.append({"id": outfit.get("id"), "reason": "contains rejected brand"})
            continue

        # Schedule conflict: outfit reuses something worn recently.
        if recent_ids and any(it.get("id") in recent_ids for it in full_items):
            if not allow_rerun:
                dropped.append({"id": outfit.get("id"), "reason": "uses recent items"})
                continue
            # Allowed, but flag the conflict for the caller/UI.
            outfit["_conflict_with_schedule"] = True

        # Replace the item list with normalized metadata records.
        outfit["items"] = [
            {
                "id": it.get("id"),
                "label": it.get("label"),
                "title": it.get("title"),
                "thumbnail_url": it.get("thumbnail_url"),
                "analysis": it.get("analysis", {}),
                "confidence": it.get("confidence", 0.5),
            }
            for it in full_items
        ]
        kept.append(outfit)

    if not kept:
        hint = "All candidates filtered out. Consider loosening brand/schedule constraints or allow rerun."
        return {"refined": [], "rerun_required": True, "rerun_hint": hint, "removed": dropped}
    kept.sort(key=lambda c: c.get("score", 0), reverse=True)
    return {"refined": kept, "rerun_required": False, "rerun_hint": "", "removed": dropped}
398
+
399
+
400
+ # ---------- Final note generation (Step 3) ----------
401
+
402
def finalize_suggestion_note_with_gemini(candidate: Dict[str, Any], user_inputs: Dict[str, Any], user_profile: Dict[str, Any]) -> str:
    """
    Produce a one-sentence friendly note explaining a finalized outfit.

    Uses Gemini when configured; otherwise (or on any failure) builds a
    heuristic sentence from the user's moods/events.
    """
    if not client:
        # No Gemini client configured -> heuristic note.
        chosen_moods = ", ".join(user_inputs.get("moods", [])[:2])
        chosen_events = ", ".join(user_inputs.get("events", [])[:1])
        return f"Because you chose {chosen_moods or 'your mood'} for {chosen_events or 'your event'} — practical and stylish."
    try:
        prompt = (
            "You are a concise stylist. Given CANDIDATE_ITEMS (list of short item descriptions) and USER_INPUT, "
            "write a single short friendly sentence (<=18 words) explaining why this outfit was chosen. Return plain text.\n\n"
        )
        # Compact, truncated per-item descriptions for the prompt.
        summarized_items = [
            {
                "id": itm.get("id"),
                "desc": ((itm.get("analysis") or {}).get("summary") or itm.get("label") or itm.get("title") or "")[:160],
                "brand": ((itm.get("analysis") or {}).get("brand") or "")[:60],
            }
            for itm in candidate.get("items", [])
        ]
        contents = [
            types.Content(role="user", parts=[types.Part.from_text(text=prompt)]),
            types.Content(role="user", parts=[types.Part.from_text(text="CANDIDATE_ITEMS: " + json.dumps(summarized_items))]),
            types.Content(role="user", parts=[types.Part.from_text(text="USER_INPUT: " + json.dumps(user_inputs or {}))]),
            types.Content(role="user", parts=[types.Part.from_text(text="Return only a single short sentence.")]),
        ]
        resp = client.models.generate_content(model="gemini-2.5-flash-lite", contents=contents)
        reply = (resp.text or "").strip()
        # Keep only the first line in case the model returns extra text.
        return reply.splitlines()[0] if reply else "A curated outfit chosen for your preferences."
    except Exception as e:
        log.exception("Gemini finalize note failed: %s", e)
        chosen_moods = ", ".join(user_inputs.get("moods", [])[:2])
        chosen_events = ", ".join(user_inputs.get("events", [])[:1])
        return f"Because you chose {chosen_moods or 'your mood'}, for {chosen_events or 'your event'} — practical and stylish."
431
 
 
432
 
433
+ # ---------- Firestore helpers for user summary & recent suggestions (graceful) ----------
434
 
435
def get_or_create_user_summary(uid: str, fallback_from_inputs: Dict[str, Any]) -> str:
    """
    Read users/{uid}.summary from Firestore, creating/backfilling it from a
    heuristic when absent. When Firestore is unavailable or errors out, a
    purely local heuristic summary is returned instead.
    """
    fs = init_firestore_if_needed()
    gen_summary = None
    try:
        if not fs:
            # Firestore not configured -> local-only summary.
            log.info("Firestore not available. Building local user summary.")
            gen_summary = _heuristic_summary_from_inputs(fallback_from_inputs)
            return gen_summary

        doc_ref = fs.collection("users").document(uid)
        snapshot = doc_ref.get()

        if not snapshot.exists:
            # No user document yet -> create one carrying the heuristic summary.
            gen_summary = _heuristic_summary_from_inputs(fallback_from_inputs)
            try:
                doc_ref.set({"summary": gen_summary, "createdAt": int(time.time())})
                log.info("Created users/%s with summary", uid)
            except Exception as e:
                log.warning("Failed to create user doc in Firestore: %s", e)
            return gen_summary

        stored = (snapshot.to_dict() or {}).get("summary")
        if stored:
            return stored

        # Document exists but has no summary -> backfill one (best effort).
        gen_summary = _heuristic_summary_from_inputs(fallback_from_inputs)
        try:
            doc_ref.set({"summary": gen_summary, "updatedAt": int(time.time())}, merge=True)
            log.info("Wrote generated summary into users/%s", uid)
        except Exception as e:
            log.warning("Failed to write generated summary to Firestore: %s", e)
        return gen_summary
    except Exception as e:
        log.exception("Error fetching/creating user summary: %s", e)
        return gen_summary or _heuristic_summary_from_inputs(fallback_from_inputs)
 
 
 
 
 
 
 
 
 
 
 
 
476
 
 
 
 
477
 
478
def fetch_recent_suggestions(uid: str, days: int = 7) -> List[Dict[str, Any]]:
    """
    Fetch the items used in this user's suggestions within the last `days` days.

    Queries the 'suggestions' collection (up to 50 docs) and flattens each
    document's `items` array to [{id, label}, ...]. Returns [] when Firestore
    is unavailable or the query fails.
    """
    fs = init_firestore_if_needed()
    if not fs:
        return []
    try:
        since = int(time.time()) - days * 86400
        query = fs.collection("suggestions").where("uid", "==", uid).where("createdAtTs", ">=", since).limit(50)
        recent: List[Dict[str, Any]] = []
        for doc in query.get():
            payload = doc.to_dict() or {}
            for entry in payload.get("items", []) or []:
                recent.append({"id": entry.get("id"), "label": entry.get("label")})
        return recent
    except Exception as e:
        log.warning("Failed to fetch recent suggestions: %s", e)
        return []
 
 
 
 
 
 
 
500
 
 
 
 
 
501
 
502
+ def _heuristic_summary_from_inputs(user_inputs: Dict[str, Any]) -> str:
503
+ moods = user_inputs.get("moods") or []
504
+ brands = user_inputs.get("keyBrands") or []
505
+ events = user_inputs.get("events") or []
506
+ parts = []
507
+ if moods:
508
+ parts.append("moods: " + ", ".join(moods[:3]))
509
+ if brands:
510
+ parts.append("likes brands: " + ", ".join(brands[:3]))
511
+ if events:
512
+ parts.append("often for: " + ", ".join(events[:2]))
513
+ if not parts:
514
+ return "A user who likes simple, practical outfits."
515
+ return " & ".join(parts)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
516
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
517
 
518
# ---------- Flask app and endpoints ----------

app = Flask(__name__)
CORS(app)  # allow cross-origin requests (mobile/web clients on other origins)
522
 
523
 
524
@app.route("/suggest_candidates", methods=["POST"])
def suggest_candidates():
    """
    Step 1: Accepts wardrobe items and user inputs, returns candidate outfits.

    Body JSON:
      - uid (optional)
      - wardrobe_items: array of items { id, title, analysis: {type,summary,brand,tags}, thumbnail_url (optional), confidence (optional) }
      - user_inputs: object
      - max_candidates: optional int
    Response: { ok: True, candidates: [...], user_summary: "..." }
    """
    try:
        body = request.get_json(force=True)
    except Exception:
        return jsonify({"error": "invalid json"}), 400

    uid = (body.get("uid") or request.args.get("uid") or "anon").strip() or "anon"
    wardrobe_items = body.get("wardrobe_items") or []
    user_inputs = body.get("user_inputs") or {}
    max_c = int(body.get("max_candidates") or 6)

    if not isinstance(wardrobe_items, list) or not isinstance(user_inputs, dict):
        return jsonify({"error": "wardrobe_items(list) and user_inputs(object) required"}), 400

    # User summary from Firestore when available; heuristic otherwise.
    try:
        user_summary = get_or_create_user_summary(uid, user_inputs)
    except Exception as e:
        log.warning("get_or_create_user_summary failed: %s", e)
        user_summary = _heuristic_summary_from_inputs(user_inputs)

    # Recent usage, so the generators can discourage repeats.
    try:
        past_week_items = fetch_recent_suggestions(uid, days=7) or []
    except Exception as e:
        log.warning("fetch_recent_suggestions failed: %s", e)
        past_week_items = []

    # Candidate generation: Gemini when configured, naive fallback otherwise.
    try:
        candidates = generate_candidates_with_gemini(wardrobe_items, user_inputs, {"summary": user_summary}, past_week_items, max_candidates=max_c)
    except Exception as e:
        log.exception("candidate generation failed: %s", e)
        candidates = naive_generate_candidates(wardrobe_items, user_inputs, {"summary": user_summary}, past_week_items, max_candidates=max_c)

    # Normalize missing thumbnails to None so clients get a stable shape.
    for cand in candidates:
        for entry in cand.get("items", []):
            entry.setdefault("thumbnail_url", entry.get("thumbnail_url") or None)

    return jsonify({"ok": True, "candidates": candidates, "user_summary": user_summary}), 200
574
 
575
 
576
@app.route("/refine_candidates", methods=["POST"])
def refine_candidates():
    """
    Step 2: Apply constraints and attach item metadata.

    Body JSON:
      - wardrobe_items: full items list
      - candidates: candidates returned by /suggest_candidates
      - constraints: { require_brands: [...], reject_brands: [...], past_week_items: [...], allow_rerun: bool }
    Response: { ok: True, refined: [...], rerun_required: bool, rerun_hint: str, removed: [...] }
    """
    try:
        body = request.get_json(force=True)
    except Exception:
        return jsonify({"error": "invalid json"}), 400

    wardrobe_items = body.get("wardrobe_items") or []
    candidates = body.get("candidates") or []
    constraints = body.get("constraints") or {}

    if not isinstance(wardrobe_items, list) or not isinstance(candidates, list):
        return jsonify({"error": "wardrobe_items(list) and candidates(list) required"}), 400

    try:
        outcome = refine_candidates_with_constraints(candidates, wardrobe_items, constraints)
        return jsonify({"ok": True, **outcome}), 200
    except Exception as e:
        log.exception("refine_candidates failed: %s", e)
        return jsonify({"error": "internal", "detail": str(e)}), 500
604
 
 
 
605
 
606
@app.route("/finalize_suggestion", methods=["POST"])
def finalize_suggestion():
    """
    Step 3: Finalize a candidate and return a suggestion object.

    Body JSON:
      - uid (optional)
      - candidate (object) OR candidate_id + candidates (list)
      - user_inputs (optional)
      - user_profile (optional)
    Response:
      { ok: True, suggestion: { id, items, thumbnail_urls, note, createdAt, meta } }
    """
    try:
        body = request.get_json(force=True)
    except Exception:
        return jsonify({"error": "invalid json"}), 400

    uid = (body.get("uid") or request.args.get("uid") or "anon").strip() or "anon"

    # Accept an inline candidate object, or look one up by id in `candidates`.
    candidate = body.get("candidate")
    if not candidate:
        wanted_id = body.get("candidate_id")
        pool = body.get("candidates") or []
        if wanted_id and isinstance(pool, list):
            candidate = next((c for c in pool if c.get("id") == wanted_id), None)

    if not candidate:
        return jsonify({"error": "candidate required (object or candidate_id + candidates)"}), 400

    user_inputs = body.get("user_inputs") or {}
    user_profile = body.get("user_profile") or {}

    # Collect the thumbnails that actually exist on the chosen items.
    thumb_urls = [itm.get("thumbnail_url") for itm in candidate.get("items", []) if itm.get("thumbnail_url")]

    # Final note (Gemini when configured, heuristic otherwise).
    note = finalize_suggestion_note_with_gemini(candidate, user_inputs, user_profile)

    suggestion = {
        "id": candidate.get("id") or str(uuid.uuid4()),
        "items": candidate.get("items", []),
        "thumbnail_urls": thumb_urls,
        "note": note,
        "createdAt": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "createdAtTs": int(time.time()),
        "meta": {"source": "server_pipeline", "user_inputs": user_inputs, "user_profile": user_profile},
        "uid": uid,
    }

    # Best-effort persistence; failures are logged but never block the response.
    fs = init_firestore_if_needed()
    if fs:
        try:
            fs.collection("suggestions").document(suggestion["id"]).set(suggestion)
            log.info("Persisted suggestion %s for uid=%s", suggestion["id"], uid)
        except Exception as e:
            log.warning("Failed to persist suggestion to Firestore: %s", e)

    return jsonify({"ok": True, "suggestion": suggestion}), 200
666
+
667
+
668
# Basic health endpoint
@app.route("/health", methods=["GET"])
def health():
    """Liveness probe: reports server time plus Gemini/Firestore availability flags."""
    return jsonify({"ok": True, "time": int(time.time()), "gemini": bool(client), "firestore": bool(init_firestore_if_needed())}), 200
672
 
673
 
674
if __name__ == "__main__":
    # PORT defaults to 7860 (Hugging Face Spaces convention).
    port = int(os.getenv("PORT", 7860))
    log.info("Starting suggestion server on 0.0.0.0:%d", port)
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader; confirm
    # this entry point is not used in production (use a WSGI server there).
    app.run(host="0.0.0.0", port=port, debug=True)