aniket9909 committed on
Commit
8802dca
·
verified ·
1 Parent(s): 7d13bd0

Update analysis.py

Browse files
Files changed (1) hide show
  1. analysis.py +417 -82
analysis.py CHANGED
@@ -2,8 +2,9 @@ import os
2
  import json
3
  import time
4
  import hashlib
 
5
  from datetime import datetime
6
- from typing import Optional, Dict, Any
7
 
8
  from google import genai
9
  from google.genai.types import Part
@@ -16,6 +17,10 @@ from mimetypes import guess_type
16
  API_KEY = os.getenv("GEMINI_API_KEY")
17
  MODEL_COMBINED = "models/gemini-2.5-flash"
18
 
 
 
 
 
19
 
20
  _analysis_cache = {}
21
  _usage_log = []
@@ -74,34 +79,137 @@ def retry_with_backoff(func, max_retries: int = 3, initial_delay: float = 2.0):
74
 
75
 
76
  # =========================
77
- # MAIN GEMINI SKIN ANALYSIS
78
  # =========================
79
- def analyze_skin_complete(
80
- image_path: str,
81
- use_cache: bool = True,
82
- max_retries: int = 3
83
- ):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
- # Cache key based on image hash
86
- cache_key = f"complete_v2_{get_image_hash(image_path)}"
87
- if use_cache and cache_key in _analysis_cache:
88
- print("✓ Using cached analysis results")
89
- return _analysis_cache[cache_key]
90
 
91
- def _call():
92
- client = load_client()
93
-
94
- # Read image bytes
95
- with open(image_path, "rb") as f:
96
- image_bytes = f.read()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
 
98
- mime_type, _ = guess_type(image_path)
99
- mime_type = mime_type or "image/jpeg"
100
- image_part = Part.from_bytes(data=image_bytes, mime_type=mime_type)
101
 
102
- # UPDATED FULL PROMPT FROM SCRIPT #2
103
- prompt = """
104
- You are an advanced AI skin analysis system. Analyze the face in this image comprehensively.
 
 
 
 
105
 
106
  Return STRICT JSON with ALL these fields (use exact field names):
107
 
@@ -161,26 +269,104 @@ Return STRICT JSON with ALL these fields (use exact field names):
161
  }
162
  }
163
 
164
- DETAILED ANALYSIS GUIDELINES:
165
-
166
- PORES:
167
- - Assess pore visibility across different facial zones
168
- - Consider pore size relative to skin type
169
- - Note if pores appear stretched, enlarged, or clogged
170
- - T-zone (forehead, nose, chin) typically has more prominent pores
171
- - Cheeks may show different pore characteristics
172
-
173
- WRINKLES:
174
- - Distinguish between dynamic (expression) and static (at rest) wrinkles
175
- - Forehead lines: horizontal lines across forehead
176
- - Frown lines: vertical lines between eyebrows (11 lines)
177
- - Crow's feet: radiating lines from outer eye corners
178
- - Nasolabial folds: lines from nose to mouth corners
179
- - Marionette lines: lines from mouth corners downward
180
- - Assess depth (superficial vs deep wrinkles)
181
- - Consider fine lines vs established wrinkles
182
-
183
- CRITICAL RULES:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
  - Return ONLY raw JSON, no markdown formatting
185
  - No explanations, no text outside JSON
186
  - All float values must be between 0.0 and 1.0
@@ -189,59 +375,166 @@ CRITICAL RULES:
189
  - Do NOT guess or infer anything not visible
190
  - Ensure all fields are present in the response
191
  - If a feature is not visible or applicable, use 0.0
 
192
  """
193
 
194
- # --- API CALL WITH TIMING ---
195
- start_time = time.time()
196
- response = client.models.generate_content(
197
- model=MODEL_COMBINED,
198
- contents=[prompt, image_part],
199
- config={"temperature": 0, "top_p": 1, "top_k": 1}
200
- )
201
- elapsed = time.time() - start_time
202
 
203
- # Clean response text
204
- if not response or not response.candidates:
205
- raise RuntimeError("Unable to process image at this time")
 
 
 
206
 
207
- parts = response.candidates[0].content.parts
208
- text_chunks = [p.text for p in parts if hasattr(p, "text") and p.text]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
209
 
210
- if not text_chunks:
211
- raise RuntimeError("Unable to process image at this time")
 
212
 
213
- clean_text = "\n".join(text_chunks)
214
- clean_text = clean_text.replace("```json", "").replace("```", "").strip()
215
 
 
 
216
 
217
- # Convert to dict
218
- try:
219
- result = json.loads(clean_text)
220
- except json.JSONDecodeError:
221
- raise RuntimeError("Unable to process image at this time")
222
 
 
 
 
 
 
 
 
 
 
223
 
224
- # Estimate token usage
225
- estimated_tokens = len(prompt) / 4 + len(clean_text) / 4 + 1000
226
- cost = (estimated_tokens / 1_000_000) * 0.075
227
 
228
- log_api_usage(int(estimated_tokens), cost, success=True)
229
 
230
- print(f"✓ Analysis completed in {elapsed:.2f}s (est. cost: ${cost:.6f})")
231
 
232
- return result
233
 
234
- try:
235
- result = retry_with_backoff(_call, max_retries=max_retries)
236
- except Exception as e:
237
- print(f"❌ Final failure: {e}")
238
- log_api_usage(0, 0, success=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239
  return None
240
 
241
- if result and use_cache:
242
- _analysis_cache[cache_key] = result
 
 
 
 
 
 
 
 
243
 
244
- return result
245
 
246
  # =========================
247
  # SCORE FUNCTIONS
@@ -410,6 +703,10 @@ def build_detected_text(category, severity):
410
  # HIGH-LEVEL ANALYSIS WRAPPER
411
  # =========================
412
  def get_comprehensive_analysis(image_path):
 
 
 
 
413
  raw = analyze_skin_complete(image_path)
414
  if not raw:
415
  return None
@@ -473,9 +770,13 @@ def get_comprehensive_analysis(image_path):
473
  "age_analysis": raw["age_analysis"],
474
  "metadata": {
475
  "analyzed_at": datetime.now().isoformat(),
476
- "model_used": MODEL_COMBINED
 
 
477
  }
478
  }
 
 
479
  # =========================
480
  # HTML REPORT GENERATOR
481
  # =========================
@@ -514,15 +815,49 @@ def generate_html_report(analysis, user_info, output_path="new_report.html"):
514
 
515
  html = html.replace("{{hydration_severity_label}}", analysis["severity_info"]["hydration"]["label"])
516
  html = html.replace("{{hydration_detected_text}}", analysis["severity_info"]["hydration"]["text"])
 
517
  # User Info
518
  html = html.replace("{{full_name}}", str(user_info.get("name", "")))
519
  html = html.replace("{{age}}", str(user_info.get("age", "")))
520
  html = html.replace("{{phone}}", str(user_info.get("phone", "")))
521
  html = html.replace("{{gender}}", str(user_info.get("gender", "")))
522
 
523
-
524
  # Write final HTML
525
  with open(output_path, "w", encoding="utf-8") as f:
526
  f.write(html)
527
 
528
  return output_path
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import json
3
  import time
4
  import hashlib
5
+ import numpy as np
6
  from datetime import datetime
7
+ from typing import Optional, Dict, Any, List
8
 
9
  from google import genai
10
  from google.genai.types import Part
 
17
  API_KEY = os.getenv("GEMINI_API_KEY")
18
  MODEL_COMBINED = "models/gemini-2.5-flash"
19
 
20
+ # Consistency settings
21
+ ENABLE_MULTI_PASS = True # Set to False to disable multi-pass validation
22
+ VALIDATION_PASSES = 3 # Number of analyses to run for averaging
23
+ MAX_VARIANCE_THRESHOLD = 0.15 # Maximum allowed variance before warning
24
 
25
  _analysis_cache = {}
26
  _usage_log = []
 
79
 
80
 
81
  # =========================
82
+ # NORMALIZATION FUNCTIONS
83
  # =========================
84
def normalize_score(value: float, threshold: float = 0.05) -> float:
    """
    Normalize a score to reduce noise and improve consistency.

    Values within `threshold` of either extreme are snapped to a clean
    0.0 or 1.0 so that near-zero noise and near-certain detections read
    as exact endpoints; everything in between is rounded to 2 decimals.

    Args:
        value: Raw score, expected between 0.0 and 1.0.
        threshold: Values below this become 0.0; values above
            (1.0 - threshold) become 1.0.

    Returns:
        Normalized score rounded to 2 decimal places.
    """
    if value < threshold:
        return 0.0
    if value > (1.0 - threshold):
        return 1.0
    # Round to 2 decimal places for consistency across passes.
    return round(value, 2)


def normalize_category_scores(category_data: dict, threshold: float = 0.05) -> dict:
    """
    Apply normalize_score() to every numeric value in a category dict.

    Non-numeric values (labels, nested structures) and booleans pass
    through unchanged. Booleans are excluded explicitly because `bool`
    is a subclass of `int` in Python and would otherwise be silently
    coerced into 0.0/1.0 floats.

    Args:
        category_data: Mapping of field name -> raw score or other value.
        threshold: Snap threshold forwarded to normalize_score().

    Returns:
        New dict with the same keys and normalized numeric values.
    """
    normalized = {}
    for key, value in category_data.items():
        # bool check first: isinstance(True, int) is True in Python.
        if isinstance(value, (int, float)) and not isinstance(value, bool):
            normalized[key] = normalize_score(float(value), threshold)
        else:
            normalized[key] = value
    return normalized
113
 
 
 
 
 
 
114
 
115
+ # =========================
116
+ # AVERAGING FUNCTIONS
117
+ # =========================
118
def average_analyses(results: List[dict]) -> Optional[dict]:
    """
    Average multiple analysis results for improved consistency.

    Numeric scores in the main categories are replaced by their mean
    (rounded to 3 decimals), the integer age fields by their rounded
    mean, and the categorical Fitzpatrick type by the most common value
    across passes.

    Args:
        results: List of analysis dictionaries, one per pass.

    Returns:
        Averaged analysis dictionary, or None if `results` is empty.
        A single result is returned as-is (no copy).
    """
    if not results:
        return None

    if len(results) == 1:
        return results[0]

    # Deep copy of the first result via a JSON round-trip, keeping its
    # overall structure (including non-numeric fields) while the loops
    # below overwrite the averaged values.
    averaged = json.loads(json.dumps(results[0]))

    # Average each numeric field in the main score categories.
    for category in ["hydration", "pigmentation", "acne", "pores", "wrinkles"]:
        if category not in averaged:
            continue
        for field in averaged[category]:
            current = averaged[category][field]
            # Exclude bools: bool is an int subclass and must not be averaged.
            if isinstance(current, (int, float)) and not isinstance(current, bool):
                values = [
                    r[category][field]
                    for r in results
                    if category in r and field in r[category]
                ]
                if values:
                    averaged[category][field] = round(sum(values) / len(values), 3)

    # Age fields are integers: average and round to the nearest year.
    if "age_analysis" in averaged:
        for field in ["eye_age", "skin_age"]:
            if field in averaged["age_analysis"]:
                values = [
                    r["age_analysis"][field]
                    for r in results
                    if "age_analysis" in r and field in r["age_analysis"]
                ]
                if values:
                    averaged["age_analysis"][field] = round(sum(values) / len(values))

        # Fitzpatrick type is categorical: use the mode (most common value).
        # This lookup is guarded by the "age_analysis" membership test above
        # so a response missing that section cannot raise a KeyError.
        if "fitzpatrick_type" in averaged["age_analysis"]:
            values = [
                r["age_analysis"]["fitzpatrick_type"]
                for r in results
                if "age_analysis" in r and "fitzpatrick_type" in r["age_analysis"]
            ]
            if values:
                averaged["age_analysis"]["fitzpatrick_type"] = max(set(values), key=values.count)

    return averaged
173
+
174
+
175
def calculate_variance(results: List[dict], category: str) -> float:
    """
    Calculate variance for a specific category across multiple results.

    Values are matched up by field NAME rather than by position, so
    results whose category dicts have different key orders — or are
    missing some fields — are still compared correctly. (A positional
    np.var(..., axis=0) over per-result value lists breaks on such
    ragged inputs and can silently compare unrelated fields.)

    Args:
        results: List of analysis dictionaries.
        category: Category name (e.g., "acne", "wrinkles").

    Returns:
        Maximum per-field variance across all fields in the category,
        or 0.0 when fewer than two comparable values exist.
    """
    if len(results) < 2:
        return 0.0

    # Group numeric values by field name across all results.
    values_by_field = {}
    for result in results:
        data = result.get(category)
        if not isinstance(data, dict):
            continue
        for field, value in data.items():
            # Exclude bools: bool is an int subclass in Python.
            if isinstance(value, (int, float)) and not isinstance(value, bool):
                values_by_field.setdefault(field, []).append(float(value))

    # Variance is only meaningful for fields observed in at least two passes.
    variances = [
        float(np.var(values))
        for values in values_by_field.values()
        if len(values) >= 2
    ]
    if not variances:
        return 0.0

    return max(variances)
204
 
 
 
 
205
 
206
+ # =========================
207
+ # ENHANCED PROMPT
208
+ # =========================
209
+ def get_analysis_prompt() -> str:
210
+ """Returns the enhanced prompt with objective scoring criteria."""
211
+ return """
212
+ You are an advanced AI skin analysis system. Analyze the face in this image comprehensively using OBJECTIVE, CONSISTENT criteria.
213
 
214
  Return STRICT JSON with ALL these fields (use exact field names):
215
 
 
269
  }
270
  }
271
 
272
+ ═══════════════════════════════════════════════════════════════
273
+ ACNE ANALYSIS - OBJECTIVE COUNTING CRITERIA (CRITICAL FOR CONSISTENCY)
274
+ ════════════════════════���══════════════════════════════════════
275
+
276
+ **ACTIVE_ACNE** - Count visible inflamed red/pink lesions (pustules, papules):
277
+ • 0.00-0.15: 0 lesions (clear skin)
278
+ • 0.15-0.30: 1-2 small lesions
279
+ • 0.30-0.50: 3-5 lesions
280
+ • 0.50-0.70: 6-10 lesions
281
+ • 0.70-0.85: 11-20 lesions
282
+ • 0.85-1.00: 20+ lesions or widespread
283
+
284
+ **COMEDONES** - Count visible blackheads/whiteheads (small dark or white bumps):
285
+ • 0.00-0.15: 0-3 comedones
286
+ • 0.15-0.30: 4-8 comedones
287
+ • 0.30-0.50: 9-15 comedones
288
+ • 0.50-0.70: 16-25 comedones
289
+ • 0.70-0.85: 26-40 comedones
290
+ • 0.85-1.00: 40+ comedones
291
+
292
+ **CYSTIC_ACNE** - Count deep, large, painful-looking nodules or cysts:
293
+ • 0.00-0.20: None visible
294
+ • 0.20-0.40: 1 small nodule
295
+ • 0.40-0.60: 2-3 nodules or 1 large cyst
296
+ • 0.60-0.80: 4-6 nodules/cysts
297
+ • 0.80-1.00: 7+ nodules or very large/multiple cysts
298
+
299
+ **INFLAMMATION** - Assess redness, swelling around lesions:
300
+ • 0.00-0.20: No redness, minimal inflammation
301
+ • 0.20-0.40: Slight pink around 1-2 spots
302
+ • 0.40-0.60: Moderate redness around several lesions
303
+ • 0.60-0.80: Strong redness, visible swelling
304
+ • 0.80-1.00: Severe widespread inflammation
305
+
306
+ **OILINESS** - Assess visible shine/oily appearance:
307
+ • 0.00-0.25: Matte, no visible oil
308
+ • 0.25-0.50: Slight shine in T-zone
309
+ • 0.50-0.75: Noticeable shine on forehead, nose, chin
310
+ • 0.75-1.00: Very shiny/greasy appearance overall
311
+
312
+ **SCARRING** - Count visible acne scars (pitted, raised, or discolored):
313
+ • 0.00-0.20: No visible scars
314
+ • 0.20-0.40: 1-3 minor scars
315
+ • 0.40-0.60: 4-8 visible scars
316
+ • 0.60-0.80: 9-15 scars
317
+ • 0.80-1.00: 15+ scars or severe pitting
318
+
319
+ **CONGESTION** - Overall appearance of clogged, rough texture:
320
+ • 0.00-0.25: Smooth, clear pores
321
+ • 0.25-0.50: Some roughness, minor congestion
322
+ • 0.50-0.75: Noticeable rough texture, many clogged pores
323
+ • 0.75-1.00: Severely congested, bumpy texture
324
+
325
+ ═══════════════════════════════════════════════════════════════
326
+ CONSISTENCY ENFORCEMENT RULES
327
+ ═══════════════════════════════════════════════════════════════
328
+
329
+ 1. **COUNT, DON'T ESTIMATE**: Scan the entire face systematically and COUNT actual visible features
330
+ 2. **USE THE SAME SCALE EVERY TIME**: Always use the exact ranges above
331
+ 3. **BE CONSERVATIVE**: If uncertain between two ranges, choose the LOWER score
332
+ 4. **ZERO MEANS NONE**: Use 0.0 only when a feature is completely absent
333
+ 5. **SYSTEMATIC SCANNING**:
334
+ - Divide face into zones: forehead, cheeks (left/right), nose, chin
335
+ - Count features in each zone, then sum
336
+ - This ensures you don't miss or double-count features
337
+ 6. **IGNORE LIGHTING VARIATIONS**: Base assessment on actual skin features, not shadows or highlights
338
+ 7. **ONE ANALYSIS = ONE RULESET**: Never change your interpretation mid-analysis
339
+
340
+ ═══════════════════════════════════════════════════════════════
341
+ ADDITIONAL DETAILED GUIDELINES
342
+ ═══════════════════════════════════════════════════════════════
343
+
344
+ **PORES:**
345
+ - Scan T-zone (forehead, nose, chin) separately from cheeks
346
+ - Small pores (barely visible) = 0.0-0.3
347
+ - Medium pores (clearly visible) = 0.3-0.6
348
+ - Large pores (very prominent) = 0.6-1.0
349
+
350
+ **WRINKLES:**
351
+ - Fine lines (only visible up close) = 0.0-0.3
352
+ - Moderate wrinkles (clearly visible) = 0.3-0.6
353
+ - Deep wrinkles (with visible depth/shadows) = 0.6-1.0
354
+ - Distinguish dynamic (expression) vs static (at rest)
355
+
356
+ **PIGMENTATION:**
357
+ - Count distinct dark spots
358
+ - Assess overall tone evenness across face
359
+ - Under-eye darkness: compare to surrounding skin tone
360
+
361
+ **HYDRATION:**
362
+ - Flakiness: visible dry patches or peeling
363
+ - Radiance: natural healthy glow vs dull appearance
364
+ - Fine lines: thin lines from dehydration (not age)
365
+
366
+ ═══════════════════════════════════════════════════════════════
367
+ CRITICAL OUTPUT RULES
368
+ ═══════════════════════════════════════════════════════════════
369
+
370
  - Return ONLY raw JSON, no markdown formatting
371
  - No explanations, no text outside JSON
372
  - All float values must be between 0.0 and 1.0
 
375
  - Do NOT guess or infer anything not visible
376
  - Ensure all fields are present in the response
377
  - If a feature is not visible or applicable, use 0.0
378
+ - Round all floats to 2 decimal places maximum
379
  """
380
 
 
 
 
 
 
 
 
 
381
 
382
+ # =========================
383
+ # SINGLE ANALYSIS CALL
384
+ # =========================
385
def _perform_single_analysis(image_path: str) -> dict:
    """Run one Gemini analysis pass over the image and return the parsed JSON dict."""
    client = load_client()

    # Load the raw image and wrap it in a Gemini content part.
    with open(image_path, "rb") as handle:
        raw_bytes = handle.read()

    detected_mime, _ = guess_type(image_path)
    image_part = Part.from_bytes(data=raw_bytes, mime_type=detected_mime or "image/jpeg")

    prompt = get_analysis_prompt()

    # Timed API call with deterministic decoding settings for consistency.
    started = time.time()
    response = client.models.generate_content(
        model=MODEL_COMBINED,
        contents=[prompt, image_part],
        config={
            "temperature": 0,
            "top_p": 1,
            "top_k": 1,
            # Add seed if/when supported: "seed": 42
        }
    )
    elapsed = time.time() - started

    # Bail out with a generic user-facing error on an empty response.
    if not response or not response.candidates:
        raise RuntimeError("Unable to process image at this time")

    candidate_parts = response.candidates[0].content.parts
    chunks = [part.text for part in candidate_parts if hasattr(part, "text") and part.text]
    if not chunks:
        raise RuntimeError("Unable to process image at this time")

    # Strip any markdown fencing the model may have wrapped around the JSON.
    payload = "\n".join(chunks)
    payload = payload.replace("```json", "").replace("```", "").strip()

    try:
        result = json.loads(payload)
    except json.JSONDecodeError:
        raise RuntimeError("Unable to process image at this time")

    # Rough token estimate (~4 chars per token) plus image overhead, for cost logging.
    estimated_tokens = len(prompt) / 4 + len(payload) / 4 + 1000
    cost = (estimated_tokens / 1_000_000) * 0.075
    log_api_usage(int(estimated_tokens), cost, success=True)

    print(f"✓ Analysis completed in {elapsed:.2f}s (est. cost: ${cost:.6f})")

    return result
441
 
 
442
 
443
+ # =========================
444
+ # MAIN GEMINI SKIN ANALYSIS WITH MULTI-PASS
445
+ # =========================
446
def analyze_skin_complete(
    image_path: str,
    use_cache: bool = True,
    max_retries: int = 3,
    enable_multipass: Optional[bool] = None
) -> Optional[dict]:
    """
    Perform comprehensive skin analysis with optional multi-pass validation.

    Args:
        image_path: Path to the image file.
        use_cache: Whether to reuse/store cached results keyed by image hash.
        max_retries: Maximum retry attempts per API call.
        enable_multipass: Override for the global ENABLE_MULTI_PASS setting;
            None (the default) defers to the global flag.

    Returns:
        Analysis dictionary with normalized, consistent scores, or None on failure.
    """
    # Use the global setting unless explicitly overridden by the caller.
    if enable_multipass is None:
        enable_multipass = ENABLE_MULTI_PASS

    # Cache key based on image hash; the multipass flag is part of the key so
    # single-pass and averaged results never shadow each other.
    cache_key = f"complete_v3_mp{int(enable_multipass)}_{get_image_hash(image_path)}"
    if use_cache and cache_key in _analysis_cache:
        print("✓ Using cached analysis results")
        return _analysis_cache[cache_key]

    def _call():
        # Thin closure so retry_with_backoff can re-invoke the API call.
        return _perform_single_analysis(image_path)

    if enable_multipass:
        # Multi-pass validation: run several analyses and average them.
        print(f"🔄 Running {VALIDATION_PASSES}-pass analysis for consistency...")

        results = []
        for i in range(VALIDATION_PASSES):
            try:
                result = retry_with_backoff(_call, max_retries=max_retries)
                if result:
                    results.append(result)
                    print(f"  ✓ Pass {i+1}/{VALIDATION_PASSES} completed")

                # Small delay between passes to avoid rate limiting.
                if i < VALIDATION_PASSES - 1:
                    time.sleep(0.5)
            except Exception as e:
                # A failed pass is tolerated as long as at least one succeeds.
                print(f"  ⚠️ Pass {i+1} failed: {e}")
                continue

        if not results:
            print(f"❌ All {VALIDATION_PASSES} passes failed")
            log_api_usage(0, 0, success=False)
            return None

        # Variance check on the acne category (historically the noisiest).
        acne_variance = calculate_variance(results, "acne")
        if acne_variance > MAX_VARIANCE_THRESHOLD:
            print(f"⚠️ High variance detected in acne analysis: {acne_variance:.3f}")
            print(f"  Averaging {len(results)} results for improved consistency")
        else:
            print(f"✓ Low variance detected: {acne_variance:.3f} - Results are consistent")

        # Average all successful passes into a single result.
        final_result = average_analyses(results)
    else:
        # Single-pass analysis.
        try:
            final_result = retry_with_backoff(_call, max_retries=max_retries)
        except Exception as e:
            print(f"❌ Analysis failed: {e}")
            log_api_usage(0, 0, success=False)
            return None

    if not final_result:
        return None

    # Apply score normalization to all main categories.
    for category in ["hydration", "pigmentation", "acne", "pores", "wrinkles"]:
        if category in final_result:
            final_result[category] = normalize_category_scores(final_result[category])

    # Cache the final (normalized) result.
    if use_cache:
        _analysis_cache[cache_key] = final_result

    return final_result
537
 
 
538
 
539
  # =========================
540
  # SCORE FUNCTIONS
 
703
  # HIGH-LEVEL ANALYSIS WRAPPER
704
  # =========================
705
  def get_comprehensive_analysis(image_path):
706
+ """
707
+ Get comprehensive skin analysis with all scores and metadata.
708
+ This is the main entry point called by the Flask API.
709
+ """
710
  raw = analyze_skin_complete(image_path)
711
  if not raw:
712
  return None
 
770
  "age_analysis": raw["age_analysis"],
771
  "metadata": {
772
  "analyzed_at": datetime.now().isoformat(),
773
+ "model_used": MODEL_COMBINED,
774
+ "multipass_enabled": ENABLE_MULTI_PASS,
775
+ "validation_passes": VALIDATION_PASSES if ENABLE_MULTI_PASS else 1
776
  }
777
  }
778
+
779
+
780
  # =========================
781
  # HTML REPORT GENERATOR
782
  # =========================
 
815
 
816
  html = html.replace("{{hydration_severity_label}}", analysis["severity_info"]["hydration"]["label"])
817
  html = html.replace("{{hydration_detected_text}}", analysis["severity_info"]["hydration"]["text"])
818
+
819
  # User Info
820
  html = html.replace("{{full_name}}", str(user_info.get("name", "")))
821
  html = html.replace("{{age}}", str(user_info.get("age", "")))
822
  html = html.replace("{{phone}}", str(user_info.get("phone", "")))
823
  html = html.replace("{{gender}}", str(user_info.get("gender", "")))
824
 
 
825
  # Write final HTML
826
  with open(output_path, "w", encoding="utf-8") as f:
827
  f.write(html)
828
 
829
  return output_path
830
+
831
+
832
+ # =========================
833
+ # UTILITY FUNCTIONS
834
+ # =========================
835
def get_usage_statistics():
    """
    Summarize API usage recorded in the module-level _usage_log.

    Returns:
        Dict with total/successful/failed call counts, total cost, total
        tokens, and average cost per successful call. The same keys are
        returned whether or not any calls have been logged, so callers
        can rely on a stable schema.
    """
    if not _usage_log:
        return {
            "total_calls": 0,
            "successful_calls": 0,
            "failed_calls": 0,
            "total_cost": 0.0,
            "total_tokens": 0,
            # Keep the schema identical to the populated branch below.
            "average_cost_per_call": 0,
        }

    successful = [log for log in _usage_log if log["success"]]
    failed = [log for log in _usage_log if not log["success"]]

    return {
        "total_calls": len(_usage_log),
        "successful_calls": len(successful),
        "failed_calls": len(failed),
        "total_cost": sum(log["cost"] for log in _usage_log),
        "total_tokens": sum(log["tokens"] for log in _usage_log),
        # Only successful calls contribute to the per-call average.
        "average_cost_per_call": sum(log["cost"] for log in successful) / len(successful) if successful else 0,
    }
857
+
858
+
859
def clear_cache():
    """Drop every entry from the in-memory analysis cache."""
    _analysis_cache.clear()
    print("✓ Analysis cache cleared")
+ print("✓ Analysis cache cleared")