"""Gemini-powered skin analysis with multi-pass consistency validation.

Pipeline: send a face image to the Gemini API with an objective scoring
prompt, optionally run several passes and average them for consistency,
normalize the raw 0-1 scores, derive user-facing scores/grades/severity
labels, and render an HTML report from a template.
"""

import hashlib
import json
import os
import time
from datetime import datetime
from mimetypes import guess_type
from typing import Any, Dict, List, Optional

import numpy as np

# =========================
# CONFIGURATION
# =========================
API_KEY = os.getenv("GEMINI_API_KEY")
MODEL_COMBINED = "models/gemini-2.5-flash"

# Consistency settings
ENABLE_MULTI_PASS = True        # Set to False to disable multi-pass validation
VALIDATION_PASSES = 3           # Number of analyses to run for averaging
MAX_VARIANCE_THRESHOLD = 0.15   # Maximum allowed variance before warning

# In-memory result cache (keyed by image hash) and API usage log.
_analysis_cache: Dict[str, dict] = {}
_usage_log: List[dict] = []


# =========================
# CLIENT / HELPERS
# =========================
def load_client():
    """Create a Gemini API client.

    The SDK is imported lazily so the pure scoring/report utilities in this
    module can be imported (and unit-tested) without google-genai installed.
    """
    from google import genai  # third-party; only needed for actual API calls
    return genai.Client(api_key=API_KEY)


def get_image_hash(image_path: str) -> str:
    """Return the MD5 hex digest of the image file.

    MD5 is used purely as a cache key here, not for security.
    """
    with open(image_path, "rb") as f:
        return hashlib.md5(f.read()).hexdigest()


def log_api_usage(tokens_used: int, cost: float, success: bool = True) -> None:
    """Record one API call in memory and append it to ``api_usage.log``."""
    _usage_log.append({
        "timestamp": datetime.now().isoformat(),
        "tokens": tokens_used,
        "cost": cost,
        "success": success
    })
    with open("api_usage.log", "a") as f:
        f.write(f"{datetime.now()},{tokens_used},{cost},{success}\n")


def retry_with_backoff(func, max_retries: int = 3, initial_delay: float = 2.0):
    """Call ``func`` with exponential backoff on transient API errors.

    Retryable errors are recognized by substring matching on the exception
    text (5xx codes, timeouts, overload/unavailable messages). Non-retryable
    errors are re-raised immediately; the last retryable failure is also
    re-raised after ``max_retries`` attempts.
    """
    delay = initial_delay
    for attempt in range(max_retries):
        try:
            return func()
        except Exception as e:
            error_msg = str(e).lower()
            retryable = any(k in error_msg for k in [
                "500", "503", "502", "504", "timeout", "overload",
                "unavailable", "internal error", "service unavailable"
            ])
            if retryable and attempt < max_retries - 1:
                wait = delay * (2 ** attempt)
                print(f"⚠️ Attempt {attempt+1}/{max_retries} failed: {e}")
                print(f"   Retrying in {wait:.1f}s...")
                time.sleep(wait)
            elif attempt == max_retries - 1:
                print(f"❌ All {max_retries} attempts failed: {e}")
                raise
            else:
                # Non-retryable error, raise immediately
                raise


# =========================
# NORMALIZATION FUNCTIONS
# =========================
def normalize_score(value: float, threshold: float = 0.05) -> float:
    """Normalize a score to reduce noise and improve consistency.

    Args:
        value: Raw score between 0.0 and 1.0
        threshold: Values below this are set to 0.0 (and symmetrically,
            values above ``1 - threshold`` are set to 1.0)

    Returns:
        Normalized score rounded to 2 decimal places
    """
    if value < threshold:
        return 0.0
    elif value > (1.0 - threshold):
        return 1.0
    # Round to 2 decimal places for consistency
    return round(value, 2)


def normalize_category_scores(category_data: dict, threshold: float = 0.05) -> dict:
    """Apply normalization to all numeric scores in a category dict.

    Non-numeric values are passed through unchanged.
    """
    normalized = {}
    for key, value in category_data.items():
        if isinstance(value, (int, float)):
            normalized[key] = normalize_score(float(value), threshold)
        else:
            normalized[key] = value
    return normalized


# =========================
# AVERAGING FUNCTIONS
# =========================
def average_analyses(results: List[dict]) -> Optional[dict]:
    """Average multiple analysis results for improved consistency.

    Numeric fields in the five score categories are mean-averaged (3 d.p.),
    the integer age fields are mean-averaged and rounded, and the
    Fitzpatrick type uses the mode (most common value).

    Args:
        results: List of analysis dictionaries

    Returns:
        Averaged analysis dictionary, or None if ``results`` is empty
    """
    if not results:
        return None
    if len(results) == 1:
        return results[0]

    # Initialize with first result structure (deep copy via JSON round-trip)
    averaged = json.loads(json.dumps(results[0]))

    # Average each numeric field in main categories
    for category in ["hydration", "pigmentation", "acne", "pores", "wrinkles"]:
        if category in averaged:
            for field in averaged[category]:
                if isinstance(averaged[category][field], (int, float)):
                    values = [
                        r[category][field] for r in results
                        if category in r and field in r[category]
                    ]
                    if values:
                        averaged[category][field] = round(sum(values) / len(values), 3)

    # Average age analysis (integers)
    if "age_analysis" in averaged:
        for field in ["eye_age", "skin_age"]:
            if field in averaged["age_analysis"]:
                values = [
                    r["age_analysis"][field] for r in results
                    if "age_analysis" in r and field in r["age_analysis"]
                ]
                if values:
                    averaged["age_analysis"][field] = round(sum(values) / len(values))

        # Fitzpatrick type - use mode (most common value)
        if "fitzpatrick_type" in averaged["age_analysis"]:
            values = [
                r["age_analysis"]["fitzpatrick_type"] for r in results
                if "age_analysis" in r and "fitzpatrick_type" in r["age_analysis"]
            ]
            if values:
                averaged["age_analysis"]["fitzpatrick_type"] = max(set(values), key=values.count)

    return averaged


def calculate_variance(results: List[dict], category: str) -> float:
    """Calculate variance for a specific category across multiple results.

    Args:
        results: List of analysis dictionaries
        category: Category name (e.g., "acne", "wrinkles")

    Returns:
        Maximum per-field variance across the category, or 0.0 when there
        are fewer than two comparable results
    """
    if len(results) < 2:
        return 0.0

    category_values = []
    for result in results:
        if category in result:
            values = [
                v for v in result[category].values()
                if isinstance(v, (int, float))
            ]
            category_values.append(values)

    if not category_values or len(category_values) < 2:
        return 0.0

    # Guard against ragged input: np.var needs a rectangular array, and
    # results may disagree on which fields are numeric/present.
    expected_len = len(category_values[0])
    if expected_len == 0 or any(len(v) != expected_len for v in category_values):
        return 0.0

    # Calculate variance for each field, report the worst one
    variances = np.var(category_values, axis=0)
    return float(np.max(variances))


# =========================
# ENHANCED PROMPT
# =========================
def get_analysis_prompt() -> str:
    """Returns the enhanced prompt with objective scoring criteria."""
    return """
You are an advanced AI skin analysis system. Analyze the face in this image
comprehensively using OBJECTIVE, CONSISTENT criteria.

Return STRICT JSON with ALL these fields (use exact field names):

{
  "hydration": {
    "texture": float (0.0-1.0, smoothness level),
    "radiance": float (0.0-1.0, natural glow),
    "flakiness": float (0.0-1.0, visible dry flakes - higher is worse),
    "oil_balance": float (0.0-1.0, healthy surface moisture),
    "fine_lines": float (0.0-1.0, dryness lines - higher is worse)
  },
  "pigmentation": {
    "dark_spots": float (0.0-1.0, severity of dark spots),
    "hyperpigmentation": float (0.0-1.0, overall hyperpigmentation),
    "under_eye_pigmentation": float (0.0-1.0, dark circles),
    "redness": float (0.0-1.0, skin redness),
    "melanin_unevenness": float (0.0-1.0, uneven melanin distribution),
    "uv_damage": float (0.0-1.0, visible UV damage),
    "overall_evenness": float (0.0-1.0, overall skin tone evenness)
  },
  "acne": {
    "active_acne": float (0.0-1.0, active breakouts),
    "comedones": float (0.0-1.0, blackheads/whiteheads),
    "cystic_acne": float (0.0-1.0, deep cystic acne),
    "inflammation": float (0.0-1.0, inflammatory response),
    "oiliness": float (0.0-1.0, excess sebum production),
    "scarring": float (0.0-1.0, acne scarring),
    "congestion": float (0.0-1.0, pore congestion)
  },
  "pores": {
    "visibility": float (0.0-1.0, how visible/prominent pores are),
    "size": float (0.0-1.0, average pore size - larger is worse),
    "enlarged_pores": float (0.0-1.0, percentage of enlarged pores),
    "clogged_pores": float (0.0-1.0, degree of pore clogging),
    "texture_roughness": float (0.0-1.0, roughness due to pores),
    "t_zone_prominence": float (0.0-1.0, pore visibility in T-zone),
    "cheek_prominence": float (0.0-1.0, pore visibility on cheeks)
  },
  "wrinkles": {
    "forehead_lines": float (0.0-1.0, horizontal forehead wrinkles),
    "frown_lines": float (0.0-1.0, glabellar lines between eyebrows),
    "crows_feet": float (0.0-1.0, eye corner wrinkles),
    "nasolabial_folds": float (0.0-1.0, nose-to-mouth lines),
    "marionette_lines": float (0.0-1.0, mouth-to-chin lines),
    "under_eye_wrinkles": float (0.0-1.0, fine lines under eyes),
    "lip_lines": float (0.0-1.0, perioral wrinkles around mouth),
    "neck_lines": float (0.0-1.0, horizontal neck wrinkles if visible),
    "overall_severity": float (0.0-1.0, overall wrinkle severity),
    "depth": float (0.0-1.0, average depth of wrinkles),
    "dynamic_wrinkles": float (0.0-1.0, expression-related wrinkles),
    "static_wrinkles": float (0.0-1.0, wrinkles at rest)
  },
  "age_analysis": {
    "fitzpatrick_type": integer (1-6, skin type based on melanin),
    "eye_age": integer (estimated age of eye area),
    "skin_age": integer (estimated overall skin age)
  }
}

═══════════════════════════════════════════════════════════════
ACNE ANALYSIS - OBJECTIVE COUNTING CRITERIA (CRITICAL FOR CONSISTENCY)
═══════════════════════════════════════════════════════════════

**ACTIVE_ACNE** - Count visible inflamed red/pink lesions (pustules, papules):
• 0.00-0.15: 0 lesions (clear skin)
• 0.15-0.30: 1-2 small lesions
• 0.30-0.50: 3-5 lesions
• 0.50-0.70: 6-10 lesions
• 0.70-0.85: 11-20 lesions
• 0.85-1.00: 20+ lesions or widespread

**COMEDONES** - Count visible blackheads/whiteheads (small dark or white bumps):
• 0.00-0.15: 0-3 comedones
• 0.15-0.30: 4-8 comedones
• 0.30-0.50: 9-15 comedones
• 0.50-0.70: 16-25 comedones
• 0.70-0.85: 26-40 comedones
• 0.85-1.00: 40+ comedones

**CYSTIC_ACNE** - Count deep, large, painful-looking nodules or cysts:
• 0.00-0.20: None visible
• 0.20-0.40: 1 small nodule
• 0.40-0.60: 2-3 nodules or 1 large cyst
• 0.60-0.80: 4-6 nodules/cysts
• 0.80-1.00: 7+ nodules or very large/multiple cysts

**INFLAMMATION** - Assess redness, swelling around lesions:
• 0.00-0.20: No redness, minimal inflammation
• 0.20-0.40: Slight pink around 1-2 spots
• 0.40-0.60: Moderate redness around several lesions
• 0.60-0.80: Strong redness, visible swelling
• 0.80-1.00: Severe widespread inflammation

**OILINESS** - Assess visible shine/oily appearance:
• 0.00-0.25: Matte, no visible oil
• 0.25-0.50: Slight shine in T-zone
• 0.50-0.75: Noticeable shine on forehead, nose, chin
• 0.75-1.00: Very shiny/greasy appearance overall

**SCARRING** - Count visible acne scars (pitted, raised, or discolored):
• 0.00-0.20: No visible scars
• 0.20-0.40: 1-3 minor scars
• 0.40-0.60: 4-8 visible scars
• 0.60-0.80: 9-15 scars
• 0.80-1.00: 15+ scars or severe pitting

**CONGESTION** - Overall appearance of clogged, rough texture:
• 0.00-0.25: Smooth, clear pores
• 0.25-0.50: Some roughness, minor congestion
• 0.50-0.75: Noticeable rough texture, many clogged pores
• 0.75-1.00: Severely congested, bumpy texture

═══════════════════════════════════════════════════════════════
CONSISTENCY ENFORCEMENT RULES
═══════════════════════════════════════════════════════════════

1. **COUNT, DON'T ESTIMATE**: Scan the entire face systematically and COUNT
   actual visible features
2. **USE THE SAME SCALE EVERY TIME**: Always use the exact ranges above
3. **BE CONSERVATIVE**: If uncertain between two ranges, choose the LOWER score
4. **ZERO MEANS NONE**: Use 0.0 only when a feature is completely absent
5. **SYSTEMATIC SCANNING**:
   - Divide face into zones: forehead, cheeks (left/right), nose, chin
   - Count features in each zone, then sum
   - This ensures you don't miss or double-count features
6. **IGNORE LIGHTING VARIATIONS**: Base assessment on actual skin features,
   not shadows or highlights
7. **ONE ANALYSIS = ONE RULESET**: Never change your interpretation
   mid-analysis

═══════════════════════════════════════════════════════════════
ADDITIONAL DETAILED GUIDELINES
═══════════════════════════════════════════════════════════════

**PORES:**
- Scan T-zone (forehead, nose, chin) separately from cheeks
- Small pores (barely visible) = 0.0-0.3
- Medium pores (clearly visible) = 0.3-0.6
- Large pores (very prominent) = 0.6-1.0

**WRINKLES:**
- Fine lines (only visible up close) = 0.0-0.3
- Moderate wrinkles (clearly visible) = 0.3-0.6
- Deep wrinkles (with visible depth/shadows) = 0.6-1.0
- Distinguish dynamic (expression) vs static (at rest)

**PIGMENTATION:**
- Count distinct dark spots
- Assess overall tone evenness across face
- Under-eye darkness: compare to surrounding skin tone

**HYDRATION:**
- Flakiness: visible dry patches or peeling
- Radiance: natural healthy glow vs dull appearance
- Fine lines: thin lines from dehydration (not age)

═══════════════════════════════════════════════════════════════
CRITICAL OUTPUT RULES
═══════════════════════════════════════════════════════════════

- Return ONLY raw JSON, no markdown formatting
- No explanations, no text outside JSON
- All float values must be between 0.0 and 1.0
- All integer values must be positive integers
- Base analysis ONLY on visible features in the image
- Do NOT guess or infer anything not visible
- Ensure all fields are present in the response
- If a feature is not visible or applicable, use 0.0
- Round all floats to 2 decimal places maximum
"""


# =========================
# SINGLE ANALYSIS CALL
# =========================
def _perform_single_analysis(image_path: str) -> dict:
    """Perform a single analysis call to the Gemini API.

    Reads the image, sends it with the scoring prompt at temperature 0,
    strips any markdown fencing from the reply, parses it as JSON, and
    logs estimated token usage/cost.

    Raises:
        RuntimeError: if the response is empty, has no text, or is not JSON.
    """
    from google.genai.types import Part  # third-party; API-call path only

    client = load_client()

    # Read image bytes
    with open(image_path, "rb") as f:
        image_bytes = f.read()

    mime_type, _ = guess_type(image_path)
    mime_type = mime_type or "image/jpeg"
    image_part = Part.from_bytes(data=image_bytes, mime_type=mime_type)

    prompt = get_analysis_prompt()

    # --- API CALL WITH TIMING ---
    start_time = time.time()
    response = client.models.generate_content(
        model=MODEL_COMBINED,
        contents=[prompt, image_part],
        config={
            "temperature": 0,
            "top_p": 1,
            "top_k": 1,
            # Add seed if/when supported: "seed": 42
        }
    )
    elapsed = time.time() - start_time

    # Clean response text
    if not response or not response.candidates:
        raise RuntimeError("Unable to process image at this time")

    parts = response.candidates[0].content.parts
    text_chunks = [p.text for p in parts if hasattr(p, "text") and p.text]
    if not text_chunks:
        raise RuntimeError("Unable to process image at this time")

    clean_text = "\n".join(text_chunks)
    clean_text = clean_text.replace("```json", "").replace("```", "").strip()

    # Convert to dict
    try:
        result = json.loads(clean_text)
    except json.JSONDecodeError:
        raise RuntimeError("Unable to process image at this time")

    # Estimate token usage (rough heuristic: ~4 chars per token + image cost)
    estimated_tokens = len(prompt) / 4 + len(clean_text) / 4 + 1000
    cost = (estimated_tokens / 1_000_000) * 0.075
    log_api_usage(int(estimated_tokens), cost, success=True)

    print(f"✓ Analysis completed in {elapsed:.2f}s (est. cost: ${cost:.6f})")
    return result


# =========================
# MAIN GEMINI SKIN ANALYSIS WITH MULTI-PASS
# =========================
def analyze_skin_complete(
    image_path: str,
    use_cache: bool = True,
    max_retries: int = 3,
    enable_multipass: Optional[bool] = None
):
    """Perform comprehensive skin analysis with optional multi-pass validation.

    Args:
        image_path: Path to image file
        use_cache: Whether to use cached results
        max_retries: Maximum retry attempts per API call
        enable_multipass: Override global ENABLE_MULTI_PASS setting

    Returns:
        Analysis dictionary with normalized, consistent scores, or None on
        total failure.
    """
    # Use global setting if not explicitly overridden
    if enable_multipass is None:
        enable_multipass = ENABLE_MULTI_PASS

    # Cache key based on image hash (versioned so schema changes miss old entries)
    cache_key = f"complete_v3_mp{int(enable_multipass)}_{get_image_hash(image_path)}"
    if use_cache and cache_key in _analysis_cache:
        print("✓ Using cached analysis results")
        return _analysis_cache[cache_key]

    def _call():
        return _perform_single_analysis(image_path)

    results = []

    if enable_multipass:
        # Multi-pass validation
        print(f"🔄 Running {VALIDATION_PASSES}-pass analysis for consistency...")
        for i in range(VALIDATION_PASSES):
            try:
                result = retry_with_backoff(_call, max_retries=max_retries)
                if result:
                    results.append(result)
                    print(f"  ✓ Pass {i+1}/{VALIDATION_PASSES} completed")
                # Small delay between passes to avoid rate limiting
                if i < VALIDATION_PASSES - 1:
                    time.sleep(0.5)
            except Exception as e:
                print(f"  ⚠️ Pass {i+1} failed: {e}")
                continue

        if not results:
            print(f"❌ All {VALIDATION_PASSES} passes failed")
            log_api_usage(0, 0, success=False)
            return None

        # Calculate variance for acne category (the noisiest one)
        acne_variance = calculate_variance(results, "acne")
        if acne_variance > MAX_VARIANCE_THRESHOLD:
            print(f"⚠️ High variance detected in acne analysis: {acne_variance:.3f}")
            print(f"   Averaging {len(results)} results for improved consistency")
        else:
            print(f"✓ Low variance detected: {acne_variance:.3f} - Results are consistent")

        # Average all results
        final_result = average_analyses(results)
    else:
        # Single-pass analysis
        try:
            final_result = retry_with_backoff(_call, max_retries=max_retries)
        except Exception as e:
            print(f"❌ Analysis failed: {e}")
            log_api_usage(0, 0, success=False)
            return None

    if not final_result:
        return None

    # Apply normalization to all categories
    for category in ["hydration", "pigmentation", "acne", "pores", "wrinkles"]:
        if category in final_result:
            final_result[category] = normalize_category_scores(final_result[category])

    # Cache the final result
    if use_cache:
        _analysis_cache[cache_key] = final_result

    return final_result


# =========================
# SCORE FUNCTIONS
# =========================
# Each compute_*_score maps a category dict of 0-1 floats to a weighted
# 0-100 severity/quality score. Missing or non-numeric fields yield None.

def compute_hydration_score(h) -> Optional[float]:
    """Weighted hydration quality score (higher = better hydrated)."""
    if not h:
        return None
    try:
        return round(
            h["radiance"]*30 + (1-h["flakiness"])*25 + (1-h["fine_lines"])*20 +
            h["oil_balance"]*15 + h["texture"]*10, 1
        )
    except (KeyError, TypeError):
        return None


def compute_pigmentation_score(p) -> Optional[float]:
    """Weighted pigmentation severity score (higher = more pigmentation)."""
    if not p:
        return None
    try:
        return round(
            p["hyperpigmentation"]*30 + p["dark_spots"]*25 +
            p["melanin_unevenness"]*20 + p["under_eye_pigmentation"]*10 +
            p["uv_damage"]*10 + p["redness"]*5, 1
        )
    except (KeyError, TypeError):
        return None


def compute_acne_score(a) -> Optional[float]:
    """Weighted acne severity score (higher = more severe)."""
    if not a:
        return None
    try:
        return round(
            a["active_acne"]*40 + a["comedones"]*20 + a["inflammation"]*15 +
            a["cystic_acne"]*15 + a["scarring"]*10, 1
        )
    except (KeyError, TypeError):
        return None


def compute_pores_score(p) -> Optional[float]:
    """Weighted pore prominence score (higher = more prominent)."""
    if not p:
        return None
    try:
        return round(
            p["visibility"]*25 + p["size"]*25 + p["enlarged_pores"]*20 +
            p["clogged_pores"]*15 + p["texture_roughness"]*15, 1
        )
    except (KeyError, TypeError):
        return None


def compute_wrinkles_score(w) -> Optional[float]:
    """Weighted wrinkle severity score (higher = more severe)."""
    if not w:
        return None
    try:
        return round(
            w["overall_severity"]*30 + w["depth"]*20 + w["forehead_lines"]*10 +
            w["crows_feet"]*10 + w["nasolabial_folds"]*10 + w["frown_lines"]*8 +
            w["static_wrinkles"]*7 + w["under_eye_wrinkles"]*5, 1
        )
    except (KeyError, TypeError):
        return None


# =========================
# GRADES
# =========================
# Grade functions map a 0-100 severity percentage to a clinical-style label.

def grade_wrinkles(p) -> str:
    if p <= 5:
        return "Grade 1 (Absent or barely visible fine lines)"
    elif p <= 25:
        return "Grade 2 (Shallow wrinkles visible only with muscle movement)"
    elif p <= 50:
        return "Grade 3 (Moderately deep lines, visible at rest and movement)"
    elif p <= 75:
        return "Grade 4 (Deep, persistent wrinkles with visible folds)"
    else:
        return "Grade 5 (Very deep wrinkles, pronounced folds)"


def grade_acne(p) -> str:
    if p <= 25:
        return "Grade 1 (Mostly comedones, little/no inflammation)"
    elif p <= 50:
        return "Grade 2 (Papules/pustules with mild inflammation)"
    elif p <= 75:
        return "Grade 3 (Numerous papules, pustules, occasional nodules)"
    else:
        return "Grade 4 (Severe nodules, cysts, widespread scarring)"


def grade_pigmentation(p) -> str:
    if p == 0:
        return "Grade 0 (Normal skin tone with no visible pigmentation)"
    elif p <= 25:
        return "Grade 1 (Mild brown patches or spots)"
    elif p <= 50:
        return "Grade 2 (Moderate uneven tone)"
    else:
        return "Grade 3 (Severe pigmentation covering large areas)"


def grade_pores(p) -> str:
    if p == 0:
        return "Grade 0 (Barely visible pores)"
    elif p <= 25:
        return "Grade 1 (Mild pore visibility)"
    elif p <= 50:
        return "Grade 2 (Noticeable pores)"
    else:
        return "Grade 3 (Large, prominent pores)"


def grade_hydration(p) -> str:
    if p <= 33:
        return "Grade 1 (Well hydrated)"
    elif p <= 66:
        return "Grade 2 (Moderate dehydration)"
    else:
        return "Grade 3 (Severe dehydration)"


def severity_label(percent) -> str:
    """Map a 0-100 severity percentage to Mild/Moderate/Severe."""
    if percent <= 33:
        return "Mild"
    elif percent <= 66:
        return "Moderate"
    else:
        return "Severe"


# =========================
# DETECTED TEXT
# =========================
def build_detected_text(category: str, severity: str) -> str:
    """Return the user-facing description for a category at a severity level.

    Unknown category/severity combinations return an empty string.
    """
    s = severity.lower()
    mappings = {
        "wrinkles": {
            "mild": "Fine surface lines are present but minimal.",
            "moderate": "Visible wrinkles are noticeable at rest and with expression.",
            "severe": "Deep and prominent wrinkles detected across multiple regions."
        },
        "acne": {
            "mild": "Almost no breakouts or comedones with minimal inflammation.",
            "moderate": "Inflamed acne lesions are visibly present.",
            "severe": "Severe acne with widespread inflammation and deeper lesions."
        },
        "pores": {
            "mild": "Slight pore visibility with minimal enlargement.",
            "moderate": "Noticeable pore enlargement across key facial zones.",
            "severe": "Strong pore prominence with significant enlargement."
        },
        "pigmentation": {
            "mild": "Light unevenness or a few small dark spots.",
            "moderate": "Moderate pigmentation patches are visibly noticeable.",
            "severe": "Widespread pigmentation with strong uneven tone."
        },
        "hydration": {
            "mild": "Skin appears well-hydrated with minimal dryness.",
            "moderate": "Moderate dryness visible with uneven moisture retention.",
            "severe": "Significant dehydration signs with flakiness or dull texture."
        }
    }
    return mappings.get(category, {}).get(s, "")


# =========================
# HIGH-LEVEL ANALYSIS WRAPPER
# =========================
def get_comprehensive_analysis(image_path):
    """Get comprehensive skin analysis with all scores and metadata.

    This is the main entry point called by the Flask API. Returns None when
    the underlying analysis fails or comes back with missing score fields.
    """
    raw = analyze_skin_complete(image_path)
    if not raw:
        return None

    hydration = compute_hydration_score(raw.get("hydration"))
    pig_sev = compute_pigmentation_score(raw.get("pigmentation"))
    acne_sev = compute_acne_score(raw.get("acne"))
    pores_sev = compute_pores_score(raw.get("pores"))
    wrinkles_sev = compute_wrinkles_score(raw.get("wrinkles"))

    # Guard: any missing category would make the 100-x arithmetic below
    # raise TypeError on None; treat it as a failed analysis instead.
    if None in (hydration, pig_sev, acne_sev, pores_sev, wrinkles_sev):
        print("❌ Analysis result is missing required score fields")
        return None

    # FRONTEND SCORES (Higher is better)
    pig = 100 - pig_sev
    acne = 100 - acne_sev
    pores = 100 - pores_sev
    wrinkles = 100 - wrinkles_sev

    # BACKEND SEVERITY (Higher is worse)
    sev_pig = 100 - pig
    sev_acne = 100 - acne
    sev_pores = 100 - pores
    sev_wrinkles = 100 - wrinkles
    sev_hydration = 100 - hydration

    grades = {
        "hydration": grade_hydration(sev_hydration),
        "pigmentation": grade_pigmentation(sev_pig),
        "acne": grade_acne(sev_acne),
        "pores": grade_pores(sev_pores),
        "wrinkles": grade_wrinkles(sev_wrinkles),
    }

    severity_output = {
        "wrinkles": {
            "label": severity_label(sev_wrinkles),
            "text": build_detected_text("wrinkles", severity_label(sev_wrinkles))
        },
        "acne": {
            "label": severity_label(sev_acne),
            "text": build_detected_text("acne", severity_label(sev_acne))
        },
        "pores": {
            "label": severity_label(sev_pores),
            "text": build_detected_text("pores", severity_label(sev_pores))
        },
        "pigmentation": {
            "label": severity_label(sev_pig),
            "text": build_detected_text("pigmentation", severity_label(sev_pig))
        },
        "hydration": {
            "label": severity_label(sev_hydration),
            "text": build_detected_text("hydration", severity_label(sev_hydration))
        }
    }

    return {
        "raw_data": raw,
        "scores": {
            "hydration": hydration,
            "pigmentation": pig,
            "acne": acne,
            "pores": pores,
            "wrinkles": wrinkles
        },
        "grades": grades,
        "severity_info": severity_output,
        "age_analysis": raw["age_analysis"],
        "metadata": {
            "analyzed_at": datetime.now().isoformat(),
            "model_used": MODEL_COMBINED,
            "multipass_enabled": ENABLE_MULTI_PASS,
            "validation_passes": VALIDATION_PASSES if ENABLE_MULTI_PASS else 1
        }
    }


# =========================
# HTML REPORT GENERATOR
# =========================
def generate_html_report(analysis, user_info, output_path="new_report.html"):
    """Inject analysis values into the HTML template and write the report.

    Reads ``report_template.html`` from the working directory, substitutes
    all ``{{placeholder}}`` tokens, writes the result to ``output_path``,
    and returns that path.
    """
    with open("report_template.html", "r", encoding="utf-8") as f:
        html = f.read()

    # Scores
    html = html.replace("{{wrinkles_score}}", str(analysis["scores"]["wrinkles"]))
    html = html.replace("{{acne_score}}", str(analysis["scores"]["acne"]))
    html = html.replace("{{pores_score}}", str(analysis["scores"]["pores"]))
    html = html.replace("{{pigmentation_score}}", str(analysis["scores"]["pigmentation"]))
    html = html.replace("{{hydration_score}}", str(analysis["scores"]["hydration"]))

    # Grades
    html = html.replace("{{wrinkles_grade}}", analysis["grades"]["wrinkles"])
    html = html.replace("{{acne_grade}}", analysis["grades"]["acne"])
    html = html.replace("{{pores_grade}}", analysis["grades"]["pores"])
    html = html.replace("{{pigmentation_grade}}", analysis["grades"]["pigmentation"])
    html = html.replace("{{hydration_grade}}", analysis["grades"]["hydration"])

    # Severity labels + text
    html = html.replace("{{wrinkles_severity_label}}", analysis["severity_info"]["wrinkles"]["label"])
    html = html.replace("{{wrinkles_detected_text}}", analysis["severity_info"]["wrinkles"]["text"])
    html = html.replace("{{acne_severity_label}}", analysis["severity_info"]["acne"]["label"])
    html = html.replace("{{acne_detected_text}}", analysis["severity_info"]["acne"]["text"])
    html = html.replace("{{pores_severity_label}}", analysis["severity_info"]["pores"]["label"])
    html = html.replace("{{pores_detected_text}}", analysis["severity_info"]["pores"]["text"])
    html = html.replace("{{pig_severity_label}}", analysis["severity_info"]["pigmentation"]["label"])
    html = html.replace("{{pig_detected_text}}", analysis["severity_info"]["pigmentation"]["text"])
    html = html.replace("{{hydration_severity_label}}", analysis["severity_info"]["hydration"]["label"])
    html = html.replace("{{hydration_detected_text}}", analysis["severity_info"]["hydration"]["text"])

    # User Info
    html = html.replace("{{full_name}}", str(user_info.get("name", "")))
    html = html.replace("{{age}}", str(user_info.get("age", "")))
    html = html.replace("{{phone}}", str(user_info.get("phone", "")))
    html = html.replace("{{gender}}", str(user_info.get("gender", "")))

    # Write final HTML
    with open(output_path, "w", encoding="utf-8") as f:
        f.write(html)

    return output_path


# =========================
# UTILITY FUNCTIONS
# =========================
def get_usage_statistics():
    """Get aggregate API usage statistics from the in-memory log."""
    if not _usage_log:
        return {
            "total_calls": 0,
            "successful_calls": 0,
            "failed_calls": 0,
            "total_cost": 0.0,
            "total_tokens": 0
        }

    successful = [log for log in _usage_log if log["success"]]
    failed = [log for log in _usage_log if not log["success"]]

    return {
        "total_calls": len(_usage_log),
        "successful_calls": len(successful),
        "failed_calls": len(failed),
        "total_cost": sum(log["cost"] for log in _usage_log),
        "total_tokens": sum(log["tokens"] for log in _usage_log),
        "average_cost_per_call":
            sum(log["cost"] for log in successful) / len(successful) if successful else 0
    }


def clear_cache():
    """Clear the analysis cache."""
    global _analysis_cache
    _analysis_cache = {}
    print("✓ Analysis cache cleared")