LogicGoInfotechSpaces commited on
Commit
5b70d76
·
verified ·
1 Parent(s): 7e05725

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1424 -5
app.py CHANGED
@@ -1411,6 +1411,1425 @@
1411
 
1412
 
1413
  # --------------------- List Images Endpoint ---------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1414
  import os
1415
  os.environ["OMP_NUM_THREADS"] = "1"
1416
  import shutil
@@ -1483,13 +2902,15 @@ COLLAGE_MAKER_DB_URL = os.getenv("COLLAGE_MAKER_DB_URL")
1483
  collage_maker_client = None
1484
  collage_maker_db = None
1485
  collage_media_clicks_col = None
 
1486
  if COLLAGE_MAKER_DB_URL:
1487
  try:
1488
  collage_maker_client = AsyncIOMotorClient(COLLAGE_MAKER_DB_URL)
1489
  collage_maker_db = collage_maker_client.adminPanel
1490
  collage_media_clicks_col = collage_maker_db.media_clicks
 
1491
  except Exception as e:
1492
- logger.warning(f"MongoDB ai-enhancer connection failed (optional): {e}")
1493
 
1494
  # AI Enhancer DB (optional)
1495
 
@@ -1671,8 +3092,8 @@ def get_app_db_collections(appname: Optional[str] = None):
1671
  app = appname.strip().lower()
1672
 
1673
  if app == "collage-maker":
1674
- if collage_media_clicks_col is not None and subcategories_col is not None:
1675
- return collage_media_clicks_col, subcategories_col
1676
  logger.warning("Collage-maker DB not configured, falling back to admin")
1677
 
1678
  elif app == "ai-enhancer":
@@ -2828,5 +4249,3 @@ fastapi_app = mount_gradio_app(
2828
 
2829
  if __name__ == "__main__":
2830
  uvicorn.run(fastapi_app, host="0.0.0.0", port=7860)
2831
-
2832
-
 
1411
 
1412
 
1413
  # --------------------- List Images Endpoint ---------------------
1414
+ # import os
1415
+ # os.environ["OMP_NUM_THREADS"] = "1"
1416
+ # import shutil
1417
+ # import uuid
1418
+ # import cv2
1419
+ # import numpy as np
1420
+ # import threading
1421
+ # import subprocess
1422
+ # import logging
1423
+ # import tempfile
1424
+ # import sys
1425
+ # from datetime import datetime,timedelta
1426
+ # import tempfile
1427
+ # import insightface
1428
+ # from insightface.app import FaceAnalysis
1429
+ # from huggingface_hub import hf_hub_download
1430
+ # from fastapi import FastAPI, UploadFile, File, HTTPException, Response, Depends, Security, Form
1431
+ # from fastapi.responses import RedirectResponse
1432
+ # from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
1433
+ # from motor.motor_asyncio import AsyncIOMotorClient
1434
+ # from bson import ObjectId
1435
+ # from bson.errors import InvalidId
1436
+ # import httpx
1437
+ # import uvicorn
1438
+ # import gradio as gr
1439
+ # from gradio import mount_gradio_app
1440
+ # from PIL import Image
1441
+ # import io
1442
+ # # from scipy import ndimage
1443
+ # # DigitalOcean Spaces
1444
+ # import boto3
1445
+ # from botocore.client import Config
1446
+ # from typing import Optional
1447
+
1448
+ # # --------------------- Logging ---------------------
1449
+ # logging.basicConfig(level=logging.INFO)
1450
+ # logger = logging.getLogger(__name__)
1451
+
1452
+ # # --------------------- Secrets & Paths ---------------------
1453
+ # REPO_ID = "HariLogicgo/face_swap_models"
1454
+ # MODELS_DIR = "./models"
1455
+ # os.makedirs(MODELS_DIR, exist_ok=True)
1456
+
1457
+ # HF_TOKEN = os.getenv("HF_TOKEN")
1458
+ # API_SECRET_TOKEN = os.getenv("API_SECRET_TOKEN")
1459
+
1460
+ # DO_SPACES_REGION = os.getenv("DO_SPACES_REGION", "blr1")
1461
+ # DO_SPACES_ENDPOINT = f"https://{DO_SPACES_REGION}.digitaloceanspaces.com"
1462
+ # DO_SPACES_KEY = os.getenv("DO_SPACES_KEY")
1463
+ # DO_SPACES_SECRET = os.getenv("DO_SPACES_SECRET")
1464
+ # DO_SPACES_BUCKET = os.getenv("DO_SPACES_BUCKET")
1465
+
1466
+ # # NEW admin DB (with error handling for missing env vars)
1467
+ # ADMIN_MONGO_URL = os.getenv("ADMIN_MONGO_URL")
1468
+ # admin_client = None
1469
+ # admin_db = None
1470
+ # subcategories_col = None
1471
+ # media_clicks_col = None
1472
+ # if ADMIN_MONGO_URL:
1473
+ # try:
1474
+ # admin_client = AsyncIOMotorClient(ADMIN_MONGO_URL)
1475
+ # admin_db = admin_client.adminPanel
1476
+ # subcategories_col = admin_db.subcategories
1477
+ # media_clicks_col = admin_db.media_clicks
1478
+ # except Exception as e:
1479
+ # logger.warning(f"MongoDB admin connection failed (optional): {e}")
1480
+
1481
+ # # Collage Maker DB (optional)
1482
+ # COLLAGE_MAKER_DB_URL = os.getenv("COLLAGE_MAKER_DB_URL")
1483
+ # collage_maker_client = None
1484
+ # collage_maker_db = None
1485
+ # collage_media_clicks_col = None
1486
+ # if COLLAGE_MAKER_DB_URL:
1487
+ # try:
1488
+ # collage_maker_client = AsyncIOMotorClient(COLLAGE_MAKER_DB_URL)
1489
+ # collage_maker_db = collage_maker_client.adminPanel
1490
+ # collage_media_clicks_col = collage_maker_db.media_clicks
1491
+ # except Exception as e:
1492
+ # logger.warning(f"MongoDB ai-enhancer connection failed (optional): {e}")
1493
+
1494
+ # # AI Enhancer DB (optional)
1495
+
1496
+ # AI_ENHANCER_DB_URL = os.getenv("AI_ENHANCER_DB_URL")
1497
+ # ai_enhancer_client = None
1498
+ # ai_enhancer_db = None
1499
+ # ai_enhancer_media_clicks_col = None
1500
+ # ai_enhancer_subcategories_col = None
1501
+
1502
+ # if AI_ENHANCER_DB_URL:
1503
+ # try:
1504
+ # ai_enhancer_client = AsyncIOMotorClient(AI_ENHANCER_DB_URL)
1505
+ # ai_enhancer_db = ai_enhancer_client.test # 🔴 test database
1506
+ # ai_enhancer_media_clicks_col = ai_enhancer_db.media_clicks
1507
+ # ai_enhancer_subcategories_col = ai_enhancer_db.subcategories
1508
+ # except Exception as e:
1509
+ # logger.warning(f"MongoDB ai-enhancer connection failed (optional): {e}")
1510
+
1511
+
1512
+ # def get_media_clicks_collection(appname: Optional[str] = None):
1513
+ # """Return the media clicks collection for the given app (default: main admin)."""
1514
+ # if appname and str(appname).strip().lower() == "collage-maker":
1515
+ # return collage_media_clicks_col
1516
+ # return media_clicks_col
1517
+
1518
+
1519
+ # # OLD logs DB
1520
+ # MONGODB_URL = os.getenv("MONGODB_URL")
1521
+ # client = None
1522
+ # database = None
1523
+
1524
+ # # --------------------- Download Models ---------------------
1525
+ # def download_models():
1526
+ # try:
1527
+ # logger.info("Downloading models...")
1528
+ # inswapper_path = hf_hub_download(
1529
+ # repo_id=REPO_ID,
1530
+ # filename="models/inswapper_128.onnx",
1531
+ # repo_type="model",
1532
+ # local_dir=MODELS_DIR,
1533
+ # token=HF_TOKEN
1534
+ # )
1535
+
1536
+ # buffalo_files = ["1k3d68.onnx", "2d106det.onnx", "genderage.onnx", "det_10g.onnx", "w600k_r50.onnx"]
1537
+ # for f in buffalo_files:
1538
+ # hf_hub_download(
1539
+ # repo_id=REPO_ID,
1540
+ # filename=f"models/buffalo_l/" + f,
1541
+ # repo_type="model",
1542
+ # local_dir=MODELS_DIR,
1543
+ # token=HF_TOKEN
1544
+ # )
1545
+
1546
+ # logger.info("Models downloaded successfully.")
1547
+ # return inswapper_path
1548
+ # except Exception as e:
1549
+ # logger.error(f"Model download failed: {e}")
1550
+ # raise
1551
+
1552
+ # try:
1553
+ # inswapper_path = download_models()
1554
+
1555
+ # # --------------------- Face Analysis + Swapper ---------------------
1556
+ # providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
1557
+ # face_analysis_app = FaceAnalysis(name="buffalo_l", root=MODELS_DIR, providers=providers)
1558
+ # face_analysis_app.prepare(ctx_id=0, det_size=(640, 640))
1559
+ # swapper = insightface.model_zoo.get_model(inswapper_path, providers=providers)
1560
+ # logger.info("Face analysis models loaded successfully")
1561
+ # except Exception as e:
1562
+ # logger.error(f"Failed to initialize face analysis models: {e}")
1563
+ # # Set defaults to prevent crash
1564
+ # inswapper_path = None
1565
+ # face_analysis_app = None
1566
+ # swapper = None
1567
+
1568
+ # # --------------------- CodeFormer ---------------------
1569
+ # CODEFORMER_PATH = "CodeFormer/inference_codeformer.py"
1570
+
1571
+ # def ensure_codeformer():
1572
+ # try:
1573
+ # if not os.path.exists("CodeFormer"):
1574
+ # logger.info("CodeFormer not found, cloning repository...")
1575
+ # subprocess.run("git clone https://github.com/sczhou/CodeFormer.git", shell=True, check=True)
1576
+ # subprocess.run("pip install -r CodeFormer/requirements.txt", shell=True, check=False) # Non-critical deps
1577
+
1578
+ # # Always ensure BasicSR is installed from local directory
1579
+ # # This is needed for Hugging Face Spaces where BasicSR can't be installed from GitHub
1580
+ # if os.path.exists("CodeFormer/basicsr/setup.py"):
1581
+ # logger.info("Installing BasicSR from local directory...")
1582
+ # subprocess.run("python CodeFormer/basicsr/setup.py develop", shell=True, check=True)
1583
+ # logger.info("BasicSR installed successfully")
1584
+
1585
+ # # Install realesrgan after BasicSR is installed (realesrgan depends on BasicSR)
1586
+ # # This must be done after BasicSR installation to avoid PyPI install issues
1587
+ # try:
1588
+ # import realesrgan
1589
+ # logger.info("RealESRGAN already installed")
1590
+ # except ImportError:
1591
+ # logger.info("Installing RealESRGAN...")
1592
+ # subprocess.run("pip install --no-cache-dir realesrgan", shell=True, check=True)
1593
+ # logger.info("RealESRGAN installed successfully")
1594
+
1595
+ # # Download models if CodeFormer exists (fixed logic)
1596
+ # if os.path.exists("CodeFormer"):
1597
+ # try:
1598
+ # subprocess.run("python CodeFormer/scripts/download_pretrained_models.py facelib", shell=True, check=False, timeout=300)
1599
+ # except (subprocess.TimeoutExpired, subprocess.CalledProcessError):
1600
+ # logger.warning("Failed to download facelib models (optional)")
1601
+ # try:
1602
+ # subprocess.run("python CodeFormer/scripts/download_pretrained_models.py CodeFormer", shell=True, check=False, timeout=300)
1603
+ # except (subprocess.TimeoutExpired, subprocess.CalledProcessError):
1604
+ # logger.warning("Failed to download CodeFormer models (optional)")
1605
+ # except Exception as e:
1606
+ # logger.error(f"CodeFormer setup failed: {e}")
1607
+ # logger.warning("Continuing without CodeFormer features...")
1608
+
1609
+ # ensure_codeformer()
1610
+ # # --------------------- FastAPI ---------------------
1611
+ # fastapi_app = FastAPI()
1612
+
1613
+ # @fastapi_app.on_event("startup")
1614
+ # async def startup_db():
1615
+ # global client, database
1616
+ # if MONGODB_URL:
1617
+ # try:
1618
+ # logger.info("Initializing MongoDB for API logs...")
1619
+ # client = AsyncIOMotorClient(MONGODB_URL)
1620
+ # database = client.FaceSwap
1621
+ # logger.info("MongoDB initialized for API logs")
1622
+ # except Exception as e:
1623
+ # logger.warning(f"MongoDB connection failed (optional): {e}")
1624
+ # client = None
1625
+ # database = None
1626
+ # else:
1627
+ # logger.warning("MONGODB_URL not set, skipping MongoDB initialization")
1628
+
1629
+ # @fastapi_app.on_event("shutdown")
1630
+ # async def shutdown_db():
1631
+ # global client, admin_client, collage_maker_client
1632
+ # if client is not None:
1633
+ # client.close()
1634
+ # logger.info("MongoDB connection closed")
1635
+ # if admin_client is not None:
1636
+ # admin_client.close()
1637
+ # logger.info("Admin MongoDB connection closed")
1638
+ # if collage_maker_client is not None:
1639
+ # collage_maker_client.close()
1640
+ # logger.info("Collage Maker MongoDB connection closed")
1641
+
1642
+ # # --------------------- Auth ---------------------
1643
+ # security = HTTPBearer()
1644
+
1645
+ # def verify_token(credentials: HTTPAuthorizationCredentials = Security(security)):
1646
+ # if credentials.credentials != API_SECRET_TOKEN:
1647
+ # raise HTTPException(status_code=401, detail="Invalid or missing token")
1648
+ # return credentials.credentials
1649
+
1650
+ # # --------------------- DB Selector ---------------------
1651
+ # # def get_media_clicks_collection(appname: Optional[str] = None):
1652
+ # # """
1653
+ # # Returns the correct media_clicks collection based on appname.
1654
+ # # Defaults to the primary admin database when no appname is provided
1655
+ # # or when the requested database is unavailable.
1656
+ # # """
1657
+ # # if appname:
1658
+ # # normalized = appname.strip().lower()
1659
+ # # if normalized == "collage-maker":
1660
+ # # if collage_media_clicks_col is not None:
1661
+ # # return collage_media_clicks_col
1662
+ # # logger.warning("COLLAGE_MAKER_DB_URL not configured; falling back to default media_clicks collection")
1663
+ # # return media_clicks_col
1664
+ # def get_app_db_collections(appname: Optional[str] = None):
1665
+ # """
1666
+ # Returns (media_clicks_collection, subcategories_collection)
1667
+ # based on appname.
1668
+ # """
1669
+
1670
+ # if appname:
1671
+ # app = appname.strip().lower()
1672
+
1673
+ # if app == "collage-maker":
1674
+ # if collage_media_clicks_col is not None and subcategories_col is not None:
1675
+ # return collage_media_clicks_col, subcategories_col
1676
+ # logger.warning("Collage-maker DB not configured, falling back to admin")
1677
+
1678
+ # elif app == "ai-enhancer":
1679
+ # if ai_enhancer_media_clicks_col is not None and ai_enhancer_subcategories_col is not None:
1680
+ # return ai_enhancer_media_clicks_col, ai_enhancer_subcategories_col
1681
+ # logger.warning("AI-Enhancer DB not configured, falling back to admin")
1682
+
1683
+ # # default fallback
1684
+ # return media_clicks_col, subcategories_col
1685
+
1686
+
1687
+
1688
+ # # --------------------- Logging API Hits ---------------------
1689
+ # async def log_faceswap_hit(token: str, status: str = "success"):
1690
+ # global database
1691
+ # if database is None:
1692
+ # return
1693
+ # await database.api_logs.insert_one({
1694
+ # "token": token,
1695
+ # "endpoint": "/faceswap",
1696
+ # "status": status,
1697
+ # "timestamp": datetime.utcnow()
1698
+ # })
1699
+
1700
+ # # --------------------- Face Swap Pipeline ---------------------
1701
+ # swap_lock = threading.Lock()
1702
+
1703
+ # def enhance_image_with_codeformer(rgb_img, temp_dir=None):
1704
+ # if temp_dir is None:
1705
+ # temp_dir = os.path.join(tempfile.gettempdir(), f"enhance_{uuid.uuid4().hex[:8]}")
1706
+ # os.makedirs(temp_dir, exist_ok=True)
1707
+
1708
+ # input_path = os.path.join(temp_dir, "input.jpg")
1709
+ # cv2.imwrite(input_path, cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR))
1710
+
1711
+ # python_cmd = sys.executable if sys.executable else "python3"
1712
+ # cmd = (
1713
+ # f"{python_cmd} {CODEFORMER_PATH} "
1714
+ # f"-w 0.7 "
1715
+ # f"--input_path {input_path} "
1716
+ # f"--output_path {temp_dir} "
1717
+ # f"--bg_upsampler realesrgan "
1718
+ # f"--face_upsample"
1719
+ # )
1720
+
1721
+ # result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
1722
+ # if result.returncode != 0:
1723
+ # raise RuntimeError(result.stderr)
1724
+
1725
+ # final_dir = os.path.join(temp_dir, "final_results")
1726
+ # files = [f for f in os.listdir(final_dir) if f.endswith(".png")]
1727
+ # if not files:
1728
+ # raise RuntimeError("No enhanced output")
1729
+
1730
+ # final_path = os.path.join(final_dir, files[0])
1731
+ # enhanced = cv2.imread(final_path)
1732
+ # return cv2.cvtColor(enhanced, cv2.COLOR_BGR2RGB)
1733
+
1734
+ # def multi_face_swap(src_img, tgt_img):
1735
+ # src_bgr = cv2.cvtColor(src_img, cv2.COLOR_RGB2BGR)
1736
+ # tgt_bgr = cv2.cvtColor(tgt_img, cv2.COLOR_RGB2BGR)
1737
+
1738
+ # src_faces = face_analysis_app.get(src_bgr)
1739
+ # tgt_faces = face_analysis_app.get(tgt_bgr)
1740
+
1741
+ # if not src_faces or not tgt_faces:
1742
+ # raise ValueError("No faces detected")
1743
+
1744
+ # def face_sort_key(face):
1745
+ # x1, y1, x2, y2 = face.bbox
1746
+ # area = (x2 - x1) * (y2 - y1)
1747
+ # cx = (x1 + x2) / 2
1748
+ # return (-area, cx)
1749
+
1750
+ # # Split by gender
1751
+ # src_male = [f for f in src_faces if f.gender == 1]
1752
+ # src_female = [f for f in src_faces if f.gender == 0]
1753
+
1754
+ # tgt_male = [f for f in tgt_faces if f.gender == 1]
1755
+ # tgt_female = [f for f in tgt_faces if f.gender == 0]
1756
+
1757
+ # # Sort inside gender groups
1758
+ # src_male = sorted(src_male, key=face_sort_key)
1759
+ # src_female = sorted(src_female, key=face_sort_key)
1760
+
1761
+ # tgt_male = sorted(tgt_male, key=face_sort_key)
1762
+ # tgt_female = sorted(tgt_female, key=face_sort_key)
1763
+
1764
+ # # Build final swap pairs
1765
+ # pairs = []
1766
+
1767
+ # for s, t in zip(src_male, tgt_male):
1768
+ # pairs.append((s, t))
1769
+
1770
+ # for s, t in zip(src_female, tgt_female):
1771
+ # pairs.append((s, t))
1772
+
1773
+ # # Fallback if gender mismatch
1774
+ # if not pairs:
1775
+ # src_faces = sorted(src_faces, key=face_sort_key)
1776
+ # tgt_faces = sorted(tgt_faces, key=face_sort_key)
1777
+ # pairs = list(zip(src_faces, tgt_faces))
1778
+
1779
+ # result_img = tgt_bgr.copy()
1780
+
1781
+ # for src_face, _ in pairs:
1782
+ # # 🔁 re-detect current target faces
1783
+ # if face_analysis_app is None:
1784
+ # raise ValueError("Face analysis models not initialized. Please ensure models are downloaded.")
1785
+ # current_faces = face_analysis_app.get(result_img)
1786
+ # current_faces = sorted(current_faces, key=face_sort_key)
1787
+
1788
+ # # choose best matching gender
1789
+ # candidates = [
1790
+ # f for f in current_faces if f.gender == src_face.gender
1791
+ # ] or current_faces
1792
+
1793
+ # target_face = candidates[0]
1794
+
1795
+ # if swapper is None:
1796
+ # raise ValueError("Face swap models not initialized. Please ensure models are downloaded.")
1797
+ # result_img = swapper.get(
1798
+ # result_img,
1799
+ # target_face,
1800
+ # src_face,
1801
+ # paste_back=True
1802
+ # )
1803
+
1804
+ # return cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB)
1805
+
1806
+
1807
+
1808
+ # def face_swap_and_enhance(src_img, tgt_img, temp_dir=None):
1809
+ # try:
1810
+ # with swap_lock:
1811
+ # # Use a temp dir for intermediate files
1812
+ # if temp_dir is None:
1813
+ # temp_dir = os.path.join(tempfile.gettempdir(), f"faceswap_work_{uuid.uuid4().hex[:8]}")
1814
+ # if os.path.exists(temp_dir):
1815
+ # shutil.rmtree(temp_dir)
1816
+ # os.makedirs(temp_dir, exist_ok=True)
1817
+
1818
+ # src_bgr = cv2.cvtColor(src_img, cv2.COLOR_RGB2BGR)
1819
+ # tgt_bgr = cv2.cvtColor(tgt_img, cv2.COLOR_RGB2BGR)
1820
+
1821
+ # src_faces = face_analysis_app.get(src_bgr)
1822
+ # tgt_faces = face_analysis_app.get(tgt_bgr)
1823
+ # if face_analysis_app is None:
1824
+ # return None, None, "❌ Face analysis models not initialized. Please ensure models are downloaded."
1825
+ # if not src_faces or not tgt_faces:
1826
+ # return None, None, "❌ Face not detected in one of the images"
1827
+
1828
+ # swapped_path = os.path.join(temp_dir, f"swapped_{uuid.uuid4().hex[:8]}.jpg")
1829
+ # if swapper is None:
1830
+ # return None, None, "❌ Face swap models not initialized. Please ensure models are downloaded."
1831
+ # swapped_bgr = swapper.get(tgt_bgr, tgt_faces[0], src_faces[0])
1832
+ # if swapped_bgr is None:
1833
+ # return None, None, "❌ Face swap failed"
1834
+
1835
+ # cv2.imwrite(swapped_path, swapped_bgr)
1836
+
1837
+ # python_cmd = sys.executable if sys.executable else "python3"
1838
+ # cmd = f"{python_cmd} {CODEFORMER_PATH} -w 0.7 --input_path {swapped_path} --output_path {temp_dir} --bg_upsampler realesrgan --face_upsample"
1839
+ # result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
1840
+ # if result.returncode != 0:
1841
+ # return None, None, f"❌ CodeFormer failed:\n{result.stderr}"
1842
+
1843
+ # final_results_dir = os.path.join(temp_dir, "final_results")
1844
+ # final_files = [f for f in os.listdir(final_results_dir) if f.endswith(".png")]
1845
+ # if not final_files:
1846
+ # return None, None, "❌ No enhanced image found"
1847
+
1848
+ # final_path = os.path.join(final_results_dir, final_files[0])
1849
+ # final_img_bgr = cv2.imread(final_path)
1850
+ # if final_img_bgr is None:
1851
+ # return None, None, "❌ Failed to read enhanced image file"
1852
+ # final_img = cv2.cvtColor(final_img_bgr, cv2.COLOR_BGR2RGB)
1853
+
1854
+ # return final_img, final_path, ""
1855
+
1856
+ # except Exception as e:
1857
+ # return None, None, f"❌ Error: {str(e)}"
1858
+
1859
+ # def compress_image(
1860
+ # image_bytes: bytes,
1861
+ # max_size=(1280, 1280), # max width/height
1862
+ # quality=75 # JPEG quality (60–80 is ideal)
1863
+ # ) -> bytes:
1864
+ # """
1865
+ # Compress image by resizing and lowering quality.
1866
+ # Returns compressed image bytes.
1867
+ # """
1868
+ # img = Image.open(io.BytesIO(image_bytes)).convert("RGB")
1869
+
1870
+ # # Resize while maintaining aspect ratio
1871
+ # img.thumbnail(max_size, Image.LANCZOS)
1872
+
1873
+ # output = io.BytesIO()
1874
+ # img.save(
1875
+ # output,
1876
+ # format="JPEG",
1877
+ # quality=quality,
1878
+ # optimize=True,
1879
+ # progressive=True
1880
+ # )
1881
+
1882
+ # return output.getvalue()
1883
+
1884
+ # # --------------------- DigitalOcean Spaces Helper ---------------------
1885
+ # def get_spaces_client():
1886
+ # session = boto3.session.Session()
1887
+ # client = session.client(
1888
+ # 's3',
1889
+ # region_name=DO_SPACES_REGION,
1890
+ # endpoint_url=DO_SPACES_ENDPOINT,
1891
+ # aws_access_key_id=DO_SPACES_KEY,
1892
+ # aws_secret_access_key=DO_SPACES_SECRET,
1893
+ # config=Config(signature_version='s3v4')
1894
+ # )
1895
+ # return client
1896
+
1897
+ # def upload_to_spaces(file_bytes, key, content_type="image/png"):
1898
+ # client = get_spaces_client()
1899
+ # client.put_object(Bucket=DO_SPACES_BUCKET, Key=key, Body=file_bytes, ContentType=content_type, ACL='public-read')
1900
+ # return f"{DO_SPACES_ENDPOINT}/{DO_SPACES_BUCKET}/{key}"
1901
+
1902
+ # def download_from_spaces(key):
1903
+ # client = get_spaces_client()
1904
+ # obj = client.get_object(Bucket=DO_SPACES_BUCKET, Key=key)
1905
+ # return obj['Body'].read()
1906
+
1907
+ # def build_multi_faceswap_gradio():
1908
+ # with gr.Blocks() as demo:
1909
+ # gr.Markdown("## 👩‍❤️‍👨 Multi Face Swap (Couple → Couple)")
1910
+
1911
+ # with gr.Row():
1912
+ # src = gr.Image(type="numpy", label="Source Image (2 Faces)")
1913
+ # tgt = gr.Image(type="numpy", label="Target Image (2 Faces)")
1914
+
1915
+ # out = gr.Image(type="numpy", label="Swapped Result")
1916
+ # error = gr.Textbox(label="Logs", interactive=False)
1917
+
1918
+ # def process(src_img, tgt_img):
1919
+ # try:
1920
+ # swapped = multi_face_swap(src_img, tgt_img)
1921
+ # enhanced = enhance_image_with_codeformer(swapped)
1922
+ # return enhanced, ""
1923
+ # except Exception as e:
1924
+ # return None, str(e)
1925
+
1926
+ # btn = gr.Button("Swap Faces")
1927
+ # btn.click(process, [src, tgt], [out, error])
1928
+
1929
+ # return demo
1930
+
1931
+ # def mandatory_enhancement(rgb_img):
1932
+ # """
1933
+ # Always runs CodeFormer on the final image.
1934
+ # Fail-safe: returns original if enhancement fails.
1935
+ # """
1936
+ # try:
1937
+ # return enhance_image_with_codeformer(rgb_img)
1938
+ # except Exception as e:
1939
+ # logger.error(f"CodeFormer failed, returning original: {e}")
1940
+ # return rgb_img
1941
+
1942
+ # # --------------------- API Endpoints ---------------------
1943
+ # @fastapi_app.get("/")
1944
+ # async def root():
1945
+ # """Root endpoint"""
1946
+ # return {
1947
+ # "success": True,
1948
+ # "message": "FaceSwap API",
1949
+ # "data": {
1950
+ # "version": "1.0.0",
1951
+ # "Product Name":"Beauty Camera - GlowCam AI Studio",
1952
+ # "Released By" : "LogicGo Infotech"
1953
+ # }
1954
+ # }
1955
+ # @fastapi_app.get("/health")
1956
+ # async def health():
1957
+ # return {"status": "healthy"}
1958
+
1959
+ # from fastapi import Form
1960
+ # import requests
1961
+ # @fastapi_app.get("/test-admin-db")
1962
+ # async def test_admin_db():
1963
+ # try:
1964
+ # doc = await admin_db.list_collection_names()
1965
+ # return {"ok": True, "collections": doc}
1966
+ # except Exception as e:
1967
+ # return {"ok": False, "error": str(e), "url": ADMIN_MONGO_URL}
1968
+
1969
+ # @fastapi_app.post("/face-swap", dependencies=[Depends(verify_token)])
1970
+ # async def face_swap_api(
1971
+ # source: UploadFile = File(...),
1972
+ # target_category_id: str = Form(None),
1973
+ # new_category_id: str = Form(None),
1974
+ # user_id: Optional[str] = Form(None),
1975
+ # appname: Optional[str] = Form(None),
1976
+ # credentials: HTTPAuthorizationCredentials = Security(security)
1977
+ # ):
1978
+ # start_time = datetime.utcnow()
1979
+
1980
+ # try:
1981
+ # # ------------------------------------------------------------------
1982
+ # # VALIDATION
1983
+ # # ------------------------------------------------------------------
1984
+ # # --------------------------------------------------------------
1985
+ # # BACKWARD COMPATIBILITY FOR OLD ANDROID VERSIONS
1986
+ # # --------------------------------------------------------------
1987
+ # if target_category_id == "":
1988
+ # target_category_id = None
1989
+
1990
+ # if new_category_id == "":
1991
+ # new_category_id = None
1992
+
1993
+ # if user_id == "":
1994
+ # user_id = None
1995
+
1996
+ # # media_clicks_collection = get_media_clicks_collection(appname)
1997
+ # media_clicks_collection, subcategories_collection = get_app_db_collections(appname)
1998
+
1999
+
2000
+ # logger.info(f"[FaceSwap] Incoming request → target_category_id={target_category_id}, new_category_id={new_category_id}, user_id={user_id}")
2001
+
2002
+ # if target_category_id and new_category_id:
2003
+ # raise HTTPException(400, "Provide only one of new_category_id or target_category_id.")
2004
+
2005
+ # if not target_category_id and not new_category_id:
2006
+ # raise HTTPException(400, "Either new_category_id or target_category_id is required.")
2007
+
2008
+ # # ------------------------------------------------------------------
2009
+ # # READ SOURCE IMAGE
2010
+ # # ------------------------------------------------------------------
2011
+ # src_bytes = await source.read()
2012
+ # src_key = f"faceswap/source/{uuid.uuid4().hex}_{source.filename}"
2013
+ # upload_to_spaces(src_bytes, src_key, content_type=source.content_type)
2014
+
2015
+ # # ------------------------------------------------------------------
2016
+ # # CASE 1 : new_category_id → MongoDB lookup
2017
+ # # ------------------------------------------------------------------
2018
+ # if new_category_id:
2019
+
2020
+ # # doc = await subcategories_col.find_one({
2021
+ # # "asset_images._id": ObjectId(new_category_id)
2022
+ # # })
2023
+ # doc = await subcategories_collection.find_one({
2024
+ # "asset_images._id": ObjectId(new_category_id)
2025
+ # })
2026
+
2027
+
2028
+ # if not doc:
2029
+ # raise HTTPException(404, "Asset image not found in database")
2030
+
2031
+ # # extract correct asset
2032
+ # asset = next(
2033
+ # (img for img in doc["asset_images"] if str(img["_id"]) == new_category_id),
2034
+ # None
2035
+ # )
2036
+
2037
+ # if not asset:
2038
+ # raise HTTPException(404, "Asset image URL not found")
2039
+
2040
+ # # correct URL
2041
+ # target_url = asset["url"]
2042
+
2043
+ # # correct categoryId (ObjectId)
2044
+ # #category_oid = doc["categoryId"] # <-- DO NOT CONVERT TO STRING
2045
+ # subcategory_oid = doc["_id"]
2046
+
2047
+ # # ------------------------------------------------------------------#
2048
+ # # # MEDIA_CLICKS (ONLY IF user_id PRESENT)
2049
+ # # ------------------------------------------------------------------#
2050
+ # if user_id and media_clicks_collection is not None:
2051
+ # try:
2052
+ # user_id_clean = user_id.strip()
2053
+ # if not user_id_clean:
2054
+ # raise ValueError("user_id cannot be empty")
2055
+ # try:
2056
+ # user_oid = ObjectId(user_id_clean)
2057
+ # except (InvalidId, ValueError) as e:
2058
+ # logger.error(f"Invalid user_id format: {user_id_clean}")
2059
+ # raise ValueError(f"Invalid user_id format: {user_id_clean}")
2060
+
2061
+ # now = datetime.utcnow()
2062
+
2063
+ # # Normalize dates (UTC midnight)
2064
+ # today_date = datetime(now.year, now.month, now.day)
2065
+
2066
+ # # -------------------------------------------------
2067
+ # # STEP 1: Ensure root document exists
2068
+ # # -------------------------------------------------
2069
+ # await media_clicks_collection.update_one(
2070
+ # {"userId": user_oid},
2071
+ # {
2072
+ # "$setOnInsert": {
2073
+ # "userId": user_oid,
2074
+ # "createdAt": now,
2075
+ # "ai_edit_complete": 0,
2076
+ # "ai_edit_daily_count": []
2077
+ # }
2078
+ # },
2079
+ # upsert=True
2080
+ # )
2081
+ # # -------------------------------------------------
2082
+ # # STEP 2: Handle DAILY USAGE (BINARY, NO DUPLICATES)
2083
+ # # -------------------------------------------------
2084
+ # doc = await media_clicks_collection.find_one(
2085
+ # {"userId": user_oid},
2086
+ # {"ai_edit_daily_count": 1}
2087
+ # )
2088
+
2089
+ # daily_entries = doc.get("ai_edit_daily_count", []) if doc else []
2090
+
2091
+ # # Normalize today to UTC midnight
2092
+ # today_date = datetime(now.year, now.month, now.day)
2093
+
2094
+ # # Build normalized date → count map (THIS ENFORCES UNIQUENESS)
2095
+ # daily_map = {}
2096
+ # for entry in daily_entries:
2097
+ # d = entry["date"]
2098
+ # if isinstance(d, datetime):
2099
+ # d = datetime(d.year, d.month, d.day)
2100
+ # daily_map[d] = entry["count"] # overwrite = no duplicates
2101
+
2102
+ # # Determine last recorded date
2103
+ # last_date = max(daily_map.keys()) if daily_map else today_date
2104
+
2105
+ # # Fill ALL missing days with count = 0
2106
+ # next_day = last_date + timedelta(days=1)
2107
+ # while next_day < today_date:
2108
+ # daily_map.setdefault(next_day, 0)
2109
+ # next_day += timedelta(days=1)
2110
+
2111
+ # # Mark today as used (binary)
2112
+ # daily_map[today_date] = 1
2113
+
2114
+ # # Rebuild list: OLDEST → NEWEST
2115
+ # final_daily_entries = [
2116
+ # {"date": d, "count": daily_map[d]}
2117
+ # for d in sorted(daily_map.keys())
2118
+ # ]
2119
+
2120
+ # # Keep only last 32 days
2121
+ # final_daily_entries = final_daily_entries[-32:]
2122
+
2123
+ # # Atomic replace
2124
+ # await media_clicks_collection.update_one(
2125
+ # {"userId": user_oid},
2126
+ # {
2127
+ # "$set": {
2128
+ # "ai_edit_daily_count": final_daily_entries,
2129
+ # "updatedAt": now
2130
+ # }
2131
+ # }
2132
+ # )
2133
+
2134
+ # # -------------------------------------------------
2135
+ # # STEP 3: Try updating existing subCategory
2136
+ # # -------------------------------------------------
2137
+ # update_result = await media_clicks_collection.update_one(
2138
+ # {
2139
+ # "userId": user_oid,
2140
+ # "subCategories.subCategoryId": subcategory_oid
2141
+ # },
2142
+ # {
2143
+ # "$inc": {
2144
+ # "subCategories.$.click_count": 1,
2145
+ # "ai_edit_complete": 1
2146
+ # },
2147
+ # "$set": {
2148
+ # "subCategories.$.lastClickedAt": now,
2149
+ # "ai_edit_last_date": now,
2150
+ # "updatedAt": now
2151
+ # }
2152
+ # }
2153
+ # )
2154
+
2155
+ # # -------------------------------------------------
2156
+ # # STEP 4: Push subCategory if missing
2157
+ # # -------------------------------------------------
2158
+ # if update_result.matched_count == 0:
2159
+ # await media_clicks_collection.update_one(
2160
+ # {"userId": user_oid},
2161
+ # {
2162
+ # "$inc": {
2163
+ # "ai_edit_complete": 1
2164
+ # },
2165
+ # "$set": {
2166
+ # "ai_edit_last_date": now,
2167
+ # "updatedAt": now
2168
+ # },
2169
+ # "$push": {
2170
+ # "subCategories": {
2171
+ # "subCategoryId": subcategory_oid,
2172
+ # "click_count": 1,
2173
+ # "lastClickedAt": now
2174
+ # }
2175
+ # }
2176
+ # }
2177
+ # )
2178
+
2179
+ # # -------------------------------------------------
2180
+ # # STEP 5: Sort subCategories by lastClickedAt (ascending - oldest first)
2181
+ # # -------------------------------------------------
2182
+ # user_doc = await media_clicks_collection.find_one({"userId": user_oid})
2183
+ # if user_doc and "subCategories" in user_doc:
2184
+ # subcategories = user_doc["subCategories"]
2185
+ # # Sort by lastClickedAt in ascending order (oldest first)
2186
+ # # Handle missing or None dates by using datetime.min
2187
+ # subcategories_sorted = sorted(
2188
+ # subcategories,
2189
+ # key=lambda x: x.get("lastClickedAt") if x.get("lastClickedAt") is not None else datetime.min
2190
+ # )
2191
+ # # Update with sorted array
2192
+ # await media_clicks_collection.update_one(
2193
+ # {"userId": user_oid},
2194
+ # {
2195
+ # "$set": {
2196
+ # "subCategories": subcategories_sorted,
2197
+ # "updatedAt": now
2198
+ # }
2199
+ # }
2200
+ # )
2201
+
2202
+ # logger.info(
2203
+ # "[MEDIA_CLICK] user=%s subCategory=%s ai_edit_complete++ daily_tracked",
2204
+ # user_id,
2205
+ # str(subcategory_oid)
2206
+ # )
2207
+
2208
+ # except Exception as media_err:
2209
+ # logger.error(f"MEDIA_CLICK ERROR: {media_err}")
2210
+ # elif user_id and media_clicks_collection is None:
2211
+ # logger.warning("Media clicks collection unavailable; skipping media click tracking")
2212
+
2213
+ # # # ------------------------------------------------------------------
2214
+ # # # CASE 2 : target_category_id → DigitalOcean path (unchanged logic)
2215
+ # # # ------------------------------------------------------------------
2216
+ # if target_category_id:
2217
+ # client = get_spaces_client()
2218
+ # base_prefix = "faceswap/target/"
2219
+ # resp = client.list_objects_v2(
2220
+ # Bucket=DO_SPACES_BUCKET, Prefix=base_prefix, Delimiter="/"
2221
+ # )
2222
+
2223
+ # # Extract categories from the CommonPrefixes
2224
+ # categories = [p["Prefix"].split("/")[2] for p in resp.get("CommonPrefixes", [])]
2225
+
2226
+ # target_url = None
2227
+
2228
+ # # --- FIX STARTS HERE ---
2229
+ # for category in categories:
2230
+ # original_prefix = f"faceswap/target/{category}/original/"
2231
+ # thumb_prefix = f"faceswap/target/{category}/thumb/" # Keep for file list check (optional but safe)
2232
+
2233
+ # # List objects in original/
2234
+ # original_objects = client.list_objects_v2(
2235
+ # Bucket=DO_SPACES_BUCKET, Prefix=original_prefix
2236
+ # ).get("Contents", [])
2237
+
2238
+ # # List objects in thumb/ (optional: for the old code's extra check)
2239
+ # thumb_objects = client.list_objects_v2(
2240
+ # Bucket=DO_SPACES_BUCKET, Prefix=thumb_prefix
2241
+ # ).get("Contents", [])
2242
+
2243
+ # # Extract only the filenames and filter for .png
2244
+ # original_filenames = sorted([
2245
+ # obj["Key"].split("/")[-1] for obj in original_objects
2246
+ # if obj["Key"].split("/")[-1].endswith(".png")
2247
+ # ])
2248
+ # thumb_filenames = [
2249
+ # obj["Key"].split("/")[-1] for obj in thumb_objects
2250
+ # ]
2251
+
2252
+ # # Replicate the old indexing logic based on sorted filenames
2253
+ # for idx, filename in enumerate(original_filenames, start=1):
2254
+ # cid = f"{category.lower()}image_{idx}"
2255
+
2256
+ # # Optional: Replicate the thumb file check for 100% parity
2257
+ # # if filename in thumb_filenames and cid == target_category_id:
2258
+ # # Simpler check just on the ID, assuming thumb files are present
2259
+ # if cid == target_category_id:
2260
+ # # Construct the final target URL using the full prefix and the filename
2261
+ # target_url = f"{DO_SPACES_ENDPOINT}/{DO_SPACES_BUCKET}/{original_prefix}{filename}"
2262
+ # break
2263
+
2264
+ # if target_url:
2265
+ # break
2266
+ # # --- FIX ENDS HERE ---
2267
+
2268
+ # if not target_url:
2269
+ # raise HTTPException(404, "Target categoryId not found")
2270
+ # # # ------------------------------------------------------------------
2271
+ # # # DOWNLOAD TARGET IMAGE
2272
+ # # # ------------------------------------------------------------------
2273
+ # async with httpx.AsyncClient(timeout=30.0) as client:
2274
+ # response = await client.get(target_url)
2275
+ # response.raise_for_status()
2276
+ # tgt_bytes = response.content
2277
+
2278
+ # src_bgr = cv2.imdecode(np.frombuffer(src_bytes, np.uint8), cv2.IMREAD_COLOR)
2279
+ # tgt_bgr = cv2.imdecode(np.frombuffer(tgt_bytes, np.uint8), cv2.IMREAD_COLOR)
2280
+
2281
+ # if src_bgr is None or tgt_bgr is None:
2282
+ # raise HTTPException(400, "Invalid image data")
2283
+
2284
+ # src_rgb = cv2.cvtColor(src_bgr, cv2.COLOR_BGR2RGB)
2285
+ # tgt_rgb = cv2.cvtColor(tgt_bgr, cv2.COLOR_BGR2RGB)
2286
+
2287
+ # # ------------------------------------------------------------------
2288
+ # # FACE SWAP EXECUTION
2289
+ # # ------------------------------------------------------------------
2290
+ # final_img, final_path, err = face_swap_and_enhance(src_rgb, tgt_rgb)
2291
+
2292
+ # # #--------------------Version 2.0 ----------------------------------------#
2293
+ # # final_img, final_path, err = enhanced_face_swap_and_enhance(src_rgb, tgt_rgb)
2294
+ # # #--------------------Version 2.0 ----------------------------------------#
2295
+
2296
+ # if err:
2297
+ # raise HTTPException(500, err)
2298
+
2299
+ # with open(final_path, "rb") as f:
2300
+ # result_bytes = f.read()
2301
+
2302
+ # result_key = f"faceswap/result/{uuid.uuid4().hex}_enhanced.png"
2303
+ # result_url = upload_to_spaces(result_bytes, result_key)
2304
+ # # -------------------------------------------------
2305
+ # # COMPRESS IMAGE (2–3 MB target)
2306
+ # # -------------------------------------------------
2307
+ # compressed_bytes = compress_image(
2308
+ # image_bytes=result_bytes,
2309
+ # max_size=(1280, 1280),
2310
+ # quality=72
2311
+ # )
2312
+
2313
+ # compressed_key = f"faceswap/result/{uuid.uuid4().hex}_enhanced_compressed.jpg"
2314
+ # compressed_url = upload_to_spaces(
2315
+ # compressed_bytes,
2316
+ # compressed_key,
2317
+ # content_type="image/jpeg"
2318
+ # )
2319
+ # end_time = datetime.utcnow()
2320
+ # response_time_ms = (end_time - start_time).total_seconds() * 1000
2321
+
2322
+ # if database is not None:
2323
+ # log_entry = {
2324
+ # "endpoint": "/face-swap",
2325
+ # "status": "success",
2326
+ # "response_time_ms": response_time_ms,
2327
+ # "timestamp": end_time
2328
+ # }
2329
+ # if appname:
2330
+ # log_entry["appname"] = appname
2331
+ # await database.api_logs.insert_one(log_entry)
2332
+
2333
+
2334
+ # return {
2335
+ # "result_key": result_key,
2336
+ # "result_url": result_url,
2337
+ # "Compressed_Image_URL": compressed_url
2338
+ # }
2339
+
2340
+ # except Exception as e:
2341
+ # end_time = datetime.utcnow()
2342
+ # response_time_ms = (end_time - start_time).total_seconds() * 1000
2343
+
2344
+ # if database is not None:
2345
+ # log_entry = {
2346
+ # "endpoint": "/face-swap",
2347
+ # "status": "fail",
2348
+ # "response_time_ms": response_time_ms,
2349
+ # "timestamp": end_time,
2350
+ # "error": str(e)
2351
+ # }
2352
+ # if appname:
2353
+ # log_entry["appname"] = appname
2354
+ # await database.api_logs.insert_one(log_entry)
2355
+
2356
+ # raise HTTPException(500, f"Face swap failed: {str(e)}")
2357
+
2358
+ # @fastapi_app.get("/preview/{result_key:path}")
2359
+ # async def preview_result(result_key: str):
2360
+ # try:
2361
+ # img_bytes = download_from_spaces(result_key)
2362
+ # except Exception:
2363
+ # raise HTTPException(status_code=404, detail="Result not found")
2364
+ # return Response(
2365
+ # content=img_bytes,
2366
+ # media_type="image/png",
2367
+ # headers={"Content-Disposition": "inline; filename=result.png"}
2368
+ # )
2369
+
2370
+ # @fastapi_app.post("/multi-face-swap", dependencies=[Depends(verify_token)])
2371
+ # async def multi_face_swap_api(
2372
+ # source_image: UploadFile = File(...),
2373
+ # target_image: UploadFile = File(...)
2374
+ # ):
2375
+ # start_time = datetime.utcnow()
2376
+
2377
+ # try:
2378
+ # # -----------------------------
2379
+ # # Read images
2380
+ # # -----------------------------
2381
+ # src_bytes = await source_image.read()
2382
+ # tgt_bytes = await target_image.read()
2383
+
2384
+ # src_bgr = cv2.imdecode(np.frombuffer(src_bytes, np.uint8), cv2.IMREAD_COLOR)
2385
+ # tgt_bgr = cv2.imdecode(np.frombuffer(tgt_bytes, np.uint8), cv2.IMREAD_COLOR)
2386
+
2387
+ # if src_bgr is None or tgt_bgr is None:
2388
+ # raise HTTPException(400, "Invalid image data")
2389
+
2390
+ # src_rgb = cv2.cvtColor(src_bgr, cv2.COLOR_BGR2RGB)
2391
+ # tgt_rgb = cv2.cvtColor(tgt_bgr, cv2.COLOR_BGR2RGB)
2392
+
2393
+ # # -----------------------------
2394
+ # # Multi-face swap
2395
+ # # -----------------------------
2396
+ # swapped_rgb = multi_face_swap(src_rgb, tgt_rgb)
2397
+
2398
+ # # -----------------------------
2399
+ # # 🔥 MANDATORY ENHANCEMENT
2400
+ # # -----------------------------
2401
+ # final_rgb = mandatory_enhancement(swapped_rgb)
2402
+
2403
+ # final_bgr = cv2.cvtColor(final_rgb, cv2.COLOR_RGB2BGR)
2404
+
2405
+ # # -----------------------------
2406
+ # # Save temp result
2407
+ # # -----------------------------
2408
+ # temp_dir = tempfile.mkdtemp(prefix="multi_faceswap_")
2409
+ # result_path = os.path.join(temp_dir, "result.png")
2410
+ # cv2.imwrite(result_path, final_bgr)
2411
+
2412
+ # with open(result_path, "rb") as f:
2413
+ # result_bytes = f.read()
2414
+
2415
+ # # -----------------------------
2416
+ # # Upload
2417
+ # # -----------------------------
2418
+ # result_key = f"faceswap/multi/{uuid.uuid4().hex}.png"
2419
+ # result_url = upload_to_spaces(
2420
+ # result_bytes,
2421
+ # result_key,
2422
+ # content_type="image/png"
2423
+ # )
2424
+
2425
+ # return {
2426
+ # "result_key": result_key,
2427
+ # "result_url": result_url
2428
+ # }
2429
+
2430
+ # except Exception as e:
2431
+ # raise HTTPException(status_code=500, detail=str(e))
2432
+
2433
+
2434
+ # @fastapi_app.post("/face-swap-couple", dependencies=[Depends(verify_token)])
2435
+ # async def face_swap_api(
2436
+ # image1: UploadFile = File(...),
2437
+ # image2: Optional[UploadFile] = File(None),
2438
+ # target_category_id: str = Form(None),
2439
+ # new_category_id: str = Form(None),
2440
+ # user_id: Optional[str] = Form(None),
2441
+ # appname: Optional[str] = Form(None),
2442
+ # credentials: HTTPAuthorizationCredentials = Security(security)
2443
+ # ):
2444
+ # """
2445
+ # Production-ready face swap endpoint supporting:
2446
+ # - Multiple source images (image1 + optional image2)
2447
+ # - Gender-based pairing
2448
+ # - Merged faces from multiple sources
2449
+ # - Mandatory CodeFormer enhancement
2450
+ # """
2451
+ # start_time = datetime.utcnow()
2452
+
2453
+ # try:
2454
+ # # -----------------------------
2455
+ # # Validate input
2456
+ # # -----------------------------
2457
+ # if target_category_id == "":
2458
+ # target_category_id = None
2459
+ # if new_category_id == "":
2460
+ # new_category_id = None
2461
+ # if user_id == "":
2462
+ # user_id = None
2463
+
2464
+ # media_clicks_collection = get_media_clicks_collection(appname)
2465
+
2466
+ # if target_category_id and new_category_id:
2467
+ # raise HTTPException(400, "Provide only one of new_category_id or target_category_id.")
2468
+ # if not target_category_id and not new_category_id:
2469
+ # raise HTTPException(400, "Either new_category_id or target_category_id is required.")
2470
+
2471
+ # logger.info(f"[FaceSwap] Incoming request → target_category_id={target_category_id}, new_category_id={new_category_id}, user_id={user_id}")
2472
+
2473
+ # # -----------------------------
2474
+ # # Read source images
2475
+ # # -----------------------------
2476
+ # src_images = []
2477
+ # img1_bytes = await image1.read()
2478
+ # src1 = cv2.imdecode(np.frombuffer(img1_bytes, np.uint8), cv2.IMREAD_COLOR)
2479
+ # if src1 is None:
2480
+ # raise HTTPException(400, "Invalid image1 data")
2481
+ # src_images.append(cv2.cvtColor(src1, cv2.COLOR_BGR2RGB))
2482
+
2483
+ # if image2:
2484
+ # img2_bytes = await image2.read()
2485
+ # src2 = cv2.imdecode(np.frombuffer(img2_bytes, np.uint8), cv2.IMREAD_COLOR)
2486
+ # if src2 is not None:
2487
+ # src_images.append(cv2.cvtColor(src2, cv2.COLOR_BGR2RGB))
2488
+
2489
+ # # -----------------------------
2490
+ # # Resolve target image
2491
+ # # -----------------------------
2492
+ # target_url = None
2493
+ # if new_category_id:
2494
+ # doc = await subcategories_col.find_one({
2495
+ # "asset_images._id": ObjectId(new_category_id)
2496
+ # })
2497
+
2498
+ # if not doc:
2499
+ # raise HTTPException(404, "Asset image not found in database")
2500
+
2501
+ # asset = next(
2502
+ # (img for img in doc["asset_images"] if str(img["_id"]) == new_category_id),
2503
+ # None
2504
+ # )
2505
+
2506
+ # if not asset:
2507
+ # raise HTTPException(404, "Asset image URL not found")
2508
+
2509
+ # target_url = asset["url"]
2510
+ # subcategory_oid = doc["_id"]
2511
+
2512
+ # if user_id and media_clicks_collection is not None:
2513
+ # try:
2514
+ # user_id_clean = user_id.strip()
2515
+ # if not user_id_clean:
2516
+ # raise ValueError("user_id cannot be empty")
2517
+ # try:
2518
+ # user_oid = ObjectId(user_id_clean)
2519
+ # except (InvalidId, ValueError):
2520
+ # logger.error(f"Invalid user_id format: {user_id_clean}")
2521
+ # raise ValueError(f"Invalid user_id format: {user_id_clean}")
2522
+
2523
+ # now = datetime.utcnow()
2524
+
2525
+ # # Step 1: ensure root document exists
2526
+ # await media_clicks_collection.update_one(
2527
+ # {"userId": user_oid},
2528
+ # {
2529
+ # "$setOnInsert": {
2530
+ # "userId": user_oid,
2531
+ # "createdAt": now,
2532
+ # "ai_edit_complete": 0,
2533
+ # "ai_edit_daily_count": []
2534
+ # }
2535
+ # },
2536
+ # upsert=True
2537
+ # )
2538
+
2539
+ # # Step 2: handle daily usage (binary, no duplicates)
2540
+ # doc = await media_clicks_collection.find_one(
2541
+ # {"userId": user_oid},
2542
+ # {"ai_edit_daily_count": 1}
2543
+ # )
2544
+
2545
+ # daily_entries = doc.get("ai_edit_daily_count", []) if doc else []
2546
+
2547
+ # today_date = datetime(now.year, now.month, now.day)
2548
+
2549
+ # daily_map = {}
2550
+ # for entry in daily_entries:
2551
+ # d = entry["date"]
2552
+ # if isinstance(d, datetime):
2553
+ # d = datetime(d.year, d.month, d.day)
2554
+ # daily_map[d] = entry["count"]
2555
+
2556
+ # last_date = max(daily_map.keys()) if daily_map else None
2557
+
2558
+ # if last_date != today_date:
2559
+ # daily_map[today_date] = 1
2560
+
2561
+ # final_daily_entries = [
2562
+ # {"date": d, "count": daily_map[d]}
2563
+ # for d in sorted(daily_map.keys())
2564
+ # ]
2565
+
2566
+ # final_daily_entries = final_daily_entries[-32:]
2567
+
2568
+ # await media_clicks_collection.update_one(
2569
+ # {"userId": user_oid},
2570
+ # {
2571
+ # "$set": {
2572
+ # "ai_edit_daily_count": final_daily_entries,
2573
+ # "updatedAt": now
2574
+ # }
2575
+ # }
2576
+ # )
2577
+
2578
+ # # Step 3: try updating existing subCategory
2579
+ # update_result = await media_clicks_collection.update_one(
2580
+ # {
2581
+ # "userId": user_oid,
2582
+ # "subCategories.subCategoryId": subcategory_oid
2583
+ # },
2584
+ # {
2585
+ # "$inc": {
2586
+ # "subCategories.$.click_count": 1,
2587
+ # "ai_edit_complete": 1
2588
+ # },
2589
+ # "$set": {
2590
+ # "subCategories.$.lastClickedAt": now,
2591
+ # "ai_edit_last_date": now,
2592
+ # "updatedAt": now
2593
+ # }
2594
+ # }
2595
+ # )
2596
+
2597
+ # # Step 4: push subCategory if missing
2598
+ # if update_result.matched_count == 0:
2599
+ # await media_clicks_collection.update_one(
2600
+ # {"userId": user_oid},
2601
+ # {
2602
+ # "$inc": {
2603
+ # "ai_edit_complete": 1
2604
+ # },
2605
+ # "$set": {
2606
+ # "ai_edit_last_date": now,
2607
+ # "updatedAt": now
2608
+ # },
2609
+ # "$push": {
2610
+ # "subCategories": {
2611
+ # "subCategoryId": subcategory_oid,
2612
+ # "click_count": 1,
2613
+ # "lastClickedAt": now
2614
+ # }
2615
+ # }
2616
+ # }
2617
+ # )
2618
+
2619
+ # # Step 5: sort subCategories by lastClickedAt (ascending)
2620
+ # user_doc = await media_clicks_collection.find_one({"userId": user_oid})
2621
+ # if user_doc and "subCategories" in user_doc:
2622
+ # subcategories = user_doc["subCategories"]
2623
+ # subcategories_sorted = sorted(
2624
+ # subcategories,
2625
+ # key=lambda x: x.get("lastClickedAt") if x.get("lastClickedAt") is not None else datetime.min
2626
+ # )
2627
+ # await media_clicks_collection.update_one(
2628
+ # {"userId": user_oid},
2629
+ # {
2630
+ # "$set": {
2631
+ # "subCategories": subcategories_sorted,
2632
+ # "updatedAt": now
2633
+ # }
2634
+ # }
2635
+ # )
2636
+
2637
+ # logger.info(
2638
+ # "[MEDIA_CLICK] user=%s subCategory=%s ai_edit_complete++ daily_tracked",
2639
+ # user_id,
2640
+ # str(subcategory_oid)
2641
+ # )
2642
+
2643
+ # except Exception as media_err:
2644
+ # logger.error(f"MEDIA_CLICK ERROR: {media_err}")
2645
+ # elif user_id and media_clicks_collection is None:
2646
+ # logger.warning("Media clicks collection unavailable; skipping media click tracking")
2647
+
2648
+ # if target_category_id:
2649
+ # client = get_spaces_client()
2650
+ # base_prefix = "faceswap/target/"
2651
+ # resp = client.list_objects_v2(
2652
+ # Bucket=DO_SPACES_BUCKET, Prefix=base_prefix, Delimiter="/"
2653
+ # )
2654
+
2655
+ # categories = [p["Prefix"].split("/")[2] for p in resp.get("CommonPrefixes", [])]
2656
+
2657
+ # for category in categories:
2658
+ # original_prefix = f"faceswap/target/{category}/original/"
2659
+ # thumb_prefix = f"faceswap/target/{category}/thumb/"
2660
+
2661
+ # original_objects = client.list_objects_v2(
2662
+ # Bucket=DO_SPACES_BUCKET, Prefix=original_prefix
2663
+ # ).get("Contents", [])
2664
+
2665
+ # thumb_objects = client.list_objects_v2(
2666
+ # Bucket=DO_SPACES_BUCKET, Prefix=thumb_prefix
2667
+ # ).get("Contents", [])
2668
+
2669
+ # original_filenames = sorted([
2670
+ # obj["Key"].split("/")[-1] for obj in original_objects
2671
+ # if obj["Key"].split("/")[-1].endswith(".png")
2672
+ # ])
2673
+
2674
+ # for idx, filename in enumerate(original_filenames, start=1):
2675
+ # cid = f"{category.lower()}image_{idx}"
2676
+ # if cid == target_category_id:
2677
+ # target_url = f"{DO_SPACES_ENDPOINT}/{DO_SPACES_BUCKET}/{original_prefix}{filename}"
2678
+ # break
2679
+
2680
+ # if target_url:
2681
+ # break
2682
+
2683
+ # if not target_url:
2684
+ # raise HTTPException(404, "Target categoryId not found")
2685
+
2686
+ # async with httpx.AsyncClient(timeout=30.0) as client:
2687
+ # response = await client.get(target_url)
2688
+ # response.raise_for_status()
2689
+ # tgt_bytes = response.content
2690
+
2691
+ # tgt_bgr = cv2.imdecode(np.frombuffer(tgt_bytes, np.uint8), cv2.IMREAD_COLOR)
2692
+ # if tgt_bgr is None:
2693
+ # raise HTTPException(400, "Invalid target image data")
2694
+
2695
+ # # -----------------------------
2696
+ # # Merge all source faces
2697
+ # # -----------------------------
2698
+ # all_src_faces = []
2699
+ # for img in src_images:
2700
+ # faces = face_analysis_app.get(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
2701
+ # all_src_faces.extend(faces)
2702
+
2703
+ # if not all_src_faces:
2704
+ # raise HTTPException(400, "No faces detected in source images")
2705
+
2706
+ # tgt_faces = face_analysis_app.get(tgt_bgr)
2707
+ # if not tgt_faces:
2708
+ # raise HTTPException(400, "No faces detected in target image")
2709
+
2710
+ # # -----------------------------
2711
+ # # Gender-based pairing
2712
+ # # -----------------------------
2713
+ # def face_sort_key(face):
2714
+ # x1, y1, x2, y2 = face.bbox
2715
+ # area = (x2 - x1) * (y2 - y1)
2716
+ # cx = (x1 + x2) / 2
2717
+ # return (-area, cx)
2718
+
2719
+ # # Separate by gender
2720
+ # src_male = sorted([f for f in all_src_faces if f.gender == 1], key=face_sort_key)
2721
+ # src_female = sorted([f for f in all_src_faces if f.gender == 0], key=face_sort_key)
2722
+ # tgt_male = sorted([f for f in tgt_faces if f.gender == 1], key=face_sort_key)
2723
+ # tgt_female = sorted([f for f in tgt_faces if f.gender == 0], key=face_sort_key)
2724
+
2725
+ # pairs = []
2726
+ # for s, t in zip(src_male, tgt_male):
2727
+ # pairs.append((s, t))
2728
+ # for s, t in zip(src_female, tgt_female):
2729
+ # pairs.append((s, t))
2730
+
2731
+ # # fallback if gender mismatch
2732
+ # if not pairs:
2733
+ # src_all = sorted(all_src_faces, key=face_sort_key)
2734
+ # tgt_all = sorted(tgt_faces, key=face_sort_key)
2735
+ # pairs = list(zip(src_all, tgt_all))
2736
+
2737
+ # # -----------------------------
2738
+ # # Perform face swap
2739
+ # # -----------------------------
2740
+ # with swap_lock:
2741
+ # result_img = tgt_bgr.copy()
2742
+ # for src_face, _ in pairs:
2743
+ # if face_analysis_app is None:
2744
+ # raise HTTPException(status_code=500, detail="Face analysis models not initialized. Please ensure models are downloaded.")
2745
+ # current_faces = sorted(face_analysis_app.get(result_img), key=face_sort_key)
2746
+ # candidates = [f for f in current_faces if f.gender == src_face.gender] or current_faces
2747
+ # target_face = candidates[0]
2748
+ # if swapper is None:
2749
+ # raise HTTPException(status_code=500, detail="Face swap models not initialized. Please ensure models are downloaded.")
2750
+ # result_img = swapper.get(result_img, target_face, src_face, paste_back=True)
2751
+
2752
+ # result_rgb = cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB)
2753
+
2754
+ # # -----------------------------
2755
+ # # Mandatory enhancement
2756
+ # # -----------------------------
2757
+ # enhanced_rgb = mandatory_enhancement(result_rgb)
2758
+ # enhanced_bgr = cv2.cvtColor(enhanced_rgb, cv2.COLOR_RGB2BGR)
2759
+
2760
+ # # -----------------------------
2761
+ # # Save, upload, compress
2762
+ # # -----------------------------
2763
+ # temp_dir = tempfile.mkdtemp(prefix="faceswap_")
2764
+ # final_path = os.path.join(temp_dir, "result.png")
2765
+ # cv2.imwrite(final_path, enhanced_bgr)
2766
+
2767
+ # with open(final_path, "rb") as f:
2768
+ # result_bytes = f.read()
2769
+
2770
+ # result_key = f"faceswap/result/{uuid.uuid4().hex}_enhanced.png"
2771
+ # result_url = upload_to_spaces(result_bytes, result_key)
2772
+
2773
+ # compressed_bytes = compress_image(result_bytes, max_size=(1280, 1280), quality=72)
2774
+ # compressed_key = f"faceswap/result/{uuid.uuid4().hex}_enhanced_compressed.jpg"
2775
+ # compressed_url = upload_to_spaces(compressed_bytes, compressed_key, content_type="image/jpeg")
2776
+
2777
+ # # -----------------------------
2778
+ # # Log API usage
2779
+ # # -----------------------------
2780
+ # end_time = datetime.utcnow()
2781
+ # response_time_ms = (end_time - start_time).total_seconds() * 1000
2782
+ # if database is not None:
2783
+ # log_entry = {
2784
+ # "endpoint": "/face-swap-couple",
2785
+ # "status": "success",
2786
+ # "response_time_ms": response_time_ms,
2787
+ # "timestamp": end_time
2788
+ # }
2789
+ # if appname:
2790
+ # log_entry["appname"] = appname
2791
+ # await database.api_logs.insert_one(log_entry)
2792
+
2793
+ # return {
2794
+ # "result_key": result_key,
2795
+ # "result_url": result_url,
2796
+ # "compressed_url": compressed_url
2797
+ # }
2798
+
2799
+ # except Exception as e:
2800
+ # end_time = datetime.utcnow()
2801
+ # response_time_ms = (end_time - start_time).total_seconds() * 1000
2802
+ # if database is not None:
2803
+ # log_entry = {
2804
+ # "endpoint": "/face-swap-couple",
2805
+ # "status": "fail",
2806
+ # "response_time_ms": response_time_ms,
2807
+ # "timestamp": end_time,
2808
+ # "error": str(e)
2809
+ # }
2810
+ # if appname:
2811
+ # log_entry["appname"] = appname
2812
+ # await database.api_logs.insert_one(log_entry)
2813
+ # raise HTTPException(500, f"Face swap failed: {str(e)}")
2814
+
2815
+
2816
+
2817
+
2818
+ # # --------------------- Mount Gradio ---------------------
2819
+
2820
+ # multi_faceswap_app = build_multi_faceswap_gradio()
2821
+ # fastapi_app = mount_gradio_app(
2822
+ # fastapi_app,
2823
+ # multi_faceswap_app,
2824
+ # path="/gradio-couple-faceswap"
2825
+ # )
2826
+
2827
+
2828
+
2829
+ # if __name__ == "__main__":
2830
+ # uvicorn.run(fastapi_app, host="0.0.0.0", port=7860)
2831
+
2832
+
2833
  import os
2834
  os.environ["OMP_NUM_THREADS"] = "1"
2835
  import shutil
 
2902
# --- Collage-maker MongoDB handles (optional integration) ---
# All handles default to None so downstream code can detect a missing or
# failed connection with `is not None` checks (see the appname dispatch
# that falls back to the admin DB when these are unset).
collage_maker_client = None
collage_maker_db = None
collage_media_clicks_col = None
collage_subcategories_col = None
if COLLAGE_MAKER_DB_URL:
    try:
        # NOTE(review): Motor clients connect lazily, so this except branch
        # presumably catches only construction-time errors (e.g. a malformed
        # URL), not server unavailability — confirm against deployment logs.
        collage_maker_client = AsyncIOMotorClient(COLLAGE_MAKER_DB_URL)
        # Collections used for per-user media-click tracking and the
        # subcategory asset catalog, both in the "adminPanel" database.
        collage_maker_db = collage_maker_client.adminPanel
        collage_media_clicks_col = collage_maker_db.media_clicks
        collage_subcategories_col = collage_maker_db.subcategories
    except Exception as e:
        # The collage-maker DB is optional: log and continue with None handles.
        logger.warning(f"MongoDB collage-maker connection failed (optional): {e}")
2914
 
2915
  # AI Enhancer DB (optional)
2916
 
 
3092
  app = appname.strip().lower()
3093
 
3094
  if app == "collage-maker":
3095
+ if collage_media_clicks_col is not None and collage_subcategories_col is not None:
3096
+ return collage_media_clicks_col, collage_subcategories_col
3097
  logger.warning("Collage-maker DB not configured, falling back to admin")
3098
 
3099
  elif app == "ai-enhancer":
 
4249
 
4250
# Script entry point: serve the FastAPI app (with any mounted Gradio UIs)
# on all interfaces at port 7860 — the conventional Hugging Face Spaces port.
if __name__ == "__main__":
    uvicorn.run(fastapi_app, host="0.0.0.0", port=7860)