Spaces:
Sleeping
Sleeping
"""
Trend Engine common utilities — retry, period calculation, retention cleanup.

S7: shared error-handling wrapper
S6: period-calculation helpers
"""
| import functools | |
| import logging | |
| import random | |
| import time | |
| from datetime import date, timedelta | |
| logger = logging.getLogger(__name__) | |
# ------------------------------------------------------------------
# S7: retry decorator
# ------------------------------------------------------------------
def retry_on_failure(max_retries=3, base_delay=1.0, exceptions=(Exception,)):
    """Decorator: retry a failing call with exponential backoff plus jitter.

    Args:
        max_retries: total number of attempts before giving up.
        base_delay: initial wait in seconds; doubled on every failed attempt.
        exceptions: tuple of exception types that trigger a retry.

    Raises:
        The last caught exception, re-raised once all attempts are exhausted.
    """
    def decorator(func):
        # functools.wraps preserves __name__/__doc__ on the wrapper; without it
        # the func.__name__ used in the log lines below would report "wrapper"
        # when decorators are stacked, and introspection/debugging breaks.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except exceptions as e:
                    if attempt == max_retries - 1:
                        logger.error(
                            "%s failed after %d retries: %s",
                            func.__name__, max_retries, e,
                        )
                        raise
                    # Exponential backoff with random jitter to avoid
                    # hammering the remote API in lockstep.
                    wait = base_delay * (2 ** attempt) + random.uniform(0, 0.5)
                    logger.warning(
                        "%s attempt %d failed: %s. Retrying in %.1fs",
                        func.__name__, attempt + 1, e, wait,
                    )
                    time.sleep(wait)
        return wrapper
    return decorator
# ------------------------------------------------------------------
# S6: period-calculation helper
# ------------------------------------------------------------------
def get_week_period() -> tuple[date, date]:
    """Return (Monday of the current week, today) as the reporting period."""
    period_end = date.today()
    # date.weekday() is 0 for Monday, so subtracting it lands on this
    # week's Monday (today itself when today is Monday).
    monday = period_end - timedelta(days=period_end.weekday())
    return monday, period_end
# ------------------------------------------------------------------
# S6: retention cleanup
# ------------------------------------------------------------------
def safe_upsert_spot_trend(supabase, row: dict) -> bool:
    """PostgREST partial-unique-index compatible upsert (DELETE + INSERT).

    The spot_trends table has a partial unique index
    (WHERE spot_id != '__pending__') that is incompatible with PostgREST's
    on_conflict parameter and raises a 42P10 error. To work around this,
    any existing record is deleted first, then the new record is inserted.
    __pending__ rows are not touched by the delete (blog-post INSERTs take
    a separate path and allow duplicates).

    Args:
        supabase: Supabase client exposing .table(...) query builders.
        row: spot_trends record; the non-pending path reads the
            "source", "metric_type", and "period_start" keys.

    Returns:
        True on success. Exceptions raised by the INSERT propagate.
    """
    sid = row.get("spot_id", "")
    if sid == "__pending__":
        # __pending__ rows permit duplicates -> plain INSERT, no delete
        supabase.table("spot_trends").insert(row).execute()
        return True
    try:
        supabase.table("spot_trends").delete() \
            .eq("spot_id", sid) \
            .eq("source", row["source"]) \
            .eq("metric_type", row["metric_type"]) \
            .eq("period_start", row["period_start"]) \
            .execute()
    except Exception as e:
        # Best-effort delete: a missing prior record is expected and fine,
        # but log the error so real failures (auth, network, bad column)
        # are not silently masked before the INSERT below fails confusingly.
        logger.debug("spot_trends pre-delete failed for %s: %s", sid, e)
    supabase.table("spot_trends").insert(row).execute()
    return True
def cleanup_old_trends(supabase, retention_weeks: int = 12) -> int:
    """Delete spot_trends records older than *retention_weeks* weeks.

    The purely date-based filter also sweeps away aged __pending__
    blog-source rows. Cleanup is best-effort: any failure is logged
    and reported as 0 deletions instead of raised.

    Returns:
        Number of rows removed (0 when nothing matched or on error).
    """
    cutoff_iso = (date.today() - timedelta(weeks=retention_weeks)).isoformat()
    try:
        response = (
            supabase.table("spot_trends")
            .delete()
            .lt("collected_at", cutoff_iso)
            .execute()
        )
        removed = len(response.data) if response.data else 0
        logger.info(
            "retention ์ ๋ฆฌ ์๋ฃ: %d๊ฑด ์ญ์ (cutoff: %s, %d์ฃผ ์ด์ )",
            removed, cutoff_iso, retention_weeks,
        )
        return removed
    except Exception as e:
        # Maintenance must never break the main pipeline run.
        logger.warning("retention ์ ๋ฆฌ ์คํจ: %s", e)
        return 0