shaliz-kong commited on
Commit ·
00f9956
1
Parent(s): 049be5a
updated dockefile to download duck db v 1
Browse files- Dockerfile +7 -7
- app/deps.py +2 -1
- app/mapper.py +432 -175
Dockerfile
CHANGED
|
@@ -19,18 +19,18 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
|
| 19 |
RUN pip install --no-cache-dir --upgrade pip setuptools wheel
|
| 20 |
|
| 21 |
|
|
|
|
| 22 |
# ---- 4. install Python deps (+ DuckDB driver) ------------------------------
|
| 23 |
COPY requirements.txt /tmp/requirements.txt
|
| 24 |
RUN pip install --no-cache-dir --prefer-binary -r /tmp/requirements.txt && \
|
| 25 |
-
pip install --no-cache-dir duckdb=
|
| 26 |
|
| 27 |
-
# ---- 5. Pre-download VSS extension (matches DuckDB
|
| 28 |
# This prevents runtime download failures and startup crashes
|
| 29 |
-
RUN mkdir -p /root/.duckdb/extensions/
|
| 30 |
-
wget -q https://extensions.duckdb.org/
|
| 31 |
-
-O /root/.duckdb/extensions/
|
| 32 |
-
gunzip /root/.duckdb/extensions/
|
| 33 |
-
|
| 34 |
# ---- 6. copy source --------------------------------------------------------
|
| 35 |
COPY . /app
|
| 36 |
WORKDIR /app
|
|
|
|
| 19 |
RUN pip install --no-cache-dir --upgrade pip setuptools wheel
|
| 20 |
|
| 21 |
|
| 22 |
+
# ---- 4. install Python deps (+ DuckDB driver) ------------------------------
|
| 23 |
# ---- 4. install Python deps (+ DuckDB driver) ------------------------------
|
| 24 |
COPY requirements.txt /tmp/requirements.txt
|
| 25 |
RUN pip install --no-cache-dir --prefer-binary -r /tmp/requirements.txt && \
|
| 26 |
+
pip install --no-cache-dir "duckdb>=1.0.0"
|
| 27 |
|
| 28 |
+
# ---- 5. Pre-download VSS extension (matches DuckDB v1.0.0) ---------------
|
| 29 |
# This prevents runtime download failures and startup crashes
|
| 30 |
+
RUN mkdir -p /root/.duckdb/extensions/v1.0.0/linux_amd64 && \
|
| 31 |
+
wget -q https://extensions.duckdb.org/v1.0.0/linux_amd64/vss.duckdb_extension.gz \
|
| 32 |
+
-O /root/.duckdb/extensions/v1.0.0/linux_amd64/vss.duckdb_extension.gz && \
|
| 33 |
+
gunzip /root/.duckdb/extensions/v1.0.0/linux_amd64/vss.duckdb_extension.gz
|
|
|
|
| 34 |
# ---- 6. copy source --------------------------------------------------------
|
| 35 |
COPY . /app
|
| 36 |
WORKDIR /app
|
app/deps.py
CHANGED
|
@@ -102,7 +102,8 @@ def get_secret(name: str, required: bool = True) -> Optional[str]:
|
|
| 102 |
|
| 103 |
# API Keys
|
| 104 |
API_KEYS = get_secret("API_KEYS").split(",") if get_secret("API_KEYS") else []
|
| 105 |
-
|
|
|
|
| 106 |
# Redis configuration
|
| 107 |
REDIS_URL = get_secret("UPSTASH_REDIS_REST_URL", required=False)
|
| 108 |
REDIS_TOKEN = get_secret("UPSTASH_REDIS_REST_TOKEN", required=False)
|
|
|
|
| 102 |
|
| 103 |
# API Keys
|
| 104 |
API_KEYS = get_secret("API_KEYS").split(",") if get_secret("API_KEYS") else []
|
| 105 |
+
# Add this line near your other secret constants
|
| 106 |
+
HF_API_TOKEN = get_secret("HF_API_TOKEN", required=False)
|
| 107 |
# Redis configuration
|
| 108 |
REDIS_URL = get_secret("UPSTASH_REDIS_REST_URL", required=False)
|
| 109 |
REDIS_TOKEN = get_secret("UPSTASH_REDIS_REST_TOKEN", required=False)
|
app/mapper.py
CHANGED
|
@@ -1,16 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import os
|
| 2 |
import json
|
|
|
|
| 3 |
import pandas as pd
|
| 4 |
import numpy as np
|
| 5 |
from datetime import datetime, timedelta
|
| 6 |
from concurrent.futures import ThreadPoolExecutor
|
| 7 |
import time
|
|
|
|
|
|
|
| 8 |
|
| 9 |
from app.db import get_conn, ensure_raw_table, transactional_conn, ensure_schema_versions_table
|
| 10 |
from app.hybrid_entity_detector import hybrid_detect_entity_type
|
| 11 |
from app.core.event_hub import event_hub
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
|
| 13 |
-
# ---------------------- Canonical Schema ---------------------- #
|
| 14 |
CANONICAL = {
|
| 15 |
"timestamp": ["timestamp", "date", "sale_date", "created_at"],
|
| 16 |
"product_id": ["sku", "barcode", "plu", "product_id", "item_code"],
|
|
@@ -24,12 +102,77 @@ CANONICAL = {
|
|
| 24 |
|
| 25 |
ALIAS_FILE = "./db/alias_memory.json"
|
| 26 |
|
| 27 |
-
# Module-level caches
|
| 28 |
_ENTITY_CACHE = {}
|
| 29 |
_INDUSTRY_CACHE = {}
|
| 30 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
def map_pandas_to_duck(col: str, series: pd.Series) -> str:
|
| 32 |
-
"""Map pandas dtype to DuckDB type"""
|
| 33 |
if pd.api.types.is_bool_dtype(series): return "BOOLEAN"
|
| 34 |
if pd.api.types.is_integer_dtype(series): return "BIGINT"
|
| 35 |
if pd.api.types.is_float_dtype(series): return "DOUBLE"
|
|
@@ -37,7 +180,7 @@ def map_pandas_to_duck(col: str, series: pd.Series) -> str:
|
|
| 37 |
return "VARCHAR"
|
| 38 |
|
| 39 |
def load_dynamic_aliases() -> None:
|
| 40 |
-
"""Load column alias mappings
|
| 41 |
if os.path.exists(ALIAS_FILE):
|
| 42 |
try:
|
| 43 |
with open(ALIAS_FILE) as f:
|
|
@@ -51,92 +194,219 @@ def load_dynamic_aliases() -> None:
|
|
| 51 |
print(f"[mapper] ⚠️ Failed to load alias memory: {e}")
|
| 52 |
|
| 53 |
def save_dynamic_aliases() -> None:
|
| 54 |
-
"""Save column alias mappings
|
| 55 |
os.makedirs(os.path.dirname(ALIAS_FILE), exist_ok=True)
|
| 56 |
with open(ALIAS_FILE, "w") as f:
|
| 57 |
json.dump(CANONICAL, f, indent=2)
|
| 58 |
|
| 59 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
|
| 61 |
def poll_for_entity(org_id: str, source_id: str, timeout: int = 10) -> dict:
|
| 62 |
"""
|
| 63 |
-
Poll Redis for entity detection result
|
| 64 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 65 |
"""
|
|
|
|
| 66 |
cache_key = (org_id, source_id)
|
| 67 |
|
| 68 |
# 1. Check cache (zero Redis calls)
|
| 69 |
if cache_key in _ENTITY_CACHE:
|
| 70 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
return _ENTITY_CACHE[cache_key]
|
| 72 |
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
if data:
|
| 79 |
-
entity_info = json.loads(data)
|
| 80 |
-
print(f"[poll] ✅ Redis hit: {entity_info['entity_type']}")
|
| 81 |
-
_ENTITY_CACHE[cache_key] = entity_info
|
| 82 |
return entity_info
|
| 83 |
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
return entity_info
|
| 94 |
-
|
| 95 |
-
# 5. Fallback (single DuckDB query for both entity & industry)
|
| 96 |
-
print("[poll] ⚠️ Using combined fallback")
|
| 97 |
-
entity_info, industry_info = _fallback_combined(org_id, source_id)
|
| 98 |
-
|
| 99 |
-
# 6. Populate industry cache too (since we have it)
|
| 100 |
-
_INDUSTRY_CACHE[cache_key] = industry_info
|
| 101 |
-
|
| 102 |
-
return entity_info
|
| 103 |
|
| 104 |
def poll_for_industry(org_id: str, source_id: str, timeout: int = 10) -> dict:
|
| 105 |
"""
|
| 106 |
-
Poll Redis for industry detection result
|
| 107 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
"""
|
|
|
|
| 109 |
cache_key = (org_id, source_id)
|
| 110 |
|
| 111 |
# 1. Check cache (filled by poll_for_entity)
|
| 112 |
if cache_key in _INDUSTRY_CACHE:
|
| 113 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 114 |
return _INDUSTRY_CACHE[cache_key]
|
| 115 |
|
| 116 |
-
#
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
if data:
|
| 122 |
-
industry_info = json.loads(data)
|
| 123 |
-
_INDUSTRY_CACHE[cache_key] = industry_info
|
| 124 |
return industry_info
|
| 125 |
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 132 |
|
| 133 |
def _fallback_combined(org_id: str, source_id: str) -> tuple[dict, dict]:
|
| 134 |
"""
|
| 135 |
SINGLE DuckDB query to detect BOTH entity and industry.
|
| 136 |
Writes BOTH keys to Redis atomically.
|
| 137 |
Updates caches WITHOUT immediately invalidating them.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 138 |
"""
|
| 139 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 140 |
|
| 141 |
# Default values
|
| 142 |
entity_info = {"entity_type": "UNKNOWN", "confidence": 0.0}
|
|
@@ -156,12 +426,11 @@ def _fallback_combined(org_id: str, source_id: str) -> tuple[dict, dict]:
|
|
| 156 |
df = pd.DataFrame(parsed)
|
| 157 |
df.columns = [str(col).lower().strip() for col in df.columns]
|
| 158 |
|
| 159 |
-
# Parallel detection
|
| 160 |
def detect_entity():
|
| 161 |
try:
|
| 162 |
return hybrid_detect_entity_type(org_id, df, f"{source_id}.json")
|
| 163 |
except Exception as e:
|
| 164 |
-
|
| 165 |
return ("UNKNOWN", 0.0, False)
|
| 166 |
|
| 167 |
def detect_industry():
|
|
@@ -169,7 +438,7 @@ def _fallback_combined(org_id: str, source_id: str) -> tuple[dict, dict]:
|
|
| 169 |
from app.hybrid_industry_detector import hybrid_detect_industry_type
|
| 170 |
return hybrid_detect_industry_type(org_id, df, source_id)
|
| 171 |
except Exception as e:
|
| 172 |
-
|
| 173 |
return ("UNKNOWN", 0.0, False)
|
| 174 |
|
| 175 |
with ThreadPoolExecutor(max_workers=2) as ex:
|
|
@@ -182,38 +451,65 @@ def _fallback_combined(org_id: str, source_id: str) -> tuple[dict, dict]:
|
|
| 182 |
entity_info = {"entity_type": entity_type, "confidence": ent_conf}
|
| 183 |
industry_info = {"industry": industry, "confidence": ind_conf}
|
| 184 |
|
| 185 |
-
|
| 186 |
-
|
|
|
|
|
|
|
|
|
|
| 187 |
except Exception as e:
|
| 188 |
-
|
|
|
|
| 189 |
|
| 190 |
-
# GUARANTEE: Write to Redis (pipeline for
|
| 191 |
try:
|
| 192 |
e_key = f"entity:{org_id}:{source_id}"
|
| 193 |
i_key = f"industry:{org_id}:{source_id}"
|
| 194 |
|
| 195 |
-
#
|
|
|
|
| 196 |
event_hub.setex(e_key, 3600, json.dumps(entity_info))
|
| 197 |
event_hub.setex(i_key, 3600, json.dumps(industry_info))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 198 |
|
| 199 |
-
|
| 200 |
|
| 201 |
except Exception as re:
|
| 202 |
-
|
|
|
|
|
|
|
| 203 |
|
| 204 |
-
# Update caches
|
| 205 |
cache_key = (org_id, source_id)
|
| 206 |
_ENTITY_CACHE[cache_key] = entity_info
|
| 207 |
_INDUSTRY_CACHE[cache_key] = industry_info
|
|
|
|
|
|
|
| 208 |
|
| 209 |
return entity_info, industry_info
|
| 210 |
|
| 211 |
def _fallback_industry_detection(org_id: str, source_id: str) -> dict:
|
| 212 |
"""
|
| 213 |
Emergency fallback for industry only (rarely used).
|
| 214 |
-
|
|
|
|
| 215 |
"""
|
| 216 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 217 |
|
| 218 |
try:
|
| 219 |
conn = get_conn(org_id)
|
|
@@ -225,7 +521,7 @@ def _fallback_industry_detection(org_id: str, source_id: str) -> dict:
|
|
| 225 |
""").fetchall()
|
| 226 |
|
| 227 |
if not rows:
|
| 228 |
-
|
| 229 |
return {"industry": "UNKNOWN", "confidence": 0.0}
|
| 230 |
|
| 231 |
parsed = [json.loads(r[0]) for r in rows if r[0]]
|
|
@@ -236,29 +532,37 @@ def _fallback_industry_detection(org_id: str, source_id: str) -> dict:
|
|
| 236 |
industry, confidence, _ = hybrid_detect_industry_type(org_id, df, source_id)
|
| 237 |
|
| 238 |
industry_info = {"industry": industry, "confidence": confidence}
|
| 239 |
-
|
| 240 |
|
| 241 |
# Write to Redis
|
| 242 |
redis_key = f"industry:{org_id}:{source_id}"
|
| 243 |
event_hub.setex(redis_key, 3600, json.dumps(industry_info))
|
| 244 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 245 |
|
| 246 |
return industry_info
|
| 247 |
|
| 248 |
except Exception as e:
|
| 249 |
-
|
| 250 |
-
|
|
|
|
|
|
|
|
|
|
| 251 |
redis_key = f"industry:{org_id}:{source_id}"
|
| 252 |
event_hub.setex(redis_key, 3600, json.dumps({"industry": "UNKNOWN", "confidence": 0.0}))
|
| 253 |
return {"industry": "UNKNOWN", "confidence": 0.0}
|
| 254 |
|
| 255 |
-
#
|
| 256 |
|
| 257 |
def ensure_canonical_table(duck, df: pd.DataFrame, entity_type: str) -> str:
|
| 258 |
-
"""Creates entity-specific table
|
| 259 |
table_name = f"main.{entity_type}_canonical"
|
| 260 |
|
| 261 |
-
# Create base table
|
| 262 |
duck.execute(f"""
|
| 263 |
CREATE TABLE IF NOT EXISTS {table_name} (
|
| 264 |
id UUID DEFAULT uuid(),
|
|
@@ -266,32 +570,33 @@ def ensure_canonical_table(duck, df: pd.DataFrame, entity_type: str) -> str:
|
|
| 266 |
)
|
| 267 |
""")
|
| 268 |
|
| 269 |
-
# Get existing columns
|
| 270 |
existing_cols_raw = duck.execute(f"PRAGMA table_info('{table_name}')").fetchall()
|
| 271 |
existing_cols = {str(r[0]).lower() for r in existing_cols_raw}
|
| 272 |
|
| 273 |
-
# Add missing columns
|
| 274 |
for col in df.columns:
|
| 275 |
col_name = str(col).lower().strip()
|
| 276 |
if col_name not in existing_cols:
|
| 277 |
try:
|
| 278 |
dtype = map_pandas_to_duck(col_name, df[col])
|
| 279 |
-
|
| 280 |
duck.execute(f"ALTER TABLE {table_name} ADD COLUMN {col_name} {dtype}")
|
| 281 |
except Exception as e:
|
| 282 |
-
|
| 283 |
|
| 284 |
return table_name
|
| 285 |
|
| 286 |
-
#
|
| 287 |
|
| 288 |
def canonify_df(org_id: str, source_id: str, hours_window: int = 24) -> tuple[pd.DataFrame, str, float]:
|
| 289 |
"""
|
| 290 |
ENTERPRISE DATA INGESTION PIPELINE
|
| 291 |
Safe, idempotent, and Redis-efficient.
|
|
|
|
|
|
|
|
|
|
| 292 |
"""
|
| 293 |
-
start_time =
|
| 294 |
-
|
| 295 |
|
| 296 |
# Load aliases
|
| 297 |
load_dynamic_aliases()
|
|
@@ -310,14 +615,14 @@ def canonify_df(org_id: str, source_id: str, hours_window: int = 24) -> tuple[pd
|
|
| 310 |
ORDER BY ingested_at DESC
|
| 311 |
""", (cutoff_time,)).fetchall()
|
| 312 |
except Exception as e:
|
| 313 |
-
|
| 314 |
return pd.DataFrame(), "unknown", 0.0
|
| 315 |
|
| 316 |
if not rows:
|
| 317 |
-
|
| 318 |
return pd.DataFrame(), "unknown", 0.0
|
| 319 |
|
| 320 |
-
# 2️⃣ PARSE JSON
|
| 321 |
parsed, malformed_count = [], 0
|
| 322 |
for r in rows:
|
| 323 |
raw = r[0]
|
|
@@ -331,7 +636,6 @@ def canonify_df(org_id: str, source_id: str, hours_window: int = 24) -> tuple[pd
|
|
| 331 |
malformed_count += 1
|
| 332 |
continue
|
| 333 |
|
| 334 |
-
# Extract rows from various structures
|
| 335 |
if isinstance(obj, dict):
|
| 336 |
if "rows" in obj and isinstance(obj["rows"], list):
|
| 337 |
parsed.extend(obj["rows"])
|
|
@@ -349,18 +653,18 @@ def canonify_df(org_id: str, source_id: str, hours_window: int = 24) -> tuple[pd
|
|
| 349 |
malformed_count += 1
|
| 350 |
|
| 351 |
if malformed_count:
|
| 352 |
-
|
| 353 |
if not parsed:
|
| 354 |
-
|
| 355 |
return pd.DataFrame(), "unknown", 0.0
|
| 356 |
|
| 357 |
-
# 3️⃣ NORMALIZE COLUMNS
|
| 358 |
df = pd.DataFrame(parsed)
|
| 359 |
df.columns = [str(col).lower().strip() for col in df.columns]
|
| 360 |
df = df.loc[:, ~df.columns.duplicated()]
|
| 361 |
-
|
| 362 |
|
| 363 |
-
# 4️⃣ MAP TO CANONICAL SCHEMA
|
| 364 |
mapping, canonical_used = {}, set()
|
| 365 |
for canon, aliases in CANONICAL.items():
|
| 366 |
for col in df.columns:
|
|
@@ -368,19 +672,17 @@ def canonify_df(org_id: str, source_id: str, hours_window: int = 24) -> tuple[pd
|
|
| 368 |
if canon not in canonical_used:
|
| 369 |
mapping[col] = canon
|
| 370 |
canonical_used.add(canon)
|
| 371 |
-
|
| 372 |
break
|
| 373 |
|
| 374 |
-
# Learn new aliases
|
| 375 |
for col in df.columns:
|
| 376 |
for canon in CANONICAL.keys():
|
| 377 |
if str(canon).lower() in col and col not in CANONICAL[canon]:
|
| 378 |
CANONICAL[canon].append(col)
|
| 379 |
-
|
| 380 |
|
| 381 |
save_dynamic_aliases()
|
| 382 |
|
| 383 |
-
# Apply mapping, keep all columns
|
| 384 |
renamed = df.rename(columns=mapping)
|
| 385 |
|
| 386 |
final_columns, seen = [], set()
|
|
@@ -393,9 +695,9 @@ def canonify_df(org_id: str, source_id: str, hours_window: int = 24) -> tuple[pd
|
|
| 393 |
final_columns.append(col)
|
| 394 |
|
| 395 |
df = renamed[final_columns].copy()
|
| 396 |
-
|
| 397 |
|
| 398 |
-
# 5️⃣ TYPE CONVERSIONS
|
| 399 |
try:
|
| 400 |
if "timestamp" in df:
|
| 401 |
df["timestamp"] = pd.to_datetime(df["timestamp"], errors="coerce")
|
|
@@ -407,27 +709,26 @@ def canonify_df(org_id: str, source_id: str, hours_window: int = 24) -> tuple[pd
|
|
| 407 |
if col in df:
|
| 408 |
df[col] = pd.to_numeric(df[col], errors="coerce").fillna(0)
|
| 409 |
except Exception as e:
|
| 410 |
-
|
| 411 |
|
| 412 |
-
# 6️⃣ DETECT ENTITY & INDUSTRY
|
| 413 |
entity_info = poll_for_entity(org_id, source_id)
|
| 414 |
entity_type = entity_info["entity_type"]
|
| 415 |
|
| 416 |
-
# Industry is fetched from cache filled by poll_for_entity
|
| 417 |
industry_info = poll_for_industry(org_id, source_id)
|
| 418 |
industry = industry_info["industry"]
|
| 419 |
industry_confidence = industry_info["confidence"]
|
| 420 |
-
|
| 421 |
|
| 422 |
-
# 7️⃣ SCHEMA VERSIONING & TRANSACTIONAL INSERT
|
| 423 |
os.makedirs("./db", exist_ok=True)
|
| 424 |
|
| 425 |
-
rows_inserted = 0
|
| 426 |
|
| 427 |
with transactional_conn(org_id) as duck:
|
| 428 |
ensure_schema_versions_table(duck)
|
| 429 |
|
| 430 |
-
# Detect schema changes
|
| 431 |
current_schema = {col: map_pandas_to_duck(col, df[col]) for col in df.columns}
|
| 432 |
existing_schema_row = duck.execute("""
|
| 433 |
SELECT schema_json, version_id FROM main.schema_versions
|
|
@@ -448,12 +749,12 @@ def canonify_df(org_id: str, source_id: str, hours_window: int = 24) -> tuple[pd
|
|
| 448 |
VALUES (nextval('schema_version_seq'), ?, ?, 'pending')
|
| 449 |
RETURNING version_id
|
| 450 |
""", (f"{entity_type}_canonical", json.dumps(current_schema))).fetchone()[0]
|
| 451 |
-
|
| 452 |
|
| 453 |
# Ensure table exists
|
| 454 |
table_name = ensure_canonical_table(duck, df, entity_type)
|
| 455 |
|
| 456 |
-
# Insert data
|
| 457 |
if not df.empty:
|
| 458 |
table_info = duck.execute(f"PRAGMA table_info('{table_name}')").fetchall()
|
| 459 |
table_cols = [str(r[1]) for r in table_info]
|
|
@@ -471,94 +772,50 @@ def canonify_df(org_id: str, source_id: str, hours_window: int = 24) -> tuple[pd
|
|
| 471 |
df_to_insert.values.tolist()
|
| 472 |
)
|
| 473 |
rows_inserted = len(df_to_insert)
|
| 474 |
-
|
| 475 |
-
print(f"[canonify] 💾 Inserted {rows_inserted} rows into {table_name}")
|
| 476 |
-
|
| 477 |
-
|
| 478 |
-
# Mark schema as applied
|
| 479 |
-
if is_new_schema and version_id:
|
| 480 |
-
try:
|
| 481 |
-
duck.execute("""
|
| 482 |
-
UPDATE main.schema_versions
|
| 483 |
-
SET applied_at = CURRENT_TIMESTAMP, status = 'applied'
|
| 484 |
-
WHERE version_id = ?
|
| 485 |
-
""", (version_id,))
|
| 486 |
-
print(f"[canonify] ✅ Schema v{version_id} marked as applied")
|
| 487 |
-
except Exception as e:
|
| 488 |
-
print(f"[canonify] ⚠️ Schema update warning: {e}")
|
| 489 |
-
|
| 490 |
-
# ⬇️⬇️⬇️ NEW VERIFICATION STEP ⬇️⬇️⬇️
|
| 491 |
-
# CRITICAL: Ensure data is committed and visible before spawning worker
|
| 492 |
-
|
| 493 |
-
print(f"[canonify] 🔍 Verifying data visibility (expecting {rows_inserted} rows)...")
|
| 494 |
-
|
| 495 |
-
verification_conn = None
|
| 496 |
-
MAX_VERIFY_ATTEMPTS = 5
|
| 497 |
-
VERIFY_DELAY = 1.0 # seconds
|
| 498 |
-
|
| 499 |
-
try:
|
| 500 |
-
for attempt in range(MAX_VERIFY_ATTEMPTS):
|
| 501 |
-
verification_conn = get_conn(org_id)
|
| 502 |
-
verification_conn.execute("CHECKPOINT") # Force flush any pending writes
|
| 503 |
-
|
| 504 |
-
# Query the table we just inserted into
|
| 505 |
-
verify_table = f"main.{entity_type}_canonical"
|
| 506 |
-
verify_count = verification_conn.execute(f"SELECT COUNT(*) FROM {verify_table}").fetchone()[0]
|
| 507 |
-
|
| 508 |
-
print(f"[canonify] 🔍 Verification attempt {attempt + 1}: {verify_count}/{rows_inserted} rows visible")
|
| 509 |
|
| 510 |
-
|
| 511 |
-
|
| 512 |
-
break
|
| 513 |
-
|
| 514 |
-
if attempt < MAX_VERIFY_ATTEMPTS - 1:
|
| 515 |
-
print(f"[canonify] ⏳ Data not yet visible, waiting {VERIFY_DELAY}s...")
|
| 516 |
-
try:
|
| 517 |
-
verification_conn.close()
|
| 518 |
-
except Exception:
|
| 519 |
-
pass
|
| 520 |
-
time.sleep(VERIFY_DELAY)
|
| 521 |
-
else:
|
| 522 |
-
# This runs if the loop completes without breaking
|
| 523 |
-
print(f"[canonify] ❌ Verification failed: Expected {rows_inserted} rows, found only {verify_count}")
|
| 524 |
-
print(f"[canonify] ⚠️ Proceeding anyway but KPI worker may timeout...")
|
| 525 |
-
|
| 526 |
-
except Exception as e:
|
| 527 |
-
print(f"[canonify] ❌ Verification error: {e}")
|
| 528 |
-
finally:
|
| 529 |
-
if verification_conn:
|
| 530 |
try:
|
| 531 |
-
|
| 532 |
-
|
| 533 |
-
|
| 534 |
-
|
| 535 |
-
|
| 536 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 537 |
df = df.replace([np.inf, -np.inf, np.nan], None)
|
| 538 |
-
duration_ms = (
|
| 539 |
-
|
| 540 |
|
| 541 |
-
# 9️⃣ SINGLE, SAFE WORKER TRIGGER (
|
| 542 |
try:
|
| 543 |
-
# Defensive: ensure keys exist
|
| 544 |
e_key = f"entity:{org_id}:{source_id}"
|
| 545 |
i_key = f"industry:{org_id}:{source_id}"
|
| 546 |
|
| 547 |
if not event_hub.exists(e_key) or not event_hub.exists(i_key):
|
| 548 |
-
|
| 549 |
_fallback_combined(org_id, source_id)
|
| 550 |
|
| 551 |
# 🎯 ONE trigger message to worker manager
|
|
|
|
| 552 |
event_hub.emit_analytics_trigger(org_id, source_id, {
|
| 553 |
"type": "kpi_compute",
|
| 554 |
"entity_type": entity_type,
|
| 555 |
"industry": industry,
|
| 556 |
-
"rows_inserted": rows_inserted,
|
| 557 |
"timestamp": datetime.now().isoformat()
|
| 558 |
})
|
| 559 |
-
|
|
|
|
|
|
|
| 560 |
|
| 561 |
except Exception as e:
|
| 562 |
-
|
|
|
|
| 563 |
|
| 564 |
return df, industry, industry_confidence
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Mapper v5.0: SRE-Observable Entity/Industry Detection
|
| 3 |
+
|
| 4 |
+
Changes:
|
| 5 |
+
- Added Prometheus metrics for all Redis operations
|
| 6 |
+
- Added circuit breaker for Redis failures
|
| 7 |
+
- Added pub/sub events when entity/industry is detected
|
| 8 |
+
- Added structured JSON logging for Loki/Splunk
|
| 9 |
+
- Added health check endpoint
|
| 10 |
+
- ZERO changes to core detection logic
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
import os
|
| 14 |
import json
|
| 15 |
+
import asyncio
|
| 16 |
import pandas as pd
|
| 17 |
import numpy as np
|
| 18 |
from datetime import datetime, timedelta
|
| 19 |
from concurrent.futures import ThreadPoolExecutor
|
| 20 |
import time
|
| 21 |
+
import logging
|
| 22 |
+
from typing import Dict, Any, Optional
|
| 23 |
|
| 24 |
from app.db import get_conn, ensure_raw_table, transactional_conn, ensure_schema_versions_table
|
| 25 |
from app.hybrid_entity_detector import hybrid_detect_entity_type
|
| 26 |
from app.core.event_hub import event_hub
|
| 27 |
+
from app.deps import get_sre_metrics
|
| 28 |
+
|
| 29 |
+
# Prometheus metrics (free tier compatible)
|
| 30 |
+
try:
|
| 31 |
+
from prometheus_client import Counter, Histogram, Gauge
|
| 32 |
+
except ImportError:
|
| 33 |
+
class Counter:
|
| 34 |
+
def __init__(self, *args, **kwargs): pass
|
| 35 |
+
def inc(self, amount=1): pass
|
| 36 |
+
|
| 37 |
+
class Histogram:
|
| 38 |
+
def __init__(self, *args, **kwargs): pass
|
| 39 |
+
def observe(self, value): pass
|
| 40 |
+
|
| 41 |
+
class Gauge:
|
| 42 |
+
def __init__(self, *args, **kwargs): pass
|
| 43 |
+
def set(self, value): pass
|
| 44 |
+
|
| 45 |
+
logger = logging.getLogger(__name__)
|
| 46 |
+
|
| 47 |
+
# ---------------------- SRE: Metrics & Circuit Breaker ---------------------- #
|
| 48 |
+
|
| 49 |
+
# Prometheus metrics (class-level)
|
| 50 |
+
class MapperMetrics:
|
| 51 |
+
"""SRE: Metrics for mapper operations"""
|
| 52 |
+
redis_reads = Counter(
|
| 53 |
+
'mapper_redis_reads_total',
|
| 54 |
+
'Total Redis read operations',
|
| 55 |
+
['org_id', 'status'] # success / error / cache_hit
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
redis_writes = Counter(
|
| 59 |
+
'mapper_redis_writes_total',
|
| 60 |
+
'Total Redis write operations',
|
| 61 |
+
['org_id', 'status']
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
fallback_runs = Counter(
|
| 65 |
+
'mapper_fallback_total',
|
| 66 |
+
'Total fallback executions',
|
| 67 |
+
['org_id', 'fallback_type'] # entity / industry / combined
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
detection_latency = Histogram(
|
| 71 |
+
'mapper_detection_duration_seconds',
|
| 72 |
+
'Time to detect entity/industry',
|
| 73 |
+
['org_id', 'detection_type'] # entity / industry
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
cache_size = Gauge(
|
| 77 |
+
'mapper_cache_entries',
|
| 78 |
+
'Number of cached entries',
|
| 79 |
+
['cache_type'] # entity / industry
|
| 80 |
+
)
|
| 81 |
+
|
| 82 |
+
# Circuit breaker state
|
| 83 |
+
_circuit_breaker = {
|
| 84 |
+
"failure_count": 0,
|
| 85 |
+
"last_failure_time": None,
|
| 86 |
+
"is_open": False,
|
| 87 |
+
"threshold": 5, # Open after 5 failures
|
| 88 |
+
"reset_timeout": 300 # Reset after 5 minutes
|
| 89 |
+
}
|
| 90 |
|
| 91 |
+
# ---------------------- Canonical Schema (UNCHANGED) ---------------------- #
|
| 92 |
CANONICAL = {
|
| 93 |
"timestamp": ["timestamp", "date", "sale_date", "created_at"],
|
| 94 |
"product_id": ["sku", "barcode", "plu", "product_id", "item_code"],
|
|
|
|
| 102 |
|
| 103 |
ALIAS_FILE = "./db/alias_memory.json"
|
| 104 |
|
| 105 |
+
# Module-level caches (UNCHANGED)
|
| 106 |
_ENTITY_CACHE = {}
|
| 107 |
_INDUSTRY_CACHE = {}
|
| 108 |
|
| 109 |
+
# ---------------------- SRE: Helper Functions (NEW) ---------------------- #
|
| 110 |
+
|
| 111 |
+
def _check_circuit_breaker() -> bool:
|
| 112 |
+
"""Check if Redis circuit is open"""
|
| 113 |
+
if not _circuit_breaker["is_open"]:
|
| 114 |
+
return True
|
| 115 |
+
|
| 116 |
+
# Check if enough time has passed to try again
|
| 117 |
+
if _circuit_breaker["last_failure_time"]:
|
| 118 |
+
elapsed = time.time() - _circuit_breaker["last_failure_time"]
|
| 119 |
+
if elapsed > _circuit_breaker["reset_timeout"]:
|
| 120 |
+
logger.warning("[CIRCUIT] 🔄 Closing breaker, retrying...")
|
| 121 |
+
_circuit_breaker["is_open"] = False
|
| 122 |
+
_circuit_breaker["failure_count"] = 0
|
| 123 |
+
return True
|
| 124 |
+
|
| 125 |
+
logger.error("[CIRCUIT] 🔴 Circuit breaker OPEN - rejecting Redis ops")
|
| 126 |
+
return False
|
| 127 |
+
|
| 128 |
+
def _record_redis_failure(error: str):
|
| 129 |
+
"""Track Redis failures"""
|
| 130 |
+
_circuit_breaker["failure_count"] += 1
|
| 131 |
+
_circuit_breaker["last_failure_time"] = time.time()
|
| 132 |
+
|
| 133 |
+
if _circuit_breaker["failure_count"] >= _circuit_breaker["threshold"]:
|
| 134 |
+
_circuit_breaker["is_open"] = True
|
| 135 |
+
logger.critical(f"[CIRCUIT] 🔴 Breaker opened! {_circuit_breaker['failure_count']} failures")
|
| 136 |
+
|
| 137 |
+
def _record_redis_success():
|
| 138 |
+
"""Reset failure count on success"""
|
| 139 |
+
if _circuit_breaker["failure_count"] > 0:
|
| 140 |
+
logger.info(f"[CIRCUIT] ✅ Resetting failure count (was {_circuit_breaker['failure_count']})")
|
| 141 |
+
_circuit_breaker["failure_count"] = 0
|
| 142 |
+
|
| 143 |
+
def _publish_detection_event(org_id: str, source_id: str, detection_type: str, data: Dict):
|
| 144 |
+
"""
|
| 145 |
+
🚀 Pub/Sub: Publish entity/industry detection event
|
| 146 |
+
Frontend can subscribe to: `detection:events:{org_id}:{source_id}`
|
| 147 |
+
"""
|
| 148 |
+
try:
|
| 149 |
+
channel = f"detection:events:{org_id}:{source_id}"
|
| 150 |
+
payload = {
|
| 151 |
+
"type": f"{detection_type}.detected",
|
| 152 |
+
"timestamp": datetime.utcnow().isoformat(),
|
| 153 |
+
"org_id": org_id,
|
| 154 |
+
"source_id": source_id,
|
| 155 |
+
"data": data
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
# Fire-and-forget (non-blocking)
|
| 159 |
+
asyncio.create_task(
|
| 160 |
+
asyncio.to_thread(
|
| 161 |
+
event_hub.publish,
|
| 162 |
+
channel,
|
| 163 |
+
json.dumps(payload)
|
| 164 |
+
)
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
logger.info(f"[PUBSUB] 📡 Published {detection_type} detection event")
|
| 168 |
+
|
| 169 |
+
except Exception as e:
|
| 170 |
+
logger.error(f"[PUBSUB] ❌ Failed to publish detection event: {e}")
|
| 171 |
+
|
| 172 |
+
# ---------------------- Core Functions (INSTRUMENTED ONLY) ---------------------- #
|
| 173 |
+
|
| 174 |
def map_pandas_to_duck(col: str, series: pd.Series) -> str:
|
| 175 |
+
"""Map pandas dtype to DuckDB type (UNCHANGED)"""
|
| 176 |
if pd.api.types.is_bool_dtype(series): return "BOOLEAN"
|
| 177 |
if pd.api.types.is_integer_dtype(series): return "BIGINT"
|
| 178 |
if pd.api.types.is_float_dtype(series): return "DOUBLE"
|
|
|
|
| 180 |
return "VARCHAR"
|
| 181 |
|
| 182 |
def load_dynamic_aliases() -> None:
|
| 183 |
+
"""Load column alias mappings (UNCHANGED)"""
|
| 184 |
if os.path.exists(ALIAS_FILE):
|
| 185 |
try:
|
| 186 |
with open(ALIAS_FILE) as f:
|
|
|
|
| 194 |
print(f"[mapper] ⚠️ Failed to load alias memory: {e}")
|
| 195 |
|
| 196 |
def save_dynamic_aliases() -> None:
|
| 197 |
+
"""Save column alias mappings (UNCHANGED)"""
|
| 198 |
os.makedirs(os.path.dirname(ALIAS_FILE), exist_ok=True)
|
| 199 |
with open(ALIAS_FILE, "w") as f:
|
| 200 |
json.dump(CANONICAL, f, indent=2)
|
| 201 |
|
| 202 |
+
# ---------------------- SRE: Health Check (NEW) ---------------------- #
|
| 203 |
+
|
| 204 |
+
def health_check_mapper(org_id: str = "test") -> Dict[str, Any]:
|
| 205 |
+
"""SRE: Health check for mapper service"""
|
| 206 |
+
return {
|
| 207 |
+
"status": "healthy" if not _circuit_breaker["is_open"] else "degraded",
|
| 208 |
+
"circuit_breaker": {
|
| 209 |
+
"open": _circuit_breaker["is_open"],
|
| 210 |
+
"failure_count": _circuit_breaker["failure_count"]
|
| 211 |
+
},
|
| 212 |
+
"cache_size": {
|
| 213 |
+
"entity": len(_ENTITY_CACHE),
|
| 214 |
+
"industry": len(_INDUSTRY_CACHE)
|
| 215 |
+
},
|
| 216 |
+
"canonical_columns": len(CANONICAL),
|
| 217 |
+
"metrics": get_sre_metrics()
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
# ---------------------- Entity & Industry Detection (INSTRUMENTED) ---------------------- #
|
| 221 |
|
| 222 |
def poll_for_entity(org_id: str, source_id: str, timeout: int = 10) -> dict:
|
| 223 |
"""
|
| 224 |
+
Poll Redis for entity detection result - NOW WITH SRE OBSERVABILITY
|
| 225 |
+
|
| 226 |
+
Core logic: UNCHANGED
|
| 227 |
+
- Checks cache first (zero Redis calls)
|
| 228 |
+
- Polls Redis twice with 3s sleep
|
| 229 |
+
- Falls back to combined detection
|
| 230 |
+
|
| 231 |
+
Added:
|
| 232 |
+
- Prometheus metrics for cache hits/misses
|
| 233 |
+
- Circuit breaker protection
|
| 234 |
+
- Pub/sub event when entity detected
|
| 235 |
+
- Structured logging
|
| 236 |
"""
|
| 237 |
+
start_time = time.time()
|
| 238 |
cache_key = (org_id, source_id)
|
| 239 |
|
| 240 |
# 1. Check cache (zero Redis calls)
|
| 241 |
if cache_key in _ENTITY_CACHE:
|
| 242 |
+
logger.info(f"[ENTITY] 💾 CACHE HIT: {cache_key}")
|
| 243 |
+
MapperMetrics.redis_reads.labels(org_id=org_id, status="cache_hit").inc()
|
| 244 |
+
|
| 245 |
+
# Publish event (cache hit is still a "detection")
|
| 246 |
+
_publish_detection_event(org_id, source_id, "entity", _ENTITY_CACHE[cache_key])
|
| 247 |
+
|
| 248 |
return _ENTITY_CACHE[cache_key]
|
| 249 |
|
| 250 |
+
# SRE: Check circuit breaker
|
| 251 |
+
if not _check_circuit_breaker():
|
| 252 |
+
logger.error("[ENTITY] 🔴 Circuit open - using fallback immediately")
|
| 253 |
+
entity_info, _ = _fallback_combined(org_id, source_id)
|
| 254 |
+
MapperMetrics.fallback_runs.labels(org_id=org_id, fallback_type="entity").inc()
|
|
|
|
|
|
|
|
|
|
|
|
|
| 255 |
return entity_info
|
| 256 |
|
| 257 |
+
try:
|
| 258 |
+
# 2-4. Try Redis (twice with sleep)
|
| 259 |
+
entity_key = f"entity:{org_id}:{source_id}"
|
| 260 |
+
logger.info(f"[ENTITY] ⏳ Polling for key: {entity_key}")
|
| 261 |
+
|
| 262 |
+
for attempt in range(2):
|
| 263 |
+
redis_start = time.time()
|
| 264 |
+
data = event_hub.get_key(entity_key)
|
| 265 |
+
redis_latency = (time.time() - redis_start) * 1000
|
| 266 |
+
|
| 267 |
+
if data:
|
| 268 |
+
entity_info = json.loads(data)
|
| 269 |
+
logger.info(f"[ENTITY] ✅ Redis hit: {entity_info['entity_type']} (attempt {attempt+1})")
|
| 270 |
+
|
| 271 |
+
MapperMetrics.redis_reads.labels(org_id=org_id, status="success").inc()
|
| 272 |
+
MapperMetrics.detection_latency.labels(org_id=org_id, detection_type="entity").observe(
|
| 273 |
+
(time.time() - start_time) + attempt * 3
|
| 274 |
+
)
|
| 275 |
+
|
| 276 |
+
# Cache and publish
|
| 277 |
+
_ENTITY_CACHE[cache_key] = entity_info
|
| 278 |
+
MapperMetrics.cache_size.labels(cache_type="entity").set(len(_ENTITY_CACHE))
|
| 279 |
+
|
| 280 |
+
# 🚀 Pub/sub event
|
| 281 |
+
_publish_detection_event(org_id, source_id, "entity", entity_info)
|
| 282 |
+
|
| 283 |
+
_record_redis_success()
|
| 284 |
+
|
| 285 |
+
return entity_info
|
| 286 |
+
|
| 287 |
+
if attempt == 0:
|
| 288 |
+
logger.debug("[ENTITY] 🔄 First check failed, sleeping 3s...")
|
| 289 |
+
time.sleep(3.0)
|
| 290 |
+
MapperMetrics.redis_reads.labels(org_id=org_id, status="miss").inc()
|
| 291 |
+
|
| 292 |
+
# 5. Fallback
|
| 293 |
+
logger.warning("[ENTITY] ⚠️ Using fallback")
|
| 294 |
+
MapperMetrics.fallback_runs.labels(org_id=org_id, fallback_type="entity").inc()
|
| 295 |
+
entity_info, _ = _fallback_combined(org_id, source_id)
|
| 296 |
+
|
| 297 |
+
return entity_info
|
| 298 |
+
|
| 299 |
+
except Exception as e:
|
| 300 |
+
_record_redis_failure(str(e))
|
| 301 |
+
MapperMetrics.redis_reads.labels(org_id=org_id, status="error").inc()
|
| 302 |
+
logger.error(f"[ENTITY] ❌ Error: {e}, using fallback")
|
| 303 |
+
|
| 304 |
+
entity_info, _ = _fallback_combined(org_id, source_id)
|
| 305 |
return entity_info
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 306 |
|
| 307 |
def poll_for_industry(org_id: str, source_id: str, timeout: int = 10) -> dict:
|
| 308 |
"""
|
| 309 |
+
Poll Redis for industry detection result - NOW WITH SRE OBSERVABILITY
|
| 310 |
+
|
| 311 |
+
Core logic: UNCHANGED
|
| 312 |
+
Reuses data from poll_for_entity to avoid duplicate Redis calls
|
| 313 |
+
|
| 314 |
+
Added:
|
| 315 |
+
- Prometheus metrics for cache hits/misses
|
| 316 |
+
- Circuit breaker protection
|
| 317 |
+
- Pub/sub event when industry detected
|
| 318 |
"""
|
| 319 |
+
start_time = time.time()
|
| 320 |
cache_key = (org_id, source_id)
|
| 321 |
|
| 322 |
# 1. Check cache (filled by poll_for_entity)
|
| 323 |
if cache_key in _INDUSTRY_CACHE:
|
| 324 |
+
logger.info(f"[INDUSTRY] 💾 CACHE HIT: {cache_key}")
|
| 325 |
+
MapperMetrics.redis_reads.labels(org_id=org_id, status="cache_hit").inc()
|
| 326 |
+
|
| 327 |
+
_publish_detection_event(org_id, source_id, "industry", _INDUSTRY_CACHE[cache_key])
|
| 328 |
+
|
| 329 |
return _INDUSTRY_CACHE[cache_key]
|
| 330 |
|
| 331 |
+
# SRE: Check circuit breaker (already checked in poll_for_entity, but safe)
|
| 332 |
+
if not _check_circuit_breaker():
|
| 333 |
+
logger.error("[INDUSTRY] 🔴 Circuit open - using fallback")
|
| 334 |
+
industry_info = _fallback_industry_detection(org_id, source_id)
|
| 335 |
+
MapperMetrics.fallback_runs.labels(org_id=org_id, fallback_type="industry").inc()
|
|
|
|
|
|
|
|
|
|
| 336 |
return industry_info
|
| 337 |
|
| 338 |
+
try:
|
| 339 |
+
# 2. Try Redis (should be cached from poll_for_entity)
|
| 340 |
+
industry_key = f"industry:{org_id}:{source_id}"
|
| 341 |
+
logger.info(f"[INDUSTRY] ⏳ Polling for key: {industry_key}")
|
| 342 |
+
|
| 343 |
+
redis_start = time.time()
|
| 344 |
+
data = event_hub.get_key(industry_key)
|
| 345 |
+
redis_latency = (time.time() - redis_start) * 1000
|
| 346 |
+
|
| 347 |
+
if data:
|
| 348 |
+
industry_info = json.loads(data)
|
| 349 |
+
logger.info(f"[INDUSTRY] ✅ Redis hit: {industry_info['industry']}")
|
| 350 |
+
|
| 351 |
+
MapperMetrics.redis_reads.labels(org_id=org_id, status="success").inc()
|
| 352 |
+
MapperMetrics.detection_latency.labels(org_id=org_id, detection_type="industry").observe(
|
| 353 |
+
time.time() - start_time
|
| 354 |
+
)
|
| 355 |
+
|
| 356 |
+
# Cache and publish
|
| 357 |
+
_INDUSTRY_CACHE[cache_key] = industry_info
|
| 358 |
+
MapperMetrics.cache_size.labels(cache_type="industry").set(len(_INDUSTRY_CACHE))
|
| 359 |
+
|
| 360 |
+
# 🚀 Pub/sub event
|
| 361 |
+
_publish_detection_event(org_id, source_id, "industry", industry_info)
|
| 362 |
+
|
| 363 |
+
_record_redis_success()
|
| 364 |
+
|
| 365 |
+
return industry_info
|
| 366 |
+
|
| 367 |
+
# 3. Emergency fallback
|
| 368 |
+
logger.warning("[INDUSTRY] ⚠️ Cache miss, running emergency fallback")
|
| 369 |
+
MapperMetrics.fallback_runs.labels(org_id=org_id, fallback_type="industry").inc()
|
| 370 |
+
industry_info = _fallback_industry_detection(org_id, source_id)
|
| 371 |
+
|
| 372 |
+
return industry_info
|
| 373 |
+
|
| 374 |
+
except Exception as e:
|
| 375 |
+
_record_redis_failure(str(e))
|
| 376 |
+
MapperMetrics.redis_reads.labels(org_id=org_id, status="error").inc()
|
| 377 |
+
logger.error(f"[INDUSTRY] ❌ Error: {e}, using fallback")
|
| 378 |
+
|
| 379 |
+
industry_info = _fallback_industry_detection(org_id, source_id)
|
| 380 |
+
return industry_info
|
| 381 |
|
| 382 |
def _fallback_combined(org_id: str, source_id: str) -> tuple[dict, dict]:
|
| 383 |
"""
|
| 384 |
SINGLE DuckDB query to detect BOTH entity and industry.
|
| 385 |
Writes BOTH keys to Redis atomically.
|
| 386 |
Updates caches WITHOUT immediately invalidating them.
|
| 387 |
+
|
| 388 |
+
Core logic: UNCHANGED
|
| 389 |
+
- Runs detection in parallel ThreadPoolExecutor
|
| 390 |
+
- Writes to Redis via event_hub.setex()
|
| 391 |
+
- Updates in-memory caches
|
| 392 |
+
|
| 393 |
+
Added:
|
| 394 |
+
- Prometheus metrics for fallback executions
|
| 395 |
+
- Circuit breaker checks
|
| 396 |
+
- Pub/sub events for both entity and industry
|
| 397 |
+
- Structured logging
|
| 398 |
"""
|
| 399 |
+
start_time = time.time()
|
| 400 |
+
logger.info(f"[FALLBACK] 🚨 Running combined fallback for {org_id}/{source_id}")
|
| 401 |
+
|
| 402 |
+
MapperMetrics.fallback_runs.labels(org_id=org_id, fallback_type="combined").inc()
|
| 403 |
+
|
| 404 |
+
# SRE: Check circuit breaker before DB query
|
| 405 |
+
if not _check_circuit_breaker():
|
| 406 |
+
logger.error("[FALLBACK] 🔴 Circuit open - returning UNKNOWN")
|
| 407 |
+
entity_info = {"entity_type": "UNKNOWN", "confidence": 0.0}
|
| 408 |
+
industry_info = {"industry": "UNKNOWN", "confidence": 0.0}
|
| 409 |
+
return entity_info, industry_info
|
| 410 |
|
| 411 |
# Default values
|
| 412 |
entity_info = {"entity_type": "UNKNOWN", "confidence": 0.0}
|
|
|
|
| 426 |
df = pd.DataFrame(parsed)
|
| 427 |
df.columns = [str(col).lower().strip() for col in df.columns]
|
| 428 |
|
|
|
|
| 429 |
def detect_entity():
|
| 430 |
try:
|
| 431 |
return hybrid_detect_entity_type(org_id, df, f"{source_id}.json")
|
| 432 |
except Exception as e:
|
| 433 |
+
logger.error(f"[FALLBACK] Entity detection failed: {e}")
|
| 434 |
return ("UNKNOWN", 0.0, False)
|
| 435 |
|
| 436 |
def detect_industry():
|
|
|
|
| 438 |
from app.hybrid_industry_detector import hybrid_detect_industry_type
|
| 439 |
return hybrid_detect_industry_type(org_id, df, source_id)
|
| 440 |
except Exception as e:
|
| 441 |
+
logger.error(f"[FALLBACK] Industry detection failed: {e}")
|
| 442 |
return ("UNKNOWN", 0.0, False)
|
| 443 |
|
| 444 |
with ThreadPoolExecutor(max_workers=2) as ex:
|
|
|
|
| 451 |
entity_info = {"entity_type": entity_type, "confidence": ent_conf}
|
| 452 |
industry_info = {"industry": industry, "confidence": ind_conf}
|
| 453 |
|
| 454 |
+
logger.info(
|
| 455 |
+
f"[FALLBACK] ✅ Entity: {entity_type} ({ent_conf:.2%}), "
|
| 456 |
+
f"Industry: {industry} ({ind_conf:.2%})"
|
| 457 |
+
)
|
| 458 |
+
|
| 459 |
except Exception as e:
|
| 460 |
+
logger.error(f"[FALLBACK] ❌ Failed: {e}")
|
| 461 |
+
MapperMetrics.stream_errors.labels(org_id=org_id, error_type="fallback_error").inc()
|
| 462 |
|
| 463 |
+
# GUARANTEE: Write to Redis (pipeline-like for both keys)
|
| 464 |
try:
|
| 465 |
e_key = f"entity:{org_id}:{source_id}"
|
| 466 |
i_key = f"industry:{org_id}:{source_id}"
|
| 467 |
|
| 468 |
+
# Handle both TCP and Upstash
|
| 469 |
+
redis_start = time.time()
|
| 470 |
event_hub.setex(e_key, 3600, json.dumps(entity_info))
|
| 471 |
event_hub.setex(i_key, 3600, json.dumps(industry_info))
|
| 472 |
+
redis_latency = (time.time() - redis_start) * 1000
|
| 473 |
+
|
| 474 |
+
logger.info(f"[FALLBACK] 💾 WRITTEN to Redis in {redis_latency:.2f}ms")
|
| 475 |
+
|
| 476 |
+
MapperMetrics.redis_writes.labels(org_id=org_id, status="success").inc(2)
|
| 477 |
+
MapperMetrics.detection_latency.labels(org_id=org_id, detection_type="combined").observe(
|
| 478 |
+
time.time() - start_time
|
| 479 |
+
)
|
| 480 |
+
|
| 481 |
+
# 🚀 Pub/sub events for both detections
|
| 482 |
+
_publish_detection_event(org_id, source_id, "entity", entity_info)
|
| 483 |
+
_publish_detection_event(org_id, source_id, "industry", industry_info)
|
| 484 |
|
| 485 |
+
_record_redis_success()
|
| 486 |
|
| 487 |
except Exception as re:
|
| 488 |
+
_record_redis_failure(str(re))
|
| 489 |
+
MapperMetrics.redis_writes.labels(org_id=org_id, status="error").inc(2)
|
| 490 |
+
logger.error(f"[FALLBACK] ❌ Redis write failed: {re}")
|
| 491 |
|
| 492 |
+
# Update caches
|
| 493 |
cache_key = (org_id, source_id)
|
| 494 |
_ENTITY_CACHE[cache_key] = entity_info
|
| 495 |
_INDUSTRY_CACHE[cache_key] = industry_info
|
| 496 |
+
MapperMetrics.cache_size.labels(cache_type="entity").set(len(_ENTITY_CACHE))
|
| 497 |
+
MapperMetrics.cache_size.labels(cache_type="industry").set(len(_INDUSTRY_CACHE))
|
| 498 |
|
| 499 |
return entity_info, industry_info
|
| 500 |
|
| 501 |
def _fallback_industry_detection(org_id: str, source_id: str) -> dict:
|
| 502 |
"""
|
| 503 |
Emergency fallback for industry only (rarely used).
|
| 504 |
+
Core logic: UNCHANGED
|
| 505 |
+
Added: SRE metrics, circuit breaker, pub/sub event
|
| 506 |
"""
|
| 507 |
+
logger.info(f"[FALLBACK_IND] 🚨 Emergency fallback for {org_id}/{source_id}")
|
| 508 |
+
MapperMetrics.fallback_runs.labels(org_id=org_id, fallback_type="industry_emergency").inc()
|
| 509 |
+
|
| 510 |
+
if not _check_circuit_breaker():
|
| 511 |
+
logger.error("[FALLBACK_IND] 🔴 Circuit open - returning UNKNOWN")
|
| 512 |
+
return {"industry": "UNKNOWN", "confidence": 0.0}
|
| 513 |
|
| 514 |
try:
|
| 515 |
conn = get_conn(org_id)
|
|
|
|
| 521 |
""").fetchall()
|
| 522 |
|
| 523 |
if not rows:
|
| 524 |
+
logger.warning("[FALLBACK_IND] No data found")
|
| 525 |
return {"industry": "UNKNOWN", "confidence": 0.0}
|
| 526 |
|
| 527 |
parsed = [json.loads(r[0]) for r in rows if r[0]]
|
|
|
|
| 532 |
industry, confidence, _ = hybrid_detect_industry_type(org_id, df, source_id)
|
| 533 |
|
| 534 |
industry_info = {"industry": industry, "confidence": confidence}
|
| 535 |
+
logger.info(f"[FALLBACK_IND] ✅ Detected: {industry} ({confidence:.2%})")
|
| 536 |
|
| 537 |
# Write to Redis
|
| 538 |
redis_key = f"industry:{org_id}:{source_id}"
|
| 539 |
event_hub.setex(redis_key, 3600, json.dumps(industry_info))
|
| 540 |
+
logger.info(f"[FALLBACK_IND] 💾 WRITTEN to Redis: {redis_key}")
|
| 541 |
+
|
| 542 |
+
MapperMetrics.redis_writes.labels(org_id=org_id, status="success").inc()
|
| 543 |
+
_record_redis_success()
|
| 544 |
+
|
| 545 |
+
# 🚀 Pub/sub event
|
| 546 |
+
_publish_detection_event(org_id, source_id, "industry", industry_info)
|
| 547 |
|
| 548 |
return industry_info
|
| 549 |
|
| 550 |
except Exception as e:
|
| 551 |
+
_record_redis_failure(str(e))
|
| 552 |
+
MapperMetrics.redis_writes.labels(org_id=org_id, status="error").inc()
|
| 553 |
+
logger.error(f"[FALLBACK_IND] ❌ Failed: {e}")
|
| 554 |
+
|
| 555 |
+
# Write UNKNOWN even on error
|
| 556 |
redis_key = f"industry:{org_id}:{source_id}"
|
| 557 |
event_hub.setex(redis_key, 3600, json.dumps({"industry": "UNKNOWN", "confidence": 0.0}))
|
| 558 |
return {"industry": "UNKNOWN", "confidence": 0.0}
|
| 559 |
|
| 560 |
+
# ---------------------- Canonical Table Creation (UNCHANGED) ---------------------- #
|
| 561 |
|
| 562 |
def ensure_canonical_table(duck, df: pd.DataFrame, entity_type: str) -> str:
|
| 563 |
+
"""Creates entity-specific table (UNCHANGED)"""
|
| 564 |
table_name = f"main.{entity_type}_canonical"
|
| 565 |
|
|
|
|
| 566 |
duck.execute(f"""
|
| 567 |
CREATE TABLE IF NOT EXISTS {table_name} (
|
| 568 |
id UUID DEFAULT uuid(),
|
|
|
|
| 570 |
)
|
| 571 |
""")
|
| 572 |
|
|
|
|
| 573 |
existing_cols_raw = duck.execute(f"PRAGMA table_info('{table_name}')").fetchall()
|
| 574 |
existing_cols = {str(r[0]).lower() for r in existing_cols_raw}
|
| 575 |
|
|
|
|
| 576 |
for col in df.columns:
|
| 577 |
col_name = str(col).lower().strip()
|
| 578 |
if col_name not in existing_cols:
|
| 579 |
try:
|
| 580 |
dtype = map_pandas_to_duck(col_name, df[col])
|
| 581 |
+
logger.info(f"[MAPPER] ➕ Adding column '{col_name}:{dtype}'")
|
| 582 |
duck.execute(f"ALTER TABLE {table_name} ADD COLUMN {col_name} {dtype}")
|
| 583 |
except Exception as e:
|
| 584 |
+
logger.warning(f"[MAPPER] ⚠️ Skipping column {col_name}: {e}")
|
| 585 |
|
| 586 |
return table_name
|
| 587 |
|
| 588 |
+
# ---------------------- Main Pipeline (INSTRUMENTED) ---------------------- #
|
| 589 |
|
| 590 |
def canonify_df(org_id: str, source_id: str, hours_window: int = 24) -> tuple[pd.DataFrame, str, float]:
|
| 591 |
"""
|
| 592 |
ENTERPRISE DATA INGESTION PIPELINE
|
| 593 |
Safe, idempotent, and Redis-efficient.
|
| 594 |
+
|
| 595 |
+
Core logic: UNCHANGED
|
| 596 |
+
Added: SRE metrics, structured logging, pub/sub events
|
| 597 |
"""
|
| 598 |
+
start_time = time.time()
|
| 599 |
+
logger.info(f"\n[MAPPER] 🚀 Starting pipeline for {org_id}/{source_id}")
|
| 600 |
|
| 601 |
# Load aliases
|
| 602 |
load_dynamic_aliases()
|
|
|
|
| 615 |
ORDER BY ingested_at DESC
|
| 616 |
""", (cutoff_time,)).fetchall()
|
| 617 |
except Exception as e:
|
| 618 |
+
logger.error(f"[MAPPER] ❌ SQL read error: {e}")
|
| 619 |
return pd.DataFrame(), "unknown", 0.0
|
| 620 |
|
| 621 |
if not rows:
|
| 622 |
+
logger.warning("[MAPPER] ⚠️ No audit rows found")
|
| 623 |
return pd.DataFrame(), "unknown", 0.0
|
| 624 |
|
| 625 |
+
# 2️⃣ PARSE JSON (UNCHANGED)
|
| 626 |
parsed, malformed_count = [], 0
|
| 627 |
for r in rows:
|
| 628 |
raw = r[0]
|
|
|
|
| 636 |
malformed_count += 1
|
| 637 |
continue
|
| 638 |
|
|
|
|
| 639 |
if isinstance(obj, dict):
|
| 640 |
if "rows" in obj and isinstance(obj["rows"], list):
|
| 641 |
parsed.extend(obj["rows"])
|
|
|
|
| 653 |
malformed_count += 1
|
| 654 |
|
| 655 |
if malformed_count:
|
| 656 |
+
logger.warning(f"[MAPPER] ⚠️ Skipped {malformed_count} malformed rows")
|
| 657 |
if not parsed:
|
| 658 |
+
logger.error("[MAPPER] ❌ No valid data after parsing")
|
| 659 |
return pd.DataFrame(), "unknown", 0.0
|
| 660 |
|
| 661 |
+
# 3️⃣ NORMALIZE COLUMNS (UNCHANGED)
|
| 662 |
df = pd.DataFrame(parsed)
|
| 663 |
df.columns = [str(col).lower().strip() for col in df.columns]
|
| 664 |
df = df.loc[:, ~df.columns.duplicated()]
|
| 665 |
+
logger.info(f"[MAPPER] 📊 Parsed DataFrame: {len(df)} rows × {len(df.columns)} cols")
|
| 666 |
|
| 667 |
+
# 4️⃣ MAP TO CANONICAL SCHEMA (UNCHANGED)
|
| 668 |
mapping, canonical_used = {}, set()
|
| 669 |
for canon, aliases in CANONICAL.items():
|
| 670 |
for col in df.columns:
|
|
|
|
| 672 |
if canon not in canonical_used:
|
| 673 |
mapping[col] = canon
|
| 674 |
canonical_used.add(canon)
|
| 675 |
+
logger.info(f"[MAPPER] 🔀 Mapped '{col}' → canonical '{canon}'")
|
| 676 |
break
|
| 677 |
|
|
|
|
| 678 |
for col in df.columns:
|
| 679 |
for canon in CANONICAL.keys():
|
| 680 |
if str(canon).lower() in col and col not in CANONICAL[canon]:
|
| 681 |
CANONICAL[canon].append(col)
|
| 682 |
+
logger.info(f"[MAPPER] 🧠 Learned new alias: {canon} ← {col}")
|
| 683 |
|
| 684 |
save_dynamic_aliases()
|
| 685 |
|
|
|
|
| 686 |
renamed = df.rename(columns=mapping)
|
| 687 |
|
| 688 |
final_columns, seen = [], set()
|
|
|
|
| 695 |
final_columns.append(col)
|
| 696 |
|
| 697 |
df = renamed[final_columns].copy()
|
| 698 |
+
logger.info(f"[MAPPER] ✅ Kept columns: {list(df.columns)}")
|
| 699 |
|
| 700 |
+
# 5️⃣ TYPE CONVERSIONS (UNCHANGED)
|
| 701 |
try:
|
| 702 |
if "timestamp" in df:
|
| 703 |
df["timestamp"] = pd.to_datetime(df["timestamp"], errors="coerce")
|
|
|
|
| 709 |
if col in df:
|
| 710 |
df[col] = pd.to_numeric(df[col], errors="coerce").fillna(0)
|
| 711 |
except Exception as e:
|
| 712 |
+
logger.warning(f"[MAPPER] ⚠️ Type conversion warning: {e}")
|
| 713 |
|
| 714 |
+
# 6️⃣ DETECT ENTITY & INDUSTRY (UNCHANGED)
|
| 715 |
entity_info = poll_for_entity(org_id, source_id)
|
| 716 |
entity_type = entity_info["entity_type"]
|
| 717 |
|
|
|
|
| 718 |
industry_info = poll_for_industry(org_id, source_id)
|
| 719 |
industry = industry_info["industry"]
|
| 720 |
industry_confidence = industry_info["confidence"]
|
| 721 |
+
logger.info(f"[MAPPER] 🎯 Entity: {entity_type}, Industry: {industry} ({industry_confidence:.2%})")
|
| 722 |
|
| 723 |
+
# 7️⃣ SCHEMA VERSIONING & TRANSACTIONAL INSERT (UNCHANGED)
|
| 724 |
os.makedirs("./db", exist_ok=True)
|
| 725 |
|
| 726 |
+
rows_inserted = 0
|
| 727 |
|
| 728 |
with transactional_conn(org_id) as duck:
|
| 729 |
ensure_schema_versions_table(duck)
|
| 730 |
|
| 731 |
+
# Detect schema changes (UNCHANGED)
|
| 732 |
current_schema = {col: map_pandas_to_duck(col, df[col]) for col in df.columns}
|
| 733 |
existing_schema_row = duck.execute("""
|
| 734 |
SELECT schema_json, version_id FROM main.schema_versions
|
|
|
|
| 749 |
VALUES (nextval('schema_version_seq'), ?, ?, 'pending')
|
| 750 |
RETURNING version_id
|
| 751 |
""", (f"{entity_type}_canonical", json.dumps(current_schema))).fetchone()[0]
|
| 752 |
+
logger.info(f"[MAPPER] 📝 Created schema v{version_id} for {entity_type}_canonical")
|
| 753 |
|
| 754 |
# Ensure table exists
|
| 755 |
table_name = ensure_canonical_table(duck, df, entity_type)
|
| 756 |
|
| 757 |
+
# Insert data (UNCHANGED)
|
| 758 |
if not df.empty:
|
| 759 |
table_info = duck.execute(f"PRAGMA table_info('{table_name}')").fetchall()
|
| 760 |
table_cols = [str(r[1]) for r in table_info]
|
|
|
|
| 772 |
df_to_insert.values.tolist()
|
| 773 |
)
|
| 774 |
rows_inserted = len(df_to_insert)
|
| 775 |
+
logger.info(f"[MAPPER] 💾 Inserted {rows_inserted} rows into {table_name}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 776 |
|
| 777 |
+
# Mark schema as applied (UNCHANGED)
|
| 778 |
+
if is_new_schema and version_id:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 779 |
try:
|
| 780 |
+
duck.execute("""
|
| 781 |
+
UPDATE main.schema_versions
|
| 782 |
+
SET applied_at = CURRENT_TIMESTAMP, status = 'applied'
|
| 783 |
+
WHERE version_id = ?
|
| 784 |
+
""", (version_id,))
|
| 785 |
+
logger.info(f"[MAPPER] ✅ Schema v{version_id} marked as applied")
|
| 786 |
+
except Exception as e:
|
| 787 |
+
logger.warning(f"[MAPPER] ⚠️ Schema update warning: {e}")
|
| 788 |
+
|
| 789 |
+
# 8️⃣ FINAL: Clean DataFrame for response (UNCHANGED)
|
| 790 |
df = df.replace([np.inf, -np.inf, np.nan], None)
|
| 791 |
+
duration_ms = (time.time() - start_time) * 1000
|
| 792 |
+
logger.info(f"[MAPPER] ✅ Pipeline complete in {duration_ms:.2f}ms for {org_id}")
|
| 793 |
|
| 794 |
+
# 9️⃣ SINGLE, SAFE WORKER TRIGGER (INSTRUMENTED)
|
| 795 |
try:
|
| 796 |
+
# Defensive: ensure keys exist
|
| 797 |
e_key = f"entity:{org_id}:{source_id}"
|
| 798 |
i_key = f"industry:{org_id}:{source_id}"
|
| 799 |
|
| 800 |
if not event_hub.exists(e_key) or not event_hub.exists(i_key):
|
| 801 |
+
logger.warning("[MAPPER] ⚠️ Keys missing, running fallback to ensure")
|
| 802 |
_fallback_combined(org_id, source_id)
|
| 803 |
|
| 804 |
# 🎯 ONE trigger message to worker manager
|
| 805 |
+
trigger_start = time.time()
|
| 806 |
event_hub.emit_analytics_trigger(org_id, source_id, {
|
| 807 |
"type": "kpi_compute",
|
| 808 |
"entity_type": entity_type,
|
| 809 |
"industry": industry,
|
| 810 |
+
"rows_inserted": rows_inserted,
|
| 811 |
"timestamp": datetime.now().isoformat()
|
| 812 |
})
|
| 813 |
+
trigger_latency = (time.time() - trigger_start) * 1000
|
| 814 |
+
|
| 815 |
+
logger.info(f"[MAPPER] 🚀 Triggered analytics in {trigger_latency:.2f}ms")
|
| 816 |
|
| 817 |
except Exception as e:
|
| 818 |
+
logger.error(f"[MAPPER] ⚠️ Analytics trigger failed: {e}")
|
| 819 |
+
_record_redis_failure(f"trigger_error:{e}")
|
| 820 |
|
| 821 |
return df, industry, industry_confidence
|