Peter Mutwiri committed on
Commit
7b246f4
·
1 Parent(s): c87681f

removed background jobs on datasource endpoint

Browse files
app/routers/datasources.py CHANGED
@@ -1,4 +1,4 @@
1
- from fastapi import APIRouter, Query, Form, File, UploadFile, Depends, HTTPException
2
  from typing import Dict, Any, List, Union
3
  from fastapi.responses import JSONResponse
4
  from pydantic import BaseModel
@@ -13,8 +13,8 @@ import time
13
  from datetime import datetime, timedelta
14
  from app.redis_client import redis
15
  # Add this import
16
- from app.tasks.analytics_worker import trigger_kpi_computation
17
  from app.qstash_client import get_qstash_client, is_qstash_available
 
18
  router = APIRouter(tags=["datasources"]) # Remove
19
 
20
 
@@ -70,8 +70,7 @@ async def create_source_json(
70
  # Entity will be auto-queued by process_detect_industry()
71
 
72
  df, industry, confidence = canonify_df(org_id, source_id)
73
- # run autokpi computation in background
74
- background_tasks.add_task(trigger_kpi_computation, org_id, source_id)
75
  # Convert DataFrame to JSON-safe format
76
  preview_df = df.head(3).copy()
77
  for col in preview_df.columns:
 
1
+ from fastapi import APIRouter, Query, Form, File, UploadFile, Depends, HTTPException,BackgroundTasks
2
  from typing import Dict, Any, List, Union
3
  from fastapi.responses import JSONResponse
4
  from pydantic import BaseModel
 
13
  from datetime import datetime, timedelta
14
  from app.redis_client import redis
15
  # Add this import
 
16
  from app.qstash_client import get_qstash_client, is_qstash_available
17
+
18
  router = APIRouter(tags=["datasources"]) # Remove
19
 
20
 
 
70
  # Entity will be auto-queued by process_detect_industry()
71
 
72
  df, industry, confidence = canonify_df(org_id, source_id)
73
+
 
74
  # Convert DataFrame to JSON-safe format
75
  preview_df = df.head(3).copy()
76
  for col in preview_df.columns:
app/tasks/analytics_worker.py CHANGED
@@ -64,7 +64,7 @@ class AnalyticsWorker:
64
 
65
  logger.info(f"[WORKER] 📊 Loaded {len(df)} rows × {len(df.columns)} cols")
66
 
67
- # 2️⃣ SCHEMA DISCOVERY (Einstein's brain)
68
  # Fast from cache (~0ms), slow on first run (~30s)
69
  mapping = await self._discover_schema(df)
70
  if not mapping:
 
64
 
65
  logger.info(f"[WORKER] 📊 Loaded {len(df)} rows × {len(df.columns)} cols")
66
 
67
+
68
  # Fast from cache (~0ms), slow on first run (~30s)
69
  mapping = await self._discover_schema(df)
70
  if not mapping: