amitbhatt6075 committed on
Commit
6cb46f3
Β·
1 Parent(s): 14f0730

feat(briefing): Add python fallback for empty data

Browse files
Files changed (1) hide show
  1. api/main.py +22 -14
api/main.py CHANGED
@@ -427,7 +427,7 @@ async def generate_outline_route(request: OutlineRequest):
427
  except Exception as e:
428
  raise HTTPException(status_code=500, detail=f"An internal error occurred in the AI model: {e}")
429
 
430
-
431
  @app.post("/generate-dashboard-insights", response_model=StrategyResponse, summary="Generate Insights from Dashboard KPIs")
432
  @cached_response
433
  def generate_dashboard_insights_route(request: DashboardInsightsRequest):
@@ -1308,41 +1308,49 @@ You are a social media expert. Analyze the following caption... Respond ONLY wit
1308
  @app.post("/generate/daily-briefing", response_model=DailyBriefingResponse, summary="Generates a daily action plan for the Talent Manager")
1309
  def generate_daily_briefing(data: DailyBriefingData):
1310
  """
1311
- Takes various KPIs from the backend, synthesizes them, and uses the LLM
1312
- to generate a short, actionable daily briefing for a Talent Manager.
1313
  """
1314
- print(f"\nβœ… Received request on /generate/daily-briefing")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1315
  if not _llm_instance:
1316
  raise HTTPException(status_code=503, detail="The Llama model is not available for briefing.")
1317
 
1318
- # --- ✨ THE FINAL "IDIOT-PROOF" PROMPT FOR TINYLLAMA ---
1319
  final_prompt = f"""
1320
  Summarize these key points into 2-3 direct bullet points for a manager.
1321
 
1322
  DATA:
1323
- - Influencers without campaigns: {data.on_bench_influencers}
1324
- - Submissions needing review: {data.pending_submissions + data.revisions_requested}
1325
  - Total pending money: {data.highest_pending_payout:,.0f} INR
1326
 
1327
  SUMMARY:
1328
  - """
1329
 
1330
  try:
1331
- print("--- Sending briefing data to LLM (Idiot-Proof prompt)...")
1332
- # Temperature 0.1 karne se model aur zyada factual aur kam creative hoga
1333
  response = _llm_instance(final_prompt, max_tokens=150, temperature=0.1, stop=["DATA:"], echo=False)
1334
-
1335
  briefing_text = response['choices'][0]['text'].strip()
1336
 
1337
- # Add our own header to make it look nice
1338
  final_briefing = f"Here are your top priorities for today:\n- {briefing_text}"
1339
-
1340
- print("--- Successfully generated daily briefing.")
1341
  return DailyBriefingResponse(briefing_text=final_briefing)
1342
 
1343
  except Exception as e:
1344
  print(f"🚨 An unexpected error occurred during briefing generation:")
1345
- import traceback
1346
  traceback.print_exc()
1347
  raise HTTPException(status_code=500, detail="Failed to generate AI briefing.")
1348
 
 
427
  except Exception as e:
428
  raise HTTPException(status_code=500, detail=f"An internal error occurred in the AI model: {e}")
429
 
430
+
431
  @app.post("/generate-dashboard-insights", response_model=StrategyResponse, summary="Generate Insights from Dashboard KPIs")
432
  @cached_response
433
  def generate_dashboard_insights_route(request: DashboardInsightsRequest):
 
@app.post("/generate/daily-briefing", response_model=DailyBriefingResponse, summary="Generates a daily action plan for the Talent Manager")
def generate_daily_briefing(data: DailyBriefingData):
    """
    Generate a short daily action briefing for a Talent Manager.

    Takes dashboard KPIs (`on_bench_influencers`, `pending_submissions`,
    `revisions_requested`, `highest_pending_payout`) and either:
      - returns a static Python-generated "all clear" message when there is
        nothing actionable (avoids an unnecessary, flaky LLM call), or
      - asks the loaded LLM to summarize the KPIs into 2-3 bullet points.

    Raises:
        HTTPException 503: if the LLM instance is not loaded.
        HTTPException 500: if LLM generation fails for any reason.
    """
    print(f"\n✅ Received request on /generate/daily-briefing with data: {data}")

    # Pre-compute the two signals that decide whether the LLM is needed at all.
    on_bench = data.on_bench_influencers
    pending_tasks = data.pending_submissions + data.revisions_requested

    # SAFETY CHECK: if there is no actionable data, skip the AI entirely and
    # return a deterministic "all clear" message from Python. This prevents the
    # LLM from hallucinating priorities out of empty inputs.
    # NOTE(review): pending payouts alone do not trigger the LLM path — confirm
    # that is intentional before relying on this short-circuit.
    if on_bench == 0 and pending_tasks == 0:
        print("   - ✅ No critical tasks found. Returning Python-generated 'All Clear' message.")
        return DailyBriefingResponse(
            briefing_text="All clear! No urgent actions are required. Your roster is fully engaged and up-to-date."
        )

    if not _llm_instance:
        raise HTTPException(status_code=503, detail="The Llama model is not available for briefing.")

    final_prompt = f"""
Summarize these key points into 2-3 direct bullet points for a manager.

DATA:
- Influencers without campaigns: {on_bench}
- Submissions needing review: {pending_tasks}
- Total pending money: {data.highest_pending_payout:,.0f} INR

SUMMARY:
- """

    try:
        print("--- Sending briefing data to LLM (Data exists)...")
        # Low temperature keeps the model factual rather than creative; the
        # "DATA:" stop sequence prevents it from echoing the prompt structure.
        response = _llm_instance(final_prompt, max_tokens=150, temperature=0.1, stop=["DATA:"], echo=False)
        briefing_text = response['choices'][0]['text'].strip()

        # Prepend our own header so the briefing reads consistently regardless
        # of how the model phrased its first bullet.
        final_briefing = f"Here are your top priorities for today:\n- {briefing_text}"
        print("--- Successfully generated AI briefing.")
        return DailyBriefingResponse(briefing_text=final_briefing)

    except Exception as e:
        # Local import guards against the module-level import having been
        # removed in this commit (the diff dropped `import traceback` here).
        import traceback
        print(f"🚨 An unexpected error occurred during briefing generation: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail="Failed to generate AI briefing.")