TushP committed on
Commit
b2f9b2b
·
verified ·
1 Parent(s): be2f7a8

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. modal_backend.py +80 -43
modal_backend.py CHANGED
@@ -79,7 +79,7 @@ def calculate_sentiment(text: str) -> float:
79
  image=image,
80
  secrets=[modal.Secret.from_name("anthropic-api-key")],
81
  timeout=120, # 2 min per batch is plenty
82
- retries=2,
83
  )
84
  def process_batch(batch_data: Dict[str, Any]) -> Dict[str, Any]:
85
  """
@@ -209,12 +209,14 @@ Extract everything:"""
209
  image=image,
210
  secrets=[modal.Secret.from_name("anthropic-api-key")],
211
  timeout=180, # 3 min for insights
 
212
  )
213
  def generate_insights_parallel(analysis_data: Dict[str, Any], restaurant_name: str, role: str) -> Dict[str, Any]:
214
  """Generate insights for a single role - runs in parallel with other insights."""
215
  from anthropic import Anthropic
216
  import os
217
  import re
 
218
 
219
  print(f"🧠 Generating {role} insights...")
220
 
@@ -225,20 +227,20 @@ def generate_insights_parallel(analysis_data: Dict[str, Any], restaurant_name: s
225
  drinks = analysis_data.get('menu_analysis', {}).get('drinks', [])[:10]
226
  aspects = analysis_data.get('aspect_analysis', {}).get('aspects', [])[:20]
227
 
228
- # Format menu summary
229
  menu_lines = ["TOP MENU ITEMS:"]
230
  for item in menu_items:
231
  s = item.get('sentiment', 0)
232
- emoji = "🟒" if s >= 0.6 else "🟑" if s >= 0 else "πŸ”΄"
233
- menu_lines.append(f" {emoji} {item.get('name', '?')}: sentiment {s:+.2f}, {item.get('mention_count', 0)} mentions")
234
  menu_summary = "\n".join(menu_lines)
235
 
236
  # Format aspect summary
237
  aspect_lines = ["TOP ASPECTS:"]
238
  for a in aspects:
239
  s = a.get('sentiment', 0)
240
- emoji = "🟒" if s >= 0.6 else "🟑" if s >= 0 else "πŸ”΄"
241
- aspect_lines.append(f" {emoji} {a.get('name', '?')}: sentiment {s:+.2f}, {a.get('mention_count', 0)} mentions")
242
  aspect_summary = "\n".join(aspect_lines)
243
 
244
  if role == 'chef':
@@ -255,9 +257,9 @@ def generate_insights_parallel(analysis_data: Dict[str, Any], restaurant_name: s
255
  {aspect_summary}
256
 
257
  SENTIMENT SCALE:
258
- - 🟒 POSITIVE (>= 0.6): Highlight as STRENGTH
259
- - 🟑 NEUTRAL (0 to 0.59): Room for improvement
260
- - πŸ”΄ NEGATIVE (< 0): Flag as CONCERN
261
 
262
  YOUR TASK: Generate insights for the {"HEAD CHEF" if role == "chef" else "RESTAURANT MANAGER"}.
263
  {focus}
@@ -282,30 +284,59 @@ OUTPUT:
282
 
283
  Generate {role} insights:"""
284
 
285
- try:
286
- response = client.messages.create(
287
- model="claude-sonnet-4-20250514",
288
- max_tokens=2000,
289
- temperature=0.4,
290
- messages=[{"role": "user", "content": prompt}]
291
- )
292
-
293
- result_text = response.content[0].text.strip()
294
- result_text = result_text.replace('```json', '').replace('```', '').strip()
295
-
296
- # Find JSON in response
297
- match = re.search(r'\{[\s\S]*\}', result_text)
298
- if match:
299
- insights = json.loads(match.group())
300
- print(f"βœ… {role.title()} insights generated")
301
- return {"role": role, "insights": insights}
302
- else:
303
- print(f"⚠️ No JSON found in {role} response")
304
- return {"role": role, "insights": _fallback_insights(role)}
305
 
306
- except Exception as e:
307
- print(f"❌ Error generating {role} insights: {e}")
308
- return {"role": role, "insights": _fallback_insights(role)}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
309
 
310
 
311
  def _fallback_insights(role: str) -> Dict[str, Any]:
@@ -838,21 +869,27 @@ def full_analysis_parallel(url: str, max_reviews: int = 100) -> Dict[str, Any]:
838
  }
839
  }
840
 
841
- # Phase 3: PARALLEL insights generation
842
- print("🧠 Phase 3: PARALLEL insights generation...")
843
  insights_start = time.time()
844
 
845
- # Generate both insights in parallel!
846
- insight_inputs = [
847
- (analysis_data, restaurant_name, "chef"),
848
- (analysis_data, restaurant_name, "manager")
849
- ]
850
 
851
- insight_results = list(generate_insights_parallel.starmap(insight_inputs))
 
 
 
 
852
 
853
- insights = {}
854
- for result in insight_results:
855
- insights[result["role"]] = result["insights"]
 
 
 
 
 
856
 
857
  print(f"βœ… Insights complete in {time.time() - insights_start:.1f}s")
858
 
 
79
  image=image,
80
  secrets=[modal.Secret.from_name("anthropic-api-key")],
81
  timeout=120, # 2 min per batch is plenty
82
+ retries=3, # Retry on transient failures
83
  )
84
  def process_batch(batch_data: Dict[str, Any]) -> Dict[str, Any]:
85
  """
 
209
  image=image,
210
  secrets=[modal.Secret.from_name("anthropic-api-key")],
211
  timeout=180, # 3 min for insights
212
+ retries=3, # Retry on failure
213
  )
214
  def generate_insights_parallel(analysis_data: Dict[str, Any], restaurant_name: str, role: str) -> Dict[str, Any]:
215
  """Generate insights for a single role - runs in parallel with other insights."""
216
  from anthropic import Anthropic
217
  import os
218
  import re
219
+ import time as time_module
220
 
221
  print(f"🧠 Generating {role} insights...")
222
 
 
227
  drinks = analysis_data.get('menu_analysis', {}).get('drinks', [])[:10]
228
  aspects = analysis_data.get('aspect_analysis', {}).get('aspects', [])[:20]
229
 
230
+ # Format menu summary (using text instead of emojis for reliability)
231
  menu_lines = ["TOP MENU ITEMS:"]
232
  for item in menu_items:
233
  s = item.get('sentiment', 0)
234
+ indicator = "[+]" if s >= 0.6 else "[~]" if s >= 0 else "[-]"
235
+ menu_lines.append(f" {indicator} {item.get('name', '?')}: sentiment {s:+.2f}, {item.get('mention_count', 0)} mentions")
236
  menu_summary = "\n".join(menu_lines)
237
 
238
  # Format aspect summary
239
  aspect_lines = ["TOP ASPECTS:"]
240
  for a in aspects:
241
  s = a.get('sentiment', 0)
242
+ indicator = "[+]" if s >= 0.6 else "[~]" if s >= 0 else "[-]"
243
+ aspect_lines.append(f" {indicator} {a.get('name', '?')}: sentiment {s:+.2f}, {a.get('mention_count', 0)} mentions")
244
  aspect_summary = "\n".join(aspect_lines)
245
 
246
  if role == 'chef':
 
257
  {aspect_summary}
258
 
259
  SENTIMENT SCALE:
260
+ - POSITIVE (>= 0.6): Highlight as STRENGTH
261
+ - NEUTRAL (0 to 0.59): Room for improvement
262
+ - NEGATIVE (< 0): Flag as CONCERN
263
 
264
  YOUR TASK: Generate insights for the {"HEAD CHEF" if role == "chef" else "RESTAURANT MANAGER"}.
265
  {focus}
 
284
 
285
  Generate {role} insights:"""
286
 
287
+ # Retry logic for transient errors (overloaded, rate limits, etc.)
288
+ max_retries = 3
289
+ for attempt in range(max_retries):
290
+ try:
291
+ print(f"πŸ”„ Calling API for {role} insights (attempt {attempt + 1}/{max_retries})...")
292
+ response = client.messages.create(
293
+ model="claude-sonnet-4-20250514",
294
+ max_tokens=2000,
295
+ temperature=0.4,
296
+ messages=[{"role": "user", "content": prompt}]
297
+ )
 
 
 
 
 
 
 
 
 
298
 
299
+ result_text = response.content[0].text.strip()
300
+ print(f"πŸ“ {role} raw response length: {len(result_text)} chars")
301
+
302
+ result_text = result_text.replace('```json', '').replace('```', '').strip()
303
+
304
+ # Find JSON in response
305
+ match = re.search(r'\{[\s\S]*\}', result_text)
306
+ if match:
307
+ try:
308
+ insights = json.loads(match.group())
309
+ # Validate the insights structure
310
+ if 'summary' in insights and 'strengths' in insights:
311
+ print(f"βœ… {role.title()} insights generated successfully")
312
+ return {"role": role, "insights": insights}
313
+ else:
314
+ print(f"⚠️ {role} insights missing required fields")
315
+ return {"role": role, "insights": _fallback_insights(role)}
316
+ except json.JSONDecodeError as je:
317
+ print(f"⚠️ {role} JSON parse error: {je}")
318
+ return {"role": role, "insights": _fallback_insights(role)}
319
+ else:
320
+ print(f"⚠️ No JSON found in {role} response")
321
+ return {"role": role, "insights": _fallback_insights(role)}
322
+
323
+ except Exception as e:
324
+ error_str = str(e)
325
+ # Check if it's a transient error (overloaded, rate limit, etc.)
326
+ if '529' in error_str or 'overloaded' in error_str.lower() or '429' in error_str or 'rate' in error_str.lower():
327
+ if attempt < max_retries - 1:
328
+ wait_time = (attempt + 1) * 5 # 5s, 10s, 15s
329
+ print(f"⚠️ API overloaded for {role}, waiting {wait_time}s before retry...")
330
+ time_module.sleep(wait_time)
331
+ continue
332
+ else:
333
+ print(f"❌ API still overloaded after {max_retries} retries for {role}")
334
+ return {"role": role, "insights": _fallback_insights(role)}
335
+ else:
336
+ print(f"❌ Error generating {role} insights: {e}")
337
+ return {"role": role, "insights": _fallback_insights(role)}
338
+
339
+ return {"role": role, "insights": _fallback_insights(role)}
340
 
341
 
342
  def _fallback_insights(role: str) -> Dict[str, Any]:
 
869
  }
870
  }
871
 
872
+ # Phase 3: Generate insights (sequential with delay to avoid API overload)
873
+ print("🧠 Phase 3: Generating insights...")
874
  insights_start = time.time()
875
 
876
+ # Generate insights sequentially with a small delay to avoid 529 errors
877
+ insights = {}
 
 
 
878
 
879
+ # Chef insights first
880
+ print("πŸ”„ Generating chef insights...")
881
+ chef_result = generate_insights_parallel.remote(analysis_data, restaurant_name, "chef")
882
+ insights[chef_result["role"]] = chef_result["insights"]
883
+ print(f"πŸ“Š Chef insights received: {len(chef_result['insights'].get('strengths', []))} strengths")
884
 
885
+ # Small delay before manager to avoid overloading
886
+ time.sleep(2)
887
+
888
+ # Manager insights
889
+ print("πŸ”„ Generating manager insights...")
890
+ manager_result = generate_insights_parallel.remote(analysis_data, restaurant_name, "manager")
891
+ insights[manager_result["role"]] = manager_result["insights"]
892
+ print(f"πŸ“Š Manager insights received: {len(manager_result['insights'].get('strengths', []))} strengths")
893
 
894
  print(f"βœ… Insights complete in {time.time() - insights_start:.1f}s")
895