AUXteam committed on
Commit
43cee92
·
verified ·
1 Parent(s): 71528d6

Fix: Fallback to sequential persona generation with delay to prevent Google 429 Too Many Requests errors

Browse files
backend/services/tinytroupe_manager.py CHANGED
@@ -39,17 +39,15 @@ class TinyTroupeSimulationManager:
39
  if missing_count > 0:
40
  try:
41
  # Utilize the TinyPersonFactory dynamic population pattern
42
- factory = TinyPersonFactory(
43
- sampling_space_description=customer_profile,
44
- total_population_size=missing_count,
45
- context=business_description
46
- )
47
-
48
- logger.info(f"Job {job_id}: Generating {missing_count} personas via TinyPersonFactory...")
49
 
50
- people = factory.generate_people(missing_count)
51
 
52
- for i, person in enumerate(people):
 
 
 
53
  if person:
54
  persona_data = person._persona
55
  persona_data["_assureness_score"] = 100 # New ones are perfectly matched to the description
@@ -64,6 +62,10 @@ class TinyTroupeSimulationManager:
64
 
65
  job_registry.update_job(job_id, progress_percentage=20 + int((i+1)/missing_count * 60))
66
 
 
 
 
 
67
  except Exception as e:
68
  logger.error(f"Error during persona generation: {e}")
69
  job_registry.update_job(job_id, status="FAILED", message=f"LLM Error: {str(e)}")
 
39
  if missing_count > 0:
40
  try:
41
  # Utilize the TinyPersonFactory dynamic population pattern
42
+ # Utilize the TinyPersonFactory dynamic population pattern, but generate sequentially to avoid Google 429 rate limits
43
+ factory = TinyPersonFactory(business_description)
 
 
 
 
 
44
 
45
+ logger.info(f"Job {job_id}: Generating {missing_count} personas via TinyPersonFactory sequentially...")
46
 
47
+ for i in range(missing_count):
48
+ logger.info(f"Job {job_id}: Requesting persona {i+1}/{missing_count} from LLM...")
49
+ person = factory.generate_person(customer_profile)
50
+
51
  if person:
52
  persona_data = person._persona
53
  persona_data["_assureness_score"] = 100 # New ones are perfectly matched to the description
 
62
 
63
  job_registry.update_job(job_id, progress_percentage=20 + int((i+1)/missing_count * 60))
64
 
65
+ # Throttle consecutive requests to respect Google Gemini free tier limits
66
+ if i < missing_count - 1:
67
+ time.sleep(10)
68
+
69
  except Exception as e:
70
  logger.error(f"Error during persona generation: {e}")
71
  job_registry.update_job(job_id, status="FAILED", message=f"LLM Error: {str(e)}")