harvesthealth committed on
Commit
44b5c9a
·
verified ·
1 Parent(s): a885531

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. app.py +41 -47
  2. test_remote_api_v3.py +16 -0
app.py CHANGED
@@ -378,58 +378,52 @@ def generate_tasks(theme, customer_profile):
378
  4. Emotional connection to the persona and content/styling
379
 
380
  The tasks must be in sequential order.
381
- Return the tasks as a JSON list of strings in the format: {{"tasks": ["task1", "task2", ...]}}
 
 
 
382
  """
383
 
384
- # Try alias-large first, then alias-fast
385
- for model_name in ["alias-large", "alias-fast"]:
 
386
  try:
387
- # Handle potential 502 with wait
388
- response = None
389
- for attempt in range(3):
390
- try:
391
- print(f"Attempting task generation with {model_name}, attempt {attempt+1}")
392
- if attempt > 0:
393
- # Parallel retry after first failure
394
- print("Proxy error occurred previously. Attempting parallel call to alias-large and alias-huge.")
395
- response = call_llm_parallel(client, ["alias-large", "alias-huge"], [{"role": "user", "content": prompt}])
396
- if isinstance(response, Exception):
397
- raise response
398
- else:
399
- response = client.chat.completions.create(
400
- model=model_name,
401
- messages=[{"role": "user", "content": prompt}]
402
- )
403
- break
404
- except Exception as e:
405
- print(f"Error during task generation with {model_name}: {e}")
406
- if "502" in str(e) or "Proxy Error" in str(e):
407
- print(f"Waiting 35 seconds before retry...")
408
- time.sleep(35)
409
- else:
410
- # For other errors, don't wait as long but still retry or move on
411
- time.sleep(1)
412
 
413
- if not response or isinstance(response, Exception):
414
- print(f"Failed to get response from {model_name} after all attempts.")
415
- continue
416
-
417
- content = response.choices[0].message.content
418
- # Extract JSON from content (it might be wrapped in code blocks)
419
- json_str = re.search(r"\{.*\}", content, re.DOTALL)
420
- if json_str:
421
- tasks_json = json.loads(json_str.group())
422
- if "tasks" in tasks_json:
423
- return tasks_json["tasks"]
424
- elif isinstance(tasks_json, list):
425
- return tasks_json
426
- else:
427
- return list(tasks_json.values())[0]
 
 
 
 
 
428
  except Exception as e:
429
- print(f"Error with model {model_name}: {e}")
430
- continue
431
-
432
- return [f"Task {i+1} for {theme} (Failed to generate)" for i in range(10)]
433
 
434
  def handle_generate(theme, customer_profile, num_personas):
435
  try:
 
378
  4. Emotional connection to the persona and content/styling
379
 
380
  The tasks must be in sequential order.
381
+
382
+ CRITICAL: You MUST return a JSON object with a "tasks" key containing a list of strings.
383
+ Example: {{"tasks": ["task 1", "task 2", ...]}}
384
+ Do not include any other text in your response.
385
  """
386
 
387
+ models_to_try = ["alias-large", "alias-huge", "alias-fast"]
388
+
389
+ for attempt in range(5):
390
  try:
391
+ print(f"Attempt {attempt+1} for task generation...")
392
+ if attempt > 0:
393
+ print(f"Retrying in parallel with {models_to_try}")
394
+ # Wait 35s if it's a retry (likely Proxy Error or Rate Limit)
395
+ time.sleep(35)
396
+ response = call_llm_parallel(client, models_to_try, [{"role": "user", "content": prompt}])
397
+ else:
398
+ response = client.chat.completions.create(
399
+ model="alias-large",
400
+ messages=[{"role": "user", "content": prompt}]
401
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
402
 
403
+ if response and not isinstance(response, Exception):
404
+ content = response.choices[0].message.content
405
+ # Robust extraction
406
+ json_match = re.search(r"\{.*\}", content, re.DOTALL)
407
+ if json_match:
408
+ try:
409
+ tasks_json = json.loads(json_match.group())
410
+ tasks = tasks_json.get("tasks", [])
411
+ if tasks and isinstance(tasks, list) and len(tasks) >= 5:
412
+ return tasks[:10]
413
+ except:
414
+ pass
415
+
416
+ # Fallback: try to extract lines that look like tasks
417
+ lines = [re.sub(r'^\d+[\.\)]\s*', '', l).strip() for l in content.split('\n') if l.strip()]
418
+ tasks = [l for l in lines if len(l) > 20 and not l.startswith('{') and not l.startswith('`')]
419
+ if len(tasks) >= 5:
420
+ return tasks[:10]
421
+
422
+ print(f"Attempt {attempt+1} failed to yield valid tasks.")
423
  except Exception as e:
424
+ print(f"Error in attempt {attempt+1}: {e}")
425
+
426
+ return [f"Task {i+1} for {theme} (Manual fallback)" for i in range(10)]
 
427
 
428
  def handle_generate(theme, customer_profile, num_personas):
429
  try:
test_remote_api_v3.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from gradio_client import Client
import json

# Smoke test against the deployed backup Space: exercises the
# /handle_generate endpoint end-to-end over the Gradio client API.
client = Client("https://harvesthealth-xxg-backup.hf.space/")

print("\n--- Testing /handle_generate with Persona Pool logic ---")
try:
    # Keyword arguments mirror the handle_generate(theme, customer_profile,
    # num_personas) signature exposed by the Space.
    request_kwargs = {
        "theme": "Education",
        "customer_profile": "Student",
        "num_personas": 1,
        "api_name": "/handle_generate",
    }
    result = client.predict(**request_kwargs)
    print(f"Result: {result}")
except Exception as e:
    # Best-effort script: report the failure rather than crash, so the
    # remote error (timeout, 502, bad payload) is visible in the log.
    print(f"Error: {e}")