harvesthealth committed on
Commit
d60954f
·
verified ·
1 Parent(s): e5432a9

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. analysis_template.md +3 -5
  2. app.py +18 -52
  3. verify_thinking.py +45 -0
analysis_template.md CHANGED
@@ -1,7 +1,3 @@
1
- # CONTEXT FILE: contexts/context_{{report_id}}.md
2
- # COMMAND: Use `read_file` or equivalent to read the context file at the path above.
3
- # It contains the persona JSON and the tasks list for this analysis.
4
-
5
  # UX STRATEGIST TEMPLATE (CLIENT-GRADE)
6
 
7
  You are a Senior UX Strategist and Researcher.
@@ -17,6 +13,8 @@ You will simulate a persona performing tasks, but the final output must read lik
17
 
18
  ## 1. Variables
19
 
 
 
20
  - Target URL: {{url}}
21
  - Report ID: {{report_id}}
22
 
@@ -26,7 +24,7 @@ You will simulate a persona performing tasks, but the final output must read lik
26
 
27
  ### Task Execution & Data Collection
28
 
29
- 1. **Sequential Execution**: Perform the 10 tasks provided in the external context file one by one.
30
  2. **Coordinate Tracking**: For every click or interaction, record the (x, y) coordinates relative to the viewport.
31
  3. **Heatmap Generation**: After completing all tasks, generate "Average User Journey Heatmaps" by overlaying the recorded interaction points onto screenshots of the relevant pages.
32
  - Save these heatmap images as PNG files in `/user_experience_reports/heatmaps/`.
 
 
 
 
 
1
  # UX STRATEGIST TEMPLATE (CLIENT-GRADE)
2
 
3
  You are a Senior UX Strategist and Researcher.
 
13
 
14
  ## 1. Variables
15
 
16
+ - Persona: {{persona_context}}
17
+ - Tasks: {{tasks_list}}
18
  - Target URL: {{url}}
19
  - Report ID: {{report_id}}
20
 
 
24
 
25
  ### Task Execution & Data Collection
26
 
27
+ 1. **Sequential Execution**: Perform the 10 tasks provided in `{{tasks_list}}` one by one.
28
  2. **Coordinate Tracking**: For every click or interaction, record the (x, y) coordinates relative to the viewport.
29
  3. **Heatmap Generation**: After completing all tasks, generate "Average User Journey Heatmaps" by overlaying the recorded interaction points onto screenshots of the relevant pages.
30
  - Save these heatmap images as PNG files in `/user_experience_reports/heatmaps/`.
app.py CHANGED
@@ -55,8 +55,9 @@ def patch_tinytroupe():
55
  if "_raw_model_call_parallel" not in content:
56
  content = content.replace("class OpenAIClient:", "class OpenAIClient:" + parallel_helper)
57
 
58
- # 2. Ensure alias-large is used (revert any previous fast patches)
59
- content = content.replace('"alias-fast"', '"alias-large"')
 
60
 
61
  # 3. Handle 502 errors by waiting 35 seconds and setting a parallel retry flag
62
  # We need to modify the send_message loop
@@ -68,8 +69,8 @@ def patch_tinytroupe():
68
  if 'if parallel_retry:' not in content:
69
  old_call = "response = self._raw_model_call(model, chat_api_params)"
70
  new_call = """if parallel_retry:
71
- logger.info("Attempting parallel call to alias-large and alias-huge.")
72
- response = self._raw_model_call_parallel(["alias-large", "alias-huge"], chat_api_params)
73
  if isinstance(response, Exception):
74
  raise response
75
  else:
@@ -310,35 +311,6 @@ def upload_persona_to_pool(persona_data):
310
  except Exception as e:
311
  print(f"Error uploading persona to pool: {e}")
312
 
313
- def upload_context_to_github(repo_name, session_id, persona, tasks):
314
- if not gh:
315
- add_log("ERROR: GitHub client not initialized for context upload.")
316
- return None
317
-
318
- file_path = f"contexts/context_{session_id}.md"
319
- content = f"""# Analysis Context for Session {session_id}
320
-
321
- ## Persona
322
- {json.dumps(persona, indent=2)}
323
-
324
- ## Tasks
325
- {json.dumps(tasks, indent=2)}
326
- """
327
- try:
328
- repo = gh.get_repo(repo_name)
329
- try:
330
- # Check if exists (unlikely for new session)
331
- existing = repo.get_contents(file_path, ref="main")
332
- repo.update_file(file_path, f"Update context for {session_id}", content, existing.sha, branch="main")
333
- except:
334
- repo.create_file(file_path, f"Add context for {session_id}", content, branch="main")
335
-
336
- add_log(f"Successfully uploaded context file to {repo_name} main branch.")
337
- return f"https://github.com/{repo_name}/blob/main/{file_path}"
338
- except Exception as e:
339
- add_log(f"ERROR uploading context to GitHub: {e}")
340
- return None
341
-
342
  def select_or_create_personas(theme, customer_profile, num_personas, force_method=None, example_file=None):
343
  if force_method == "Example Persona" and example_file:
344
  add_log(f"Loading example persona from {example_file}...")
@@ -424,7 +396,7 @@ def select_or_create_personas(theme, customer_profile, num_personas, force_metho
424
 
425
  try:
426
  response = client.chat.completions.create(
427
- model="alias-large",
428
  messages=[{"role": "user", "content": prompt}]
429
  )
430
  content = response.choices[0].message.content
@@ -463,7 +435,7 @@ def generate_persona_from_deeppersona(theme, customer_profile):
463
  if not client:
464
  return None
465
 
466
- # Step 1: Breakdown profile into parameters using LLM alias-large
467
  prompt = f"""
468
  You are an expert in persona creation.
469
  Break down the following business theme and customer profile into detailed attributes for a persona.
@@ -487,7 +459,7 @@ def generate_persona_from_deeppersona(theme, customer_profile):
487
 
488
  try:
489
  response = client.chat.completions.create(
490
- model="alias-large",
491
  messages=[{"role": "user", "content": prompt}],
492
  response_format={"type": "json_object"}
493
  )
@@ -564,9 +536,9 @@ def generate_personas(theme, customer_profile, num_personas):
564
 
565
  add_log("Falling back to TinyTroupe logic for remaining personas...")
566
 
567
- # Ensure alias-large is used
568
- config_manager.update("model", "alias-large")
569
- config_manager.update("reasoning_model", "alias-large")
570
 
571
  context = f"A company related to {theme}. Target customers: {customer_profile}"
572
 
@@ -644,7 +616,7 @@ def generate_tasks(theme, customer_profile, url):
644
  Do not include any other text in your response.
645
  """
646
 
647
- models_to_try = ["alias-large", "alias-huge", "alias-fast"]
648
 
649
  for attempt in range(5):
650
  try:
@@ -656,7 +628,7 @@ def generate_tasks(theme, customer_profile, url):
656
  response = call_llm_parallel(client, models_to_try, [{"role": "user", "content": prompt}], response_format={"type": "json_object"})
657
  else:
658
  response = client.chat.completions.create(
659
- model="alias-large",
660
  messages=[{"role": "user", "content": prompt}],
661
  response_format={"type": "json_object"}
662
  )
@@ -695,7 +667,7 @@ def handle_generate(theme, customer_profile, num_personas, method, example_file,
695
  if ex_personas:
696
  current_profile = ex_personas[0].get('minibio', customer_profile)
697
 
698
- yield "Generating tasks...", None, None, None
699
  tasks = generate_tasks(theme, current_profile, url)
700
  tasks_text = "\n".join(tasks) if isinstance(tasks, list) else str(tasks)
701
 
@@ -742,21 +714,15 @@ def start_and_monitor_sessions(personas, tasks, url, session_id):
742
  sessions = []
743
  jules_uuids = []
744
 
745
- # Upload context file to main branch
746
- context_url = upload_context_to_github(repo_name, session_id, personas[0], tasks)
747
- if not context_url:
748
- add_log("Warning: Failed to upload context file. Jules might not find it.")
749
- else:
750
- add_log(f"Waiting 5 seconds for GitHub propagation...")
751
- time.sleep(5)
752
-
753
  for persona in personas:
754
  # Use provided session_id or append to it if multiple personas?
755
  # For simplicity, we use session_id as the report_id too
756
  report_id = session_id
757
 
758
  # Format prompt
759
- prompt = template.replace("{{url}}", url)
 
 
760
  prompt = prompt.replace("{{report_id}}", report_id)
761
  prompt = prompt.replace("{{blablador_api_key}}", BLABLADOR_API_KEY if BLABLADOR_API_KEY else "YOUR_API_KEY")
762
 
@@ -770,7 +736,7 @@ def start_and_monitor_sessions(personas, tasks, url, session_id):
770
  "sourceContext": {
771
  "source": f"sources/github/{repo_name}",
772
  "githubRepoContext": {
773
- "startingBranch": session_id
774
  }
775
  },
776
  "automationMode": "AUTO_CREATE_PR",
 
55
  if "_raw_model_call_parallel" not in content:
56
  content = content.replace("class OpenAIClient:", "class OpenAIClient:" + parallel_helper)
57
 
58
+ # 2. Ensure alias-huge is used (alias-large is deprecated/down)
59
+ content = content.replace('"alias-fast"', '"alias-huge"')
60
+ content = content.replace('"alias-large"', '"alias-huge"')
61
 
62
  # 3. Handle 502 errors by waiting 35 seconds and setting a parallel retry flag
63
  # We need to modify the send_message loop
 
69
  if 'if parallel_retry:' not in content:
70
  old_call = "response = self._raw_model_call(model, chat_api_params)"
71
  new_call = """if parallel_retry:
72
+ logger.info("Attempting parallel call to alias-huge and alias-fast.")
73
+ response = self._raw_model_call_parallel(["alias-huge", "alias-fast"], chat_api_params)
74
  if isinstance(response, Exception):
75
  raise response
76
  else:
 
311
  except Exception as e:
312
  print(f"Error uploading persona to pool: {e}")
313
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
314
  def select_or_create_personas(theme, customer_profile, num_personas, force_method=None, example_file=None):
315
  if force_method == "Example Persona" and example_file:
316
  add_log(f"Loading example persona from {example_file}...")
 
396
 
397
  try:
398
  response = client.chat.completions.create(
399
+ model="alias-huge",
400
  messages=[{"role": "user", "content": prompt}]
401
  )
402
  content = response.choices[0].message.content
 
435
  if not client:
436
  return None
437
 
438
+ # Step 1: Breakdown profile into parameters using LLM alias-huge
439
  prompt = f"""
440
  You are an expert in persona creation.
441
  Break down the following business theme and customer profile into detailed attributes for a persona.
 
459
 
460
  try:
461
  response = client.chat.completions.create(
462
+ model="alias-huge",
463
  messages=[{"role": "user", "content": prompt}],
464
  response_format={"type": "json_object"}
465
  )
 
536
 
537
  add_log("Falling back to TinyTroupe logic for remaining personas...")
538
 
539
+ # Ensure alias-huge is used
540
+ config_manager.update("model", "alias-huge")
541
+ config_manager.update("reasoning_model", "alias-huge")
542
 
543
  context = f"A company related to {theme}. Target customers: {customer_profile}"
544
 
 
616
  Do not include any other text in your response.
617
  """
618
 
619
+ models_to_try = ["alias-huge", "alias-fast", "alias-large"]
620
 
621
  for attempt in range(5):
622
  try:
 
628
  response = call_llm_parallel(client, models_to_try, [{"role": "user", "content": prompt}], response_format={"type": "json_object"})
629
  else:
630
  response = client.chat.completions.create(
631
+ model="alias-huge",
632
  messages=[{"role": "user", "content": prompt}],
633
  response_format={"type": "json_object"}
634
  )
 
667
  if ex_personas:
668
  current_profile = ex_personas[0].get('minibio', customer_profile)
669
 
670
+ yield "Thinking...", None, None, None
671
  tasks = generate_tasks(theme, current_profile, url)
672
  tasks_text = "\n".join(tasks) if isinstance(tasks, list) else str(tasks)
673
 
 
714
  sessions = []
715
  jules_uuids = []
716
 
 
 
 
 
 
 
 
 
717
  for persona in personas:
718
  # Use provided session_id or append to it if multiple personas?
719
  # For simplicity, we use session_id as the report_id too
720
  report_id = session_id
721
 
722
  # Format prompt
723
+ prompt = template.replace("{{persona_context}}", json.dumps(persona))
724
+ prompt = prompt.replace("{{tasks_list}}", json.dumps(tasks))
725
+ prompt = prompt.replace("{{url}}", url)
726
  prompt = prompt.replace("{{report_id}}", report_id)
727
  prompt = prompt.replace("{{blablador_api_key}}", BLABLADOR_API_KEY if BLABLADOR_API_KEY else "YOUR_API_KEY")
728
 
 
736
  "sourceContext": {
737
  "source": f"sources/github/{repo_name}",
738
  "githubRepoContext": {
739
+ "startingBranch": "main"
740
  }
741
  },
742
  "automationMode": "AUTO_CREATE_PR",
verify_thinking.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import asyncio

from playwright.async_api import async_playwright


async def verify_thinking_status():
    """Drive the local Gradio app with headless Chromium and confirm that the
    transient "Thinking..." status appears after the Generate button is clicked.

    A screenshot is saved to ``thinking_verification.png`` for manual review,
    whether or not the status was observed.
    """
    async with async_playwright() as pw:
        browser = await pw.chromium.launch(headless=True)
        page = await browser.new_page()
        try:
            await page.goto("http://localhost:7860", timeout=60000)
            print("Page loaded.")

            # Fill mandatory fields
            await page.get_by_label("Theme").fill("Test Theme")
            await page.get_by_label("Target URL").fill("https://example.com")

            # Kick off generation
            await page.get_by_role("button", name="Generate Personas & Tasks").click()
            print("Clicked Generate button.")

            # The status message is transient, so poll the Status textbox
            # (up to 20 polls, 0.5 s apart) rather than reading it once.
            status_box = page.get_by_label("Status")
            for _ in range(20):
                current = await status_box.input_value()
                if "Thinking..." in current:
                    print(f"Found status: {current}")
                    break
                await asyncio.sleep(0.5)
            else:
                # Loop completed without breaking: status never observed.
                print("Thinking... status not found or appeared too quickly.")

            await page.screenshot(path="thinking_verification.png")

        except Exception as e:
            # Top-level boundary for this standalone check: report the failure
            # and fall through so the browser is still closed below.
            print(f"Error: {e}")
        finally:
            await browser.close()


if __name__ == "__main__":
    asyncio.run(verify_thinking_status())