fffffwl committed on
Commit
b70a584
·
1 Parent(s): 96a4a6f
Files changed (1) hide show
  1. app.py +15 -42
app.py CHANGED
@@ -562,36 +562,21 @@ def _stars_for_score(score: float) -> str:
562
  return "★" * n + "☆" * (5 - n)
563
 
564
 
565
- def run_query(requirements: str, use_mcp: bool, debug_mode: bool):
566
- debug_text = ""
567
- if debug_mode:
568
- logger.info(
569
- f"RUN start use_mcp={use_mcp} keyword={keyword!r} min_prize={min_prize} days_ahead={days_ahead}"
570
- )
571
  # Defaults for agent flow
572
  min_prize = 0.0
573
  days_ahead = 90
574
 
575
  # Extract compact keywords from requirements
576
- keyword, kw_logs = _extract_keywords(requirements, debug=debug_mode)
577
-
578
- if use_mcp:
579
- items, err, dbg = _mcp_list_challenges(debug=debug_mode)
580
- debug_text = dbg or ""
581
- if kw_logs:
582
- debug_text = (debug_text + "\n\n" + kw_logs).strip()
583
- if err:
584
- items = FALLBACK_DATA
585
- status = f"MCP fallback: {err}"
586
- if debug_mode:
587
- logger.info(f"RUN fallback reason: {err}")
588
- else:
589
- status = "MCP OK"
590
- if debug_mode:
591
- logger.info("RUN MCP OK")
592
- else:
593
  items = FALLBACK_DATA
594
- status = "Local sample"
 
 
595
 
596
  # Apply time window first, then keyword/prize filtering
597
  items = _filter_by_days(items, days_ahead)
@@ -600,11 +585,10 @@ def run_query(requirements: str, use_mcp: bool, debug_mode: bool):
600
  filtered = shortlist(items, "", min_prize)
601
  # Enforce LLM config for scoring
602
  ok, cfg_msg = _require_llm_config()
603
- score_logs = ""
604
  if not ok:
605
  status = f"{status} | LLM config required: {cfg_msg}"
606
  else:
607
- scored, score_logs = _score_items(filtered, keyword, debug=debug_mode)
608
  if scored:
609
  scored.sort(key=lambda t: t[1], reverse=True)
610
  filtered = [c for c, _, _ in scored]
@@ -612,7 +596,7 @@ def run_query(requirements: str, use_mcp: bool, debug_mode: bool):
612
  if not filtered and items:
613
  filtered = items
614
  status = f"{status} (no matches; showing unfiltered)"
615
- plan_text, plan_logs = _generate_plan(filtered, keyword, min_prize, days_ahead, debug=debug_mode) if filtered else ("", "")
616
 
617
  # Build a map from id to (score, reason) for star display
618
  id_to_score_reason: dict[str, tuple[float, str]] = {}
@@ -633,33 +617,22 @@ def run_query(requirements: str, use_mcp: bool, debug_mode: bool):
633
  (r[:160] + ("…" if len(r) > 160 else "")) if r else "",
634
  c.id,
635
  ])
636
- # Append model logs into debug output when enabled and also log to console
637
- if debug_mode:
638
- merged_logs = (debug_text + "\n\n" + score_logs + "\n\n" + plan_logs).strip()
639
- debug_text = merged_logs
640
- if merged_logs:
641
- logger.info(merged_logs)
642
- return rows, status, debug_text, plan_text
643
 
644
 
645
  with gr.Blocks(title="Topcoder Challenge Scout") as demo:
646
  gr.Markdown("**Topcoder Challenge Scout** — agent picks tools, you provide requirements")
647
- with gr.Row():
648
- requirements = gr.Textbox(label="Requirements", placeholder="e.g. Looking for recent active LLM development challenges with web UI", lines=3)
649
- use_mcp = gr.Checkbox(label="Use MCP (recommended)", value=True)
650
- debug_mode = gr.Checkbox(label="Debug mode", value=False)
651
  gr.Markdown("Default filters: within last 90 days, active status. The agent extracts minimal keywords automatically.")
652
  run_btn = gr.Button("Find challenges")
653
  status = gr.Textbox(label="Status", interactive=False)
654
  table = gr.Dataframe(headers=["Title", "Prize", "Deadline", "Tags", "Recommend", "AI Reason", "Id"], wrap=True)
655
  plan_md = gr.Markdown("", label="Plan")
656
- with gr.Accordion("Debug output", open=False):
657
- debug_out = gr.Textbox(label="Logs", interactive=False, lines=8)
658
 
659
  run_btn.click(
660
  fn=run_query,
661
- inputs=[requirements, use_mcp, debug_mode],
662
- outputs=[table, status, debug_out, plan_md],
663
  )
664
 
665
 
 
562
  return "★" * n + "☆" * (5 - n)
563
 
564
 
565
+ def run_query(requirements: str):
 
 
 
 
 
566
  # Defaults for agent flow
567
  min_prize = 0.0
568
  days_ahead = 90
569
 
570
  # Extract compact keywords from requirements
571
+ keyword, _ = _extract_keywords(requirements, debug=False)
572
+
573
+ # Always use MCP in production UI; fallback to local sample on error
574
+ items, err, _ = _mcp_list_challenges(debug=False)
575
+ if err:
 
 
 
 
 
 
 
 
 
 
 
 
576
  items = FALLBACK_DATA
577
+ status = f"MCP fallback: {err}"
578
+ else:
579
+ status = "MCP OK"
580
 
581
  # Apply time window first, then keyword/prize filtering
582
  items = _filter_by_days(items, days_ahead)
 
585
  filtered = shortlist(items, "", min_prize)
586
  # Enforce LLM config for scoring
587
  ok, cfg_msg = _require_llm_config()
 
588
  if not ok:
589
  status = f"{status} | LLM config required: {cfg_msg}"
590
  else:
591
+ scored, _ = _score_items(filtered, keyword, debug=False)
592
  if scored:
593
  scored.sort(key=lambda t: t[1], reverse=True)
594
  filtered = [c for c, _, _ in scored]
 
596
  if not filtered and items:
597
  filtered = items
598
  status = f"{status} (no matches; showing unfiltered)"
599
+ plan_text, _ = _generate_plan(filtered, keyword, min_prize, days_ahead, debug=False) if filtered else ("", "")
600
 
601
  # Build a map from id to (score, reason) for star display
602
  id_to_score_reason: dict[str, tuple[float, str]] = {}
 
617
  (r[:160] + ("…" if len(r) > 160 else "")) if r else "",
618
  c.id,
619
  ])
620
+ return rows, status, plan_text
 
 
 
 
 
 
621
 
622
 
623
  with gr.Blocks(title="Topcoder Challenge Scout") as demo:
624
  gr.Markdown("**Topcoder Challenge Scout** — agent picks tools, you provide requirements")
625
+ requirements = gr.Textbox(label="Requirements", placeholder="e.g. Looking for recent active LLM development challenges with web UI", lines=3)
 
 
 
626
  gr.Markdown("Default filters: within last 90 days, active status. The agent extracts minimal keywords automatically.")
627
  run_btn = gr.Button("Find challenges")
628
  status = gr.Textbox(label="Status", interactive=False)
629
  table = gr.Dataframe(headers=["Title", "Prize", "Deadline", "Tags", "Recommend", "AI Reason", "Id"], wrap=True)
630
  plan_md = gr.Markdown("", label="Plan")
 
 
631
 
632
  run_btn.click(
633
  fn=run_query,
634
+ inputs=[requirements],
635
+ outputs=[table, status, plan_md],
636
  )
637
 
638