fffffwl committed on
Commit
23d25ab
·
1 Parent(s): 3d3f278

optimized prompt

Browse files
Files changed (1) hide show
  1. app.py +62 -1
app.py CHANGED
@@ -381,6 +381,67 @@ def _generate_plan(items: List[Challenge], keyword: str, min_prize: float, days_
381
  return "", "\n".join(logs)
382
 
383
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
384
  def _score_items(items: List[Challenge], keyword: str, debug: bool = False) -> Tuple[List[tuple[Challenge, float, str]], str]:
385
  """Score challenges using OpenAI API and return (challenge, score, reason) tuples"""
386
  api_key = os.getenv("OPENAI_API_KEY")
@@ -598,7 +659,7 @@ def run_query(requirements: str):
598
  if not filtered and items:
599
  filtered = items
600
  status = f"{status} (no matches; showing unfiltered)"
601
- plan_text, _ = _generate_plan(filtered, keyword, min_prize, days_ahead, debug=False) if filtered else ("", "")
602
 
603
  # Build a map from id to (score, reason) for star display
604
  id_to_score_reason: dict[str, tuple[float, str]] = {}
 
381
  return "", "\n".join(logs)
382
 
383
 
384
def _generate_plan_fixed(
    ranked: List[tuple[Challenge, float, str]],
    keyword: str,
    days_ahead: int,
    debug: bool = False,
) -> Tuple[str, str]:
    """Phrase an action plan for pre-ranked picks without altering their order.

    The LLM is only used to verbalize the reasons and the plan of action;
    ranking happened upstream and must not be redone here.

    Args:
        ranked: (challenge, score, reason) tuples, already sorted best-first.
        keyword: The user's search query, echoed into the prompt.
        days_ahead: Deadline window in days, echoed into the prompt.
        debug: When True, capture prompt/output/error snippets in the log text.

    Returns:
        A ``(plan_text, log_text)`` pair; ``plan_text`` is the empty string
        when the API key is missing or the API call fails.
    """
    # Serialize only the top three picks; cap tags and reason length to keep
    # the prompt compact.
    payload = []
    for challenge, score, reason in ranked[:3]:
        payload.append(
            {
                "title": challenge.title,
                "prize": challenge.prize,
                "deadline": challenge.deadline,
                "tags": challenge.tags[:6],
                "score": round(score, 4),
                "reason": (reason or "").strip()[:300],
            }
        )

    trace: List[str] = []
    key = os.getenv("OPENAI_API_KEY")
    endpoint = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
    if not key:
        trace.append("Missing OPENAI_API_KEY")
        return "", "\n".join(trace)

    try:
        llm = OpenAI(api_key=key, base_url=endpoint)
        instructions = (
            "You are a concise challenge scout. You are given pre-ranked top picks (in order).\n"
            "Do NOT change the order or add/remove items.\n"
            "Output exactly:\n"
            "- Top 3 picks (title + short reason).\n"
            "- Quick plan of action (3 bullets).\n"
            f"Constraints: query='{keyword}', within {days_ahead} days.\n"
            f"Ranked data: {json.dumps(payload)}"
        )
        if debug:
            trace.append(f"PLAN(FIXED) prompt: {instructions[:1200]}")
        completion = llm.chat.completions.create(
            model=os.getenv("OPENAI_MODEL", "gpt-4o-mini"),
            messages=[
                {"role": "system", "content": "Be terse. Do not re-rank or add items."},
                {"role": "user", "content": instructions},
            ],
            temperature=0.3,
            timeout=20,
        )
        answer = (completion.choices[0].message.content or "").strip()
        if debug:
            trace.append(f"PLAN(FIXED) output: {answer[:800]}")
        return answer, "\n".join(trace)
    except Exception as exc:  # best-effort: report via empty plan rather than crash
        if debug:
            trace.append(f"PLAN(FIXED) error: {exc}")
        return "", "\n".join(trace)
444
+
445
  def _score_items(items: List[Challenge], keyword: str, debug: bool = False) -> Tuple[List[tuple[Challenge, float, str]], str]:
446
  """Score challenges using OpenAI API and return (challenge, score, reason) tuples"""
447
  api_key = os.getenv("OPENAI_API_KEY")
 
659
  if not filtered and items:
660
  filtered = items
661
  status = f"{status} (no matches; showing unfiltered)"
662
+ plan_text, _ = _generate_plan_fixed(scored, keyword, days_ahead, debug=False) if ok and scored else ("", "")
663
 
664
  # Build a map from id to (score, reason) for star display
665
  id_to_score_reason: dict[str, tuple[float, str]] = {}