arka7 committed on
Commit
5f3a317
·
verified ·
1 Parent(s): d0da0a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +313 -469
app.py CHANGED
@@ -29,8 +29,8 @@ load_dotenv()
29
 
30
  # Hugging Face Configuration
31
  HF_TOKEN = os.getenv("HF_TOKEN")
32
- HF_DATASET_REPO = os.getenv("HF_DATASET_REPO", "tuesday-trivia-posts") # e.g., "username/trivia-posts-db"
33
- HF_USERNAME = os.getenv("HF_USERNAME", None) # Your HF username
34
 
35
  # Auto-construct repo name if not provided
36
  if "/" not in HF_DATASET_REPO and HF_USERNAME:
@@ -75,6 +75,7 @@ class APIKeyRotator:
75
  print(f"โš ๏ธ No API keys found for {self.service_name}. Ensure secrets are set.")
76
  return []
77
 
 
78
  random.shuffle(keys)
79
  return keys
80
 
@@ -148,61 +149,31 @@ llm_small = create_llm_with_rotation("mistral-small-latest", temperature=0.2)
148
  llm_medium = create_llm_with_rotation("mistral-medium-latest", temperature=0.2)
149
  llm_large = create_llm_with_rotation("mistral-large-latest", temperature=0.2)
150
 
151
- # --- 4. DATABASE FUNCTIONS WITH HF SYNC ---
152
 
153
  def get_db_connection():
154
  return sqlite3.connect(str(DB_PATH))
155
 
156
  def sync_from_hf():
157
- """Download database from HF Dataset if it exists"""
158
  if not HF_TOKEN or not hf_api:
159
  print("โš ๏ธ No HF_TOKEN found, skipping sync from HF")
160
  return False
161
-
162
  try:
163
  print("๐Ÿ“ฅ Syncing database from Hugging Face...")
164
-
165
- # Try to load the dataset
166
  try:
167
  dataset = load_dataset(HF_DATASET_REPO, split="train", token=HF_TOKEN)
168
-
169
- # Check if dataset is empty
170
  if len(dataset) == 0:
171
  print("โ„น๏ธ Dataset exists but is empty")
172
  return False
173
-
174
- # Convert dataset to SQLite
175
  conn = get_db_connection()
176
  cursor = conn.cursor()
177
-
178
- # Clear existing data
179
  cursor.execute('DELETE FROM posts')
180
-
181
- # Insert data from HF
182
  for row in dataset:
183
- cursor.execute('''
184
- INSERT INTO posts (id, date, topic, summary, source_url, quality_score,
185
- engagement_score, hashtags, created_at, updated_at)
186
- VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
187
- ''', (
188
- row.get('id'),
189
- row.get('date'),
190
- row.get('topic'),
191
- row.get('summary'),
192
- row.get('source_url'),
193
- row.get('quality_score'),
194
- row.get('engagement_score'),
195
- row.get('hashtags'),
196
- row.get('created_at'),
197
- row.get('updated_at')
198
- ))
199
-
200
  conn.commit()
201
  conn.close()
202
-
203
  print(f"โœ… Synced {len(dataset)} posts from HF Dataset")
204
  return True
205
-
206
  except Exception as e:
207
  error_msg = str(e).lower()
208
  if "not found" in error_msg or "doesn't exist" in error_msg or "doesn't contain any data files" in error_msg:
@@ -211,126 +182,69 @@ def sync_from_hf():
211
  else:
212
  print(f"โš ๏ธ Error loading dataset: {e}")
213
  return False
214
-
215
  except Exception as e:
216
  print(f"โš ๏ธ Error syncing from HF: {e}")
217
  return False
218
 
219
  def sync_to_hf():
220
- """Upload database to HF Dataset"""
221
  if not HF_TOKEN or not hf_api:
222
  print("โš ๏ธ No HF_TOKEN found, skipping sync to HF")
223
  return False
224
-
225
  try:
226
  print("๐Ÿ“ค Syncing database to Hugging Face...")
227
-
228
  conn = get_db_connection()
229
  cursor = conn.cursor()
230
  cursor.execute('SELECT * FROM posts')
231
-
232
  columns = [description[0] for description in cursor.description]
233
  rows = cursor.fetchall()
234
  conn.close()
235
-
236
  if not rows:
237
  print("โ„น๏ธ No posts to sync")
238
  return False
239
-
240
- # Convert to dictionary format for HF Dataset
241
  data = {col: [] for col in columns}
242
  for row in rows:
243
  for col, value in zip(columns, row):
244
  data[col].append(value)
245
-
246
- # Create and push dataset
247
  dataset = Dataset.from_dict(data)
248
-
249
- # Try to push, create repo if it doesn't exist
250
  try:
251
- dataset.push_to_hub(
252
- HF_DATASET_REPO,
253
- token=HF_TOKEN,
254
- private=True # Make it private by default
255
- )
256
  print(f"โœ… Synced {len(rows)} posts to HF Dataset: {HF_DATASET_REPO}")
257
  return True
258
-
259
  except Exception as push_error:
260
  if "not found" in str(push_error).lower():
261
- # Create the repo first
262
  print(f"๐Ÿ“ฆ Creating new dataset repository: {HF_DATASET_REPO}")
263
- hf_api.create_repo(
264
- repo_id=HF_DATASET_REPO,
265
- token=HF_TOKEN,
266
- repo_type="dataset",
267
- private=True
268
- )
269
- # Try pushing again
270
- dataset.push_to_hub(
271
- HF_DATASET_REPO,
272
- token=HF_TOKEN,
273
- private=True
274
- )
275
  print(f"โœ… Created and synced to new dataset: {HF_DATASET_REPO}")
276
  return True
277
  else:
278
  raise push_error
279
-
280
  except Exception as e:
281
  print(f"โŒ Error syncing to HF: {e}")
282
  return False
283
 
284
  def init_database():
285
- """Initialize local database and sync from HF if available"""
286
  conn = get_db_connection()
287
  cursor = conn.cursor()
288
- cursor.execute('''
289
- CREATE TABLE IF NOT EXISTS posts (
290
- id INTEGER PRIMARY KEY AUTOINCREMENT,
291
- date TEXT NOT NULL,
292
- topic TEXT NOT NULL,
293
- summary TEXT NOT NULL,
294
- source_url TEXT,
295
- quality_score REAL CHECK(quality_score >= 0 AND quality_score <= 10),
296
- engagement_score REAL,
297
- hashtags TEXT,
298
- created_at TEXT NOT NULL,
299
- updated_at TEXT
300
- )
301
- ''')
302
  cursor.execute('CREATE INDEX IF NOT EXISTS idx_date ON posts(date)')
303
  cursor.execute('CREATE INDEX IF NOT EXISTS idx_quality_score ON posts(quality_score)')
304
  cursor.execute('CREATE INDEX IF NOT EXISTS idx_created_at ON posts(created_at)')
305
- cursor.execute('''
306
- CREATE TABLE IF NOT EXISTS metadata (
307
- key TEXT PRIMARY KEY, value TEXT, updated_at TEXT NOT NULL
308
- )
309
- ''')
310
- cursor.execute('''
311
- INSERT OR REPLACE INTO metadata (key, value, updated_at)
312
- VALUES ('db_version', '1.0', ?)
313
- ''', (datetime.now().isoformat(),))
314
  conn.commit()
315
  conn.close()
316
-
317
- # Sync from HF after initializing schema
318
  sync_from_hf()
319
 
320
- # Initialize immediately
321
  init_database()
322
 
323
- # --- 5. TOOLS ---
324
 
325
  @tool
326
  def search_science_breakthroughs(query: str) -> str:
327
  """Search for recent scientific breakthroughs."""
328
  try:
329
- search = TavilySearchResults(
330
- max_results=10,
331
- include_domains=["sciencedaily.com", "nature.com", "science.org", "newscientist.com", "scientificamerican.com"],
332
- search_depth="advanced"
333
- )
334
  results = search.invoke(query)
335
  return json.dumps(results, indent=2)
336
  except Exception as e:
@@ -355,7 +269,7 @@ def fetch_article_content(url: str) -> str:
355
 
356
  @tool
357
  def get_all_previous_posts() -> str:
358
- """Retrieve all previously published posts."""
359
  conn = None
360
  try:
361
  conn = get_db_connection()
@@ -365,7 +279,9 @@ def get_all_previous_posts() -> str:
365
  if not posts: return "No previous posts found."
366
  formatted = []
367
  for p in posts:
368
- formatted.append({"id": p[0], "date": p[1], "topic": p[2], "summary": p[3][:100], "source_url": p[4], "quality_score": p[5]})
 
 
369
  return json.dumps(formatted, indent=2)
370
  except Exception as e:
371
  return f"Error: {str(e)}"
@@ -399,18 +315,12 @@ def save_approved_post(topic: str, summary: str, source_url: str, quality_score:
399
  conn = get_db_connection()
400
  cursor = conn.cursor()
401
  now = datetime.now()
402
- cursor.execute('''
403
- INSERT INTO posts (date, topic, summary, source_url, quality_score, hashtags, created_at, updated_at)
404
- VALUES (?, ?, ?, ?, ?, ?, ?, ?)
405
- ''', (now.strftime('%Y-%m-%d'), topic, summary, source_url, quality_score, hashtags, now.isoformat(), now.isoformat()))
406
  conn.commit()
407
  post_id = cursor.lastrowid
408
  conn.close()
409
-
410
- # Sync to HF after saving
411
  sync_result = sync_to_hf()
412
  sync_msg = "and synced to HF Dataset โ˜๏ธ" if sync_result else "(HF sync skipped)"
413
-
414
  return f"Post saved successfully with ID: {post_id} {sync_msg}"
415
  except Exception as e:
416
  return f"Error saving post: {str(e)}"
@@ -423,26 +333,43 @@ def save_approved_post(topic: str, summary: str, source_url: str, quality_score:
423
 
424
  @tool
425
  def check_topic_similarity(new_topic: str) -> str:
426
- """Check topic similarity."""
427
  conn = None
428
  try:
429
  conn = get_db_connection()
430
  cursor = conn.cursor()
431
  cursor.execute('SELECT topic, summary FROM posts')
432
  previous = cursor.fetchall()
433
- if not previous: return json.dumps({"is_duplicate": False, "similar_posts": []})
 
 
 
 
434
 
435
- new_keywords = set(new_topic.lower().split())
436
  similar = []
437
  for prev_topic, prev_summary in previous:
438
- prev_keywords = set(prev_topic.lower().split())
 
 
 
439
  if not new_keywords: continue
440
  overlap = len(new_keywords & prev_keywords)
441
- similarity = overlap / len(new_keywords)
442
- if similarity > 0.5:
443
- similar.append({"topic": prev_topic, "similarity": similarity})
444
-
445
- return json.dumps({"is_duplicate": len(similar) > 0, "similar_posts": similar}, indent=2)
 
 
 
 
 
 
 
 
 
 
 
446
  except Exception as e:
447
  return f"Error: {str(e)}"
448
  finally:
@@ -455,69 +382,26 @@ def count_words(text: str) -> str:
455
 
456
  @tool
457
  def get_example_posts_for_writer() -> str:
458
- """Get example posts to guide the writer agent in creating high-quality content."""
459
  examples = [
460
- {
461
- "title": "Scientists Found 14 Mysterious Creatures in the Ocean's Darkest Depths",
462
- "content": """Scientists Found 14 Mysterious Creatures in the Ocean's Darkest Depths
463
 
464
  Scientists have just revealed 14 new species living miles beneath the ocean's surfaceโ€”some found at depths over 6,000 meters! This discovery is part of the Ocean Species Discoveries initiative, which is changing how new marine life is identified and shared with the world. Among the surprises: a record-setting deep-sea mollusk, a carnivorous bivalve, and even a popcorn-shaped parasitic isopod. Each of these creatures shows just how much life still hides in the ocean's darkest corners.
465
  What's even cooler? The team is using cutting-edge lab techniques to make classifying species faster, more open, and globally collaborativeโ€”helping scientists everywhere explore and protect our planet's last great frontier: the deep sea.
466
 
467
- #TuesdayTrivia #RnDCell #CCA #OceanDiscovery #MarineBiology #DeepSea""",
468
- "structure": "Title -> One line gap -> First paragraph -> Next line -> Second paragraph -> One line gap -> Hashtags (must include #TuesdayTrivia #RnDCell #CCA)"
469
- },
470
- {
471
- "title": "Researchers Have Developed Next-Generation Plant Immune System to Combat Bacterial Diseases",
472
- "content": """Researchers Have Developed Next-Generation Plant Immune System to Combat Bacterial Diseases
473
-
474
- Researchers at the University of California, Davis have used artificial intelligence to boost plant immunity, helping crops like tomatoes and potatoes resist bacterial infections. Using AlphaFold, an AI system that predicts protein structures, they redesigned the immune receptor FLS2, which identifies flagellin, a bacterial movement protein. Since bacteria mutate flagellin to escape detection, the AI-guided redesign enabled plants to recognize more bacterial variants and strengthen their defenses.
475
- By altering key amino acids, the team reactivated weakened receptors, restoring the plants' ability to detect pathogens. This innovation could provide broad-spectrum disease resistance in major crops, including protection against Ralstonia solanacearum, the bacterium that causes bacterial wilt. The researchers now plan to apply machine learning to forecast future receptor modifications and extend this strategy to other plants.
476
-
477
- #TuesdayTrivia #RnDCell #CCA #ArtificialIntelligence #PlantImmunity #AgriTech""",
478
- "structure": "Title -> One line gap -> First paragraph -> Next line -> Second paragraph -> One line gap -> Hashtags"
479
- }
480
  ]
481
  return json.dumps(examples, indent=2)
482
 
483
  @tool
484
  def get_example_posts_for_critic() -> str:
485
- """Get example posts with quality scores to guide the critic agent's evaluation."""
486
  examples = [
487
- {
488
- "title": "Researchers Have Developed a Cutting-Edge Silicon Photonic Chip to Enhance AI Efficiency",
489
- "content": """Researchers Have Developed a Cutting-Edge Silicon Photonic Chip to Enhance AI Efficiency
490
-
491
- Researchers at the University of Florida have developed a silicon photonic chip that uses light to handle convolution operations, the intensive pattern-recognition steps central to AI. The chip encodes data as laser light, directs it through microscopic Fresnel lenses etched onto silicon, and converts the results back into digital form. In tests, it classified handwritten digits with about 98% accuracy, comparable to electronic processors but with far lower energy use.
492
- The design also supports wavelength multiplexing, allowing multiple colored lasers to process different data streams at once. This is the first demonstration of on-chip optical computation applied to neural networks. With optical elements already emerging in commercial AI hardware, researchers believe chip-based optics could soon become common, offering faster and more sustainable paths for scaling AI systems.
493
-
494
- #TuesdayTrivia #RnDCell #CCA #ArtificialIntelligence #SiliconPhotonics #GreenTech""",
495
- "quality_score": 9.0
496
- },
497
- {
498
- "title": "Researchers Have Developed Injectable Skin That Brings Hope for Burn Victims and Scar-Free Healing",
499
- "content": """Researchers Have Developed Injectable Skin That Brings Hope for Burn Victims and Scar-Free Healing
500
-
501
- Imagine a reality in which burns and deep injuries can be healed with such ease that scarring is no longer a problem. This is now possible thanks to researchers at the University of Linkรถping in Sweden. They've created a special gel full of living skin cellsโ€”"skin in a syringe." This gel is either injected into wounds or 3D printed for skin transplants. Unlike traditional methods that only replace the outer layer of skin and almost inevitably result in scarring, this innovation helps the body heal deep dermis layers that provide strength, elasticity, and blood vessels to the skin.
502
- Here's how it works: fibroblast cells, which are the main builders of connective tissue, are grown and embedded in small gelatin beads. These are then combined through "Click Chemistry" with a hyaluronic acid-based gel. The final product is a material that easily squirts out of a syringe but then settles into place perfectly. In mouse experiments, cells survived and helped grow new blood vessels, which are essential for healthy skin. The team also fabricated thin elastic hydrogel fibers that act as microvesselsโ€”a fundamental development for future organ repair and transplantation.
503
-
504
- #TuesdayTrivia #RnDCell #CCA #TissueEngineering #RegenerativeMedicine #BiomedicalInnovation""",
505
- "quality_score": 9.5
506
- },
507
- {
508
- "title": "Researchers Have Developed AI-Driven Immune Cells to Precisely Target Cancer",
509
- "content": """Researchers Have Developed AI-Driven Immune Cells to Precisely Target Cancer
510
-
511
- Researchers have developed an advanced AI-driven approach that reprograms human immune cells to precisely target and destroy cancerโ€”all within a few weeks. Traditionally, creating such personalized cell therapies is a time-consuming process involving months of laboratory testing. By using powerful machine-learning tools, the team was able to rapidly design T cells capable of recognizing cancer cells with high accuracy while sparing healthy tissues.
512
- The AI system analyzed millions of receptor combinations and pinpointed the most effective designs, greatly accelerating the development timeline. Instead of relying on standard one-size-fits-all methods, doctors could soon adapt these AI-engineered immune cells to match the specific type of cancer found in each patient, offering new levels of precision and hope.
513
-
514
- #TuesdayTrivia #RnDCell #CCA #CancerResearch #ImmuneTherapy #FutureOfMedicine""",
515
- "quality_score": 9.0
516
- }
517
  ]
518
  return json.dumps(examples, indent=2)
519
 
520
- # --- 6. WORKFLOW STAGES ENUM ---
521
 
522
  class WorkflowStage(Enum):
523
  IDLE = "idle"
@@ -532,8 +416,6 @@ class WorkflowStage(Enum):
532
  COMPLETE = "complete"
533
  ERROR = "error"
534
 
535
- # --- 7. ENHANCED STATE ---
536
-
537
  class EnhancedAgentState(TypedDict):
538
  stage: str
539
  search_topic: str
@@ -546,282 +428,271 @@ class EnhancedAgentState(TypedDict):
546
  error_message: str
547
  progress_log: List[str]
548
 
549
- # --- 8. AGENT FUNCTIONS WITH PROGRESS TRACKING ---
550
 
551
  def run_discovery(state: EnhancedAgentState, progress_callback=None) -> EnhancedAgentState:
552
- """Discovery Agent with progress tracking"""
553
  try:
 
 
 
 
 
 
554
  if progress_callback:
555
- progress_callback("๐Ÿ” Searching for scientific breakthroughs...")
556
 
557
  topic = state.get("search_topic", "general science")
558
 
559
  system_msg = SystemMessage(content=f"""You are the Discovery Agent for Tuesday Trivia.
560
 
561
- CRITICAL INSTRUCTIONS:
562
- 1. You MUST use the search_science_breakthroughs tool to find recent articles
563
- 2. You MUST use get_all_previous_posts to check for duplicates
564
- 3. You MUST use check_topic_similarity to avoid similar topics
565
- 4. Find 10-15 RECENT breakthroughs from the last 2-4 weeks
566
- 5. Each breakthrough needs: Title, Brief Description, Source URL, Why it's interesting
567
 
568
- DO NOT provide generic information. ALWAYS use the tools to search for current data.
 
 
 
569
 
570
  Output Format:
571
- For each breakthrough provide:
572
- **Title:** [Exact title from source]
573
  **Description:** [2-3 sentences]
574
  **URL:** [Source link]
575
  **Why Interesting:** [1 sentence hook]
576
- ---
577
- """)
578
 
579
- user_msg = HumanMessage(content=f"Search for recent breakthroughs in {topic}. Use the search tool to find current articles from the past 2-4 weeks.")
580
 
581
  discovery_llm = llm_small.bind_tools([search_science_breakthroughs, get_all_previous_posts, check_topic_similarity])
582
-
583
- if progress_callback:
584
- progress_callback("๐Ÿค– Running discovery agent...")
585
-
586
  response = discovery_llm.invoke([system_msg, user_msg])
587
  conversation = [system_msg, user_msg, response]
588
 
589
- max_steps = 8 # Increased to ensure tool usage
590
  steps = 0
591
- tool_was_called = False
592
 
593
  while hasattr(response, 'tool_calls') and response.tool_calls and steps < max_steps:
594
- if progress_callback:
595
- progress_callback(f"๐Ÿ”ง Executing tools (step {steps+1}/{max_steps})...")
596
-
597
  tool_messages = []
598
  for tool_call in response.tool_calls:
599
  name = tool_call['name']
600
- args = tool_call['args']
601
- tool_was_called = True
602
-
603
- if progress_callback:
604
- progress_callback(f"๐Ÿ”ง Using {name}...")
605
-
606
  if name == 'search_science_breakthroughs':
607
- res = search_science_breakthroughs.invoke(args)
608
  elif name == 'get_all_previous_posts':
609
- res = get_all_previous_posts.invoke(args)
610
  elif name == 'check_topic_similarity':
611
- res = check_topic_similarity.invoke(args)
612
  else:
613
  res = f"Unknown tool: {name}"
614
-
615
  tool_messages.append(ToolMessage(content=str(res), tool_call_id=tool_call['id']))
616
-
617
  conversation.extend(tool_messages)
618
  response = discovery_llm.invoke(conversation)
619
  conversation.append(response)
620
  steps += 1
621
 
622
- # If no tools were called, force a search
623
- if not tool_was_called:
624
- if progress_callback:
625
- progress_callback("โš ๏ธ Agent didn't search, forcing search...")
626
-
627
- # Manually call the search tool
628
- search_query = f"recent {topic} breakthroughs scientific discoveries"
629
- search_result = search_science_breakthroughs.invoke({"query": search_query})
630
-
631
- # Create a follow-up prompt
632
- force_msg = HumanMessage(content=f"Here are the search results. Now format them into 10-15 breakthroughs:\n\n{search_result}")
633
- conversation.append(force_msg)
634
- response = discovery_llm.invoke(conversation)
635
-
636
- # Parse candidates from response
637
- candidates_text = response.content
638
-
639
- if progress_callback:
640
- progress_callback("โœ… Discovery complete!")
641
-
642
- state["candidates"] = [{"raw": candidates_text}]
643
  state["stage"] = WorkflowStage.CHECKPOINT_1.value
644
- state["progress_log"].append("Discovery completed successfully")
645
-
646
  return state
647
-
648
  except Exception as e:
649
  state["stage"] = WorkflowStage.ERROR.value
650
  state["error_message"] = f"Discovery failed: {str(e)}"
651
- state["progress_log"].append(f"โŒ Error: {str(e)}")
652
  return state
653
 
654
  def run_curator(state: EnhancedAgentState, progress_callback=None) -> EnhancedAgentState:
655
- """Curator Agent with progress tracking"""
656
  try:
 
 
 
 
 
 
657
  if progress_callback:
658
- progress_callback("๐ŸŽฏ Ranking and selecting best story...")
659
 
660
  candidates = state.get("candidates", [])
661
  candidates_text = candidates[0].get("raw", "") if candidates else ""
662
 
663
- system_msg = SystemMessage(content="""You are the Curator Agent.
664
- Rank candidates (1-10) on Recency, Significance, Engagement. Select TOP story.
665
- Output: RANKED CANDIDATES list, then SELECTED STORY with Justification.""")
 
 
 
 
 
 
 
 
 
 
666
 
667
- user_msg = HumanMessage(content=f"Evaluate these candidates:\n{candidates_text}")
668
 
669
- curator_llm = llm_large.bind_tools([])
670
  response = curator_llm.invoke([system_msg, user_msg])
 
671
 
672
- if progress_callback:
673
- progress_callback("โœ… Story selected!")
 
 
 
 
 
 
 
 
 
 
 
674
 
 
675
  state["selected_story"] = {"raw": response.content}
676
  state["stage"] = WorkflowStage.CHECKPOINT_2.value
677
- state["progress_log"].append("Curation completed")
678
-
679
  return state
680
-
681
  except Exception as e:
682
  state["stage"] = WorkflowStage.ERROR.value
683
  state["error_message"] = f"Curation failed: {str(e)}"
684
- state["progress_log"].append(f"โŒ Error: {str(e)}")
685
  return state
686
 
687
  def run_writer(state: EnhancedAgentState, progress_callback=None) -> EnhancedAgentState:
688
- """Writer Agent with progress tracking"""
689
  try:
 
 
 
 
 
 
690
  if progress_callback:
691
- progress_callback("โœ๏ธ Writing draft post...")
692
 
693
  selected_story = state.get("selected_story", {})
694
  story_text = selected_story.get("raw", "")
695
  retry_count = state.get("retry_count", 0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
696
 
697
- retry_context = ""
698
- if retry_count > 0:
699
- retry_context = f"\nPrevious attempt rejected. Feedback: {state.get('critic_feedback')}"
700
-
701
- system_msg = SystemMessage(content="""You are the Writer Agent for Tuesday Trivia posts.
702
-
703
- CRITICAL FORMAT REQUIREMENTS:
704
- 1. Title (capitalize important words)
705
- 2. ONE blank line
706
- 3. First paragraph (Hook - grab attention, set context)
707
- 4. Next line (no blank line between paragraphs)
708
- 5. Second paragraph (Technical details and impact)
709
- 6. ONE blank line
710
- 7. Hashtags: MUST include #TuesdayTrivia #RnDCell #CCA plus 2-4 topic-specific hashtags
711
-
712
- STYLE GUIDELINES:
713
- - Conversational, active voice
714
- - NO questions in the text
715
- - 140-180 words total
716
- - Make technical concepts accessible
717
- - Emphasize real-world impact
718
-
719
- HASHTAG FORMAT:
720
- - Always include: #TuesdayTrivia #RnDCell #CCA
721
- - Add relevant topic hashtags like #ArtificialIntelligence #Biotechnology #SpaceExploration
722
- - Use camelCase (e.g., #MarineBiology not #Marine_biology)
723
- - 5-7 hashtags total
724
-
725
- Use the get_example_posts_for_writer tool to see perfect examples of the format.""")
726
-
727
- user_msg = HumanMessage(content=f"Write a Tuesday Trivia post for:\n{story_text}\n{retry_context}")
728
-
729
- writer_llm = llm_medium.bind_tools([get_example_posts_for_writer, fetch_article_content, count_words])
730
 
 
731
  response = writer_llm.invoke([system_msg, user_msg])
732
  conversation = [system_msg, user_msg, response]
733
 
734
  steps = 0
735
  while hasattr(response, 'tool_calls') and response.tool_calls and steps < 5:
736
- if progress_callback:
737
- progress_callback(f"๐Ÿ”ง Writer using tools (step {steps+1})...")
738
-
739
- tool_messages = []
740
  for tool_call in response.tool_calls:
741
  name = tool_call['name']
742
- if name == 'get_example_posts_for_writer':
743
- res = get_example_posts_for_writer.invoke(tool_call['args'])
744
- elif name == 'fetch_article_content':
745
- res = fetch_article_content.invoke(tool_call['args'])
746
- elif name == 'count_words':
747
- res = count_words.invoke(tool_call['args'])
748
- else:
749
- res = "Unknown"
750
- tool_messages.append(ToolMessage(content=str(res), tool_call_id=tool_call['id']))
751
- conversation.extend(tool_messages)
752
  response = writer_llm.invoke(conversation)
753
  conversation.append(response)
754
  steps += 1
755
 
756
- if progress_callback:
757
- progress_callback("โœ… Draft completed!")
758
-
759
  state["draft_summary"] = response.content
760
  state["retry_count"] = retry_count + 1
761
  state["stage"] = WorkflowStage.CRITIC.value
762
- state["progress_log"].append(f"Draft written (attempt {retry_count + 1})")
763
-
764
  return state
765
-
766
  except Exception as e:
767
  state["stage"] = WorkflowStage.ERROR.value
768
  state["error_message"] = f"Writing failed: {str(e)}"
769
- state["progress_log"].append(f"โŒ Error: {str(e)}")
770
  return state
771
 
772
  def run_critic(state: EnhancedAgentState, progress_callback=None) -> EnhancedAgentState:
773
- """Critic Agent with progress tracking"""
774
  try:
 
 
 
 
 
 
775
  if progress_callback:
776
- progress_callback("๐Ÿ” Evaluating draft quality...")
777
 
778
  draft = state.get("draft_summary", "")
779
 
780
- system_msg = SystemMessage(content="""You are the Critic Agent for Tuesday Trivia posts.
781
 
782
  EVALUATION CRITERIA:
783
 
784
- 1. FORMAT (3 points):
785
- - Title capitalized properly
 
 
 
 
 
786
  - One blank line after title
787
- - Two paragraphs (no blank line between them)
788
  - One blank line before hashtags
789
  - Hashtags include #TuesdayTrivia #RnDCell #CCA
790
 
791
- 2. CONTENT (3 points):
792
- - First paragraph: Engaging hook + context
793
- - Second paragraph: Technical details + real-world impact
794
- - Accurate scientific information
795
  - 140-180 words
 
796
 
797
- 3. STYLE (2 points):
798
  - Conversational, active voice
799
- - No questions in the text
800
- - Technical concepts explained clearly
801
  - Engaging and accessible
802
 
803
- 4. HASHTAGS (2 points):
804
- - MUST include: #TuesdayTrivia #RnDCell #CCA
805
- - 2-4 additional relevant hashtags
806
- - camelCase format
807
- - 5-7 hashtags total
808
-
809
- OUTPUT FORMAT:
810
- Provide detailed feedback, then:
811
  TOTAL SCORE: X/10
812
- Decision: APPROVED (if โ‰ฅ8) or REJECTED (if <8)
813
 
814
- Use get_example_posts_for_critic tool to see high-quality examples.""")
815
 
816
- user_msg = HumanMessage(content=f"Evaluate this Tuesday Trivia post:\n\n{draft}")
817
 
818
- critic_llm = llm_large.bind_tools([get_example_posts_for_critic])
819
  response = critic_llm.invoke([system_msg, user_msg])
820
  conversation = [system_msg, user_msg, response]
821
 
822
  if hasattr(response, 'tool_calls') and response.tool_calls:
823
  for tool_call in response.tool_calls:
824
- res = get_example_posts_for_critic.invoke(tool_call['args'])
 
 
 
 
 
 
 
825
  conversation.append(ToolMessage(content=str(res), tool_call_id=tool_call['id']))
826
  response = critic_llm.invoke(conversation)
827
 
@@ -833,27 +704,27 @@ Use get_example_posts_for_critic tool to see high-quality examples.""")
833
  except:
834
  pass
835
 
836
- if progress_callback:
837
- progress_callback(f"โœ… Evaluation complete! Score: {score}/10")
838
-
839
  state["quality_score"] = score
840
  state["critic_feedback"] = text
841
  state["stage"] = WorkflowStage.CHECKPOINT_3.value
842
- state["progress_log"].append(f"Critic evaluation: {score}/10")
843
-
844
  return state
845
-
846
  except Exception as e:
847
  state["stage"] = WorkflowStage.ERROR.value
848
  state["error_message"] = f"Critic failed: {str(e)}"
849
- state["progress_log"].append(f"โŒ Error: {str(e)}")
850
  return state
851
 
852
  def run_finalize(state: EnhancedAgentState, progress_callback=None) -> EnhancedAgentState:
853
  """Finalize and save to database"""
854
  try:
 
 
 
 
 
855
  if progress_callback:
856
- progress_callback("๐Ÿ’พ Saving to database...")
857
 
858
  draft = state.get("draft_summary", "")
859
  score = state.get("quality_score", 0.0)
@@ -874,21 +745,16 @@ def run_finalize(state: EnhancedAgentState, progress_callback=None) -> EnhancedA
874
  "hashtags": hashtags
875
  })
876
 
877
- if progress_callback:
878
- progress_callback("โœ… Post saved successfully!")
879
-
880
  state["stage"] = WorkflowStage.COMPLETE.value
881
- state["progress_log"].append(f"Finalized: {res}")
882
-
883
  return state
884
-
885
  except Exception as e:
886
  state["stage"] = WorkflowStage.ERROR.value
887
  state["error_message"] = f"Finalization failed: {str(e)}"
888
- state["progress_log"].append(f"โŒ Error: {str(e)}")
889
  return state
890
 
891
- # --- 9. GRADIO INTERFACE ---
892
 
893
  def create_initial_state(topic: str) -> EnhancedAgentState:
894
  return {
@@ -920,8 +786,9 @@ def start_workflow(topic: str, progress=gr.Progress()):
920
  gr.update(visible=False),
921
  gr.update(visible=False),
922
  gr.update(visible=False),
923
- gr.update(visible=True), # Show restart
924
  "",
 
925
  gr.update(visible=False)
926
  )
927
 
@@ -930,11 +797,12 @@ def start_workflow(topic: str, progress=gr.Progress()):
930
  return (
931
  state,
932
  f"## ๐Ÿ” Discovery Results\n\n{candidates_text}",
933
- gr.update(visible=True), # Approve button
934
- gr.update(visible=True), # Reject button
935
  gr.update(visible=False),
936
  gr.update(visible=False),
937
  "",
 
938
  gr.update(visible=False)
939
  )
940
 
@@ -954,6 +822,7 @@ def handle_checkpoint1_approve(state, progress=gr.Progress()):
954
  gr.update(visible=False),
955
  gr.update(visible=True),
956
  "",
 
957
  gr.update(visible=False)
958
  )
959
 
@@ -962,17 +831,17 @@ def handle_checkpoint1_approve(state, progress=gr.Progress()):
962
  return (
963
  state,
964
  f"## ๐ŸŽฏ Selected Story\n\n{story_text}\n\n**Optional:** Provide instructions in the textbox below if you want to pick a different story.",
965
- gr.update(visible=True), # Approve button
966
- gr.update(visible=True), # Different story
967
  gr.update(visible=False),
968
  gr.update(visible=False),
969
  "",
970
- gr.update(visible=True) # Show instructions textbox
 
971
  )
972
 
973
  def handle_checkpoint1_reject(state, instructions, progress=gr.Progress()):
974
- """Handle rejection at checkpoint 1 - restart discovery with optional instructions"""
975
- # Add instructions to state if provided
976
  if instructions and instructions.strip():
977
  if "search_topic" in state:
978
  state["search_topic"] = f"{state['search_topic']} - Additional guidance: {instructions}"
@@ -991,6 +860,7 @@ def handle_checkpoint1_reject(state, instructions, progress=gr.Progress()):
991
  gr.update(visible=False),
992
  gr.update(visible=True),
993
  "",
 
994
  gr.update(visible=False)
995
  )
996
 
@@ -998,17 +868,18 @@ def handle_checkpoint1_reject(state, instructions, progress=gr.Progress()):
998
 
999
  return (
1000
  state,
1001
- f"## ๐Ÿ” Discovery Results (New Search)\n\n{candidates_text}\n\n**Optional:** Provide instructions in the textbox below if you want to reject again.",
1002
  gr.update(visible=True),
1003
  gr.update(visible=True),
1004
  gr.update(visible=False),
1005
  gr.update(visible=False),
1006
  "",
1007
- gr.update(visible=True) # Show instructions textbox
 
1008
  )
1009
 
1010
  def handle_checkpoint2_approve(state, progress=gr.Progress()):
1011
- """Handle approval at checkpoint 2 - proceed to writing"""
1012
  def update_progress(msg):
1013
  progress(0.6, desc=msg)
1014
 
@@ -1023,6 +894,7 @@ def handle_checkpoint2_approve(state, progress=gr.Progress()):
1023
  gr.update(visible=False),
1024
  gr.update(visible=True),
1025
  "",
 
1026
  gr.update(visible=False)
1027
  )
1028
 
@@ -1035,18 +907,18 @@ def handle_checkpoint2_approve(state, progress=gr.Progress()):
1035
 
1036
  return (
1037
  state,
1038
- f"## โœ๏ธ Draft Post\n\n{draft}\n\n---\n\n**Quality Score:** {score}/10\n\n**Feedback:**\n{feedback}\n\n**Optional:** Provide specific edit instructions in the textbox below if needed.",
1039
- gr.update(visible=True), # Finalize
1040
- gr.update(visible=True), # Edit/Rewrite
1041
  gr.update(visible=False),
1042
  gr.update(visible=False),
1043
  "",
1044
- gr.update(visible=True) # Show edit instructions
 
1045
  )
1046
 
1047
  def handle_checkpoint2_different(state, instructions, progress=gr.Progress()):
1048
- """Request different story - re-run curator with optional instructions"""
1049
- # Add instructions to curator's context if provided
1050
  if instructions and instructions.strip():
1051
  if "selected_story" in state:
1052
  state["critic_feedback"] = f"User preference: {instructions}"
@@ -1065,6 +937,7 @@ def handle_checkpoint2_different(state, instructions, progress=gr.Progress()):
1065
  gr.update(visible=False),
1066
  gr.update(visible=True),
1067
  "",
 
1068
  gr.update(visible=False)
1069
  )
1070
 
@@ -1072,13 +945,14 @@ def handle_checkpoint2_different(state, instructions, progress=gr.Progress()):
1072
 
1073
  return (
1074
  state,
1075
- f"## ๐ŸŽฏ Selected Story (Alternative)\n\n{story_text}\n\n**Optional:** Provide instructions in the textbox below if you want a different story.",
1076
  gr.update(visible=True),
1077
  gr.update(visible=True),
1078
  gr.update(visible=False),
1079
  gr.update(visible=False),
1080
  "",
1081
- gr.update(visible=True) # Show instructions textbox
 
1082
  )
1083
 
1084
  def handle_checkpoint3_finalize(state, progress=gr.Progress()):
@@ -1092,13 +966,14 @@ def handle_checkpoint3_finalize(state, progress=gr.Progress()):
1092
 
1093
  return (
1094
  state,
1095
- f"## โœ… Post Saved Successfully!\n\n{draft}\n\n---\n\n**Status:** Saved to database\n**Quality Score:** {state.get('quality_score', 0)}/10",
1096
  gr.update(visible=False),
1097
  gr.update(visible=False),
1098
  gr.update(visible=False),
1099
- gr.update(visible=True), # Show restart
1100
  "",
1101
- gr.update(visible=False)
 
1102
  )
1103
 
1104
  def handle_checkpoint3_edit(state, edit_instructions, progress=gr.Progress()):
@@ -1112,10 +987,10 @@ def handle_checkpoint3_edit(state, edit_instructions, progress=gr.Progress()):
1112
  gr.update(visible=False),
1113
  gr.update(visible=False),
1114
  edit_instructions,
1115
- gr.update(visible=True)
 
1116
  )
1117
 
1118
- # Add edit instructions to state for writer to use
1119
  if "critic_feedback" in state:
1120
  state["critic_feedback"] += f"\n\nUser edit request: {edit_instructions}"
1121
 
@@ -1133,6 +1008,7 @@ def handle_checkpoint3_edit(state, edit_instructions, progress=gr.Progress()):
1133
  gr.update(visible=False),
1134
  gr.update(visible=True),
1135
  "",
 
1136
  gr.update(visible=False)
1137
  )
1138
 
@@ -1151,23 +1027,31 @@ def handle_checkpoint3_edit(state, edit_instructions, progress=gr.Progress()):
1151
  gr.update(visible=False),
1152
  gr.update(visible=False),
1153
  "",
1154
- gr.update(visible=True)
 
1155
  )
1156
 
1157
  def restart_workflow():
1158
- """Reset everything to start fresh"""
1159
  return (
1160
  None,
1161
  "๐Ÿ‘‹ Ready to start! Enter a topic and click 'Start Discovery'",
1162
  gr.update(visible=False),
1163
  gr.update(visible=False),
1164
- gr.update(visible=True), # Show start button
1165
  gr.update(visible=False),
1166
  "",
 
1167
  gr.update(visible=False)
1168
  )
1169
 
1170
- # --- 10. BUILD GRADIO UI ---
 
 
 
 
 
 
1171
 
1172
  css = """
1173
  .output-box {
@@ -1187,50 +1071,28 @@ css = """
1187
  color: #1a1a1a !important;
1188
  font-weight: bold;
1189
  }
1190
- .output-box strong {
1191
- color: #000000 !important;
1192
- font-weight: 600;
1193
- }
1194
- .output-box a {
1195
- color: #0066cc !important;
1196
- text-decoration: underline;
1197
- }
1198
- .button-row {
1199
- display: flex;
1200
- gap: 10px;
1201
- margin-top: 10px;
1202
- }
1203
- .status-badge {
1204
- display: inline-block;
1205
- padding: 5px 10px;
1206
- border-radius: 5px;
1207
- font-weight: bold;
1208
- margin-bottom: 10px;
1209
- }
1210
- .gradio-container {
1211
- font-family: 'Inter', system-ui, sans-serif !important;
1212
- }
1213
  """
1214
 
1215
  with gr.Blocks(css=css, title="Tuesday Trivia Agent", theme=gr.themes.Soft()) as demo:
1216
  gr.Markdown("""
1217
  # ๐Ÿงช Tuesday Trivia Multi-Agent System
1218
 
1219
- An intelligent workflow for discovering, curating, and writing science trivia posts with human oversight at key checkpoints.
 
 
 
 
1220
 
1221
- **โ˜๏ธ Cloud Storage:** Posts are automatically synced to Hugging Face Datasets for persistence.
1222
  """)
1223
 
1224
- # State
1225
  state = gr.State()
1226
 
1227
  with gr.Row():
1228
  with gr.Column(scale=2):
1229
- # Main output area
1230
  output_display = gr.Markdown("๐Ÿ‘‹ Ready to start! Enter a topic and click 'Start Discovery'", elem_classes="output-box")
1231
 
1232
  with gr.Column(scale=1):
1233
- # Control panel
1234
  gr.Markdown("### ๐ŸŽฎ Control Panel")
1235
 
1236
  topic_input = gr.Textbox(
@@ -1246,7 +1108,6 @@ with gr.Blocks(css=css, title="Tuesday Trivia Agent", theme=gr.themes.Soft()) as
1246
  gr.Markdown("---")
1247
  gr.Markdown("### ๐Ÿ“‹ Decision Points")
1248
 
1249
- # Checkpoint buttons (hidden initially)
1250
  approve_btn = gr.Button("โœ… Approve", variant="primary", visible=False)
1251
  reject_btn = gr.Button("โŒ Reject / Different", variant="stop", visible=False)
1252
 
@@ -1257,136 +1118,119 @@ with gr.Blocks(css=css, title="Tuesday Trivia Agent", theme=gr.themes.Soft()) as
1257
  lines=3
1258
  )
1259
 
1260
- # Add sync status
 
 
1261
  with gr.Accordion("โ˜๏ธ Cloud Sync Status", open=False):
1262
  sync_status = gr.Markdown(f"""
1263
  **HF Dataset:** `{HF_DATASET_REPO}`
1264
  **Status:** {'โœ… Connected' if HF_TOKEN else 'โŒ Not configured'}
1265
 
1266
- Posts are automatically synced to Hugging Face Datasets after saving.
1267
  """)
1268
 
1269
  manual_sync_btn = gr.Button("๐Ÿ”„ Manual Sync to HF", size="sm")
1270
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1271
  gr.Markdown("---")
1272
  gr.Markdown("""
1273
  ### โ„น๏ธ Instructions
1274
 
1275
- **Workflow Steps:**
1276
- 1. Enter topic โ†’ Start Discovery
1277
- 2. Review candidates โ†’ Approve/Reject
1278
- 3. Review story selection โ†’ Approve/Different
1279
- 4. Review draft โ†’ Finalize/Edit
1280
- 5. Post saved to database & HF!
1281
 
1282
- **Cloud Storage:**
1283
- - All posts auto-sync to HF Dataset
1284
- - Survives Space restarts
1285
- - Access your data anytime
1286
-
1287
- **Tips:**
1288
- - Be specific with topics for better results
1289
- - Edit instructions help refine drafts
1290
- - All posts are saved with quality scores
1291
  """)
1292
 
1293
- # Manual sync function
1294
  def manual_sync():
1295
  result = sync_to_hf()
1296
  if result:
1297
- return "โœ… Successfully synced to Hugging Face Dataset!"
1298
- else:
1299
- return "โš ๏ธ Sync failed or no data to sync. Check logs."
1300
 
1301
- manual_sync_btn.click(
1302
- fn=manual_sync,
1303
- outputs=sync_status
1304
- )
1305
 
1306
  # Event handlers
1307
  start_btn.click(
1308
  fn=start_workflow,
1309
  inputs=[topic_input],
1310
- outputs=[state, output_display, approve_btn, reject_btn, start_btn, restart_btn, edit_instructions, edit_instructions]
1311
  )
1312
 
1313
- # Checkpoint 1 handlers
1314
- def route_checkpoint1_approve(s):
1315
- if s and s.get("stage") == WorkflowStage.CHECKPOINT_1.value:
1316
- return handle_checkpoint1_approve(s)
1317
- return s, "Invalid state", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update()
1318
-
1319
- def route_checkpoint1_reject(s, instructions):
1320
- if s and s.get("stage") == WorkflowStage.CHECKPOINT_1.value:
1321
- return handle_checkpoint1_reject(s, instructions)
1322
- return s, "Invalid state", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update()
1323
-
1324
- # Checkpoint 2 handlers
1325
- def route_checkpoint2_approve(s):
1326
- if s and s.get("stage") == WorkflowStage.CHECKPOINT_2.value:
1327
- return handle_checkpoint2_approve(s)
1328
- return s, "Invalid state", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update()
1329
-
1330
- def route_checkpoint2_different(s, instructions):
1331
- if s and s.get("stage") == WorkflowStage.CHECKPOINT_2.value:
1332
- return handle_checkpoint2_different(s, instructions)
1333
- return s, "Invalid state", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update()
1334
-
1335
- # Checkpoint 3 handlers
1336
- def route_checkpoint3_finalize(s):
1337
- if s and s.get("stage") == WorkflowStage.CHECKPOINT_3.value:
1338
- return handle_checkpoint3_finalize(s)
1339
- return s, "Invalid state", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update()
1340
-
1341
- def route_checkpoint3_edit(s, instructions):
1342
- if s and s.get("stage") == WorkflowStage.CHECKPOINT_3.value:
1343
- return handle_checkpoint3_edit(s, instructions)
1344
- return s, "Invalid state", gr.update(), gr.update(), gr.update(), gr.update(), instructions, gr.update()
1345
-
1346
- # Wire up the routing based on current stage
1347
  def smart_approve(s, instructions):
1348
  if not s:
1349
- return s, "No active workflow", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update()
1350
 
1351
  stage = s.get("stage")
1352
  if stage == WorkflowStage.CHECKPOINT_1.value:
1353
- return route_checkpoint1_approve(s)
1354
  elif stage == WorkflowStage.CHECKPOINT_2.value:
1355
- return route_checkpoint2_approve(s)
1356
  elif stage == WorkflowStage.CHECKPOINT_3.value:
1357
- return route_checkpoint3_finalize(s)
1358
 
1359
- return s, "Invalid stage for approval", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update()
1360
 
1361
  def smart_reject(s, instructions):
1362
  if not s:
1363
- return s, "No active workflow", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update()
1364
 
1365
  stage = s.get("stage")
1366
  if stage == WorkflowStage.CHECKPOINT_1.value:
1367
- return route_checkpoint1_reject(s, instructions)
1368
  elif stage == WorkflowStage.CHECKPOINT_2.value:
1369
- return route_checkpoint2_different(s, instructions)
1370
  elif stage == WorkflowStage.CHECKPOINT_3.value:
1371
- return route_checkpoint3_edit(s, instructions)
1372
 
1373
- return s, "Invalid stage for rejection", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update()
1374
 
1375
  approve_btn.click(
1376
  fn=smart_approve,
1377
  inputs=[state, edit_instructions],
1378
- outputs=[state, output_display, approve_btn, reject_btn, start_btn, restart_btn, edit_instructions, edit_instructions]
1379
  )
1380
 
1381
  reject_btn.click(
1382
  fn=smart_reject,
1383
  inputs=[state, edit_instructions],
1384
- outputs=[state, output_display, approve_btn, reject_btn, start_btn, restart_btn, edit_instructions, edit_instructions]
1385
  )
1386
 
1387
  restart_btn.click(
1388
  fn=restart_workflow,
1389
- outputs=[state, output_display, approve_btn, reject_btn, start_btn, restart_btn, edit_instructions, edit_instructions]
 
 
 
 
 
 
 
 
 
1390
  )
1391
 
1392
  if __name__ == "__main__":
 
29
 
30
  # Hugging Face Configuration
31
  HF_TOKEN = os.getenv("HF_TOKEN")
32
+ HF_DATASET_REPO = os.getenv("HF_DATASET_REPO", "tuesday-trivia-posts")
33
+ HF_USERNAME = os.getenv("HF_USERNAME", None)
34
 
35
  # Auto-construct repo name if not provided
36
  if "/" not in HF_DATASET_REPO and HF_USERNAME:
 
75
  print(f"โš ๏ธ No API keys found for {self.service_name}. Ensure secrets are set.")
76
  return []
77
 
78
+ print(f"โœ… Loaded {len(keys)} API key(s) for {self.service_name}")
79
  random.shuffle(keys)
80
  return keys
81
 
 
149
  llm_medium = create_llm_with_rotation("mistral-medium-latest", temperature=0.2)
150
  llm_large = create_llm_with_rotation("mistral-large-latest", temperature=0.2)
151
 
152
+ # --- 4. DATABASE & HF SYNC (keeping original code) ---
153
 
154
  def get_db_connection():
155
  return sqlite3.connect(str(DB_PATH))
156
 
157
  def sync_from_hf():
 
158
  if not HF_TOKEN or not hf_api:
159
  print("โš ๏ธ No HF_TOKEN found, skipping sync from HF")
160
  return False
 
161
  try:
162
  print("๐Ÿ“ฅ Syncing database from Hugging Face...")
 
 
163
  try:
164
  dataset = load_dataset(HF_DATASET_REPO, split="train", token=HF_TOKEN)
 
 
165
  if len(dataset) == 0:
166
  print("โ„น๏ธ Dataset exists but is empty")
167
  return False
 
 
168
  conn = get_db_connection()
169
  cursor = conn.cursor()
 
 
170
  cursor.execute('DELETE FROM posts')
 
 
171
  for row in dataset:
172
+ cursor.execute('''INSERT INTO posts (id, date, topic, summary, source_url, quality_score, engagement_score, hashtags, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', (row.get('id'), row.get('date'), row.get('topic'), row.get('summary'), row.get('source_url'), row.get('quality_score'), row.get('engagement_score'), row.get('hashtags'), row.get('created_at'), row.get('updated_at')))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
173
  conn.commit()
174
  conn.close()
 
175
  print(f"โœ… Synced {len(dataset)} posts from HF Dataset")
176
  return True
 
177
  except Exception as e:
178
  error_msg = str(e).lower()
179
  if "not found" in error_msg or "doesn't exist" in error_msg or "doesn't contain any data files" in error_msg:
 
182
  else:
183
  print(f"โš ๏ธ Error loading dataset: {e}")
184
  return False
 
185
  except Exception as e:
186
  print(f"โš ๏ธ Error syncing from HF: {e}")
187
  return False
188
 
189
  def sync_to_hf():
 
190
  if not HF_TOKEN or not hf_api:
191
  print("โš ๏ธ No HF_TOKEN found, skipping sync to HF")
192
  return False
 
193
  try:
194
  print("๐Ÿ“ค Syncing database to Hugging Face...")
 
195
  conn = get_db_connection()
196
  cursor = conn.cursor()
197
  cursor.execute('SELECT * FROM posts')
 
198
  columns = [description[0] for description in cursor.description]
199
  rows = cursor.fetchall()
200
  conn.close()
 
201
  if not rows:
202
  print("โ„น๏ธ No posts to sync")
203
  return False
 
 
204
  data = {col: [] for col in columns}
205
  for row in rows:
206
  for col, value in zip(columns, row):
207
  data[col].append(value)
 
 
208
  dataset = Dataset.from_dict(data)
 
 
209
  try:
210
+ dataset.push_to_hub(HF_DATASET_REPO, token=HF_TOKEN, private=True)
 
 
 
 
211
  print(f"โœ… Synced {len(rows)} posts to HF Dataset: {HF_DATASET_REPO}")
212
  return True
 
213
  except Exception as push_error:
214
  if "not found" in str(push_error).lower():
 
215
  print(f"๐Ÿ“ฆ Creating new dataset repository: {HF_DATASET_REPO}")
216
+ hf_api.create_repo(repo_id=HF_DATASET_REPO, token=HF_TOKEN, repo_type="dataset", private=True)
217
+ dataset.push_to_hub(HF_DATASET_REPO, token=HF_TOKEN, private=True)
 
 
 
 
 
 
 
 
 
 
218
  print(f"โœ… Created and synced to new dataset: {HF_DATASET_REPO}")
219
  return True
220
  else:
221
  raise push_error
 
222
  except Exception as e:
223
  print(f"โŒ Error syncing to HF: {e}")
224
  return False
225
 
226
  def init_database():
 
227
  conn = get_db_connection()
228
  cursor = conn.cursor()
229
+ cursor.execute('''CREATE TABLE IF NOT EXISTS posts (id INTEGER PRIMARY KEY AUTOINCREMENT, date TEXT NOT NULL, topic TEXT NOT NULL, summary TEXT NOT NULL, source_url TEXT, quality_score REAL CHECK(quality_score >= 0 AND quality_score <= 10), engagement_score REAL, hashtags TEXT, created_at TEXT NOT NULL, updated_at TEXT)''')
 
 
 
 
 
 
 
 
 
 
 
 
 
230
  cursor.execute('CREATE INDEX IF NOT EXISTS idx_date ON posts(date)')
231
  cursor.execute('CREATE INDEX IF NOT EXISTS idx_quality_score ON posts(quality_score)')
232
  cursor.execute('CREATE INDEX IF NOT EXISTS idx_created_at ON posts(created_at)')
233
+ cursor.execute('''CREATE TABLE IF NOT EXISTS metadata (key TEXT PRIMARY KEY, value TEXT, updated_at TEXT NOT NULL)''')
234
+ cursor.execute('''INSERT OR REPLACE INTO metadata (key, value, updated_at) VALUES ('db_version', '1.0', ?)''', (datetime.now().isoformat(),))
 
 
 
 
 
 
 
235
  conn.commit()
236
  conn.close()
 
 
237
  sync_from_hf()
238
 
 
239
  init_database()
240
 
241
+ # --- 5. TOOLS (keeping original except check_topic_similarity) ---
242
 
243
  @tool
244
  def search_science_breakthroughs(query: str) -> str:
245
  """Search for recent scientific breakthroughs."""
246
  try:
247
+ search = TavilySearchResults(max_results=10, include_domains=["sciencedaily.com", "nature.com", "science.org", "newscientist.com", "scientificamerican.com"], search_depth="advanced")
 
 
 
 
248
  results = search.invoke(query)
249
  return json.dumps(results, indent=2)
250
  except Exception as e:
 
269
 
270
  @tool
271
  def get_all_previous_posts() -> str:
272
+ """Retrieve all previously published posts with their titles."""
273
  conn = None
274
  try:
275
  conn = get_db_connection()
 
279
  if not posts: return "No previous posts found."
280
  formatted = []
281
  for p in posts:
282
+ # Extract title (first line of summary)
283
+ title = p[3].split('\n')[0].strip() if p[3] else p[2]
284
+ formatted.append({"id": p[0], "date": p[1], "title": title, "topic": p[2], "summary_preview": p[3][:100], "source_url": p[4], "quality_score": p[5]})
285
  return json.dumps(formatted, indent=2)
286
  except Exception as e:
287
  return f"Error: {str(e)}"
 
315
  conn = get_db_connection()
316
  cursor = conn.cursor()
317
  now = datetime.now()
318
+ cursor.execute('''INSERT INTO posts (date, topic, summary, source_url, quality_score, hashtags, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)''', (now.strftime('%Y-%m-%d'), topic, summary, source_url, quality_score, hashtags, now.isoformat(), now.isoformat()))
 
 
 
319
  conn.commit()
320
  post_id = cursor.lastrowid
321
  conn.close()
 
 
322
  sync_result = sync_to_hf()
323
  sync_msg = "and synced to HF Dataset โ˜๏ธ" if sync_result else "(HF sync skipped)"
 
324
  return f"Post saved successfully with ID: {post_id} {sync_msg}"
325
  except Exception as e:
326
  return f"Error saving post: {str(e)}"
 
333
 
334
  @tool
335
  def check_topic_similarity(new_topic: str) -> str:
336
+ """CRITICAL: Check if the TITLE is too similar to previous post titles. Focus on TITLE similarity only."""
337
  conn = None
338
  try:
339
  conn = get_db_connection()
340
  cursor = conn.cursor()
341
  cursor.execute('SELECT topic, summary FROM posts')
342
  previous = cursor.fetchall()
343
+ if not previous: return json.dumps({"is_duplicate": False, "similar_posts": [], "checked_title": new_topic})
344
+
345
+ # Extract title from new topic (first line before any newlines)
346
+ new_title = new_topic.split('\n')[0].strip().lower()
347
+ new_keywords = set(word for word in new_title.split() if len(word) > 3) # Ignore small words
348
 
 
349
  similar = []
350
  for prev_topic, prev_summary in previous:
351
+ # Extract title from previous post (first line)
352
+ prev_title = prev_summary.split('\n')[0].strip().lower()
353
+ prev_keywords = set(word for word in prev_title.split() if len(word) > 3)
354
+
355
  if not new_keywords: continue
356
  overlap = len(new_keywords & prev_keywords)
357
+ similarity = overlap / len(new_keywords) if new_keywords else 0
358
+
359
+ # Higher threshold for title similarity (0.6 = 60% word overlap)
360
+ if similarity > 0.6:
361
+ similar.append({
362
+ "previous_title": prev_title,
363
+ "similarity_percentage": round(similarity * 100, 1),
364
+ "matching_words": list(new_keywords & prev_keywords)
365
+ })
366
+
367
+ return json.dumps({
368
+ "is_duplicate": len(similar) > 0,
369
+ "similar_posts": similar,
370
+ "new_title_checked": new_title,
371
+ "warning": "โš ๏ธ TITLE is too similar to existing posts!" if similar else "โœ… Title is unique"
372
+ }, indent=2)
373
  except Exception as e:
374
  return f"Error: {str(e)}"
375
  finally:
 
382
 
383
  @tool
384
  def get_example_posts_for_writer() -> str:
385
+ """Get example posts to guide the writer agent."""
386
  examples = [
387
+ {"title": "Scientists Found 14 Mysterious Creatures in the Ocean's Darkest Depths", "content": """Scientists Found 14 Mysterious Creatures in the Ocean's Darkest Depths
 
 
388
 
389
  Scientists have just revealed 14 new species living miles beneath the ocean's surfaceโ€”some found at depths over 6,000 meters! This discovery is part of the Ocean Species Discoveries initiative, which is changing how new marine life is identified and shared with the world. Among the surprises: a record-setting deep-sea mollusk, a carnivorous bivalve, and even a popcorn-shaped parasitic isopod. Each of these creatures shows just how much life still hides in the ocean's darkest corners.
390
  What's even cooler? The team is using cutting-edge lab techniques to make classifying species faster, more open, and globally collaborativeโ€”helping scientists everywhere explore and protect our planet's last great frontier: the deep sea.
391
 
392
+ #TuesdayTrivia #RnDCell #CCA #OceanDiscovery #MarineBiology #DeepSea"""}
 
 
 
 
 
 
 
 
 
 
 
 
393
  ]
394
  return json.dumps(examples, indent=2)
395
 
396
  @tool
397
  def get_example_posts_for_critic() -> str:
398
+ """Get example posts with quality scores."""
399
  examples = [
400
+ {"title": "Researchers Have Developed a Cutting-Edge Silicon Photonic Chip to Enhance AI Efficiency", "quality_score": 9.0}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
401
  ]
402
  return json.dumps(examples, indent=2)
403
 
404
+ # --- 6. WORKFLOW & STATE ---
405
 
406
  class WorkflowStage(Enum):
407
  IDLE = "idle"
 
416
  COMPLETE = "complete"
417
  ERROR = "error"
418
 
 
 
419
  class EnhancedAgentState(TypedDict):
420
  stage: str
421
  search_topic: str
 
428
  error_message: str
429
  progress_log: List[str]
430
 
431
+ # --- 7. AGENT FUNCTIONS WITH MODEL LOGGING ---
432
 
433
  def run_discovery(state: EnhancedAgentState, progress_callback=None) -> EnhancedAgentState:
434
+ """Discovery Agent - Uses mistral-small-latest"""
435
  try:
436
+ print("\n" + "="*70)
437
+ print("๐Ÿค– DISCOVERY AGENT")
438
+ print(f"๐Ÿ“Š Model: mistral-small-latest")
439
+ print(f"๐ŸŽฏ Purpose: Search and find scientific breakthroughs")
440
+ print("="*70)
441
+
442
  if progress_callback:
443
+ progress_callback("๐Ÿ” Discovery Agent (mistral-small) searching...")
444
 
445
  topic = state.get("search_topic", "general science")
446
 
447
  system_msg = SystemMessage(content=f"""You are the Discovery Agent for Tuesday Trivia.
448
 
449
+ CRITICAL TITLE FOCUS:
450
+ 1. MUST check TITLE similarity using check_topic_similarity - focus on the TITLE (first line) ONLY
451
+ 2. The title is the most important unique identifier - titles should be significantly different
452
+ 3. When using check_topic_similarity, it specifically checks title word overlap
453
+ 4. REJECT stories with titles that have >60% word overlap with existing titles
 
454
 
455
+ OTHER REQUIREMENTS:
456
+ 1. Use search_science_breakthroughs to find recent articles
457
+ 2. Use get_all_previous_posts to see existing titles
458
+ 3. Find 10-15 RECENT breakthroughs (last 2-4 weeks)
459
 
460
  Output Format:
461
+ **Title:** [Unique, compelling title]
 
462
  **Description:** [2-3 sentences]
463
  **URL:** [Source link]
464
  **Why Interesting:** [1 sentence hook]
465
+ ---""")
 
466
 
467
+ user_msg = HumanMessage(content=f"Search for recent breakthroughs in {topic}. Check titles carefully to avoid duplicates.")
468
 
469
  discovery_llm = llm_small.bind_tools([search_science_breakthroughs, get_all_previous_posts, check_topic_similarity])
 
 
 
 
470
  response = discovery_llm.invoke([system_msg, user_msg])
471
  conversation = [system_msg, user_msg, response]
472
 
473
+ max_steps = 8
474
  steps = 0
 
475
 
476
  while hasattr(response, 'tool_calls') and response.tool_calls and steps < max_steps:
 
 
 
477
  tool_messages = []
478
  for tool_call in response.tool_calls:
479
  name = tool_call['name']
480
+ print(f"๐Ÿ”ง Tool: {name}")
 
 
 
 
 
481
  if name == 'search_science_breakthroughs':
482
+ res = search_science_breakthroughs.invoke(tool_call['args'])
483
  elif name == 'get_all_previous_posts':
484
+ res = get_all_previous_posts.invoke(tool_call['args'])
485
  elif name == 'check_topic_similarity':
486
+ res = check_topic_similarity.invoke(tool_call['args'])
487
  else:
488
  res = f"Unknown tool: {name}"
 
489
  tool_messages.append(ToolMessage(content=str(res), tool_call_id=tool_call['id']))
 
490
  conversation.extend(tool_messages)
491
  response = discovery_llm.invoke(conversation)
492
  conversation.append(response)
493
  steps += 1
494
 
495
+ print("โœ… Discovery complete")
496
+ state["candidates"] = [{"raw": response.content}]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
497
  state["stage"] = WorkflowStage.CHECKPOINT_1.value
498
+ state["progress_log"].append("โœ… Discovery (mistral-small-latest)")
 
499
  return state
 
500
  except Exception as e:
501
  state["stage"] = WorkflowStage.ERROR.value
502
  state["error_message"] = f"Discovery failed: {str(e)}"
 
503
  return state
504
 
505
  def run_curator(state: EnhancedAgentState, progress_callback=None) -> EnhancedAgentState:
506
+ """Curator Agent - Uses mistral-large-latest"""
507
  try:
508
+ print("\n" + "="*70)
509
+ print("๐Ÿค– CURATOR AGENT")
510
+ print(f"๐Ÿ“Š Model: mistral-large-latest")
511
+ print(f"๐ŸŽฏ Purpose: Rank candidates and select best story")
512
+ print("="*70)
513
+
514
  if progress_callback:
515
+ progress_callback("๐ŸŽฏ Curator (mistral-large) selecting story...")
516
 
517
  candidates = state.get("candidates", [])
518
  candidates_text = candidates[0].get("raw", "") if candidates else ""
519
 
520
+ system_msg = SystemMessage(content="""You are the Curator Agent.
521
+
522
+ PRIMARY FOCUS: Check TITLE uniqueness carefully!
523
+ 1. Use check_topic_similarity to verify the selected story's TITLE is unique
524
+ 2. TITLES must have <60% word overlap with existing posts
525
+ 3. Prioritize stories with completely unique, fresh titles
526
+
527
+ Then rank on:
528
+ - Recency (1-10): How recent is the discovery?
529
+ - Significance (1-10): Scientific impact
530
+ - Engagement (1-10): Public interest potential
531
+
532
+ Output: RANKED CANDIDATES, then SELECTED STORY with title uniqueness check.""")
533
 
534
+ user_msg = HumanMessage(content=f"Rank and select the best story. CHECK TITLE SIMILARITY FIRST:\n{candidates_text}")
535
 
536
+ curator_llm = llm_large.bind_tools([check_topic_similarity, get_all_previous_posts])
537
  response = curator_llm.invoke([system_msg, user_msg])
538
+ conversation = [system_msg, user_msg, response]
539
 
540
+ # Allow tool usage
541
+ if hasattr(response, 'tool_calls') and response.tool_calls:
542
+ for tool_call in response.tool_calls:
543
+ name = tool_call['name']
544
+ print(f"๐Ÿ”ง Tool: {name}")
545
+ if name == 'check_topic_similarity':
546
+ res = check_topic_similarity.invoke(tool_call['args'])
547
+ elif name == 'get_all_previous_posts':
548
+ res = get_all_previous_posts.invoke(tool_call['args'])
549
+ else:
550
+ res = "Unknown tool"
551
+ conversation.append(ToolMessage(content=str(res), tool_call_id=tool_call['id']))
552
+ response = curator_llm.invoke(conversation)
553
 
554
+ print("โœ… Curation complete")
555
  state["selected_story"] = {"raw": response.content}
556
  state["stage"] = WorkflowStage.CHECKPOINT_2.value
557
+ state["progress_log"].append("โœ… Curation (mistral-large-latest)")
 
558
  return state
 
559
  except Exception as e:
560
  state["stage"] = WorkflowStage.ERROR.value
561
  state["error_message"] = f"Curation failed: {str(e)}"
 
562
  return state
563
 
564
  def run_writer(state: EnhancedAgentState, progress_callback=None) -> EnhancedAgentState:
565
+ """Writer Agent - Uses mistral-medium-latest"""
566
  try:
567
+ print("\n" + "="*70)
568
+ print("๐Ÿค– WRITER AGENT")
569
+ print(f"๐Ÿ“Š Model: mistral-medium-latest")
570
+ print(f"๐ŸŽฏ Purpose: Write engaging Tuesday Trivia post")
571
+ print("="*70)
572
+
573
  if progress_callback:
574
+ progress_callback("โœ๏ธ Writer (mistral-medium) creating post...")
575
 
576
  selected_story = state.get("selected_story", {})
577
  story_text = selected_story.get("raw", "")
578
  retry_count = state.get("retry_count", 0)
579
+ retry_context = f"\nPrevious feedback: {state.get('critic_feedback')}" if retry_count > 0 else ""
580
+
581
+ system_msg = SystemMessage(content="""You are the Writer Agent for Tuesday Trivia.
582
+
583
+ CRITICAL FORMAT:
584
+ Title
585
+ [blank line]
586
+ Paragraph 1
587
+ [no blank line]
588
+ Paragraph 2
589
+ [blank line]
590
+ #TuesdayTrivia #RnDCell #CCA #Topic1 #Topic2
591
+
592
+ TITLE REQUIREMENTS:
593
+ - Must be unique and compelling
594
+ - Use check_topic_similarity to verify uniqueness
595
+ - If title is too similar (>60% overlap), create a completely different title approach
596
+
597
+ CONTENT: 140-180 words, conversational, technical but accessible
598
+ HASHTAGS: Always include #TuesdayTrivia #RnDCell #CCA + 2-4 topic hashtags""")
599
 
600
+ user_msg = HumanMessage(content=f"Write Tuesday Trivia post. VERIFY TITLE UNIQUENESS:\n{story_text}\n{retry_context}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
601
 
602
+ writer_llm = llm_medium.bind_tools([get_example_posts_for_writer, check_topic_similarity, fetch_article_content, count_words])
603
  response = writer_llm.invoke([system_msg, user_msg])
604
  conversation = [system_msg, user_msg, response]
605
 
606
  steps = 0
607
  while hasattr(response, 'tool_calls') and response.tool_calls and steps < 5:
 
 
 
 
608
  for tool_call in response.tool_calls:
609
  name = tool_call['name']
610
+ print(f"๐Ÿ”ง Tool: {name}")
611
+ if name == 'get_example_posts_for_writer': res = get_example_posts_for_writer.invoke(tool_call['args'])
612
+ elif name == 'check_topic_similarity': res = check_topic_similarity.invoke(tool_call['args'])
613
+ elif name == 'fetch_article_content': res = fetch_article_content.invoke(tool_call['args'])
614
+ elif name == 'count_words': res = count_words.invoke(tool_call['args'])
615
+ else: res = "Unknown"
616
+ conversation.append(ToolMessage(content=str(res), tool_call_id=tool_call['id']))
 
 
 
617
  response = writer_llm.invoke(conversation)
618
  conversation.append(response)
619
  steps += 1
620
 
621
+ print("โœ… Writing complete")
 
 
622
  state["draft_summary"] = response.content
623
  state["retry_count"] = retry_count + 1
624
  state["stage"] = WorkflowStage.CRITIC.value
625
+ state["progress_log"].append(f"โœ… Writing (mistral-medium-latest, attempt {retry_count + 1})")
 
626
  return state
 
627
  except Exception as e:
628
  state["stage"] = WorkflowStage.ERROR.value
629
  state["error_message"] = f"Writing failed: {str(e)}"
 
630
  return state
631
 
632
  def run_critic(state: EnhancedAgentState, progress_callback=None) -> EnhancedAgentState:
633
+ """Critic Agent - Uses mistral-large-latest"""
634
  try:
635
+ print("\n" + "="*70)
636
+ print("๐Ÿค– CRITIC AGENT")
637
+ print(f"๐Ÿ“Š Model: mistral-large-latest")
638
+ print(f"๐ŸŽฏ Purpose: Evaluate post quality and title uniqueness")
639
+ print("="*70)
640
+
641
  if progress_callback:
642
+ progress_callback("๐Ÿ” Critic (mistral-large) evaluating...")
643
 
644
  draft = state.get("draft_summary", "")
645
 
646
+ system_msg = SystemMessage(content="""You are the Critic Agent for Tuesday Trivia.
647
 
648
  EVALUATION CRITERIA:
649
 
650
+ 1. TITLE UNIQUENESS (3 points) - MOST IMPORTANT:
651
+ - Use check_topic_similarity to verify title uniqueness
652
+ - Title must have <60% word overlap with existing posts
653
+ - Deduct 3 points if title is too similar
654
+
655
+ 2. FORMAT (2 points):
656
+ - Title on first line
657
  - One blank line after title
658
+ - Two paragraphs (no blank between them)
659
  - One blank line before hashtags
660
  - Hashtags include #TuesdayTrivia #RnDCell #CCA
661
 
662
+ 3. CONTENT (3 points):
663
+ - Engaging hook + context
664
+ - Technical details + impact
 
665
  - 140-180 words
666
+ - Accurate information
667
 
668
+ 4. STYLE (2 points):
669
  - Conversational, active voice
670
+ - Technical concepts clear
 
671
  - Engaging and accessible
672
 
673
+ OUTPUT:
674
+ Detailed feedback on each criterion
 
 
 
 
 
 
675
  TOTAL SCORE: X/10
676
+ Decision: APPROVED (โ‰ฅ8) or REJECTED (<8)
677
 
678
+ CRITICAL: Always check title similarity first!""")
679
 
680
+ user_msg = HumanMessage(content=f"Evaluate this post. CHECK TITLE UNIQUENESS FIRST:\n\n{draft}")
681
 
682
+ critic_llm = llm_large.bind_tools([get_example_posts_for_critic, check_topic_similarity])
683
  response = critic_llm.invoke([system_msg, user_msg])
684
  conversation = [system_msg, user_msg, response]
685
 
686
  if hasattr(response, 'tool_calls') and response.tool_calls:
687
  for tool_call in response.tool_calls:
688
+ name = tool_call['name']
689
+ print(f"๐Ÿ”ง Tool: {name}")
690
+ if name == 'get_example_posts_for_critic':
691
+ res = get_example_posts_for_critic.invoke(tool_call['args'])
692
+ elif name == 'check_topic_similarity':
693
+ res = check_topic_similarity.invoke(tool_call['args'])
694
+ else:
695
+ res = "Unknown"
696
  conversation.append(ToolMessage(content=str(res), tool_call_id=tool_call['id']))
697
  response = critic_llm.invoke(conversation)
698
 
 
704
  except:
705
  pass
706
 
707
+ print(f"โœ… Evaluation complete - Score: {score}/10")
 
 
708
  state["quality_score"] = score
709
  state["critic_feedback"] = text
710
  state["stage"] = WorkflowStage.CHECKPOINT_3.value
711
+ state["progress_log"].append(f"โœ… Evaluation (mistral-large-latest): {score}/10")
 
712
  return state
 
713
  except Exception as e:
714
  state["stage"] = WorkflowStage.ERROR.value
715
  state["error_message"] = f"Critic failed: {str(e)}"
 
716
  return state
717
 
718
  def run_finalize(state: EnhancedAgentState, progress_callback=None) -> EnhancedAgentState:
719
  """Finalize and save to database"""
720
  try:
721
+ print("\n" + "="*70)
722
+ print("๐Ÿ’พ FINALIZATION")
723
+ print(f"๐ŸŽฏ Purpose: Save post to database and sync to HF")
724
+ print("="*70)
725
+
726
  if progress_callback:
727
+ progress_callback("๐Ÿ’พ Saving to database and HF...")
728
 
729
  draft = state.get("draft_summary", "")
730
  score = state.get("quality_score", 0.0)
 
745
  "hashtags": hashtags
746
  })
747
 
748
+ print(f"โœ… Post saved: {res}")
 
 
749
  state["stage"] = WorkflowStage.COMPLETE.value
750
+ state["progress_log"].append(f"โœ… Finalized and saved")
 
751
  return state
 
752
  except Exception as e:
753
  state["stage"] = WorkflowStage.ERROR.value
754
  state["error_message"] = f"Finalization failed: {str(e)}"
 
755
  return state
756
 
757
+ # --- 8. GRADIO INTERFACE ---
758
 
759
  def create_initial_state(topic: str) -> EnhancedAgentState:
760
  return {
 
786
  gr.update(visible=False),
787
  gr.update(visible=False),
788
  gr.update(visible=False),
789
+ gr.update(visible=True),
790
  "",
791
+ gr.update(visible=False),
792
  gr.update(visible=False)
793
  )
794
 
 
797
  return (
798
  state,
799
  f"## ๐Ÿ” Discovery Results\n\n{candidates_text}",
800
+ gr.update(visible=True),
801
+ gr.update(visible=True),
802
  gr.update(visible=False),
803
  gr.update(visible=False),
804
  "",
805
+ gr.update(visible=False),
806
  gr.update(visible=False)
807
  )
808
 
 
822
  gr.update(visible=False),
823
  gr.update(visible=True),
824
  "",
825
+ gr.update(visible=False),
826
  gr.update(visible=False)
827
  )
828
 
 
831
  return (
832
  state,
833
  f"## ๐ŸŽฏ Selected Story\n\n{story_text}\n\n**Optional:** Provide instructions in the textbox below if you want to pick a different story.",
834
+ gr.update(visible=True),
835
+ gr.update(visible=True),
836
  gr.update(visible=False),
837
  gr.update(visible=False),
838
  "",
839
+ gr.update(visible=True),
840
+ gr.update(visible=False)
841
  )
842
 
843
  def handle_checkpoint1_reject(state, instructions, progress=gr.Progress()):
844
+ """Handle rejection at checkpoint 1"""
 
845
  if instructions and instructions.strip():
846
  if "search_topic" in state:
847
  state["search_topic"] = f"{state['search_topic']} - Additional guidance: {instructions}"
 
860
  gr.update(visible=False),
861
  gr.update(visible=True),
862
  "",
863
+ gr.update(visible=False),
864
  gr.update(visible=False)
865
  )
866
 
 
868
 
869
  return (
870
  state,
871
+ f"## ๐Ÿ” Discovery Results (New Search)\n\n{candidates_text}",
872
  gr.update(visible=True),
873
  gr.update(visible=True),
874
  gr.update(visible=False),
875
  gr.update(visible=False),
876
  "",
877
+ gr.update(visible=True),
878
+ gr.update(visible=False)
879
  )
880
 
881
  def handle_checkpoint2_approve(state, progress=gr.Progress()):
882
+ """Handle approval at checkpoint 2"""
883
  def update_progress(msg):
884
  progress(0.6, desc=msg)
885
 
 
894
  gr.update(visible=False),
895
  gr.update(visible=True),
896
  "",
897
+ gr.update(visible=False),
898
  gr.update(visible=False)
899
  )
900
 
 
907
 
908
  return (
909
  state,
910
+ f"## โœ๏ธ Draft Post\n\n{draft}\n\n---\n\n**Quality Score:** {score}/10\n\n**Feedback:**\n{feedback}",
911
+ gr.update(visible=True),
912
+ gr.update(visible=True),
913
  gr.update(visible=False),
914
  gr.update(visible=False),
915
  "",
916
+ gr.update(visible=True),
917
+ gr.update(visible=False)
918
  )
919
 
920
  def handle_checkpoint2_different(state, instructions, progress=gr.Progress()):
921
+ """Request different story"""
 
922
  if instructions and instructions.strip():
923
  if "selected_story" in state:
924
  state["critic_feedback"] = f"User preference: {instructions}"
 
937
  gr.update(visible=False),
938
  gr.update(visible=True),
939
  "",
940
+ gr.update(visible=False),
941
  gr.update(visible=False)
942
  )
943
 
 
945
 
946
  return (
947
  state,
948
+ f"## ๐ŸŽฏ Selected Story (Alternative)\n\n{story_text}",
949
  gr.update(visible=True),
950
  gr.update(visible=True),
951
  gr.update(visible=False),
952
  gr.update(visible=False),
953
  "",
954
+ gr.update(visible=True),
955
+ gr.update(visible=False)
956
  )
957
 
958
  def handle_checkpoint3_finalize(state, progress=gr.Progress()):
 
966
 
967
  return (
968
  state,
969
+ f"## โœ… Post Saved Successfully!\n\n{draft}\n\n---\n\n**Status:** Saved to database & HF Dataset\n**Quality Score:** {state.get('quality_score', 0)}/10",
970
  gr.update(visible=False),
971
  gr.update(visible=False),
972
  gr.update(visible=False),
973
+ gr.update(visible=True),
974
  "",
975
+ gr.update(visible=False),
976
+ gr.update(visible=True)
977
  )
978
 
979
  def handle_checkpoint3_edit(state, edit_instructions, progress=gr.Progress()):
 
987
  gr.update(visible=False),
988
  gr.update(visible=False),
989
  edit_instructions,
990
+ gr.update(visible=True),
991
+ gr.update(visible=False)
992
  )
993
 
 
994
  if "critic_feedback" in state:
995
  state["critic_feedback"] += f"\n\nUser edit request: {edit_instructions}"
996
 
 
1008
  gr.update(visible=False),
1009
  gr.update(visible=True),
1010
  "",
1011
+ gr.update(visible=False),
1012
  gr.update(visible=False)
1013
  )
1014
 
 
1027
  gr.update(visible=False),
1028
  gr.update(visible=False),
1029
  "",
1030
+ gr.update(visible=True),
1031
+ gr.update(visible=False)
1032
  )
1033
 
1034
def restart_workflow():
    """Reset the UI to its initial state so a fresh workflow can begin.

    Returns the standard 9-tuple consumed by the Gradio outputs list:
    (state, output_display, approve_btn, reject_btn, start_btn,
    restart_btn, edit_instructions value, edit_instructions visibility,
    copy_btn).
    """
    def hidden():
        # Fresh update dict per output component.
        return gr.update(visible=False)

    return (
        None,  # drop any in-progress workflow state
        "👋 Ready to start! Enter a topic and click 'Start Discovery'",
        hidden(),                 # approve_btn
        hidden(),                 # reject_btn
        gr.update(visible=True),  # start_btn becomes available again
        hidden(),                 # restart_btn
        "",                       # clear the instructions textbox
        hidden(),                 # edit_instructions visibility
        hidden(),                 # copy_btn
    )
1047
 
1048
def copy_to_clipboard(state):
    """Return the finalized post text for the copy box.

    Falls back to a placeholder message when there is no workflow state
    or no draft has been produced yet (empty drafts count as missing).
    """
    draft = (state or {}).get("draft_summary")
    return draft if draft else "No post to copy"
1053
+
1054
+ # --- 9. GRADIO UI ---
1055
 
1056
  css = """
1057
  .output-box {
 
1071
  color: #1a1a1a !important;
1072
  font-weight: bold;
1073
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1074
  """
1075
 
1076
  with gr.Blocks(css=css, title="Tuesday Trivia Agent", theme=gr.themes.Soft()) as demo:
1077
  gr.Markdown("""
1078
  # ๐Ÿงช Tuesday Trivia Multi-Agent System
1079
 
1080
+ **Model Assignment:**
1081
+ - ๐Ÿ” Discovery Agent: `mistral-small-latest` (fast search & filtering)
1082
+ - ๐ŸŽฏ Curator Agent: `mistral-large-latest` (complex ranking & selection)
1083
+ - โœ๏ธ Writer Agent: `mistral-medium-latest` (creative writing)
1084
+ - ๐Ÿ” Critic Agent: `mistral-large-latest` (detailed evaluation)
1085
 
1086
+ **โ˜๏ธ Cloud Storage:** All posts auto-sync to Hugging Face Datasets
1087
  """)
1088
 
 
1089
  state = gr.State()
1090
 
1091
  with gr.Row():
1092
  with gr.Column(scale=2):
 
1093
  output_display = gr.Markdown("๐Ÿ‘‹ Ready to start! Enter a topic and click 'Start Discovery'", elem_classes="output-box")
1094
 
1095
  with gr.Column(scale=1):
 
1096
  gr.Markdown("### ๐ŸŽฎ Control Panel")
1097
 
1098
  topic_input = gr.Textbox(
 
1108
  gr.Markdown("---")
1109
  gr.Markdown("### ๐Ÿ“‹ Decision Points")
1110
 
 
1111
  approve_btn = gr.Button("โœ… Approve", variant="primary", visible=False)
1112
  reject_btn = gr.Button("โŒ Reject / Different", variant="stop", visible=False)
1113
 
 
1118
  lines=3
1119
  )
1120
 
1121
+ copy_btn = gr.Button("๐Ÿ“‹ Copy Final Post", variant="secondary", visible=False)
1122
+ copy_output = gr.Textbox(label="Post Content (for copying)", visible=False, lines=10)
1123
+
1124
  with gr.Accordion("โ˜๏ธ Cloud Sync Status", open=False):
1125
  sync_status = gr.Markdown(f"""
1126
  **HF Dataset:** `{HF_DATASET_REPO}`
1127
  **Status:** {'โœ… Connected' if HF_TOKEN else 'โŒ Not configured'}
1128
 
1129
+ Posts auto-sync to HF after saving.
1130
  """)
1131
 
1132
  manual_sync_btn = gr.Button("๐Ÿ”„ Manual Sync to HF", size="sm")
1133
 
1134
+ with gr.Accordion("๐Ÿ“Š Model Information", open=False):
1135
+ gr.Markdown("""
1136
+ **Discovery Agent:** mistral-small-latest
1137
+ โ†’ Fast, efficient for search and filtering
1138
+
1139
+ **Curator Agent:** mistral-large-latest
1140
+ โ†’ Best reasoning for ranking and selection
1141
+
1142
+ **Writer Agent:** mistral-medium-latest
1143
+ โ†’ Balanced creativity and quality
1144
+
1145
+ **Critic Agent:** mistral-large-latest
1146
+ โ†’ Detailed analysis and evaluation
1147
+
1148
+ All agents check **title similarity** to avoid duplicates.
1149
+ """)
1150
+
1151
  gr.Markdown("---")
1152
  gr.Markdown("""
1153
  ### โ„น๏ธ Instructions
1154
 
1155
+ **Workflow:**
1156
+ 1. Discovery โ†’ Review candidates
1157
+ 2. Curation โ†’ Review story selection
1158
+ 3. Writing โ†’ Review draft
1159
+ 4. Finalize โ†’ Copy & use!
 
1160
 
1161
+ **Title Focus:**
1162
+ - System checks title similarity carefully
1163
+ - Titles must be <60% similar to existing posts
1164
+ - Focus is on unique, compelling titles
 
 
 
 
 
1165
  """)
1166
 
 
1167
def manual_sync():
    """Run an on-demand database sync to the HF dataset and report the outcome.

    Returns a short status string shown in the sync-status Markdown box.
    """
    if sync_to_hf():
        return "✅ Successfully synced to HF!"
    return "⚠️ Sync failed. Check logs."
 
1172
 
1173
+ manual_sync_btn.click(fn=manual_sync, outputs=sync_status)
 
 
 
1174
 
1175
  # Event handlers
1176
  start_btn.click(
1177
  fn=start_workflow,
1178
  inputs=[topic_input],
1179
+ outputs=[state, output_display, approve_btn, reject_btn, start_btn, restart_btn, edit_instructions, edit_instructions, copy_btn]
1180
  )
1181
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1182
def smart_approve(s, instructions):
    """Route an Approve click to the handler for the current checkpoint.

    ``instructions`` arrives because the button wires the textbox in, but
    the approve handlers do not consume it. Returns the standard 9-tuple
    of Gradio output updates.
    """
    if not s:
        return s, "No active workflow", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update(), gr.update()

    # Stage -> approve handler dispatch table.
    dispatch = {
        WorkflowStage.CHECKPOINT_1.value: handle_checkpoint1_approve,
        WorkflowStage.CHECKPOINT_2.value: handle_checkpoint2_approve,
        WorkflowStage.CHECKPOINT_3.value: handle_checkpoint3_finalize,
    }
    handler = dispatch.get(s.get("stage"))
    if handler is not None:
        return handler(s)

    return s, "Invalid stage", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update(), gr.update()
1195
 
1196
def smart_reject(s, instructions):
    """Route a Reject/Different click to the per-checkpoint handler,
    forwarding the user's optional free-text instructions.

    Returns the standard 9-tuple of Gradio output updates.
    """
    if not s:
        return s, "No active workflow", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update(), gr.update()

    # Stage -> reject handler dispatch table; each takes (state, instructions).
    dispatch = {
        WorkflowStage.CHECKPOINT_1.value: handle_checkpoint1_reject,
        WorkflowStage.CHECKPOINT_2.value: handle_checkpoint2_different,
        WorkflowStage.CHECKPOINT_3.value: handle_checkpoint3_edit,
    }
    handler = dispatch.get(s.get("stage"))
    if handler is not None:
        return handler(s, instructions)

    return s, "Invalid stage", gr.update(), gr.update(), gr.update(), gr.update(), "", gr.update(), gr.update()
1209
 
1210
  approve_btn.click(
1211
  fn=smart_approve,
1212
  inputs=[state, edit_instructions],
1213
+ outputs=[state, output_display, approve_btn, reject_btn, start_btn, restart_btn, edit_instructions, edit_instructions, copy_btn]
1214
  )
1215
 
1216
  reject_btn.click(
1217
  fn=smart_reject,
1218
  inputs=[state, edit_instructions],
1219
+ outputs=[state, output_display, approve_btn, reject_btn, start_btn, restart_btn, edit_instructions, edit_instructions, copy_btn]
1220
  )
1221
 
1222
  restart_btn.click(
1223
  fn=restart_workflow,
1224
+ outputs=[state, output_display, approve_btn, reject_btn, start_btn, restart_btn, edit_instructions, edit_instructions, copy_btn]
1225
+ )
1226
+
1227
+ copy_btn.click(
1228
+ fn=copy_to_clipboard,
1229
+ inputs=[state],
1230
+ outputs=[copy_output]
1231
+ ).then(
1232
+ lambda: gr.update(visible=True),
1233
+ outputs=[copy_output]
1234
  )
1235
 
1236
  if __name__ == "__main__":