Suhasdev committed on
Commit
539c75b
·
1 Parent(s): c0b1bdf

Fix OptimizedResult handling: use result.prompt property, improvement_data, and reflection_history for real backend

Browse files
Files changed (1) hide show
  1. app.py +61 -27
app.py CHANGED
@@ -74,8 +74,13 @@ def increment_iteration():
74
  try:
75
  from gepa_optimizer import quick_optimize_sync, OptimizedResult
76
  BACKEND_AVAILABLE = True
77
- except ImportError:
 
78
  BACKEND_AVAILABLE = False
 
 
 
 
79
  from dataclasses import dataclass
80
 
81
  @dataclass
@@ -279,6 +284,11 @@ def validate_api_keys(model: str, api_keys: Dict[str, str]) -> Tuple[bool, str]:
279
  def safe_optimize(seed_prompt, dataset, model, custom_model="", max_iterations=5, max_metric_calls=50, batch_size=4, use_llego=True, api_keys=None):
280
  """Safely run optimization with comprehensive error handling."""
281
  try:
 
 
 
 
 
282
  # Validate seed prompt
283
  if not seed_prompt or not isinstance(seed_prompt, str):
284
  return False, "Seed prompt is required and must be a string.", None
@@ -352,23 +362,20 @@ def safe_optimize(seed_prompt, dataset, model, custom_model="", max_iterations=5
352
  return False, "Optimization returned no result.", None
353
 
354
  # Check for both property-based (real backend) and attribute-based (mock backend)
355
- # Try to access the prompt to see if it exists (works for both attributes and properties)
356
- has_optimized_prompt = False
357
  try:
358
- if hasattr(result, 'optimized_prompt'):
359
- # Mock backend - direct attribute
360
- has_optimized_prompt = True
361
- elif hasattr(result, 'prompt'):
362
- # Real backend - property-based, try to access it
363
- _ = result.prompt
364
- has_optimized_prompt = True
365
- elif hasattr(result, '_result') and hasattr(result._result, 'optimized_prompt'):
366
- has_optimized_prompt = True
367
- except Exception:
368
- pass
369
 
370
- if not has_optimized_prompt:
371
- return False, "Optimization result is missing required fields.", None
372
 
373
  return True, "Success", result
374
 
@@ -695,21 +702,48 @@ def run_optimization_flow(seed, dataset, model, custom_model, iter_count, call_c
695
  # Show results
696
  try:
697
  # Handle both property-based (real backend) and attribute-based (mock backend)
698
- if hasattr(result, 'optimized_prompt'):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
699
  # Mock backend - direct attribute
700
  optimized_prompt = result.optimized_prompt or ""
701
  improvement_metrics = getattr(result, 'improvement_metrics', {})
702
  iteration_history = getattr(result, 'iteration_history', [])
703
- elif hasattr(result, 'prompt'):
704
- # Real backend - property-based
705
- optimized_prompt = result.prompt or ""
706
- improvement_data = result.improvement_data if hasattr(result, 'improvement_data') else {}
707
- # improvement_metrics might be in improvement_data or as separate field
708
- improvement_metrics = improvement_data if isinstance(improvement_data, dict) else {}
709
- # For iteration_history, check if it exists in improvement_data or as separate attribute
710
- iteration_history = getattr(result, 'iteration_history', [])
711
- if not iteration_history and isinstance(improvement_data, dict):
712
- iteration_history = improvement_data.get('iteration_history', [])
713
  else:
714
  optimized_prompt = ""
715
  improvement_metrics = {}
 
74
  try:
75
  from gepa_optimizer import quick_optimize_sync, OptimizedResult
76
  BACKEND_AVAILABLE = True
77
+ logger.info("✅ Successfully imported gepa_optimizer")
78
+ except ImportError as e:
79
  BACKEND_AVAILABLE = False
80
+ logger.error(f"❌ Failed to import gepa_optimizer: {str(e)}")
81
+ logger.error(f"Python path: {sys.path}")
82
+ logger.error(f"Current directory: {os.getcwd()}")
83
+ logger.error(f"src directory exists: {os.path.exists(os.path.join(os.path.dirname(__file__), 'src'))}")
84
  from dataclasses import dataclass
85
 
86
  @dataclass
 
284
  def safe_optimize(seed_prompt, dataset, model, custom_model="", max_iterations=5, max_metric_calls=50, batch_size=4, use_llego=True, api_keys=None):
285
  """Safely run optimization with comprehensive error handling."""
286
  try:
287
+ # Log backend status
288
+ if not BACKEND_AVAILABLE:
289
+ logger.warning("⚠️ Backend not available - using mock optimizer. Check gepa_optimizer installation.")
290
+ else:
291
+ logger.info("✅ Backend available - using real gepa_optimizer")
292
  # Validate seed prompt
293
  if not seed_prompt or not isinstance(seed_prompt, str):
294
  return False, "Seed prompt is required and must be a string.", None
 
362
  return False, "Optimization returned no result.", None
363
 
364
  # Check for both property-based (real backend) and attribute-based (mock backend)
365
+ has_prompt = False
 
366
  try:
367
+ # Real backend uses .prompt property
368
+ if hasattr(result, 'prompt'):
369
+ _ = result.prompt # Try to access property
370
+ has_prompt = True
371
+ # Mock backend uses .optimized_prompt attribute
372
+ elif hasattr(result, 'optimized_prompt'):
373
+ has_prompt = True
374
+ except Exception as e:
375
+ logger.warning(f"Error checking result structure: {str(e)}")
 
 
376
 
377
+ if not has_prompt:
378
+ return False, "Optimization result is missing required prompt field.", None
379
 
380
  return True, "Success", result
381
 
 
702
  # Show results
703
  try:
704
  # Handle both property-based (real backend) and attribute-based (mock backend)
705
+ if hasattr(result, 'prompt'):
706
+ # Real backend - use .prompt property
707
+ try:
708
+ optimized_prompt = result.prompt or ""
709
+ except Exception as e:
710
+ logger.error(f"Error accessing result.prompt: {str(e)}")
711
+ optimized_prompt = ""
712
+
713
+ # Get improvement_data (real backend)
714
+ improvement_data = result.improvement_data if hasattr(result, 'improvement_data') else {}
715
+
716
+ # Convert improvement_data to display format
717
+ if isinstance(improvement_data, dict):
718
+ improvement_metrics = {
719
+ "baseline_score": improvement_data.get("baseline_score", improvement_data.get("baseline_metrics", {}).get("composite_score", 0.0)),
720
+ "final_score": improvement_data.get("final_score", improvement_data.get("final_metrics", {}).get("composite_score", 0.0)),
721
+ "improvement": improvement_data.get("improvement_percent", "N/A"),
722
+ "iterations_run": result.total_iterations if hasattr(result, 'total_iterations') else improvement_data.get("iterations", 0),
723
+ "optimization_time": f"{result.optimization_time:.2f}s" if hasattr(result, 'optimization_time') else "N/A",
724
+ }
725
+ else:
726
+ improvement_metrics = {}
727
+
728
+ # Create iteration history from reflection_history if available
729
+ iteration_history = []
730
+ if hasattr(result, '_result') and hasattr(result._result, 'reflection_history'):
731
+ reflection_history = result._result.reflection_history
732
+ for i, reflection in enumerate(reflection_history, 1):
733
+ summary = reflection.get('summary', f'Iteration {i}')
734
+ iteration_history.append(f"Iter {i}: {summary}")
735
+ elif isinstance(improvement_data, dict) and 'iteration_history' in improvement_data:
736
+ iteration_history = improvement_data['iteration_history']
737
+ else:
738
+ # Fallback: create simple history
739
+ iterations = result.total_iterations if hasattr(result, 'total_iterations') else 0
740
+ iteration_history = [f"Iteration {i+1} completed" for i in range(iterations)]
741
+
742
+ elif hasattr(result, 'optimized_prompt'):
743
  # Mock backend - direct attribute
744
  optimized_prompt = result.optimized_prompt or ""
745
  improvement_metrics = getattr(result, 'improvement_metrics', {})
746
  iteration_history = getattr(result, 'iteration_history', [])
 
 
 
 
 
 
 
 
 
 
747
  else:
748
  optimized_prompt = ""
749
  improvement_metrics = {}