| import re |
| import random |
| from typing import List, Dict, Tuple, Optional |
| from dataclasses import dataclass |
| from openai import OpenAI |
|
|
@dataclass
class EvaluationResult:
    """Structured result from regex evaluation."""
    # Fraction of ground-truth items handled correctly, in [0.0, 1.0].
    score: float
    # Sampled items evaluated as correct (includes correct non-matches,
    # which are recorded with truth "NO_MATCH").
    true_positives: List[Dict]
    # Sampled items where the regex matched the wrong span or matched
    # when no match was expected.
    false_positives: List[Dict]
    # Sampled items where the regex matched nothing but a match was expected.
    false_negatives: List[Dict]
    # Compilation error text when the pattern failed to compile; None otherwise.
    error_message: Optional[str] = None
|
|
class RegexOptimizerAgent:
    """
    An AI agent that iteratively optimizes regex patterns using LLM feedback.

    The agent uses a reflexion-based approach:
    1. Evaluates current regex on ground truth data
    2. Generates improvement suggestions via LLM
    3. Tests new regex and compares performance
    4. Learns from failures through critique
    """

    def __init__(self, ground_truth_data: List[Dict], openai_client: OpenAI,
                 model: str = "gpt-4", temperature: float = 0.7):
        """
        Initialize the optimizer agent.

        Args:
            ground_truth_data: List of dicts with 'text' and 'expected' keys
                ('expected' is None for texts that should yield no match)
            openai_client: OpenAI client instance
            model: Model name (e.g., 'gpt-4', 'gpt-3.5-turbo')
            temperature: Sampling temperature for generation
        """
        self.data = ground_truth_data
        self.client = openai_client
        self.model = model
        self.temperature = temperature
        # Best pattern/score found so far; both are overwritten with the
        # initial evaluation at the start of run_optimization().
        self.best_regex = ""
        self.best_score = -1.0
        # One entry appended per optimization iteration:
        # {"iteration": ..., "regex": ..., "score": ..., "improved": ...}
        self.iteration_history = []
|
def _evaluate_and_sample(self, regex_str: str, k: int = 3) -> EvaluationResult:
    """
    Evaluate regex performance and sample results from each category.

    An item counts as correct when the first match (or absence of one)
    equals the expected value; correct non-matches are grouped with the
    true positives and rendered with truth "NO_MATCH".

    Args:
        regex_str: Regular expression pattern to test
        k: Number of samples to return per category

    Returns:
        EvaluationResult with accuracy score and up to k randomly chosen
        examples per category. If the pattern does not compile, score is
        0.0 and error_message holds the compilation error.
    """
    # Report an invalid pattern instead of raising, so the optimization
    # loop can critique it. re.error is the specific compile failure.
    try:
        compiled = re.compile(regex_str)
    except re.error as e:
        return EvaluationResult(0.0, [], [], [], str(e))

    tp_samples: List[Dict] = []
    fp_samples: List[Dict] = []
    fn_samples: List[Dict] = []
    correct_count = 0

    for item in self.data:
        raw_text = item['text']
        # Normalize expected value to str (ground truth may store numbers).
        expected = str(item['expected']) if item['expected'] is not None else None

        match = compiled.search(raw_text)
        actual = match.group(0) if match else None

        result_payload = {
            "raw": raw_text,
            "truth": expected if expected else "NO_MATCH",
            "extracted": actual if actual else "NONE"
        }

        if actual == expected:
            correct_count += 1
            tp_samples.append(result_payload)
        elif actual is None:
            # Expected a value but matched nothing.
            fn_samples.append(result_payload)
        else:
            # Matched when nothing was expected, or extracted the wrong span.
            fp_samples.append(result_payload)

    # Float accuracy; guard against an empty dataset.
    score = correct_count / len(self.data) if self.data else 0.0

    # random.sample picks up to k distinct examples without mutating the
    # source lists (equivalent in distribution to shuffle + slice).
    return EvaluationResult(
        score=score,
        true_positives=random.sample(tp_samples, min(k, len(tp_samples))),
        false_positives=random.sample(fp_samples, min(k, len(fp_samples))),
        false_negatives=random.sample(fn_samples, min(k, len(fn_samples))),
        error_message=None
    )
|
|
| def _generate_failure_critique(self, attempted_regex: str, best_regex: str, |
| new_fp: List[Dict], new_fn: List[Dict], |
| error_msg: Optional[str]) -> str: |
| """Generate critique explaining why attempted regex failed.""" |
| if error_msg: |
| return f"SYNTAX ERROR: {error_msg}" |
|
|
| prompt = f"""You are an expert regex analyst. Analyze why this regex attempt failed. |
| |
| BEST PERFORMING REGEX (baseline): {best_regex} |
| FAILED ATTEMPT: {attempted_regex} |
| |
| NEW ERRORS INTRODUCED: |
| False Positives (incorrectly matched): {new_fp[:2]} |
| False Negatives (failed to match): {new_fn[:2]} |
| |
| Task: In 1-2 concise sentences, explain the core flaw in the attempted regex that caused it to perform worse than the baseline. Focus on what pattern logic was incorrect. |
| |
| Critique:""" |
|
|
| try: |
| response = self.client.chat.completions.create( |
| model=self.model, |
| messages=[ |
| {"role": "system", "content": "You are an expert regular expression analyst."}, |
| {"role": "user", "content": prompt} |
| ], |
| temperature=self.temperature, |
| max_tokens=150 |
| ) |
| return response.choices[0].message.content.strip() |
| except Exception as e: |
| return f"Failed to generate critique: {str(e)}" |
|
|
def _build_optimization_prompt(self, regex: str, score: float,
                               tp: List[Dict], fp: List[Dict], fn: List[Dict],
                               failure_context: Optional[str] = None) -> str:
    """
    Build comprehensive prompt for regex improvement.

    Uses chain-of-thought reasoning and specific examples to guide the LLM.

    Args:
        regex: Current pattern shown to the LLM as the starting point.
        score: Current accuracy in [0, 1]; rendered as a percentage.
        tp: Correct examples that must keep working.
        fp: Examples the regex matched but should not have.
        fn: Examples the regex failed to match.
        failure_context: Optional critique of the previous failed attempt,
            injected as feedback so the LLM avoids repeating it.

    Returns:
        The fully assembled prompt string, ending with an instruction to
        reply with only a backtick-delimited regex.
    """
    # Render up to `limit` examples as bullet lines for the prompt body.
    def format_examples(examples: List[Dict], limit: int = 5) -> str:
        if not examples:
            return "None"
        lines = []
        for ex in examples[:limit]:
            lines.append(f" • Input: '{ex['raw']}'")
            lines.append(f" Expected: {ex['truth']} | Got: {ex['extracted']}")
        return "\n".join(lines)

    prompt = f"""You are an expert Regular Expression engineer. Your task is to improve a regex pattern to achieve 100% accuracy on the test cases below.

CURRENT REGEX PATTERN: `{regex}`
CURRENT ACCURACY: {score:.1%} ({int(score * len(self.data))}/{len(self.data)} correct)

═══════════════════════════════════════════════════════════════

[✓ CORRECT MATCHES - These MUST continue working]
{format_examples(tp)}

[✗ FALSE POSITIVES - Incorrectly matched (should return NO_MATCH)]
{format_examples(fp)}

[✗ FALSE NEGATIVES - Failed to extract (should have matched)]
{format_examples(fn)}

═══════════════════════════════════════════════════════════════
"""

    # Reflexion step: append the critique of the last failed candidate.
    if failure_context:
        prompt += f"""
⚠️ CRITICAL FEEDBACK FROM PREVIOUS ATTEMPT:
{failure_context}

Learn from this mistake and avoid repeating it.

═══════════════════════════════════════════════════════════════
"""

    prompt += """
INSTRUCTIONS:
1. Analyze the patterns in false positives and false negatives
2. Identify what distinguishes valid matches from invalid ones
3. Design a regex that captures ONLY the expected patterns
4. Ensure all current correct matches continue working

Think step-by-step:
- What do all the expected matches have in common?
- What makes the false positives different from true matches?
- What patterns are the false negatives missing?

Return ONLY the improved regex pattern inside backticks, like: `your_regex_here`
Do not include explanations, just the regex pattern.

Improved regex:"""

    return prompt
|
|
| def _extract_regex_from_response(self, text: str) -> str: |
| """Extract regex pattern from LLM response.""" |
| |
| match = re.search(r"`([^`]+)`", text) |
| if match: |
| return match.group(1) |
| |
| |
| lines = [line.strip() for line in text.split('\n') if line.strip()] |
| return lines[0] if lines else text.strip() |
|
|
def run_optimization(self, initial_regex: str, max_iterations: int = 10,
                     patience: int = 3, sample_size: int = 3) -> str:
    """
    Run the iterative optimization loop.

    Each iteration builds a feedback prompt from the latest evaluation,
    asks the LLM for an improved pattern, and re-evaluates it. Candidates
    that beat the best score are adopted; failed candidates produce a
    critique that is fed into the next prompt (reflexion).

    Args:
        initial_regex: Starting regex pattern
        max_iterations: Maximum optimization iterations
        patience: Stop after N iterations without improvement
        sample_size: Number of examples to show LLM per category

    Returns:
        Best regex pattern found
    """
    current_regex = initial_regex
    result = self._evaluate_and_sample(current_regex, k=sample_size)

    self.best_regex = current_regex
    self.best_score = result.score
    no_improvement_counter = 0
    last_failure_critique = None

    print("=" * 60)
    print("REGEX OPTIMIZATION AGENT STARTED")
    print("=" * 60)
    print(f"Initial Regex: {initial_regex}")
    print(f"Baseline Accuracy: {result.score:.1%} ({int(result.score * len(self.data))}/{len(self.data)})")
    print()

    for iteration in range(max_iterations):
        # Stop as soon as the dataset is fully covered.
        if self.best_score == 1.0:
            print("✓ PERFECT SCORE ACHIEVED!")
            break

        # Early stopping when recent iterations stopped helping.
        if no_improvement_counter >= patience:
            print(f"⊗ Early stopping: No improvement for {patience} iterations")
            break

        print(f"{'─' * 60}")
        print(f"Iteration {iteration + 1}/{max_iterations}")
        print(f"{'─' * 60}")

        prompt = self._build_optimization_prompt(
            current_regex, self.best_score,
            result.true_positives, result.false_positives, result.false_negatives,
            last_failure_critique
        )

        # BUG FIX: `candidate` was previously used without ever being
        # generated (NameError on the first iteration). Query the LLM for
        # an improved pattern and extract it from the response.
        try:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": "You are an expert Regular Expression engineer."},
                    {"role": "user", "content": prompt}
                ],
                temperature=self.temperature
            )
            candidate = self._extract_regex_from_response(
                response.choices[0].message.content
            )
        except Exception as e:
            # Best-effort loop: count an API failure as a non-improving
            # iteration so `patience` can still terminate the run.
            print(f"✗ Generation failed: {e}")
            no_improvement_counter += 1
            print()
            continue

        print(f"Candidate: {candidate}")

        new_result = self._evaluate_and_sample(candidate, k=sample_size)
        new_score = new_result.score

        if new_score > self.best_score:
            improvement = new_score - self.best_score
            print(f"✓ IMPROVEMENT: {self.best_score:.1%} → {new_score:.1%} (+{improvement:.1%})")

            self.best_score = new_score
            self.best_regex = candidate
            current_regex = candidate
            result = new_result  # next prompt uses the new failure samples
            no_improvement_counter = 0
            last_failure_critique = None  # clear stale feedback

            self.iteration_history.append({
                "iteration": iteration + 1,
                "regex": candidate,
                "score": new_score,
                "improved": True
            })
        else:
            print(f"✗ No improvement: {new_score:.1%} (best: {self.best_score:.1%})")
            no_improvement_counter += 1

            # Ask the LLM why the candidate regressed; the critique is
            # injected into the next optimization prompt.
            last_failure_critique = self._generate_failure_critique(
                candidate, self.best_regex,
                new_result.false_positives, new_result.false_negatives,
                new_result.error_message
            )
            print(f"Critique: {last_failure_critique}")

            # Restart the next attempt from the best-known pattern.
            current_regex = self.best_regex

            self.iteration_history.append({
                "iteration": iteration + 1,
                "regex": candidate,
                "score": new_score,
                "improved": False
            })

        print()

    return self.best_regex
|
|
| def get_final_report(self) -> str: |
| """Generate summary report of optimization process.""" |
| report = f""" |
| {'=' * 60} |
| OPTIMIZATION COMPLETE |
| {'=' * 60} |
| Final Regex: {self.best_regex} |
| Final Accuracy: {self.best_score:.1%} ({int(self.best_score * len(self.data))}/{len(self.data)}) |
| Total Iterations: {len(self.iteration_history)} |
| Improvements: {sum(1 for h in self.iteration_history if h['improved'])} |
| {'=' * 60} |
| """ |
| return report |
|
|
|
|
def main():
    """Example usage with numerical ID extraction."""
    # Ground truth: 'expected' holds the ID to extract, or None for texts
    # whose numbers are not IDs (prices, emergency numbers).
    samples = [
        {"text": "Order 5521 was shipped", "expected": "5521"},
        {"text": "Ref ID: 990", "expected": "990"},
        {"text": "Customer 12", "expected": "12"},
        {"text": "Invoice #7834 pending", "expected": "7834"},
        {"text": "Total is $50.00", "expected": None},
        {"text": "Call 911 for help", "expected": None},
        {"text": "Price: $19.99", "expected": None},
        {"text": "Tracking: 88234", "expected": "88234"},
    ]

    api_client = OpenAI()

    optimizer = RegexOptimizerAgent(samples, api_client, model="gpt-4", temperature=0.7)

    # Start from a deliberately weak seed pattern and let the agent refine it.
    best_pattern = optimizer.run_optimization(
        initial_regex=r"\d",
        max_iterations=10,
        patience=3,
        sample_size=3,
    )

    print(optimizer.get_final_report())
|
|
|
|
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
|