# gepa.py — GEPA Regex Optimizer (Streamlit app)
# Provenance: uploaded from the local data folder by infinex, commit 16c5c77 (verified).
# Standard library
import copy
import json
import os
import re
from typing import Optional, Dict, Any

# Third-party
import dspy
import httpx
import pandas as pd
import streamlit as st
from openai import OpenAI
from st_aggrid import AgGrid, GridOptionsBuilder, GridUpdateMode, DataReturnMode
# --- Page Configuration ---
# Must run before any other Streamlit call in the script.
st.set_page_config(
    page_title="GEPA Regex Optimizer",
    page_icon="๐Ÿงฌ",
    layout="wide",
    initial_sidebar_state="expanded",
)
# --- Session State Initialization ---
# Canonical default values for every session-state key the app uses.
DEFAULT_STATE = {
    'dataset': None,                # pandas DataFrame once a file is uploaded
    'optimized_program': None,      # compiled DSPy program after optimization
    'optimization_history': [],     # list of {'score', 'prompt', 'timestamp'}
    'config': {
        'model_name': 'gpt-4o',
        'api_key': '',
        'base_url': 'https://api.openai.com/v1',
        'timeout': 30,
        'max_retries': 3,
        'temperature': 0.7,
        'max_tokens': 1024,
    },
    'gepa_config': {
        'num_iterations': 5,
        'num_candidates': 3,
        'early_stopping_threshold': 0.95,
    },
    'prompts': {
        'system_instruction': "You are a Regex Expert. Given the input text, provide a high-precision Python regex pattern to extract the target text.",
        'gepa_meta_prompt': "Focus on precision. If the feedback says the match was too broad, use more specific character classes or anchors. If it missed the target, suggest more flexible patterns.",
        'output_description': "A Python-compatible regular expression",
    },
    'train_test_split': 0.8,
    'regex_flags': [],
}
# BUGFIX: seed session state with deep copies. Assigning the nested dicts
# directly aliased DEFAULT_STATE, so later widget edits silently mutated the
# "defaults" and broke ConfigManager.reset_to_defaults().
for key, value in DEFAULT_STATE.items():
    if key not in st.session_state:
        st.session_state[key] = copy.deepcopy(value)
# --- Configuration Manager ---
class ConfigManager:
    """Manages application configuration with persistence to a JSON file."""

    CONFIG_FILE = "gepa_config.json"

    @staticmethod
    def save_config():
        """Save the current configuration to CONFIG_FILE.

        Returns:
            True on success, False on failure (an error toast is shown).
        """
        config_data = {
            'config': st.session_state.config,
            'gepa_config': st.session_state.gepa_config,
            'prompts': st.session_state.prompts,
            'train_test_split': st.session_state.train_test_split,
            'regex_flags': st.session_state.regex_flags,
        }
        try:
            with open(ConfigManager.CONFIG_FILE, 'w') as f:
                json.dump(config_data, f, indent=2)
            return True
        except Exception as e:
            st.error(f"Failed to save config: {e}")
            return False

    @staticmethod
    def load_config():
        """Load configuration from CONFIG_FILE into session state.

        Returns:
            True when no error occurred (including when the file does not
            exist), False on a read/parse failure.
        """
        try:
            if os.path.exists(ConfigManager.CONFIG_FILE):
                with open(ConfigManager.CONFIG_FILE, 'r') as f:
                    config_data = json.load(f)
                for key, value in config_data.items():
                    if key in st.session_state:
                        if isinstance(value, dict):
                            # Merge so keys missing from the file keep their
                            # current (default) values.
                            st.session_state[key].update(value)
                        else:
                            st.session_state[key] = value
            return True
        except Exception as e:
            st.warning(f"Failed to load config: {e}")
            return False

    @staticmethod
    def reset_to_defaults():
        """Reset configuration to defaults, preserving data and results.

        BUGFIX: uses copy.deepcopy instead of a shallow .copy() so session
        state never shares nested mutable values with DEFAULT_STATE.
        """
        for key, value in DEFAULT_STATE.items():
            if key not in ['dataset', 'optimized_program', 'optimization_history']:
                st.session_state[key] = copy.deepcopy(value)
# --- LLM Setup ---
def setup_dspy() -> bool:
    """Configure DSPy's global language model from the current settings.

    Returns:
        True when dspy.configure succeeded, False otherwise (error shown).
    """
    config = st.session_state.config
    try:
        # BUGFIX: the previous code built httpx.Limits(max_retries=...) —
        # httpx.Limits has no such parameter (retries belong to
        # httpx.HTTPTransport), so this function raised TypeError on every
        # call and LLM setup always failed. dspy.LM (via litellm) supports
        # api_key/api_base/num_retries/timeout directly, so the custom
        # OpenAI/httpx client is unnecessary.
        lm = dspy.LM(
            model=config['model_name'],
            api_key=config['api_key'] or os.getenv("OPENAI_API_KEY", "empty"),
            api_base=config['base_url'] or None,
            temperature=config['temperature'],
            max_tokens=config['max_tokens'],
            num_retries=config['max_retries'],
            timeout=config['timeout'],
        )
        dspy.configure(lm=lm)
        return True
    except Exception as e:
        st.error(f"LLM Configuration Error: {e}")
        return False
# --- Metric Function ---
def create_regex_metric(flags: list):
    """Build a GEPA metric closure honoring the given ``re`` flag names.

    Args:
        flags: Names of ``re`` module flags (e.g. ``"IGNORECASE"``);
            unknown names are silently ignored.

    Returns:
        A ``(example, prediction, trace)`` callable producing a
        ``dspy.Prediction`` with ``score`` and ``feedback`` fields.
    """
    flag_value = 0
    for name in flags:
        flag_value |= getattr(re, name, 0)

    def regex_metric_with_feedback(example, prediction, trace=None):
        """GEPA Metric with rich feedback for regex optimization."""
        target = example.ground_truth.strip()
        raw_text = example.raw_text
        pred_pattern = getattr(prediction, 'regex_pattern', '').strip()

        # Guard: model produced no pattern at all.
        if not pred_pattern:
            return dspy.Prediction(
                score=0.0,
                feedback=(
                    f"No regex pattern provided. Target text: '{target}'. "
                    "Please output a valid Python regex string."
                ),
            )

        # Guard: pattern does not compile.
        try:
            compiled = re.compile(pred_pattern, flag_value)
        except re.error as e:
            return dspy.Prediction(
                score=0.0,
                feedback=(
                    f"Invalid regex: '{pred_pattern}'. "
                    f"Error: {str(e)}. Check syntax and escape characters."
                ),
            )

        # Evaluate the first match against the annotated target.
        match = compiled.search(raw_text)
        extracted = match.group(0) if match else ""
        if extracted == target:
            return dspy.Prediction(
                score=1.0,
                feedback=f"Perfect match! Correctly extracted '{target}'."
            )

        # Partial-credit failure analysis: diagnose HOW the match was wrong
        # so GEPA's reflection step gets actionable feedback.
        score = 0.0
        feedback = f"Pattern '{pred_pattern}' produced incorrect result.\n"
        if not match:
            feedback += f"NO MATCH found. Target: '{target}'."
        elif target in extracted:
            score = 0.3
            feedback += (
                f"TOO BROAD: Extracted '{extracted}' contains target '{target}' "
                "plus extra characters. Use stricter boundaries or non-greedy quantifiers."
            )
        elif extracted in target:
            score = 0.3
            feedback += (
                f"TOO NARROW: Extracted '{extracted}' but target is '{target}'. "
                "Make pattern more inclusive."
            )
        else:
            feedback += f"WRONG MATCH: Got '{extracted}' instead of '{target}'."
        feedback += "\nAnalyze the target structure to isolate it uniquely."
        return dspy.Prediction(score=score, feedback=feedback)

    return regex_metric_with_feedback
# --- DSPy Program ---
class RegexSignature(dspy.Signature):
    """Dynamic signature for regex generation."""
    # NOTE: in dspy the class docstring above doubles as the prompt
    # instructions; RegexGenerator replaces it at construction time with the
    # user-configured system instruction, so do not edit it expecting the
    # change to reach the model.
    raw_text = dspy.InputField()        # source text to extract from
    regex_pattern = dspy.OutputField()  # generated Python regex string
class RegexGenerator(dspy.Module):
    """Configurable regex generation module."""

    def __init__(self, doc: str, output_desc: str):
        """
        Args:
            doc: Instruction text used as the signature's prompt.
            output_desc: Description attached to the regex_pattern field.
        """
        super().__init__()
        self.predictor = dspy.Predict(RegexSignature)
        # BUGFIX: the old code assigned signature.__doc__ and the field's
        # .desc in place, mutating the shared RegexSignature class — every
        # RegexGenerator instance (and any other user of the class) saw the
        # last-written prompt. Derive a fresh per-instance signature instead.
        signature = RegexSignature.with_instructions(doc)
        signature = signature.with_updated_fields('regex_pattern', desc=output_desc)
        self.predictor.signature = signature

    def forward(self, raw_text: str):
        """Run the predictor on raw_text and return its dspy.Prediction."""
        return self.predictor(raw_text=raw_text)
# --- Sidebar Configuration ---
def render_sidebar():
    """Render the configuration sidebar.

    Widgets write their values straight back into st.session_state on every
    rerun, so the rest of the app always reads the latest settings.
    """
    with st.sidebar:
        st.title("โš™๏ธ Configuration")
        # Config management buttons: persist / restore / reset session config.
        col1, col2, col3 = st.columns(3)
        with col1:
            if st.button("๐Ÿ’พ Save", use_container_width=True):
                if ConfigManager.save_config():
                    st.success("Saved!")
        with col2:
            if st.button("๐Ÿ“‚ Load", use_container_width=True):
                if ConfigManager.load_config():
                    st.success("Loaded!")
                    # Rerun so widgets pick up the freshly loaded values.
                    st.rerun()
        with col3:
            if st.button("๐Ÿ”„ Reset", use_container_width=True):
                ConfigManager.reset_to_defaults()
                st.rerun()
        st.divider()
        # LLM Configuration: connection and sampling parameters.
        with st.expander("๐Ÿค– LLM Settings", expanded=True):
            st.session_state.config['model_name'] = st.text_input(
                "Model Name",
                value=st.session_state.config['model_name'],
                help="e.g., gpt-4o, gpt-3.5-turbo, claude-3-opus"
            )
            st.session_state.config['api_key'] = st.text_input(
                "API Key",
                value=st.session_state.config['api_key'],
                type="password",
                help="Leave empty to use OPENAI_API_KEY env var"
            )
            st.session_state.config['base_url'] = st.text_input(
                "Base URL",
                value=st.session_state.config['base_url'],
                help="Custom API endpoint (e.g., for Azure, local models)"
            )
            col1, col2 = st.columns(2)
            with col1:
                st.session_state.config['timeout'] = st.number_input(
                    "Timeout (s)",
                    min_value=5,
                    max_value=300,
                    value=st.session_state.config['timeout']
                )
            with col2:
                st.session_state.config['max_retries'] = st.number_input(
                    "Max Retries",
                    min_value=0,
                    max_value=10,
                    value=st.session_state.config['max_retries']
                )
            col1, col2 = st.columns(2)
            with col1:
                st.session_state.config['temperature'] = st.slider(
                    "Temperature",
                    min_value=0.0,
                    max_value=2.0,
                    value=st.session_state.config['temperature'],
                    step=0.1
                )
            with col2:
                st.session_state.config['max_tokens'] = st.number_input(
                    "Max Tokens",
                    min_value=64,
                    max_value=8192,
                    value=st.session_state.config['max_tokens']
                )
        # GEPA Optimizer Settings: evolution budget and stopping criterion.
        with st.expander("๐Ÿงฌ GEPA Optimizer", expanded=False):
            st.session_state.gepa_config['num_iterations'] = st.slider(
                "Iterations",
                min_value=1,
                max_value=20,
                value=st.session_state.gepa_config['num_iterations'],
                help="Number of optimization iterations"
            )
            st.session_state.gepa_config['num_candidates'] = st.slider(
                "Candidates per Iteration",
                min_value=1,
                max_value=10,
                value=st.session_state.gepa_config['num_candidates'],
                help="Number of candidate patterns to evaluate"
            )
            st.session_state.gepa_config['early_stopping_threshold'] = st.slider(
                "Early Stopping Threshold",
                min_value=0.5,
                max_value=1.0,
                value=st.session_state.gepa_config['early_stopping_threshold'],
                step=0.05,
                help="Stop if this score is reached"
            )
        # Prompt Configuration: the texts seeding / guiding optimization.
        with st.expander("๐Ÿ“ Prompts", expanded=False):
            st.session_state.prompts['system_instruction'] = st.text_area(
                "System Instruction",
                value=st.session_state.prompts['system_instruction'],
                height=100,
                help="Initial instruction for regex generation"
            )
            st.session_state.prompts['gepa_meta_prompt'] = st.text_area(
                "GEPA Evolution Prompt",
                value=st.session_state.prompts['gepa_meta_prompt'],
                height=100,
                help="Instructions for GEPA's prompt evolution"
            )
            st.session_state.prompts['output_description'] = st.text_input(
                "Output Field Description",
                value=st.session_state.prompts['output_description'],
                help="Description for the regex output field"
            )
        # Regex Configuration: flags applied when compiling generated patterns.
        with st.expander("๐Ÿ”ง Regex Options", expanded=False):
            flag_options = ['IGNORECASE', 'MULTILINE', 'DOTALL', 'VERBOSE', 'ASCII']
            st.session_state.regex_flags = st.multiselect(
                "Regex Flags",
                options=flag_options,
                default=st.session_state.regex_flags,
                help="Python regex flags to apply"
            )
        # Data Split Configuration: train/validation proportion.
        with st.expander("๐Ÿ“Š Data Settings", expanded=False):
            st.session_state.train_test_split = st.slider(
                "Train/Validation Split",
                min_value=0.5,
                max_value=0.95,
                value=st.session_state.train_test_split,
                step=0.05,
                help="Proportion of data for training"
            )
# --- Main Application Tabs ---
def render_data_ingestion_tab():
    """Render the data ingestion tab: upload, annotate, and preview data."""
    st.header("๐Ÿ“ฅ Data Ingestion & Annotation")
    col1, col2 = st.columns([2, 1])
    with col1:
        uploaded = st.file_uploader(
            "Upload Dataset",
            type=["csv", "json", "xlsx"],
            help="CSV/JSON/Excel with 'text' column (ground_truth optional)"
        )
    with col2:
        st.markdown("**Expected Format:**")
        st.code("text,ground_truth\n'Sample text','expected'", language="csv")
    if uploaded:
        # Load based on file type
        try:
            if uploaded.name.endswith('.csv'):
                df = pd.read_csv(uploaded)
            elif uploaded.name.endswith('.json'):
                df = pd.read_json(uploaded)
            else:
                df = pd.read_excel(uploaded)
            # Ensure required columns
            if 'text' not in df.columns:
                st.error("Dataset must have a 'text' column.")
                return
            if 'ground_truth' not in df.columns:
                df['ground_truth'] = ''
            # BUGFIX: pandas reads empty cells as NaN, which compares unequal
            # to '' (so blank rows were counted as "annotated") and would
            # crash the metric's .strip(). Normalize to clean strings.
            df['ground_truth'] = df['ground_truth'].fillna('').astype(str)
            st.session_state.dataset = df
        except Exception as e:
            st.error(f"Failed to load file: {e}")
            return
    if st.session_state.dataset is not None:
        df = st.session_state.dataset
        st.subheader("๐Ÿ“ Annotate Ground Truth")
        st.caption("Edit the 'ground_truth' column to specify expected extractions.")
        # Configure AgGrid: only ground_truth is editable.
        gb = GridOptionsBuilder.from_dataframe(df)
        gb.configure_default_column(
            resizable=True,
            filterable=True,
            sortable=True
        )
        gb.configure_column(
            "text",
            width=500,
            wrapText=True,
            autoHeight=True,
            editable=False
        )
        gb.configure_column(
            "ground_truth",
            editable=True,
            width=300,
            cellStyle={'backgroundColor': '#fffde7'}
        )
        gb.configure_selection(
            selection_mode='multiple',
            use_checkbox=True
        )
        gb.configure_pagination(paginationAutoPageSize=False, paginationPageSize=10)
        grid_response = AgGrid(
            df,
            gridOptions=gb.build(),
            update_mode=GridUpdateMode.VALUE_CHANGED,
            data_return_mode=DataReturnMode.FILTERED_AND_SORTED,
            fit_columns_on_grid_load=False,
            theme='streamlit',
            height=400
        )
        # Persist any in-grid edits back to session state.
        st.session_state.dataset = pd.DataFrame(grid_response['data'])
        # Data statistics
        col1, col2, col3 = st.columns(3)
        # fillna guards against None/NaN reintroduced by grid edits.
        annotated = (st.session_state.dataset['ground_truth'].fillna('') != '').sum()
        total = len(st.session_state.dataset)
        train_size = int(total * st.session_state.train_test_split)
        with col1:
            st.metric("Total Samples", total)
        with col2:
            st.metric("Annotated", f"{annotated}/{total}")
        with col3:
            st.metric("Train/Val Split", f"{train_size}/{total - train_size}")
        # Sample data preview
        with st.expander("๐Ÿ“‹ Sample Preview"):
            st.dataframe(
                st.session_state.dataset.head(5),
                use_container_width=True
            )
def render_optimization_tab():
    """Render the optimization tab: split data, run GEPA, show results."""
    st.header("๐Ÿงฌ GEPA Optimization")
    if st.session_state.dataset is None:
        st.warning("โš ๏ธ Please upload and annotate data first.")
        return
    df = st.session_state.dataset
    annotated_df = df[df['ground_truth'] != '']
    if len(annotated_df) < 2:
        st.warning("โš ๏ธ Please annotate at least 2 samples.")
        return
    # Split configuration display
    split_idx = int(len(annotated_df) * st.session_state.train_test_split)
    train_df = annotated_df.iloc[:split_idx]
    val_df = annotated_df.iloc[split_idx:]
    col1, col2 = st.columns(2)
    with col1:
        st.info(f"๐Ÿ“š Training samples: {len(train_df)}")
    with col2:
        st.info(f"๐Ÿงช Validation samples: {len(val_df)}")
    # Optimization controls
    col1, col2, col3 = st.columns([1, 1, 2])
    with col1:
        run_button = st.button(
            "๐Ÿš€ Run Optimization",
            type="primary",
            use_container_width=True
        )
    with col2:
        if st.button("๐Ÿ”„ Reset Results", use_container_width=True):
            st.session_state.optimized_program = None
            st.session_state.optimization_history = []
            st.rerun()
    if run_button:
        if not setup_dspy():
            return
        # Prepare training / validation examples for DSPy.
        trainset = [
            dspy.Example(
                raw_text=row['text'],
                ground_truth=row['ground_truth']
            ).with_inputs('raw_text')
            for _, row in train_df.iterrows()
        ]
        valset = [
            dspy.Example(
                raw_text=row['text'],
                ground_truth=row['ground_truth']
            ).with_inputs('raw_text')
            for _, row in val_df.iterrows()
        ]
        # Progress tracking
        progress_bar = st.progress(0)
        status_text = st.empty()
        try:
            with st.spinner("๐Ÿงฌ GEPA is evolving regex patterns..."):
                status_text.text("Initializing optimizer...")
                # BUGFIX: `GEPA` was referenced without qualification and never
                # imported, so this raised NameError — the optimizer lives at
                # dspy.GEPA. dspy's GEPA does not accept num_iterations /
                # num_candidates either: iterations map onto the max_full_evals
                # budget; candidates-per-iteration has no direct equivalent
                # (GEPA proposes one candidate per reflection step).
                optimizer = dspy.GEPA(
                    metric=create_regex_metric(st.session_state.regex_flags),
                    max_full_evals=st.session_state.gepa_config['num_iterations'],
                    # GEPA needs an LM for its reflection step; reuse the one
                    # configured by setup_dspy().
                    reflection_lm=dspy.settings.lm,
                )
                progress_bar.progress(20)
                status_text.text("Creating initial program...")
                program = RegexGenerator(
                    doc=st.session_state.prompts['system_instruction'],
                    output_desc=st.session_state.prompts['output_description']
                )
                progress_bar.progress(40)
                status_text.text("Running optimization...")
                optimized = optimizer.compile(
                    program,
                    trainset=trainset,
                    valset=valset,  # let GEPA track Pareto scores on held-out data
                )
                progress_bar.progress(80)
                status_text.text("Evaluating on validation set...")
                # Independent evaluation of the compiled program on the
                # validation split using the same feedback metric.
                metric_fn = create_regex_metric(st.session_state.regex_flags)
                val_scores = []
                for example in valset:
                    pred = optimized(raw_text=example.raw_text)
                    result = metric_fn(example, pred)
                    val_scores.append(result.score)
                avg_score = sum(val_scores) / len(val_scores) if val_scores else 0
                progress_bar.progress(100)
                status_text.text("Complete!")
                st.session_state.optimized_program = optimized
                st.session_state.optimization_history.append({
                    'score': avg_score,
                    'prompt': optimized.predictor.signature.__doc__,
                    'timestamp': pd.Timestamp.now()
                })
                st.success(f"โœ… Optimization Complete! Validation Score: {avg_score:.2%}")
        except Exception as e:
            st.error(f"Optimization failed: {e}")
            return
    # Display results
    if st.session_state.optimized_program:
        st.subheader("๐Ÿ“Š Results")
        with st.expander("๐Ÿ” Evolved Prompt", expanded=True):
            st.code(
                st.session_state.optimized_program.predictor.signature.__doc__,
                language="text"
            )
    # Optimization history
    if st.session_state.optimization_history:
        with st.expander("๐Ÿ“ˆ Optimization History"):
            history_df = pd.DataFrame(st.session_state.optimization_history)
            st.dataframe(history_df, use_container_width=True)
def render_testing_tab():
    """Render the testing tab: single-shot and batch regex generation tests."""
    st.header("๐Ÿ” Test & Validate")
    if st.session_state.optimized_program is None:
        st.warning("โš ๏ธ Please run optimization first.")
        return
    # Single test
    st.subheader("๐Ÿงช Single Test")
    test_input = st.text_area(
        "Enter test text",
        height=100,
        placeholder="Paste text here to extract regex pattern..."
    )
    col1, col2 = st.columns([1, 3])
    with col1:
        test_button = st.button("โ–ถ๏ธ Generate & Run", type="primary")
    if test_button and test_input:
        if not setup_dspy():
            return
        with st.spinner("Generating regex... "):
            try:
                result = st.session_state.optimized_program(raw_text=test_input)
                pattern = result.regex_pattern
                st.code(f"Generated Regex: {pattern}", language="regex")
                # Compile and test with the user-selected flags.
                flags = 0
                for flag in st.session_state.regex_flags:
                    flags |= getattr(re, flag, 0)
                compiled = re.compile(pattern, flags)
                matches = compiled.findall(test_input)
                if matches:
                    st.success(f"โœ… Found {len(matches)} match(es):")
                    for i, match in enumerate(matches, 1):
                        st.markdown(f"**Match {i}:** `{match}`")
                    # Highlight matches in text.
                    # BUGFIX: Streamlit's colored-text syntax is ":green[...]"
                    # with no space after the colon; the old "**: green[...]**"
                    # rendered literally instead of highlighting.
                    highlighted = test_input
                    for match in matches:
                        highlighted = highlighted.replace(
                            match,
                            f"**:green[{match}]**"
                        )
                    st.markdown("**Highlighted text:**")
                    st.markdown(highlighted)
                else:
                    st.warning("No matches found.")
            except re.error as e:
                st.error(f"Invalid regex generated: {e}")
            except Exception as e:
                st.error(f"Error: {e}")
    st.divider()
    # Batch testing
    st.subheader("๐Ÿ“‹ Batch Testing")
    batch_file = st.file_uploader(
        "Upload test data (CSV with 'text' column)",
        type=["csv"],
        key="batch_test"
    )
    if batch_file:
        test_df = pd.read_csv(batch_file)
        if 'text' not in test_df.columns:
            st.error("CSV must have 'text' column.")
            return
        if st.button("๐Ÿš€ Run Batch Test"):
            if not setup_dspy():
                return
            results = []
            progress = st.progress(0)
            # BUGFIX: iterrows() yields index *labels*; enumerate gives the
            # positional count so the progress fraction stays within [0, 1]
            # even when the CSV index is not a clean 0..n-1 range.
            for pos, (_, row) in enumerate(test_df.iterrows(), start=1):
                try:
                    result = st.session_state.optimized_program(raw_text=row['text'])
                    pattern = result.regex_pattern
                    flags = 0
                    for flag in st.session_state.regex_flags:
                        flags |= getattr(re, flag, 0)
                    match = re.search(pattern, row['text'], flags)
                    extracted = match.group(0) if match else ""
                    results.append({
                        'text': row['text'][:100] + '...' if len(row['text']) > 100 else row['text'],
                        'pattern': pattern,
                        'extracted': extracted,
                        'success': bool(match)
                    })
                except Exception as e:
                    results.append({
                        'text': row['text'][:100] + '...',
                        'pattern': 'ERROR',
                        'extracted': str(e),
                        'success': False
                    })
                progress.progress(pos / len(test_df))
            results_df = pd.DataFrame(results)
            # Summary metrics
            success_rate = results_df['success'].mean()
            col1, col2 = st.columns(2)
            with col1:
                st.metric("Success Rate", f"{success_rate:.1%}")
            with col2:
                st.metric("Total Tests", len(results_df))
            # Results table
            st.dataframe(results_df, use_container_width=True)
            # Download results (BUGFIX: filename had a stray space — ". csv").
            csv = results_df.to_csv(index=False)
            st.download_button(
                "๐Ÿ“ฅ Download Results",
                csv,
                "batch_test_results.csv",
                "text/csv"
            )
# --- Main Application ---
def main():
    """Application entry point: sidebar config plus the three workflow tabs."""
    render_sidebar()
    st.title("๐Ÿงฌ GEPA Regex Optimizer")
    st.caption("Automated regex generation with DSPy and evolutionary optimization")
    # Pair each tab with its renderer instead of three hand-written blocks.
    tabs = st.tabs([
        "๐Ÿ“ฅ Data Ingestion",
        "๐Ÿงฌ Optimization",
        "๐Ÿ” Testing"
    ])
    renderers = (
        render_data_ingestion_tab,
        render_optimization_tab,
        render_testing_tab,
    )
    for tab, render in zip(tabs, renderers):
        with tab:
            render()
    # Footer
    st.divider()
    st.caption(
        "Built with Streamlit, DSPy, and GEPA | "
        "Configuration is auto-saved in the sidebar"
    )


if __name__ == "__main__":
    main()