|
|
import copy
import json
import os
import re
from typing import Optional, Dict, Any

import httpx
import pandas as pd
import streamlit as st

import dspy
from openai import OpenAI
from st_aggrid import AgGrid, GridOptionsBuilder, GridUpdateMode, DataReturnMode
|
|
|
|
|
|
|
|
# Global Streamlit page setup — must be the first st.* call in the script.
st.set_page_config(
    page_title="GEPA Regex Optimizer",
    page_icon="๐งฌ",
    layout="wide",
    initial_sidebar_state="expanded",
)
|
|
|
|
|
|
|
|
# Canonical defaults for every st.session_state key the app relies on.
# NOTE: the nested dicts/lists are mutable; copy them before placing them
# into session state so edits cannot leak back into these defaults.
DEFAULT_STATE = {
    # Uploaded/annotated data and optimization outputs (not persisted to disk).
    'dataset': None,
    'optimized_program': None,
    'optimization_history': [],
    # LLM connection and sampling settings.
    'config': {
        'model_name': 'gpt-4o',
        'api_key': '',
        'base_url': 'https://api.openai.com/v1',
        'timeout': 30,  # HTTP timeout in seconds
        'max_retries': 3,
        'temperature': 0.7,
        'max_tokens': 1024,
    },
    # GEPA optimizer hyperparameters.
    'gepa_config': {
        'num_iterations': 5,
        'num_candidates': 3,
        'early_stopping_threshold': 0.95,  # stop once this score is reached
    },
    # Prompt text that seeds / steers the regex-generation program.
    'prompts': {
        'system_instruction': "You are a Regex Expert. Given the input text, provide a high-precision Python regex pattern to extract the target text.",
        'gepa_meta_prompt': "Focus on precision. If the feedback says the match was too broad, use more specific character classes or anchors. If it missed the target, suggest more flexible patterns.",
        'output_description': "A Python-compatible regular expression",
    },
    # Fraction of annotated rows used for training (remainder is validation).
    'train_test_split': 0.8,
    # Names of re.* flags (e.g. 'IGNORECASE') applied when compiling patterns.
    'regex_flags': [],
}
|
|
|
|
|
# Seed st.session_state with defaults, only for keys not already present
# (Streamlit re-executes this script on every interaction).
# deepcopy is required: assigning DEFAULT_STATE's nested dicts by reference
# would let later widget writes mutate the defaults themselves, corrupting
# any subsequent "reset to defaults".
for key, value in DEFAULT_STATE.items():
    if key not in st.session_state:
        st.session_state[key] = copy.deepcopy(value)
|
|
|
|
|
|
|
|
|
|
|
class ConfigManager:
    """Manages application configuration with persistence to a JSON file."""

    # Config is stored next to the app; relative to the working directory.
    CONFIG_FILE = "gepa_config.json"

    @staticmethod
    def save_config():
        """Save the current session-state configuration to CONFIG_FILE.

        Returns:
            bool: True on success, False if serialization or the write failed
            (the error is surfaced via st.error).
        """
        config_data = {
            'config': st.session_state.config,
            'gepa_config': st.session_state.gepa_config,
            'prompts': st.session_state.prompts,
            'train_test_split': st.session_state.train_test_split,
            'regex_flags': st.session_state.regex_flags,
        }
        try:
            with open(ConfigManager.CONFIG_FILE, 'w') as f:
                json.dump(config_data, f, indent=2)
            return True
        except Exception as e:  # UI boundary: report, don't crash the app
            st.error(f"Failed to save config: {e}")
            return False

    @staticmethod
    def load_config():
        """Load configuration from CONFIG_FILE into session state.

        Dict-valued keys are merged via .update() so that keys added in newer
        app versions keep their defaults; scalar keys are overwritten.

        Returns:
            bool: True if the file existed and was applied, False otherwise.
        """
        try:
            if os.path.exists(ConfigManager.CONFIG_FILE):
                with open(ConfigManager.CONFIG_FILE, 'r') as f:
                    config_data = json.load(f)
                for key, value in config_data.items():
                    if key in st.session_state:
                        if isinstance(value, dict):
                            st.session_state[key].update(value)
                        else:
                            st.session_state[key] = value
                return True
        except Exception as e:  # UI boundary: report, don't crash the app
            st.warning(f"Failed to load config: {e}")
        return False

    @staticmethod
    def reset_to_defaults():
        """Reset configuration to defaults, preserving data and results keys."""
        preserved = ('dataset', 'optimized_program', 'optimization_history')
        for key, value in DEFAULT_STATE.items():
            if key not in preserved:
                # deepcopy (not .copy()): a shallow copy would alias the nested
                # dicts in DEFAULT_STATE, so later widget edits would silently
                # rewrite the defaults and make future resets meaningless.
                st.session_state[key] = copy.deepcopy(value)
|
|
|
|
|
|
|
|
|
|
|
def setup_dspy() -> bool:
    """Configure DSPy's global language model from current session settings.

    Builds an httpx client (with timeout and transport-level retries), wraps
    it in an OpenAI client pointed at the configured base URL, and installs
    a dspy.LM as the global LM.

    Returns:
        bool: True on success, False on any error (reported via st.error).
    """
    config = st.session_state.config
    try:
        # httpx.Limits has no retry setting — passing max_retries there raises
        # TypeError. Connection retries belong on the transport.
        http_client = httpx.Client(
            timeout=config['timeout'],
            transport=httpx.HTTPTransport(retries=config['max_retries']),
        )

        custom_openai_client = OpenAI(
            api_key=config['api_key'] or os.getenv("OPENAI_API_KEY", "empty"),
            base_url=config['base_url'] or None,
            http_client=http_client,
        )

        # NOTE(review): confirm the installed dspy version accepts a pre-built
        # OpenAI client via the `client` kwarg (newer dspy routes via LiteLLM).
        lm = dspy.LM(
            model=config['model_name'],
            client=custom_openai_client,
            temperature=config['temperature'],
            max_tokens=config['max_tokens'],
        )
        dspy.configure(lm=lm)
        return True
    except Exception as e:  # UI boundary: surface any setup failure
        st.error(f"LLM Configuration Error: {e}")
        return False
|
|
|
|
|
|
|
|
|
|
|
def create_regex_metric(flags: list):
    """Build a GEPA feedback metric that compiles patterns with *flags*.

    Args:
        flags: Names of ``re`` module flags (e.g. ``'IGNORECASE'``) to be
            OR-ed together; unknown names are silently ignored.

    Returns:
        A callable ``(example, prediction, trace=None) -> dspy.Prediction``
        yielding a score in [0, 1] and natural-language feedback for GEPA.
    """
    combined = 0
    for name in flags:
        combined |= getattr(re, name, 0)

    def regex_metric_with_feedback(example, prediction, trace=None):
        """GEPA Metric with rich feedback for regex optimization."""
        target = example.ground_truth.strip()
        raw_text = example.raw_text
        pred_pattern = getattr(prediction, 'regex_pattern', '').strip()

        # Case 1: the program produced no pattern at all.
        if not pred_pattern:
            return dspy.Prediction(
                score=0.0,
                feedback=(
                    f"No regex pattern provided. Target text: '{target}'. "
                    "Please output a valid Python regex string."
                ),
            )

        # Case 2: the pattern does not compile.
        try:
            compiled = re.compile(pred_pattern, combined)
        except re.error as e:
            return dspy.Prediction(
                score=0.0,
                feedback=(
                    f"Invalid regex: '{pred_pattern}'. "
                    f"Error: {str(e)}. Check syntax and escape characters."
                ),
            )

        first = compiled.search(raw_text)
        extracted = first.group(0) if first else ""

        # Case 3: exact extraction — full credit.
        if extracted == target:
            return dspy.Prediction(
                score=1.0,
                feedback=f"Perfect match! Correctly extracted '{target}'.",
            )

        # Case 4: wrong result — grade the failure mode for useful feedback.
        score = 0.0
        feedback = f"Pattern '{pred_pattern}' produced incorrect result.\n"
        if first is None:
            feedback += f"NO MATCH found. Target: '{target}'."
        elif target in extracted:
            score = 0.3  # partial credit: superset of the target
            feedback += (
                f"TOO BROAD: Extracted '{extracted}' contains target '{target}' "
                "plus extra characters. Use stricter boundaries or non-greedy quantifiers."
            )
        elif extracted in target:
            score = 0.3  # partial credit: subset of the target
            feedback += (
                f"TOO NARROW: Extracted '{extracted}' but target is '{target}'. "
                "Make pattern more inclusive."
            )
        else:
            feedback += f"WRONG MATCH: Got '{extracted}' instead of '{target}'."

        feedback += "\nAnalyze the target structure to isolate it uniquely."
        return dspy.Prediction(score=score, feedback=feedback)

    return regex_metric_with_feedback
|
|
|
|
|
|
|
|
|
|
|
class RegexSignature(dspy. Signature):
    """Dynamic signature for regex generation."""
    # NOTE: in dspy this class docstring doubles as the LM instruction; it is
    # overwritten at runtime with the user-configured system instruction, so
    # the text above is only a placeholder — do not rely on it.
    # Input field: the raw document text the pattern must be derived from.
    raw_text = dspy. InputField()
    # Output field: the regex string proposed by the LM (description is also
    # set at runtime from the user-configured output description).
    regex_pattern = dspy.OutputField()
|
|
|
|
|
|
|
|
class RegexGenerator(dspy.Module):
    """Configurable regex generation module."""

    def __init__(self, doc: str, output_desc: str):
        """Create a predictor and install the user-configured prompt text.

        Args:
            doc: Instruction text installed as the signature's docstring.
            output_desc: Description applied to the regex output field.

        NOTE(review): these assignments mutate the shared RegexSignature
        class, so every instance observes the most recently set values —
        confirm single-instance usage is intended.
        """
        super().__init__()
        self.predictor = dspy.Predict(RegexSignature)
        self.predictor.signature.__doc__ = doc
        self.predictor.signature.regex_pattern.desc = output_desc

    def forward(self, raw_text: str):
        """Run the predictor on *raw_text* and return its prediction."""
        return self.predictor(raw_text=raw_text)
|
|
|
|
|
|
|
|
|
|
|
def render_sidebar():
    """Render the configuration sidebar.

    All widgets write directly into st.session_state; nothing is returned.
    Decomposed into one private helper per settings section.
    """
    with st.sidebar:
        st.title("โ๏ธ Configuration")
        _render_config_buttons()
        st.divider()
        _render_llm_settings()
        _render_gepa_settings()
        _render_prompt_settings()
        _render_regex_options()
        _render_data_settings()


def _render_config_buttons():
    """Save / load / reset buttons for the persisted configuration."""
    col1, col2, col3 = st.columns(3)
    with col1:
        if st.button("๐พ Save", use_container_width=True):
            if ConfigManager.save_config():
                st.success("Saved!")
    with col2:
        if st.button("๐ Load", use_container_width=True):
            if ConfigManager.load_config():
                st.success("Loaded!")
                st.rerun()  # re-run so widgets pick up the loaded values
    with col3:
        if st.button("๐ Reset", use_container_width=True):
            ConfigManager.reset_to_defaults()
            st.rerun()


def _render_llm_settings():
    """LLM connection and sampling parameters (st.session_state.config)."""
    with st.expander("๐ค LLM Settings", expanded=True):
        # `cfg` aliases the session-state dict, so item writes persist.
        cfg = st.session_state.config
        cfg['model_name'] = st.text_input(
            "Model Name",
            value=cfg['model_name'],
            help="e.g., gpt-4o, gpt-3.5-turbo, claude-3-opus"
        )
        cfg['api_key'] = st.text_input(
            "API Key",
            value=cfg['api_key'],
            type="password",
            help="Leave empty to use OPENAI_API_KEY env var"
        )
        cfg['base_url'] = st.text_input(
            "Base URL",
            value=cfg['base_url'],
            help="Custom API endpoint (e.g., for Azure, local models)"
        )

        col1, col2 = st.columns(2)
        with col1:
            cfg['timeout'] = st.number_input(
                "Timeout (s)",
                min_value=5,
                max_value=300,
                value=cfg['timeout']
            )
        with col2:
            cfg['max_retries'] = st.number_input(
                "Max Retries",
                min_value=0,
                max_value=10,
                value=cfg['max_retries']
            )

        col1, col2 = st.columns(2)
        with col1:
            cfg['temperature'] = st.slider(
                "Temperature",
                min_value=0.0,
                max_value=2.0,
                value=cfg['temperature'],
                step=0.1
            )
        with col2:
            cfg['max_tokens'] = st.number_input(
                "Max Tokens",
                min_value=64,
                max_value=8192,
                value=cfg['max_tokens']
            )


def _render_gepa_settings():
    """GEPA optimizer hyperparameters (st.session_state.gepa_config)."""
    with st.expander("๐งฌ GEPA Optimizer", expanded=False):
        gepa = st.session_state.gepa_config
        gepa['num_iterations'] = st.slider(
            "Iterations",
            min_value=1,
            max_value=20,
            value=gepa['num_iterations'],
            help="Number of optimization iterations"
        )
        gepa['num_candidates'] = st.slider(
            "Candidates per Iteration",
            min_value=1,
            max_value=10,
            value=gepa['num_candidates'],
            help="Number of candidate patterns to evaluate"
        )
        gepa['early_stopping_threshold'] = st.slider(
            "Early Stopping Threshold",
            min_value=0.5,
            max_value=1.0,
            value=gepa['early_stopping_threshold'],
            step=0.05,
            help="Stop if this score is reached"
        )


def _render_prompt_settings():
    """Editable prompt text (st.session_state.prompts)."""
    with st.expander("๐ Prompts", expanded=False):
        prompts = st.session_state.prompts
        prompts['system_instruction'] = st.text_area(
            "System Instruction",
            value=prompts['system_instruction'],
            height=100,
            help="Initial instruction for regex generation"
        )
        prompts['gepa_meta_prompt'] = st.text_area(
            "GEPA Evolution Prompt",
            value=prompts['gepa_meta_prompt'],
            height=100,
            help="Instructions for GEPA's prompt evolution"
        )
        prompts['output_description'] = st.text_input(
            "Output Field Description",
            value=prompts['output_description'],
            help="Description for the regex output field"
        )


def _render_regex_options():
    """Regex flag selection (st.session_state.regex_flags)."""
    with st.expander("๐ง Regex Options", expanded=False):
        flag_options = ['IGNORECASE', 'MULTILINE', 'DOTALL', 'VERBOSE', 'ASCII']
        st.session_state.regex_flags = st.multiselect(
            "Regex Flags",
            options=flag_options,
            default=st.session_state.regex_flags,
            help="Python regex flags to apply"
        )


def _render_data_settings():
    """Train/validation split ratio (st.session_state.train_test_split)."""
    with st.expander("๐ Data Settings", expanded=False):
        st.session_state.train_test_split = st.slider(
            "Train/Validation Split",
            min_value=0.5,
            max_value=0.95,
            value=st.session_state.train_test_split,
            step=0.05,
            help="Proportion of data for training"
        )
|
|
|
|
|
|
|
|
|
|
|
def render_data_ingestion_tab():
    """Render the data ingestion tab: upload, annotate, and preview data."""
    st.header("๐ฅ Data Ingestion & Annotation")

    col1, col2 = st.columns([2, 1])

    with col1:
        uploaded = st.file_uploader(
            "Upload Dataset",
            type=["csv", "json", "xlsx"],
            help="CSV/JSON/Excel with 'text' column (ground_truth optional)"
        )

    with col2:
        st.markdown("**Expected Format:**")
        st.code("text,ground_truth\n'Sample text','expected'", language="csv")

    # Only (re)parse when a *new* file arrives. Streamlit re-runs this script
    # on every interaction while the uploader still holds the file, so
    # unconditionally re-parsing here would clobber annotations made in the
    # grid below on every rerun.
    if uploaded is not None and st.session_state.get('_loaded_file') != uploaded.name:
        try:
            if uploaded.name.endswith('.csv'):
                df = pd.read_csv(uploaded)
            elif uploaded.name.endswith('.json'):
                df = pd.read_json(uploaded)
            else:
                df = pd.read_excel(uploaded)

            if 'text' not in df.columns:
                st.error("Dataset must have a 'text' column.")
                return

            if 'ground_truth' not in df.columns:
                df['ground_truth'] = ''
            else:
                # CSV/Excel readers surface empty cells as NaN; normalize to ''
                # so the annotation count and != '' filters behave correctly.
                df['ground_truth'] = df['ground_truth'].fillna('')

            st.session_state.dataset = df
            st.session_state['_loaded_file'] = uploaded.name

        except Exception as e:  # UI boundary: show parse errors, don't crash
            st.error(f"Failed to load file: {e}")
            return

    if st.session_state.dataset is None:
        return

    df = st.session_state.dataset

    st.subheader("๐ Annotate Ground Truth")
    st.caption("Edit the 'ground_truth' column to specify expected extractions.")

    # Grid: read-only text column, editable highlighted ground_truth column.
    gb = GridOptionsBuilder.from_dataframe(df)
    gb.configure_default_column(
        resizable=True,
        filterable=True,
        sortable=True
    )
    gb.configure_column(
        "text",
        width=500,
        wrapText=True,
        autoHeight=True,
        editable=False
    )
    gb.configure_column(
        "ground_truth",
        editable=True,
        width=300,
        cellStyle={'backgroundColor': '#fffde7'}
    )
    gb.configure_selection(
        selection_mode='multiple',
        use_checkbox=True
    )
    gb.configure_pagination(paginationAutoPageSize=False, paginationPageSize=10)

    grid_response = AgGrid(
        df,
        gridOptions=gb.build(),
        update_mode=GridUpdateMode.VALUE_CHANGED,
        data_return_mode=DataReturnMode.FILTERED_AND_SORTED,
        fit_columns_on_grid_load=False,
        theme='streamlit',
        height=400
    )

    # Persist grid edits back into session state.
    # NOTE(review): FILTERED_AND_SORTED returns only the currently visible
    # rows, so an active grid filter shrinks the stored dataset — confirm
    # this is intended.
    st.session_state.dataset = pd.DataFrame(grid_response['data'])

    col1, col2, col3 = st.columns(3)
    annotated = (st.session_state.dataset['ground_truth'] != '').sum()
    total = len(st.session_state.dataset)
    train_size = int(total * st.session_state.train_test_split)

    with col1:
        st.metric("Total Samples", total)
    with col2:
        st.metric("Annotated", f"{annotated}/{total}")
    with col3:
        st.metric("Train/Val Split", f"{train_size}/{total - train_size}")

    with st.expander("๐ Sample Preview"):
        st.dataframe(
            st.session_state.dataset.head(5),
            use_container_width=True
        )
|
|
|
|
|
|
|
|
def render_optimization_tab():
    """Render the optimization tab: split data, run GEPA, show results."""
    st.header("๐งฌ GEPA Optimization")

    if st.session_state.dataset is None:
        st.warning("โ ๏ธ Please upload and annotate data first.")
        return

    df = st.session_state.dataset
    annotated_df = df[df['ground_truth'] != '']

    if len(annotated_df) < 2:
        st.warning("โ ๏ธ Please annotate at least 2 samples.")
        return

    # Deterministic head/tail split (no shuffle) by the configured ratio.
    split_idx = int(len(annotated_df) * st.session_state.train_test_split)
    train_df = annotated_df.iloc[:split_idx]
    val_df = annotated_df.iloc[split_idx:]

    col1, col2 = st.columns(2)
    with col1:
        st.info(f"๐ Training samples: {len(train_df)}")
    with col2:
        st.info(f"๐งช Validation samples: {len(val_df)}")

    col1, col2, col3 = st.columns([1, 1, 2])

    with col1:
        run_button = st.button(
            "๐ Run Optimization",
            type="primary",
            use_container_width=True
        )

    with col2:
        if st.button("๐ Reset Results", use_container_width=True):
            st.session_state.optimized_program = None
            st.session_state.optimization_history = []
            st.rerun()

    if run_button:
        if not setup_dspy():
            return

        def _to_examples(frame):
            """Convert annotated rows into dspy Examples with 'raw_text' input."""
            return [
                dspy.Example(
                    raw_text=row['text'],
                    ground_truth=row['ground_truth']
                ).with_inputs('raw_text')
                for _, row in frame.iterrows()
            ]

        trainset = _to_examples(train_df)
        valset = _to_examples(val_df)

        progress_bar = st.progress(0)
        status_text = st.empty()

        try:
            with st.spinner("๐งฌ GEPA is evolving regex patterns..."):
                status_text.text("Initializing optimizer...")

                # GEPA lives in the dspy namespace; the bare name `GEPA` was
                # previously undefined here (NameError at runtime).
                # NOTE(review): confirm the installed dspy version accepts
                # these constructor kwargs and whether compile() needs valset.
                optimizer = dspy.GEPA(
                    metric=create_regex_metric(st.session_state.regex_flags),
                    num_iterations=st.session_state.gepa_config['num_iterations'],
                    num_candidates=st.session_state.gepa_config['num_candidates'],
                )

                progress_bar.progress(20)
                status_text.text("Creating initial program...")

                program = RegexGenerator(
                    doc=st.session_state.prompts['system_instruction'],
                    output_desc=st.session_state.prompts['output_description']
                )

                progress_bar.progress(40)
                status_text.text("Running optimization...")

                optimized = optimizer.compile(
                    program,
                    trainset=trainset,
                )

                progress_bar.progress(80)
                status_text.text("Evaluating on validation set...")

                # Score the optimized program on the held-out validation rows.
                metric_fn = create_regex_metric(st.session_state.regex_flags)
                val_scores = []
                for example in valset:
                    pred = optimized(raw_text=example.raw_text)
                    result = metric_fn(example, pred)
                    val_scores.append(result.score)

                # Guard: the split can leave an empty validation set.
                avg_score = sum(val_scores) / len(val_scores) if val_scores else 0

                progress_bar.progress(100)
                status_text.text("Complete!")

                st.session_state.optimized_program = optimized
                st.session_state.optimization_history.append({
                    'score': avg_score,
                    'prompt': optimized.predictor.signature.__doc__,
                    'timestamp': pd.Timestamp.now()
                })

                st.success(f"โ Optimization Complete! Validation Score: {avg_score:.2%}")

        except Exception as e:  # UI boundary: report optimization failures
            st.error(f"Optimization failed: {e}")
            return

    if st.session_state.optimized_program:
        st.subheader("๐ Results")

        with st.expander("๐ Evolved Prompt", expanded=True):
            st.code(
                st.session_state.optimized_program.predictor.signature.__doc__,
                language="text"
            )

        if st.session_state.optimization_history:
            with st.expander("๐ Optimization History"):
                history_df = pd.DataFrame(st.session_state.optimization_history)
                st.dataframe(history_df, use_container_width=True)
|
|
|
|
|
|
|
|
def _combined_regex_flags() -> int:
    """OR together the re.* flags selected in the sidebar (unknowns ignored)."""
    flags = 0
    for flag in st.session_state.regex_flags:
        flags |= getattr(re, flag, 0)
    return flags


def render_testing_tab():
    """Render the testing tab: single-text and batch regex validation."""
    st.header("๐ Test & Validate")

    if st.session_state.optimized_program is None:
        st.warning("โ ๏ธ Please run optimization first.")
        return

    st.subheader("๐งช Single Test")

    test_input = st.text_area(
        "Enter test text",
        height=100,
        placeholder="Paste text here to extract regex pattern..."
    )

    col1, col2 = st.columns([1, 3])
    with col1:
        test_button = st.button("โถ๏ธ Generate & Run", type="primary")

    if test_button and test_input:
        if not setup_dspy():
            return

        with st.spinner("Generating regex... "):
            try:
                result = st.session_state.optimized_program(raw_text=test_input)
                pattern = result.regex_pattern

                st.code(f"Generated Regex: {pattern}", language="regex")

                compiled = re.compile(pattern, _combined_regex_flags())
                matches = compiled.findall(test_input)

                if matches:
                    st.success(f"โ Found {len(matches)} match(es):")
                    for i, match in enumerate(matches, 1):
                        st.markdown(f"**Match {i}:** `{match}`")

                    # Highlight each *distinct* match once: replacing per
                    # occurrence would re-wrap already-highlighted text when
                    # a match repeats. Streamlit color markup is ':green[...]'
                    # with no space after the colon.
                    highlighted = test_input
                    for match in dict.fromkeys(matches):
                        highlighted = highlighted.replace(
                            match,
                            f"**:green[{match}]**"
                        )
                    st.markdown("**Highlighted text:**")
                    st.markdown(highlighted)
                else:
                    st.warning("No matches found.")

            except re.error as e:
                st.error(f"Invalid regex generated: {e}")
            except Exception as e:  # UI boundary
                st.error(f"Error: {e}")

    st.divider()

    st.subheader("๐ Batch Testing")

    batch_file = st.file_uploader(
        "Upload test data (CSV with 'text' column)",
        type=["csv"],
        key="batch_test"
    )

    if batch_file:
        test_df = pd.read_csv(batch_file)

        if 'text' not in test_df.columns:
            st.error("CSV must have 'text' column.")
            return

        if st.button("๐ Run Batch Test"):
            if not setup_dspy():
                return

            results = []
            progress = st.progress(0)
            flags = _combined_regex_flags()  # hoisted: invariant across rows

            # enumerate for progress: iterrows() yields index *labels*, which
            # are only positional for a default RangeIndex.
            for pos, (_, row) in enumerate(test_df.iterrows(), start=1):
                try:
                    result = st.session_state.optimized_program(raw_text=row['text'])
                    pattern = result.regex_pattern

                    match = re.search(pattern, row['text'], flags)
                    extracted = match.group(0) if match else ""

                    results.append({
                        'text': row['text'][:100] + '...' if len(row['text']) > 100 else row['text'],
                        'pattern': pattern,
                        'extracted': extracted,
                        'success': bool(match)
                    })
                except Exception as e:
                    # Record the failure as a row so the batch keeps running.
                    results.append({
                        'text': row['text'][:100] + '...',
                        'pattern': 'ERROR',
                        'extracted': str(e),
                        'success': False
                    })

                progress.progress(pos / len(test_df))

            results_df = pd.DataFrame(results)

            success_rate = results_df['success'].mean()
            col1, col2 = st.columns(2)
            with col1:
                st.metric("Success Rate", f"{success_rate:.1%}")
            with col2:
                st.metric("Total Tests", len(results_df))

            st.dataframe(results_df, use_container_width=True)

            csv = results_df.to_csv(index=False)
            st.download_button(
                "๐ฅ Download Results",
                csv,
                "batch_test_results.csv",  # was "batch_test_results. csv" (stray space)
                "text/csv"
            )
|
|
|
|
|
|
|
|
|
|
|
def main():
    """Application entry point: sidebar, header, the three tabs, and footer."""
    render_sidebar()

    st.title("๐งฌ GEPA Regex Optimizer")
    st.caption("Automated regex generation with DSPy and evolutionary optimization")

    ingestion_tab, optimize_tab, testing_tab = st.tabs([
        "๐ฅ Data Ingestion",
        "๐งฌ Optimization",
        "๐ Testing"
    ])

    with ingestion_tab:
        render_data_ingestion_tab()
    with optimize_tab:
        render_optimization_tab()
    with testing_tab:
        render_testing_tab()

    st.divider()
    st.caption(
        "Built with Streamlit, DSPy, and GEPA | "
        "Configuration is auto-saved in the sidebar"
    )
|
|
|
|
|
|
|
|
# Run the app only when this file is executed directly (not when imported).
if __name__ == "__main__":
    main()
|
|
|