|
|
import copy
import json
import os
import re
from typing import Optional, Dict, Any, List

import httpx
import numpy as np
import pandas as pd
import streamlit as st

import dspy
from openai import OpenAI
from st_aggrid import AgGrid, GridOptionsBuilder, GridUpdateMode, DataReturnMode
|
|
|
|
|
|
|
|
# Page-level Streamlit setup; must be the first st.* call in the script.
st.set_page_config(
    page_title="GEPA Regex Optimizer",
    page_icon="๐งฌ",
    layout="wide",
    initial_sidebar_state="expanded",
)
|
|
|
|
|
|
|
|
# Defaults for every st.session_state key used by the app. The loop that
# follows seeds session state from this mapping on first run.
DEFAULT_STATE = {
    'dataset': None,                 # uploaded samples (pandas DataFrame)
    'selected_indices': [],          # row indices selected for train/val
    'optimized_program': None,       # compiled DSPy program, set after optimization
    'optimization_history': [],      # one {score, prompt, timestamp} dict per run
    # LLM connection / sampling settings (edited in the sidebar "LLM Settings").
    'config': {
        'model_name': 'gpt-4o',
        'api_key': '',
        'base_url': 'https://api.openai.com/v1',
        'timeout': 30,
        'max_retries': 3,
        'temperature': 0.7,
        'max_tokens': 1024,
    },
    # GEPA optimizer knobs (edited in the sidebar "GEPA Optimizer").
    'gepa_config': {
        'num_iterations': 5,
        'num_candidates': 3,
        'early_stopping_threshold': 0.95,
    },
    # Prompt text used to build / evolve the DSPy program.
    'prompts': {
        'system_instruction': "You are a Regex Expert. Given the input text, provide a high-precision Python regex pattern to extract the target text.",
        'gepa_meta_prompt': "Focus on precision. If the feedback says the match was too broad, use more specific character classes or anchors. If it missed the target, suggest more flexible patterns.",
        'output_description': "A Python-compatible regular expression",
    },
    'train_test_split': 0.8,         # fraction of selected rows used for training
    'regex_flags': [],               # names of re.* flags (e.g. 'IGNORECASE')
}
|
|
|
|
|
# Seed st.session_state with defaults on first run. Deep-copy each value so
# widget callbacks that mutate nested containers (e.g. st.session_state.config)
# cannot corrupt DEFAULT_STATE itself — otherwise ConfigManager.reset_to_defaults
# would "restore" the already-mutated values instead of the true defaults.
for key, value in DEFAULT_STATE.items():
    if key not in st.session_state:
        st.session_state[key] = copy.deepcopy(value)
|
|
|
|
|
|
|
|
|
|
|
class ConfigManager:
    """Manages application configuration with persistence."""

    # File the configuration snapshot is round-tripped through.
    CONFIG_FILE = "gepa_config.json"

    @staticmethod
    def save_config():
        """Save current configuration to file.

        Returns True on success; shows a Streamlit error and returns False
        on any failure.
        """
        snapshot = {
            key: st.session_state[key]
            for key in ('config', 'gepa_config', 'prompts',
                        'train_test_split', 'regex_flags')
        }
        try:
            with open(ConfigManager.CONFIG_FILE, 'w') as f:
                json.dump(snapshot, f, indent=2)
            return True
        except Exception as e:
            st.error(f"Failed to save config: {e}")
            return False

    @staticmethod
    def load_config():
        """Load configuration from file.

        Returns True when the file existed and was applied; False when it is
        missing or unreadable (a warning is shown for read/parse failures).
        """
        try:
            if os.path.exists(ConfigManager.CONFIG_FILE):
                with open(ConfigManager.CONFIG_FILE, 'r') as f:
                    stored = json.load(f)
                for key, value in stored.items():
                    if key not in st.session_state:
                        continue  # ignore keys the app no longer uses
                    if isinstance(value, dict):
                        # Merge so new default sub-keys survive older files.
                        st.session_state[key].update(value)
                    else:
                        st.session_state[key] = value
                return True
        except Exception as e:
            st.warning(f"Failed to load config: {e}")
        return False

    @staticmethod
    def reset_to_defaults():
        """Reset all configuration to defaults.

        Data and results ('dataset', 'optimized_program',
        'optimization_history') are deliberately preserved.
        """
        preserved = ('dataset', 'optimized_program', 'optimization_history')
        for key, value in DEFAULT_STATE.items():
            if key in preserved:
                continue
            # Copy containers so later edits don't alias DEFAULT_STATE.
            st.session_state[key] = value.copy() if isinstance(value, (dict, list)) else value
|
|
|
|
|
|
|
|
|
|
|
def setup_dspy() -> bool:
    """Configure DSPy with current settings.

    Builds an httpx client (with retry-capable transport), wraps it in an
    OpenAI client pointed at the configured base URL, and installs a dspy.LM
    as the global language model.

    Returns:
        True on success; on failure shows a Streamlit error and returns False.
    """
    config = st.session_state.config
    try:
        # BUGFIX: httpx.Limits has no `max_retries` parameter (it only takes
        # max_connections / max_keepalive_connections), so the old
        # httpx.Limits(max_retries=...) raised TypeError and this function
        # always failed. Connection retries are configured on the transport.
        http_client = httpx.Client(
            timeout=config['timeout'],
            transport=httpx.HTTPTransport(retries=config['max_retries']),
        )

        custom_openai_client = OpenAI(
            # Fall back to the env var; "empty" keeps local/self-hosted
            # endpoints (which ignore the key) from rejecting a blank value.
            api_key=config['api_key'] or os.getenv("OPENAI_API_KEY", "empty"),
            base_url=config['base_url'] or None,
            http_client=http_client
        )

        lm = dspy.LM(
            model=config['model_name'],
            client=custom_openai_client,
            temperature=config['temperature'],
            max_tokens=config['max_tokens']
        )
        dspy.configure(lm=lm)
        return True
    except Exception as e:
        st.error(f"LLM Configuration Error: {e}")
        return False
|
|
|
|
|
|
|
|
|
|
|
def create_regex_metric(flags: list):
    """Factory function to create metric with configurable regex flags."""

    # Fold the named flags (e.g. 'IGNORECASE') into one re.* bitmask up front;
    # unknown names silently contribute 0.
    compiled_flags = 0
    for flag_name in flags:
        compiled_flags |= getattr(re, flag_name, 0)

    def regex_metric_with_feedback(example, prediction, trace=None):
        """GEPA Metric with rich feedback for regex optimization."""
        target = example.ground_truth.strip()
        raw_text = example.raw_text
        pred_pattern = getattr(prediction, 'regex_pattern', '').strip()

        # Guard: the model produced no pattern at all.
        if not pred_pattern:
            return dspy.Prediction(
                score=0.0,
                feedback=(
                    f"No regex pattern provided. Target text: '{target}'. "
                    "Please output a valid Python regex string."
                ),
            )

        # Guard: the pattern does not compile.
        try:
            compiled = re.compile(pred_pattern, compiled_flags)
        except re.error as e:
            return dspy.Prediction(
                score=0.0,
                feedback=(
                    f"Invalid regex: '{pred_pattern}'. "
                    f"Error: {str(e)}. Check syntax and escape characters."
                ),
            )

        match = compiled.search(raw_text)
        extracted = match.group(0) if match else ""

        # Exact extraction earns full credit.
        if extracted == target:
            return dspy.Prediction(
                score=1.0,
                feedback=f"Perfect match! Correctly extracted '{target}'."
            )

        # Otherwise grade the failure mode and explain it to GEPA:
        # partial containment either way earns 0.3, everything else 0.
        score = 0.0
        feedback = f"Pattern '{pred_pattern}' produced incorrect result.\n"
        if not match:
            feedback += f"NO MATCH found. Target: '{target}'."
        elif target in extracted:
            score = 0.3
            feedback += (
                f"TOO BROAD: Extracted '{extracted}' contains target '{target}' "
                "plus extra characters. Use stricter boundaries or non-greedy quantifiers."
            )
        elif extracted in target:
            score = 0.3
            feedback += (
                f"TOO NARROW: Extracted '{extracted}' but target is '{target}'. "
                "Make pattern more inclusive."
            )
        else:
            feedback += f"WRONG MATCH: Got '{extracted}' instead of '{target}'."

        feedback += "\nAnalyze the target structure to isolate it uniquely."
        return dspy.Prediction(score=score, feedback=feedback)

    return regex_metric_with_feedback
|
|
|
|
|
|
|
|
|
|
|
class RegexSignature(dspy. Signature):
    """Dynamic signature for regex generation."""
    # NOTE: dspy treats a Signature's docstring as the instruction text sent to
    # the LM; RegexGenerator overwrites it at construction time, so the
    # docstring above is only a placeholder default.
    raw_text = dspy. InputField()       # input document to extract from
    regex_pattern = dspy.OutputField()  # LM-produced regex (description set by RegexGenerator)
|
|
|
|
|
|
|
|
class RegexGenerator(dspy.Module):
    """Configurable regex generation module."""

    def __init__(self, doc: str, output_desc: str):
        """Build a Predict over RegexSignature with customized prompt text.

        Args:
            doc: Instruction text installed as the signature's docstring
                (dspy uses it as the task instructions).
            output_desc: Description attached to the regex_pattern output field.
        """
        super().__init__()
        self.predictor = dspy.Predict(RegexSignature)
        # NOTE(review): these assignments appear to mutate the RegexSignature
        # class itself, which would be shared across all RegexGenerator
        # instances — confirm dspy copies the signature per Predict.
        self.predictor.signature.__doc__ = doc
        self.predictor.signature.regex_pattern. desc = output_desc

    def forward(self, raw_text: str):
        """Run the predictor; returns a Prediction with a regex_pattern field."""
        return self. predictor(raw_text=raw_text)
|
|
|
|
|
|
|
|
|
|
|
def render_sidebar():
    """Render the configuration sidebar.

    Every widget writes its value straight back into st.session_state on each
    rerun, so the rest of the app always reads the current settings without
    explicit callbacks.
    """
    with st.sidebar:
        st.title("โ๏ธ Configuration")

        # --- Persistence controls: save/load gepa_config.json, or reset ---
        col1, col2, col3 = st.columns(3)
        with col1:
            if st.button("๐พ Save", use_container_width=True):
                if ConfigManager.save_config():
                    st.success("Saved!")
        with col2:
            if st.button("๐ Load", use_container_width=True):
                if ConfigManager.load_config():
                    st.success("Loaded!")
                    # Rerun so the widgets below re-render with loaded values.
                    st.rerun()
        with col3:
            if st.button("๐ Reset", use_container_width=True):
                ConfigManager.reset_to_defaults()
                st.rerun()

        st.divider()

        # --- LLM connection & sampling settings -> st.session_state.config ---
        with st.expander("๐ค LLM Settings", expanded=True):
            st.session_state.config['model_name'] = st.text_input(
                "Model Name",
                value=st.session_state.config['model_name'],
                help="e.g., gpt-4o, gpt-3.5-turbo, claude-3-opus"
            )

            st.session_state.config['api_key'] = st.text_input(
                "API Key",
                value=st.session_state.config['api_key'],
                type="password",
                help="Leave empty to use OPENAI_API_KEY env var"
            )

            st.session_state.config['base_url'] = st.text_input(
                "Base URL",
                value=st.session_state.config['base_url'],
                help="Custom API endpoint (e.g., for Azure, local models)"
            )

            col1, col2 = st.columns(2)
            with col1:
                st.session_state.config['timeout'] = st.number_input(
                    "Timeout (s)",
                    min_value=5,
                    max_value=300,
                    value=st.session_state.config['timeout']
                )
            with col2:
                st.session_state.config['max_retries'] = st.number_input(
                    "Max Retries",
                    min_value=0,
                    max_value=10,
                    value=st.session_state.config['max_retries']
                )

            col1, col2 = st.columns(2)
            with col1:
                st.session_state.config['temperature'] = st.slider(
                    "Temperature",
                    min_value=0.0,
                    max_value=2.0,
                    value=st. session_state.config['temperature'],
                    step=0.1
                )
            with col2:
                st.session_state.config['max_tokens'] = st.number_input(
                    "Max Tokens",
                    min_value=64,
                    max_value=8192,
                    value=st.session_state.config['max_tokens']
                )

        # --- GEPA optimizer knobs -> st.session_state.gepa_config ---
        with st. expander("๐งฌ GEPA Optimizer", expanded=False):
            st.session_state.gepa_config['num_iterations'] = st.slider(
                "Iterations",
                min_value=1,
                max_value=20,
                value=st. session_state.gepa_config['num_iterations'],
                help="Number of optimization iterations"
            )

            st.session_state. gepa_config['num_candidates'] = st.slider(
                "Candidates per Iteration",
                min_value=1,
                max_value=10,
                value=st.session_state.gepa_config['num_candidates'],
                help="Number of candidate patterns to evaluate"
            )

            st. session_state.gepa_config['early_stopping_threshold'] = st.slider(
                "Early Stopping Threshold",
                min_value=0.5,
                max_value=1.0,
                value=st.session_state.gepa_config['early_stopping_threshold'],
                step=0.05,
                help="Stop if this score is reached"
            )

        # --- Prompt text used to build/evolve the program ---
        with st.expander("๐ Prompts", expanded=False):
            st.session_state.prompts['system_instruction'] = st.text_area(
                "System Instruction",
                value=st.session_state.prompts['system_instruction'],
                height=100,
                help="Initial instruction for regex generation"
            )

            st.session_state. prompts['gepa_meta_prompt'] = st.text_area(
                "GEPA Evolution Prompt",
                value=st.session_state.prompts['gepa_meta_prompt'],
                height=100,
                help="Instructions for GEPA's prompt evolution"
            )

            st.session_state. prompts['output_description'] = st. text_input(
                "Output Field Description",
                value=st.session_state.prompts['output_description'],
                help="Description for the regex output field"
            )

        # --- re.* flag names applied when compiling generated patterns ---
        with st. expander("๐ง Regex Options", expanded=False):
            flag_options = ['IGNORECASE', 'MULTILINE', 'DOTALL', 'VERBOSE', 'ASCII']
            st.session_state. regex_flags = st.multiselect(
                "Regex Flags",
                options=flag_options,
                default=st.session_state. regex_flags,
                help="Python regex flags to apply"
            )

        # --- Train/validation ratio used by stratified_train_val_split ---
        with st.expander("๐ Data Settings", expanded=False):
            st.session_state.train_test_split = st.slider(
                "Train/Validation Split",
                min_value=0.5,
                max_value=0.95,
                value=st.session_state.train_test_split,
                step=0.05,
                help="Proportion of data for training"
            )
|
|
|
|
|
|
|
|
|
|
|
def stratified_train_val_split(
    df: pd.DataFrame,
    train_ratio: float = 0.8,
    stratify_column: str = 'ground_truth',
    random_state: int = 42
) -> tuple:
    """
    Perform stratified train/validation split.

    Groups samples by ground_truth pattern and splits proportionally, so each
    distinct target is represented in training. Every group contributes at
    least one training row; groups with two or more rows also contribute at
    least one validation row.

    Args:
        df: Input frame; must contain `stratify_column`.
        train_ratio: Fraction of each group assigned to training.
        stratify_column: Column whose (truncated) value defines the strata.
        random_state: Shuffle seed for reproducible splits.

    Returns:
        (train_df, val_df); val_df may be an empty DataFrame when every group
        has a single row.
    """
    # Use a local Generator instead of np.random.seed(): seeding the global
    # NumPy RNG is a hidden side effect on every other numpy consumer.
    rng = np.random.default_rng(random_state)

    df = df.copy()
    # Truncate long targets so near-duplicates still land in one stratum;
    # empty/NaN values get their own bucket.
    df['_strat_key'] = df[stratify_column].apply(
        lambda x: str(x)[:50] if pd.notna(x) and x != '' else '_empty_'
    )

    train_indices = []
    val_indices = []

    for _, group in df.groupby('_strat_key'):
        indices = group.index.tolist()
        rng.shuffle(indices)

        # At least one training row per group, even for tiny groups.
        split_idx = max(1, int(len(indices) * train_ratio))

        # Keep at least one validation row for multi-row groups.
        if len(indices) > 1 and split_idx == len(indices):
            split_idx = len(indices) - 1

        train_indices.extend(indices[:split_idx])
        val_indices.extend(indices[split_idx:])

    train_df = df.loc[train_indices].drop(columns=['_strat_key'])
    val_df = df.loc[val_indices].drop(columns=['_strat_key']) if val_indices else pd.DataFrame()

    return train_df, val_df
|
|
|
|
|
|
|
|
|
|
|
def save_annotated_data(df: pd.DataFrame, selected_indices: List[int], filepath: str) -> bool:
    """Save annotated data with selection state.

    Writes JSON when the filename ends in '.json', CSV otherwise; a boolean
    '_selected' column records which rows were checked so a later load can
    restore the selection. Returns True on success, False (with a Streamlit
    error) on failure.
    """
    try:
        export = df.copy()
        export['_selected'] = export.index.isin(selected_indices)

        if filepath.endswith('.json'):
            export.to_json(filepath, orient='records', indent=2)
        else:
            export.to_csv(filepath, index=False)
        return True
    except Exception as e:
        st.error(f"Failed to save data: {e}")
        return False
|
|
|
|
|
|
|
|
def load_annotated_data(filepath) -> tuple:
    """Load annotated data with selection state.

    Accepts a path string or a file-like object (e.g. a Streamlit
    UploadedFile, which has a .name attribute) and dispatches on the
    extension. The upload widget accepts csv/json/xlsx, but this function
    previously always called pd.read_csv, so JSON and Excel uploads failed —
    now each format is parsed with the matching pandas reader.

    Returns:
        (DataFrame, selected_indices) on success; (None, []) on failure
        (with a Streamlit error shown).

    Raises within the handled try: ValueError when the 'text' column is
    missing (surfaced via st.error, not propagated).
    """
    try:
        # UploadedFile exposes .name; plain paths are used as-is.
        name = str(getattr(filepath, 'name', filepath)).lower()
        if name.endswith('.json'):
            df = pd.read_json(filepath, orient='records')
        elif name.endswith(('.xlsx', '.xls')):
            df = pd.read_excel(filepath)
        else:
            df = pd.read_csv(filepath)

        # Restore which rows were checked when the file was saved.
        selected_indices = []
        if '_selected' in df.columns:
            selected_indices = df[df['_selected'] == True].index.tolist()
            df = df.drop(columns=['_selected'])

        if 'text' not in df.columns:
            raise ValueError("Dataset must have a 'text' column.")

        # ground_truth is optional in uploads; create it empty for annotation.
        if 'ground_truth' not in df.columns:
            df['ground_truth'] = ''

        return df, selected_indices
    except Exception as e:
        st.error(f"Failed to load data: {e}")
        return None, []
|
|
|
|
|
|
|
|
|
|
|
def render_data_ingestion_tab():
    """Render the data ingestion tab.

    Upload a dataset, annotate ground truth inline in an AgGrid, pick rows
    for training via checkboxes, persist the annotations, and preview the
    stratified train/val split statistics.
    """
    st.header("๐ฅ Data Ingestion & Annotation")

    col1, col2 = st.columns([2, 1])

    with col1:
        uploaded = st.file_uploader(
            "Upload Dataset",
            type=["csv", "json", "xlsx"],
            help="CSV/JSON/Excel with 'text' column (ground_truth optional, _selected for pre-selected rows)"
        )

    with col2:
        st.markdown("**Expected Format:**")
        st.code("text,ground_truth,_selected\n'Sample text','expected',true", language="csv")

    if uploaded:
        # load_annotated_data shows its own st.error; the outer try catches
        # anything raised before that handler kicks in.
        try:
            df, selected_indices = load_annotated_data(uploaded)
            if df is not None:
                # Reset the index so grid row order and selected_indices agree.
                st.session_state.dataset = df.reset_index(drop=True)
                st.session_state.selected_indices = selected_indices
                st.success(f"โ Loaded {len(df)} samples ({len(selected_indices)} pre-selected)")
        except Exception as e:
            st.error(f"Failed to load file: {e}")
            return

    if st.session_state.dataset is not None:
        df = st.session_state.dataset.copy()

        st.subheader("๐ Annotate Ground Truth")
        st.caption("Edit 'ground_truth' column and select rows (checkbox) to include in training/validation.")

        # Row indices to pre-check in the grid (restored from a saved file).
        pre_selected_rows = st.session_state.get('selected_indices', [])

        # Build the grid: read-only wide 'text' column, editable highlighted
        # 'ground_truth', checkbox multi-select, 10-row pagination.
        gb = GridOptionsBuilder.from_dataframe(df)
        gb.configure_default_column(
            resizable=True,
            filterable=True,
            sortable=True
        )
        gb.configure_column(
            "text",
            width=500,
            wrapText=True,
            autoHeight=True,
            editable=False
        )
        gb.configure_column(
            "ground_truth",
            editable=True,
            width=300,
            cellStyle={'backgroundColor': '#fffde7'}
        )
        gb.configure_selection(
            selection_mode='multiple',
            use_checkbox=True,
            pre_selected_rows=pre_selected_rows
        )
        gb.configure_pagination(paginationAutoPageSize=False, paginationPageSize=10)

        grid_response = AgGrid(
            df,
            gridOptions=gb.build(),
            update_mode=GridUpdateMode.MODEL_CHANGED | GridUpdateMode.SELECTION_CHANGED,
            data_return_mode=DataReturnMode.FILTERED_AND_SORTED,
            fit_columns_on_grid_load=False,
            theme='streamlit',
            height=400,
            key='annotation_grid'
        )

        # Persist any in-grid edits (e.g. ground_truth changes) back to state.
        st.session_state.dataset = pd.DataFrame(grid_response['data'])

        selected_rows = grid_response.get('selected_rows', [])
        if selected_rows is not None and len(selected_rows) > 0:
            selected_df = pd.DataFrame(selected_rows)
            if not selected_df.empty:
                # NOTE(review): selected_df is rebuilt from the returned rows,
                # so .index here is 0..k-1 (positions within the selection),
                # not the original dataset row indices — verify this maps
                # correctly when a non-prefix subset of rows is selected.
                st.session_state.selected_indices = selected_df.index.tolist()
        else:
            st.session_state.selected_indices = []

        st.divider()

        # --- Persist annotations to disk or as a browser download ---
        st.subheader("๐พ Save Annotated Data")
        col1, col2, col3 = st.columns([2, 1, 1])

        with col1:
            save_filename = st.text_input(
                "Filename",
                value="annotated_data.csv",
                help="Enter filename (.csv or .json)"
            )

        with col2:
            if st.button("๐พ Save to File", use_container_width=True):
                if save_annotated_data(
                    st.session_state.dataset,
                    st.session_state.selected_indices,
                    save_filename
                ):
                    st.success(f"โ Saved to {save_filename}")

        with col3:
            # Build the CSV eagerly each rerun so the download button always
            # reflects the latest edits and selection.
            save_df = st.session_state.dataset.copy()
            save_df['_selected'] = save_df.index.isin(st.session_state.selected_indices)

            csv_data = save_df.to_csv(index=False)
            st.download_button(
                "๐ฅ Download CSV",
                csv_data,
                file_name="annotated_data.csv",
                mime="text/csv",
                use_container_width=True
            )

        st.divider()

        # --- Summary metrics over the current selection/annotation state ---
        st.subheader("๐ Data Statistics")

        total = len(st.session_state.dataset)
        annotated = (st.session_state.dataset['ground_truth'].astype(str) != '').sum()
        selected_count = len(st.session_state.selected_indices)

        # Rows that are both selected AND have a non-empty ground truth —
        # the population the optimizer will actually train on.
        selected_df = st.session_state.dataset.iloc[st.session_state.selected_indices] if st.session_state.selected_indices else pd.DataFrame()
        selected_annotated = selected_df[selected_df['ground_truth'].astype(str) != ''] if not selected_df.empty else pd.DataFrame()

        if len(selected_annotated) >= 2:
            # Dry-run the split so the metrics match what optimization will use.
            train_df, val_df = stratified_train_val_split(
                selected_annotated,
                train_ratio=st.session_state.train_test_split
            )
            train_size = len(train_df)
            val_size = len(val_df)
        else:
            train_size = 0
            val_size = 0

        col1, col2, col3, col4 = st.columns(4)

        with col1:
            st.metric("Total Samples", total)
        with col2:
            st.metric("Annotated", f"{annotated}/{total}")
        with col3:
            st.metric("Selected", selected_count, help="Rows selected for training/validation")
        with col4:
            st.metric("Train/Val", f"{train_size}/{val_size}", help="Stratified split of selected & annotated rows")

        if selected_count == 0:
            st.info("๐ก Select rows using checkboxes to include them in training/validation.")
        elif len(selected_annotated) < 2:
            st.warning("โ ๏ธ Please select at least 2 annotated rows for training.")

        if len(selected_annotated) >= 2:
            with st.expander("๐ Stratification Preview"):
                # Histogram of (truncated) ground-truth values — the strata.
                pattern_counts = selected_annotated['ground_truth'].apply(
                    lambda x: str(x)[:30] + '...' if len(str(x)) > 30 else str(x)
                ).value_counts()

                st.markdown("**Ground Truth Pattern Distribution:**")
                st.bar_chart(pattern_counts)

                st.caption(f"Training: {train_size} samples, Validation: {val_size} samples")

        with st.expander("๐ Sample Preview"):
            st.dataframe(
                st.session_state.dataset.head(5),
                use_container_width=True
            )
|
|
|
|
|
|
|
|
def render_optimization_tab():
    """Render the optimization tab.

    Builds train/validation sets from the selected (or all) annotated rows,
    runs the GEPA optimizer over a RegexGenerator program, evaluates on the
    validation set, and shows the evolved prompt and run history.
    """
    st.header("๐งฌ GEPA Optimization")

    if st.session_state.dataset is None:
        st.warning("โ ๏ธ Please upload and annotate data first.")
        return

    df = st.session_state.dataset
    selected_indices = st.session_state.get('selected_indices', [])

    # Prefer explicitly selected rows; fall back to every annotated row.
    if selected_indices:
        selected_df = df.iloc[selected_indices]
        annotated_df = selected_df[selected_df['ground_truth'].astype(str) != '']
        use_selection = True
    else:
        annotated_df = df[df['ground_truth'].astype(str) != '']
        use_selection = False

    if len(annotated_df) < 2:
        if use_selection:
            st.warning("โ ๏ธ Please select and annotate at least 2 samples in the Data Ingestion tab.")
        else:
            st.warning("โ ๏ธ Please annotate at least 2 samples or select rows for training.")
        return

    train_df, val_df = stratified_train_val_split(
        annotated_df,
        train_ratio=st.session_state.train_test_split
    )

    col1, col2, col3 = st.columns(3)
    with col1:
        st.info(f"๐ Training samples: {len(train_df)}")
    with col2:
        st.info(f"๐งช Validation samples: {len(val_df)}")
    with col3:
        if use_selection:
            st.success("โ Using selected rows")
        else:
            st.warning("โ ๏ธ Using all annotated rows")

    col1, col2, col3 = st.columns([1, 1, 2])

    with col1:
        run_button = st.button(
            "๐ Run Optimization",
            type="primary",
            use_container_width=True
        )

    with col2:
        if st.button("๐ Reset Results", use_container_width=True):
            st.session_state.optimized_program = None
            st.session_state.optimization_history = []
            st.rerun()

    if run_button:
        if not setup_dspy():
            return

        # Wrap rows as dspy Examples; only raw_text is an input field,
        # ground_truth is the label the metric compares against.
        trainset = [
            dspy.Example(
                raw_text=row['text'],
                ground_truth=row['ground_truth']
            ).with_inputs('raw_text')
            for _, row in train_df.iterrows()
        ]

        valset = [
            dspy.Example(
                raw_text=row['text'],
                ground_truth=row['ground_truth']
            ).with_inputs('raw_text')
            for _, row in val_df.iterrows()
        ]

        progress_bar = st.progress(0)
        status_text = st.empty()

        try:
            with st.spinner("๐งฌ GEPA is evolving regex patterns..."):
                status_text.text("Initializing optimizer...")

                # BUGFIX: the bare name GEPA was never imported anywhere in
                # this file and raised NameError on every run; the optimizer
                # lives in the dspy namespace.
                optimizer = dspy.GEPA(
                    metric=create_regex_metric(st.session_state.regex_flags),
                    num_iterations=st.session_state.gepa_config['num_iterations'],
                    num_candidates=st.session_state.gepa_config['num_candidates'],
                )

                progress_bar.progress(20)
                status_text.text("Creating initial program...")

                program = RegexGenerator(
                    doc=st.session_state.prompts['system_instruction'],
                    output_desc=st.session_state.prompts['output_description']
                )

                progress_bar.progress(40)
                status_text.text("Running optimization...")

                optimized = optimizer.compile(
                    program,
                    trainset=trainset,
                )

                progress_bar.progress(80)
                status_text.text("Evaluating on validation set...")

                # Score the optimized program on held-out examples with the
                # same metric used during optimization.
                metric_fn = create_regex_metric(st.session_state.regex_flags)
                val_scores = []
                for example in valset:
                    pred = optimized(raw_text=example.raw_text)
                    result = metric_fn(example, pred)
                    val_scores.append(result.score)

                avg_score = sum(val_scores) / len(val_scores) if val_scores else 0

                progress_bar.progress(100)
                status_text.text("Complete!")

                st.session_state.optimized_program = optimized
                st.session_state.optimization_history.append({
                    'score': avg_score,
                    'prompt': optimized.predictor.signature.__doc__,
                    'timestamp': pd.Timestamp.now()
                })

                st.success(f"โ Optimization Complete! Validation Score: {avg_score:.2%}")

        except Exception as e:
            st.error(f"Optimization failed: {e}")
            return

    if st.session_state.optimized_program:
        st.subheader("๐ Results")

        with st.expander("๐ Evolved Prompt", expanded=True):
            st.code(
                st.session_state.optimized_program.predictor.signature.__doc__,
                language="text"
            )

        if st.session_state.optimization_history:
            with st.expander("๐ Optimization History"):
                history_df = pd.DataFrame(st.session_state.optimization_history)
                st.dataframe(history_df, use_container_width=True)
|
|
|
|
|
|
|
|
def render_testing_tab():
    """Render the testing tab.

    Offers a single ad-hoc test (generate a regex for pasted text and show
    its matches) and a batch test over an uploaded CSV, using the program
    produced by the optimization tab.
    """
    st.header("๐ Test & Validate")

    if st.session_state.optimized_program is None:
        st. warning("โ ๏ธ Please run optimization first.")
        return

    # --- Single ad-hoc test ---
    st.subheader("๐งช Single Test")

    test_input = st.text_area(
        "Enter test text",
        height=100,
        placeholder="Paste text here to extract regex pattern..."
    )

    col1, col2 = st.columns([1, 3])
    with col1:
        test_button = st.button("โถ๏ธ Generate & Run", type="primary")

    if test_button and test_input:
        if not setup_dspy():
            return

        with st.spinner("Generating regex... "):
            try:
                # Ask the optimized program for a pattern for this text.
                result = st.session_state.optimized_program(raw_text=test_input)
                pattern = result.regex_pattern

                st.code(f"Generated Regex: {pattern}", language="regex")

                # Rebuild the flag bitmask from the configured flag names.
                flags = 0
                for flag in st.session_state.regex_flags:
                    flags |= getattr(re, flag, 0)

                compiled = re.compile(pattern, flags)
                # NOTE(review): findall returns group tuples/substrings when
                # the pattern contains capture groups — the .replace() below
                # would then fail or mis-highlight; confirm patterns are
                # group-free or switch to finditer with match.group(0).
                matches = compiled.findall(test_input)

                if matches:
                    st.success(f"โ Found {len(matches)} match(es):")
                    for i, match in enumerate(matches, 1):
                        st.markdown(f"**Match {i}:** `{match}`")

                    # Mark each match in the original text.
                    # NOTE(review): Streamlit's colored-text markdown syntax is
                    # ":green[...]" with no space after the colon — verify this
                    # "**: green[...]**" form actually renders as intended.
                    highlighted = test_input
                    for match in matches:
                        highlighted = highlighted.replace(
                            match,
                            f"**: green[{match}]**"
                        )
                    st.markdown("**Highlighted text:**")
                    st.markdown(highlighted)
                else:
                    st. warning("No matches found.")

            except re.error as e:
                st.error(f"Invalid regex generated: {e}")
            except Exception as e:
                st.error(f"Error: {e}")

    st.divider()

    # --- Batch test over an uploaded CSV ---
    st. subheader("๐ Batch Testing")

    batch_file = st.file_uploader(
        "Upload test data (CSV with 'text' column)",
        type=["csv"],
        key="batch_test"
    )

    if batch_file:
        test_df = pd. read_csv(batch_file)

        if 'text' not in test_df. columns:
            st.error("CSV must have 'text' column.")
            return

        if st.button("๐ Run Batch Test"):
            if not setup_dspy():
                return

            results = []
            progress = st.progress(0)

            for i, row in test_df.iterrows():
                try:
                    # One LM call per row: generate, then apply the pattern.
                    result = st.session_state.optimized_program(raw_text=row['text'])
                    pattern = result.regex_pattern

                    flags = 0
                    for flag in st.session_state. regex_flags:
                        flags |= getattr(re, flag, 0)

                    match = re.search(pattern, row['text'], flags)
                    extracted = match.group(0) if match else ""

                    results.append({
                        'text': row['text'][: 100] + '...' if len(row['text']) > 100 else row['text'],
                        'pattern': pattern,
                        'extracted': extracted,
                        'success': bool(match)
                    })
                except Exception as e:
                    # Record the failure as a row so the report stays complete.
                    results.append({
                        'text': row['text'][:100] + '...',
                        'pattern': 'ERROR',
                        'extracted': str(e),
                        'success': False
                    })

                progress. progress((i + 1) / len(test_df))

            results_df = pd. DataFrame(results)

            # Fraction of rows where the generated pattern matched at all.
            success_rate = results_df['success']. mean()
            col1, col2 = st.columns(2)
            with col1:
                st.metric("Success Rate", f"{success_rate:.1%}")
            with col2:
                st.metric("Total Tests", len(results_df))

            st.dataframe(results_df, use_container_width=True)

            csv = results_df. to_csv(index=False)
            st.download_button(
                "๐ฅ Download Results",
                csv,
                "batch_test_results. csv",
                "text/csv"
            )
|
|
|
|
|
|
|
|
|
|
|
def main():
    """Top-level page layout: sidebar configuration plus three workflow tabs."""
    render_sidebar()

    st.title("๐งฌ GEPA Regex Optimizer")
    st.caption("Automated regex generation with DSPy and evolutionary optimization")

    tab_labels = [
        "๐ฅ Data Ingestion",
        "๐งฌ Optimization",
        "๐ Testing",
    ]
    renderers = (
        render_data_ingestion_tab,
        render_optimization_tab,
        render_testing_tab,
    )
    # Render each tab body inside its tab context.
    for tab, render in zip(st.tabs(tab_labels), renderers):
        with tab:
            render()

    st.divider()
    st.caption(
        "Built with Streamlit, DSPy, and GEPA | "
        "Configuration is auto-saved in the sidebar"
    )
|
|
|
|
|
|
|
|
# Script entry point (streamlit run <this file>).
if __name__ == "__main__":
    main()
|
|
|