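"""Gradio Space for QSBench entanglement-score regression.

Loads QSBench demo datasets from the Hugging Face Hub, derives structural
features from adjacency matrices and QASM text, and trains a scikit-learn
RandomForestRegressor to predict the Meyer-Wallach entanglement score.
"""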
import ast
import logging
import re
from typing import Dict, List, Optional, Tuple
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from datasets import load_dataset
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
APP_TITLE = "Entanglement Score Regression"
APP_SUBTITLE = "Predict the continuous Meyer-Wallach entanglement score from circuit topology and gate structure."
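# UI dropdown labels mapped to the Hugging Face dataset repositories they load.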
REPO_CONFIG = {
"Core (Clean)": "QSBench/QSBench-Core-v1.0.0-demo",
"Depolarizing Noise": "QSBench/QSBench-Depolarizing-Demo-v1.0.0",
"Amplitude Damping": "QSBench/QSBench-Amplitude-v1.0.0-demo",
"Transpilation (10q)": "QSBench/QSBench-Transpilation-v1.0.0-demo",
}
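# Identifier, metadata, raw-text, and target columns that must never be offered as model features.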
NON_FEATURE_COLS = {
"sample_id",
"sample_seed",
"circuit_hash",
"split",
"circuit_qasm",
"qasm_raw",
"qasm_transpiled",
"circuit_type_resolved",
"circuit_type_requested",
"noise_type",
"noise_prob",
"observable_bases",
"observable_mode",
"backend_device",
"precision_mode",
"circuit_signature",
"entanglement",
"meyer_wallach",
}
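# Column-name substrings of measurement-derived quantities, excluded to avoid leaking label information.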
SOFT_EXCLUDE_PATTERNS = ["ideal_", "noisy_", "error_", "sign_ideal_", "sign_noisy_"]
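# Per-session cache so each dataset is downloaded and enriched only once.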
_ASSET_CACHE: Dict[str, pd.DataFrame] = {}
def load_dataset_df(dataset_key: str) -> pd.DataFrame:
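    """Return the train split of the selected dataset as an enriched, cached DataFrame."""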
if dataset_key not in _ASSET_CACHE:
        logger.info("Loading %s from the Hugging Face Hub", REPO_CONFIG[dataset_key])
        ds = load_dataset(REPO_CONFIG[dataset_key])
df = pd.DataFrame(ds["train"])
df = enrich_dataframe(df)
_ASSET_CACHE[dataset_key] = df
return _ASSET_CACHE[dataset_key]
def safe_parse(value):
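    """Parse stringified Python literals (e.g. adjacency lists); return the value unchanged on failure."""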
if isinstance(value, str):
try:
return ast.literal_eval(value)
except Exception:
return value
return value
def adjacency_features(adj_value) -> Dict[str, float]:
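    """Derive edge count, density, and degree statistics from an adjacency matrix; NaNs when unparsable."""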
parsed = safe_parse(adj_value)
if not isinstance(parsed, list) or len(parsed) == 0:
return {
"adj_edge_count": np.nan,
"adj_density": np.nan,
"adj_degree_mean": np.nan,
"adj_degree_std": np.nan,
}
try:
arr = np.array(parsed, dtype=float)
n = arr.shape[0]
edge_count = float(np.triu(arr, k=1).sum())
possible_edges = float(n * (n - 1) / 2)
density = edge_count / possible_edges if possible_edges > 0 else np.nan
degrees = arr.sum(axis=1)
return {
"adj_edge_count": edge_count,
"adj_density": density,
"adj_degree_mean": float(np.mean(degrees)),
"adj_degree_std": float(np.std(degrees)),
}
except Exception:
return {
"adj_edge_count": np.nan,
"adj_density": np.nan,
"adj_degree_mean": np.nan,
"adj_degree_std": np.nan,
}
def qasm_features(qasm_value) -> Dict[str, float]:
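    """Compute simple text statistics (length, lines, gate keywords, measurements, comments) from QASM source."""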
if not isinstance(qasm_value, str) or not qasm_value.strip():
return {
"qasm_length": np.nan,
"qasm_line_count": np.nan,
"qasm_gate_keyword_count": np.nan,
"qasm_measure_count": np.nan,
"qasm_comment_count": np.nan,
}
text = qasm_value
lines = [line for line in text.splitlines() if line.strip()]
gate_keywords = re.findall(r"\b(cx|h|x|y|z|rx|ry|rz|u1|u2|u3|u|swap|cz|ccx|rxx|ryy|rzz)\b", text, flags=re.IGNORECASE)
measure_count = len(re.findall(r"\bmeasure\b", text, flags=re.IGNORECASE))
comment_count = sum(1 for line in lines if line.strip().startswith("//"))
return {
"qasm_length": float(len(text)),
"qasm_line_count": float(len(lines)),
"qasm_gate_keyword_count": float(len(gate_keywords)),
"qasm_measure_count": float(measure_count),
"qasm_comment_count": float(comment_count),
}
def enrich_dataframe(df: pd.DataFrame) -> pd.DataFrame:
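    """Append adjacency- and QASM-derived feature columns to the raw dataset."""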
df = df.copy()
if "adjacency" in df.columns:
adj_df = df["adjacency"].apply(adjacency_features).apply(pd.Series)
df = pd.concat([df, adj_df], axis=1)
qasm_source = "qasm_transpiled" if "qasm_transpiled" in df.columns else "qasm_raw"
if qasm_source in df.columns:
qasm_df = df[qasm_source].apply(qasm_features).apply(pd.Series)
df = pd.concat([df, qasm_df], axis=1)
return df
def load_guide_content() -> str:
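    """Read GUIDE.md for the Guide tab, falling back to a placeholder if the file is missing."""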
try:
with open("GUIDE.md", "r", encoding="utf-8") as f:
return f.read()
except FileNotFoundError:
return "# Guide\n\nGuide file not found."
def get_available_feature_columns(df: pd.DataFrame) -> List[str]:
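    """List numeric columns usable as model inputs, excluding identifiers, targets, and leakage-prone columns."""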
numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
features = []
for col in numeric_cols:
if col in NON_FEATURE_COLS:
continue
if any(pattern in col for pattern in SOFT_EXCLUDE_PATTERNS):
continue
features.append(col)
return sorted(features)
def default_feature_selection(features: List[str]) -> List[str]:
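    """Pre-select a compact default feature set from the columns actually present in the dataset."""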
preferred = [
"gate_entropy",
"adj_density",
"adj_degree_mean",
"adj_degree_std",
"depth",
"total_gates",
"cx_count",
"qasm_length",
]
return [f for f in preferred if f in features]
def make_regression_figure(y_true, y_pred, feature_names=None, importances=None):
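    """Build a three-panel figure: predicted vs. true scatter, residual histogram, and top-10 feature importances."""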
fig = plt.figure(figsize=(20, 6))
gs = fig.add_gridspec(1, 3)
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
ax3 = fig.add_subplot(gs[0, 2])
    # Panel 1: predicted vs. true scores with the identity line for reference.
    ax1.scatter(y_true, y_pred, alpha=0.75)
    min_v = min(float(np.min(y_true)), float(np.min(y_pred)))
    max_v = max(float(np.max(y_true)), float(np.max(y_pred)))
    ax1.plot([min_v, max_v], [min_v, max_v], linestyle="--")
    ax1.set_xlabel("True Meyer-Wallach score")
    ax1.set_ylabel("Predicted score")
    # Panel 2: residual distribution.
    residuals = y_true - y_pred
    ax2.hist(residuals, bins=20)
    ax2.set_xlabel("Residual (true - predicted)")
    # Panel 3: top-10 feature importances from the fitted regressor.
    if importances is not None:
        idx = np.argsort(importances)[-10:]
        ax3.barh([feature_names[i] for i in idx], importances[idx])
        ax3.set_xlabel("Feature importance")
fig.tight_layout()
return fig
def refresh_explorer(dataset_key, split_name):
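    """Refresh the Explorer tab: split choices, preview rows, sample QASM, and summary text for the chosen dataset."""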
df = load_dataset_df(dataset_key)
splits = df["split"].dropna().unique().tolist() if "split" in df.columns else ["train"]
if split_name not in splits:
split_name = splits[0]
filtered = df[df["split"] == split_name] if "split" in df.columns else df
display_df = filtered.head(10)
    # Guard against an empty preview (e.g. a split with no rows) before sampling QASM text.
    has_rows = len(display_df) > 0
    raw_qasm = display_df["qasm_raw"].iloc[0] if has_rows and "qasm_raw" in display_df.columns else "// N/A"
    transpiled_qasm = display_df["qasm_transpiled"].iloc[0] if has_rows and "qasm_transpiled" in display_df.columns else "// N/A"
return (
gr.update(choices=splits, value=split_name),
display_df,
raw_qasm,
transpiled_qasm,
f"### {dataset_key} Explorer",
f"Rows: {len(df)}",
)
def sync_feature_picker(dataset_key):
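    """Update the feature checkbox group with the columns available in the selected dataset."""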
df = load_dataset_df(dataset_key)
features = get_available_feature_columns(df)
defaults = default_feature_selection(features)
return gr.update(choices=features, value=defaults)
def train_regressor(dataset_key, feature_columns, test_size, n_estimators, max_depth, random_state):
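    """Train a RandomForest pipeline on the selected features and return a diagnostic figure plus test-set metrics."""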
if not feature_columns:
return None, "No features selected"
df = load_dataset_df(dataset_key)
    # Drop rows with missing feature or target values; bail out if too few remain to split and train.
    train_df = df.dropna(subset=feature_columns + ["meyer_wallach"])
    if len(train_df) < 10:
        return None, "Not enough complete rows to train on the selected features."
    X = train_df[feature_columns]
    y = train_df["meyer_wallach"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_size, random_state=int(random_state)
)
max_depth_value = int(max_depth) if max_depth is not None else None
model = Pipeline([
("imputer", SimpleImputer()),
("scaler", StandardScaler()),
("regressor", RandomForestRegressor(
n_estimators=int(n_estimators),
max_depth=max_depth_value,
random_state=int(random_state),
n_jobs=-1
))
])
model.fit(X_train, y_train)
preds = model.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test, preds))
mae = mean_absolute_error(y_test, preds)
r2 = r2_score(y_test, preds)
importances = model.named_steps["regressor"].feature_importances_
fig = make_regression_figure(y_test.to_numpy(), preds, feature_columns, importances)
results = f"RMSE: {rmse:.4f}\nMAE: {mae:.4f}\nR2: {r2:.4f}"
return fig, results
CUSTOM_CSS = """
.gradio-container {max-width: 1400px !important;}
"""
with gr.Blocks(title=APP_TITLE, theme=gr.themes.Soft(), css=CUSTOM_CSS) as demo:
gr.Markdown(f"# 🌌 {APP_TITLE}")
gr.Markdown(APP_SUBTITLE)
with gr.Tabs():
with gr.TabItem("🔎 Explorer"):
dataset_dropdown = gr.Dropdown(
list(REPO_CONFIG.keys()),
value="Amplitude Damping",
label="Dataset"
)
split_dropdown = gr.Dropdown(
["train"],
value="train",
label="Split"
)
explorer_df = gr.Dataframe(label="Preview")
with gr.Row():
raw_qasm = gr.Code(label="Raw QASM", language=None)
transpiled_qasm = gr.Code(label="Transpiled QASM", language=None)
info_box = gr.Markdown()
summary_box = gr.Markdown()
with gr.TabItem("🧠 Regression"):
feature_picker = gr.CheckboxGroup(label="Input features")
test_size = gr.Slider(0.1, 0.4, value=0.2, label="Test split")
n_estimators = gr.Slider(50, 300, value=150, label="Trees")
max_depth = gr.Slider(2, 20, value=10, step=1, label="Max depth")
seed = gr.Number(value=42, label="Random seed")
run_btn = gr.Button("Train & Evaluate", variant="primary")
plot = gr.Plot()
metrics = gr.Markdown()
with gr.TabItem("📖 Guide"):
gr.Markdown(load_guide_content())
gr.Markdown("---")
gr.Markdown(
"### 🔗 Links\n"
"[Website](https://qsbench.github.io) | "
"[Hugging Face](https://huggingface.co/QSBench) | "
"[GitHub](https://github.com/QSBench)"
)
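    # Event wiring: dataset/split changes refresh the explorer and feature picker; the button trains the model.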
dataset_dropdown.change(
refresh_explorer,
[dataset_dropdown, split_dropdown],
[split_dropdown, explorer_df, raw_qasm, transpiled_qasm, info_box, summary_box],
)
split_dropdown.change(
refresh_explorer,
[dataset_dropdown, split_dropdown],
[split_dropdown, explorer_df, raw_qasm, transpiled_qasm, info_box, summary_box],
)
dataset_dropdown.change(sync_feature_picker, [dataset_dropdown], [feature_picker])
run_btn.click(
train_regressor,
[dataset_dropdown, feature_picker, test_size, n_estimators, max_depth, seed],
[plot, metrics],
)
demo.load(
refresh_explorer,
[dataset_dropdown, split_dropdown],
[split_dropdown, explorer_df, raw_qasm, transpiled_qasm, info_box, summary_box],
)
demo.load(sync_feature_picker, [dataset_dropdown], [feature_picker])
if __name__ == "__main__":
    demo.launch()