#!/usr/bin/env python3
"""Run real-data experiments."""
import os
import sys
import json
import time
import argparse
import yaml
import numpy as np
from datetime import datetime
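
# Make the repository root importable so the `src.*` modules resolve no
# matter which directory the script is launched from.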
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from src.data import load_lastfm_data, load_movielens_data, sample_deletions
from src.model import PoissonGammaVI
from src.graph_utils import build_adjacency, compute_graph_stats
from src.metrics import (compute_all_metrics, compute_deletion_influence_by_distance,
                         fit_exponential_decay, compute_local_error, compute_chi_poisson_gamma,
                         compute_gradient_interference)
from src.unlearning import one_step_downdate_poisson_gamma
from src.utils import generate_run_id, generate_config_id, save_jsonl, ensure_dir


def run_real_dataset(dataset_name, edges, N, M, preprocessing, config):
    """Run deletion experiments on a real dataset."""
    K_values = config.get('K_values', [5, 10])
    num_deletions = config.get('num_deletions', 50)
    radii = config.get('radii', [1, 2, 3, 4])
    prior = config.get('prior', {})
    a0 = prior.get('a0', 0.3)
    b0 = prior.get('b0', 1.0)
    c0 = prior.get('c0', 0.3)
    d0 = prior.get('d0', 1.0)
    max_iter = config.get('max_iter', 300)
    tol = config.get('tol', 1e-4)
    seed = config.get('seed', 42)

    all_records = []
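    # One deletion sweep per latent dimensionality K; every unlearning
    # strategy below starts from the same full-data fit.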
    for K in K_values:
        print(f"\n K={K}")
        run_id = generate_run_id()
        config_id = generate_config_id({**config, 'K': K, 'dataset': dataset_name})
        model = PoissonGammaVI(N, M, K, a0, b0, c0, d0, max_iter=max_iter, tol=tol, seed=seed)
        print(" Fitting full model...")
        t0 = time.time()
        full_result = model.fit_full(edges)
        t_full = time.time() - t0
        full_params = full_result.params
        print(f" Full fit: {full_result.n_iterations} iters, {t_full:.1f}s")
        user_to_items, item_to_users, edge_dict = build_adjacency(edges, N, M)
        deletion_samples = sample_deletions(edges, user_to_items, item_to_users, num_deletions, seed=seed)
        print(f" Running {len(deletion_samples)} deletions...")
        for del_idx, (edge_to_del, del_type) in enumerate(deletion_samples):
            if del_idx % 10 == 0:
                print(f" Deletion {del_idx+1}/{len(deletion_samples)}")
            i_del, j_del, x_del = edge_to_del

            # Exact: refit without the deleted edge, warm-started from the full fit
            exact_result = model.fit_without_edge(edges, edge_to_del, init_params=full_params)
            exact_params = exact_result.params

            # Local: refit only within graph radius R of the deleted edge
            local_results = {}
            local_params = {}
            for R in radii:
                lr = model.fit_local(edges, edge_to_del, R, init_params=full_params)
                local_results[R] = lr
                local_params[R] = lr.params

            # Warm-start: global refit initialized at the full-data parameters
            ws_result = model.fit_warm_start_global(edges, edge_to_del, init_params=full_params)

            # One-step: single downdate of the full model's variational parameters
            os_result = one_step_downdate_poisson_gamma(
                edges, edge_to_del, full_params, N, M, K, a0, b0, c0, d0)

            # Metrics
            model_kwargs = {'a0': a0, 'b0': b0, 'c0': c0, 'd0': d0}
            metrics = compute_all_metrics(
                full_params, exact_params, local_params,
                ws_result.params, os_result.params,
                edge_to_del, edges, N, M, K,
                'poisson_gamma', model=model, radii=radii,
                model_kwargs=model_kwargs)
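
            # Assemble one flat JSONL record per (K, deletion) carrying full
            # provenance: run/config ids, dataset, model family, priors, runtimes.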
            record = {
                'run_id': run_id,
                'config_id': config_id,
                'dataset_type': 'real',
                'dataset_name': dataset_name,
                'model_family': 'poisson_gamma',
                'inference_type': 'vi',
                'likelihood': 'poisson',
                'prior': 'gamma',
                'N': N, 'M': M, 'K': K,
                'n_edges': len(edges),
                'deletion_edge': [int(i_del), int(j_del), float(x_del)],
                'deletion_type': del_type,
                'deletion_index': del_idx,
                'runtime_full': t_full,
                'runtime_exact': exact_result.runtime_sec,
                'runtime_warm_start': ws_result.runtime_sec,
                'runtime_one_step': os_result.runtime_sec,
                'exact_converged': exact_result.converged,
                'a0': a0, 'b0': b0, 'c0': c0, 'd0': d0,
            }
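
            # Flatten per-radius results and the influence-by-distance dict
            # (when present) into scalar columns.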
            for R in radii:
                record[f'runtime_local_R{R}'] = local_results[R].runtime_sec
                record[f'local_R{R}_converged'] = local_results[R].converged
            record.update(metrics)
            if 'influence_by_distance' in record:
                for d_str, val in record['influence_by_distance'].items():
                    record[f'influence_d{d_str}'] = val
            all_records.append(record)

    return all_records


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='config/real_data.yaml')
    parser.add_argument('--datasets', nargs='*', default=None)
    args = parser.parse_args()
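
    # Expected config layout, sketched from the keys this script reads
    # (the section names here are hypothetical; the dispatch below only
    # checks for 'lastfm' or 'movielens' appearing in the name):
    #
    #   lastfm_small:
    #     K_values: [5, 10]
    #     num_deletions: 50
    #     radii: [1, 2, 3, 4]
    #     prior: {a0: 0.3, b0: 1.0, c0: 0.3, d0: 1.0}
    #     max_iter: 300
    #     tol: 1.0e-4
    #     seed: 42
    #     max_users: 1000
    #     max_items: 1000
    #     max_edges: 50000
    #     min_user_degree: 5
    #     min_item_degree: 5
    #     max_count: 100        # lastfm only
    #   movielens_counts:
    #     mode: rating_count    # movielens only
    #     ...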
    with open(args.config) as f:
        real_cfg = yaml.safe_load(f)

    output_dir = ensure_dir('results/raw')
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_file = os.path.join(output_dir, f'real_{timestamp}.jsonl')

    datasets_to_run = args.datasets or list(real_cfg.keys())
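    # Default to every dataset section in the config when --datasets is omitted.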
    for ds_name in datasets_to_run:
        if ds_name not in real_cfg:
            print(f"Unknown dataset config: {ds_name}")
            continue

        ds_cfg = real_cfg[ds_name]
        print(f"\n{'='*60}")
        print(f"Dataset: {ds_name}")
        print(f"{'='*60}")

        # Load and preprocess the dataset (degree filtering, subsampling)
        if 'lastfm' in ds_name:
            edges, N, M, preproc = load_lastfm_data(
                max_users=ds_cfg.get('max_users', 1000),
                max_items=ds_cfg.get('max_items', 1000),
                max_edges=ds_cfg.get('max_edges', 50000),
                min_user_degree=ds_cfg.get('min_user_degree', 5),
                min_item_degree=ds_cfg.get('min_item_degree', 5),
                max_count=ds_cfg.get('max_count', 100),
                seed=ds_cfg.get('seed', 42))
        elif 'movielens' in ds_name:
            mode = ds_cfg.get('mode', 'rating_count')
            edges, N, M, preproc = load_movielens_data(
                mode=mode,
                max_users=ds_cfg.get('max_users', 1000),
                max_items=ds_cfg.get('max_items', 1000),
                max_edges=ds_cfg.get('max_edges', 50000),
                min_user_degree=ds_cfg.get('min_user_degree', 5),
                min_item_degree=ds_cfg.get('min_item_degree', 5),
                seed=ds_cfg.get('seed', 42))
        else:
            print(f" Unsupported dataset: {ds_name}")
            continue

        # Save the preprocessing record as a dataset card for provenance
        preproc_dir = ensure_dir('results/reports')
        with open(os.path.join(preproc_dir, f'dataset_card_{ds_name}.json'), 'w') as f:
            json.dump(preproc, f, indent=2)

        graph_stats = compute_graph_stats([(e[0], e[1]) for e in edges], N, M)
        print(f" Graph stats: {json.dumps(graph_stats, indent=2)}")

        records = run_real_dataset(ds_name, edges, N, M, preproc, ds_cfg)
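
        # save_jsonl is assumed to append, so records from all datasets in
        # this invocation accumulate in one timestamped file.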
        save_jsonl(records, output_file)
        print(f" Saved {len(records)} records for {ds_name}")

    print(f"\nOutput: {output_file}")


if __name__ == '__main__':
    main()