import argparse
import json

import numpy as np
import torch
import torch.nn.functional as F
import transformers

from data_builder import load_data, save_data
from metrics import get_roc_metrics, get_precision_recall_metrics
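
# This script evaluates the pretrained RADAR detector
# (TrustSafeAI/RADAR-Vicuna-7B) on paired human-written and model-generated
# texts, reporting ROC AUC and PR AUC for the AI-generated probability score.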


def load_detector(cache_dir, device):
    """Load the pretrained RADAR detector and its tokenizer from the Hub."""
    detector_path_or_id = "TrustSafeAI/RADAR-Vicuna-7B"
    print(f"Loading model and tokenizer: {detector_path_or_id}")
    detector = transformers.AutoModelForSequenceClassification.from_pretrained(detector_path_or_id, cache_dir=cache_dir)
    tokenizer = transformers.AutoTokenizer.from_pretrained(detector_path_or_id, cache_dir=cache_dir)
    detector.eval()
    detector.to(device)
    return detector, tokenizer


def radar_ai_text_prob(texts, tokenizer, detector, device):
    """Return, for each input text, RADAR's probability that it is
    AI-generated (class index 0 of the sequence classifier)."""
    with torch.no_grad():
        inputs = tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors="pt")
        inputs = {k: v.to(device) for k, v in inputs.items()}
        # log_softmax followed by exp is a numerically stable softmax;
        # column 0 holds the AI-generated class probability.
        output_probs = F.log_softmax(detector(**inputs).logits, dim=-1)[:, 0].exp().tolist()
    return output_probs
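

# radar_ai_text_prob pads the entire input list into a single batch, which can
# exhaust GPU memory on large datasets. A minimal batched wrapper (a sketch,
# not part of the original pipeline; batch_size=32 is an assumed default):
def radar_ai_text_prob_batched(texts, tokenizer, detector, device, batch_size=32):
    probs = []
    for start in range(0, len(texts), batch_size):
        batch = texts[start:start + batch_size]
        probs.extend(radar_ai_text_prob(batch, tokenizer, detector, device))
    return probs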


def experiment(args):
    data = load_data(args.dataset_file)
    detector, tokenizer = load_detector(args.cache_dir, args.device)
    n_samples = len(data["sampled"])

    name = "radar"

    # fix random seeds for reproducibility
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # score the human-written originals and the model-generated samples
    human_texts = data["original"]
    human_preds = radar_ai_text_prob(human_texts, tokenizer, detector, args.device)
    sampled_texts = data["sampled"]
    ai_preds = radar_ai_text_prob(sampled_texts, tokenizer, detector, args.device)

    results = [{'original': data["original"][idx],
                'original_crit': human_preds[idx],
                'sampled': data["sampled"][idx],
                'sampled_crit': ai_preds[idx]}
               for idx in range(n_samples)]

    predictions = {'real': [x["original_crit"] for x in results],
                   'samples': [x["sampled_crit"] for x in results]}
    fpr, tpr, roc_auc = get_roc_metrics(predictions['real'], predictions['samples'])
    p, r, pr_auc = get_precision_recall_metrics(predictions['real'], predictions['samples'])
    print(f"Criterion {name}_threshold ROC AUC: {roc_auc:.4f}, PR AUC: {pr_auc:.4f}")

    results_file = f'{args.output_file}.{name}.json'
    output = {'name': f'{name}_threshold',
              'info': {'n_samples': n_samples},
              'predictions': predictions,
              'raw_results': results,
              'metrics': {'roc_auc': roc_auc, 'fpr': fpr, 'tpr': tpr},
              'pr_metrics': {'pr_auc': pr_auc, 'precision': p, 'recall': r},
              'loss': 1 - pr_auc}
    with open(results_file, 'w') as fout:
        json.dump(output, fout)
    print(f'Results written into {results_file}')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_file', type=str, default="./exp_main/results/xsum_gpt2-xl",
                        help="prefix for the results file; '.radar.json' is appended")
    parser.add_argument('--dataset_file', type=str, default="./exp_main/data/xsum_gpt2-xl",
                        help="path to the dataset built by data_builder")
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--device', type=str, default="cuda")
    parser.add_argument('--cache_dir', type=str, default="../cache",
                        help="HuggingFace cache directory for model downloads")
    args = parser.parse_args()

    experiment(args)
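
# Example invocation (the script filename below is illustrative):
#   python radar_experiment.py --dataset_file ./exp_main/data/xsum_gpt2-xl \
#       --output_file ./exp_main/results/xsum_gpt2-xl --device cuda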