import os
import json

import pandas as pd
from datasets import load_dataset
from lmppl import EncoderDecoderLM, LM, OpenAI

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)
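
# Verbalisation of each relation type, used to phrase the yes/no question prompts.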
prompt_dict = {
    "friend/ally of": "entities that are friends or allies",
    "competitor/rival of": "entities that are competitors or rivals",
    "known for": "examples of what entities are known for",
    "influenced by": "what has influenced different entities",
    "similar to": "examples of entities that are similar"
}
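
# RelEntLess validation split: each record holds a relation type, prototypical example pairs,
# candidate pairs, and their gold ranks.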
data = load_dataset("cardiffnlp/relentless", split="validation")
full_result = []
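
# Scorers to evaluate: (model name, lmppl scorer class, batch size, display name);
# raw strings keep the LaTeX \textsubscript markup literal for the final tables.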
for lm, ppl_class, batch, pretty_name in [
    ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
    ("google/flan-t5-xxl", EncoderDecoderLM, 1, r"Flan-T5\textsubscript{XXL}"),
    ("facebook/opt-13b", LM, 1, r"OPT\textsubscript{13B}"),
    ("davinci", OpenAI, None, r"GPT-3\textsubscript{davinci}")
]:
    os.makedirs(f"results_validation/lm_qa/{os.path.basename(lm)}", exist_ok=True)
    scorer = None
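
    # Score every relation type with the current model, caching perplexities on disk so reruns skip completed files.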
    for d in data:
        ppl_file = f"results_validation/lm_qa/{os.path.basename(lm)}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"
        if not os.path.exists(ppl_file):
            if scorer is None:
                if ppl_class is OpenAI:
                    scorer = ppl_class(OPENAI_API_KEY, model=lm)
                else:
                    scorer = ppl_class(lm, device_map='auto', low_cpu_mem_usage=True, offload_folder=f"./offload_folder/{os.path.basename(lm)}")
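
            # Few-shot prefix listing the prototypical example pairs of this relation type.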
            proto = ",".join([f'["{a}", "{b}"]' for a, b in d['prototypical_examples']])
            prefix = f"Answer the question by yes or no. We know that {proto} are examples of {prompt_dict[d['relation_type']]}."
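
            # Decoder-only models (OPT and the OpenAI API): append the answer "yes" to the prompt and score the full sequence.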
            if ppl_class is LM or ppl_class is OpenAI:
                prompt_input = [f'{prefix} Are ["{x}", "{y}"] {prompt_dict[d["relation_type"]]} as well?\n yes' for x, y in d['pairs']]
                ppl = scorer.get_perplexity(input_texts=prompt_input, batch=batch)
                output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
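            # Encoder-decoder models: the question is the encoder input and "yes" is scored as the decoder output.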
            elif ppl_class is EncoderDecoderLM:
                prompt_input = [f'{prefix} Are ["{x}", "{y}"] {prompt_dict[d["relation_type"]]} as well?' for x, y in d['pairs']]
                ppl = scorer.get_perplexity(input_texts=prompt_input, output_texts=["yes"] * len(prompt_input), batch=batch)
                output = [{"perplexity": p, "input": o, "output": "yes"} for p, o in zip(ppl, prompt_input)]
            else:
                raise ValueError(f"Unknown class {ppl_class}")
            with open(ppl_file, "w") as f:
                f.write("\n".join([json.dumps(i) for i in output]))
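
        # Rank candidate pairs by perplexity (lowest perplexity = rank 1) and compare against the gold ranking.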
        with open(ppl_file) as f:
            ppl = [json.loads(i)['perplexity'] for i in f.read().split("\n") if len(i) > 0]
        true_rank = d['ranks']
        assert len(true_rank) == len(ppl), f"Mismatch in number of examples: {len(true_rank)} vs {len(ppl)}"
        rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
        prediction = [rank_map[p] for p in ppl]
        tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
        cor = tmp.corr("spearman").values[0, 1]
        full_result.append({"model": pretty_name, "relation_type": d['relation_type'], "correlation": cor})
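
# Aggregate per-relation Spearman correlations into a model-by-relation-type table, keeping the original model order.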
df = pd.DataFrame(full_result)
models = df['model'].unique()
df = df.pivot(columns="relation_type", index="model", values="correlation")
df = df.loc[models]
df['average'] = df.mean(axis=1)
df.to_csv("results_validation/lm_qa/lm.csv")
df = (100 * df).round()
print(df.to_markdown())
print(df.to_latex(escape=False))