"""Print summary statistics for the relbert few-shot link-prediction datasets.

For each NELL configuration, reports:
  * entity-type frequencies (as head vs. tail of a triple),
  * per-relation triple counts,
  * vocabulary size read from ``data/<config>.vocab.txt``.
"""
import pandas as pd
from datasets import load_dataset


for _type in ['nell', 'nell_filter']:
    data = load_dataset("relbert/fewshot_link_prediction", _type, split='test')
    df = data.to_pandas()

    print(f"\nEntity Types ({_type})")
    # Count how often each entity type appears in the tail / head position.
    tail = df.groupby("tail_type")['relation'].count().to_dict()
    head = df.groupby("head_type")['relation'].count().to_dict()
    # Union of all entity types seen in either position; sorted so the
    # printed table is deterministic across runs (set order is not).
    k = sorted(set(tail) | set(head))
    df_types = pd.DataFrame(
        [{"entity_type": _k, "tail": tail.get(_k, 0), "head": head.get(_k, 0)} for _k in k]
    )
    print(df_types.to_markdown(index=False))

    print(f"\nRelation Types ({_type})")
    print(df.groupby("relation")['relation'].count().to_markdown())

    print(f"\nVocab Size ({_type})")
    # Vocab size = number of non-blank lines in the vocab file.
    with open(f"data/{_type}.vocab.txt", encoding="utf-8") as f:
        length = len([i for i in f.read().splitlines() if len(i) > 0])
    print(length)