| from datasets import load_from_disk, load_dataset, Dataset | |
| import pandas as pd | |
| import numpy as np | |
| from functools import reduce | |
| import pickle as pkl | |
# Load the COMET-ATOMIC-style Japanese graph (one JSON object per line) and
# build flat, de-duplicated phrase inventories for translation.
file_path = "../graph.jsonl"
ds = load_dataset("json", data_files=file_path)
df = pd.DataFrame(list(ds["train"]))

# Each "inference" cell is (from the code below) a nested dict:
# {relation: {sub_key: [phrase, ...]}}.  Flatten all leaf phrase lists with a
# single comprehension instead of the original chained
# reduce(lambda a, b: a + b, ...) passes, which repeatedly concatenated lists
# (quadratic) and raised TypeError on an empty inner dict.
inference_l = (
    df["inference"]
    .map(lambda inf: [s for rel in inf.values() for vals in rel.values() for s in vals])
    .explode()          # one phrase per row
    .dropna()           # rows whose flattened list was empty explode to NaN
    .drop_duplicates()
    .values.tolist()
)
event_l = df["event"].dropna().drop_duplicates().values.tolist()

# NOTE(review): bare expression — displays only in a notebook; it has no
# effect when run as a script.
len(inference_l), len(event_l)
# Persist the raw Japanese phrase inventories for later reuse.
payload = {
    "event_l": event_l,
    "inference_l": inference_l,
}
with open("event_inference_ja.pkl", "wb") as f:
    pkl.dump(payload, f)
# Union of all unique phrases (events + inferences), then prepend the four
# relation-prefix strings to form the full translation source list.
# NOTE(review): NEED_PREFIX / EFFECT_PREFIX / INTENT_PREFIX / REACT_PREFIX are
# not defined in this chunk — presumably they come from another notebook cell;
# confirm before running as a standalone script.
eil = list(set(event_l + inference_l))
prefixes = [NEED_PREFIX, EFFECT_PREFIX, INTENT_PREFIX, REACT_PREFIX]
peil = prefixes + eil
with open("peil_ja.pkl", "wb") as f:
    pkl.dump(peil, f)
# Plain-text copy, one phrase per line (hand-off format for translation).
pd.Series(peil).to_csv("peil_ja.txt", header=None, index=False)
def read_file_to_lines(path):
    """Read a text file and return its lines as a pandas Series.

    Trailing newlines are stripped from every element.

    Args:
        path: path to a UTF-8 text file.

    Returns:
        pd.Series of str, one element per line, without the trailing "\n".
    """
    # Explicit UTF-8: these files hold Japanese/Chinese text, and the
    # platform-default encoding (e.g. cp932/cp1252 on Windows) would mangle
    # or reject it.
    with open(path, "r", encoding="utf-8") as f:
        return pd.Series([line.rstrip("\n") for line in f])
# Load the human/machine-translated Chinese lines (same order as peil) and
# build the Japanese -> Chinese lookup dict.
peil_zh = read_file_to_lines("../peil_ja zh.txt")

# Fail loudly if the translated file drifted out of sync with the source
# phrase list.  The original used `assert`, which is stripped under
# `python -O`; a hard exception is safer for data validation.
if len(peil) != len(peil_zh):
    raise ValueError(
        f"translation length mismatch: {len(peil)} source lines "
        f"vs {len(peil_zh)} translated lines"
    )

d = dict(zip(peil, peil_zh))
with open("peil_ja_zh_d.pkl", "wb") as f:
    pkl.dump(d, f)

# Translate the event column; a missing phrase raises KeyError on purpose
# (silent None would corrupt the dataset).
df["zh_event"] = df["event"].map(lambda x: d[x])
def one_infe_ele_map(inf_d, mapping=None):
    """Translate every leaf string of a nested inference dict.

    Args:
        inf_d: nested dict of the shape {relation: {sub_key: [phrase, ...]}}.
        mapping: translation lookup dict.  Defaults to the module-level ``d``
            (Japanese -> Chinese) built above; passing it explicitly makes the
            function usable stand-alone.

    Returns:
        A new dict with the same nested structure, each phrase replaced by
        ``mapping[phrase]``.

    Raises:
        TypeError: if an inner value is not a list (data-shape violation).
        KeyError: if a phrase is missing from the mapping.
    """
    lookup = d if mapping is None else mapping
    out = {}
    for rel, sub in inf_d.items():
        out[rel] = {}
        for key, phrases in sub.items():
            # Explicit exception instead of the original `assert`, which is
            # stripped under `python -O`; isinstance replaces `type(...) ==`.
            if not isinstance(phrases, list):
                raise TypeError(
                    f"expected list at {rel!r}/{key!r}, got {type(phrases).__name__}"
                )
            out[rel][key] = [lookup[p] for p in phrases]
    return out
| df["zh_inference"] = df["inference"].map(one_infe_ele_map) | |
| with open("graph_ja_zh_df.pkl", "wb") as f: | |
| pkl.dump(df, f) | |
from huggingface_hub import HfApi

# Push the pickled bilingual DataFrame to the Hugging Face Hub dataset repo.
# Network side effect; presumably requires a logged-in HF token
# (e.g. `huggingface-cli login`) — confirm before running.
api = HfApi()
api.upload_file(
    path_or_fileobj="graph_ja_zh_df.pkl",
    path_in_repo="graph_ja_zh_df.pkl",
    repo_id="svjack/comet-atomic-ja-zh",
    repo_type="dataset",
)
# Build the Chinese-only dataset: keep just the translated columns and rename
# them back to the original schema ("event" / "inference").
zh_frame = df[["zh_event", "zh_inference"]].rename(
    columns={
        "zh_event": "event",
        "zh_inference": "inference",
    }
)
zh_ds = Dataset.from_pandas(zh_frame)
zh_ds.save_to_disk("graph_zh_ds_dir")