| import json | |
| import os | |
| from random import shuffle, seed | |
| import pandas as pd | |
# Load the hate-speech tweets and locate the calendar date that splits the
# corpus into two (roughly) equal halves in chronological order.
with open("data/tweet_hate/full.jsonl") as f:
    data = [json.loads(line) for line in f if len(line)]
df = pd.DataFrame(data)
df.pop("source")  # provenance column, not needed downstream
df["date_dt"] = pd.to_datetime(df.date)
df = df.sort_values(by="date_dt")
dist_date = df.groupby("date_dt").size()  # tweets per date, chronological
total_n = len(df)
# The original scanned prefixes with dist_date[:n].sum() inside a while loop
# (quadratic in the number of dates); one cumulative sum yields the same n:
# the smallest n whose first-n-dates total exceeds half of the data.
cum_counts = dist_date.cumsum()
n = int((cum_counts <= total_n / 2).sum()) + 1
# NOTE(review): index[n] is the date *after* the prefix that crossed the
# halfway mark — kept as-is to match the original behavior, but index[n - 1]
# may have been intended; confirm before changing.
split_date = dist_date.index[n]
print(split_date)
# Chronological split: everything up to and including split_date is training
# material, strictly later tweets are held out for testing.
# .copy() so the later column drop mutates an independent frame instead of a
# view of df (the original popped columns off slices, which raises pandas'
# SettingWithCopyWarning and relies on chained-assignment behavior).
train = df[df["date_dt"] <= split_date].copy()
test = df[df["date_dt"] > split_date].copy()
print(train.date_dt.min(), train.date_dt.max())
print(test.date_dt.min(), test.date_dt.max())
# The helper column only exists for splitting; drop it before export.
# to_dict(orient="records") replaces list(frame.T.to_dict().values()): same
# row order, but without the transpose upcasting every column to a common
# dtype (the .T round-trip can silently turn ints into floats).
train = train.drop(columns=["date_dt"]).to_dict(orient="records")
test = test.drop(columns=["date_dt"]).to_dict(orient="records")
# Deterministically shuffle both halves, carve 20% of the training half off
# as validation, and quarter the test half for the cross-split experiments.
seed(42)
shuffle(train)
shuffle(test)
n_valid_target = int(len(train) * 0.2)
valid = train[:n_valid_target]
train = train[n_valid_target:]
n_test = len(test) // 4  # size of each test quarter
n_train = len(train)
n_validation = len(valid)
test_1, test_2, test_3, test_4 = (
    test[:n_test],
    test[n_test:2 * n_test],
    test[2 * n_test:3 * n_test],
    test[3 * n_test:],  # last quarter absorbs the remainder
)
def _write_jsonl(path, rows):
    """Write *rows* as JSON Lines: one json.dumps per line, no trailing newline."""
    with open(path, "w") as f:
        f.write("\n".join(json.dumps(row) for row in rows))


# Persist the base splits. exist_ok because the directory normally already
# exists (full.jsonl was read from it); the guard covers a fresh checkout.
os.makedirs("data/tweet_hate", exist_ok=True)
# One loop instead of seven copy-pasted open/write pairs.
for split_name, rows in [
    ("test", test),
    ("test_1", test_1),
    ("test_2", test_2),
    ("test_3", test_3),
    ("test_4", test_4),
    ("train", train),
    ("validation", valid),
]:
    _write_jsonl(f"data/tweet_hate/{split_name}.jsonl", rows)
def sampler(dataset_test, r_seed):
    """Build a train/validation pair that blends held-out test items in.

    Seeds the RNG with *r_seed*, then shuffles *dataset_test* and the global
    ``train``/``valid`` lists IN PLACE — deliberate: repeated calls compound
    the shuffles, so call order matters. Half of the train budget and half of
    the validation budget are replaced by items drawn from *dataset_test*.
    """
    seed(r_seed)
    # These three shuffles consume RNG state in a fixed order; do not reorder.
    shuffle(dataset_test)
    shuffle(train)
    shuffle(valid)
    half_train = int(n_train / 2)
    half_valid = int(n_validation / 2)
    drawn_for_train = dataset_test[:half_train]
    drawn_for_valid = dataset_test[half_train:half_train + half_valid]
    mixed_train = drawn_for_train + train[:n_train - len(drawn_for_train)]
    mixed_valid = drawn_for_valid + valid[:n_validation - len(drawn_for_valid)]
    return mixed_train, mixed_valid
# For each held-out quarter, pool the remaining quarters (with test_4
# standing in for the held-out one in splits 0-2), resample train/validation
# from the pool under three seeds, and write one dataset per (split, seed).
held_out = [test_1, test_2, test_3, test_4]
# Built once, before the loops: sampler() shuffles each pool in place, and
# those shuffles must compound across the three seed iterations.
pools = [
    test_4 + test_2 + test_3,
    test_1 + test_4 + test_3,
    test_1 + test_2 + test_4,
    test_1 + test_2 + test_3,
]
for split_id, pool in enumerate(pools):
    for s in range(3):
        out_dir = f"data/tweet_hate_test{split_id}_seed{s}"
        os.makedirs(out_dir, exist_ok=True)
        resampled_train, resampled_valid = sampler(pool, s)
        with open(f"{out_dir}/train.jsonl", "w") as f:
            f.write("\n".join(json.dumps(row) for row in resampled_train))
        with open(f"{out_dir}/validation.jsonl", "w") as f:
            f.write("\n".join(json.dumps(row) for row in resampled_valid))
        with open(f"{out_dir}/test.jsonl", "w") as f:
            f.write("\n".join(json.dumps(row) for row in held_out[split_id]))