import pandas as pd
import os
import subprocess
from typing import Union

import datasets
from datasets import load_dataset
def save_and_compress(dataset: "Union[datasets.Dataset, pd.DataFrame]", name: str, idx=None):
    """Write *dataset* to a JSON-Lines file, then compress a copy with xz.

    Args:
        dataset: A HF ``datasets.Dataset`` or a ``pandas.DataFrame`` — both
            expose a compatible ``to_json`` method.
        name: Output path prefix, without extension (e.g. ``"data/train"``).
        idx: Optional shard index; when given, the file is named
            ``{name}_{idx}.jsonl`` instead of ``{name}.jsonl``.
    """
    # Compare against None explicitly: the former truthiness test silently
    # dropped the suffix for a legitimate shard index of 0.
    if idx is not None:
        path = f"{name}_{idx}.jsonl"
    else:
        path = f"{name}.jsonl"
    print("Saving to", path)
    dataset.to_json(path, force_ascii=False, orient='records', lines=True)
    print("Compressing...")
    # Argument-list subprocess call instead of os.system: paths containing
    # spaces or shell metacharacters no longer break the command.
    # -z compress, -k keep the .jsonl, -f overwrite a stale .xz,
    # -T0 use all cores, and cap memory at 60% of RAM.
    try:
        # check=False mirrors os.system: a failed compression is reported
        # by xz itself but does not abort the export.
        subprocess.run(
            ['xz', '-zkf', '-T0', '--memlimit-compress=60%', path],
            check=False,
        )
    except FileNotFoundError:
        # Best-effort, as before: a missing xz binary should not crash us.
        print("xz not found; skipping compression of", path)
# Export each split: load the English parquet, drop the troublesome
# columns, preview the first record, then save + compress.
for split in ("train", "valid", "test"):
    ds = load_dataset(
        "parquet",
        data_files=f"original_data/{split}_en.parquet",
        split="train",
    )
    # 'case_marked_as_closed' brings problems; the three date columns are
    # also potentially an issue: overflows.
    ds = ds.remove_columns([
        "case_marked_as_closed",
        "filing_date",
        "date_first_instance_ruling",
        "date_appeal_panel_session",
    ])
    print(ds[0])
    save_and_compress(ds, f"data/{split}")