|
|
import os
import subprocess
from typing import Union

import datasets
import pandas as pd
from datasets import load_dataset
|
|
|
|
|
|
|
|
def save_and_compress(dataset: Union[datasets.Dataset, pd.DataFrame], name: str, idx: Union[int, None] = None) -> None:
    """Write *dataset* as a JSON-lines file and compress it with xz.

    The uncompressed ``.jsonl`` file is kept (``xz -k``) and an ``.xz``
    sibling is created next to it.

    Args:
        dataset: A HF ``datasets.Dataset`` or a pandas ``DataFrame`` —
            both expose a compatible ``to_json`` method.
        name: Output path prefix (without extension).
        idx: Optional shard index appended to the filename
            (``{name}_{idx}.jsonl``).

    Raises:
        subprocess.CalledProcessError: If the ``xz`` invocation fails.
    """
    # `is not None` so a legitimate shard index of 0 is not dropped
    # (the previous truthiness check treated idx == 0 as "no index").
    if idx is not None:
        path = f"{name}_{idx}.jsonl"
    else:
        path = f"{name}.jsonl"

    print("Saving to", path)
    dataset.to_json(path, force_ascii=False, orient='records', lines=True)

    print("Compressing...")
    # Argument list + shell=False: the path is passed verbatim, so spaces
    # or shell metacharacters in `name` can't break or inject into the
    # command (os.system interpolated it into a raw shell string).
    # check=True surfaces compression failures instead of ignoring them.
    subprocess.run(
        ['xz', '-zkf', '-T0', '--memlimit-compress=60%', path],
        check=True,
    )
|
|
|
|
|
|
|
|
# Convert each split's parquet export to (compressed) JSON-lines,
# stripping columns that downstream consumers do not need.
for split in ["train", "valid", "test"]:
    ds = load_dataset("parquet", data_files=f"original_data/{split}_en.parquet", split="train")

    # Drop the closure flag and every date field in a single pass.
    ds = ds.remove_columns([
        'case_marked_as_closed',
        'filing_date',
        'date_first_instance_ruling',
        'date_appeal_panel_session',
    ])

    # Quick sanity check of the first record before writing.
    print(ds[0])
    save_and_compress(ds, f"data/{split}")
|
|
|