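"""Convert the CoNLL-2003 dataset to Parquet with human-readable tag strings.

Loads the dataset with Hugging Face `datasets`, maps the integer
`ner_tags`/`chunk_tags`/`pos_tags` IDs back to their label names, writes one
Parquet file per split, and records split sizes, feature schemas, and label
maps in a metadata.json file.
"""
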
import argparse
import json
import shutil
from pathlib import Path
from typing import TypedDict

from datasets import DatasetDict, load_dataset

OUT_DIR = Path(__file__).parent / "data"
METADATA_PATH = OUT_DIR / "metadata.json"


class ConllExample(TypedDict):
    tokens: list[str]
    ner_tags: list[str]
    chunk_tags: list[str]
    pos_tags: list[str]


class LabelMaps(TypedDict):
    ner_tags: list[str]
    chunk_tags: list[str]
    pos_tags: list[str]


def ids_to_strings(example: dict, label_maps: LabelMaps) -> ConllExample:
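    """Replace integer tag IDs with their string label names.

    Illustrative example, assuming the standard CoNLL-2003 NER label list:
    ner_tags [3, 0] -> ["B-ORG", "O"].
    """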
    return {
        "tokens": example["tokens"],
        "ner_tags": [label_maps["ner_tags"][i] for i in example["ner_tags"]],
        "chunk_tags": [label_maps["chunk_tags"][i] for i in example["chunk_tags"]],
        "pos_tags": [label_maps["pos_tags"][i] for i in example["pos_tags"]],
    }


def extract_label_maps(data: DatasetDict) -> LabelMaps:
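    """Read the id -> label-name lists from the train split's ClassLabel features."""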
    feats = data["train"].features
    return {
        "ner_tags": feats["ner_tags"].feature.names,
        "chunk_tags": feats["chunk_tags"].feature.names,
        "pos_tags": feats["pos_tags"].feature.names,
    }


def extract_metadata(data: DatasetDict, label_maps: LabelMaps) -> dict:
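    """Collect split sizes, feature schemas, and label maps for metadata.json.

    Illustrative output shape (the counts shown are the commonly cited
    CoNLL-2003 split sizes; actual values come from the loaded dataset):

        {
          "num_rows": {"train": 14041, "validation": 3250, "test": 3453},
          "features": {"tokens": "Sequence(...)", ...},
          "label_maps": {"ner_tags": ["O", "B-PER", ...], ...}
        }
    """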
    num_rows = {split_name: int(split.num_rows) for split_name, split in data.items()}
    features = {name: repr(feature) for name, feature in data["train"].features.items()}
    return {"num_rows": num_rows, "features": features, "label_maps": label_maps}


def main() -> None:
    """Load CoNLL-03 with datasets v3, save as Parquet and add metadata.

    Run: python preprocess.py --out-dir data
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--out-dir", type=Path, default=OUT_DIR, help="Output directory for Parquet files")
    ap.add_argument("--metadata-path", type=Path, default=METADATA_PATH, help="Path for metadata.json")
    args = ap.parse_args()

    out_dir = args.out_dir
    metadata_path = args.metadata_path

    out_dir.mkdir(parents=True, exist_ok=True)
    cache_path = Path(__file__).parent / "tmp"

    # Tested with datasets v3.6.
    data = load_dataset("conll2003", cache_dir=str(cache_path))

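    # Some distributions of the dataset name the dev split "valid" rather
    # than "validation"; map whichever name is present.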
    split_map = {"train": "train", "validation": "validation", "test": "test"}
    if "validation" not in data and "valid" in data:
        split_map["validation"] = "valid"

    label_maps = extract_label_maps(data)
    meta = extract_metadata(data, label_maps)

    for out_split, src_split in split_map.items():
        if src_split not in data:
            continue

        out_path = out_dir / f"{out_split}.parquet"
        ds_str = data[src_split].map(ids_to_strings, fn_kwargs={"label_maps": label_maps})
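        # Drop the loader's synthetic "id" column if present; it carries no
        # information beyond the row index.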
        if "id" in ds_str.column_names:
            ds_str = ds_str.remove_columns("id")

        ds_str.to_parquet(str(out_path))

    metadata_path.write_text(json.dumps(meta, indent=2), encoding="utf-8")

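    # Clean up the temporary Hugging Face cache created above.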
    if cache_path.exists():
        shutil.rmtree(cache_path)


if __name__ == "__main__":
    main()
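
# Illustrative round-trip check for the generated files (hypothetical paths):
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})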