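"""Preprocess the TREC question-classification dataset.

Fetches the raw label files, carves a validation split out of train,
saves each split as Parquet, and writes a metadata.json summary.

Run: python preprocess_trec.py --val-ratio 0.1 --seed 42 --out-dir data
"""
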
import argparse
import json
import random
from pathlib import Path
from typing import Sequence, TypedDict

import requests
from datasets import Dataset, DatasetDict

OUT_DIR = Path(__file__).parent / "data"
METADATA_PATH = OUT_DIR / "metadata.json"

SEED = 42
VAL_RATIO = 0.1

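# Original TREC question-classification splits (Li & Roth, 2002), hosted by CogComp.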
URLS = {
    "train": "https://cogcomp.seas.upenn.edu/Data/QA/QC/train_5500.label",
    "test": "https://cogcomp.seas.upenn.edu/Data/QA/QC/TREC_10.label",
}

COARSE_DESC = {
    "ABBR": "abbreviation", "ENTY": "entities", "DESC": "description and abstract concepts",
    "HUM": "human beings", "LOC": "locations", "NUM": "numeric values"
}

FINE_DESC = {
    "ABBR:abb": "abbreviation", "ABBR:exp": "expression abbreviated",
    "ENTY:animal": "animals", "ENTY:body": "organs of body", "ENTY:color": "colors", "ENTY:cremat": "creative works",
    "ENTY:currency": "currency names", "ENTY:dismed": "diseases and medicine", "ENTY:event": "events", "ENTY:food": "food",
    "ENTY:instru": "musical instrument", "ENTY:lang": "languages", "ENTY:letter": "letters like a-z", "ENTY:other": "other entities",
    "ENTY:plant": "plants", "ENTY:product": "products", "ENTY:religion": "religions", "ENTY:sport": "sports",
    "ENTY:substance": "elements and substances", "ENTY:symbol": "symbols and signs", "ENTY:techmeth": "techniques and methods",
    "ENTY:termeq": "equivalent terms", "ENTY:veh": "vehicles", "ENTY:word": "words with a special property",
    "DESC:def": "definition of something", "DESC:desc": "description of something", "DESC:manner": "manner of an action", "DESC:reason": "reasons",
    "HUM:gr": "a group/organization", "HUM:ind": "an individual", "HUM:title": "title of a person", "HUM:desc": "description of a person",
    "LOC:city": "cities", "LOC:country": "countries", "LOC:mount": "mountains", "LOC:other": "other locations", "LOC:state": "states",
    "NUM:code": "codes", "NUM:count": "counts", "NUM:date": "dates", "NUM:dist": "distances", "NUM:money": "prices", "NUM:ord": "ranks",
    "NUM:other": "other numbers", "NUM:period": "duration", "NUM:perc": "percentages", "NUM:speed": "speed", "NUM:temp": "temperature",
    "NUM:volsize": "size/area/volume", "NUM:weight": "weight",
}


class TrecExample(TypedDict):
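    """A single TREC question with its coarse and fine labels and descriptions."""
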
    text: str
    coarse_label: str
    coarse_description: str
    fine_label: str
    fine_description: str


def fetch(url: str) -> list[bytes]:
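    """Download one label file and return its raw lines as bytes."""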
    r = requests.get(url, timeout=30)
    r.raise_for_status()
    return r.content.splitlines()


def parse(lines: Sequence[bytes]) -> list[TrecExample]:
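    """Parse 'COARSE:fine question text' lines into TrecExample rows."""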
    rows: list[TrecExample] = []
    for b in lines:
        # The source files are ISO-8859-1 encoded; latin-1 decodes every byte,
        # so no error handler is needed (UTF-8 would mangle accented characters).
        line = b.decode("latin-1").strip()
        if not line or " " not in line:
            continue
        fine, text = line.split(" ", 1)
        coarse = fine.split(":", 1)[0]
        rows.append(
            {
                "text": text.strip(),
                "coarse_label": coarse,
                "coarse_description": COARSE_DESC.get(coarse, ""),
                "fine_label": fine,
                "fine_description": FINE_DESC.get(fine, ""),
            }
        )
    return rows


def extract_metadata(ds: DatasetDict) -> dict:
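    """Collect per-split row counts, feature types, and sorted label inventories."""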
    num_rows = {name: len(split) for name, split in ds.items()}
    first_split = next(iter(ds.values()))
    features = {name: repr(feat) for name, feat in first_split.features.items()}

    coarse_labels = {label for split in ds.values() for label in split["coarse_label"]}
    fine_labels = {label for split in ds.values() for label in split["fine_label"]}

    label_maps = {
        "coarse_label": sorted(coarse_labels),
        "fine_label": sorted(fine_labels),
    }

    return {
        "num_rows": num_rows,
        "features": features,
        "label_maps": label_maps,
    }


if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("--val-ratio", type=float, default=VAL_RATIO, help="Fraction of training set for validation")
    ap.add_argument("--seed", type=int, default=SEED, help="Random seed for shuffling")
    ap.add_argument("--out-dir", type=Path, help="Output directory for Parquet files")
    ap.add_argument("--metadata-path", type=Path, help="Path for metadata.json")
    args = ap.parse_args()

    out_dir = args.out_dir
    metadata_path = args.metadata_path

    train = parse(fetch(URLS["train"]))
    test = parse(fetch(URLS["test"]))

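    # Shuffle with a fixed seed so the train/validation split is reproducible.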
    rng = random.Random(args.seed)
    rng.shuffle(train)
    n_val = int(len(train) * args.val_ratio)
    validation = train[:n_val]
    train = train[n_val:]

    data = DatasetDict(
        {
            "train": Dataset.from_list(train),
            "validation": Dataset.from_list(validation),
            "test": Dataset.from_list(test),
        }
    )

    out_dir.mkdir(exist_ok=True, parents=True)
    for name, split in data.items():
        split.to_parquet(str(out_dir / f"{name}.parquet"))

    metadata_path.parent.mkdir(exist_ok=True, parents=True)
    metadata_path.write_text(json.dumps(extract_metadata(data), indent=2))
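
# Expected layout after a default run (a sketch; row counts assume the standard
# TREC files of 5,452 train and 500 test questions, with --val-ratio 0.1):
#   data/train.parquet       ~4,907 rows
#   data/validation.parquet  ~545 rows
#   data/test.parquet        500 rows
#   data/metadata.json       row counts, features, and sorted label lists
#
# To load the splits back with the datasets library:
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={
#       "train": "data/train.parquet",
#       "validation": "data/validation.parquet",
#       "test": "data/test.parquet",
#   })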