lukasgarbas committed on
Commit
4ae150f
·
1 Parent(s): a27745f

add classic trec

Browse files
README.md ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pretty_name: TREC Question Classification
3
+ task_categories:
4
+ - text-classification
5
+ language:
6
+ - en
7
+ configs:
8
+ - config_name: default
9
+   data_files:
+   - split: train
+     path: data/train.parquet
+   - split: validation
+     path: data/validation.parquet
+   - split: test
+     path: data/test.parquet
13
+ ---
14
+
15
+ # TREC
16
+
17
+ A classic benchmark dataset for question classification with both coarse and fine-grained labels.
18
+
19
+ - **Size:** small, clean, ready to use
20
+ - **Source:** [original release](https://cogcomp.seas.upenn.edu/Data/QA/QC/)
21
+ - **Format:** stored in Parquet
22
+ - **Compatibility:** 🧩 works with `datasets >= 4.0` (script loaders deprecated)
23
+
24
+ ## Reference
25
+
26
+ Li, X., & Roth, D. (2002).
27
+ *Learning Question Classifiers.*
28
+ [ACL Anthology](https://aclanthology.org/C02-1150/)
data/metadata.json ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "num_rows": {
3
+ "train": 4907,
4
+ "validation": 545,
5
+ "test": 500
6
+ },
7
+ "features": {
8
+ "text": "Value(dtype='string', id=None)",
9
+ "coarse_label": "Value(dtype='string', id=None)",
10
+ "coarse_description": "Value(dtype='string', id=None)",
11
+ "fine_label": "Value(dtype='string', id=None)",
12
+ "fine_description": "Value(dtype='string', id=None)"
13
+ },
14
+ "label_maps": {
15
+ "coarse_label": [
16
+ "ABBR",
17
+ "DESC",
18
+ "ENTY",
19
+ "HUM",
20
+ "LOC",
21
+ "NUM"
22
+ ],
23
+ "fine_label": [
24
+ "ABBR:abb",
25
+ "ABBR:exp",
26
+ "DESC:def",
27
+ "DESC:desc",
28
+ "DESC:manner",
29
+ "DESC:reason",
30
+ "ENTY:animal",
31
+ "ENTY:body",
32
+ "ENTY:color",
33
+ "ENTY:cremat",
34
+ "ENTY:currency",
35
+ "ENTY:dismed",
36
+ "ENTY:event",
37
+ "ENTY:food",
38
+ "ENTY:instru",
39
+ "ENTY:lang",
40
+ "ENTY:letter",
41
+ "ENTY:other",
42
+ "ENTY:plant",
43
+ "ENTY:product",
44
+ "ENTY:religion",
45
+ "ENTY:sport",
46
+ "ENTY:substance",
47
+ "ENTY:symbol",
48
+ "ENTY:techmeth",
49
+ "ENTY:termeq",
50
+ "ENTY:veh",
51
+ "ENTY:word",
52
+ "HUM:desc",
53
+ "HUM:gr",
54
+ "HUM:ind",
55
+ "HUM:title",
56
+ "LOC:city",
57
+ "LOC:country",
58
+ "LOC:mount",
59
+ "LOC:other",
60
+ "LOC:state",
61
+ "NUM:code",
62
+ "NUM:count",
63
+ "NUM:date",
64
+ "NUM:dist",
65
+ "NUM:money",
66
+ "NUM:ord",
67
+ "NUM:other",
68
+ "NUM:perc",
69
+ "NUM:period",
70
+ "NUM:speed",
71
+ "NUM:temp",
72
+ "NUM:volsize",
73
+ "NUM:weight"
74
+ ]
75
+ }
76
+ }
data/test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bfbf350bbe394d3586dd0a97807205f10e6586d0558f59ca42f0446d14e044ef
3
+ size 17302
data/train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f342ee3b9528a2fbb1543f1807d3b90cc1a78f6f7a2d706c8834c37b1a945b45
3
+ size 200114
data/validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5d0e78d62d1bf9a23a28760cff23e3d1677ef42266f933a31e414e2a21c48b9
3
+ size 25867
preprocess.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import random
4
+ from pathlib import Path
5
+ from typing import Optional, Sequence, TypedDict
6
+
7
+ import requests
8
+ from datasets import Dataset, DatasetDict
9
+
10
# Default output locations, resolved relative to this script's directory.
OUT_DIR = Path(__file__).parent / "data"
METADATA_PATH = OUT_DIR / "metadata.json"

# Defaults for carving a validation split out of the shuffled training set;
# both are overridable via the CLI flags defined in the __main__ block.
SEED = 42
VAL_RATIO = 0.1

# Original TREC question-classification release (Li & Roth, 2002).
URLS = {
    "train": "https://cogcomp.seas.upenn.edu/Data/QA/QC/train_5500.label",
    "test": "https://cogcomp.seas.upenn.edu/Data/QA/QC/TREC_10.label",
}

# Human-readable descriptions for the six coarse category codes.
COARSE_DESC = {
    "ABBR": "abbreviation", "ENTY": "entities", "DESC": "description and abstract concepts",
    "HUM": "human beings", "LOC": "locations", "NUM": "numeric values"
}

# Human-readable descriptions for the fine-grained "COARSE:fine" codes.
FINE_DESC = {
    "ABBR:abb":"abbreviation","ABBR:exp":"expression abbreviated",
    "ENTY:animal":"animals","ENTY:body":"organs of body","ENTY:color":"colors","ENTY:cremat":"creative works",
    "ENTY:currency":"currency names","ENTY:dismed":"diseases and medicine","ENTY:event":"events","ENTY:food":"food",
    "ENTY:instru":"musical instrument","ENTY:lang":"languages","ENTY:letter":"letters like a-z","ENTY:other":"other entities",
    "ENTY:plant":"plants","ENTY:product":"products","ENTY:religion":"religions","ENTY:sport":"sports",
    "ENTY:substance":"elements and substances","ENTY:symbol":"symbols and signs","ENTY:techmeth":"techniques and methods",
    "ENTY:termeq":"equivalent terms","ENTY:veh":"vehicles","ENTY:word":"words with a special property",
    "DESC:def":"definition of something","DESC:desc":"description of something","DESC:manner":"manner of an action","DESC:reason":"reasons",
    "HUM:gr":"a group/organization","HUM:ind":"an individual","HUM:title":"title of a person","HUM:desc":"description of a person",
    "LOC:city":"cities","LOC:country":"countries","LOC:mount":"mountains","LOC:other":"other locations","LOC:state":"states",
    "NUM:code":"codes","NUM:count":"counts","NUM:date":"dates","NUM:dist":"distances","NUM:money":"prices","NUM:ord":"ranks",
    "NUM:other":"other numbers","NUM:period":"duration","NUM:perc":"percentages","NUM:speed":"speed","NUM:temp":"temperature",
    "NUM:volsize":"size/area/volume","NUM:weight":"weight",
}
41
+
42
+
43
class TrecExample(TypedDict):
    """One parsed row of the TREC question-classification data.

    As produced by ``parse``, the description fields are always strings
    (an empty string when the label code is not in the lookup tables),
    so ``None`` never actually occurs despite the Optional annotation.
    """
    # The question text, stripped of surrounding whitespace.
    text: str
    # Coarse category code, e.g. "HUM" (the part of the label before ":").
    coarse_label: str
    # Human-readable description of coarse_label; "" for unknown codes.
    coarse_description: Optional[str]
    # Full fine-grained code, e.g. "HUM:ind".
    fine_label: str
    # Human-readable description of fine_label; "" for unknown codes.
    fine_description: Optional[str]
49
+
50
+
51
def fetch(url: str) -> list[bytes]:
    """Download *url* and return the response body split into byte lines.

    Raises ``requests.HTTPError`` for non-success status codes and
    ``requests.Timeout`` if the server does not answer within 30 seconds.
    """
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    body = response.content
    return body.splitlines()
55
+
56
+
57
def parse(lines: Sequence[bytes]) -> list[TrecExample]:
    """Turn raw ``COARSE:fine question text`` label-file lines into examples.

    Blank lines and lines lacking a space separator are silently skipped.
    Bytes are decoded as UTF-8 with invalid sequences replaced.
    """
    examples: list[TrecExample] = []
    for raw in lines:
        decoded = raw.decode("utf-8", errors="replace").strip()
        # Guard: skip empty lines and lines with no "label text" separator.
        if not decoded or " " not in decoded:
            continue
        fine_code, question = decoded.split(" ", 1)
        # The coarse code is everything before the first colon.
        coarse_code = fine_code.partition(":")[0]
        example: TrecExample = {
            "text": question.strip(),
            "coarse_label": coarse_code,
            "coarse_description": COARSE_DESC.get(coarse_code, ""),
            "fine_label": fine_code,
            "fine_description": FINE_DESC.get(fine_code, ""),
        }
        examples.append(example)
    return examples
75
+
76
+
77
def extract_metadata(ds: DatasetDict) -> dict:
    """Summarize a DatasetDict for metadata.json.

    Returns a dict with per-split row counts, the feature schema of the
    first split (as ``repr`` strings), and the sorted inventory of coarse
    and fine labels observed across all splits.
    """
    row_counts = {split_name: len(split) for split_name, split in ds.items()}

    # Feature schema is assumed identical across splits; read it from the first.
    reference_split = next(iter(ds.values()))
    feature_reprs = {
        column: repr(feature) for column, feature in reference_split.features.items()
    }

    coarse_labels: set = set()
    fine_labels: set = set()
    for split in ds.values():
        coarse_labels.update(split["coarse_label"])
        fine_labels.update(split["fine_label"])

    return {
        "num_rows": row_counts,
        "features": feature_reprs,
        "label_maps": {
            "coarse_label": sorted(coarse_labels),
            "fine_label": sorted(fine_labels),
        },
    }
94
+
95
+
96
def main() -> None:
    """Fetch TREC from source, split it, save as Parquet and add metadata.

    Run: python preprocess.py --val-ratio 0.1 --seed 42 --out-dir data
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--val-ratio", type=float, default=VAL_RATIO, help="Fraction of training set for validation")
    ap.add_argument("--seed", type=int, default=SEED, help="Random seed for shuffling")
    # Defaults live on the parser itself so --help shows them and the
    # "args.x or DEFAULT" fallback dance is unnecessary.
    ap.add_argument("--out-dir", type=Path, default=OUT_DIR, help="Output directory for Parquet files")
    ap.add_argument("--metadata-path", type=Path, default=METADATA_PATH, help="Path for metadata.json")
    args = ap.parse_args()

    train = parse(fetch(URLS["train"]))
    test = parse(fetch(URLS["test"]))

    # Deterministically shuffle, then carve the validation split off the front.
    rng = random.Random(args.seed)
    rng.shuffle(train)
    n_val = int(len(train) * args.val_ratio)
    validation = train[:n_val]
    train = train[n_val:]

    data = DatasetDict(
        {
            "train": Dataset.from_list(train),
            "validation": Dataset.from_list(validation),
            "test": Dataset.from_list(test),
        }
    )

    args.out_dir.mkdir(exist_ok=True, parents=True)
    for name, split in data.items():
        split.to_parquet(str(args.out_dir / f"{name}.parquet"))

    # --metadata-path may point outside out-dir, so ensure its parent exists.
    # Pin the encoding so the file is byte-identical across platforms.
    args.metadata_path.parent.mkdir(exist_ok=True, parents=True)
    args.metadata_path.write_text(
        json.dumps(extract_metadata(data), indent=2), encoding="utf-8"
    )


if __name__ == "__main__":
    main()