Refactor script
Browse files- top_quark_tagging.py +24 -8
top_quark_tagging.py
CHANGED
|
@@ -29,18 +29,27 @@ _TEST_DOWNLOAD_URL = "data/test-raw.parquet"
|
|
| 29 |
_URLS = {
|
| 30 |
"raw": {
|
| 31 |
"train": "data/train-raw.parquet",
|
|
|
|
| 32 |
"validation": "data/validation-raw.parquet",
|
|
|
|
| 33 |
"test": "data/test-raw.parquet",
|
|
|
|
| 34 |
},
|
| 35 |
"nsubjettiness": {
|
| 36 |
"train": "data/train-nsubjettiness.parquet",
|
|
|
|
| 37 |
"validation": "data/validation-nsubjettiness.parquet",
|
|
|
|
| 38 |
"test": "data/test-nsubjettiness.parquet",
|
|
|
|
| 39 |
},
|
| 40 |
"image": {
|
| 41 |
"train": "data/train-images.parquet",
|
|
|
|
| 42 |
"validation": "data/train-images.parquet",
|
|
|
|
| 43 |
"test": "data/train-images.parquet",
|
|
|
|
| 44 |
},
|
| 45 |
}
|
| 46 |
|
|
@@ -896,23 +905,30 @@ class TopQuarkTagging(datasets.GeneratorBasedBuilder):
|
|
| 896 |
data_paths = dl_manager.download_and_extract(urls)
|
| 897 |
return [
|
| 898 |
datasets.SplitGenerator(
|
| 899 |
-
name=datasets.Split.TRAIN,
|
|
|
|
| 900 |
),
|
| 901 |
datasets.SplitGenerator(
|
| 902 |
name=datasets.Split.VALIDATION,
|
| 903 |
-
gen_kwargs={
|
|
|
|
|
|
|
|
|
|
| 904 |
),
|
| 905 |
datasets.SplitGenerator(
|
| 906 |
-
name=datasets.Split.TEST,
|
|
|
|
| 907 |
),
|
| 908 |
]
|
| 909 |
|
| 910 |
def _generate_examples(self, filepath, split):
|
| 911 |
"""Generate examples."""
|
| 912 |
-
|
| 913 |
-
|
|
|
|
| 914 |
if self.config.name == "image":
|
| 915 |
-
|
| 916 |
-
|
|
|
|
| 917 |
else:
|
| 918 |
-
yield id_, row
|
|
|
|
| 29 |
_URLS = {
|
| 30 |
"raw": {
|
| 31 |
"train": "data/train-raw.parquet",
|
| 32 |
+
"train-labels": "data/train-labels.parquet",
|
| 33 |
"validation": "data/validation-raw.parquet",
|
| 34 |
+
"validation-labels": "data/validation-labels.parquet",
|
| 35 |
"test": "data/test-raw.parquet",
|
| 36 |
+
"test-labels": "data/test-labels.parquet",
|
| 37 |
},
|
| 38 |
"nsubjettiness": {
|
| 39 |
"train": "data/train-nsubjettiness.parquet",
|
| 40 |
+
"train-labels": "data/train-labels.parquet",
|
| 41 |
"validation": "data/validation-nsubjettiness.parquet",
|
| 42 |
+
"validation-labels": "data/validation-labels.parquet",
|
| 43 |
"test": "data/test-nsubjettiness.parquet",
|
| 44 |
+
"test-labels": "data/test-labels.parquet",
|
| 45 |
},
|
| 46 |
"image": {
|
| 47 |
"train": "data/train-images.parquet",
|
| 48 |
+
"train-labels": "data/train-labels.parquet",
|
| 49 |
"validation": "data/train-images.parquet",
|
| 50 |
+
"validation-labels": "data/validation-labels.parquet",
|
| 51 |
"test": "data/train-images.parquet",
|
| 52 |
+
"test-labels": "data/test-labels.parquet",
|
| 53 |
},
|
| 54 |
}
|
| 55 |
|
|
|
|
| 905 |
data_paths = dl_manager.download_and_extract(urls)
|
| 906 |
return [
|
| 907 |
datasets.SplitGenerator(
|
| 908 |
+
name=datasets.Split.TRAIN,
|
| 909 |
+
gen_kwargs={"filepath": [data_paths["train"], data_paths["train-labels"]], "split": "train"},
|
| 910 |
),
|
| 911 |
datasets.SplitGenerator(
|
| 912 |
name=datasets.Split.VALIDATION,
|
| 913 |
+
gen_kwargs={
|
| 914 |
+
"filepath": [data_paths["validation"], data_paths["validation-labels"]],
|
| 915 |
+
"split": "validation",
|
| 916 |
+
},
|
| 917 |
),
|
| 918 |
datasets.SplitGenerator(
|
| 919 |
+
name=datasets.Split.TEST,
|
| 920 |
+
gen_kwargs={"filepath": [data_paths["test"], data_paths["test-labels"]], "split": "test"},
|
| 921 |
),
|
| 922 |
]
|
| 923 |
|
| 924 |
def _generate_examples(self, filepath, split):
    """Yield (key, example) pairs for one split.

    Args:
        filepath: two-element list ``[features_path, labels_path]`` of
            downloaded parquet files, as passed via ``gen_kwargs`` in
            ``_split_generators``.
        split: split name ("train", "validation" or "test"); reused as the
            split key when re-loading the parquet files.
    """
    features_path, labels_path = filepath
    feature_rows = datasets.load_dataset("parquet", data_files={split: features_path}, split=split)
    label_rows = datasets.load_dataset("parquet", data_files={split: labels_path}, split=split)
    image_config = self.config.name == "image"
    # Features and labels live in separate parquet files; pair them up
    # row-by-row (assumes both files have the same row order — TODO confirm).
    for key, (features, label) in enumerate(zip(feature_rows, label_rows)):
        if image_config:
            # The image parquet stores each jet image flattened across the
            # row's columns; rebuild the 33x33 pixel grid.
            pixels = np.fromiter(features.values(), dtype=float)
            yield key, {"image": pixels.reshape(33, 33), **label}
        else:
            yield key, {**features, **label}