EiffL committed on
Commit
c823e67
·
1 Parent(s): d2baebb

adapting the reader to the dataset

Browse files
Files changed (1) hide show
  1. DESI.py +58 -67
DESI.py CHANGED
@@ -1,7 +1,8 @@
1
  import os
2
  import numpy as np
3
  import datasets
4
- from datasets import Features, Value, Array2D
 
5
  from datasets.utils.logging import get_logger
6
 
7
  # TODO: Add BibTeX citation
@@ -29,23 +30,47 @@ _LICENSE = ""
29
 
30
  # Download URLs for different variants of the dataset
31
  # TODO: these files should be versioned
32
- _DATA_PATH = "{data_dir}/{sample}/{healpixel}.parquet"
33
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  _VERSION = "0.0.1"
35
 
36
  logger = get_logger(__name__)
37
 
38
- class DESI(datasets.GeneratorBasedBuilder):
39
  """TODO: Short description of my dataset."""
40
 
41
  VERSION = _VERSION
42
 
43
  BUILDER_CONFIGS = [
44
- datasets.BuilderConfig(name="edr_sv3", version=VERSION,
45
  description="One percent survey from the DESI Early Data Release."),
46
  ]
47
 
48
- DEFAULT_CONFIG_NAME = "edr_sv3"
49
 
50
  def _info(self):
51
  """ Defines the features available in this dataset.
@@ -54,14 +79,25 @@ class DESI(datasets.GeneratorBasedBuilder):
54
  # This is the description that will appear on the datasets page.
55
  description=_DESCRIPTION,
56
  # This defines the different columns of the dataset and their types
57
- features=Features({
58
- 'spectrum': Array2D(shape=(None, 2), dtype='float32'), # Stores flux and ivar
59
- 'lambda_min': Value('float32'), # Min and max wavelength
60
- 'lambda_max': Value('float32'),
61
- 'resolution': Value('float32'), # Resolution of the spectrum
62
- 'z': Value('float32'),
63
- 'ebv': Value('float32'),
64
- }),
 
 
 
 
 
 
 
 
 
 
 
65
  # Homepage of the dataset for documentation
66
  homepage=_HOMEPAGE,
67
  # License for the dataset if available
@@ -71,61 +107,16 @@ class DESI(datasets.GeneratorBasedBuilder):
71
  )
72
 
73
  def _split_generators(self, dl_manager):
74
- # First, attempt to access the files locally, if unsuccessful, emit a warning and attempt to download them
75
- if dl_manager.manual_dir is not None:
76
- data_dir = dl_manager.manual_dir
77
- data_dir = {k: os.path.join(data_dir, self.URLS[k].split('/')[-1])
78
- for k in self.URLS}
79
- else:
80
- logger.warning("We recommend downloading data manually through GLOBUS"
81
- "and specifying the manual_dir argument to pass to the dataset builder."
82
- "Downloading data automatically through the dataset builder will proceed but is not recommended.")
83
- data_dir = dl_manager.download_and_extract(self.URLS)
84
-
85
  return [
86
  datasets.SplitGenerator(
87
  name=datasets.Split.TRAIN,
88
- gen_kwargs={**data_dir}
89
  )
90
  ]
91
-
92
- def _generate_examples(self, catalog, data, keys = None):
93
- """ Yields examples as (key, example) tuples.
94
- """
95
- import h5py
96
- from astropy.table import Table
97
-
98
- # Opening the catalog
99
- catalog = Table.read(catalog)
100
-
101
- # If no keys are provided, return all the examples
102
- if keys is None:
103
- keys = catalog['TARGETID']
104
-
105
- # Preparing an index for fast searching through the catalog
106
- sort_index = np.argsort(catalog['TARGETID'])
107
- sorted_ids = catalog['TARGETID'][sort_index]
108
-
109
- # Opening data file and iterating over the requested keys
110
- with h5py.File(data, 'r') as data:
111
- # Loop over the indices and yield the requested data
112
- for i, id in enumerate(keys):
113
- # Extract the indices of requested ids in the catalog
114
- idx = sort_index[np.searchsorted(sorted_ids, id)]
115
- row = catalog[idx]
116
- key = row['TARGETID']
117
-
118
- example = {
119
- 'spectrum': np.stack([data['flux'][idx],
120
- data['ivar'][idx]], axis=1).astype('float32'),# TODO: add correct values
121
- 'lambda_min': 0., # TODO: add correct values
122
- 'lambda_max': 1., # TODO: add correct values
123
- 'resolution': 0.1,
124
- 'z': row['Z'],
125
- 'ebv': row['EBV'],
126
- }
127
-
128
- # Checking that we are retriving the correct data
129
- assert (key == keys[i]) & (data['target_ids'][idx] == keys[i]) , ("There was an indexing error when reading desi spectra", (key, keys[i]))
130
-
131
- yield str(key), example
 
1
  import os
2
  import numpy as np
3
  import datasets
4
+ import pyarrow as pa
5
+ from datasets import Features, Value, Array2D, Sequence
6
  from datasets.utils.logging import get_logger
7
 
8
  # TODO: Add BibTeX citation
 
30
 
31
  # Download URLs for different variants of the dataset
32
  # TODO: these files should be versioned
33
+ _DATA_PATH = "{sample}/{healpixel}.parquet"
34
+ _HEALPIXELS = ['9144', '9145', '9146', '9147', '9148', '9149', '9150', '9151', '9334', '9335', '9337', '9338', '9339', '9340', '9341', '9342',
35
+ '9343', '9424', '9425', '9427', '9428', '9429', '9430', '9431', '9512', '9514', '9600', '9791', '9812', '9813', '9814', '9815',
36
+ '9817', '9818', '9819', '9820', '9821', '9822', '9823', '9829', '9830', '9831', '9834', '9835', '9836', '9837', '9838', '9839',
37
+ '9840', '9841', '9842', '9843', '9844', '9845', '9848', '9849', '9850', '9851', '9877', '9878', '9879', '9884', '9885', '9887',
38
+ '9920', '9921', '9922', '9923', '9924', '9925', '9926', '9928', '9929', '9930', '9931', '9936', '9983', '9984', '9986', '9987',
39
+ '9992', '9993', '9994', '10016', '10145', '10146', '10147', '10148', '10150', '10151', '10152', '10153', '10154', '10155', '10156',
40
+ '10157', '10158', '10159', '10197', '10198', '10199', '10204', '10205', '10206', '10207', '10228', '10229', '10230', '10231', '10236',
41
+ '10237', '10238', '10239', '10368', '10370', '10376', '10377', '10378', '10379', '10380', '10382', '10383', '10400', '10401', '10402',
42
+ '10403', '10404', '10405', '10406', '10408', '10409', '10512', '10513', '10514', '10515', '10516', '10517', '10518', '10519', '10521',
43
+ '10524', '10719', '10741', '10743', '11215', '11224', '11225', '11226', '11227', '11228', '11229', '11230', '11231', '11237', '11239',
44
+ '11248', '11249', '11250', '11251', '11252', '11253', '11254', '11349', '11400', '11401', '11402', '11403', '11404', '11405', '11406',
45
+ '11407', '11424', '11425', '11426', '11427', '11428', '11429', '11430', '11520', '11521', '11522', '11523', '11524', '11526', '11604',
46
+ '11605', '11606', '11607', '11612', '11613', '11912', '11914', '11936', '15338', '15339', '15340', '15341', '15342', '15343', '15352',
47
+ '15353', '15354', '15355', '15356', '15357', '15358', '15359', '16040', '16041', '16042', '16043', '16046', '25595', '25596', '25597',
48
+ '25598', '25599', '25909', '25911', '25913', '25914', '25915', '25916', '25917', '25918', '25919', '25926', '25927', '25929', '25930',
49
+ '25931', '25932', '25933', '25934', '25935', '25938', '25944', '25945', '25946', '25947', '25952', '25953', '25954', '25955', '25956',
50
+ '25957', '25958', '25959', '25960', '25961', '25962', '25963', '25964', '25965', '25966', '25967', '25968', '25969', '25970', '25971',
51
+ '25974', '25976', '25977', '25978', '25979', '25980', '25981', '25982', '25983', '26000', '26001', '26003', '26004', '26005', '26006',
52
+ '26007', '26012', '26013', '26039', '26045', '26047', '26048', '26050', '26053', '26064', '26065', '26068', '26081', '26082', '26083',
53
+ '26086', '26087', '26088', '26089', '26090', '26091', '26092', '26093', '26094', '26095', '26251', '26254', '26272', '26273', '26274',
54
+ '26275', '26276', '26277', '26278', '26279', '26280', '26281', '26282', '26283', '26284', '26285', '26286', '26287', '26290', '26296',
55
+ '26432', '26433', '26436', '26437', '26961', '26964', '26965', '26966', '26967', '26973', '27238', '27239', '27244', '27245', '27246',
56
+ '27247', '27250', '27251', '27256', '27257', '27258', '27259', '27260', '27262', '27333', '27344', '27345', '27346', '27347', '27348',
57
+ '27648', '27649', '27650', '27651', '27656', '28027', '28030', '28031', '28112', '28113', '28114', '28115', '28116', '28117', '28118',
58
+ '28119', '28120', '28121', '28123', '28124', '28125', '28126', '28127', '28149', '28151']
59
  _VERSION = "0.0.1"
60
 
61
  logger = get_logger(__name__)
62
 
63
+ class DESI(datasets.ArrowBasedBuilder):
64
  """TODO: Short description of my dataset."""
65
 
66
  VERSION = _VERSION
67
 
68
  BUILDER_CONFIGS = [
69
+ datasets.BuilderConfig(name="sv3", version=VERSION,
70
  description="One percent survey from the DESI Early Data Release."),
71
  ]
72
 
73
+ DEFAULT_CONFIG_NAME = "sv3"
74
 
75
  def _info(self):
76
  """ Defines the features available in this dataset.
 
79
  # This is the description that will appear on the datasets page.
80
  description=_DESCRIPTION,
81
  # This defines the different columns of the dataset and their types
82
+ features=Features(
83
+ {'TARGETID': Value(dtype='int64'),
84
+ 'HEALPIX': Value(dtype='int32'),
85
+ 'TARGET_RA': Value(dtype='float32'),
86
+ 'TARGET_DEC': Value(dtype='float32'),
87
+ 'RELEASE': Value(dtype='int16'),
88
+ 'BRICKID': Value(dtype='int32'),
89
+ 'BRICK_OBJID': Value(dtype='int32'),
90
+ 'Z': Value(dtype='float32'),
91
+ 'EBV': Value(dtype='float32'),
92
+ 'FLUX_G': Value(dtype='float32'),
93
+ 'FLUX_R': Value(dtype='float32'),
94
+ 'FLUX_Z': Value(dtype='float32'),
95
+ 'FLUX_IVAR_G': Value(dtype='float32'),
96
+ 'FLUX_IVAR_R': Value(dtype='float32'),
97
+ 'FLUX_IVAR_Z': Value(dtype='float32'),
98
+ 'SPECTRUM': Sequence(feature=Value(dtype='float32'), length=7781),
99
+ 'SPECTRUM_IVAR': Sequence(feature=Value(dtype='float32'), length=7781)
100
+ }),
101
  # Homepage of the dataset for documentation
102
  homepage=_HOMEPAGE,
103
  # License for the dataset if available
 
107
  )
108
 
109
def _split_generators(self, dl_manager):
    """Fetch the per-healpixel parquet shards and expose them as one TRAIN split.

    Builds one relative URL per healpixel for the active config (e.g.
    "sv3/9144.parquet"), lets the download manager resolve/cache them, and
    hands the resulting local paths to `_generate_tables`.
    """
    # One shard per healpixel; the sample name comes from the selected config.
    shard_urls = []
    for healpixel in _HEALPIXELS:
        shard_urls.append(_DATA_PATH.format(sample=self.config.name, healpixel=healpixel))

    local_paths = dl_manager.download_and_extract(shard_urls)

    train_split = datasets.SplitGenerator(
        name=datasets.Split.TRAIN,
        gen_kwargs={"filepaths": local_paths},
    )
    return [train_split]
118
+
119
def _generate_tables(self, filepaths):
    """Yield (key, pyarrow.Table) pairs, one table per downloaded parquet shard.

    Args:
        filepaths: local parquet paths, assumed to be in the same order as
            `_HEALPIXELS` (they are built from it in `_split_generators`),
            so each table is keyed by its healpixel id.

    Yields:
        (healpixel_id, pyarrow.Table) tuples, one per shard.
    """
    # BUG FIX: `import pyarrow as pa` does not guarantee that the `parquet`
    # submodule is loaded, so `pa.parquet.read_table` can raise
    # AttributeError. Import the submodule explicitly instead.
    import pyarrow.parquet as pq

    for healpixel, filepath in zip(_HEALPIXELS, filepaths):
        yield healpixel, pq.read_table(filepath)