| | import datasets |
| | from datasets import load_dataset |
| |
|
| |
|
| | _CONSTITUENT_DATASETS = ['SAT-4', 'SAT-6', 'NASC-TG2', 'WHU-RS19', 'RSSCN7', 'RS_C11', 'SIRI-WHU', 'EuroSAT', |
| | 'NWPU-RESISC45', 'PatternNet', 'RSD46-WHU', 'GID', 'CLRS', 'Optimal-31', |
| | 'Airbus-Wind-Turbines-Patches', 'USTC_SmokeRS', 'Canadian_Cropland', |
| | 'Ships-In-Satellite-Imagery', 'Satellite-Images-of-Hurricane-Damage', |
| | 'Brazilian_Coffee_Scenes', 'Brazilian_Cerrado-Savanna_Scenes', 'Million-AID', |
| | 'UC_Merced_LandUse_MultiLabel', 'MLRSNet', |
| | 'MultiScene', 'RSI-CB256', 'AID_MultiLabel'] |
| |
|
| |
|
class SATINConfig(datasets.BuilderConfig):
    """BuilderConfig for one SATIN constituent dataset.

    Holds the constituent dataset's name and its fully-qualified Hugging Face
    Hub repository id. ``description`` and ``features`` start as ``None`` and
    are filled in lazily by the builder (via a streaming metadata fetch).
    """

    def __init__(self, name, **kwargs):
        # Pin the config version; forward any remaining kwargs to BuilderConfig.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.name = name
        # Hub repo id, e.g. 'jonathan-roberts1/EuroSAT'.
        self.hf_dataset_name = f"jonathan-roberts1/{name}"
        # Lazily populated from the remote dataset's info in SATIN._info().
        self.description = None
        self.features = None
| |
|
| |
|
class SATIN(datasets.GeneratorBasedBuilder):
    """SATIN images dataset.

    Exposes each constituent dataset as its own builder config and proxies the
    data from the corresponding 'jonathan-roberts1/<name>' Hub repository.
    """

    # One config per constituent dataset, selectable by name.
    BUILDER_CONFIGS = [SATINConfig(name=dataset_name) for dataset_name in _CONSTITUENT_DATASETS]

    # Every proxied Hub dataset ships a single 'train' split.
    # (Hoisted to a class constant: the original assigned this inside
    # _generate_examples but then hardcoded 'train' anyway, leaving the
    # local unused.)
    _DEFAULT_SPLIT = 'train'

    def _info(self):
        """Return DatasetInfo for the selected config.

        Description and features are fetched lazily via a streaming load (no
        data files are downloaded just for metadata) and cached on the config
        so repeated calls are cheap.
        """
        if self.config.description is None or self.config.features is None:
            stream_dataset_info = load_dataset(
                self.config.hf_dataset_name, streaming=True, split=self._DEFAULT_SPLIT
            ).info
            self.config.description = stream_dataset_info.description
            self.config.features = stream_dataset_info.features
        return datasets.DatasetInfo(
            description=self.config.description,
            features=self.config.features,
        )

    def _split_generators(self, dl_manager):
        """Materialise the proxied dataset and expose it as a single TRAIN split.

        ``dl_manager`` is unused: data is obtained through ``load_dataset``
        rather than raw file downloads.
        """
        dataset = load_dataset(self.config.hf_dataset_name)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_path": dataset},
            ),
        ]

    def _generate_examples(self, data_path):
        """Yield ``(index, example)`` pairs from the proxied train split.

        ``data_path`` is the DatasetDict produced by ``_split_generators``.
        The 'image' feature is moved to the front of each example dict purely
        for a consistent feature ordering (dicts preserve insertion order).
        """
        huggingface_dataset = data_path[self._DEFAULT_SPLIT]
        features = huggingface_dataset.features
        for idx, row in enumerate(huggingface_dataset):
            features_dict = {feature: row[feature] for feature in features}
            # Re-insert 'image' first, keeping all other features in order.
            image = features_dict.pop('image')
            features_dict = {'image': image, **features_dict}
            yield idx, features_dict
| |
|
| |
|
| |
|
| |
|