File size: 7,742 Bytes
007682c
 
9766e96
 
 
53eadfe
a05f582
 
9766e96
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a05f582
 
 
9766e96
 
 
a05f582
 
 
53eadfe
 
 
a05f582
53eadfe
a05f582
 
 
9766e96
a05f582
 
9766e96
007682c
 
 
eff6b7e
 
007682c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9766e96
 
a05f582
 
 
9766e96
 
 
a05f582
 
 
 
 
 
 
9766e96
a05f582
9766e96
a05f582
 
 
 
9766e96
 
a05f582
9766e96
 
 
 
 
a05f582
 
93fe4fb
 
 
a05f582
 
 
 
 
d679da5
a05f582
 
 
 
 
 
93fe4fb
a05f582
 
d679da5
 
 
79f5ef7
d679da5
007682c
79f5ef7
53eadfe
9766e96
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53eadfe
9766e96
 
 
53eadfe
9766e96
53eadfe
 
 
 
 
 
 
 
 
9766e96
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
import huggingface_hub
import requests.exceptions
import zarr
import numpy as np
import datasets
import fsspec
from huggingface_hub import HfFileSystem
from fsspec.implementations.zip import ZipFileSystem





# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{ocf:mrms,
title = {MRMS Archival Precipitation Rate Radar Dataset},
author={Jacob Bieker
},
year={2022}
}
"""

# You can copy an official description.
# Fixed: "2-mimntely spatial resolution" — minutes are a temporal, not a
# spatial, resolution, and "mimntely" was a typo.
_DESCRIPTION = """\
This dataset consists of MRMS precipitation radar data for the continental United States,
sampled at a 1km x 1km spatial resolution and 2-minute temporal resolution.
"""

_HOMEPAGE = "https://mtarchive.geol.iastate.edu/"

_LICENSE = "US Government data, Open license, no restrictions"


class ZarrTest:
    """Scratch experiments for writing/reading zarr hierarchies in three
    backends: a local directory store, a Hugging Face Hub repo (hf://), and
    a zip archive.

    Each method builds the same tiny hierarchy: a 100x100 int8 'trace'
    array (filled with 42, first row/column set to 0..99) plus three flat
    uint8 arrays 'plaintext' (22), 'ciphertext' (33) and 'key' (44), and
    consolidates the metadata before reading back.
    """

    @staticmethod
    def create():
        """Write the test hierarchy to ./data.zarr, re-open it read-only via
        the consolidated metadata, and print whether 'trace' round-trips."""
        store1 = zarr.DirectoryStore('data.zarr')
        root1 = zarr.group(store=store1)
        z1 = root1.zeros('trace', shape=(100, 100), chunks=(100, 10), dtype='i1', overwrite=True)
        z1[:] = 42
        z1[0, :] = np.arange(100)
        z1[:, 0] = np.arange(100)
        z2 = root1.zeros('plaintext', shape=100, dtype='u1', overwrite=True)
        z3 = root1.zeros('ciphertext', shape=100, dtype='u1', overwrite=True)
        z4 = root1.zeros('key', shape=100, dtype='u1', overwrite=True)
        z2[:] = 22
        z3[:] = 33
        z4[:] = 44
        zarr.consolidate_metadata(store1)

        # Re-open through the consolidated metadata, consistent with the
        # other read paths in this class.  The original used zarr.group()
        # here, which is a *creation* API that re-initialises the group
        # metadata rather than merely opening the existing group.
        store2 = zarr.DirectoryStore('data.zarr')
        root2 = zarr.open_consolidated(store2)
        test = np.all(z1[:] == root2['trace'][:])
        print(test)
        store1.close()
        store2.close()

    @staticmethod
    def create_in_hf():
        """Write the test hierarchy to an hf:// dataset repo and read it back."""
        try:
            # Best-effort cleanup of a previous run.  The delete call is
            # currently disabled, so nothing in this try-body can raise
            # HTTPError; the handler below stays dormant until the call is
            # re-enabled.
            ...
            # huggingface_hub.delete_folder(path_in_repo="data.zarr", repo_id="spikingneurons/test", repo_type="dataset")
        except requests.exceptions.HTTPError:
            ...
        # NOTE(review): DirectoryStore expects a local filesystem path; it is
        # unclear that an hf:// URL works here — FSStore (see load_from_hf)
        # is the usual choice for fsspec URLs.  Confirm before relying on it.
        store1 = zarr.DirectoryStore('hf://datasets/spikingneurons/test/data.zarr')
        root1 = zarr.group(store=store1)
        z1 = root1.zeros('trace', shape=(100, 100), chunks=(100, 10), dtype='i1', overwrite=True)
        z1[:] = 42
        z1[0, :] = np.arange(100)
        z1[:, 0] = np.arange(100)
        z2 = root1.zeros('plaintext', shape=100, dtype='u1', overwrite=True)
        z3 = root1.zeros('ciphertext', shape=100, dtype='u1', overwrite=True)
        z4 = root1.zeros('key', shape=100, dtype='u1', overwrite=True)
        z2[:] = 22
        z3[:] = 33
        z4[:] = 44
        zarr.consolidate_metadata(store1)
        print(z1[:])
        store1.close()

        store2 = zarr.DirectoryStore('hf://datasets/spikingneurons/test/data.zarr')
        root2 = zarr.open_consolidated(store2)
        print(root2['trace'][:])
        store2.close()

    @staticmethod
    def create_in_zip():
        """Write the test hierarchy into ./data.zarr.zip, then re-open the
        archive read-only and print the 'trace' array."""
        store1 = zarr.ZipStore('data.zarr.zip')
        root1 = zarr.group(store=store1)
        z1 = root1.zeros('trace', shape=(100, 100), chunks=(100, 10), dtype='i1', overwrite=True)
        z1[:] = 42
        z1[0, :] = np.arange(100)
        z1[:, 0] = np.arange(100)
        z2 = root1.zeros('plaintext', shape=100, dtype='u1', overwrite=True)
        z3 = root1.zeros('ciphertext', shape=100, dtype='u1', overwrite=True)
        z4 = root1.zeros('key', shape=100, dtype='u1', overwrite=True)
        z2[:] = 22
        z3[:] = 33
        z4[:] = 44
        zarr.consolidate_metadata(store1)
        print(z1[:])
        store1.close()

        # Re-open read-only: ZipStore defaults to mode='a', and appending to
        # an existing archive can create duplicate entries; 'r' is the
        # correct mode for a pure read.
        store2 = zarr.ZipStore('data.zarr.zip', mode='r')
        root2 = zarr.open_consolidated(store2)
        print(root2['trace'][:])
        store2.close()

    @staticmethod
    def load_from_hf():
        """Read back the data written by create_in_hf / create_in_zip from
        the Hub: first the unzipped hf:// store via FSStore, then the zipped
        copy through a ZipFileSystem layered over fsspec.

        Note that we load data generated via create_in_zip to upload to HF.
        The commented lines document alternative access paths that were
        tried; kept for reference.
        """
        from datasets import load_dataset
        import datasets
        # ds = load_dataset("spikingneurons/test", streaming=True, trust_remote_code=True)
        # print(ds)
        # _fs = HfFileSystem()
        # _files = _fs.ls("datasets/spikingneurons/test", detail=False)
        # print(_files)
        # import xarray as xr
        # _arr = xr.open_dataset('hf://datasets/spikingneurons/test/data.zarr', engine='zarr', chunks={})
        from zarr.storage import FSStore
        store = FSStore('hf://datasets/spikingneurons/test/data.zarr')
        root = zarr.group(store=store)
        print(root['trace'][:])
        # store = FSStore('zip://*::hf://datasets/spikingneurons/test/example.zip')
        # store11 = FSStore('zip://hf://datasets/spikingneurons/test/example.zip')
        # store22 = FSStore('zip+hf://datasets/spikingneurons/test/example.zip')
        # store = zarr.DirectoryStore(UPath('hf://datasets/spikingneurons/test') / 'data.zarr')
        # store1 = zarr.DirectoryStore('data.zarr')
        # root1 = zarr.group(store=store1)
        with fsspec.open('hf://datasets/spikingneurons/test/data.zarr.zip', 'rb') as zip_file:
            _zip_fs = ZipFileSystem(zip_file)
            store4 = FSStore(url="", fs=_zip_fs)
            root4 = zarr.open_consolidated(store=store4)
            print(root4['trace'][:])

# Run the local zarr round-trip experiment only when executed as a script.
# The original called ZarrTest.create() at module top level, which would
# write ./data.zarr as an import side effect — this module doubles as a
# `datasets` loading script, so that fired on every dataset load.
if __name__ == "__main__":
    ZarrTest.create()
    # ZarrTest.create_in_zip()
    # ZarrTest.load_from_hf()
    # ZarrTest.create_in_hf()

class Test(datasets.GeneratorBasedBuilder):
    """Experimental HF `datasets` builder around the test zip archive.

    NOTE(review): GeneratorBasedBuilder subclasses must also define
    `_generate_examples`; it is missing here, so the builder cannot yet
    produce examples — the split declarations below only wire up the
    download side.
    """

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = []
    # DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the static dataset metadata (features, citation, license)."""
        _features = datasets.Features({
            "data": datasets.Value("string"),
            "label": datasets.ClassLabel(names=["0", "1"]),
        })

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=_features,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract example.zip and declare train/test/valid splits.

        Raises whatever `dl_manager.download_and_extract` raises on a
        failed download.
        """
        print(dl_manager)
        print(self.config.name)
        urls = ["example.zip"]
        # download_and_extract mirrors its input structure: a list in, a
        # list of local paths out.  The original indexed the returned list
        # with string keys ("train"/"test"/"valid"), which raises TypeError;
        # derive the per-split paths from the extracted root instead.
        data_dir = dl_manager.download_and_extract(urls)
        extracted_root = data_dir[0]
        print(data_dir)

        # Inspect the archive contents.  fsspec filesystem instances are
        # not context managers, so `fs` must not go in a `with` statement
        # (the original `with fsspec.filesystem(...)` raised AttributeError).
        with fsspec.open(extracted_root) as f:
            zip_fs = fsspec.filesystem("zip", fo=f)
            all_files = zip_fs.glob("*")
            print("All files in the zip:", all_files)

        streaming = dl_manager.is_streaming

        def _make_split(split_name, key):
            # Mirrors the original gen_kwargs shape; in streaming mode the
            # raw urls are forwarded, otherwise a subdirectory of the
            # extracted archive.  "streaming": False is kept as-is from the
            # original — presumably consumed by _generate_examples; confirm.
            return datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": urls if streaming else f"{extracted_root}/{key}",
                    "split": key,
                    "streaming": False,
                },
            )

        return [
            _make_split(datasets.Split.TRAIN, "train"),
            _make_split(datasets.Split.TEST, "test"),
            _make_split(datasets.Split.VALIDATION, "valid"),
        ]