pbk0 committed on
Commit
44bf929
·
1 Parent(s): 0665713

Add dataset management classes and loading script for HuggingFace integration

Browse files
Files changed (3) hide show
  1. requirements.txt +2 -1
  2. test.py +77 -0
  3. test_dataset.py +16 -0
requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
  gradio
2
  huggingface[cli]
3
  numpy
4
- fsspec
 
 
1
  gradio
2
  huggingface[cli]
3
  numpy
4
+ fsspec
5
+ datasets
test.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import numpy as np
3
+ import datasets
4
+
5
+ _CITATION = r"""
6
+ @misc{test2025,
7
+ title={Test Dataset},
8
+ author={Your Name},
9
+ year={2025},
10
+ howpublished={\url{https://huggingface.co/datasets/DLSCA/test}}
11
+ }
12
+ """
13
+
14
+ _DESCRIPTION = """
15
+ A test dataset using local numpy arrays for HuggingFace Datasets.
16
+ """
17
+
18
+ _HOMEPAGE = "https://huggingface.co/datasets/DLSCA/test"
19
+ _LICENSE = "MIT"
20
+
21
+ class TestDownloadManager(datasets.DownloadManager):
22
+ def __init__(self, data_dir):
23
+ self.data_dir = data_dir
24
+
25
+ def download_and_extract(self, url_or_urls):
26
+ # No download needed, just return the local data dir
27
+ return self.data_dir
28
+
29
+ class TestDataset(datasets.GeneratorBasedBuilder):
30
+ VERSION = datasets.Version("1.0.0")
31
+
32
+ def _info(self):
33
+ return datasets.DatasetInfo(
34
+ description=_DESCRIPTION,
35
+ features=datasets.Features(
36
+ {
37
+ "trace": datasets.features.Sequence(
38
+ datasets.Value("int8"), length=20971
39
+ ),
40
+ "label0": datasets.Value("int32"),
41
+ "label1": datasets.Value("int32"),
42
+ "label2": datasets.Value("int32"),
43
+ "label3": datasets.Value("int32"),
44
+ }
45
+ ),
46
+ supervised_keys=None,
47
+ homepage=_HOMEPAGE,
48
+ license=_LICENSE,
49
+ citation=_CITATION,
50
+ )
51
+
52
+ def _split_generators(self, dl_manager):
53
+ # Use the provided data_dir from load_dataset
54
+ data_dir = dl_manager.manual_dir if dl_manager.manual_dir else dl_manager.data_dir
55
+ traces_path = os.path.join(data_dir, "traces.npy")
56
+ labels_path = os.path.join(data_dir, "labels.npy")
57
+ return [
58
+ datasets.SplitGenerator(
59
+ name=datasets.Split.TRAIN,
60
+ gen_kwargs={
61
+ "traces_path": traces_path,
62
+ "labels_path": labels_path,
63
+ },
64
+ ),
65
+ ]
66
+
67
+ def _generate_examples(self, traces_path, labels_path):
68
+ traces = np.load(traces_path)
69
+ labels = np.load(labels_path)
70
+ for idx, (trace, label) in enumerate(zip(traces, labels)):
71
+ yield idx, {
72
+ "trace": trace.tolist(),
73
+ "label0": int(label[0]),
74
+ "label1": int(label[1]),
75
+ "label2": int(label[2]),
76
+ "label3": int(label[3]),
77
+ }
test_dataset.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datasets import load_dataset
2
+
3
+ def main():
4
+ # Load the dataset from the local script
5
+ ds = load_dataset(
6
+ 'test.py',
7
+ data_dir='data',
8
+ split='train',
9
+ trust_remote_code=True,
10
+ )
11
+ print(ds)
12
+ print(ds[0]) # Show the first example
13
+ print('Features:', ds.features)
14
+
15
+ if __name__ == "__main__":
16
+ main()