Kateht committed on
Commit
3b90238
·
verified ·
1 Parent(s): e0eafad

Upload dataset.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. dataset.py +82 -0
dataset.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import numpy as np
3
+ import datasets
4
+
5
+ # Mô tả dataset
6
+ _DESCRIPTION = """
7
+ VOYA Vietnamese Sign Language (VSL) dataset.
8
+ Dataset gồm các chuỗi keypoints đã trích xuất bằng MediaPipe cho nhận dạng ngôn ngữ ký hiệu.
9
+ Mỗi sample có shape (60, 1605), lưu trong 'sequences', với nhãn tương ứng trong 'labels'.
10
+ """
11
+
12
+ _HOMEPAGE = "https://huggingface.co/datasets/Kateht/VOYA_VSL"
13
+ _LICENSE = "MIT"
14
+ _CITATION = """
15
+ @misc{voya_vsl_2025,
16
+ author = {Kateht et al.},
17
+ title = {VOYA Vietnamese Sign Language Dataset},
18
+ year = {2025},
19
+ publisher = {Hugging Face},
20
+ howpublished = {\\url{https://huggingface.co/datasets/Kateht/VOYA_VSL}}
21
+ }
22
+ """
23
+
24
+
25
class VOYAVSLConfig(datasets.BuilderConfig):
    """BuilderConfig for the VOYA VSL dataset.

    The original defined an ``__init__`` that only forwarded ``**kwargs``
    to ``datasets.BuilderConfig`` — a pure pass-through override, so it is
    removed here. Construction (``name``/``version``/``description``
    keyword arguments) behaves exactly as before.
    """
28
+
29
+
30
class VOYAVSL(datasets.GeneratorBasedBuilder):
    """Builder for the VOYA Vietnamese Sign Language keypoint dataset.

    Every example pairs a MediaPipe keypoint sequence of shape (60, 1605)
    (``"sequences"``, float32) with an integer class label (``"labels"``).
    """

    BUILDER_CONFIGS = [
        VOYAVSLConfig(
            name="default",
            version=datasets.Version("1.0.0"),
            description="VOYA Vietnamese Sign Language dataset",
        )
    ]

    def _info(self):
        """Return dataset metadata: feature schema, homepage, license, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # Fixed-size sequence: 60 frames x 1605 keypoint features.
                    "sequences": datasets.Array2D(
                        shape=(60, 1605), dtype="float32"
                    ),
                    "labels": datasets.Value("int32"),
                }
            ),
            supervised_keys=("sequences", "labels"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the whole 'Merged' folder (rather than a single file).

        Returns a single TRAIN split; consumers are expected to carve out
        their own train/val/test partitions downstream.
        """
        data_dir = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/Kateht/VOYA_VSL/resolve/main/Merged"
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,  # split train/val/test yourself later
                gen_kwargs={"data_dir": data_dir},
            ),
        ]

    def _generate_examples(self, data_dir):
        """Yield ``(key, example)`` pairs from every ``.npz`` file in *data_dir*.

        Keys are a running integer index across all files; directory
        listing is sorted so key assignment is deterministic.
        """
        idx = 0
        for fname in sorted(os.listdir(data_dir)):
            if not fname.endswith(".npz"):
                continue
            fpath = os.path.join(data_dir, fname)
            # FIX: np.load(...) on an .npz returns an NpzFile backed by an
            # open zip handle that the original never closed; the context
            # manager guarantees it is released per file.
            with np.load(fpath) as data:
                sequences, labels = data["sequences"], data["labels"]
                for seq, label in zip(sequences, labels):
                    yield idx, {
                        "sequences": seq.astype("float32"),
                        "labels": int(label),
                    }
                    idx += 1