jordand committed on
Commit
57816c2
·
verified ·
1 Parent(s): 3993afa

Initial tarred dataset upload (2025-11-15T21:47:05.550723Z)

Browse files
Files changed (6) hide show
  1. .gitattributes +0 -58
  2. LICENSE +2 -0
  3. README.md +29 -0
  4. VCTK.tar +3 -0
  5. index.jsonl +109 -0
  6. loader.py +29 -0
.gitattributes CHANGED
@@ -1,59 +1 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mds filter=lfs diff=lfs merge=lfs -text
13
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
- *.model filter=lfs diff=lfs merge=lfs -text
15
- *.msgpack filter=lfs diff=lfs merge=lfs -text
16
- *.npy filter=lfs diff=lfs merge=lfs -text
17
- *.npz filter=lfs diff=lfs merge=lfs -text
18
- *.onnx filter=lfs diff=lfs merge=lfs -text
19
- *.ot filter=lfs diff=lfs merge=lfs -text
20
- *.parquet filter=lfs diff=lfs merge=lfs -text
21
- *.pb filter=lfs diff=lfs merge=lfs -text
22
- *.pickle filter=lfs diff=lfs merge=lfs -text
23
- *.pkl filter=lfs diff=lfs merge=lfs -text
24
- *.pt filter=lfs diff=lfs merge=lfs -text
25
- *.pth filter=lfs diff=lfs merge=lfs -text
26
- *.rar filter=lfs diff=lfs merge=lfs -text
27
- *.safetensors filter=lfs diff=lfs merge=lfs -text
28
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
- *.tar.* filter=lfs diff=lfs merge=lfs -text
30
  *.tar filter=lfs diff=lfs merge=lfs -text
31
- *.tflite filter=lfs diff=lfs merge=lfs -text
32
- *.tgz filter=lfs diff=lfs merge=lfs -text
33
- *.wasm filter=lfs diff=lfs merge=lfs -text
34
- *.xz filter=lfs diff=lfs merge=lfs -text
35
- *.zip filter=lfs diff=lfs merge=lfs -text
36
- *.zst filter=lfs diff=lfs merge=lfs -text
37
- *tfevents* filter=lfs diff=lfs merge=lfs -text
38
- # Audio files - uncompressed
39
- *.pcm filter=lfs diff=lfs merge=lfs -text
40
- *.sam filter=lfs diff=lfs merge=lfs -text
41
- *.raw filter=lfs diff=lfs merge=lfs -text
42
- # Audio files - compressed
43
- *.aac filter=lfs diff=lfs merge=lfs -text
44
- *.flac filter=lfs diff=lfs merge=lfs -text
45
- *.mp3 filter=lfs diff=lfs merge=lfs -text
46
- *.ogg filter=lfs diff=lfs merge=lfs -text
47
- *.wav filter=lfs diff=lfs merge=lfs -text
48
- # Image files - uncompressed
49
- *.bmp filter=lfs diff=lfs merge=lfs -text
50
- *.gif filter=lfs diff=lfs merge=lfs -text
51
- *.png filter=lfs diff=lfs merge=lfs -text
52
- *.tiff filter=lfs diff=lfs merge=lfs -text
53
- # Image files - compressed
54
- *.jpg filter=lfs diff=lfs merge=lfs -text
55
- *.jpeg filter=lfs diff=lfs merge=lfs -text
56
- *.webp filter=lfs diff=lfs merge=lfs -text
57
- # Video files - compressed
58
- *.mp4 filter=lfs diff=lfs merge=lfs -text
59
- *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  *.tar filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
LICENSE ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ License: Creative Commons Attribution 4.0 International (CC BY 4.0)
2
+ https://creativecommons.org/licenses/by/4.0/
README.md ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: cc-by-4.0
3
+ pretty_name: "VCTK Speaker embeddings (tarred)"
4
+ tags:
5
+ - audio
6
+ - tts
7
+ - speaker-embeddings
8
+ - VCTK
9
+ ---
10
+
11
+ # VCTK Speaker Embeddings (tarred)
12
+
13
+ Items: 109
14
+
15
+ This dataset ships as a single tar at the repo root. Members preserve paths like
16
+ `VCTK/<id>/audio.mp3` and `VCTK/<id>/speaker_latent.safetensors`.
17
+
18
+ Quick start (Python):
19
+ from huggingface_hub import hf_hub_download
20
+ import runpy
21
+ loader = runpy.run_path(hf_hub_download('jordand/echo-embeddings-vctk-tar', repo_type='dataset', filename='loader.py'))
22
+ VCTK_PATH = loader['ensure_tar_tree']('jordand/echo-embeddings-vctk-tar', 'VCTK')
23
+
24
+ Minimal index:
25
+ one JSON object per line in index.jsonl:
26
+ {"id": "<folder_name>"}
27
+
28
+ Attribution:
29
+ Contains audio and embeddings derived from the CSTR VCTK Corpus. Distributed under CC BY 4.0; attribution required.
VCTK.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d69dc7f39e25185c688e29443332ab0f58370db1eefa1b0851e0690e0db83c61
3
+ size 122542080
index.jsonl ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {"id": "p225"}
2
+ {"id": "p226"}
3
+ {"id": "p227"}
4
+ {"id": "p228"}
5
+ {"id": "p229"}
6
+ {"id": "p230"}
7
+ {"id": "p231"}
8
+ {"id": "p232"}
9
+ {"id": "p233"}
10
+ {"id": "p234"}
11
+ {"id": "p236"}
12
+ {"id": "p237"}
13
+ {"id": "p238"}
14
+ {"id": "p239"}
15
+ {"id": "p240"}
16
+ {"id": "p241"}
17
+ {"id": "p243"}
18
+ {"id": "p244"}
19
+ {"id": "p245"}
20
+ {"id": "p246"}
21
+ {"id": "p247"}
22
+ {"id": "p248"}
23
+ {"id": "p249"}
24
+ {"id": "p250"}
25
+ {"id": "p251"}
26
+ {"id": "p252"}
27
+ {"id": "p253"}
28
+ {"id": "p254"}
29
+ {"id": "p255"}
30
+ {"id": "p256"}
31
+ {"id": "p257"}
32
+ {"id": "p258"}
33
+ {"id": "p259"}
34
+ {"id": "p260"}
35
+ {"id": "p261"}
36
+ {"id": "p262"}
37
+ {"id": "p263"}
38
+ {"id": "p264"}
39
+ {"id": "p265"}
40
+ {"id": "p266"}
41
+ {"id": "p267"}
42
+ {"id": "p268"}
43
+ {"id": "p269"}
44
+ {"id": "p270"}
45
+ {"id": "p271"}
46
+ {"id": "p272"}
47
+ {"id": "p273"}
48
+ {"id": "p274"}
49
+ {"id": "p275"}
50
+ {"id": "p276"}
51
+ {"id": "p277"}
52
+ {"id": "p278"}
53
+ {"id": "p279"}
54
+ {"id": "p280"}
55
+ {"id": "p281"}
56
+ {"id": "p282"}
57
+ {"id": "p283"}
58
+ {"id": "p284"}
59
+ {"id": "p285"}
60
+ {"id": "p286"}
61
+ {"id": "p287"}
62
+ {"id": "p288"}
63
+ {"id": "p292"}
64
+ {"id": "p293"}
65
+ {"id": "p294"}
66
+ {"id": "p295"}
67
+ {"id": "p297"}
68
+ {"id": "p298"}
69
+ {"id": "p299"}
70
+ {"id": "p300"}
71
+ {"id": "p301"}
72
+ {"id": "p302"}
73
+ {"id": "p303"}
74
+ {"id": "p304"}
75
+ {"id": "p305"}
76
+ {"id": "p306"}
77
+ {"id": "p307"}
78
+ {"id": "p308"}
79
+ {"id": "p310"}
80
+ {"id": "p311"}
81
+ {"id": "p312"}
82
+ {"id": "p313"}
83
+ {"id": "p314"}
84
+ {"id": "p316"}
85
+ {"id": "p317"}
86
+ {"id": "p318"}
87
+ {"id": "p323"}
88
+ {"id": "p326"}
89
+ {"id": "p329"}
90
+ {"id": "p330"}
91
+ {"id": "p333"}
92
+ {"id": "p334"}
93
+ {"id": "p335"}
94
+ {"id": "p336"}
95
+ {"id": "p339"}
96
+ {"id": "p340"}
97
+ {"id": "p341"}
98
+ {"id": "p343"}
99
+ {"id": "p345"}
100
+ {"id": "p347"}
101
+ {"id": "p351"}
102
+ {"id": "p360"}
103
+ {"id": "p361"}
104
+ {"id": "p362"}
105
+ {"id": "p363"}
106
+ {"id": "p364"}
107
+ {"id": "p374"}
108
+ {"id": "p376"}
109
+ {"id": "s5"}
loader.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, tarfile
2
+ from pathlib import Path
3
+
4
+ def _safe_members(tf, prefix):
5
+ if not prefix.endswith('/'):
6
+ prefix += '/'
7
+ for m in tf.getmembers():
8
+ if not m.name.startswith(prefix):
9
+ continue
10
+ p = Path(m.name)
11
+ if any(part == '..' for part in p.parts) or p.is_absolute():
12
+ continue
13
+ yield m
14
+
15
def ensure_tar_tree(repo_id: str, root: str, *, token: str | None = None, max_workers: int = 4) -> Path:
    """Download the dataset snapshot and return the extracted ``root`` tree.

    Downloads ``<root>.tar`` (plus the small metadata files) from the Hub
    dataset *repo_id* into the local snapshot cache and extracts it once.
    On subsequent calls the existing extracted directory is returned
    without re-extracting.

    Args:
        repo_id: Hub dataset repo id, e.g. ``'jordand/echo-embeddings-vctk-tar'``.
        root: name of the tar (without ``'.tar'``) and of the top-level
            directory its members live under.
        token: optional Hub auth token forwarded to ``snapshot_download``.
        max_workers: parallel download workers for ``snapshot_download``.

    Returns:
        Path to ``<snapshot>/<root>``.

    Raises:
        FileNotFoundError: if the snapshot lacks ``<root>.tar``, or the tar
            contains no members under ``<root>/``.
    """
    import tempfile

    # Opt in to the accelerated downloader unless the caller already chose.
    os.environ.setdefault('HF_HUB_ENABLE_HF_TRANSFER', '1')
    from huggingface_hub import snapshot_download

    # NOTE: resume_download is deprecated (a no-op) in huggingface_hub, so it
    # is intentionally not passed; downloads are incremental by default.
    base = Path(snapshot_download(
        repo_id=repo_id, repo_type='dataset',
        allow_patterns=[f'{root}.tar', 'index.jsonl', 'README.md', 'LICENSE'],
        token=token, max_workers=max_workers))

    root_dir = base / root
    if root_dir.exists():
        # Presence implies a complete tree: extraction below is staged and
        # moved into place in one rename, so a crash mid-extraction cannot
        # leave a partial directory at this path.
        return root_dir

    tar_path = base / f'{root}.tar'
    if not tar_path.exists():
        raise FileNotFoundError(f'Expected {tar_path} in snapshot')

    # Extract into a temporary sibling directory (same filesystem), then
    # atomically rename the finished tree into place.
    with tempfile.TemporaryDirectory(dir=base, prefix=f'.{root}-extract-') as staging:
        with tarfile.open(tar_path, 'r') as tf:
            tf.extractall(staging, members=_safe_members(tf, root))
        extracted = Path(staging) / root
        if not extracted.exists():
            raise FileNotFoundError(f'{tar_path} contains no members under {root}/')
        os.replace(extracted, root_dir)
    return root_dir