elsayedissa committed on
Commit
558d8ad
·
1 Parent(s): 65490e8

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. .DS_Store +0 -0
  2. data/files.zip +3 -0
  3. data/metadata.zip +3 -0
  4. saudi-trans.py +66 -0
.DS_Store ADDED
Binary file (6.15 kB). View file
 
data/files.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6281031e0f9f76fb5847ebef612ab4c3f9687e65a97b987f0ffe250218011223
3
+ size 9725534568
data/metadata.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7d9d270b058df94ec7fd2b9f27234245452eced9132db9e4ee14c3d3dc6a00d
3
+ size 893113
saudi-trans.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import datasets
3
+ import pandas as pd
4
+
5
# Human-readable description surfaced on the dataset card / DatasetInfo.
_DESCRIPTION = """\
This Saudi dataset is intended for speech translation from Saudi Arabic into English.
"""

# Placeholder citation text — TODO(review): replace with a real BibTeX entry.
_CITATION = "Some citation"

# Repository-relative directory that holds files.zip and metadata.zip.
_data_dir = "data"
+
13
class SaudiTranslation(datasets.GeneratorBasedBuilder):
    """Dataset builder for Saudi Arabic -> English speech translation.

    Audio clips are packaged in ``data/files.zip`` and per-split metadata
    CSVs (``train.csv`` / ``test.csv`` with ``audio`` and ``text`` columns)
    in ``data/metadata.zip``.
    """

    def _info(self):
        """Return the dataset schema: clip path, 16 kHz audio, and target sentence."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "sentence": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract both archives, then build the train/test splits.

        Args:
            dl_manager: ``datasets.DownloadManager`` used to fetch/extract
                the zip archives.

        Returns:
            list[datasets.SplitGenerator]: one generator per split.
        """
        download_dir = dl_manager.download_and_extract(
            {
                "files": os.path.join(_data_dir, "files.zip"),
                "metadata": os.path.join(_data_dir, "metadata.zip"),
            }
        )
        # Both splits share the same extracted audio and metadata roots.
        audio_dir = os.path.join(download_dir["files"], "files")
        metadata_dir = os.path.join(download_dir["metadata"], "metadata")

        # The splits differ only in name and CSV file, so build them from
        # one table instead of duplicating the SplitGenerator construction.
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "split": split,
                    "data_dir": audio_dir,
                    "metapath": os.path.join(metadata_dir, csv_name),
                },
            )
            for split, csv_name in (
                (datasets.Split.TRAIN, "train.csv"),
                (datasets.Split.TEST, "test.csv"),
            )
        ]

    def _generate_examples(self, data_dir, metapath, split):
        """Yield ``(key, example)`` pairs, one per row of the split's CSV.

        Args:
            data_dir: directory containing the extracted audio files.
            metapath: path to the split's CSV; assumes an ``audio`` column
                (file name relative to *data_dir*) and a ``text`` column
                with the English translation — TODO confirm against the
                packaged CSVs.
            split: split identifier (unused here; kept for the builder API).
        """
        metadata = pd.read_csv(metapath)
        for key, row in metadata.iterrows():
            audio_path = os.path.join(data_dir, row["audio"])
            yield key, {
                "audio": audio_path,
                "sentence": row["text"],
                "path": audio_path,
            }