parquet-converter committed on
Commit
39052b1
·
1 Parent(s): c229910

Update parquet files

Browse files
data/train.tar.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a208fb7b71e36a49adbabc14c010f7bc14f384330dd39298446e82306f6ffb95
3
- size 959848246
 
 
 
 
data/metadata-test.csv → default/hinglish-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ed7ff641ada1bfaad7f20f2acfbc886a90ca7abbfa2d1d9897b0e7345242bf8a
3
- size 524398
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a23e202a622a6d7935f5cf911127f0f12f5528926533ec5eb0adb1028171d780
3
+ size 57306248
data/metadata.csv → default/hinglish-train-00000-of-00002.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ba611b683474f8f49a8dfb3cc27afd5d956edee79eec931d0292715bf43ef430
3
- size 8742550
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:655f41064ec246e8574862285dfde28852ca0d7f95644ffdde2938e05bbdc186
3
+ size 502847100
data/test.tar.gz → default/hinglish-train-00001-of-00002.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4b631b2a45d77c64fb706d553fba35b5b7edd4d35368300532b86e4b66d82578
3
- size 55633425
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7dcd503ebdf665da8977b8fcc654072a29a7589f6c5c6cb6f8580ee469eac7e8
3
+ size 486874467
hinglish.py DELETED
@@ -1,90 +0,0 @@
1
- import csv
2
- import json
3
- import os
4
- import datasets
5
-
6
- # _ANNOT_URL = {
7
- # "train": "https://huggingface.co/datasets/ujs/hinglish/resolve/main/data/metadata.csv",
8
- # "test": "https://huggingface.co/datasets/ujs/hinglish/resolve/main/data/metadata-test.csv",
9
- # }
10
-
11
- _ANNOT_URL = {
12
- "train": "./data/metadata.csv",
13
- "test": "./data/metadata-test.csv"
14
- }
15
-
16
- # _DATA_URL = [
17
- # "https://huggingface.co/datasets/ujs/hinglish/resolve/main/data/train.tar.gz",
18
- # "https://huggingface.co/datasets/ujs/hinglish/resolve/main/data/test.tar.gz"
19
- # ]
20
-
21
- _DATA_URL = [
22
- "./data/train.tar.gz",
23
- "./data/test.tar.gz"
24
- ]
25
-
26
- _DESCRIPTION = """\
27
- A Hugginface version of the Hindi-English code-switched dataset from OpenSLR-104.
28
- """
29
-
30
class HinglishDataset(datasets.GeneratorBasedBuilder):
    """Builder for the Hindi-English code-switched speech dataset (OpenSLR-104).

    Each example pairs an audio clip from the split's tar archive with its
    transcript sentence taken from the split's metadata CSV.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: description and the example features."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "path": datasets.Value("string"),
                # Raw bytes are attached in _generate_examples; decoded at 16 kHz.
                "audio": datasets.Audio(sampling_rate=16_000),
                "sentence": datasets.Value("string"),
            }),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download annotations/archives and declare the train/test splits.

        ``_DATA_URL`` order is train first, test second; ``path_to_clips`` is
        the directory prefix of the clips inside each archive.
        """
        prompts_paths = dl_manager.download(_ANNOT_URL)
        archive = dl_manager.download(_DATA_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "prompts_path": prompts_paths["train"],
                    "path_to_clips": "train",
                    "audio_files": dl_manager.iter_archive(archive[0]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "prompts_path": prompts_paths["test"],
                    "path_to_clips": "test",
                    "audio_files": dl_manager.iter_archive(archive[1]),
                },
            ),
        ]

    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
        """Yield ``(id, example)`` pairs for one split.

        Args:
            prompts_path: CSV file whose first column is the clip path inside
                the archive and whose second column is the transcript.
            path_to_clips: directory prefix of the clips inside the archive.
            audio_files: iterator of ``(path, file_obj)`` from
                ``dl_manager.iter_archive``.

        Fix vs. previous revision: rows are parsed with ``csv.reader``
        (``csv`` is imported at the top of this file) instead of a bare
        ``str.split(",")``, so transcripts containing commas or quoted
        fields are no longer truncated at the first comma; short/blank
        rows are skipped instead of raising ``IndexError``. The pointless
        ``"/".join([data[0]])`` no-op was removed.
        """
        examples = {}
        # newline="" is the documented way to open files for the csv module.
        with open(prompts_path, encoding="utf-8", newline="") as f:
            for data in csv.reader(f):
                if len(data) < 2:
                    # Tolerate blank or malformed lines rather than crash.
                    continue
                audio_path = data[0]
                examples[audio_path] = {
                    "path": audio_path,
                    "sentence": data[1],
                }
        inside_clips_dir = False
        id_ = 0
        for path, f in audio_files:
            if path.startswith(path_to_clips):
                inside_clips_dir = True
                if path in examples:
                    audio = {"path": path, "bytes": f.read()}
                    yield id_, {**examples[path], "audio": audio}
                    id_ += 1
            elif inside_clips_dir:
                # Archive members under path_to_clips are contiguous; once we
                # leave that directory no further matches can appear.
                break