"""Raw merged dump of Hinglish (hi-EN) datasets."""
import os

import pandas as pd

import datasets
| |
|
| | _DESCRIPTION = """\ |
| | Raw merged dump of Hinglish (hi-EN) datasets. |
| | """ |
| |
|
| | _HOMEPAGE = "https://huggingface.co/datasets/diwank/hinglish-dump" |
| | _LICENSE = "MIT" |
| |
|
| | _URLS = { |
| | subset: f"{_HOMEPAGE}/resolve/main/data/{subset}/data.h5" |
| | for subset in "crowd_transliteration hindi_romanized_dump hindi_xlit hinge hinglish_norm news2018".split() } |
| |
|
| | _FEATURE_NAMES = [ |
| | "target_hinglish", |
| | "source_hindi", |
| | "parallel_english", |
| | "annotations", |
| | "raw_input", |
| | "alternates", |
| | ] |
| |
|
| | config_names = _URLS.keys() |
| | version = datasets.Version("1.0.0") |
| |
|
| | class HinglishDumpDataset(datasets.GeneratorBasedBuilder): |
| | """Raw merged dump of Hinglish (hi-EN) datasets.""" |
| |
|
| | VERSION = version |
| | CONFIGS = config_names |
| |
|
| | BUILDER_CONFIGS = [ |
| | datasets.BuilderConfig(name=subset, version=version, description=f"Config for {subset}") |
| | for subset in config_names |
| | ] |
| | |
| | DEFAULT_CONFIG_NAME = None |
| |
|
| | def _info(self): |
| |
|
| | features = datasets.Features({ |
| | feature: datasets.Value("string") |
| | for feature in _FEATURE_NAMES |
| | }) |
| |
|
| | return datasets.DatasetInfo( |
| | description=_DESCRIPTION, |
| | features=features, |
| | homepage=_HOMEPAGE, |
| | license=_LICENSE, |
| | ) |
| |
|
| | def _split_generators(self, dl_manager): |
| | |
| | |
| | |
| |
|
| | urls = _URLS[self.config.name] |
| | filepath = self.data_dir = dl_manager.download_and_extract(urls) |
| | |
| | return [ |
| | datasets.SplitGenerator( |
| | name=getattr(datasets.Split, "VALIDATION" if split == "eval" else split.upper()), |
| | gen_kwargs=dict(filepath=filepath, split=split) ) |
| | for split in ["train", "eval", "test"] |
| | ] |
| | |
| | def _generate_examples(self, filepath, split): |
| | df = pd.read_hdf(filepath, key=split) |
| | |
| | for i, row in enumerate(df.to_dict('records')): |
| | yield i, row |