File size: 3,782 Bytes
33268ec
 
 
 
 
 
8a6ee80
33268ec
 
 
 
 
 
 
8a6ee80
33268ec
 
 
 
 
 
 
 
 
f669f4f
33268ec
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import json

import datasets
from datasets import Value, Sequence, Features


# BibTeX citation for the WIT paper (Srinivasan et al., 2021).
# NOTE(review): the leading "\\n" puts a literal backslash-n into the citation
# text — presumably a mangled line continuation; confirm against upstream.
_CITATION = """\\n@article{srinivasan2021wit,
  title={WIT: Wikipedia-based Image Text Dataset for Multimodal Multilingual Machine Learning},
  author={Srinivasan, Krishna and Raman, Karthik and Chen, Jiecao and Bendersky, Michael and Najork, Marc},
  journal={arXiv preprint arXiv:2103.01913},
  year={2021}
}
"""

# Human-readable dataset summary surfaced through DatasetInfo.
_DESCRIPTION = """\\nWikipedia-based Image Text (WIT) Dataset is a large multimodal multilingual dataset. WIT is composed of a curated set
 of 37.6 million entity rich image-text examples with 11.5 million unique images across 108 Wikipedia languages. Its
 size enables WIT to be used as a pretraining dataset for multimodal machine learning models.
"""

_HOMEPAGE = "https://github.com/google-research-datasets/wit"

# Base URL of the mirrored gzip'd JSON-lines shards on the HuggingFace bucket.
_URL = "https://storage.googleapis.com/huggingface-nlp/datasets/wit/"
_URLS = {
    # TODO - This should be in range(400). Haven't mirrored all the files yet.
    'train': [_URL + f"part-{'%05d' % i}-48a6f07e-bb86-4735-aac7-883349f41a28-c000.json.gz" for i in range(10)]
}

class Wit(datasets.GeneratorBasedBuilder):
    """WIT: Wikipedia-based Image Text Dataset for Multimodal Multilingual Machine Learning."""

    def _info(self):
        """Return the dataset metadata and feature schema.

        All `wit_features` fields are stored as strings — even numeric-looking
        ones such as `original_height` — matching the source JSON, which keeps
        them as text.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                # Raw image bytes, base64-encoded as text.
                'b64_bytes': Value('string'),
                # Precomputed per-image embedding vector.
                'embedding': Sequence(Value('float64')),
                'image_url': Value('string'),
                'metadata_url': Value('string'),
                # One entry per (language, page) occurrence of the image.
                'wit_features': Sequence({
                    "language": Value('string'),
                    "page_url": Value('string'),
                    "image_url": Value('string'),
                    "attribution_passes_lang_id": Value("string"),
                    "caption_alt_text_description": Value('string'),
                    "caption_attribution_description": Value('string'),
                    "caption_reference_description": Value('string'),
                    "caption_title_and_reference_description": Value('string'),
                    "context_page_description": Value('string'),
                    "context_section_description": Value('string'),
                    "hierarchical_section_title": Value('string'),
                    "is_main_image": Value('string'),
                    "mime_type": Value('string'),
                    "original_height": Value('string'),
                    "original_width": Value('string'),
                    "page_changed_recently": Value('string'),
                    "page_title": Value('string'),
                    "section_title": Value('string'),
                })
            }),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the train shards and return the split generators."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": downloaded_files["train"]},
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield (key, example) pairs from the extracted JSON-lines shards.

        Keys must be unique across the whole split, so a single running
        counter spans all files; the previous per-file line index produced
        duplicate keys as soon as more than one shard was read.
        """
        wit_feature_names = self.info.features['wit_features'].feature.keys()
        idx = 0
        for filepath in filepaths:
            # Shards are gzip-extracted JSON lines; read as UTF-8 text.
            # (The old `json.loads(line, encoding='utf-8')` call is a
            # TypeError on Python 3.9+, where the kwarg was removed.)
            with open(filepath, encoding="utf-8") as f:
                for line in f:
                    row_data = json.loads(line)
                    # The declared schema requires every wit_features key to
                    # be present; fill absent fields with None.
                    for feature in row_data['wit_features']:
                        for fname in wit_feature_names:
                            feature.setdefault(fname, None)
                    yield str(idx), row_data
                    idx += 1