File size: 3,534 Bytes
8d22c51
 
 
 
 
a8a66a9
8d22c51
 
a8a66a9
 
 
9078e85
 
 
 
 
 
8d22c51
 
 
 
 
 
9078e85
8d22c51
 
 
 
9078e85
8d22c51
 
 
9078e85
8d22c51
 
 
 
 
 
 
9078e85
8d22c51
 
9078e85
8d22c51
 
 
9078e85
 
 
 
 
8d22c51
 
 
 
 
 
 
 
 
9078e85
 
 
8d22c51
 
 
 
 
 
 
 
 
9078e85
 
 
 
8d22c51
 
 
 
 
 
 
ab97400
8d22c51
 
 
ab97400
8d22c51
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9078e85
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
import json
import os
from pathlib import Path
from PIL import Image
import datasets
import logging

# Module-level logger obtained from the `datasets` library so log output
# integrates with its logging configuration; INFO level makes the progress
# messages emitted during example generation visible.
logger = datasets.logging.get_logger(__name__)
logger.setLevel("INFO")
# StreamHandler defaults to stderr; added so records are printed even when
# no other handler is configured by the host application.
stream_handler = logging.StreamHandler()
logger.addHandler(stream_handler)

"""
- o.a/o.a.f/0011656044-53.layout.json file has been deleted because it was empty

"""

def load_json(json_path):
    """Load and return the parsed contents of a JSON file.

    Args:
        json_path: Path to the JSON file (str or os.PathLike).

    Returns:
        The deserialized JSON value (typically a dict for these annotations).
    """
    # Explicit encoding avoids depending on the platform's default locale.
    with open(json_path, "r", encoding="utf-8") as f:
        return json.load(f)


_CITATION = """
}
"""

_DESCRIPTION = """\
https://zenodo.org/record/6540454
"""


class CDIPAnnotationsConfig(datasets.BuilderConfig):
    """BuilderConfig for the CDIP annotations dataset."""

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``
              (e.g. ``name``, ``version``, ``description``).
        """
        super().__init__(**kwargs)


class CDIPAnnotations(datasets.GeneratorBasedBuilder):
    """IIT-CDIP OCR annotations: per-page word lists with bounding boxes.

    Source data described at https://zenodo.org/record/6540454.
    """

    BUILDER_CONFIGS = [
        CDIPAnnotationsConfig(
            name="cdip-annotations",
            version=datasets.Version("1.0.0"),
            description="CDIP Annotations",
        ),
    ]

    def _info(self):
        """Declare the feature schema: an id, the words, and one box per word."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    # One integer coordinate list per word (presumably
                    # [x0, y0, x1, y1] — TODO confirm against the source data).
                    "bboxes": datasets.Sequence(
                        datasets.Sequence(datasets.Value("int64"))
                    ),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        """Return a single TRAIN split rooted at the local annotation tree."""
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # NOTE(review): hard-coded machine-local path; parameterize
                    # (e.g. via config or data_dir) before sharing this script.
                    "annot_root": "/home/jinho/datasets/IIT-CDIP-annotations"
                },
            ),
        ]

    def _generate_examples(self, annot_root):
        """Walk ``annot_root`` and yield ``(key, example)`` pairs.

        Expected layout: ``annot_root/<subfolder>/<json_folder>/**/*.json``,
        where each JSON file contains an OCR hierarchy of
        blocks > paragraphs > lines > words.

        Args:
            annot_root: Root directory of the annotation JSON tree.

        Yields:
            Tuples of a running integer key and a dict with ``id`` (file
            name), ``words``, and ``bboxes`` (one box per word, flattened
            across the whole page).
        """
        logger.info("⏳ Generating examples from = %s", annot_root)

        cnt = 0
        # Sort every directory/file listing so example order (and therefore
        # the generated keys) is deterministic across runs and filesystems.
        for subfolder in sorted(os.listdir(annot_root)):  # e.g. b.a, b.b
            subfolder_path = os.path.join(annot_root, subfolder)
            logger.info("subfolder = %s", subfolder)

            for json_folder in sorted(os.listdir(subfolder_path)):  # e.g. b.a.p
                json_folder_path = os.path.join(subfolder_path, json_folder)
                json_paths = sorted(Path(json_folder_path).glob("**/*.json"))
                logger.info("json_folder = %s, files = %d", json_folder, len(json_paths))

                for json_path in json_paths:
                    words = []
                    bboxes = []
                    data = load_json(json_path)
                    # Flatten the block > paragraph > line > word hierarchy
                    # into parallel word/box lists for the whole page.
                    for block in data["blocks"]:
                        for paragraph in block["paragraphs"]:
                            for line in paragraph["lines"]:
                                line_words = line["words"]
                                words.extend(w["text"] for w in line_words)
                                bboxes.extend(w["box"] for w in line_words)
                    cnt += 1

                    yield cnt, {
                        "id": str(json_path.name),
                        "words": words,
                        "bboxes": bboxes,
                    }