import logging
import os
from io import BytesIO
from typing import Callable, Dict, List, Literal, NamedTuple, Optional, Tuple, Union

import fitz as PyMuPDF
import pdf2image
import pikepdf
import PyPDF2
from codetiming import Timer
from datasets import load_dataset, load_dataset_builder, load_from_disk
from PIL import Image
from tqdm import tqdm

#https://gist.github.com/gstorer/f6a9f1dfe41e8e64dcf58d07afa9ab2a




# if os.getlogin() == "jordy": #local testing
#     DATASET = "/home/jordy/code/opensource/Beyond-Document-Page-Classification/src/tests/rvl_cdip_multi/rvl_cdip_multi.py"
#     stepwise = True


# Hugging Face Hub id of the multi-page RVL-CDIP dataset used throughout this script.
DATASET = "jordyvl/rvl_cdip_multi"
# When True, the dataset is additionally rebuilt step by step below (debugging aid).
stepwise = False


# Load the test split once at import time.
# NOTE(review): the cache dir is machine-specific — confirm /mnt/lerna/data/HFcache
# exists on the host running this script.
testds = load_dataset("jordyvl/rvl_cdip_multi", cache_dir="/mnt/lerna/data/HFcache", split="test")


# time how long each way to pdf2image lasts


def batched_conversion(pdf_file):
    """Convert a PDF on disk to a list of PIL images, 10 pages at a time.

    Converting in small page ranges keeps poppler's memory usage bounded for
    long documents instead of rasterizing the whole file in one call.

    Args:
        pdf_file: path to a PDF file on disk.

    Returns:
        List of PIL.Image pages, in document order.
    """
    info = pdf2image.pdfinfo_from_path(pdf_file, userpw=None, poppler_path=None)
    maxPages = info["Pages"]

    # BUG FIX: `logger` was never defined anywhere in this module, so this
    # line raised NameError; use a stdlib module logger instead.
    logging.getLogger(__name__).info(f"{pdf_file} has {str(maxPages)} pages")

    images = []

    # Rasterize in chunks of 10 pages; last_page is clamped to the page count.
    for page in range(1, maxPages + 1, 10):
        images.extend(
            pdf2image.convert_from_path(
                pdf_file, dpi=200, first_page=page, last_page=min(page + 10 - 1, maxPages)
            )
        )
    return images


def batch_get_images(streams, wrapper):
    """Extract images from each raw PDF byte stream in a batch.

    Returns a dict with a single "images" key, as expected by datasets.map.
    """
    extracted = []
    for pdf_stream in streams:
        extracted.append(get_images(pdf_stream, wrapper=wrapper))
    return {"images": extracted}


def batch_get_pages(streams, wrapper):
    """Count pages for each raw PDF byte stream in a batch.

    Returns a dict with a single "pages" key, as expected by datasets.map.
    """
    counts = []
    for pdf_stream in streams:
        counts.append(get_pages(pdf_stream, wrapper=wrapper))
    return {"pages": counts}

def get_pages(pdf_stream, wrapper="pike"):
    """Return the number of pages in a PDF given as raw bytes.

    Args:
        pdf_stream: PDF file contents as bytes.
        wrapper: backend to use — "pdf2image", "pike", "PyPDF2" or "pymupdf".

    Returns:
        int page count.

    Raises:
        ValueError: for an unrecognized `wrapper` (previously fell through
            and silently returned None).
    """
    if wrapper == "pdf2image":
        return pdf2image.pdfinfo_from_bytes(pdf_stream, userpw=None, poppler_path=None)["Pages"]
    pdf_file = BytesIO(pdf_stream)
    if wrapper == "pike":
        # BUG FIX: close the pikepdf document instead of leaking the handle.
        with pikepdf.Pdf.open(pdf_file) as pdf:
            return len(pdf.pages)
    if wrapper == "PyPDF2":
        return len(PyPDF2.PdfReader(pdf_file).pages)
    if wrapper == "pymupdf":
        # BUG FIX: close the PyMuPDF document instead of leaking it.
        with PyMuPDF.open(stream=pdf_stream, filetype="pdf") as doc:
            return len(doc)
    raise ValueError(f"Unknown PDF wrapper: {wrapper!r}")


def pymupdf_image_extraction(data: bytes):
    """Extract every embedded image from a PDF (given as bytes) via PyMuPDF.

    Returns:
        List of PIL.Image objects, in page order.
    """
    images = []
    with PyMuPDF.open(stream=data, filetype="pdf") as pdf_file:
        for page_index in range(len(pdf_file)):
            page = pdf_file[page_index]
            # IDIOM: dropped the unused `image_index`/`image_ext` locals from
            # the original loop; behavior is unchanged.
            for img in page.get_images():
                xref = img[0]  # first tuple entry is the image's xref number
                base_image = pdf_file.extract_image(xref)
                # `base_image` also carries "ext" (file extension), unused here.
                images.append(Image.open(BytesIO(base_image["image"])))
    return images

def pypdf_image_extraction(data: bytes):
    """Best-effort extraction of embedded images via PyPDF2.

    Any PyPDF2 failure is reported on stdout and whatever was extracted so
    far is returned (possibly an empty list).
    """
    images = []
    try:
        for page in PyPDF2.PdfReader(BytesIO(data)).pages:
            images.extend(Image.open(BytesIO(image.data)) for image in page.images)
            #images.append(Image.frombytes("RGB",(image.width,image.height),image.data)) #(image.name, #width,height
    except Exception as exc:
        print(f"PyPDF2 Image extraction failure: {exc}")
    return images

def pike_image_extraction(data: bytes):
    """Extract every embedded image from a PDF (given as bytes) via pikepdf.

    Returns:
        List of PIL.Image objects, in page order.
    """
    images = []
    # BUG FIX: open the document in a context manager so the pikepdf handle
    # is closed instead of leaked.
    with pikepdf.Pdf.open(BytesIO(data)) as reader:
        for page in reader.pages:
            # Only the image objects are needed, not their resource names.
            for raw_image in page.images.values():
                images.append(pikepdf.PdfImage(raw_image).as_pil_image())
    return images

def get_images(pdf_stream, wrapper="pike"):
    """Dispatch PDF image extraction to the backend selected by `wrapper`.

    Returns a list of PIL images, or None for an unrecognized wrapper name
    (matching the original fall-through behavior).
    """
    if wrapper == "pdf2image":
        # pdf2image rasterizes full pages rather than extracting embedded images.
        return pdf2image.convert_from_bytes(pdf_stream)
    extractors = {
        "pike": pike_image_extraction,
        "pymupdf": pymupdf_image_extraction,
        "PyPDF2": pypdf_image_extraction,
    }
    extractor = extractors.get(wrapper)
    if extractor is not None:
        return extractor(pdf_stream)


# use same tool for page and images

# Benchmark each PDF backend on the first 100 test documents.
subset = testds.select(list(range(0, 100)))

images_per_binding = {}
for binding in ["pike", "pdf2image", "PyPDF2", "pymupdf"]:
    # codetiming.Timer prints the elapsed wall-clock time on context exit.
    with Timer(name=f"{binding}", text=binding + " Elapsed time: {:.4f} seconds"):
        # `binding` is consumed within the same loop iteration, so the
        # late-binding closure is safe here.
        func = lambda batch: batch_get_images(batch["file"], wrapper=binding)
        images_per_binding[binding] = subset.map(func, batched=True, keep_in_memory=False)

"""
Image.open(BytesIO(images_per_binding["pymupdf"]["images"][0][0]["bytes"])).show()
Image.open(BytesIO(images_per_binding["PyPDF2"]["images"][0][0]["bytes"])).show()
Image.open(BytesIO(images_per_binding["pike"]["images"][0][0]["bytes"])).show()
Image.open(BytesIO(images_per_binding["pdf2image"]["images"][0][0]["bytes"])).show()
"""

# whut?
# now they are PIL: Image.open(BytesIO(images_per_binding["pymupdf"]["images"][0][0]["bytes"]))

# BUG FIX: removed the leftover `import pdb; pdb.set_trace()` breakpoint that
# halted every non-interactive run of this script at this point.

if stepwise:
    # Debugging path: rebuild the dataset step by step.
    ds = load_dataset(DATASET, cache_dir="/mnt/lerna/data/HFcache")
    # BUG FIX: removed the leftover pdb breakpoint here.

    builder = load_dataset_builder(DATASET)
    # BUG FIX: download_and_prepare() returns None — the original rebound
    # `ds` to it before immediately reassigning; call it without rebinding.
    builder.download_and_prepare()
    ds = builder.as_dataset(split="test")


# count pages: average number of PDF pages per test document.
# BUG FIX: the original iterated `ds["test"]`, but `ds` is only bound when
# `stepwise` is True (NameError in the default configuration); iterate the
# already-loaded `testds` test split instead.
counter = []
for i, d in tqdm(enumerate(testds)):
    counter.append(len(d["file"]))
print(sum(counter) / len(counter))

"""
pdfinfo ylhw0010.pdf
Creator:        CaptureSDK ENGine - www.scansoft.com
Producer:       Scansoft Capture Development System V12.0; modified using iText 2.1.7 by 1T3XT
CreationDate:   Sat Jan  1 01:00:00 2005 CET
ModDate:        Sat Dec 29 08:04:44 2018 CET
Tagged:         no
UserProperties: no
Suspects:       no
Form:           none
JavaScript:     no
Pages:          1
Encrypted:      no
Page size:      576 x 756 pts
Page rot:       0
File size:      24443 bytes
Optimized:      yes
PDF version:    1.3
jordy@jordy-OMEN:~/Downl
"""