parquet-converter committed on
Commit
28ea423
·
1 Parent(s): 8e418a3

Update parquet files

Browse files
README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- license: cc-by-nc-4.0
3
- ---
4
-
5
- ## Required installation
6
-
7
- ```bash
8
- pip3 install pypdf2 pdf2image
9
- sudo apt-get install poppler-utils
10
- ```
 
 
 
 
 
 
 
 
 
 
 
data/data_dir.zip → default/unit-test_pd_ffolder-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:89a60a715e225655873d441df5f03cc635134cede115d5e3feb605d60e77c320
3
- size 573872
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7fde82f823fa9e527516fc991f6a49e6c2c8554fdae3c9c79e8657f1c6e318b
3
+ size 17154588
default/unit-test_pd_ffolder-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7fde82f823fa9e527516fc991f6a49e6c2c8554fdae3c9c79e8657f1c6e318b
3
+ size 17154588
data/data.tar.gz → default/unit-test_pd_ffolder-validation.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0e7002d267f56b381eb8049312f389f87b9a8e525daf9e03b65c2fb30dfe9b7d
3
- size 572943
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1072eef50e6fc765fd8c96e3ceec3dc779ccc9d2e33d188706e849da1a879543
3
+ size 1058
unit-test_PDFfolder.py DELETED
@@ -1,80 +0,0 @@
1
- import os
2
- from typing import List
3
-
4
- import datasets
5
- import pdf2image
6
-
7
- logger = datasets.logging.get_logger(__name__)
8
-
9
- _DESCRIPTION = "A generic pdf folder"
10
-
11
- _CLASSES = ["categoryA", "categoryB"] # define in advance
12
-
13
- _URL = "https://huggingface.co/datasets/jordyvl/unit-test_PDFfolder/resolve/main/data/data.tar.gz"
14
-
15
- # folder
16
- # train
17
- # categoryA
18
- # file1
19
- # test
20
- # ...
21
-
22
-
23
- class PdfFolder(datasets.GeneratorBasedBuilder):
24
- def _info(self):
25
- return datasets.DatasetInfo(
26
- description=_DESCRIPTION,
27
- features=datasets.Features(
28
- {
29
- "file": datasets.Sequence(datasets.Image()),
30
- "labels": datasets.features.ClassLabel(names=_CLASSES),
31
- }
32
- ),
33
- task_templates=None,
34
- )
35
-
36
- def _split_generators(
37
- self, dl_manager: datasets.DownloadManager
38
- ) -> List[datasets.SplitGenerator]:
39
-
40
- archive_path = dl_manager.download(_URL)
41
-
42
- return [
43
- datasets.SplitGenerator(
44
- name=datasets.Split.TRAIN,
45
- gen_kwargs={
46
- "archive_iterator": dl_manager.iter_archive(archive_path),
47
- "supposed_labelset": "train",
48
- },
49
- ),
50
- datasets.SplitGenerator(
51
- name=datasets.Split.TEST,
52
- gen_kwargs={
53
- "archive_iterator": dl_manager.iter_archive(archive_path),
54
- "supposed_labelset": "test",
55
- },
56
- ),
57
- datasets.SplitGenerator(
58
- name=datasets.Split.VALIDATION,
59
- gen_kwargs={
60
- "archive_iterator": dl_manager.iter_archive(archive_path),
61
- "supposed_labelset": "val",
62
- },
63
- ),
64
- ]
65
-
66
- def _generate_examples(self, archive_iterator, supposed_labelset):
67
-
68
- extensions = {"pdf", "PDF"}
69
- for file_path, file_obj in archive_iterator:
70
-
71
- if file_path.split(".")[-1] not in extensions: # metadata.jsonlines
72
- continue
73
-
74
- folder, labelset, label, filename = file_path.split("/")
75
- if labelset != supposed_labelset:
76
- continue
77
-
78
- images = pdf2image.convert_from_bytes(file_obj.read())
79
-
80
- yield file_path, {"file": images, "labels": label}