konfuzio-com JaMe76 committed on
Commit
b5f5be6
·
0 Parent(s):

Duplicate from deepdoctection/deepdoctection

Browse files

Co-authored-by: Janis Meyer <JaMe76@users.noreply.huggingface.co>

Files changed (9) hide show
  1. .gitattributes +27 -0
  2. README.md +14 -0
  3. app.py +249 -0
  4. conf_dd_one.yaml +26 -0
  5. packages.txt +1 -0
  6. requirements.txt +3 -0
  7. sample_1.jpg +0 -0
  8. sample_2.png +0 -0
  9. sample_3.pdf +0 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ftz filter=lfs diff=lfs merge=lfs -text
6
+ *.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.h5 filter=lfs diff=lfs merge=lfs -text
8
+ *.joblib filter=lfs diff=lfs merge=lfs -text
9
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
+ *.model filter=lfs diff=lfs merge=lfs -text
11
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
12
+ *.onnx filter=lfs diff=lfs merge=lfs -text
13
+ *.ot filter=lfs diff=lfs merge=lfs -text
14
+ *.parquet filter=lfs diff=lfs merge=lfs -text
15
+ *.pb filter=lfs diff=lfs merge=lfs -text
16
+ *.pt filter=lfs diff=lfs merge=lfs -text
17
+ *.pth filter=lfs diff=lfs merge=lfs -text
18
+ *.rar filter=lfs diff=lfs merge=lfs -text
19
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
20
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
21
+ *.tflite filter=lfs diff=lfs merge=lfs -text
22
+ *.tgz filter=lfs diff=lfs merge=lfs -text
23
+ *.wasm filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Deepdoctection
3
+ emoji: 🏃
4
+ colorFrom: yellow
5
+ colorTo: indigo
6
+ sdk: gradio
7
+ sdk_version: 3.0.20
8
+ app_file: app.py
9
+ pinned: false
10
+ license: apache-2.0
11
+ duplicated_from: deepdoctection/deepdoctection
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ os.system('pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.9/index.html')
3
+
4
+ # work around: https://discuss.huggingface.co/t/how-to-install-a-specific-version-of-gradio-in-spaces/13552
5
+ os.system("pip uninstall -y gradio")
6
+ os.system("pip install gradio==3.4.1")
7
+
8
+ from os import getcwd, path, environ
9
+ import deepdoctection as dd
10
+ from deepdoctection.dataflow.serialize import DataFromList
11
+
12
+ import gradio as gr
13
+
14
+
15
+ _DD_ONE = "conf_dd_one.yaml"
16
+ _DETECTIONS = ["table", "ocr"]
17
+
18
+ dd.ModelCatalog.register("layout/model_final_inf_only.pt",dd.ModelProfile(
19
+ name="layout/model_final_inf_only.pt",
20
+ description="Detectron2 layout detection model trained on private datasets",
21
+ config="dd/d2/layout/CASCADE_RCNN_R_50_FPN_GN.yaml",
22
+ size=[274632215],
23
+ tp_model=False,
24
+ hf_repo_id=environ.get("HF_REPO"),
25
+ hf_model_name="model_final_inf_only.pt",
26
+ hf_config_file=["Base-RCNN-FPN.yaml", "CASCADE_RCNN_R_50_FPN_GN.yaml"],
27
+ categories={"1": dd.LayoutType.text,
28
+ "2": dd.LayoutType.title,
29
+ "3": dd.LayoutType.list,
30
+ "4": dd.LayoutType.table,
31
+ "5": dd.LayoutType.figure},
32
+ ))
33
+
34
+ # Set up of the configuration and logging. Models are globally defined, so that they are not re-loaded once the input
35
+ # updates
36
+ cfg = dd.set_config_by_yaml(path.join(getcwd(),_DD_ONE))
37
+ cfg.freeze(freezed=False)
38
+ cfg.DEVICE = "cpu"
39
+ cfg.freeze()
40
+
41
+ # layout detector
42
+ layout_config_path = dd.ModelCatalog.get_full_path_configs(cfg.CONFIG.D2LAYOUT)
43
+ layout_weights_path = dd.ModelDownloadManager.maybe_download_weights_and_configs(cfg.WEIGHTS.D2LAYOUT)
44
+ categories_layout = dd.ModelCatalog.get_profile(cfg.WEIGHTS.D2LAYOUT).categories
45
+ assert categories_layout is not None
46
+ assert layout_weights_path is not None
47
+ d_layout = dd.D2FrcnnDetector(layout_config_path, layout_weights_path, categories_layout, device=cfg.DEVICE)
48
+
49
+ # cell detector
50
+ cell_config_path = dd.ModelCatalog.get_full_path_configs(cfg.CONFIG.D2CELL)
51
+ cell_weights_path = dd.ModelDownloadManager.maybe_download_weights_and_configs(cfg.WEIGHTS.D2CELL)
52
+ categories_cell = dd.ModelCatalog.get_profile(cfg.WEIGHTS.D2CELL).categories
53
+ assert categories_cell is not None
54
+ d_cell = dd.D2FrcnnDetector(cell_config_path, cell_weights_path, categories_cell, device=cfg.DEVICE)
55
+
56
+ # row/column detector
57
+ item_config_path = dd.ModelCatalog.get_full_path_configs(cfg.CONFIG.D2ITEM)
58
+ item_weights_path = dd.ModelDownloadManager.maybe_download_weights_and_configs(cfg.WEIGHTS.D2ITEM)
59
+ categories_item = dd.ModelCatalog.get_profile(cfg.WEIGHTS.D2ITEM).categories
60
+ assert categories_item is not None
61
+ d_item = dd.D2FrcnnDetector(item_config_path, item_weights_path, categories_item, device=cfg.DEVICE)
62
+
63
+ # word detector
64
+ det = dd.DoctrTextlineDetector()
65
+
66
+ # text recognizer
67
+ rec = dd.DoctrTextRecognizer()
68
+
69
+
def build_gradio_analyzer(table, table_ref, ocr):
    """Building the Detectron2/DocTr analyzer based on the given config

    :param table: if True, add table cell/row/column detection and segmentation
    :param table_ref: if True, additionally add table segmentation refinement
    :param ocr: if True, add word detection, text recognition, word matching
        and reading-order components
    :return: a ``dd.DoctectionPipe`` with the selected components
    """

    # Persist the UI choices in the global config: unfreeze, mutate, re-freeze.
    cfg.freeze(freezed=False)
    cfg.TAB = table
    cfg.TAB_REF = table_ref
    cfg.OCR = ocr
    cfg.freeze()

    pipe_component_list = []
    # Page layout detection; detected regions are cropped so the sub-image
    # services below can run on them.
    layout = dd.ImageLayoutService(d_layout, to_image=True, crop_image=True)
    pipe_component_list.append(layout)

    if cfg.TAB:

        # Cell detection inside each detected table.
        # NOTE(review): the {1: 6} dict presumably remaps detector category
        # ids to pipeline category ids — confirm against SubImageLayoutService.
        detect_result_generator = dd.DetectResultGenerator(categories_cell)
        cell = dd.SubImageLayoutService(d_cell, dd.LayoutType.table, {1: 6}, detect_result_generator)
        pipe_component_list.append(cell)

        # Row/column detection inside each detected table (same id-remap note).
        detect_result_generator = dd.DetectResultGenerator(categories_item)
        item = dd.SubImageLayoutService(d_item, dd.LayoutType.table, {1: 7, 2: 8}, detect_result_generator)
        pipe_component_list.append(item)

        # Assign cells to rows/columns. The threshold pair depends on whether
        # the configured assignment rule is "iou" or "ioa".
        table_segmentation = dd.TableSegmentationService(
            cfg.SEGMENTATION.ASSIGNMENT_RULE,
            cfg.SEGMENTATION.IOU_THRESHOLD_ROWS
            if cfg.SEGMENTATION.ASSIGNMENT_RULE in ["iou"]
            else cfg.SEGMENTATION.IOA_THRESHOLD_ROWS,
            cfg.SEGMENTATION.IOU_THRESHOLD_COLS
            if cfg.SEGMENTATION.ASSIGNMENT_RULE in ["iou"]
            else cfg.SEGMENTATION.IOA_THRESHOLD_COLS,
            cfg.SEGMENTATION.FULL_TABLE_TILING,
            cfg.SEGMENTATION.REMOVE_IOU_THRESHOLD_ROWS,
            cfg.SEGMENTATION.REMOVE_IOU_THRESHOLD_COLS,
        )
        pipe_component_list.append(table_segmentation)

        if cfg.TAB_REF:
            table_segmentation_refinement = dd.TableSegmentationRefinementService()
            pipe_component_list.append(table_segmentation_refinement)

    if cfg.OCR:
        # Word/text-line detection with DocTr.
        d_layout_text = dd.ImageLayoutService(det, to_image=True, crop_image=True)
        pipe_component_list.append(d_layout_text)

        # Text recognition on the detected word crops.
        d_text = dd.TextExtractionService(rec, extract_from_roi="WORD")
        pipe_component_list.append(d_text)

        # Attach recognized words to their parent layout blocks; the rule
        # ("iou" vs anything else) selects the matching threshold.
        match = dd.MatchingService(
            parent_categories=cfg.WORD_MATCHING.PARENTAL_CATEGORIES,
            child_categories=dd.LayoutType.word,
            matching_rule=cfg.WORD_MATCHING.RULE,
            threshold=cfg.WORD_MATCHING.IOU_THRESHOLD
            if cfg.WORD_MATCHING.RULE in ["iou"]
            else cfg.WORD_MATCHING.IOA_THRESHOLD,
        )
        pipe_component_list.append(match)
        # Establish a reading order over the listed text blocks.
        order = dd.TextOrderService(
            text_container=dd.LayoutType.word,
            floating_text_block_names=[dd.LayoutType.title, dd.LayoutType.text, dd.LayoutType.list],
            text_block_names=[
                dd.LayoutType.title,
                dd.LayoutType.text,
                dd.LayoutType.list,
                dd.LayoutType.cell,
                dd.CellType.header,
                dd.CellType.body,
            ],
        )
        pipe_component_list.append(order)

    pipe = dd.DoctectionPipe(pipeline_component_list=pipe_component_list)

    return pipe
144
+
145
+
def prepare_output(dp, add_table, add_ocr):
    """Convert an analyzed datapoint into the four objects shown by the UI.

    :param dp: processed page datapoint exposing ``layouts``, ``tables``,
        ``as_dict()`` and ``viz()``
    :param add_table: if True, collect the HTML of all recognized tables
    :param add_ocr: if True, order layout sections by ``reading_order``
        before dumping them to text
    :return: tuple ``(visualization image, contiguous text, table HTML or
        None, JSON-serializable dict)``
    """
    out = dp.as_dict()
    out.pop("_image")  # raw image payload is not useful in the JSON view

    layout_items = dp.layouts
    if add_ocr:
        # Sorted copy instead of in-place .sort(): avoids mutating the
        # datapoint's own layout list as a side effect.
        layout_items = sorted(layout_items, key=lambda item: item.reading_order)
    # join() instead of string += in a loop (quadratic in the worst case).
    layout_items_str = "".join(
        f"\n {item.category_name}: {item.text}" for item in layout_items
    )

    html = None
    if add_table:
        html_list = [table.html for table in dp.tables]
        if html_list:
            html = "\n".join(html_list)

    return dp.viz(show_table_structure=False), layout_items_str, html, out
166
+
167
+
def analyze_image(img, pdf, attributes):
    """Run the analyzer on an uploaded image or PDF and prepare the UI outputs.

    :param img: numpy image from the gradio ``Image`` component, or None
    :param pdf: uploaded file object exposing ``.name`` (gradio ``File``), or None
    :param attributes: selected extraction options, a subset of ``_DETECTIONS``
    :return: the tuple produced by :func:`prepare_output` for the first page
    :raises ValueError: if neither an image nor a PDF was provided
    """

    # creating an image object and passing to the analyzer by using dataflows
    add_table = _DETECTIONS[0] in attributes
    add_ocr = _DETECTIONS[1] in attributes

    # table refinement is coupled to table detection in this demo
    analyzer = build_gradio_analyzer(add_table, add_table, add_ocr)

    if img is not None:
        image = dd.Image(file_name="input.png", location="")
        # Reverse the channel axis. NOTE(review): presumably converts gradio's
        # RGB array to the BGR order the detectors expect — confirm.
        image.image = img[:, :, ::-1]

        df = DataFromList(lst=[image])
        df = analyzer.analyze(dataset_dataflow=df)
    elif pdf:
        df = analyzer.analyze(path=pdf.name, max_datapoints=3)
    else:
        # Was a bare `raise ValueError`; give the user/log a reason.
        raise ValueError("No input given: upload an image or a PDF first")

    df.reset_state()

    # Only the first datapoint (first page) is displayed.
    dp = next(iter(df))

    return prepare_output(dp, add_table, add_ocr)
193
+
194
+
# Top-level UI definition; `scrollbar.css` is loaded from the working directory.
demo = gr.Blocks(css="scrollbar.css")

with demo:
    # Header with the project description.
    with gr.Box():
        gr.Markdown("<h1><center>deepdoctection - A Document AI Package</center></h1>")
        gr.Markdown("<strong>deep</strong>doctection is a Python library that orchestrates document extraction"
                    " and document layout analysis tasks using deep learning models. It does not implement models"
                    " but enables you to build pipelines using highly acknowledged libraries for object detection,"
                    " OCR and selected NLP tasks and provides an integrated frameworks for fine-tuning, evaluating"
                    " and running models.\n This pipeline consists of a stack of models powered by <strong>Detectron2"
                    "</strong> for layout analysis and table recognition and <strong>DocTr</strong> for OCR.")
    # Input section: one tab per input type plus the extraction options.
    with gr.Box():
        gr.Markdown("<h2><center>Upload a document and choose setting</center></h2>")
        with gr.Row():
            with gr.Column():
                with gr.Tab("Image upload"):
                    with gr.Column():
                        inputs = gr.Image(type='numpy', label="Original Image")
                with gr.Tab("PDF upload (only first image will be processed) *"):
                    with gr.Column():
                        inputs_pdf = gr.File(label="PDF")
                gr.Markdown("<sup>* If an image is cached in tab, remove it first</sup>")
            with gr.Column():
                # Clickable sample documents shipped with the Space.
                gr.Examples(
                    examples=[path.join(getcwd(), "sample_1.jpg"), path.join(getcwd(), "sample_2.png")],
                    inputs = inputs)
                gr.Examples(examples=[path.join(getcwd(), "sample_3.pdf")], inputs = inputs_pdf)

        with gr.Row():
            # Checkboxes for the optional "table" and "ocr" pipeline stages.
            tok_input = gr.CheckboxGroup(
                _DETECTIONS, value=_DETECTIONS, label="Additional extractions", interactive=True)
        with gr.Row():
            btn = gr.Button("Run model", variant="primary")

    # Output section: contiguous text, table HTML, raw JSON, annotated image.
    with gr.Box():
        gr.Markdown("<h2><center>Outputs</center></h2>")
        with gr.Row():
            with gr.Column():
                with gr.Box():
                    gr.Markdown("<center><strong>Contiguous text</strong></center>")
                    image_text = gr.Textbox()
                with gr.Box():
                    gr.Markdown("<center><strong>Table</strong></center>")
                    html = gr.HTML()
                with gr.Box():
                    gr.Markdown("<center><strong>JSON</strong></center>")
                    json = gr.JSON()
            with gr.Column():
                with gr.Box():
                    gr.Markdown("<center><strong>Layout detection</strong></center>")
                    image_output = gr.Image(type="numpy", label="Output Image")

    # Wire the run button to the analyzer; output order must match
    # prepare_output's return tuple.
    btn.click(fn=analyze_image, inputs=[inputs, inputs_pdf, tok_input], outputs=[image_output, image_text, html, json])

demo.launch()
conf_dd_one.yaml ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ CONFIG:
2
+ D2LAYOUT: dd/d2/layout/CASCADE_RCNN_R_50_FPN_GN.yaml
3
+ D2CELL: dd/d2/cell/CASCADE_RCNN_R_50_FPN_GN.yaml
4
+ D2ITEM: dd/d2/item/CASCADE_RCNN_R_50_FPN_GN.yaml
5
+ WEIGHTS:
6
+ D2LAYOUT: layout/model_final_inf_only.pt
7
+ D2CELL: cell/d2_model_1849999_cell_inf_only.pt
8
+ D2ITEM: item/d2_model_1639999_item_inf_only.pt
9
+ SEGMENTATION:
10
+ ASSIGNMENT_RULE: ioa
11
+ IOU_THRESHOLD_ROWS: 0.01
12
+ IOU_THRESHOLD_COLS: 0.001
13
+ IOA_THRESHOLD_ROWS: 0.4
14
+ IOA_THRESHOLD_COLS: 0.4
15
+ FULL_TABLE_TILING: True
16
+ REMOVE_IOU_THRESHOLD_ROWS: 0.001
17
+ REMOVE_IOU_THRESHOLD_COLS: 0.001
18
+ WORD_MATCHING:
19
+ PARENTAL_CATEGORIES:
20
+ - TEXT
21
+ - TITLE
22
+ - CELL
23
+ - LIST
24
+ RULE: ioa
25
+ IOU_THRESHOLD: 0.001
26
+ IOA_THRESHOLD: 0.6
packages.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ poppler-utils
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ torch==1.9.0
2
+ torchvision==0.10.0
3
+ git+https://github.com/deepdoctection/deepdoctection#egg=deepdoctection[hf]
sample_1.jpg ADDED
sample_2.png ADDED
sample_3.pdf ADDED
Binary file (203 kB). View file