vineelnani16 commited on
Commit
26b6aee
·
1 Parent(s): 9fedf38
Files changed (4) hide show
  1. app.py +201 -0
  2. packages.txt +3 -0
  3. requirements.txt +16 -0
  4. setup.sh +5 -0
app.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import subprocess
3
+
4
# Ensure poppler-utils and tesseract-ocr are installed
def install_dependencies():
    """Install system packages (poppler, tesseract) and the Telugu traineddata.

    Idempotent: skips the apt-get round-trip when tesseract is already on
    PATH, and skips the traineddata download when the file already exists,
    so repeated app restarts are fast.

    Raises:
        subprocess.CalledProcessError: if any installation command fails.
    """
    import shutil  # local import: only needed for this one-time setup step

    tessdata_dir = "/usr/share/tesseract-ocr/4.00/tessdata"
    try:
        # Only hit apt-get when the binary is actually missing.
        if shutil.which("tesseract") is None:
            subprocess.run(["apt-get", "update"], check=True)
            subprocess.run(
                ["apt-get", "install", "-y",
                 "poppler-utils", "tesseract-ocr", "libtesseract-dev"],
                check=True,
            )
        # os.makedirs is clearer and cheaper than shelling out to mkdir.
        os.makedirs(tessdata_dir, exist_ok=True)
        # Download the Telugu language model only once.
        if not os.path.exists(os.path.join(tessdata_dir, "tel.traineddata")):
            subprocess.run(
                ["wget",
                 "https://github.com/tesseract-ocr/tessdata_best/raw/main/tel.traineddata",
                 "-P", tessdata_dir + "/"],
                check=True,
            )
    except subprocess.CalledProcessError as e:
        print(f"An error occurred while installing dependencies: {e}")
        raise
14
+
15
# Run the one-time system setup at import time so the OCR toolchain exists
# before pytesseract / pdf2image are imported further below.
# NOTE(review): this executes apt-get on every startup; presumably fine for a
# Hugging Face Space container, but confirm the runtime permits root installs.
install_dependencies()

# Point tesseract at the prefix whose tessdata/ subdirectory received
# tel.traineddata in install_dependencies().
os.environ["TESSDATA_PREFIX"] = "/usr/share/tesseract-ocr/4.00/"
18
+
19
+ import cv2 as cv
20
+ import numpy as np
21
+ import pytesseract
22
+ from pdf2image import convert_from_path
23
+ import gradio as gr
24
+ import json
25
+ import os
26
+
27
# Function to rescale the frame
def rescaleFrame(frame, scale=0.75):
    """Return *frame* resized by *scale* (default 75%) using INTER_AREA."""
    h, w = frame.shape[0], frame.shape[1]
    new_size = (int(w * scale), int(h * scale))
    return cv.resize(frame, new_size, interpolation=cv.INTER_AREA)
33
+
34
# Function to apply gamma correction
def apply_gamma(image, gamma=1.0):
    """Apply gamma correction to *image* via a 256-entry lookup table."""
    inv_gamma = 1.0 / gamma
    # Vectorized LUT: identical values to a per-element Python loop.
    lut = (((np.arange(256) / 255.0) ** inv_gamma) * 255).astype("uint8")
    return cv.LUT(image, lut)
39
+
40
# Function to apply adaptive thresholding
def adaptive_threshold(image):
    """Binarize *image* with Gaussian adaptive thresholding (block 11, C=2)."""
    grayscale = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    return cv.adaptiveThreshold(
        grayscale, 255,
        cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY,
        11, 2,
    )
44
+
45
# Function to apply edge detection
def edge_detection(image, low_threshold=50, high_threshold=150):
    """Run Canny edge detection on *image*.

    Args:
        image: BGR image (numpy array).
        low_threshold: lower hysteresis threshold (default 50, as before —
            thresholds were previously hard-coded).
        high_threshold: upper hysteresis threshold (default 150, as before).

    Returns:
        Single-channel edge map from cv.Canny.
    """
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    return cv.Canny(gray, low_threshold, high_threshold)
49
+
50
# Function to apply morphological transformations
def morphological_transformation(image):
    """Otsu-binarize *image*, then close small gaps with a 3x3 rect kernel."""
    grayscale = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    _, binarized = cv.threshold(grayscale, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    closing_kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    return cv.morphologyEx(binarized, cv.MORPH_CLOSE, closing_kernel)
56
+
57
# Function to process image for text extraction
def process_image(img, method='default'):
    """Preprocess *img* for OCR using the named *method*.

    Args:
        img: BGR image (numpy array).
        method: one of 'default', 'adaptive_threshold', 'edge_detection',
            'morphological'.

    Returns:
        Preprocessed single-channel image ready for pytesseract.

    Raises:
        ValueError: for an unrecognized *method*. (Previously this silently
            returned None, which only surfaced later as a pytesseract crash.)
    """
    resized_image = rescaleFrame(img)

    if method == 'default':
        gray = cv.cvtColor(resized_image, cv.COLOR_BGR2GRAY)
        blur = cv.GaussianBlur(gray, (3, 3), 0)
        # Low gamma (0.3) darkens mid-tones so faint ink survives Otsu.
        gamma_corrected = apply_gamma(blur, gamma=0.3)
        _, thresh = cv.threshold(gamma_corrected, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
        kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
        return cv.morphologyEx(thresh, cv.MORPH_CLOSE, kernel)
    elif method == 'adaptive_threshold':
        return adaptive_threshold(resized_image)
    elif method == 'edge_detection':
        return edge_detection(resized_image)
    elif method == 'morphological':
        return morphological_transformation(resized_image)
    raise ValueError(f"Unknown preprocessing method: {method!r}")
74
+
75
# Function to extract text from processed image
def extract_text_from_image(image, langs='tel'):
    """OCR *image* with Tesseract and return the recognized text (Telugu by default)."""
    recognized = pytesseract.image_to_string(image, lang=langs)
    return recognized
78
+
79
# Directory where the accumulated JSON transcript is written.
output_dir = "output"
# exist_ok avoids the check-then-create race of the old exists()+makedirs().
os.makedirs(output_dir, exist_ok=True)

# Maps "Page number: N" -> {"Content": [non-empty lines]} for the session.
# NOTE(review): module-level mutable state is shared by all concurrent users.
all_texts = {}
84
+
85
def save_and_next(page_num, text, extracted_texts, original_images, total_pages):
    """Persist the (possibly edited) text for the current page to JSON, then
    OCR the next page and return it for review.

    Returns a 4-tuple matching the Gradio outputs
    [text_editor, page_num, image_output, download file]:
    (textbox update, next page number, image update, path to all_texts.json).
    """
    page_num = int(page_num)  # Ensure page_num is an integer
    total_pages = int(total_pages)  # Ensure total_pages is an integer
    # Store the page as a list of its non-empty lines under a readable key.
    formatted_text = {
        f"Page number: {page_num}": {
            "Content": [
                line for line in text.split('\n') if line.strip() != ''
            ]
        }
    }
    # Merge into the module-level dict and rewrite the whole JSON file on
    # every save, so a partial session still leaves a valid file on disk.
    all_texts.update(formatted_text)
    json_path = os.path.join(output_dir, "all_texts.json")
    with open(json_path, 'w', encoding='utf-8') as f:
        json.dump(all_texts, f, ensure_ascii=False, indent=4)

    next_page_num = page_num + 1  # Increment to next page
    if next_page_num <= total_pages:
        next_page_image = original_images[next_page_num - 1]
        # Try every preprocessing method and keep the OCR result with the
        # most characters. NOTE(review): output length is only a proxy for
        # OCR confidence — noisy output can win; consider the word-level
        # confidences from pytesseract.image_to_data instead.
        methods = ['default', 'adaptive_threshold', 'edge_detection', 'morphological']
        best_text = ""
        max_confidence = -1
        for method in methods:
            processed_image = process_image(next_page_image, method=method)
            # This rebinds `text`; the saved parameter was already consumed above.
            text = extract_text_from_image(processed_image, langs='tel')
            confidence = len(text)
            if confidence > max_confidence:
                max_confidence = confidence
                best_text = text
        # NOTE(review): appending assumes pages are visited strictly in order,
        # and the user's edits are never written back into extracted_texts, so
        # navigate_to_page may later show stale text — confirm intended.
        extracted_texts.append(best_text)
        return gr.update(value=best_text), next_page_num, gr.update(value=next_page_image, height=None, width=None), json_path
    else:
        # Past the last page: keep the page number and report completion.
        return "All pages processed", page_num, None, json_path
117
+
118
def skip_page(page_num, extracted_texts, original_images, total_pages):
    """Advance to the next page without saving the current page's text.

    Returns a 3-tuple for the Gradio outputs [text_editor, page_num,
    image_output]; past the last page it reports completion instead.
    """
    next_page_num = int(page_num) + 1  # Ensure page_num is an integer and increment to next page
    total_pages = int(total_pages)  # Ensure total_pages is an integer
    if next_page_num > total_pages:
        return "All pages processed", page_num, None

    upcoming = original_images[next_page_num - 1]
    # OCR the page with every preprocessing method and keep the longest
    # result (character count serves as a crude confidence score; ties go
    # to the earliest method, matching the previous strict-greater scan).
    candidates = [
        extract_text_from_image(process_image(upcoming, method=m), langs='tel')
        for m in ['default', 'adaptive_threshold', 'edge_detection', 'morphological']
    ]
    best_text = max(candidates, key=len)
    extracted_texts.append(best_text)
    return gr.update(value=best_text), next_page_num, gr.update(value=upcoming, height=None, width=None)
137
+
138
def upload_pdf(pdf):
    """Handle a PDF upload: render all pages, OCR page 1, seed session state.

    Returns (image update, textbox update, page number 1, extracted_texts
    list, page images, page count) matching the Gradio upload outputs.
    """
    pages = convert_from_path(pdf.name)
    first_page = np.array(pages[0])

    # OCR the first page with every preprocessing method, keeping the
    # longest output (a crude stand-in for OCR confidence).
    best_text = ""
    max_confidence = -1
    for strategy in ['default', 'adaptive_threshold', 'edge_detection', 'morphological']:
        candidate = extract_text_from_image(
            process_image(first_page, method=strategy), langs='tel'
        )
        if len(candidate) > max_confidence:
            max_confidence = len(candidate)
            best_text = candidate

    original_images = [np.array(page) for page in pages]
    extracted_texts = [best_text]
    return (
        gr.update(value=original_images[0], height=None, width=None),
        gr.update(value=best_text),
        1,
        extracted_texts,
        original_images,
        len(pages),
    )
155
+
156
def navigate_to_page(page_num, extracted_texts, original_images):
    """Jump to an already-OCR'd page chosen via the per-page buttons.

    Returns (image update, textbox update, page number) for the Gradio
    outputs [image_output, text_editor, page_num].
    """
    page_num = int(page_num)  # gr.Number may deliver a float; match the other handlers
    if 0 <= page_num - 1 < len(original_images):
        return (
            gr.update(value=original_images[page_num - 1], height=None, width=None),
            gr.update(value=extracted_texts[page_num - 1]),
            page_num,
        )
    else:
        # Bug fix: the error string used to be routed into the *image*
        # component; show it in the text editor and leave the image alone.
        return gr.update(), gr.update(value="Invalid Page Number"), page_num
161
+
162
def display_pdf_and_text():
    """Build the Gradio Blocks UI: PDF upload, page image, editable OCR text,
    Save-and-Next / Skip buttons, and (attempted) per-page navigation buttons.

    Returns the constructed gr.Blocks app, ready for .launch().
    """
    with gr.Blocks() as demo:
        gr.Markdown("## PDF Viewer and Text Editor")
        pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"])
        with gr.Row():
            image_output = gr.Image(label="Page Image", type="numpy")
            text_editor = gr.Textbox(label="Extracted Text", lines=10, interactive=True)
        page_num = gr.Number(value=1, label="Page Number", visible=True)
        # Per-session state shared across all callbacks.
        extracted_texts = gr.State()
        original_images = gr.State()
        total_pages = gr.State()
        save_next_button = gr.Button("Save and Next")
        skip_button = gr.Button("Skip")
        pdf_input.upload(upload_pdf, inputs=pdf_input, outputs=[image_output, text_editor, page_num, extracted_texts, original_images, total_pages])

        # NOTE(review): gr.File is instantiated inline inside `outputs`, so the
        # JSON-download component is created outside the laid-out UI — confirm
        # it actually renders; normally it should be declared above with the
        # other components and referenced here.
        save_next_button.click(fn=save_and_next,
                               inputs=[page_num, text_editor, extracted_texts, original_images, total_pages],
                               outputs=[text_editor, page_num, image_output, gr.File(label="Download JSON")])

        skip_button.click(fn=skip_page,
                          inputs=[page_num, extracted_texts, original_images, total_pages],
                          outputs=[text_editor, page_num, image_output])

        page_buttons = gr.Row()

        def update_page_buttons(total_pages, extracted_texts, original_images):
            # NOTE(review): creating Buttons and wiring .click from inside a
            # callback, and passing the plain int `i` as a click *input*, is
            # not supported by Gradio's event model — these navigation buttons
            # likely never work; needs a rework (e.g. a fixed button pool or a
            # page-number input driving navigate_to_page).
            page_buttons.clear()  # Clear previous buttons if any
            buttons = []
            for i in range(1, total_pages + 1):
                button = gr.Button(str(i), variant="primary", size="small")
                button.click(navigate_to_page, inputs=[i, extracted_texts, original_images], outputs=[image_output, text_editor, page_num])
                buttons.append(button)
            return buttons

        total_pages.change(fn=update_page_buttons, inputs=[total_pages, extracted_texts, original_images], outputs=[page_buttons])

    return demo
199
+
200
if __name__ == "__main__":
    # Build and launch the app only when executed directly, so the module can
    # be imported (e.g. by tests) without starting a web server. Hugging Face
    # Spaces runs app.py as __main__, so deployed behavior is unchanged.
    iface = display_pdf_and_text()
    iface.launch()
packages.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ poppler-utils
2
+ tesseract-ocr
3
+ libtesseract-dev
requirements.txt ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ opencv-python==4.10.0.82
2
+ opencv-python-headless==4.10.0.82
3
+ numpy==1.26.4
4
+ pytesseract==0.3.10
5
+ pdf2image==1.17.0
6
+ gradio==4.36.1
7
+ gradio_client==1.0.1
8
+ json5==0.9.25
9
+ jsonpointer==2.4
10
+ jsonschema==4.22.0
11
+ jsonschema-specifications==2023.12.1
12
+ fastjsonschema==2.19.1
13
+ orjson==3.10.4
14
+ python-json-logger==2.0.7
15
+ ujson==5.10.0
16
+ # NOTE: run ./setup.sh separately — shell commands ("!...") are not valid requirements.txt syntax and break `pip install -r`
setup.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ apt-get update
3
+ apt-get install -y poppler-utils tesseract-ocr libtesseract-dev
4
+ mkdir -p /usr/share/tesseract-ocr/4.00/tessdata
5
+ wget https://github.com/tesseract-ocr/tessdata_best/raw/main/tel.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata/